author     Linus Torvalds <torvalds@linux-foundation.org>  2009-12-09 22:01:47 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-12-09 22:01:47 -0500
commit     67dd2f5a669f48e48ea1013fb80522adca8287f4 (patch)
tree       eee4e7f15df90f899211cde0a669d661085de05d
parent     5327b9b83a9c45a3fcbcda224a2b02d9eea9f6bb (diff)
parent     42d61b9b415686d81eaa022b846737548876e51d (diff)
Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (72 commits)
  [S390] 3215/3270 console: remove wrong comment
  [S390] dasd: remove BKL from extended error reporting code
  [S390] vmlogrdr: remove BKL
  [S390] vmur: remove BKL
  [S390] zcrypt: remove BKL
  [S390] 3270: remove BKL
  [S390] vmwatchdog: remove lock_kernel() from open() function
  [S390] monwriter: remove lock_kernel() from open() function
  [S390] monreader: remove lock_kernel() from open() function
  [S390] s390: remove unused nfsd #includes
  [S390] ftrace: build ftrace.o when CONFIG_FTRACE_SYSCALLS is set for s390
  [S390] etr/stp: put correct per cpu variable
  [S390] tty3270: move keyboard compat ioctls
  [S390] sclp: improve servicability setting
  [S390] s390: use change recording override for kernel mapping
  [S390] MAINTAINERS: Add s390 drivers block
  [S390] use generic sockios.h header file
  [S390] use generic termbits.h header file
  [S390] smp: remove unused typedef and defines
  [S390] cmm: free pages on hibernate.
  ...
-rw-r--r--  MAINTAINERS                          |    3
-rw-r--r--  arch/s390/Kconfig                    |   15
-rw-r--r--  arch/s390/defconfig                  |    1
-rw-r--r--  arch/s390/include/asm/atomic.h       |    8
-rw-r--r--  arch/s390/include/asm/ccwdev.h       |    4
-rw-r--r--  arch/s390/include/asm/mmu_context.h  |    4
-rw-r--r--  arch/s390/include/asm/pgalloc.h      |    3
-rw-r--r--  arch/s390/include/asm/pgtable.h      |    4
-rw-r--r--  arch/s390/include/asm/setup.h        |   17
-rw-r--r--  arch/s390/include/asm/smp.h          |   54
-rw-r--r--  arch/s390/include/asm/sockios.h      |   21
-rw-r--r--  arch/s390/include/asm/termbits.h     |  206
-rw-r--r--  arch/s390/include/asm/todclk.h       |   23
-rw-r--r--  arch/s390/include/asm/uaccess.h      |    2
-rw-r--r--  arch/s390/kernel/Makefile            |    1
-rw-r--r--  arch/s390/kernel/compat_linux.c      |    6
-rw-r--r--  arch/s390/kernel/compat_linux.h      |    4
-rw-r--r--  arch/s390/kernel/head64.S            |    3
-rw-r--r--  arch/s390/kernel/setup.c             |   36
-rw-r--r--  arch/s390/kernel/time.c              |    4
-rw-r--r--  arch/s390/kernel/vdso.c              |    9
-rw-r--r--  arch/s390/kvm/Kconfig                |    1
-rw-r--r--  arch/s390/lib/uaccess_mvcos.c        |    4
-rw-r--r--  arch/s390/lib/uaccess_pt.c           |  147
-rw-r--r--  arch/s390/mm/cmm.c                   |   61
-rw-r--r--  arch/s390/mm/fault.c                 |  378
-rw-r--r--  arch/s390/mm/pgtable.c               |    2
-rw-r--r--  arch/s390/mm/vmem.c                  |   11
-rw-r--r--  drivers/s390/block/dasd.c            |  237
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c   |   47
-rw-r--r--  drivers/s390/block/dasd_alias.c      |   77
-rw-r--r--  drivers/s390/block/dasd_diag.c       |   20
-rw-r--r--  drivers/s390/block/dasd_eckd.c       |  170
-rw-r--r--  drivers/s390/block/dasd_eckd.h       |    4
-rw-r--r--  drivers/s390/block/dasd_eer.c        |    5
-rw-r--r--  drivers/s390/block/dasd_fba.c        |   11
-rw-r--r--  drivers/s390/block/dasd_int.h        |   13
-rw-r--r--  drivers/s390/block/dasd_ioctl.c      |    4
-rw-r--r--  drivers/s390/char/con3215.c          |    1
-rw-r--r--  drivers/s390/char/con3270.c          |    1
-rw-r--r--  drivers/s390/char/fs3270.c           |   10
-rw-r--r--  drivers/s390/char/monreader.c        |    8
-rw-r--r--  drivers/s390/char/monwriter.c        |    7
-rw-r--r--  drivers/s390/char/sclp_cmd.c         |    1
-rw-r--r--  drivers/s390/char/tape.h             |    9
-rw-r--r--  drivers/s390/char/tape_34xx.c        |    8
-rw-r--r--  drivers/s390/char/tape_3590.c        |    2
-rw-r--r--  drivers/s390/char/tape_block.c       |   17
-rw-r--r--  drivers/s390/char/tape_char.c        |   54
-rw-r--r--  drivers/s390/char/tape_core.c        |   65
-rw-r--r--  drivers/s390/char/tape_proc.c        |    2
-rw-r--r--  drivers/s390/char/tty3270.c          |   20
-rw-r--r--  drivers/s390/char/vmlogrdr.c         |    8
-rw-r--r--  drivers/s390/char/vmur.c             |    3
-rw-r--r--  drivers/s390/char/vmwatchdog.c       |   29
-rw-r--r--  drivers/s390/cio/Makefile            |    2
-rw-r--r--  drivers/s390/cio/ccwreq.c            |  328
-rw-r--r--  drivers/s390/cio/cio.h               |    8
-rw-r--r--  drivers/s390/cio/css.c               |   57
-rw-r--r--  drivers/s390/cio/css.h               |    3
-rw-r--r--  drivers/s390/cio/device.c            | 1006
-rw-r--r--  drivers/s390/cio/device.h            |   25
-rw-r--r--  drivers/s390/cio/device_fsm.c        |  411
-rw-r--r--  drivers/s390/cio/device_id.c         |  375
-rw-r--r--  drivers/s390/cio/device_ops.c        |  142
-rw-r--r--  drivers/s390/cio/device_pgid.c       |  963
-rw-r--r--  drivers/s390/cio/device_status.c     |    3
-rw-r--r--  drivers/s390/cio/io_sch.h            |   73
-rw-r--r--  drivers/s390/crypto/ap_bus.c         |   31
-rw-r--r--  drivers/s390/crypto/ap_bus.h         |   18
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c     |   11
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h     |    2
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c   |   75
-rw-r--r--  drivers/s390/crypto/zcrypt_pcica.c   |    2
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.c   |    2
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c  |   38

76 files changed, 2727 insertions(+), 2713 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index ea781c1cfb5a..c8973c6102d8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3139,6 +3139,7 @@ S: Supported
 F:	Documentation/s390/kvm.txt
 F:	arch/s390/include/asm/kvm*
 F:	arch/s390/kvm/
+F:	drivers/s390/kvm/
 
 KEXEC
 M:	Eric Biederman <ebiederm@xmission.com>
@@ -4553,6 +4554,7 @@ L: linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
 F:	arch/s390/
+F:	drivers/s390/
 
 S390 NETWORK DRIVERS
 M:	Ursula Braun <ursula.braun@de.ibm.com>
@@ -4568,6 +4570,7 @@ M: Felix Beck <felix.beck@de.ibm.com>
 M:	Ralph Wuerthner <ralph.wuerthner@de.ibm.com>
 M:	linux390@de.ibm.com
 L:	linux-s390@vger.kernel.org
+W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
 F:	drivers/s390/crypto/
 
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 16c673096a22..c80235206c01 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -220,23 +220,8 @@ config AUDIT_ARCH
 	bool
 	default y
 
-config S390_SWITCH_AMODE
-	bool "Switch kernel/user addressing modes"
-	help
-	  This option allows to switch the addressing modes of kernel and user
-	  space. The kernel parameter switch_amode=on will enable this feature,
-	  default is disabled. Enabling this (via kernel parameter) on machines
-	  earlier than IBM System z9-109 EC/BC will reduce system performance.
-
-	  Note that this option will also be selected by selecting the execute
-	  protection option below. Enabling the execute protection via the
-	  noexec kernel parameter will also switch the addressing modes,
-	  independent of the switch_amode kernel parameter.
-
-
 config S390_EXEC_PROTECT
 	bool "Data execute protection"
-	select S390_SWITCH_AMODE
 	help
 	  This option allows to enable a buffer overflow protection for user
 	  space programs and it also selects the addressing mode option above.
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index ab4464486b7a..f4e53c6708dc 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -185,7 +185,6 @@ CONFIG_HOTPLUG_CPU=y
 CONFIG_COMPAT=y
 CONFIG_SYSVIPC_COMPAT=y
 CONFIG_AUDIT_ARCH=y
-CONFIG_S390_SWITCH_AMODE=y
 CONFIG_S390_EXEC_PROTECT=y
 
 #
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index ae7c8f9f94a5..2a113d6a7dfd 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -21,7 +21,7 @@
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 
 #define __CS_LOOP(ptr, op_val, op_string) ({			\
-	typeof(ptr->counter) old_val, new_val;			\
+	int old_val, new_val;					\
 	asm volatile(						\
 		"	l	%0,%2\n"			\
 		"0:	lr	%1,%0\n"			\
@@ -38,7 +38,7 @@
 #else /* __GNUC__ */
 
 #define __CS_LOOP(ptr, op_val, op_string) ({			\
-	typeof(ptr->counter) old_val, new_val;			\
+	int old_val, new_val;					\
 	asm volatile(						\
 		"	l	%0,0(%3)\n"			\
 		"0:	lr	%1,%0\n"			\
@@ -143,7 +143,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 
 #define __CSG_LOOP(ptr, op_val, op_string) ({			\
-	typeof(ptr->counter) old_val, new_val;			\
+	long long old_val, new_val;				\
 	asm volatile(						\
 		"	lg	%0,%2\n"			\
 		"0:	lgr	%1,%0\n"			\
@@ -160,7 +160,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #else /* __GNUC__ */
 
 #define __CSG_LOOP(ptr, op_val, op_string) ({			\
-	typeof(ptr->counter) old_val, new_val;			\
+	long long old_val, new_val;				\
 	asm volatile(						\
 		"	lg	%0,0(%3)\n"			\
 		"0:	lgr	%1,%0\n"			\
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 2a5419551176..f4bd346a52d3 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -142,6 +142,8 @@ struct ccw1;
 extern int ccw_device_set_options_mask(struct ccw_device *, unsigned long);
 extern int ccw_device_set_options(struct ccw_device *, unsigned long);
 extern void ccw_device_clear_options(struct ccw_device *, unsigned long);
+int ccw_device_is_pathgroup(struct ccw_device *cdev);
+int ccw_device_is_multipath(struct ccw_device *cdev);
 
 /* Allow for i/o completion notification after primary interrupt status. */
 #define CCWDEV_EARLY_NOTIFICATION	0x0001
@@ -151,6 +153,8 @@ extern void ccw_device_clear_options(struct ccw_device *, unsigned long);
 #define CCWDEV_DO_PATHGROUP		0x0004
 /* Allow forced onlining of boxed devices. */
 #define CCWDEV_ALLOW_FORCE		0x0008
+/* Try to use multipath mode. */
+#define CCWDEV_DO_MULTIPATH		0x0010
 
 extern int ccw_device_start(struct ccw_device *, struct ccw1 *,
 			    unsigned long, __u8, unsigned long);
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index fc7edd6f41b6..976e273988c2 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -36,7 +36,7 @@ static inline int init_new_context(struct task_struct *tsk,
 		mm->context.has_pgste = 1;
 		mm->context.alloc_pgste = 1;
 	} else {
-		mm->context.noexec = s390_noexec;
+		mm->context.noexec = (user_mode == SECONDARY_SPACE_MODE);
 		mm->context.has_pgste = 0;
 		mm->context.alloc_pgste = 0;
 	}
@@ -58,7 +58,7 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 	pgd_t *pgd = mm->pgd;
 
 	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	if (switch_amode) {
+	if (user_mode != HOME_SPACE_MODE) {
 		/* Load primary space page table origin. */
 		pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
 		S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index ddad5903341c..68940d0bad91 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -143,7 +143,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 	spin_lock_init(&mm->context.list_lock);
 	INIT_LIST_HEAD(&mm->context.crst_list);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
-	return (pgd_t *) crst_table_alloc(mm, s390_noexec);
+	return (pgd_t *)
+		crst_table_alloc(mm, user_mode == SECONDARY_SPACE_MODE);
 }
 #define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
 
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 60a7b1a1702f..e2fa79cf0614 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -169,12 +169,13 @@ extern unsigned long VMALLOC_START;
  * STL Segment-Table-Length: Segment-table length (STL+1*16 entries -> up to 2048)
  *
  * A 64 bit pagetable entry of S390 has following format:
- * |                     PFRA                         |0IP0|  OS  |
+ * |                     PFRA                         |0IPC|  OS  |
  * 0000000000111111111122222222223333333333444444444455555555556666
  * 0123456789012345678901234567890123456789012345678901234567890123
  *
  * I Page-Invalid Bit:    Page is not available for address-translation
  * P Page-Protection Bit: Store access not possible for page
+ * C Change-bit override: HW is not required to set change bit
  *
  * A 64 bit segmenttable entry of S390 has following format:
  * |        P-table origin                              |      TT
@@ -218,6 +219,7 @@ extern unsigned long VMALLOC_START;
  */
 
 /* Hardware bits in the page table entry */
+#define _PAGE_CO	0x100	/* HW Change-bit override */
 #define _PAGE_RO	0x200	/* HW read-only bit  */
 #define _PAGE_INVALID	0x400	/* HW invalid bit    */
 
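The new _PAGE_CO bit backs the shortlog entry '[S390] s390: use change recording override for kernel mapping': with the override set, the hardware is permitted to skip maintaining the per-page change bit. A hedged sketch of how a kernel mapping might set it (illustrative; the corresponding arch/s390/mm/vmem.c hunk is not part of this excerpt):

	/* Illustrative only: a kernel PTE with the change-bit override,
	 * so hardware need not record stores to this page. */
	pte_t pte = mk_pte_phys(phys_addr, PAGE_KERNEL);
	pte_val(pte) |= _PAGE_CO;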
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index e37478e87286..52a779c337e8 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -49,17 +49,12 @@ extern unsigned long memory_end;
 
 void detect_memory_layout(struct mem_chunk chunk[]);
 
-#ifdef CONFIG_S390_SWITCH_AMODE
-extern unsigned int switch_amode;
-#else
-#define switch_amode	(0)
-#endif
-
-#ifdef CONFIG_S390_EXEC_PROTECT
-extern unsigned int s390_noexec;
-#else
-#define s390_noexec	(0)
-#endif
+#define PRIMARY_SPACE_MODE	0
+#define ACCESS_REGISTER_MODE	1
+#define SECONDARY_SPACE_MODE	2
+#define HOME_SPACE_MODE		3
+
+extern unsigned int user_mode;
 
 /*
  * Machine features detected in head.S
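The hunks that follow translate the removed flags into comparisons against these new address-space constants. The pattern, summarized as two hypothetical helpers (the diff itself open-codes these tests; the helpers are not code from the commit):

	/* Hypothetical helpers naming the idiom used in mmu_context.h,
	 * setup.c, vdso.c and fault.c below. */
	static inline int amode_switched(void)		/* was: switch_amode */
	{
		return user_mode != HOME_SPACE_MODE;
	}

	static inline int exec_protected(void)		/* was: s390_noexec */
	{
		return user_mode == SECONDARY_SPACE_MODE;
	}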
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index a868b272c257..2ab1141eeb50 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -1,57 +1,22 @@
 /*
- *  include/asm-s390/smp.h
- *
- *  S390 version
- *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
- *               Martin Schwidefsky (schwidefsky@de.ibm.com)
- *               Heiko Carstens (heiko.carstens@de.ibm.com)
+ *    Copyright IBM Corp. 1999,2009
+ *    Author(s): Denis Joseph Barrow,
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
  */
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
 
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/bitops.h>
+#ifdef CONFIG_SMP
 
-#if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__)
-
-#include <asm/lowcore.h>
-#include <asm/sigp.h>
-#include <asm/ptrace.h>
 #include <asm/system.h>
-
-/*
-  s390 specific smp.c headers
- */
-typedef struct
-{
-	int intresting;
-	sigp_ccode ccode;
-	__u32 status;
-	__u16 cpu;
-} sigp_info;
+#include <asm/sigp.h>
 
 extern void machine_restart_smp(char *);
 extern void machine_halt_smp(void);
 extern void machine_power_off_smp(void);
 
-#define NO_PROC_ID		0xFF		/* No processor magic marker */
-
-/*
- *	This magic constant controls our willingness to transfer
- *	a process across CPUs. Such a transfer incurs misses on the L1
- *	cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
- *	gut feeling is this will vary by board in value. For a board
- *	with separate L2 cache it probably depends also on the RSS, and
- *	for a board with shared L2 cache it ought to decay fast as other
- *	processes are run.
- */
-
-#define PROC_CHANGE_PENALTY	20		/* Schedule penalty */
-
 #define raw_smp_processor_id()	(S390_lowcore.cpu_nr)
-#define cpu_logical_map(cpu)	(cpu)
 
 extern int __cpu_disable (void);
 extern void __cpu_die (unsigned int cpu);
@@ -64,7 +29,9 @@ extern int smp_cpu_polarization[];
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
-#endif
+extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
+
+#endif /* CONFIG_SMP */
 
 #ifdef CONFIG_HOTPLUG_CPU
 extern int smp_rescan_cpus(void);
@@ -72,5 +39,4 @@ extern int smp_rescan_cpus(void);
 static inline int smp_rescan_cpus(void) { return 0; }
 #endif
 
-extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
-#endif
+#endif /* __ASM_SMP_H */
diff --git a/arch/s390/include/asm/sockios.h b/arch/s390/include/asm/sockios.h
index f4fc16c7da59..6f60eee73242 100644
--- a/arch/s390/include/asm/sockios.h
+++ b/arch/s390/include/asm/sockios.h
@@ -1,21 +1,6 @@
-/*
- * include/asm-s390/sockios.h
- *
- *  S390 version
- *
- *  Derived from "include/asm-i386/sockios.h"
- */
-
-#ifndef __ARCH_S390_SOCKIOS__
-#define __ARCH_S390_SOCKIOS__
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN	0x8901
-#define SIOCSPGRP	0x8902
-#define FIOGETOWN	0x8903
-#define SIOCGPGRP	0x8904
-#define SIOCATMARK	0x8905
-#define SIOCGSTAMP	0x8906	/* Get stamp (timeval) */
-#define SIOCGSTAMPNS	0x8907	/* Get stamp (timespec) */
-
-#endif
+#ifndef _ASM_S390_SOCKIOS_H
+#define _ASM_S390_SOCKIOS_H
+
+#include <asm-generic/sockios.h>
+
+#endif
diff --git a/arch/s390/include/asm/termbits.h b/arch/s390/include/asm/termbits.h
index 58731853d529..71bf6ac6a2b9 100644
--- a/arch/s390/include/asm/termbits.h
+++ b/arch/s390/include/asm/termbits.h
@@ -1,206 +1,6 @@
-/*
- * include/asm-s390/termbits.h
- *
- *  S390 version
- *
- *  Derived from "include/asm-i386/termbits.h"
- */
-
-#ifndef __ARCH_S390_TERMBITS_H__
-#define __ARCH_S390_TERMBITS_H__
-
-#include <linux/posix_types.h>
-
-typedef unsigned char	cc_t;
-typedef unsigned int	speed_t;
-typedef unsigned int	tcflag_t;
-
-#define NCCS 19
-struct termios {
-	tcflag_t c_iflag;		/* input mode flags */
-	tcflag_t c_oflag;		/* output mode flags */
-	tcflag_t c_cflag;		/* control mode flags */
-	tcflag_t c_lflag;		/* local mode flags */
-	cc_t c_line;			/* line discipline */
-	cc_t c_cc[NCCS];		/* control characters */
-};
-
-struct termios2 {
-	tcflag_t c_iflag;		/* input mode flags */
-	tcflag_t c_oflag;		/* output mode flags */
-	tcflag_t c_cflag;		/* control mode flags */
-	tcflag_t c_lflag;		/* local mode flags */
-	cc_t c_line;			/* line discipline */
-	cc_t c_cc[NCCS];		/* control characters */
-	speed_t c_ispeed;		/* input speed */
-	speed_t c_ospeed;		/* output speed */
-};
-
-struct ktermios {
-	tcflag_t c_iflag;		/* input mode flags */
-	tcflag_t c_oflag;		/* output mode flags */
-	tcflag_t c_cflag;		/* control mode flags */
-	tcflag_t c_lflag;		/* local mode flags */
-	cc_t c_line;			/* line discipline */
-	cc_t c_cc[NCCS];		/* control characters */
-	speed_t c_ispeed;		/* input speed */
-	speed_t c_ospeed;		/* output speed */
-};
-
-/* c_cc characters */
-#define VINTR		0
-#define VQUIT		1
-#define VERASE		2
-#define VKILL		3
-#define VEOF		4
-#define VTIME		5
-#define VMIN		6
-#define VSWTC		7
-#define VSTART		8
-#define VSTOP		9
-#define VSUSP		10
-#define VEOL		11
-#define VREPRINT	12
-#define VDISCARD	13
-#define VWERASE		14
-#define VLNEXT		15
-#define VEOL2		16
-
-/* c_iflag bits */
-#define IGNBRK	0000001
-#define BRKINT	0000002
-#define IGNPAR	0000004
-#define PARMRK	0000010
-#define INPCK	0000020
-#define ISTRIP	0000040
-#define INLCR	0000100
-#define IGNCR	0000200
-#define ICRNL	0000400
-#define IUCLC	0001000
-#define IXON	0002000
-#define IXANY	0004000
-#define IXOFF	0010000
-#define IMAXBEL	0020000
-#define IUTF8	0040000
-
-/* c_oflag bits */
-#define OPOST	0000001
-#define OLCUC	0000002
-#define ONLCR	0000004
-#define OCRNL	0000010
-#define ONOCR	0000020
-#define ONLRET	0000040
-#define OFILL	0000100
-#define OFDEL	0000200
-#define NLDLY	0000400
-#define   NL0	0000000
-#define   NL1	0000400
-#define CRDLY	0003000
-#define   CR0	0000000
-#define   CR1	0001000
-#define   CR2	0002000
-#define   CR3	0003000
-#define TABDLY	0014000
-#define   TAB0	0000000
-#define   TAB1	0004000
-#define   TAB2	0010000
-#define   TAB3	0014000
-#define   XTABS	0014000
-#define BSDLY	0020000
-#define   BS0	0000000
-#define   BS1	0020000
-#define VTDLY	0040000
-#define   VT0	0000000
-#define   VT1	0040000
-#define FFDLY	0100000
-#define   FF0	0000000
-#define   FF1	0100000
-
-/* c_cflag bit meaning */
-#define CBAUD	0010017
-#define  B0	0000000		/* hang up */
-#define  B50	0000001
-#define  B75	0000002
-#define  B110	0000003
-#define  B134	0000004
-#define  B150	0000005
-#define  B200	0000006
-#define  B300	0000007
-#define  B600	0000010
-#define  B1200	0000011
-#define  B1800	0000012
-#define  B2400	0000013
-#define  B4800	0000014
-#define  B9600	0000015
-#define  B19200	0000016
-#define  B38400	0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE	0000060
-#define   CS5	0000000
-#define   CS6	0000020
-#define   CS7	0000040
-#define   CS8	0000060
-#define CSTOPB	0000100
-#define CREAD	0000200
-#define PARENB	0000400
-#define PARODD	0001000
-#define HUPCL	0002000
-#define CLOCAL	0004000
-#define CBAUDEX 0010000
-#define  BOTHER	  0010000
-#define  B57600	  0010001
-#define  B115200  0010002
-#define  B230400  0010003
-#define  B460800  0010004
-#define  B500000  0010005
-#define  B576000  0010006
-#define  B921600  0010007
-#define  B1000000 0010010
-#define  B1152000 0010011
-#define  B1500000 0010012
-#define  B2000000 0010013
-#define  B2500000 0010014
-#define  B3000000 0010015
-#define  B3500000 0010016
-#define  B4000000 0010017
-#define CIBAUD	  002003600000	/* input baud rate */
-#define CMSPAR	  010000000000	/* mark or space (stick) parity */
-#define CRTSCTS	  020000000000	/* flow control */
-
-#define IBSHIFT	  16		/* Shift from CBAUD to CIBAUD */
-
-/* c_lflag bits */
-#define ISIG	0000001
-#define ICANON	0000002
-#define XCASE	0000004
-#define ECHO	0000010
-#define ECHOE	0000020
-#define ECHOK	0000040
-#define ECHONL	0000100
-#define NOFLSH	0000200
-#define TOSTOP	0000400
-#define ECHOCTL	0001000
-#define ECHOPRT	0002000
-#define ECHOKE	0004000
-#define FLUSHO	0010000
-#define PENDIN	0040000
-#define IEXTEN	0100000
-
-/* tcflow() and TCXONC use these */
-#define	TCOOFF		0
-#define	TCOON		1
-#define	TCIOFF		2
-#define	TCION		3
-
-/* tcflush() and TCFLSH use these */
-#define	TCIFLUSH	0
-#define	TCOFLUSH	1
-#define	TCIOFLUSH	2
-
-/* tcsetattr uses these */
-#define	TCSANOW		0
-#define	TCSADRAIN	1
-#define	TCSAFLUSH	2
-
-#endif
+#ifndef _ASM_S390_TERMBITS_H
+#define _ASM_S390_TERMBITS_H
+
+#include <asm-generic/termbits.h>
+
+#endif
diff --git a/arch/s390/include/asm/todclk.h b/arch/s390/include/asm/todclk.h
deleted file mode 100644
index c7f62055488a..000000000000
--- a/arch/s390/include/asm/todclk.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- *  File...........: linux/include/asm/todclk.h
- *  Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
- *  Bugreports.to..: <Linux390@de.ibm.com>
- *  (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
- *
- * History of changes (starts July 2000)
- */
-
-#ifndef __ASM_TODCLK_H
-#define __ASM_TODCLK_H
-
-#ifdef __KERNEL__
-
-#define TOD_uSEC (0x1000ULL)
-#define TOD_mSEC (1000 * TOD_uSEC)
-#define TOD_SEC (1000 * TOD_mSEC)
-#define TOD_MIN (60 * TOD_SEC)
-#define TOD_HOUR (60 * TOD_MIN)
-
-#endif
-
-#endif
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 8377e91533d2..cbf0a8745bf4 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -93,6 +93,8 @@ extern struct uaccess_ops uaccess_mvcos;
 extern struct uaccess_ops uaccess_mvcos_switch;
 extern struct uaccess_ops uaccess_pt;
 
+extern int __handle_fault(unsigned long, unsigned long, int);
+
 static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
 {
 	size = uaccess.copy_to_user_small(size, ptr, x);
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index c7be8e10b87e..683f6381cc59 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o
 obj-$(CONFIG_FUNCTION_TRACER)	+= $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index fda1a8123f9b..25c31d681402 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -31,14 +31,8 @@
 #include <linux/shm.h>
 #include <linux/slab.h>
 #include <linux/uio.h>
-#include <linux/nfs_fs.h>
 #include <linux/quota.h>
 #include <linux/module.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/xdr.h>
-#include <linux/nfsd/syscall.h>
 #include <linux/poll.h>
 #include <linux/personality.h>
 #include <linux/stat.h>
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 45e9092b3aad..cb97afc85c94 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -4,10 +4,6 @@
 #include <linux/compat.h>
 #include <linux/socket.h>
 #include <linux/syscalls.h>
-#include <linux/nfs_fs.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/export.h>
 
 /* Macro that masks the high order bit of an 32 bit pointer and converts it*/
 /* to a 64 bit pointer */
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 6a250808092b..d984a2a380c3 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -83,6 +83,8 @@ startup_continue:
 	slr	%r0,%r0			# set cpuid to zero
 	sigp	%r1,%r0,0x12		# switch to esame mode
 	sam64				# switch to 64 bit mode
+	llgfr	%r13,%r13		# clear high-order half of base reg
+	lmh	%r0,%r15,.Lzero64-.LPG1(%r13)	# clear high-order half
 	lctlg	%c0,%c15,.Lctl-.LPG1(%r13)	# load control registers
 	lg	%r12,.Lparmaddr-.LPG1(%r13)	# pointer to parameter area
 					# move IPL device to lowcore
@@ -127,6 +129,7 @@ startup_continue:
 .L4malign:.quad 0xffffffffffc00000
 .Lscan2g:.quad	0x80000000 + 0x20000 - 8	# 2GB + 128K - 8
 .Lnop:	.long	0x07000700
+.Lzero64:.fill	16,4,0x0
 #ifdef CONFIG_ZFCPDUMP
 .Lcurrent_cpu:
 	.long 0x0
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 061479ff029f..0663287fa1b3 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -305,9 +305,8 @@ static int __init early_parse_mem(char *p)
 }
 early_param("mem", early_parse_mem);
 
-#ifdef CONFIG_S390_SWITCH_AMODE
-unsigned int switch_amode = 0;
-EXPORT_SYMBOL_GPL(switch_amode);
+unsigned int user_mode = HOME_SPACE_MODE;
+EXPORT_SYMBOL_GPL(user_mode);
 
 static int set_amode_and_uaccess(unsigned long user_amode,
 				 unsigned long user32_amode)
@@ -340,23 +339,29 @@ static int set_amode_and_uaccess(unsigned long user_amode,
  */
 static int __init early_parse_switch_amode(char *p)
 {
-	switch_amode = 1;
+	if (user_mode != SECONDARY_SPACE_MODE)
+		user_mode = PRIMARY_SPACE_MODE;
 	return 0;
 }
 early_param("switch_amode", early_parse_switch_amode);
 
-#else /* CONFIG_S390_SWITCH_AMODE */
-static inline int set_amode_and_uaccess(unsigned long user_amode,
-					unsigned long user32_amode)
+static int __init early_parse_user_mode(char *p)
 {
+	if (p && strcmp(p, "primary") == 0)
+		user_mode = PRIMARY_SPACE_MODE;
+#ifdef CONFIG_S390_EXEC_PROTECT
+	else if (p && strcmp(p, "secondary") == 0)
+		user_mode = SECONDARY_SPACE_MODE;
+#endif
+	else if (!p || strcmp(p, "home") == 0)
+		user_mode = HOME_SPACE_MODE;
+	else
+		return 1;
 	return 0;
 }
-#endif /* CONFIG_S390_SWITCH_AMODE */
+early_param("user_mode", early_parse_user_mode);
 
 #ifdef CONFIG_S390_EXEC_PROTECT
-unsigned int s390_noexec = 0;
-EXPORT_SYMBOL_GPL(s390_noexec);
-
 /*
  * Enable execute protection?
  */
@@ -364,8 +369,7 @@ static int __init early_parse_noexec(char *p)
 {
 	if (!strncmp(p, "off", 3))
 		return 0;
-	switch_amode = 1;
-	s390_noexec = 1;
+	user_mode = SECONDARY_SPACE_MODE;
 	return 0;
 }
 early_param("noexec", early_parse_noexec);
@@ -373,7 +377,7 @@ early_param("noexec", early_parse_noexec);
 
 static void setup_addressing_mode(void)
 {
-	if (s390_noexec) {
+	if (user_mode == SECONDARY_SPACE_MODE) {
 		if (set_amode_and_uaccess(PSW_ASC_SECONDARY,
 					  PSW32_ASC_SECONDARY))
 			pr_info("Execute protection active, "
@@ -381,7 +385,7 @@ static void setup_addressing_mode(void)
 		else
 			pr_info("Execute protection active, "
 				"mvcos not available\n");
-	} else if (switch_amode) {
+	} else if (user_mode == PRIMARY_SPACE_MODE) {
 		if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
 			pr_info("Address spaces switched, "
 				"mvcos available\n");
@@ -411,7 +415,7 @@ setup_lowcore(void)
 	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
 	lc->restart_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
-	if (switch_amode)
+	if (user_mode != HOME_SPACE_MODE)
 		lc->restart_psw.mask |= PSW_ASC_HOME;
 	lc->external_new_psw.mask = psw_kernel_bits;
 	lc->external_new_psw.addr =
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 68e1ecf5ebab..65065ac48ed3 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -335,7 +335,7 @@ int get_sync_clock(unsigned long long *clock)
 	sw0 = atomic_read(sw_ptr);
 	*clock = get_clock();
 	sw1 = atomic_read(sw_ptr);
-	put_cpu_var(clock_sync_sync);
+	put_cpu_var(clock_sync_word);
 	if (sw0 == sw1 && (sw0 & 0x80000000U))
 		/* Success: time is in sync. */
 		return 0;
@@ -385,7 +385,7 @@ static inline int check_sync_clock(void)
 
 	sw_ptr = &get_cpu_var(clock_sync_word);
 	rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
-	put_cpu_var(clock_sync_sync);
+	put_cpu_var(clock_sync_word);
 	return rc;
 }
 
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index adfb32aa6d59..5f99e66c51c3 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -86,7 +86,8 @@ static void vdso_init_data(struct vdso_data *vd)
 	unsigned int facility_list;
 
 	facility_list = stfl();
-	vd->ectg_available = switch_amode && (facility_list & 1);
+	vd->ectg_available =
+		user_mode != HOME_SPACE_MODE && (facility_list & 1);
 }
 
 #ifdef CONFIG_64BIT
@@ -114,7 +115,7 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
 
 	lowcore->vdso_per_cpu_data = __LC_PASTE;
 
-	if (!switch_amode || !vdso_enabled)
+	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
 		return 0;
 
 	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -160,7 +161,7 @@ void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
 	unsigned long segment_table, page_table, page_frame;
 	u32 *psal, *aste;
 
-	if (!switch_amode || !vdso_enabled)
+	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
 		return;
 
 	psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -184,7 +185,7 @@ static void __vdso_init_cr5(void *dummy)
 
 static void vdso_init_cr5(void)
 {
-	if (switch_amode && vdso_enabled)
+	if (user_mode != HOME_SPACE_MODE && vdso_enabled)
 		on_each_cpu(__vdso_init_cr5, NULL, 1);
 }
 #endif /* CONFIG_64BIT */
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index bf164fc21864..6ee55ae84ce2 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -20,7 +20,6 @@ config KVM
 	depends on HAVE_KVM && EXPERIMENTAL
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
-	select S390_SWITCH_AMODE
 	---help---
 	  Support hosting paravirtualized guest machines using the SIE
 	  virtualization capability on the mainframe. This should work
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 58da3f461214..60455f104ea3 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -162,7 +162,6 @@ static size_t clear_user_mvcos(size_t size, void __user *to)
 	return size;
 }
 
-#ifdef CONFIG_S390_SWITCH_AMODE
 static size_t strnlen_user_mvcos(size_t count, const char __user *src)
 {
 	char buf[256];
@@ -200,7 +199,6 @@ static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
 	} while ((len_str == len) && (done < count));
 	return done;
 }
-#endif /* CONFIG_S390_SWITCH_AMODE */
 
 struct uaccess_ops uaccess_mvcos = {
 	.copy_from_user = copy_from_user_mvcos_check,
@@ -215,7 +213,6 @@ struct uaccess_ops uaccess_mvcos = {
 	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
 };
 
-#ifdef CONFIG_S390_SWITCH_AMODE
 struct uaccess_ops uaccess_mvcos_switch = {
 	.copy_from_user = copy_from_user_mvcos,
 	.copy_from_user_small = copy_from_user_mvcos,
@@ -228,4 +225,3 @@ struct uaccess_ops uaccess_mvcos_switch = {
 	.futex_atomic_op = futex_atomic_op_pt,
 	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
 };
-#endif
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index cb5d59eab0ee..404f2de296dc 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -23,86 +23,21 @@ static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
 
 	pgd = pgd_offset(mm, addr);
 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		return NULL;
+		return (pte_t *) 0x3a;
 
 	pud = pud_offset(pgd, addr);
 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		return NULL;
+		return (pte_t *) 0x3b;
 
 	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-		return NULL;
+		return (pte_t *) 0x10;
 
 	return pte_offset_map(pmd, addr);
 }
 
-static int __handle_fault(struct mm_struct *mm, unsigned long address,
-			  int write_access)
-{
-	struct vm_area_struct *vma;
-	int ret = -EFAULT;
-	int fault;
-
-	if (in_atomic())
-		return ret;
-	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, address);
-	if (unlikely(!vma))
-		goto out;
-	if (unlikely(vma->vm_start > address)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN))
-			goto out;
-		if (expand_stack(vma, address))
-			goto out;
-	}
-
-	if (!write_access) {
-		/* page not present, check vm flags */
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
-			goto out;
-	} else {
-		if (!(vma->vm_flags & VM_WRITE))
-			goto out;
-	}
-
-survive:
-	fault = handle_mm_fault(mm, vma, address, write_access ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR)) {
-		if (fault & VM_FAULT_OOM)
-			goto out_of_memory;
-		else if (fault & VM_FAULT_SIGBUS)
-			goto out_sigbus;
-		BUG();
-	}
-	if (fault & VM_FAULT_MAJOR)
-		current->maj_flt++;
-	else
-		current->min_flt++;
-	ret = 0;
-out:
-	up_read(&mm->mmap_sem);
-	return ret;
-
-out_of_memory:
-	up_read(&mm->mmap_sem);
-	if (is_global_init(current)) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	printk("VM: killing process %s\n", current->comm);
-	return ret;
-
-out_sigbus:
-	up_read(&mm->mmap_sem);
-	current->thread.prot_addr = address;
-	current->thread.trap_no = 0x11;
-	force_sig(SIGBUS, current);
-	return ret;
-}
-
-static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
-			     size_t n, int write_user)
+static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
+					     size_t n, int write_user)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long offset, pfn, done, size;
@@ -114,12 +49,17 @@ retry:
 	spin_lock(&mm->page_table_lock);
 	do {
 		pte = follow_table(mm, uaddr);
-		if (!pte || !pte_present(*pte) ||
-		    (write_user && !pte_write(*pte)))
+		if ((unsigned long) pte < 0x1000)
 			goto fault;
+		if (!pte_present(*pte)) {
+			pte = (pte_t *) 0x11;
+			goto fault;
+		} else if (write_user && !pte_write(*pte)) {
+			pte = (pte_t *) 0x04;
+			goto fault;
+		}
 
 		pfn = pte_pfn(*pte);
-
 		offset = uaddr & (PAGE_SIZE - 1);
 		size = min(n - done, PAGE_SIZE - offset);
 		if (write_user) {
@@ -137,7 +77,7 @@ retry:
 	return n - done;
 fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(mm, uaddr, write_user))
+	if (__handle_fault(uaddr, (unsigned long) pte, write_user))
 		return n - done;
 	goto retry;
 }
@@ -146,30 +86,31 @@ fault:
  * Do DAT for user address by page table walk, return kernel address.
  * This function needs to be called with current->mm->page_table_lock held.
  */
-static unsigned long __dat_user_addr(unsigned long uaddr)
+static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long pfn, ret;
+	unsigned long pfn;
 	pte_t *pte;
 	int rc;
 
-	ret = 0;
 retry:
 	pte = follow_table(mm, uaddr);
-	if (!pte || !pte_present(*pte))
+	if ((unsigned long) pte < 0x1000)
 		goto fault;
+	if (!pte_present(*pte)) {
+		pte = (pte_t *) 0x11;
+		goto fault;
+	}
 
 	pfn = pte_pfn(*pte);
-	ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
-out:
-	return ret;
+	return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
 fault:
 	spin_unlock(&mm->page_table_lock);
-	rc = __handle_fault(mm, uaddr, 0);
+	rc = __handle_fault(uaddr, (unsigned long) pte, 0);
 	spin_lock(&mm->page_table_lock);
-	if (rc)
-		goto out;
-	goto retry;
+	if (!rc)
+		goto retry;
+	return 0;
 }
 
 size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
@@ -234,8 +175,12 @@ retry:
 	spin_lock(&mm->page_table_lock);
 	do {
 		pte = follow_table(mm, uaddr);
-		if (!pte || !pte_present(*pte))
+		if ((unsigned long) pte < 0x1000)
+			goto fault;
+		if (!pte_present(*pte)) {
+			pte = (pte_t *) 0x11;
 			goto fault;
+		}
 
 		pfn = pte_pfn(*pte);
 		offset = uaddr & (PAGE_SIZE-1);
@@ -249,9 +194,8 @@ retry:
 	return done + 1;
 fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(mm, uaddr, 0)) {
+	if (__handle_fault(uaddr, (unsigned long) pte, 0))
 		return 0;
-	}
 	goto retry;
 }
 
@@ -284,7 +228,7 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
-		      uaddr, done, size;
+		      uaddr, done, size, error_code;
 	unsigned long uaddr_from = (unsigned long) from;
 	unsigned long uaddr_to = (unsigned long) to;
 	pte_t *pte_from, *pte_to;
@@ -298,17 +242,28 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
 retry:
 	spin_lock(&mm->page_table_lock);
 	do {
+		write_user = 0;
+		uaddr = uaddr_from;
 		pte_from = follow_table(mm, uaddr_from);
-		if (!pte_from || !pte_present(*pte_from)) {
-			uaddr = uaddr_from;
-			write_user = 0;
+		error_code = (unsigned long) pte_from;
+		if (error_code < 0x1000)
+			goto fault;
+		if (!pte_present(*pte_from)) {
+			error_code = 0x11;
 			goto fault;
 		}
 
+		write_user = 1;
+		uaddr = uaddr_to;
 		pte_to = follow_table(mm, uaddr_to);
-		if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
-			uaddr = uaddr_to;
-			write_user = 1;
+		error_code = (unsigned long) pte_to;
+		if (error_code < 0x1000)
+			goto fault;
+		if (!pte_present(*pte_to)) {
+			error_code = 0x11;
+			goto fault;
+		} else if (!pte_write(*pte_to)) {
+			error_code = 0x04;
 			goto fault;
 		}
 
@@ -329,7 +284,7 @@ retry:
 	return n - done;
 fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(mm, uaddr, write_user))
+	if (__handle_fault(uaddr, error_code, write_user))
 		return n - done;
 	goto retry;
 }
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index ff58779bf7e9..76a3637b88e0 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -18,6 +18,7 @@
 #include <linux/swap.h>
 #include <linux/kthread.h>
 #include <linux/oom.h>
+#include <linux/suspend.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -44,6 +45,7 @@ static volatile long cmm_pages_target;
 static volatile long cmm_timed_pages_target;
 static long cmm_timeout_pages;
 static long cmm_timeout_seconds;
+static int cmm_suspended;
 
 static struct cmm_page_array *cmm_page_list;
 static struct cmm_page_array *cmm_timed_page_list;
@@ -147,9 +149,9 @@ cmm_thread(void *dummy)
 
 	while (1) {
 		rc = wait_event_interruptible(cmm_thread_wait,
-			(cmm_pages != cmm_pages_target ||
-			 cmm_timed_pages != cmm_timed_pages_target ||
-			 kthread_should_stop()));
+			(!cmm_suspended && (cmm_pages != cmm_pages_target ||
+			 cmm_timed_pages != cmm_timed_pages_target)) ||
+			 kthread_should_stop());
 		if (kthread_should_stop() || rc == -ERESTARTSYS) {
 			cmm_pages_target = cmm_pages;
 			cmm_timed_pages_target = cmm_timed_pages;
@@ -410,6 +412,38 @@ cmm_smsg_target(char *from, char *msg)
 
 static struct ctl_table_header *cmm_sysctl_header;
 
+static int cmm_suspend(void)
+{
+	cmm_suspended = 1;
+	cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
+	cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
+	return 0;
+}
+
+static int cmm_resume(void)
+{
+	cmm_suspended = 0;
+	cmm_kick_thread();
+	return 0;
+}
+
+static int cmm_power_event(struct notifier_block *this,
+			   unsigned long event, void *ptr)
+{
+	switch (event) {
+	case PM_POST_HIBERNATION:
+		return cmm_resume();
+	case PM_HIBERNATION_PREPARE:
+		return cmm_suspend();
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static struct notifier_block cmm_power_notifier = {
+	.notifier_call = cmm_power_event,
+};
+
 static int
 cmm_init (void)
 {
@@ -418,7 +452,7 @@ cmm_init (void)
 #ifdef CONFIG_CMM_PROC
 	cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
 	if (!cmm_sysctl_header)
-		goto out;
+		goto out_sysctl;
 #endif
 #ifdef CONFIG_CMM_IUCV
 	rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
@@ -428,17 +462,21 @@ cmm_init (void)
 	rc = register_oom_notifier(&cmm_oom_nb);
 	if (rc < 0)
 		goto out_oom_notify;
+	rc = register_pm_notifier(&cmm_power_notifier);
+	if (rc)
+		goto out_pm;
 	init_waitqueue_head(&cmm_thread_wait);
 	init_timer(&cmm_timer);
 	cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
 	rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
-	if (!rc)
-		goto out;
-	/*
-	 * kthread_create failed. undo all the stuff from above again.
-	 */
-	unregister_oom_notifier(&cmm_oom_nb);
+	if (rc)
+		goto out_kthread;
+	return 0;
 
+out_kthread:
+	unregister_pm_notifier(&cmm_power_notifier);
+out_pm:
+	unregister_oom_notifier(&cmm_oom_nb);
 out_oom_notify:
 #ifdef CONFIG_CMM_IUCV
 	smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
@@ -446,8 +484,8 @@ out_smsg:
 #endif
 #ifdef CONFIG_CMM_PROC
 	unregister_sysctl_table(cmm_sysctl_header);
+out_sysctl:
 #endif
-out:
 	return rc;
 }
 
@@ -455,6 +493,7 @@ static void
 cmm_exit(void)
 {
 	kthread_stop(cmm_thread_ptr);
+	unregister_pm_notifier(&cmm_power_notifier);
 	unregister_oom_notifier(&cmm_oom_nb);
 	cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
 	cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 6d507462967a..fc102e70d9c2 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -34,16 +34,15 @@
 #include <asm/pgtable.h>
 #include <asm/s390_ext.h>
 #include <asm/mmu_context.h>
+#include <asm/compat.h>
 #include "../kernel/entry.h"
 
 #ifndef CONFIG_64BIT
 #define __FAIL_ADDR_MASK 0x7ffff000
-#define __FIXUP_MASK 0x7fffffff
 #define __SUBCODE_MASK 0x0200
 #define __PF_RES_FIELD 0ULL
 #else /* CONFIG_64BIT */
 #define __FAIL_ADDR_MASK -4096L
-#define __FIXUP_MASK ~0L
 #define __SUBCODE_MASK 0x0600
 #define __PF_RES_FIELD 0x8000000000000000ULL
 #endif /* CONFIG_64BIT */
@@ -52,11 +51,15 @@
 extern int sysctl_userprocess_debug;
 #endif
 
-#ifdef CONFIG_KPROBES
-static inline int notify_page_fault(struct pt_regs *regs, long err)
+#define VM_FAULT_BADCONTEXT	0x010000
+#define VM_FAULT_BADMAP		0x020000
+#define VM_FAULT_BADACCESS	0x040000
+
+static inline int notify_page_fault(struct pt_regs *regs)
 {
 	int ret = 0;
 
+#ifdef CONFIG_KPROBES
 	/* kprobe_running() needs smp_processor_id() */
 	if (!user_mode(regs)) {
 		preempt_disable();
@@ -64,15 +67,9 @@ static inline int notify_page_fault(struct pt_regs *regs, long err)
 		ret = 1;
 		preempt_enable();
 	}
-
+#endif
 	return ret;
 }
-#else
-static inline int notify_page_fault(struct pt_regs *regs, long err)
-{
-	return 0;
-}
-#endif
 
 
 /*
@@ -100,57 +97,50 @@ void bust_spinlocks(int yes)
 
 /*
  * Returns the address space associated with the fault.
- * Returns 0 for kernel space, 1 for user space and
- * 2 for code execution in user space with noexec=on.
+ * Returns 0 for kernel space and 1 for user space.
  */
-static inline int check_space(struct task_struct *tsk)
+static inline int user_space_fault(unsigned long trans_exc_code)
 {
 	/*
-	 * The lowest two bits of S390_lowcore.trans_exc_code
-	 * indicate which paging table was used.
+	 * The lowest two bits of the translation exception
+	 * identification indicate which paging table was used.
 	 */
-	int desc = S390_lowcore.trans_exc_code & 3;
-
-	if (desc == 3)	/* Home Segment Table Descriptor */
-		return switch_amode == 0;
-	if (desc == 2)	/* Secondary Segment Table Descriptor */
-		return tsk->thread.mm_segment.ar4;
-#ifdef CONFIG_S390_SWITCH_AMODE
-	if (unlikely(desc == 1)) { /* STD determined via access register */
-		/* %a0 always indicates primary space. */
-		if (S390_lowcore.exc_access_id != 0) {
-			save_access_regs(tsk->thread.acrs);
-			/*
-			 * An alet of 0 indicates primary space.
-			 * An alet of 1 indicates secondary space.
-			 * Any other alet values generate an
-			 * alen-translation exception.
-			 */
-			if (tsk->thread.acrs[S390_lowcore.exc_access_id])
-				return tsk->thread.mm_segment.ar4;
-		}
-	}
-#endif
-	/* Primary Segment Table Descriptor */
-	return switch_amode << s390_noexec;
+	trans_exc_code &= 3;
+	if (trans_exc_code == 2)
+		/* Access via secondary space, set_fs setting decides */
+		return current->thread.mm_segment.ar4;
+	if (user_mode == HOME_SPACE_MODE)
+		/* User space if the access has been done via home space. */
+		return trans_exc_code == 3;
+	/*
+	 * If the user space is not the home space the kernel runs in home
+	 * space. Access via secondary space has already been covered,
+	 * access via primary space or access register is from user space
+	 * and access via home space is from the kernel.
+	 */
+	return trans_exc_code != 3;
 }
137 123
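
The new user_space_fault() above boils the old check_space() down to the two low bits of the translation-exception identification: 0 and 1 (primary space or access register) are user accesses, 2 (secondary space) is decided by the current set_fs() setting, and 3 (home space) is kernel unless user processes run in home space. A standalone sketch of that decision table, with the two flags below standing in for the kernel's per-thread and global mode state:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for current->thread.mm_segment.ar4 and
     * (user_mode == HOME_SPACE_MODE); both are assumptions for the demo. */
    static bool secondary_is_user = true;
    static bool home_space_mode = false;

    static bool demo_user_space_fault(unsigned long trans_exc_code)
    {
        trans_exc_code &= 3;
        if (trans_exc_code == 2)        /* secondary space: set_fs decides */
            return secondary_is_user;
        if (home_space_mode)            /* user space lives in home space */
            return trans_exc_code == 3;
        /* kernel lives in home space: only code 3 is a kernel access */
        return trans_exc_code != 3;
    }

    int main(void)
    {
        for (unsigned long code = 0; code < 4; code++)
            printf("exc code %lu -> %s space\n", code,
                   demo_user_space_fault(code) ? "user" : "kernel");
        return 0;
    }
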
138/* 124/*
139 * Send SIGSEGV to task. This is an external routine 125 * Send SIGSEGV to task. This is an external routine
140 * to keep the stack usage of do_page_fault small. 126 * to keep the stack usage of do_page_fault small.
141 */ 127 */
142static void do_sigsegv(struct pt_regs *regs, unsigned long error_code, 128static noinline void do_sigsegv(struct pt_regs *regs, long int_code,
143 int si_code, unsigned long address) 129 int si_code, unsigned long trans_exc_code)
144{ 130{
145 struct siginfo si; 131 struct siginfo si;
132 unsigned long address;
146 133
134 address = trans_exc_code & __FAIL_ADDR_MASK;
135 current->thread.prot_addr = address;
136 current->thread.trap_no = int_code;
147#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG) 137#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
148#if defined(CONFIG_SYSCTL) 138#if defined(CONFIG_SYSCTL)
149 if (sysctl_userprocess_debug) 139 if (sysctl_userprocess_debug)
150#endif 140#endif
151 { 141 {
152 printk("User process fault: interruption code 0x%lX\n", 142 printk("User process fault: interruption code 0x%lX\n",
153 error_code); 143 int_code);
154 printk("failing address: %lX\n", address); 144 printk("failing address: %lX\n", address);
155 show_regs(regs); 145 show_regs(regs);
156 } 146 }
@@ -161,13 +151,14 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
161 force_sig_info(SIGSEGV, &si, current); 151 force_sig_info(SIGSEGV, &si, current);
162} 152}
163 153
164static void do_no_context(struct pt_regs *regs, unsigned long error_code, 154static noinline void do_no_context(struct pt_regs *regs, long int_code,
165 unsigned long address) 155 unsigned long trans_exc_code)
166{ 156{
167 const struct exception_table_entry *fixup; 157 const struct exception_table_entry *fixup;
158 unsigned long address;
168 159
169 /* Are we prepared to handle this kernel fault? */ 160 /* Are we prepared to handle this kernel fault? */
170 fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK); 161 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
171 if (fixup) { 162 if (fixup) {
172 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; 163 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
173 return; 164 return;
@@ -177,129 +168,149 @@ static void do_no_context(struct pt_regs *regs, unsigned long error_code,
177 * Oops. The kernel tried to access some bad page. We'll have to 168 * Oops. The kernel tried to access some bad page. We'll have to
178 * terminate things with extreme prejudice. 169 * terminate things with extreme prejudice.
179 */ 170 */
180 if (check_space(current) == 0) 171 address = trans_exc_code & __FAIL_ADDR_MASK;
172 if (!user_space_fault(trans_exc_code))
181 printk(KERN_ALERT "Unable to handle kernel pointer dereference" 173 printk(KERN_ALERT "Unable to handle kernel pointer dereference"
182 " at virtual kernel address %p\n", (void *)address); 174 " at virtual kernel address %p\n", (void *)address);
183 else 175 else
184 printk(KERN_ALERT "Unable to handle kernel paging request" 176 printk(KERN_ALERT "Unable to handle kernel paging request"
185 " at virtual user address %p\n", (void *)address); 177 " at virtual user address %p\n", (void *)address);
186 178
187 die("Oops", regs, error_code); 179 die("Oops", regs, int_code);
188 do_exit(SIGKILL); 180 do_exit(SIGKILL);
189} 181}
190 182
191static void do_low_address(struct pt_regs *regs, unsigned long error_code) 183static noinline void do_low_address(struct pt_regs *regs, long int_code,
184 unsigned long trans_exc_code)
192{ 185{
193 /* Low-address protection hit in kernel mode means 186 /* Low-address protection hit in kernel mode means
194 NULL pointer write access in kernel mode. */ 187 NULL pointer write access in kernel mode. */
195 if (regs->psw.mask & PSW_MASK_PSTATE) { 188 if (regs->psw.mask & PSW_MASK_PSTATE) {
196 /* Low-address protection hit in user mode 'cannot happen'. */ 189 /* Low-address protection hit in user mode 'cannot happen'. */
197 die ("Low-address protection", regs, error_code); 190 die ("Low-address protection", regs, int_code);
198 do_exit(SIGKILL); 191 do_exit(SIGKILL);
199 } 192 }
200 193
201 do_no_context(regs, error_code, 0); 194 do_no_context(regs, int_code, trans_exc_code);
202} 195}
203 196
204static void do_sigbus(struct pt_regs *regs, unsigned long error_code, 197static noinline void do_sigbus(struct pt_regs *regs, long int_code,
205 unsigned long address) 198 unsigned long trans_exc_code)
206{ 199{
207 struct task_struct *tsk = current; 200 struct task_struct *tsk = current;
208 struct mm_struct *mm = tsk->mm;
209 201
210 up_read(&mm->mmap_sem);
211 /* 202 /*
212 * Send a sigbus, regardless of whether we were in kernel 203 * Send a sigbus, regardless of whether we were in kernel
213 * or user mode. 204 * or user mode.
214 */ 205 */
215 tsk->thread.prot_addr = address; 206 tsk->thread.prot_addr = trans_exc_code & __FAIL_ADDR_MASK;
216 tsk->thread.trap_no = error_code; 207 tsk->thread.trap_no = int_code;
217 force_sig(SIGBUS, tsk); 208 force_sig(SIGBUS, tsk);
218
219 /* Kernel mode? Handle exceptions or die */
220 if (!(regs->psw.mask & PSW_MASK_PSTATE))
221 do_no_context(regs, error_code, address);
222} 209}
223 210
224#ifdef CONFIG_S390_EXEC_PROTECT 211#ifdef CONFIG_S390_EXEC_PROTECT
225static int signal_return(struct mm_struct *mm, struct pt_regs *regs, 212static noinline int signal_return(struct pt_regs *regs, long int_code,
226 unsigned long address, unsigned long error_code) 213 unsigned long trans_exc_code)
227{ 214{
228 u16 instruction; 215 u16 instruction;
229 int rc; 216 int rc;
230#ifdef CONFIG_COMPAT
231 int compat;
232#endif
233 217
234 pagefault_disable();
235 rc = __get_user(instruction, (u16 __user *) regs->psw.addr); 218 rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
236 pagefault_enable();
237 if (rc)
238 return -EFAULT;
239 219
240 up_read(&mm->mmap_sem); 220 if (!rc && instruction == 0x0a77) {
241 clear_tsk_thread_flag(current, TIF_SINGLE_STEP); 221 clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
242#ifdef CONFIG_COMPAT 222 if (is_compat_task())
243 compat = is_compat_task(); 223 sys32_sigreturn();
244 if (compat && instruction == 0x0a77) 224 else
245 sys32_sigreturn(); 225 sys_sigreturn();
246 else if (compat && instruction == 0x0aad) 226 } else if (!rc && instruction == 0x0aad) {
247 sys32_rt_sigreturn(); 227 clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
248 else 228 if (is_compat_task())
249#endif 229 sys32_rt_sigreturn();
250 if (instruction == 0x0a77) 230 else
251 sys_sigreturn(); 231 sys_rt_sigreturn();
252 else if (instruction == 0x0aad) 232 } else
253 sys_rt_sigreturn(); 233 do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
254 else {
255 current->thread.prot_addr = address;
256 current->thread.trap_no = error_code;
257 do_sigsegv(regs, error_code, SEGV_MAPERR, address);
258 }
259 return 0; 234 return 0;
260} 235}
261#endif /* CONFIG_S390_EXEC_PROTECT */ 236#endif /* CONFIG_S390_EXEC_PROTECT */
262 237
238static noinline void do_fault_error(struct pt_regs *regs, long int_code,
239 unsigned long trans_exc_code, int fault)
240{
241 int si_code;
242
243 switch (fault) {
244 case VM_FAULT_BADACCESS:
245#ifdef CONFIG_S390_EXEC_PROTECT
246 if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
247 (trans_exc_code & 3) == 0) {
248 signal_return(regs, int_code, trans_exc_code);
249 break;
250 }
251#endif /* CONFIG_S390_EXEC_PROTECT */
252 case VM_FAULT_BADMAP:
253 /* Bad memory access. Check if it is kernel or user space. */
254 if (regs->psw.mask & PSW_MASK_PSTATE) {
255 /* User mode accesses just cause a SIGSEGV */
256 si_code = (fault == VM_FAULT_BADMAP) ?
257 SEGV_MAPERR : SEGV_ACCERR;
258 do_sigsegv(regs, int_code, si_code, trans_exc_code);
259 return;
260 }
261 case VM_FAULT_BADCONTEXT:
262 do_no_context(regs, int_code, trans_exc_code);
263 break;
264 default: /* fault & VM_FAULT_ERROR */
265 if (fault & VM_FAULT_OOM)
266 pagefault_out_of_memory();
267 else if (fault & VM_FAULT_SIGBUS) {
268 do_sigbus(regs, int_code, trans_exc_code);
269 /* Kernel mode? Handle exceptions or die */
270 if (!(regs->psw.mask & PSW_MASK_PSTATE))
271 do_no_context(regs, int_code, trans_exc_code);
272 } else
273 BUG();
274 break;
275 }
276}
277
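
The new do_fault_error() above dispatches on the classification code and relies on deliberate switch fall-through: a BADACCESS that is not the exec-protect special case is treated like BADMAP, and a BADMAP in kernel mode falls on to the BADCONTEXT handling. A compact userspace sketch of that cascade (constants and names are illustrative):

    #include <stdio.h>

    #define F_BADCONTEXT 0x010000
    #define F_BADMAP     0x020000
    #define F_BADACCESS  0x040000

    static void demo_fault_error(int fault, int user_mode)
    {
        switch (fault) {
        case F_BADACCESS:
            /* the exec-protect sigreturn check would go here */
        case F_BADMAP:
            if (user_mode) {
                printf("SIGSEGV, si_code=%s\n",
                       fault == F_BADMAP ? "SEGV_MAPERR" : "SEGV_ACCERR");
                return;             /* user fault fully handled */
            }
            /* fall through: a kernel-mode bad access */
        case F_BADCONTEXT:
            puts("kernel fixup or oops");
            break;
        default:
            puts("VM_FAULT_ERROR path: OOM or SIGBUS");
        }
    }

    int main(void)
    {
        demo_fault_error(F_BADMAP, 1);
        demo_fault_error(F_BADACCESS, 0);
        return 0;
    }
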
263/* 278/*
264 * This routine handles page faults. It determines the address, 279 * This routine handles page faults. It determines the address,
265 * and the problem, and then passes it off to one of the appropriate 280 * and the problem, and then passes it off to one of the appropriate
266 * routines. 281 * routines.
267 * 282 *
268 * error_code: 283 * interruption code (int_code):
 269 * 04 Protection -> Write-Protection (suppression) 284 * 04 Protection -> Write-Protection (suppression)
270 * 10 Segment translation -> Not present (nullification) 285 * 10 Segment translation -> Not present (nullification)
271 * 11 Page translation -> Not present (nullification) 286 * 11 Page translation -> Not present (nullification)
272 * 3b Region third trans. -> Not present (nullification) 287 * 3b Region third trans. -> Not present (nullification)
273 */ 288 */
274static inline void 289static inline int do_exception(struct pt_regs *regs, int access,
275do_exception(struct pt_regs *regs, unsigned long error_code, int write) 290 unsigned long trans_exc_code)
276{ 291{
277 struct task_struct *tsk; 292 struct task_struct *tsk;
278 struct mm_struct *mm; 293 struct mm_struct *mm;
279 struct vm_area_struct *vma; 294 struct vm_area_struct *vma;
280 unsigned long address; 295 unsigned long address;
281 int space;
282 int si_code;
283 int fault; 296 int fault;
284 297
285 if (notify_page_fault(regs, error_code)) 298 if (notify_page_fault(regs))
286 return; 299 return 0;
287 300
288 tsk = current; 301 tsk = current;
289 mm = tsk->mm; 302 mm = tsk->mm;
290 303
291 /* get the failing address and the affected space */
292 address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
293 space = check_space(tsk);
294
295 /* 304 /*
296 * Verify that the fault happened in user space, that 305 * Verify that the fault happened in user space, that
297 * we are not in an interrupt and that there is a 306 * we are not in an interrupt and that there is a
298 * user context. 307 * user context.
299 */ 308 */
300 if (unlikely(space == 0 || in_atomic() || !mm)) 309 fault = VM_FAULT_BADCONTEXT;
301 goto no_context; 310 if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
311 goto out;
302 312
313 address = trans_exc_code & __FAIL_ADDR_MASK;
303 /* 314 /*
304 * When we get here, the fault happened in the current 315 * When we get here, the fault happened in the current
305 * task's user address space, so we can switch on the 316 * task's user address space, so we can switch on the
@@ -309,42 +320,26 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
309 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 320 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
310 down_read(&mm->mmap_sem); 321 down_read(&mm->mmap_sem);
311 322
312 si_code = SEGV_MAPERR; 323 fault = VM_FAULT_BADMAP;
313 vma = find_vma(mm, address); 324 vma = find_vma(mm, address);
314 if (!vma) 325 if (!vma)
315 goto bad_area; 326 goto out_up;
316
317#ifdef CONFIG_S390_EXEC_PROTECT
318 if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC)))
319 if (!signal_return(mm, regs, address, error_code))
320 /*
321 * signal_return() has done an up_read(&mm->mmap_sem)
322 * if it returns 0.
323 */
324 return;
325#endif
326 327
327 if (vma->vm_start <= address) 328 if (unlikely(vma->vm_start > address)) {
328 goto good_area; 329 if (!(vma->vm_flags & VM_GROWSDOWN))
329 if (!(vma->vm_flags & VM_GROWSDOWN)) 330 goto out_up;
330 goto bad_area; 331 if (expand_stack(vma, address))
331 if (expand_stack(vma, address)) 332 goto out_up;
332 goto bad_area;
333/*
334 * Ok, we have a good vm_area for this memory access, so
335 * we can handle it..
336 */
337good_area:
338 si_code = SEGV_ACCERR;
339 if (!write) {
340 /* page not present, check vm flags */
341 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
342 goto bad_area;
343 } else {
344 if (!(vma->vm_flags & VM_WRITE))
345 goto bad_area;
346 } 333 }
347 334
335 /*
336 * Ok, we have a good vm_area for this memory access, so
337 * we can handle it..
338 */
339 fault = VM_FAULT_BADACCESS;
340 if (unlikely(!(vma->vm_flags & access)))
341 goto out_up;
342
348 if (is_vm_hugetlb_page(vma)) 343 if (is_vm_hugetlb_page(vma))
349 address &= HPAGE_MASK; 344 address &= HPAGE_MASK;
350 /* 345 /*
@@ -352,18 +347,11 @@ good_area:
352 * make sure we exit gracefully rather than endlessly redo 347 * make sure we exit gracefully rather than endlessly redo
353 * the fault. 348 * the fault.
354 */ 349 */
355 fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); 350 fault = handle_mm_fault(mm, vma, address,
356 if (unlikely(fault & VM_FAULT_ERROR)) { 351 (access == VM_WRITE) ? FAULT_FLAG_WRITE : 0);
357 if (fault & VM_FAULT_OOM) { 352 if (unlikely(fault & VM_FAULT_ERROR))
358 up_read(&mm->mmap_sem); 353 goto out_up;
359 pagefault_out_of_memory(); 354
360 return;
361 } else if (fault & VM_FAULT_SIGBUS) {
362 do_sigbus(regs, error_code, address);
363 return;
364 }
365 BUG();
366 }
367 if (fault & VM_FAULT_MAJOR) { 355 if (fault & VM_FAULT_MAJOR) {
368 tsk->maj_flt++; 356 tsk->maj_flt++;
369 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 357 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
@@ -373,74 +361,69 @@ good_area:
373 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 361 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
374 regs, address); 362 regs, address);
375 } 363 }
376 up_read(&mm->mmap_sem);
377 /* 364 /*
378 * The instruction that caused the program check will 365 * The instruction that caused the program check will
379 * be repeated. Don't signal single step via SIGTRAP. 366 * be repeated. Don't signal single step via SIGTRAP.
380 */ 367 */
381 clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP); 368 clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
382 return; 369 fault = 0;
383 370out_up:
384/*
385 * Something tried to access memory that isn't in our memory map..
386 * Fix it, but check if it's kernel or user first..
387 */
388bad_area:
389 up_read(&mm->mmap_sem); 371 up_read(&mm->mmap_sem);
390 372out:
391 /* User mode accesses just cause a SIGSEGV */ 373 return fault;
392 if (regs->psw.mask & PSW_MASK_PSTATE) {
393 tsk->thread.prot_addr = address;
394 tsk->thread.trap_no = error_code;
395 do_sigsegv(regs, error_code, si_code, address);
396 return;
397 }
398
399no_context:
400 do_no_context(regs, error_code, address);
401} 374}
402 375
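
The central change in do_exception() above is that it no longer delivers signals itself: it returns 0 once the fault is handled, or a VM_FAULT_* classification that the exception entry points feed into do_fault_error(). A minimal sketch of that classify-then-dispatch split, with the classification inputs simplified to two booleans:

    #include <stdio.h>

    enum { DEMO_OK = 0, DEMO_BADCONTEXT = 1, DEMO_BADMAP = 2 };

    /* Classify only; no signalling happens in here. */
    static int demo_do_exception(int have_user_context, int vma_found)
    {
        if (!have_user_context)
            return DEMO_BADCONTEXT;   /* in_atomic(), !mm, kernel space */
        if (!vma_found)
            return DEMO_BADMAP;       /* nothing mapped at the address */
        return DEMO_OK;               /* handle_mm_fault() would run here */
    }

    int main(void)
    {
        int fault = demo_do_exception(1, 0);

        if (fault)                    /* only the caller reports errors */
            printf("pass fault code %d to the error handler\n", fault);
        return 0;
    }
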
403void __kprobes do_protection_exception(struct pt_regs *regs, 376void __kprobes do_protection_exception(struct pt_regs *regs, long int_code)
404 long error_code)
405{ 377{
378 unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
379 int fault;
380
 406 /* Protection exception is suppressing, decrement psw address. */ 381 /* Protection exception is suppressing, decrement psw address. */
407 regs->psw.addr -= (error_code >> 16); 382 regs->psw.addr -= (int_code >> 16);
408 /* 383 /*
409 * Check for low-address protection. This needs to be treated 384 * Check for low-address protection. This needs to be treated
410 * as a special case because the translation exception code 385 * as a special case because the translation exception code
411 * field is not guaranteed to contain valid data in this case. 386 * field is not guaranteed to contain valid data in this case.
412 */ 387 */
413 if (unlikely(!(S390_lowcore.trans_exc_code & 4))) { 388 if (unlikely(!(trans_exc_code & 4))) {
414 do_low_address(regs, error_code); 389 do_low_address(regs, int_code, trans_exc_code);
415 return; 390 return;
416 } 391 }
417 do_exception(regs, 4, 1); 392 fault = do_exception(regs, VM_WRITE, trans_exc_code);
393 if (unlikely(fault))
394 do_fault_error(regs, 4, trans_exc_code, fault);
418} 395}
419 396
420void __kprobes do_dat_exception(struct pt_regs *regs, long error_code) 397void __kprobes do_dat_exception(struct pt_regs *regs, long int_code)
421{ 398{
422 do_exception(regs, error_code & 0xff, 0); 399 unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
400 int access, fault;
401
402 access = VM_READ | VM_EXEC | VM_WRITE;
403#ifdef CONFIG_S390_EXEC_PROTECT
404 if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
405 (trans_exc_code & 3) == 0)
406 access = VM_EXEC;
407#endif
408 fault = do_exception(regs, access, trans_exc_code);
409 if (unlikely(fault))
410 do_fault_error(regs, int_code & 255, trans_exc_code, fault);
423} 411}
424 412
425#ifdef CONFIG_64BIT 413#ifdef CONFIG_64BIT
426void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code) 414void __kprobes do_asce_exception(struct pt_regs *regs, long int_code)
427{ 415{
428 struct mm_struct *mm; 416 unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
417 struct mm_struct *mm = current->mm;
429 struct vm_area_struct *vma; 418 struct vm_area_struct *vma;
430 unsigned long address;
431 int space;
432
433 mm = current->mm;
434 address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
435 space = check_space(current);
436 419
437 if (unlikely(space == 0 || in_atomic() || !mm)) 420 if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
438 goto no_context; 421 goto no_context;
439 422
440 local_irq_enable(); 423 local_irq_enable();
441 424
442 down_read(&mm->mmap_sem); 425 down_read(&mm->mmap_sem);
443 vma = find_vma(mm, address); 426 vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
444 up_read(&mm->mmap_sem); 427 up_read(&mm->mmap_sem);
445 428
446 if (vma) { 429 if (vma) {
@@ -450,17 +433,38 @@ void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
450 433
451 /* User mode accesses just cause a SIGSEGV */ 434 /* User mode accesses just cause a SIGSEGV */
452 if (regs->psw.mask & PSW_MASK_PSTATE) { 435 if (regs->psw.mask & PSW_MASK_PSTATE) {
453 current->thread.prot_addr = address; 436 do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
454 current->thread.trap_no = error_code;
455 do_sigsegv(regs, error_code, SEGV_MAPERR, address);
456 return; 437 return;
457 } 438 }
458 439
459no_context: 440no_context:
460 do_no_context(regs, error_code, address); 441 do_no_context(regs, int_code, trans_exc_code);
461} 442}
462#endif 443#endif
463 444
445int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user)
446{
447 struct pt_regs regs;
448 int access, fault;
449
450 regs.psw.mask = psw_kernel_bits;
451 if (!irqs_disabled())
452 regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
453 regs.psw.addr = (unsigned long) __builtin_return_address(0);
454 regs.psw.addr |= PSW_ADDR_AMODE;
455 uaddr &= PAGE_MASK;
456 access = write_user ? VM_WRITE : VM_READ;
457 fault = do_exception(&regs, access, uaddr | 2);
458 if (unlikely(fault)) {
459 if (fault & VM_FAULT_OOM) {
460 pagefault_out_of_memory();
461 fault = 0;
462 } else if (fault & VM_FAULT_SIGBUS)
463 do_sigbus(&regs, int_code, uaddr);
464 }
465 return fault ? -EFAULT : 0;
466}
467
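
__handle_fault() above lets the uaccess code reuse the page-fault machinery by synthesizing a pt_regs and tagging the address with "| 2" so it is classified as a secondary-space (user) access. A hedged userspace sketch of the same trick; the struct and callback are invented for the demo:

    #include <stdio.h>

    struct demo_regs {
        unsigned long psw_mask;
        unsigned long psw_addr;
    };

    /* Build a minimal register context so a common fault classifier can be
     * reused for a kernel uaccess fault; "| 2" marks secondary space. */
    static int demo_handle_fault(unsigned long uaddr,
                                 int (*classify)(struct demo_regs *,
                                                 unsigned long))
    {
        struct demo_regs regs = {
            .psw_mask = 0,
            .psw_addr = (unsigned long) __builtin_return_address(0),
        };

        uaddr &= ~0xfffUL;            /* page-align, like PAGE_MASK */
        return classify(&regs, uaddr | 2) ? -14 /* -EFAULT */ : 0;
    }

    static int demo_classify(struct demo_regs *regs, unsigned long code)
    {
        (void) regs;
        return (code & 3) == 2 ? 0 : 1;   /* accept secondary-space codes */
    }

    int main(void)
    {
        printf("rc=%d\n", demo_handle_fault(0x12345678UL, demo_classify));
        return 0;
    }
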
464#ifdef CONFIG_PFAULT 468#ifdef CONFIG_PFAULT
465/* 469/*
 466 * 'pfault' pseudo page fault routines. 470 * 'pfault' pseudo page fault routines.
@@ -522,7 +526,7 @@ void pfault_fini(void)
522 : : "a" (&refbk), "m" (refbk) : "cc"); 526 : : "a" (&refbk), "m" (refbk) : "cc");
523} 527}
524 528
525static void pfault_interrupt(__u16 error_code) 529static void pfault_interrupt(__u16 int_code)
526{ 530{
527 struct task_struct *tsk; 531 struct task_struct *tsk;
528 __u16 subcode; 532 __u16 subcode;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 2757c5616a07..ad621e06ada3 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -269,7 +269,7 @@ int s390_enable_sie(void)
269 struct mm_struct *mm, *old_mm; 269 struct mm_struct *mm, *old_mm;
270 270
271 /* Do we have switched amode? If no, we cannot do sie */ 271 /* Do we have switched amode? If no, we cannot do sie */
272 if (!switch_amode) 272 if (user_mode == HOME_SPACE_MODE)
273 return -EINVAL; 273 return -EINVAL;
274 274
275 /* Do we have pgstes? if yes, we are done */ 275 /* Do we have pgstes? if yes, we are done */
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 5f91a38d7592..300ab012b0fd 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -70,8 +70,12 @@ static pte_t __ref *vmem_pte_alloc(void)
70 pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); 70 pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
71 if (!pte) 71 if (!pte)
72 return NULL; 72 return NULL;
73 clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, 73 if (MACHINE_HAS_HPAGE)
74 PTRS_PER_PTE * sizeof(pte_t)); 74 clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO,
75 PTRS_PER_PTE * sizeof(pte_t));
76 else
77 clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
78 PTRS_PER_PTE * sizeof(pte_t));
75 return pte; 79 return pte;
76} 80}
77 81
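
The vmem_pte_alloc() hunk above pre-fills fresh page tables with _PAGE_CO in addition to the invalid pattern when the machine supports it, so change recording is overridden for the kernel mapping. A small sketch of the conditional table fill; the bit values are placeholders, not the real s390 definitions:

    #define DEMO_PAGE_EMPTY 0x400UL   /* placeholder for _PAGE_TYPE_EMPTY */
    #define DEMO_PAGE_CO    0x100UL   /* placeholder for _PAGE_CO */
    #define DEMO_PTES       256

    static void demo_clear_table(unsigned long *table, int has_override)
    {
        unsigned long val = DEMO_PAGE_EMPTY;

        if (has_override)             /* mirrors the MACHINE_HAS_HPAGE test */
            val |= DEMO_PAGE_CO;
        for (int i = 0; i < DEMO_PTES; i++)
            table[i] = val;
    }

    int main(void)
    {
        unsigned long table[DEMO_PTES];

        demo_clear_table(table, 1);
        return table[0] == (DEMO_PAGE_EMPTY | DEMO_PAGE_CO) ? 0 : 1;
    }
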
@@ -112,7 +116,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
112 if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) && 116 if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
113 (address + HPAGE_SIZE <= start + size) && 117 (address + HPAGE_SIZE <= start + size) &&
114 (address >= HPAGE_SIZE)) { 118 (address >= HPAGE_SIZE)) {
115 pte_val(pte) |= _SEGMENT_ENTRY_LARGE; 119 pte_val(pte) |= _SEGMENT_ENTRY_LARGE |
120 _SEGMENT_ENTRY_CO;
116 pmd_val(*pm_dir) = pte_val(pte); 121 pmd_val(*pm_dir) = pte_val(pte);
117 address += HPAGE_SIZE - PAGE_SIZE; 122 address += HPAGE_SIZE - PAGE_SIZE;
118 continue; 123 continue;
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index aaccc8ecfa8f..fdb2e7c14506 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -24,7 +24,6 @@
24#include <asm/ccwdev.h> 24#include <asm/ccwdev.h>
25#include <asm/ebcdic.h> 25#include <asm/ebcdic.h>
26#include <asm/idals.h> 26#include <asm/idals.h>
27#include <asm/todclk.h>
28#include <asm/itcw.h> 27#include <asm/itcw.h>
29 28
30/* This is ugly... */ 29/* This is ugly... */
@@ -64,6 +63,7 @@ static void do_restore_device(struct work_struct *);
64static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); 63static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
65static void dasd_device_timeout(unsigned long); 64static void dasd_device_timeout(unsigned long);
66static void dasd_block_timeout(unsigned long); 65static void dasd_block_timeout(unsigned long);
66static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
67 67
68/* 68/*
69 * SECTION: Operations on the device structure. 69 * SECTION: Operations on the device structure.
@@ -960,7 +960,7 @@ static void dasd_device_timeout(unsigned long ptr)
960 device = (struct dasd_device *) ptr; 960 device = (struct dasd_device *) ptr;
961 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 961 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
962 /* re-activate request queue */ 962 /* re-activate request queue */
963 device->stopped &= ~DASD_STOPPED_PENDING; 963 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
964 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 964 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
965 dasd_schedule_device_bh(device); 965 dasd_schedule_device_bh(device);
966} 966}
@@ -994,10 +994,9 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
994 return; 994 return;
995 cqr = (struct dasd_ccw_req *) intparm; 995 cqr = (struct dasd_ccw_req *) intparm;
996 if (cqr->status != DASD_CQR_IN_IO) { 996 if (cqr->status != DASD_CQR_IN_IO) {
997 DBF_EVENT(DBF_DEBUG, 997 DBF_EVENT_DEVID(DBF_DEBUG, cdev,
998 "invalid status in handle_killed_request: " 998 "invalid status in handle_killed_request: "
999 "bus_id %s, status %02x", 999 "%02x", cqr->status);
1000 dev_name(&cdev->dev), cqr->status);
1001 return; 1000 return;
1002 } 1001 }
1003 1002
@@ -1023,7 +1022,7 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
1023 /* First of all start sense subsystem status request. */ 1022 /* First of all start sense subsystem status request. */
1024 dasd_eer_snss(device); 1023 dasd_eer_snss(device);
1025 1024
1026 device->stopped &= ~DASD_STOPPED_PENDING; 1025 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1027 dasd_schedule_device_bh(device); 1026 dasd_schedule_device_bh(device);
1028 if (device->block) 1027 if (device->block)
1029 dasd_schedule_block_bh(device->block); 1028 dasd_schedule_block_bh(device->block);
@@ -1045,12 +1044,13 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1045 case -EIO: 1044 case -EIO:
1046 break; 1045 break;
1047 case -ETIMEDOUT: 1046 case -ETIMEDOUT:
1048 DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n", 1047 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1049 __func__, dev_name(&cdev->dev)); 1048 "request timed out\n", __func__);
1050 break; 1049 break;
1051 default: 1050 default:
1052 DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n", 1051 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1053 __func__, dev_name(&cdev->dev), PTR_ERR(irb)); 1052 "unknown error %ld\n", __func__,
1053 PTR_ERR(irb));
1054 } 1054 }
1055 dasd_handle_killed_request(cdev, intparm); 1055 dasd_handle_killed_request(cdev, intparm);
1056 return; 1056 return;
@@ -1405,6 +1405,20 @@ void dasd_schedule_device_bh(struct dasd_device *device)
1405 tasklet_hi_schedule(&device->tasklet); 1405 tasklet_hi_schedule(&device->tasklet);
1406} 1406}
1407 1407
1408void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
1409{
1410 device->stopped |= bits;
1411}
1412EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
1413
1414void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
1415{
1416 device->stopped &= ~bits;
1417 if (!device->stopped)
1418 wake_up(&generic_waitq);
1419}
1420EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
1421
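
The two helpers above centralize every update of device->stopped, so the clear side can wake sleepers exactly when the last stop bit drops, which the open-coded "&= ~bit" sites could not do. A userspace sketch of the pattern, with a print standing in for the real wake_up():

    #include <stdio.h>

    static unsigned int demo_stopped;

    static void demo_wake_waiters(void)
    {
        puts("wake_up(&generic_waitq)");  /* stand-in for the real wake_up */
    }

    static void demo_set_stop_bits(unsigned int bits)
    {
        demo_stopped |= bits;
    }

    static void demo_remove_stop_bits(unsigned int bits)
    {
        demo_stopped &= ~bits;
        if (!demo_stopped)               /* only the last clear wakes */
            demo_wake_waiters();
    }

    int main(void)
    {
        demo_set_stop_bits(0x1 | 0x2);
        demo_remove_stop_bits(0x1);      /* still stopped: no wake */
        demo_remove_stop_bits(0x2);      /* last bit cleared: wake */
        return 0;
    }
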
1408/* 1422/*
1409 * Queue a request to the head of the device ccw_queue. 1423 * Queue a request to the head of the device ccw_queue.
1410 * Start the I/O if possible. 1424 * Start the I/O if possible.
@@ -1465,58 +1479,135 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
1465} 1479}
1466 1480
1467/* 1481/*
 1468 * Queue a request to the tail of the device ccw_queue and wait for 1482 * Checks if error recovery is necessary; returns 1 if yes, 0 otherwise.
 1469 * its completion.
1470 */ 1483 */
1471int dasd_sleep_on(struct dasd_ccw_req *cqr) 1484static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
1472{ 1485{
1473 struct dasd_device *device; 1486 struct dasd_device *device;
1474 int rc; 1487 dasd_erp_fn_t erp_fn;
1475 1488
1489 if (cqr->status == DASD_CQR_FILLED)
1490 return 0;
1476 device = cqr->startdev; 1491 device = cqr->startdev;
1492 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
1493 if (cqr->status == DASD_CQR_TERMINATED) {
1494 device->discipline->handle_terminated_request(cqr);
1495 return 1;
1496 }
1497 if (cqr->status == DASD_CQR_NEED_ERP) {
1498 erp_fn = device->discipline->erp_action(cqr);
1499 erp_fn(cqr);
1500 return 1;
1501 }
1502 if (cqr->status == DASD_CQR_FAILED)
1503 dasd_log_sense(cqr, &cqr->irb);
1504 if (cqr->refers) {
1505 __dasd_process_erp(device, cqr);
1506 return 1;
1507 }
1508 }
1509 return 0;
1510}
1477 1511
1478 cqr->callback = dasd_wakeup_cb; 1512static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
1479 cqr->callback_data = (void *) &generic_waitq; 1513{
1480 dasd_add_request_tail(cqr); 1514 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
1481 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 1515 if (cqr->refers) /* erp is not done yet */
1516 return 1;
1517 return ((cqr->status != DASD_CQR_DONE) &&
1518 (cqr->status != DASD_CQR_FAILED));
1519 } else
1520 return (cqr->status == DASD_CQR_FILLED);
1521}
1482 1522
1483 if (cqr->status == DASD_CQR_DONE) 1523static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
1524{
1525 struct dasd_device *device;
1526 int rc;
1527 struct list_head ccw_queue;
1528 struct dasd_ccw_req *cqr;
1529
1530 INIT_LIST_HEAD(&ccw_queue);
1531 maincqr->status = DASD_CQR_FILLED;
1532 device = maincqr->startdev;
1533 list_add(&maincqr->blocklist, &ccw_queue);
1534 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
1535 cqr = list_first_entry(&ccw_queue,
1536 struct dasd_ccw_req, blocklist)) {
1537
1538 if (__dasd_sleep_on_erp(cqr))
1539 continue;
1540 if (cqr->status != DASD_CQR_FILLED) /* could be failed */
1541 continue;
1542
1543 /* Non-temporary stop condition will trigger fail fast */
1544 if (device->stopped & ~DASD_STOPPED_PENDING &&
1545 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
1546 (!dasd_eer_enabled(device))) {
1547 cqr->status = DASD_CQR_FAILED;
1548 continue;
1549 }
1550
1551 /* Don't try to start requests if device is stopped */
1552 if (interruptible) {
1553 rc = wait_event_interruptible(
1554 generic_waitq, !(device->stopped));
1555 if (rc == -ERESTARTSYS) {
1556 cqr->status = DASD_CQR_FAILED;
1557 maincqr->intrc = rc;
1558 continue;
1559 }
1560 } else
1561 wait_event(generic_waitq, !(device->stopped));
1562
1563 cqr->callback = dasd_wakeup_cb;
1564 cqr->callback_data = (void *) &generic_waitq;
1565 dasd_add_request_tail(cqr);
1566 if (interruptible) {
1567 rc = wait_event_interruptible(
1568 generic_waitq, _wait_for_wakeup(cqr));
1569 if (rc == -ERESTARTSYS) {
1570 dasd_cancel_req(cqr);
1571 /* wait (non-interruptible) for final status */
1572 wait_event(generic_waitq,
1573 _wait_for_wakeup(cqr));
1574 cqr->status = DASD_CQR_FAILED;
1575 maincqr->intrc = rc;
1576 continue;
1577 }
1578 } else
1579 wait_event(generic_waitq, _wait_for_wakeup(cqr));
1580 }
1581
1582 maincqr->endclk = get_clock();
1583 if ((maincqr->status != DASD_CQR_DONE) &&
1584 (maincqr->intrc != -ERESTARTSYS))
1585 dasd_log_sense(maincqr, &maincqr->irb);
1586 if (maincqr->status == DASD_CQR_DONE)
1484 rc = 0; 1587 rc = 0;
1485 else if (cqr->intrc) 1588 else if (maincqr->intrc)
1486 rc = cqr->intrc; 1589 rc = maincqr->intrc;
1487 else 1590 else
1488 rc = -EIO; 1591 rc = -EIO;
1489 return rc; 1592 return rc;
1490} 1593}
1491 1594
1492/* 1595/*
1596 * Queue a request to the tail of the device ccw_queue and wait for
1597 * its completion.
1598 */
1599int dasd_sleep_on(struct dasd_ccw_req *cqr)
1600{
1601 return _dasd_sleep_on(cqr, 0);
1602}
1603
1604/*
1493 * Queue a request to the tail of the device ccw_queue and wait 1605 * Queue a request to the tail of the device ccw_queue and wait
 1494 * interruptible for its completion. 1606 * interruptible for its completion.
1495 */ 1607 */
1496int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 1608int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
1497{ 1609{
1498 struct dasd_device *device; 1610 return _dasd_sleep_on(cqr, 1);
1499 int rc;
1500
1501 device = cqr->startdev;
1502 cqr->callback = dasd_wakeup_cb;
1503 cqr->callback_data = (void *) &generic_waitq;
1504 dasd_add_request_tail(cqr);
1505 rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
1506 if (rc == -ERESTARTSYS) {
1507 dasd_cancel_req(cqr);
1508 /* wait (non-interruptible) for final status */
1509 wait_event(generic_waitq, _wait_for_wakeup(cqr));
1510 cqr->intrc = rc;
1511 }
1512
1513 if (cqr->status == DASD_CQR_DONE)
1514 rc = 0;
1515 else if (cqr->intrc)
1516 rc = cqr->intrc;
1517 else
1518 rc = -EIO;
1519 return rc;
1520} 1611}
1521 1612
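
With _dasd_sleep_on() above, the plain and interruptible wait paths share one loop that differs only in an "interruptible" flag and in how a signal aborts the request. A minimal sketch of folding two near-identical entry points into one parameterized helper:

    #include <stdio.h>

    static int demo_sleep_on(int interruptible)
    {
        /* wait_event() or wait_event_interruptible() would block here */
        printf("waiting %s\n",
               interruptible ? "interruptibly" : "uninterruptibly");
        return 0;
    }

    static int demo_sleep_on_plain(void)
    {
        return demo_sleep_on(0);
    }

    static int demo_sleep_on_interruptible(void)
    {
        return demo_sleep_on(1);
    }

    int main(void)
    {
        return demo_sleep_on_plain() | demo_sleep_on_interruptible();
    }
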
1522/* 1613/*
@@ -1630,7 +1721,7 @@ static void dasd_block_timeout(unsigned long ptr)
1630 block = (struct dasd_block *) ptr; 1721 block = (struct dasd_block *) ptr;
1631 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 1722 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
1632 /* re-activate request queue */ 1723 /* re-activate request queue */
1633 block->base->stopped &= ~DASD_STOPPED_PENDING; 1724 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
1634 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 1725 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
1635 dasd_schedule_block_bh(block); 1726 dasd_schedule_block_bh(block);
1636} 1727}
@@ -1657,11 +1748,10 @@ void dasd_block_clear_timer(struct dasd_block *block)
1657/* 1748/*
1658 * Process finished error recovery ccw. 1749 * Process finished error recovery ccw.
1659 */ 1750 */
1660static inline void __dasd_block_process_erp(struct dasd_block *block, 1751static void __dasd_process_erp(struct dasd_device *device,
1661 struct dasd_ccw_req *cqr) 1752 struct dasd_ccw_req *cqr)
1662{ 1753{
1663 dasd_erp_fn_t erp_fn; 1754 dasd_erp_fn_t erp_fn;
1664 struct dasd_device *device = block->base;
1665 1755
1666 if (cqr->status == DASD_CQR_DONE) 1756 if (cqr->status == DASD_CQR_DONE)
1667 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 1757 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
@@ -1725,9 +1815,12 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1725 */ 1815 */
1726 if (!list_empty(&block->ccw_queue)) 1816 if (!list_empty(&block->ccw_queue))
1727 break; 1817 break;
1728 spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags); 1818 spin_lock_irqsave(
1729 basedev->stopped |= DASD_STOPPED_PENDING; 1819 get_ccwdev_lock(basedev->cdev), flags);
1730 spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags); 1820 dasd_device_set_stop_bits(basedev,
1821 DASD_STOPPED_PENDING);
1822 spin_unlock_irqrestore(
1823 get_ccwdev_lock(basedev->cdev), flags);
1731 dasd_block_set_timer(block, HZ/2); 1824 dasd_block_set_timer(block, HZ/2);
1732 break; 1825 break;
1733 } 1826 }
@@ -1813,7 +1906,7 @@ restart:
1813 cqr->status = DASD_CQR_FILLED; 1906 cqr->status = DASD_CQR_FILLED;
1814 cqr->retries = 255; 1907 cqr->retries = 255;
1815 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 1908 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
1816 base->stopped |= DASD_STOPPED_QUIESCE; 1909 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
1817 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 1910 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
1818 flags); 1911 flags);
1819 goto restart; 1912 goto restart;
@@ -1821,7 +1914,7 @@ restart:
1821 1914
1822 /* Process finished ERP request. */ 1915 /* Process finished ERP request. */
1823 if (cqr->refers) { 1916 if (cqr->refers) {
1824 __dasd_block_process_erp(block, cqr); 1917 __dasd_process_erp(base, cqr);
1825 goto restart; 1918 goto restart;
1826 } 1919 }
1827 1920
@@ -1952,7 +2045,7 @@ restart_cb:
1952 /* Process finished ERP request. */ 2045 /* Process finished ERP request. */
1953 if (cqr->refers) { 2046 if (cqr->refers) {
1954 spin_lock_bh(&block->queue_lock); 2047 spin_lock_bh(&block->queue_lock);
1955 __dasd_block_process_erp(block, cqr); 2048 __dasd_process_erp(block->base, cqr);
1956 spin_unlock_bh(&block->queue_lock); 2049 spin_unlock_bh(&block->queue_lock);
1957 /* restart list_for_xx loop since dasd_process_erp 2050 /* restart list_for_xx loop since dasd_process_erp
1958 * might remove multiple elements */ 2051 * might remove multiple elements */
@@ -2208,18 +2301,11 @@ int dasd_generic_probe(struct ccw_device *cdev,
2208{ 2301{
2209 int ret; 2302 int ret;
2210 2303
2211 ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
2212 if (ret) {
2213 DBF_EVENT(DBF_WARNING,
2214 "dasd_generic_probe: could not set ccw-device options "
2215 "for %s\n", dev_name(&cdev->dev));
2216 return ret;
2217 }
2218 ret = dasd_add_sysfs_files(cdev); 2304 ret = dasd_add_sysfs_files(cdev);
2219 if (ret) { 2305 if (ret) {
2220 DBF_EVENT(DBF_WARNING, 2306 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
2221 "dasd_generic_probe: could not add sysfs entries " 2307 "dasd_generic_probe: could not add "
2222 "for %s\n", dev_name(&cdev->dev)); 2308 "sysfs entries");
2223 return ret; 2309 return ret;
2224 } 2310 }
2225 cdev->handler = &dasd_int_handler; 2311 cdev->handler = &dasd_int_handler;
@@ -2418,16 +2504,16 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
2418 cqr->status = DASD_CQR_QUEUED; 2504 cqr->status = DASD_CQR_QUEUED;
2419 cqr->retries++; 2505 cqr->retries++;
2420 } 2506 }
2421 device->stopped |= DASD_STOPPED_DC_WAIT; 2507 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
2422 dasd_device_clear_timer(device); 2508 dasd_device_clear_timer(device);
2423 dasd_schedule_device_bh(device); 2509 dasd_schedule_device_bh(device);
2424 ret = 1; 2510 ret = 1;
2425 break; 2511 break;
2426 case CIO_OPER: 2512 case CIO_OPER:
2427 /* FIXME: add a sanity check. */ 2513 /* FIXME: add a sanity check. */
2428 device->stopped &= ~DASD_STOPPED_DC_WAIT; 2514 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
2429 if (device->stopped & DASD_UNRESUMED_PM) { 2515 if (device->stopped & DASD_UNRESUMED_PM) {
2430 device->stopped &= ~DASD_UNRESUMED_PM; 2516 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
2431 dasd_restore_device(device); 2517 dasd_restore_device(device);
2432 ret = 1; 2518 ret = 1;
2433 break; 2519 break;
@@ -2452,7 +2538,7 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
2452 if (IS_ERR(device)) 2538 if (IS_ERR(device))
2453 return PTR_ERR(device); 2539 return PTR_ERR(device);
2454 /* disallow new I/O */ 2540 /* disallow new I/O */
2455 device->stopped |= DASD_STOPPED_PM; 2541 dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
2456 /* clear active requests */ 2542 /* clear active requests */
2457 INIT_LIST_HEAD(&freeze_queue); 2543 INIT_LIST_HEAD(&freeze_queue);
2458 spin_lock_irq(get_ccwdev_lock(cdev)); 2544 spin_lock_irq(get_ccwdev_lock(cdev));
@@ -2504,14 +2590,18 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
2504 return PTR_ERR(device); 2590 return PTR_ERR(device);
2505 2591
2506 /* allow new IO again */ 2592 /* allow new IO again */
2507 device->stopped &= ~DASD_STOPPED_PM; 2593 dasd_device_remove_stop_bits(device,
2508 device->stopped &= ~DASD_UNRESUMED_PM; 2594 (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
2509 2595
2510 dasd_schedule_device_bh(device); 2596 dasd_schedule_device_bh(device);
2511 2597
2512 if (device->discipline->restore) 2598 /*
 2599 * Call the discipline restore function;
 2600 * if the device is stopped, e.g. disconnected, do nothing.
2601 */
2602 if (device->discipline->restore && !(device->stopped))
2513 rc = device->discipline->restore(device); 2603 rc = device->discipline->restore(device);
2514 if (rc) 2604 if (rc || device->stopped)
2515 /* 2605 /*
2516 * if the resume failed for the DASD we put it in 2606 * if the resume failed for the DASD we put it in
2517 * an UNRESUMED stop state 2607 * an UNRESUMED stop state
@@ -2561,8 +2651,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2561 cqr->startdev = device; 2651 cqr->startdev = device;
2562 cqr->memdev = device; 2652 cqr->memdev = device;
2563 cqr->expires = 10*HZ; 2653 cqr->expires = 10*HZ;
2564 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 2654 cqr->retries = 256;
2565 cqr->retries = 2;
2566 cqr->buildclk = get_clock(); 2655 cqr->buildclk = get_clock();
2567 cqr->status = DASD_CQR_FILLED; 2656 cqr->status = DASD_CQR_FILLED;
2568 return cqr; 2657 return cqr;
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index e8ff7b0c961d..44796ba4eb9b 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -12,7 +12,6 @@
12#include <linux/timer.h> 12#include <linux/timer.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <asm/idals.h> 14#include <asm/idals.h>
15#include <asm/todclk.h>
16 15
17#define PRINTK_HEADER "dasd_erp(3990): " 16#define PRINTK_HEADER "dasd_erp(3990): "
18 17
@@ -70,8 +69,7 @@ dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
 70 * processing until the started timer has expired or a related 69 * processing until the started timer has expired or a related
71 * interrupt was received. 70 * interrupt was received.
72 */ 71 */
73static void 72static void dasd_3990_erp_block_queue(struct dasd_ccw_req *erp, int expires)
74dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
75{ 73{
76 74
77 struct dasd_device *device = erp->startdev; 75 struct dasd_device *device = erp->startdev;
@@ -81,10 +79,13 @@ dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
81 "blocking request queue for %is", expires/HZ); 79 "blocking request queue for %is", expires/HZ);
82 80
83 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 81 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
84 device->stopped |= DASD_STOPPED_PENDING; 82 dasd_device_set_stop_bits(device, DASD_STOPPED_PENDING);
85 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 83 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
86 erp->status = DASD_CQR_FILLED; 84 erp->status = DASD_CQR_FILLED;
87 dasd_block_set_timer(device->block, expires); 85 if (erp->block)
86 dasd_block_set_timer(erp->block, expires);
87 else
88 dasd_device_set_timer(device, expires);
88} 89}
89 90
90/* 91/*
@@ -243,9 +244,13 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
243 * DESCRIPTION 244 * DESCRIPTION
 244 * Set up ERP to do ERP action 1 (see Reference manual). 245 * Set up ERP to do ERP action 1 (see Reference manual).
245 * Repeat the operation on a different channel path. 246 * Repeat the operation on a different channel path.
 246 * If all alternate paths have been tried, the request is posted with a 247 * As a deviation from the recommended recovery action, we reset the path mask
247 * permanent error. 248 * after we have tried each path and go through all paths a second time.
248 * Note: duplex handling is not implemented (yet). 249 * This will cover situations where only one path at a time is actually down,
250 * but all paths fail and recover just with the same sequence and timing as
251 * we try to use them (flapping links).
252 * If all alternate paths have been tried twice, the request is posted with
253 * a permanent error.
249 * 254 *
250 * PARAMETER 255 * PARAMETER
251 * erp pointer to the current ERP 256 * erp pointer to the current ERP
@@ -254,17 +259,25 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
254 * erp pointer to the ERP 259 * erp pointer to the ERP
255 * 260 *
256 */ 261 */
257static struct dasd_ccw_req * 262static struct dasd_ccw_req *dasd_3990_erp_action_1_sec(struct dasd_ccw_req *erp)
258dasd_3990_erp_action_1(struct dasd_ccw_req * erp)
259{ 263{
264 erp->function = dasd_3990_erp_action_1_sec;
265 dasd_3990_erp_alternate_path(erp);
266 return erp;
267}
260 268
269static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
270{
261 erp->function = dasd_3990_erp_action_1; 271 erp->function = dasd_3990_erp_action_1;
262
263 dasd_3990_erp_alternate_path(erp); 272 dasd_3990_erp_alternate_path(erp);
264 273 if (erp->status == DASD_CQR_FAILED) {
274 erp->status = DASD_CQR_FILLED;
275 erp->retries = 10;
276 erp->lpm = LPM_ANYPATH;
277 erp->function = dasd_3990_erp_action_1_sec;
278 }
265 return erp; 279 return erp;
266 280} /* end dasd_3990_erp_action_1(b) */
267} /* end dasd_3990_erp_action_1 */
268 281
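
The reworked action 1 above gives flapping links a second chance: when the first pass over all channel paths ends in DASD_CQR_FAILED, the path mask is reset and the request is requeued in a distinct second-pass state, after which a failure really is permanent. A toy model of the two-pass retry; the bit fiddling below only simulates paths failing one by one:

    #include <stdio.h>

    #define DEMO_ANYPATH 0xffu

    struct demo_erp {
        unsigned int lpm;       /* remaining paths to try */
        int second_pass;
        int failed;
    };

    static void demo_alternate_path(struct demo_erp *erp)
    {
        if (erp->lpm)
            erp->lpm &= erp->lpm - 1;   /* simulate one more path failing */
        if (!erp->lpm)
            erp->failed = 1;
    }

    static void demo_action_1(struct demo_erp *erp)
    {
        demo_alternate_path(erp);
        if (erp->failed && !erp->second_pass) {
            erp->failed = 0;            /* retry everything once more */
            erp->lpm = DEMO_ANYPATH;
            erp->second_pass = 1;
        }
    }

    int main(void)
    {
        struct demo_erp erp = { .lpm = 0x1, .second_pass = 0, .failed = 0 };

        demo_action_1(&erp);            /* first pass exhausted */
        printf("second_pass=%d lpm=%#x\n", erp.second_pass, erp.lpm);
        return 0;
    }
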
269/* 282/*
270 * DASD_3990_ERP_ACTION_4 283 * DASD_3990_ERP_ACTION_4
@@ -2295,6 +2308,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
2295 return cqr; 2308 return cqr;
2296 } 2309 }
2297 2310
2311 ccw = cqr->cpaddr;
2298 if (cqr->cpmode == 1) { 2312 if (cqr->cpmode == 1) {
2299 /* make a shallow copy of the original tcw but set new tsb */ 2313 /* make a shallow copy of the original tcw but set new tsb */
2300 erp->cpmode = 1; 2314 erp->cpmode = 1;
@@ -2303,6 +2317,9 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
2303 tsb = (struct tsb *) &tcw[1]; 2317 tsb = (struct tsb *) &tcw[1];
2304 *tcw = *((struct tcw *)cqr->cpaddr); 2318 *tcw = *((struct tcw *)cqr->cpaddr);
2305 tcw->tsb = (long)tsb; 2319 tcw->tsb = (long)tsb;
2320 } else if (ccw->cmd_code == DASD_ECKD_CCW_PSF) {
2321 /* PSF cannot be chained from NOOP/TIC */
2322 erp->cpaddr = cqr->cpaddr;
2306 } else { 2323 } else {
2307 /* initialize request with default TIC to current ERP/CQR */ 2324 /* initialize request with default TIC to current ERP/CQR */
2308 ccw = erp->cpaddr; 2325 ccw = erp->cpaddr;
@@ -2487,6 +2504,8 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
2487 2504
2488 erp = dasd_3990_erp_action_1(erp); 2505 erp = dasd_3990_erp_action_1(erp);
2489 2506
2507 } else if (erp->function == dasd_3990_erp_action_1_sec) {
2508 erp = dasd_3990_erp_action_1_sec(erp);
2490 } else if (erp->function == dasd_3990_erp_action_5) { 2509 } else if (erp->function == dasd_3990_erp_action_5) {
2491 2510
2492 /* retries have not been successful */ 2511 /* retries have not been successful */
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 70a008c00522..fd1231738ef4 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -152,6 +152,7 @@ static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
152 INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work); 152 INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
153 INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work); 153 INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
154 spin_lock_init(&lcu->lock); 154 spin_lock_init(&lcu->lock);
155 init_completion(&lcu->lcu_setup);
155 return lcu; 156 return lcu;
156 157
157out_err4: 158out_err4:
@@ -240,6 +241,67 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
240} 241}
241 242
242/* 243/*
244 * The first device to be registered on an LCU will have to do
245 * some additional setup steps to configure that LCU on the
246 * storage server. All further devices should wait with their
247 * initialization until the first device is done.
248 * To synchronize this work, the first device will call
249 * dasd_alias_lcu_setup_complete when it is done, and all
250 * other devices will wait for it with dasd_alias_wait_for_lcu_setup.
251 */
252void dasd_alias_lcu_setup_complete(struct dasd_device *device)
253{
254 struct dasd_eckd_private *private;
255 unsigned long flags;
256 struct alias_server *server;
257 struct alias_lcu *lcu;
258 struct dasd_uid *uid;
259
260 private = (struct dasd_eckd_private *) device->private;
261 uid = &private->uid;
262 lcu = NULL;
263 spin_lock_irqsave(&aliastree.lock, flags);
264 server = _find_server(uid);
265 if (server)
266 lcu = _find_lcu(server, uid);
267 spin_unlock_irqrestore(&aliastree.lock, flags);
268 if (!lcu) {
269 DBF_EVENT_DEVID(DBF_ERR, device->cdev,
270 "could not find lcu for %04x %02x",
271 uid->ssid, uid->real_unit_addr);
272 WARN_ON(1);
273 return;
274 }
275 complete_all(&lcu->lcu_setup);
276}
277
278void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
279{
280 struct dasd_eckd_private *private;
281 unsigned long flags;
282 struct alias_server *server;
283 struct alias_lcu *lcu;
284 struct dasd_uid *uid;
285
286 private = (struct dasd_eckd_private *) device->private;
287 uid = &private->uid;
288 lcu = NULL;
289 spin_lock_irqsave(&aliastree.lock, flags);
290 server = _find_server(uid);
291 if (server)
292 lcu = _find_lcu(server, uid);
293 spin_unlock_irqrestore(&aliastree.lock, flags);
294 if (!lcu) {
295 DBF_EVENT_DEVID(DBF_ERR, device->cdev,
296 "could not find lcu for %04x %02x",
297 uid->ssid, uid->real_unit_addr);
298 WARN_ON(1);
299 return;
300 }
301 wait_for_completion(&lcu->lcu_setup);
302}
303
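
The pair of functions above implement a one-shot rendezvous on the LCU's completion: the first device calls complete_all() when the storage-server setup is done, and every later device blocks in wait_for_completion() until then. A userspace analogue built on a pthread condition variable (the kernel primitives are not used here):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t demo_cv = PTHREAD_COND_INITIALIZER;
    static int demo_setup_done;

    static void demo_setup_complete(void)       /* first device only */
    {
        pthread_mutex_lock(&demo_lock);
        demo_setup_done = 1;                    /* like complete_all() */
        pthread_cond_broadcast(&demo_cv);
        pthread_mutex_unlock(&demo_lock);
    }

    static void demo_wait_for_setup(void)       /* every other device */
    {
        pthread_mutex_lock(&demo_lock);
        while (!demo_setup_done)                /* like wait_for_completion() */
            pthread_cond_wait(&demo_cv, &demo_lock);
        pthread_mutex_unlock(&demo_lock);
    }

    int main(void)
    {
        demo_setup_complete();
        demo_wait_for_setup();                  /* returns immediately now */
        puts("lcu setup complete");
        return 0;
    }
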
304/*
243 * This function removes a device from the scope of alias management. 305 * This function removes a device from the scope of alias management.
244 * The complicated part is to make sure that it is not in use by 306 * The complicated part is to make sure that it is not in use by
245 * any of the workers. If necessary cancel the work. 307 * any of the workers. If necessary cancel the work.
@@ -755,11 +817,11 @@ static void __stop_device_on_lcu(struct dasd_device *device,
755{ 817{
756 /* If pos == device then device is already locked! */ 818 /* If pos == device then device is already locked! */
757 if (pos == device) { 819 if (pos == device) {
758 pos->stopped |= DASD_STOPPED_SU; 820 dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
759 return; 821 return;
760 } 822 }
761 spin_lock(get_ccwdev_lock(pos->cdev)); 823 spin_lock(get_ccwdev_lock(pos->cdev));
762 pos->stopped |= DASD_STOPPED_SU; 824 dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
763 spin_unlock(get_ccwdev_lock(pos->cdev)); 825 spin_unlock(get_ccwdev_lock(pos->cdev));
764} 826}
765 827
@@ -793,26 +855,26 @@ static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
793 855
794 list_for_each_entry(device, &lcu->active_devices, alias_list) { 856 list_for_each_entry(device, &lcu->active_devices, alias_list) {
795 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 857 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
796 device->stopped &= ~DASD_STOPPED_SU; 858 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
797 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 859 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
798 } 860 }
799 861
800 list_for_each_entry(device, &lcu->inactive_devices, alias_list) { 862 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
801 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 863 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
802 device->stopped &= ~DASD_STOPPED_SU; 864 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
803 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 865 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
804 } 866 }
805 867
806 list_for_each_entry(pavgroup, &lcu->grouplist, group) { 868 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
807 list_for_each_entry(device, &pavgroup->baselist, alias_list) { 869 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
808 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 870 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
809 device->stopped &= ~DASD_STOPPED_SU; 871 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
810 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), 872 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
811 flags); 873 flags);
812 } 874 }
813 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) { 875 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
814 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 876 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
815 device->stopped &= ~DASD_STOPPED_SU; 877 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
816 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), 878 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
817 flags); 879 flags);
818 } 880 }
@@ -836,7 +898,8 @@ static void summary_unit_check_handling_work(struct work_struct *work)
836 898
837 /* 2. reset summary unit check */ 899 /* 2. reset summary unit check */
838 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 900 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
839 device->stopped &= ~(DASD_STOPPED_SU | DASD_STOPPED_PENDING); 901 dasd_device_remove_stop_bits(device,
902 (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
840 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 903 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
841 reset_summary_unit_check(lcu, device, suc_data->reason); 904 reset_summary_unit_check(lcu, device, suc_data->reason);
842 905
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 4e49b4a6c880..f64d0db881b4 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -24,7 +24,6 @@
24#include <asm/ebcdic.h> 24#include <asm/ebcdic.h>
25#include <asm/io.h> 25#include <asm/io.h>
26#include <asm/s390_ext.h> 26#include <asm/s390_ext.h>
27#include <asm/todclk.h>
28#include <asm/vtoc.h> 27#include <asm/vtoc.h>
29#include <asm/diag.h> 28#include <asm/diag.h>
30 29
@@ -145,6 +144,15 @@ dasd_diag_erp(struct dasd_device *device)
145 144
146 mdsk_term_io(device); 145 mdsk_term_io(device);
147 rc = mdsk_init_io(device, device->block->bp_block, 0, NULL); 146 rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
147 if (rc == 4) {
148 if (!(device->features & DASD_FEATURE_READONLY)) {
149 dev_warn(&device->cdev->dev,
150 "The access mode of a DIAG device changed"
151 " to read-only");
152 device->features |= DASD_FEATURE_READONLY;
153 }
154 rc = 0;
155 }
148 if (rc) 156 if (rc)
149 dev_warn(&device->cdev->dev, "DIAG ERP failed with " 157 dev_warn(&device->cdev->dev, "DIAG ERP failed with "
150 "rc=%d\n", rc); 158 "rc=%d\n", rc);
@@ -433,16 +441,20 @@ dasd_diag_check_device(struct dasd_device *device)
433 for (sb = 512; sb < bsize; sb = sb << 1) 441 for (sb = 512; sb < bsize; sb = sb << 1)
434 block->s2b_shift++; 442 block->s2b_shift++;
435 rc = mdsk_init_io(device, block->bp_block, 0, NULL); 443 rc = mdsk_init_io(device, block->bp_block, 0, NULL);
436 if (rc) { 444 if (rc && (rc != 4)) {
437 dev_warn(&device->cdev->dev, "DIAG initialization " 445 dev_warn(&device->cdev->dev, "DIAG initialization "
438 "failed with rc=%d\n", rc); 446 "failed with rc=%d\n", rc);
439 rc = -EIO; 447 rc = -EIO;
440 } else { 448 } else {
449 if (rc == 4)
450 device->features |= DASD_FEATURE_READONLY;
441 dev_info(&device->cdev->dev, 451 dev_info(&device->cdev->dev,
442 "New DASD with %ld byte/block, total size %ld KB\n", 452 "New DASD with %ld byte/block, total size %ld KB%s\n",
443 (unsigned long) block->bp_block, 453 (unsigned long) block->bp_block,
444 (unsigned long) (block->blocks << 454 (unsigned long) (block->blocks <<
445 block->s2b_shift) >> 1); 455 block->s2b_shift) >> 1,
456 (rc == 4) ? ", read-only device" : "");
457 rc = 0;
446 } 458 }
447out_label: 459out_label:
448 free_page((long) label); 460 free_page((long) label);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 417b97cd3f94..5819dc02a143 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -24,7 +24,6 @@
24#include <asm/idals.h> 24#include <asm/idals.h>
25#include <asm/ebcdic.h> 25#include <asm/ebcdic.h>
26#include <asm/io.h> 26#include <asm/io.h>
27#include <asm/todclk.h>
28#include <asm/uaccess.h> 27#include <asm/uaccess.h>
29#include <asm/cio.h> 28#include <asm/cio.h>
30#include <asm/ccwdev.h> 29#include <asm/ccwdev.h>
@@ -78,6 +77,11 @@ MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
78 77
79static struct ccw_driver dasd_eckd_driver; /* see below */ 78static struct ccw_driver dasd_eckd_driver; /* see below */
80 79
80#define INIT_CQR_OK 0
81#define INIT_CQR_UNFORMATTED 1
82#define INIT_CQR_ERROR 2
83
84
81/* initial attempt at a probe function. this can be simplified once 85/* initial attempt at a probe function. this can be simplified once
82 * the other detection code is gone */ 86 * the other detection code is gone */
83static int 87static int
@@ -86,11 +90,12 @@ dasd_eckd_probe (struct ccw_device *cdev)
86 int ret; 90 int ret;
87 91
88 /* set ECKD specific ccw-device options */ 92 /* set ECKD specific ccw-device options */
89 ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE); 93 ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
94 CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
90 if (ret) { 95 if (ret) {
91 DBF_EVENT(DBF_WARNING, 96 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
92 "dasd_eckd_probe: could not set ccw-device options " 97 "dasd_eckd_probe: could not set "
93 "for %s\n", dev_name(&cdev->dev)); 98 "ccw-device options");
94 return ret; 99 return ret;
95 } 100 }
96 ret = dasd_generic_probe(cdev, &dasd_eckd_discipline); 101 ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
@@ -749,8 +754,7 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
749 cqr->block = NULL; 754 cqr->block = NULL;
750 cqr->expires = 10*HZ; 755 cqr->expires = 10*HZ;
751 cqr->lpm = lpm; 756 cqr->lpm = lpm;
752 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 757 cqr->retries = 256;
753 cqr->retries = 2;
754 cqr->buildclk = get_clock(); 758 cqr->buildclk = get_clock();
755 cqr->status = DASD_CQR_FILLED; 759 cqr->status = DASD_CQR_FILLED;
756 return cqr; 760 return cqr;
@@ -885,16 +889,15 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
885 rc = dasd_eckd_read_conf_lpm(device, &conf_data, 889 rc = dasd_eckd_read_conf_lpm(device, &conf_data,
886 &conf_len, lpm); 890 &conf_len, lpm);
887 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */ 891 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
888 DBF_EVENT(DBF_WARNING, 892 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
889 "Read configuration data returned " 893 "Read configuration data returned "
890 "error %d for device: %s", rc, 894 "error %d", rc);
891 dev_name(&device->cdev->dev));
892 return rc; 895 return rc;
893 } 896 }
894 if (conf_data == NULL) { 897 if (conf_data == NULL) {
895 DBF_EVENT(DBF_WARNING, "No configuration " 898 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
896 "data retrieved for device: %s", 899 "No configuration data "
897 dev_name(&device->cdev->dev)); 900 "retrieved");
898 continue; /* no error */ 901 continue; /* no error */
899 } 902 }
900 /* save first valid configuration data */ 903 /* save first valid configuration data */
@@ -941,16 +944,14 @@ static int dasd_eckd_read_features(struct dasd_device *device)
941 sizeof(struct dasd_rssd_features)), 944 sizeof(struct dasd_rssd_features)),
942 device); 945 device);
943 if (IS_ERR(cqr)) { 946 if (IS_ERR(cqr)) {
944 DBF_EVENT(DBF_WARNING, "Could not allocate initialization " 947 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
945 "request for device: %s", 948 "allocate initialization request");
946 dev_name(&device->cdev->dev));
947 return PTR_ERR(cqr); 949 return PTR_ERR(cqr);
948 } 950 }
949 cqr->startdev = device; 951 cqr->startdev = device;
950 cqr->memdev = device; 952 cqr->memdev = device;
951 cqr->block = NULL; 953 cqr->block = NULL;
952 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 954 cqr->retries = 256;
953 cqr->retries = 5;
954 cqr->expires = 10 * HZ; 955 cqr->expires = 10 * HZ;
955 956
956 /* Prepare for Read Subsystem Data */ 957 /* Prepare for Read Subsystem Data */
@@ -1012,9 +1013,9 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1012 } 1013 }
1013 psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data; 1014 psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
1014 psf_ssc_data->order = PSF_ORDER_SSC; 1015 psf_ssc_data->order = PSF_ORDER_SSC;
1015 psf_ssc_data->suborder = 0x40; 1016 psf_ssc_data->suborder = 0xc0;
1016 if (enable_pav) { 1017 if (enable_pav) {
1017 psf_ssc_data->suborder |= 0x88; 1018 psf_ssc_data->suborder |= 0x08;
1018 psf_ssc_data->reserved[0] = 0x88; 1019 psf_ssc_data->reserved[0] = 0x88;
1019 } 1020 }
1020 ccw = cqr->cpaddr; 1021 ccw = cqr->cpaddr;
@@ -1025,6 +1026,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1025 cqr->startdev = device; 1026 cqr->startdev = device;
1026 cqr->memdev = device; 1027 cqr->memdev = device;
1027 cqr->block = NULL; 1028 cqr->block = NULL;
1029 cqr->retries = 256;
1028 cqr->expires = 10*HZ; 1030 cqr->expires = 10*HZ;
1029 cqr->buildclk = get_clock(); 1031 cqr->buildclk = get_clock();
1030 cqr->status = DASD_CQR_FILLED; 1032 cqr->status = DASD_CQR_FILLED;
@@ -1057,7 +1059,7 @@ dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
1057/* 1059/*
1058 * Validate storage server of current device. 1060 * Validate storage server of current device.
1059 */ 1061 */
1060static int dasd_eckd_validate_server(struct dasd_device *device) 1062static void dasd_eckd_validate_server(struct dasd_device *device)
1061{ 1063{
1062 int rc; 1064 int rc;
1063 struct dasd_eckd_private *private; 1065 struct dasd_eckd_private *private;
@@ -1068,15 +1070,12 @@ static int dasd_eckd_validate_server(struct dasd_device *device)
1068 else 1070 else
1069 enable_pav = 1; 1071 enable_pav = 1;
1070 rc = dasd_eckd_psf_ssc(device, enable_pav); 1072 rc = dasd_eckd_psf_ssc(device, enable_pav);
1073
1071 /* maybe the requested feature is not available on the server, 1074 /* maybe the requested feature is not available on the server,
1072 * therefore just report the error and go ahead */ 1075 * therefore just report the error and go ahead */
1073 private = (struct dasd_eckd_private *) device->private; 1076 private = (struct dasd_eckd_private *) device->private;
1074 DBF_EVENT(DBF_WARNING, "PSF-SSC on storage subsystem %s.%s.%04x " 1077 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1075 "returned rc=%d for device: %s", 1078 "returned rc=%d", private->uid.ssid, rc);
1076 private->uid.vendor, private->uid.serial,
1077 private->uid.ssid, rc, dev_name(&device->cdev->dev));
1078 /* RE-Read Configuration Data */
1079 return dasd_eckd_read_conf(device);
1080} 1079}
1081 1080
1082/* 1081/*
@@ -1090,6 +1089,15 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1090 struct dasd_block *block; 1089 struct dasd_block *block;
1091 int is_known, rc; 1090 int is_known, rc;
1092 1091
1092 if (!ccw_device_is_pathgroup(device->cdev)) {
1093 dev_warn(&device->cdev->dev,
1094 "A channel path group could not be established\n");
1095 return -EIO;
1096 }
1097 if (!ccw_device_is_multipath(device->cdev)) {
1098 dev_info(&device->cdev->dev,
1099 "The DASD is not operating in multipath mode\n");
1100 }
1093 private = (struct dasd_eckd_private *) device->private; 1101 private = (struct dasd_eckd_private *) device->private;
1094 if (!private) { 1102 if (!private) {
1095 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); 1103 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
@@ -1123,9 +1131,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1123 if (private->uid.type == UA_BASE_DEVICE) { 1131 if (private->uid.type == UA_BASE_DEVICE) {
1124 block = dasd_alloc_block(); 1132 block = dasd_alloc_block();
1125 if (IS_ERR(block)) { 1133 if (IS_ERR(block)) {
1126 DBF_EVENT(DBF_WARNING, "could not allocate dasd " 1134 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1127 "block structure for device: %s", 1135 "could not allocate dasd "
1128 dev_name(&device->cdev->dev)); 1136 "block structure");
1129 rc = PTR_ERR(block); 1137 rc = PTR_ERR(block);
1130 goto out_err1; 1138 goto out_err1;
1131 } 1139 }
@@ -1139,12 +1147,21 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1139 rc = is_known; 1147 rc = is_known;
1140 goto out_err2; 1148 goto out_err2;
1141 } 1149 }
1150 /*
1151 * dasd_eckd_validate_server is done on the first device that
1152 * is found for an LCU. All other devices have to wait
1153 * for it, so they will read the correct feature codes.
1154 */
1142 if (!is_known) { 1155 if (!is_known) {
1143 /* new lcu found */ 1156 dasd_eckd_validate_server(device);
1144 rc = dasd_eckd_validate_server(device); /* will switch pav on */ 1157 dasd_alias_lcu_setup_complete(device);
1145 if (rc) 1158 } else
1146 goto out_err3; 1159 dasd_alias_wait_for_lcu_setup(device);
1147 } 1160
1161 /* device may report different configuration data after LCU setup */
1162 rc = dasd_eckd_read_conf(device);
1163 if (rc)
1164 goto out_err3;
1148 1165
1149 /* Read Feature Codes */ 1166 /* Read Feature Codes */
1150 dasd_eckd_read_features(device); 1167 dasd_eckd_read_features(device);
@@ -1153,9 +1170,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1153 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, 1170 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
1154 &private->rdc_data, 64); 1171 &private->rdc_data, 64);
1155 if (rc) { 1172 if (rc) {
1156 DBF_EVENT(DBF_WARNING, 1173 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1157 "Read device characteristics failed, rc=%d for " 1174 "Read device characteristic failed, rc=%d", rc);
1158 "device: %s", rc, dev_name(&device->cdev->dev));
1159 goto out_err3; 1175 goto out_err3;
1160 } 1176 }
1161 /* find the valid cylinder size */ 1177 /* find the valid cylinder size */
@@ -1256,12 +1272,29 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
1256 cqr->block = NULL; 1272 cqr->block = NULL;
1257 cqr->startdev = device; 1273 cqr->startdev = device;
1258 cqr->memdev = device; 1274 cqr->memdev = device;
1259 cqr->retries = 0; 1275 cqr->retries = 255;
1260 cqr->buildclk = get_clock(); 1276 cqr->buildclk = get_clock();
1261 cqr->status = DASD_CQR_FILLED; 1277 cqr->status = DASD_CQR_FILLED;
1262 return cqr; 1278 return cqr;
1263} 1279}
1264 1280
1281/* differentiate between 'no record found' and any other error */
1282static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
1283{
1284 char *sense;
1285 if (init_cqr->status == DASD_CQR_DONE)
1286 return INIT_CQR_OK;
1287 else if (init_cqr->status == DASD_CQR_NEED_ERP ||
1288 init_cqr->status == DASD_CQR_FAILED) {
1289 sense = dasd_get_sense(&init_cqr->irb);
1290 if (sense && (sense[1] & SNS1_NO_REC_FOUND))
1291 return INIT_CQR_UNFORMATTED;
1292 else
1293 return INIT_CQR_ERROR;
1294 } else
1295 return INIT_CQR_ERROR;
1296}
1297
1265/* 1298/*
1266 * This is the callback function for the init_analysis cqr. It saves 1299 * This is the callback function for the init_analysis cqr. It saves
1267 * the status of the initial analysis ccw before it frees it and kicks 1300 * the status of the initial analysis ccw before it frees it and kicks
@@ -1269,21 +1302,20 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
1269 * dasd_eckd_do_analysis again (if the device has not been marked 1302 * dasd_eckd_do_analysis again (if the device has not been marked
1270 * for deletion in the meantime). 1303 * for deletion in the meantime).
1271 */ 1304 */
1272static void 1305static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
1273dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data) 1306 void *data)
1274{ 1307{
1275 struct dasd_eckd_private *private; 1308 struct dasd_eckd_private *private;
1276 struct dasd_device *device; 1309 struct dasd_device *device;
1277 1310
1278 device = init_cqr->startdev; 1311 device = init_cqr->startdev;
1279 private = (struct dasd_eckd_private *) device->private; 1312 private = (struct dasd_eckd_private *) device->private;
1280 private->init_cqr_status = init_cqr->status; 1313 private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
1281 dasd_sfree_request(init_cqr, device); 1314 dasd_sfree_request(init_cqr, device);
1282 dasd_kick_device(device); 1315 dasd_kick_device(device);
1283} 1316}
1284 1317
1285static int 1318static int dasd_eckd_start_analysis(struct dasd_block *block)
1286dasd_eckd_start_analysis(struct dasd_block *block)
1287{ 1319{
1288 struct dasd_eckd_private *private; 1320 struct dasd_eckd_private *private;
1289 struct dasd_ccw_req *init_cqr; 1321 struct dasd_ccw_req *init_cqr;
@@ -1295,27 +1327,44 @@ dasd_eckd_start_analysis(struct dasd_block *block)
1295 init_cqr->callback = dasd_eckd_analysis_callback; 1327 init_cqr->callback = dasd_eckd_analysis_callback;
1296 init_cqr->callback_data = NULL; 1328 init_cqr->callback_data = NULL;
1297 init_cqr->expires = 5*HZ; 1329 init_cqr->expires = 5*HZ;
1330 /* first try without ERP, so we can later handle unformatted
1331 * devices as a special case
1332 */
1333 clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
1334 init_cqr->retries = 0;
1298 dasd_add_request_head(init_cqr); 1335 dasd_add_request_head(init_cqr);
1299 return -EAGAIN; 1336 return -EAGAIN;
1300} 1337}
1301 1338
1302static int 1339static int dasd_eckd_end_analysis(struct dasd_block *block)
1303dasd_eckd_end_analysis(struct dasd_block *block)
1304{ 1340{
1305 struct dasd_device *device; 1341 struct dasd_device *device;
1306 struct dasd_eckd_private *private; 1342 struct dasd_eckd_private *private;
1307 struct eckd_count *count_area; 1343 struct eckd_count *count_area;
1308 unsigned int sb, blk_per_trk; 1344 unsigned int sb, blk_per_trk;
1309 int status, i; 1345 int status, i;
1346 struct dasd_ccw_req *init_cqr;
1310 1347
1311 device = block->base; 1348 device = block->base;
1312 private = (struct dasd_eckd_private *) device->private; 1349 private = (struct dasd_eckd_private *) device->private;
1313 status = private->init_cqr_status; 1350 status = private->init_cqr_status;
1314 private->init_cqr_status = -1; 1351 private->init_cqr_status = -1;
1315 if (status != DASD_CQR_DONE) { 1352 if (status == INIT_CQR_ERROR) {
1316 dev_warn(&device->cdev->dev, 1353 /* try again, this time with full ERP */
1317 "The DASD is not formatted\n"); 1354 init_cqr = dasd_eckd_analysis_ccw(device);
1355 dasd_sleep_on(init_cqr);
1356 status = dasd_eckd_analysis_evaluation(init_cqr);
1357 dasd_sfree_request(init_cqr, device);
1358 }
1359
1360 if (status == INIT_CQR_UNFORMATTED) {
1361 dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
1318 return -EMEDIUMTYPE; 1362 return -EMEDIUMTYPE;
1363 } else if (status == INIT_CQR_ERROR) {
1364 dev_err(&device->cdev->dev,
1365 "Detecting the DASD disk layout failed because "
1366 "of an I/O error\n");
1367 return -EIO;
1319 } 1368 }
1320 1369
1321 private->uses_cdl = 1; 1370 private->uses_cdl = 1;
@@ -1607,8 +1656,7 @@ dasd_eckd_format_device(struct dasd_device * device,
1607 } 1656 }
1608 fcp->startdev = device; 1657 fcp->startdev = device;
1609 fcp->memdev = device; 1658 fcp->memdev = device;
1610 clear_bit(DASD_CQR_FLAGS_USE_ERP, &fcp->flags); 1659 fcp->retries = 256;
1611 fcp->retries = 5; /* set retry counter to enable default ERP */
1612 fcp->buildclk = get_clock(); 1660 fcp->buildclk = get_clock();
1613 fcp->status = DASD_CQR_FILLED; 1661 fcp->status = DASD_CQR_FILLED;
1614 return fcp; 1662 return fcp;
@@ -2690,6 +2738,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
2690 cqr->startdev = device; 2738 cqr->startdev = device;
2691 cqr->memdev = device; 2739 cqr->memdev = device;
2692 cqr->retries = 0; 2740 cqr->retries = 0;
2741 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2693 cqr->expires = 10 * HZ; 2742 cqr->expires = 10 * HZ;
2694 2743
2695 /* Prepare for Read Subsystem Data */ 2744 /* Prepare for Read Subsystem Data */
@@ -3240,11 +3289,15 @@ int dasd_eckd_restore_device(struct dasd_device *device)
3240 if (is_known < 0) 3289 if (is_known < 0)
3241 return is_known; 3290 return is_known;
3242 if (!is_known) { 3291 if (!is_known) {
3243 /* new lcu found */ 3292 dasd_eckd_validate_server(device);
3244 rc = dasd_eckd_validate_server(device); /* will switch pav on */ 3293 dasd_alias_lcu_setup_complete(device);
3245 if (rc) 3294 } else
3246 goto out_err; 3295 dasd_alias_wait_for_lcu_setup(device);
3247 } 3296
3297 /* RE-Read Configuration Data */
3298 rc = dasd_eckd_read_conf(device);
3299 if (rc)
3300 goto out_err;
3248 3301
3249 /* Read Feature Codes */ 3302 /* Read Feature Codes */
3250 dasd_eckd_read_features(device); 3303 dasd_eckd_read_features(device);
@@ -3253,9 +3306,8 @@ int dasd_eckd_restore_device(struct dasd_device *device)
3253 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, 3306 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
3254 &temp_rdc_data, 64); 3307 &temp_rdc_data, 64);
3255 if (rc) { 3308 if (rc) {
3256 DBF_EVENT(DBF_WARNING, 3309 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
3257 "Read device characteristics failed, rc=%d for " 3310 "Read device characteristic failed, rc=%d", rc);
3258 "device: %s", rc, dev_name(&device->cdev->dev));
3259 goto out_err; 3311 goto out_err;
3260 } 3312 }
3261 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 3313 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index ad45bcac3ce4..864d53c04201 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -414,6 +414,7 @@ struct alias_lcu {
414 struct summary_unit_check_work_data suc_data; 414 struct summary_unit_check_work_data suc_data;
415 struct read_uac_work_data ruac_data; 415 struct read_uac_work_data ruac_data;
416 struct dasd_ccw_req *rsu_cqr; 416 struct dasd_ccw_req *rsu_cqr;
417 struct completion lcu_setup;
417}; 418};
418 419
419struct alias_pav_group { 420struct alias_pav_group {
@@ -460,5 +461,6 @@ int dasd_alias_remove_device(struct dasd_device *);
460struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *); 461struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
461void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *); 462void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
462void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *); 463void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
463 464void dasd_alias_lcu_setup_complete(struct dasd_device *);
465void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
464#endif /* DASD_ECKD_H */ 466#endif /* DASD_ECKD_H */
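
The lcu_setup completion added here is the synchronization point behind the "first device validates the server, all others wait" logic in dasd_eckd_check_characteristics() above. dasd_alias.c is not part of this excerpt, so the following is only the expected completion idiom sketched against the new member; the lookup from a dasd_device to its alias_lcu is omitted:

#include <linux/completion.h>

struct lcu_sketch {
	struct completion lcu_setup;	/* as added to struct alias_lcu */
};

static void lcu_sketch_init(struct lcu_sketch *lcu)
{
	init_completion(&lcu->lcu_setup);
}

/* the device that ran dasd_eckd_validate_server() releases the others */
static void lcu_sketch_setup_complete(struct lcu_sketch *lcu)
{
	complete_all(&lcu->lcu_setup);
}

/* every later device on the same LCU blocks until setup is done */
static void lcu_sketch_wait(struct lcu_sketch *lcu)
{
	wait_for_completion(&lcu->lcu_setup);
}

complete_all() is the natural choice here, since any number of devices may already be blocked in the wait when setup finishes.
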
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index d96039eae59b..1f3e967aaba8 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -536,7 +536,6 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
536 eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL); 536 eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
537 if (!eerb) 537 if (!eerb)
538 return -ENOMEM; 538 return -ENOMEM;
539 lock_kernel();
540 eerb->buffer_page_count = eer_pages; 539 eerb->buffer_page_count = eer_pages;
541 if (eerb->buffer_page_count < 1 || 540 if (eerb->buffer_page_count < 1 ||
542 eerb->buffer_page_count > INT_MAX / PAGE_SIZE) { 541 eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
@@ -544,7 +543,6 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
544 DBF_EVENT(DBF_WARNING, "can't open device since module " 543 DBF_EVENT(DBF_WARNING, "can't open device since module "
545 "parameter eer_pages is smaller than 1 or" 544 "parameter eer_pages is smaller than 1 or"
546 " bigger than %d", (int)(INT_MAX / PAGE_SIZE)); 545 " bigger than %d", (int)(INT_MAX / PAGE_SIZE));
547 unlock_kernel();
548 return -EINVAL; 546 return -EINVAL;
549 } 547 }
550 eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE; 548 eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
@@ -552,14 +550,12 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
552 GFP_KERNEL); 550 GFP_KERNEL);
553 if (!eerb->buffer) { 551 if (!eerb->buffer) {
554 kfree(eerb); 552 kfree(eerb);
555 unlock_kernel();
556 return -ENOMEM; 553 return -ENOMEM;
557 } 554 }
558 if (dasd_eer_allocate_buffer_pages(eerb->buffer, 555 if (dasd_eer_allocate_buffer_pages(eerb->buffer,
559 eerb->buffer_page_count)) { 556 eerb->buffer_page_count)) {
560 kfree(eerb->buffer); 557 kfree(eerb->buffer);
561 kfree(eerb); 558 kfree(eerb);
562 unlock_kernel();
563 return -ENOMEM; 559 return -ENOMEM;
564 } 560 }
565 filp->private_data = eerb; 561 filp->private_data = eerb;
@@ -567,7 +563,6 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
567 list_add(&eerb->list, &bufferlist); 563 list_add(&eerb->list, &bufferlist);
568 spin_unlock_irqrestore(&bufferlock, flags); 564 spin_unlock_irqrestore(&bufferlock, flags);
569 565
570 unlock_kernel();
571 return nonseekable_open(inp,filp); 566 return nonseekable_open(inp,filp);
572} 567}
573 568
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index f245377e8e27..0f152444ac77 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -20,7 +20,6 @@
20#include <asm/idals.h> 20#include <asm/idals.h>
21#include <asm/ebcdic.h> 21#include <asm/ebcdic.h>
22#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/todclk.h>
24#include <asm/ccwdev.h> 23#include <asm/ccwdev.h>
25 24
26#include "dasd_int.h" 25#include "dasd_int.h"
@@ -141,9 +140,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
141 } 140 }
142 block = dasd_alloc_block(); 141 block = dasd_alloc_block();
143 if (IS_ERR(block)) { 142 if (IS_ERR(block)) {
144 DBF_EVENT(DBF_WARNING, "could not allocate dasd block " 143 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate "
145 "structure for device: %s", 144 "dasd block structure");
146 dev_name(&device->cdev->dev));
147 device->private = NULL; 145 device->private = NULL;
148 kfree(private); 146 kfree(private);
149 return PTR_ERR(block); 147 return PTR_ERR(block);
@@ -155,9 +153,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
155 rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC, 153 rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
156 &private->rdc_data, 32); 154 &private->rdc_data, 32);
157 if (rc) { 155 if (rc) {
158 DBF_EVENT(DBF_WARNING, "Read device characteristics returned " 156 DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device "
159 "error %d for device: %s", 157 "characteristics returned error %d", rc);
160 rc, dev_name(&device->cdev->dev));
161 device->block = NULL; 158 device->block = NULL;
162 dasd_free_block(block); 159 dasd_free_block(block);
163 device->private = NULL; 160 device->private = NULL;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 8afd9fa00875..e4c2143dabf6 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -108,6 +108,16 @@ do { \
108 d_data); \ 108 d_data); \
109} while(0) 109} while(0)
110 110
111#define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...) \
112do { \
113 struct ccw_dev_id __dev_id; \
114 ccw_device_get_id(d_cdev, &__dev_id); \
115 debug_sprintf_event(dasd_debug_area, \
116 d_level, \
117 "0.%x.%04x " d_str "\n", \
118 __dev_id.ssid, __dev_id.devno, d_data); \
119} while (0)
120
111#define DBF_EXC(d_level, d_str, d_data...)\ 121#define DBF_EXC(d_level, d_str, d_data...)\
112do { \ 122do { \
113 debug_sprintf_exception(dasd_debug_area, \ 123 debug_sprintf_exception(dasd_debug_area, \
@@ -595,6 +605,9 @@ int dasd_generic_restore_device(struct ccw_device *);
595int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int); 605int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
596char *dasd_get_sense(struct irb *); 606char *dasd_get_sense(struct irb *);
597 607
608void dasd_device_set_stop_bits(struct dasd_device *, int);
609void dasd_device_remove_stop_bits(struct dasd_device *, int);
610
598/* externals in dasd_devmap.c */ 611/* externals in dasd_devmap.c */
599extern int dasd_max_devindex; 612extern int dasd_max_devindex;
600extern int dasd_probeonly; 613extern int dasd_probeonly;
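
The DBF_EVENT_DEVID macro above is the workhorse of this series of message cleanups: rather than pasting dev_name() into every trace call, it resolves the ccw_dev_id once and prefixes the message with the bus ID. A user-space reduction of the same variadic shape, with printf standing in for debug_sprintf_event() and the id passed in directly instead of being read from a ccw_device:

#include <stdio.h>

struct ccw_dev_id { unsigned int ssid; unsigned int devno; };

#define DBF_EVENT_DEVID_SKETCH(d_id, d_str, d_data...) \
do { \
	printf("0.%x.%04x " d_str "\n", (d_id).ssid, (d_id).devno, d_data); \
} while (0)

int main(void)
{
	struct ccw_dev_id id = { .ssid = 0, .devno = 0x7150 };

	DBF_EVENT_DEVID_SKETCH(id, "Read configuration data returned error %d", -5);
	return 0;
}

The bare "%s" arguments visible at many converted call sites exist because this macro form always needs at least one trailing argument.
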
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index f756a1b0c57a..478bcdb90b6f 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -101,7 +101,7 @@ static int dasd_ioctl_quiesce(struct dasd_block *block)
101 pr_info("%s: The DASD has been put in the quiesce " 101 pr_info("%s: The DASD has been put in the quiesce "
102 "state\n", dev_name(&base->cdev->dev)); 102 "state\n", dev_name(&base->cdev->dev));
103 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 103 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
104 base->stopped |= DASD_STOPPED_QUIESCE; 104 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
105 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 105 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
106 return 0; 106 return 0;
107} 107}
@@ -122,7 +122,7 @@ static int dasd_ioctl_resume(struct dasd_block *block)
122 pr_info("%s: I/O operations have been resumed " 122 pr_info("%s: I/O operations have been resumed "
123 "on the DASD\n", dev_name(&base->cdev->dev)); 123 "on the DASD\n", dev_name(&base->cdev->dev));
124 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 124 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
125 base->stopped &= ~DASD_STOPPED_QUIESCE; 125 dasd_device_remove_stop_bits(base, DASD_STOPPED_QUIESCE);
126 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 126 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
127 127
128 dasd_schedule_block_bh(block); 128 dasd_schedule_block_bh(block);
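
dasd_device_set_stop_bits() and dasd_device_remove_stop_bits(), declared in dasd_int.h above, replace the open-coded |= and &= ~ on base->stopped seen in these two hunks. Their bodies live in dasd.c, outside this excerpt; since every caller already holds the ccwdev lock, they are presumably no more than the following, so treat this as an assumption rather than the patch itself:

/* assumed implementation; the real bodies are in dasd.c */
void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped |= bits;
}

void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped &= ~bits;
}

Funneling the bit updates through one pair of helpers gives a single place to attach debugging or ordering rules later, which is the usual motive for accessors like these.
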
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 21639d6c996f..9d61683b5633 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -857,7 +857,6 @@ static struct console con3215 = {
857 857
858/* 858/*
859 * 3215 console initialization code called from console_init(). 859 * 3215 console initialization code called from console_init().
860 * NOTE: This is called before kmalloc is available.
861 */ 860 */
862static int __init con3215_init(void) 861static int __init con3215_init(void)
863{ 862{
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index bb838bdf829d..6bca81aea396 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -572,7 +572,6 @@ static struct console con3270 = {
572 572
573/* 573/*
574 * 3270 console initialization code called from console_init(). 574 * 3270 console initialization code called from console_init().
575 * NOTE: This is called before kmalloc is available.
576 */ 575 */
577static int __init 576static int __init
578con3270_init(void) 577con3270_init(void)
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 097d3846a828..d449063c30fe 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -38,6 +38,8 @@ struct fs3270 {
38 size_t rdbuf_size; /* size of data returned by RDBUF */ 38 size_t rdbuf_size; /* size of data returned by RDBUF */
39}; 39};
40 40
41static DEFINE_MUTEX(fs3270_mutex);
42
41static void 43static void
42fs3270_wake_up(struct raw3270_request *rq, void *data) 44fs3270_wake_up(struct raw3270_request *rq, void *data)
43{ 45{
@@ -328,7 +330,7 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
328 if (!fp) 330 if (!fp)
329 return -ENODEV; 331 return -ENODEV;
330 rc = 0; 332 rc = 0;
331 lock_kernel(); 333 mutex_lock(&fs3270_mutex);
332 switch (cmd) { 334 switch (cmd) {
333 case TUBICMD: 335 case TUBICMD:
334 fp->read_command = arg; 336 fp->read_command = arg;
@@ -354,7 +356,7 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
354 rc = -EFAULT; 356 rc = -EFAULT;
355 break; 357 break;
356 } 358 }
357 unlock_kernel(); 359 mutex_unlock(&fs3270_mutex);
358 return rc; 360 return rc;
359} 361}
360 362
@@ -437,7 +439,7 @@ fs3270_open(struct inode *inode, struct file *filp)
437 minor = tty->index + RAW3270_FIRSTMINOR; 439 minor = tty->index + RAW3270_FIRSTMINOR;
438 tty_kref_put(tty); 440 tty_kref_put(tty);
439 } 441 }
440 lock_kernel(); 442 mutex_lock(&fs3270_mutex);
441 /* Check if some other program is already using fullscreen mode. */ 443 /* Check if some other program is already using fullscreen mode. */
442 fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor); 444 fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
443 if (!IS_ERR(fp)) { 445 if (!IS_ERR(fp)) {
@@ -478,7 +480,7 @@ fs3270_open(struct inode *inode, struct file *filp)
478 } 480 }
479 filp->private_data = fp; 481 filp->private_data = fp;
480out: 482out:
481 unlock_kernel(); 483 mutex_unlock(&fs3270_mutex);
482 return rc; 484 return rc;
483} 485}
484 486
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 66e21dd23154..60473f86e1f9 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -12,7 +12,6 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/moduleparam.h> 13#include <linux/moduleparam.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/smp_lock.h>
16#include <linux/errno.h> 15#include <linux/errno.h>
17#include <linux/types.h> 16#include <linux/types.h>
18#include <linux/kernel.h> 17#include <linux/kernel.h>
@@ -283,7 +282,6 @@ static int mon_open(struct inode *inode, struct file *filp)
283 /* 282 /*
284 * only one user allowed 283 * only one user allowed
285 */ 284 */
286 lock_kernel();
287 rc = -EBUSY; 285 rc = -EBUSY;
288 if (test_and_set_bit(MON_IN_USE, &mon_in_use)) 286 if (test_and_set_bit(MON_IN_USE, &mon_in_use))
289 goto out; 287 goto out;
@@ -321,7 +319,6 @@ static int mon_open(struct inode *inode, struct file *filp)
321 } 319 }
322 filp->private_data = monpriv; 320 filp->private_data = monpriv;
323 dev_set_drvdata(monreader_device, monpriv); 321 dev_set_drvdata(monreader_device, monpriv);
324 unlock_kernel();
325 return nonseekable_open(inode, filp); 322 return nonseekable_open(inode, filp);
326 323
327out_path: 324out_path:
@@ -331,7 +328,6 @@ out_priv:
331out_use: 328out_use:
332 clear_bit(MON_IN_USE, &mon_in_use); 329 clear_bit(MON_IN_USE, &mon_in_use);
333out: 330out:
334 unlock_kernel();
335 return rc; 331 return rc;
336} 332}
337 333
@@ -607,6 +603,10 @@ static int __init mon_init(void)
607 } 603 }
608 dcss_mkname(mon_dcss_name, &user_data_connect[8]); 604 dcss_mkname(mon_dcss_name, &user_data_connect[8]);
609 605
606 /*
607 * misc_register() has to be the last action in module_init(), because
608 * file operations will be available right after this.
609 */
610 rc = misc_register(&mon_dev); 610 rc = misc_register(&mon_dev);
611 if (rc < 0 ) 611 if (rc < 0 )
612 goto out; 612 goto out;
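
The comment added before misc_register() states an ordering rule that monreader and monwriter now both follow: registering the misc device publishes the device node, so userspace may enter open() before module_init() has returned. Compressed into a sketch, where the setup helper is a stand-in for everything the open path touches:

static int __init mon_init_sketch(void)
{
	int rc;

	rc = set_up_everything_open_needs();	/* stand-in */
	if (rc)
		return rc;

	/* must be last: file operations are live once this returns */
	return misc_register(&mon_dev);
}
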
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 66fb8eba93f4..6532ed8b4afa 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -13,7 +13,6 @@
13#include <linux/moduleparam.h> 13#include <linux/moduleparam.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/smp_lock.h>
17#include <linux/types.h> 16#include <linux/types.h>
18#include <linux/kernel.h> 17#include <linux/kernel.h>
19#include <linux/miscdevice.h> 18#include <linux/miscdevice.h>
@@ -185,13 +184,11 @@ static int monwrite_open(struct inode *inode, struct file *filp)
185 monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); 184 monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
186 if (!monpriv) 185 if (!monpriv)
187 return -ENOMEM; 186 return -ENOMEM;
188 lock_kernel();
189 INIT_LIST_HEAD(&monpriv->list); 187 INIT_LIST_HEAD(&monpriv->list);
190 monpriv->hdr_to_read = sizeof(monpriv->hdr); 188 monpriv->hdr_to_read = sizeof(monpriv->hdr);
191 mutex_init(&monpriv->thread_mutex); 189 mutex_init(&monpriv->thread_mutex);
192 filp->private_data = monpriv; 190 filp->private_data = monpriv;
193 list_add_tail(&monpriv->priv_list, &mon_priv_list); 191 list_add_tail(&monpriv->priv_list, &mon_priv_list);
194 unlock_kernel();
195 return nonseekable_open(inode, filp); 192 return nonseekable_open(inode, filp);
196} 193}
197 194
@@ -364,6 +361,10 @@ static int __init mon_init(void)
364 goto out_driver; 361 goto out_driver;
365 } 362 }
366 363
364 /*
365 * misc_register() has to be the last action in module_init(), because
366 * file operations will be available right after this.
367 */
367 rc = misc_register(&mon_dev); 368 rc = misc_register(&mon_dev);
368 if (rc) 369 if (rc)
369 goto out_device; 370 goto out_device;
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 5cc11c636d38..28b5afc129c3 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -84,6 +84,7 @@ static void __init sclp_read_info_early(void)
84 do { 84 do {
85 memset(sccb, 0, sizeof(*sccb)); 85 memset(sccb, 0, sizeof(*sccb));
86 sccb->header.length = sizeof(*sccb); 86 sccb->header.length = sizeof(*sccb);
87 sccb->header.function_code = 0x80;
87 sccb->header.control_mask[2] = 0x80; 88 sccb->header.control_mask[2] = 0x80;
88 rc = sclp_cmd_sync_early(commands[i], sccb); 89 rc = sclp_cmd_sync_early(commands[i], sccb);
89 } while (rc == -EBUSY); 90 } while (rc == -EBUSY);
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index a26333774701..7a242f073632 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -212,6 +212,9 @@ struct tape_device {
212 struct tape_class_device * nt; 212 struct tape_class_device * nt;
213 struct tape_class_device * rt; 213 struct tape_class_device * rt;
214 214
215 /* Device mutex to serialize tape commands. */
216 struct mutex mutex;
217
215 /* Device discipline information. */ 218 /* Device discipline information. */
216 struct tape_discipline * discipline; 219 struct tape_discipline * discipline;
217 void * discdata; 220 void * discdata;
@@ -292,9 +295,9 @@ extern int tape_generic_pm_suspend(struct ccw_device *);
292extern int tape_generic_probe(struct ccw_device *); 295extern int tape_generic_probe(struct ccw_device *);
293extern void tape_generic_remove(struct ccw_device *); 296extern void tape_generic_remove(struct ccw_device *);
294 297
295extern struct tape_device *tape_get_device(int devindex); 298extern struct tape_device *tape_find_device(int devindex);
296extern struct tape_device *tape_get_device_reference(struct tape_device *); 299extern struct tape_device *tape_get_device(struct tape_device *);
297extern struct tape_device *tape_put_device(struct tape_device *); 300extern void tape_put_device(struct tape_device *);
298 301
299/* Externals from tape_char.c */ 302/* Externals from tape_char.c */
300extern int tapechar_init(void); 303extern int tapechar_init(void);
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 2fe45ff77b75..3657fe103c27 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -113,16 +113,16 @@ tape_34xx_work_handler(struct work_struct *work)
113{ 113{
114 struct tape_34xx_work *p = 114 struct tape_34xx_work *p =
115 container_of(work, struct tape_34xx_work, work); 115 container_of(work, struct tape_34xx_work, work);
116 struct tape_device *device = p->device;
116 117
117 switch(p->op) { 118 switch(p->op) {
118 case TO_MSEN: 119 case TO_MSEN:
119 tape_34xx_medium_sense(p->device); 120 tape_34xx_medium_sense(device);
120 break; 121 break;
121 default: 122 default:
122 DBF_EVENT(3, "T34XX: internal error: unknown work\n"); 123 DBF_EVENT(3, "T34XX: internal error: unknown work\n");
123 } 124 }
124 125 tape_put_device(device);
125 p->device = tape_put_device(p->device);
126 kfree(p); 126 kfree(p);
127} 127}
128 128
@@ -136,7 +136,7 @@ tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
136 136
137 INIT_WORK(&p->work, tape_34xx_work_handler); 137 INIT_WORK(&p->work, tape_34xx_work_handler);
138 138
139 p->device = tape_get_device_reference(device); 139 p->device = tape_get_device(device);
140 p->op = op; 140 p->op = op;
141 141
142 schedule_work(&p->work); 142 schedule_work(&p->work);
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index e4cc3aae9162..0c72aadb8391 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -608,7 +608,7 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
608 608
609 INIT_WORK(&p->work, tape_3590_work_handler); 609 INIT_WORK(&p->work, tape_3590_work_handler);
610 610
611 p->device = tape_get_device_reference(device); 611 p->device = tape_get_device(device);
612 p->op = op; 612 p->op = op;
613 613
614 schedule_work(&p->work); 614 schedule_work(&p->work);
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 0c0705b91c28..4799cc2f73c3 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -54,7 +54,7 @@ static const struct block_device_operations tapeblock_fops = {
54 .owner = THIS_MODULE, 54 .owner = THIS_MODULE,
55 .open = tapeblock_open, 55 .open = tapeblock_open,
56 .release = tapeblock_release, 56 .release = tapeblock_release,
57 .locked_ioctl = tapeblock_ioctl, 57 .ioctl = tapeblock_ioctl,
58 .media_changed = tapeblock_medium_changed, 58 .media_changed = tapeblock_medium_changed,
59 .revalidate_disk = tapeblock_revalidate_disk, 59 .revalidate_disk = tapeblock_revalidate_disk,
60}; 60};
@@ -239,7 +239,7 @@ tapeblock_setup_device(struct tape_device * device)
239 disk->major = tapeblock_major; 239 disk->major = tapeblock_major;
240 disk->first_minor = device->first_minor; 240 disk->first_minor = device->first_minor;
241 disk->fops = &tapeblock_fops; 241 disk->fops = &tapeblock_fops;
242 disk->private_data = tape_get_device_reference(device); 242 disk->private_data = tape_get_device(device);
243 disk->queue = blkdat->request_queue; 243 disk->queue = blkdat->request_queue;
244 set_capacity(disk, 0); 244 set_capacity(disk, 0);
245 sprintf(disk->disk_name, "btibm%d", 245 sprintf(disk->disk_name, "btibm%d",
@@ -247,11 +247,11 @@ tapeblock_setup_device(struct tape_device * device)
247 247
248 blkdat->disk = disk; 248 blkdat->disk = disk;
249 blkdat->medium_changed = 1; 249 blkdat->medium_changed = 1;
250 blkdat->request_queue->queuedata = tape_get_device_reference(device); 250 blkdat->request_queue->queuedata = tape_get_device(device);
251 251
252 add_disk(disk); 252 add_disk(disk);
253 253
254 tape_get_device_reference(device); 254 tape_get_device(device);
255 INIT_WORK(&blkdat->requeue_task, tapeblock_requeue); 255 INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);
256 256
257 return 0; 257 return 0;
@@ -274,13 +274,14 @@ tapeblock_cleanup_device(struct tape_device *device)
274 } 274 }
275 275
276 del_gendisk(device->blk_data.disk); 276 del_gendisk(device->blk_data.disk);
277 device->blk_data.disk->private_data = 277 device->blk_data.disk->private_data = NULL;
278 tape_put_device(device->blk_data.disk->private_data); 278 tape_put_device(device);
279 put_disk(device->blk_data.disk); 279 put_disk(device->blk_data.disk);
280 280
281 device->blk_data.disk = NULL; 281 device->blk_data.disk = NULL;
282cleanup_queue: 282cleanup_queue:
283 device->blk_data.request_queue->queuedata = tape_put_device(device); 283 device->blk_data.request_queue->queuedata = NULL;
284 tape_put_device(device);
284 285
285 blk_cleanup_queue(device->blk_data.request_queue); 286 blk_cleanup_queue(device->blk_data.request_queue);
286 device->blk_data.request_queue = NULL; 287 device->blk_data.request_queue = NULL;
@@ -363,7 +364,7 @@ tapeblock_open(struct block_device *bdev, fmode_t mode)
363 struct tape_device * device; 364 struct tape_device * device;
364 int rc; 365 int rc;
365 366
366 device = tape_get_device_reference(disk->private_data); 367 device = tape_get_device(disk->private_data);
367 368
368 if (device->required_tapemarks) { 369 if (device->required_tapemarks) {
369 DBF_EVENT(2, "TBLOCK: missing tapemarks\n"); 370 DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 31566c55adfe..23d773a0d113 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -33,8 +33,7 @@ static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *);
33static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *); 33static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *);
34static int tapechar_open(struct inode *,struct file *); 34static int tapechar_open(struct inode *,struct file *);
35static int tapechar_release(struct inode *,struct file *); 35static int tapechar_release(struct inode *,struct file *);
36static int tapechar_ioctl(struct inode *, struct file *, unsigned int, 36static long tapechar_ioctl(struct file *, unsigned int, unsigned long);
37 unsigned long);
38static long tapechar_compat_ioctl(struct file *, unsigned int, 37static long tapechar_compat_ioctl(struct file *, unsigned int,
39 unsigned long); 38 unsigned long);
40 39
@@ -43,7 +42,7 @@ static const struct file_operations tape_fops =
43 .owner = THIS_MODULE, 42 .owner = THIS_MODULE,
44 .read = tapechar_read, 43 .read = tapechar_read,
45 .write = tapechar_write, 44 .write = tapechar_write,
46 .ioctl = tapechar_ioctl, 45 .unlocked_ioctl = tapechar_ioctl,
47 .compat_ioctl = tapechar_compat_ioctl, 46 .compat_ioctl = tapechar_compat_ioctl,
48 .open = tapechar_open, 47 .open = tapechar_open,
49 .release = tapechar_release, 48 .release = tapechar_release,
@@ -170,7 +169,6 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
170 if (rc == 0) { 169 if (rc == 0) {
171 rc = block_size - request->rescnt; 170 rc = block_size - request->rescnt;
172 DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc); 171 DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc);
173 filp->f_pos += rc;
174 /* Copy data from idal buffer to user space. */ 172 /* Copy data from idal buffer to user space. */
175 if (idal_buffer_to_user(device->char_data.idal_buf, 173 if (idal_buffer_to_user(device->char_data.idal_buf,
176 data, rc) != 0) 174 data, rc) != 0)
@@ -238,7 +236,6 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t
238 break; 236 break;
239 DBF_EVENT(6, "TCHAR:wbytes: %lx\n", 237 DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
240 block_size - request->rescnt); 238 block_size - request->rescnt);
241 filp->f_pos += block_size - request->rescnt;
242 written += block_size - request->rescnt; 239 written += block_size - request->rescnt;
243 if (request->rescnt != 0) 240 if (request->rescnt != 0)
244 break; 241 break;
@@ -286,26 +283,20 @@ tapechar_open (struct inode *inode, struct file *filp)
286 if (imajor(filp->f_path.dentry->d_inode) != tapechar_major) 283 if (imajor(filp->f_path.dentry->d_inode) != tapechar_major)
287 return -ENODEV; 284 return -ENODEV;
288 285
289 lock_kernel();
290 minor = iminor(filp->f_path.dentry->d_inode); 286 minor = iminor(filp->f_path.dentry->d_inode);
291 device = tape_get_device(minor / TAPE_MINORS_PER_DEV); 287 device = tape_find_device(minor / TAPE_MINORS_PER_DEV);
292 if (IS_ERR(device)) { 288 if (IS_ERR(device)) {
293 DBF_EVENT(3, "TCHAR:open: tape_get_device() failed\n"); 289 DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n");
294 rc = PTR_ERR(device); 290 return PTR_ERR(device);
295 goto out;
296 } 291 }
297 292
298
299 rc = tape_open(device); 293 rc = tape_open(device);
300 if (rc == 0) { 294 if (rc == 0) {
301 filp->private_data = device; 295 filp->private_data = device;
302 rc = nonseekable_open(inode, filp); 296 nonseekable_open(inode, filp);
303 } 297 } else
304 else
305 tape_put_device(device); 298 tape_put_device(device);
306 299
307out:
308 unlock_kernel();
309 return rc; 300 return rc;
310} 301}
311 302
@@ -342,7 +333,8 @@ tapechar_release(struct inode *inode, struct file *filp)
342 device->char_data.idal_buf = NULL; 333 device->char_data.idal_buf = NULL;
343 } 334 }
344 tape_release(device); 335 tape_release(device);
345 filp->private_data = tape_put_device(device); 336 filp->private_data = NULL;
337 tape_put_device(device);
346 338
347 return 0; 339 return 0;
348} 340}
@@ -351,16 +343,11 @@ tapechar_release(struct inode *inode, struct file *filp)
351 * Tape device io controls. 343 * Tape device io controls.
352 */ 344 */
353static int 345static int
354tapechar_ioctl(struct inode *inp, struct file *filp, 346__tapechar_ioctl(struct tape_device *device,
355 unsigned int no, unsigned long data) 347 unsigned int no, unsigned long data)
356{ 348{
357 struct tape_device *device;
358 int rc; 349 int rc;
359 350
360 DBF_EVENT(6, "TCHAR:ioct\n");
361
362 device = (struct tape_device *) filp->private_data;
363
364 if (no == MTIOCTOP) { 351 if (no == MTIOCTOP) {
365 struct mtop op; 352 struct mtop op;
366 353
@@ -453,15 +440,30 @@ tapechar_ioctl(struct inode *inp, struct file *filp,
453} 440}
454 441
455static long 442static long
443tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data)
444{
445 struct tape_device *device;
446 long rc;
447
448 DBF_EVENT(6, "TCHAR:ioct\n");
449
450 device = (struct tape_device *) filp->private_data;
451 mutex_lock(&device->mutex);
452 rc = __tapechar_ioctl(device, no, data);
453 mutex_unlock(&device->mutex);
454 return rc;
455}
456
457static long
456tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data) 458tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
457{ 459{
458 struct tape_device *device = filp->private_data; 460 struct tape_device *device = filp->private_data;
459 int rval = -ENOIOCTLCMD; 461 int rval = -ENOIOCTLCMD;
460 462
461 if (device->discipline->ioctl_fn) { 463 if (device->discipline->ioctl_fn) {
462 lock_kernel(); 464 mutex_lock(&device->mutex);
463 rval = device->discipline->ioctl_fn(device, no, data); 465 rval = device->discipline->ioctl_fn(device, no, data);
464 unlock_kernel(); 466 mutex_unlock(&device->mutex);
465 if (rval == -EINVAL) 467 if (rval == -EINVAL)
466 rval = -ENOIOCTLCMD; 468 rval = -ENOIOCTLCMD;
467 } 469 }
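
Taken together with tape_core.c below, tape_char.c shows the full recipe for leaving .ioctl: the handler body moves into __tapechar_ioctl(), and a thin .unlocked_ioctl wrapper takes a per-device mutex that is created when the device is allocated. The two halves of the pattern, condensed:

/* tape_core.c, tape_alloc_device(): one lock per device */
mutex_init(&device->mutex);

/* tape_char.c: every ioctl path funnels through that lock */
mutex_lock(&device->mutex);
rc = __tapechar_ioctl(device, no, data);
mutex_unlock(&device->mutex);

Unlike the BKL, this serializes commands per drive, so two tape devices no longer contend with each other.
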
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 5cd31e071647..f5d6802dc5da 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -492,6 +492,7 @@ tape_alloc_device(void)
492 kfree(device); 492 kfree(device);
493 return ERR_PTR(-ENOMEM); 493 return ERR_PTR(-ENOMEM);
494 } 494 }
495 mutex_init(&device->mutex);
495 INIT_LIST_HEAD(&device->req_queue); 496 INIT_LIST_HEAD(&device->req_queue);
496 INIT_LIST_HEAD(&device->node); 497 INIT_LIST_HEAD(&device->node);
497 init_waitqueue_head(&device->state_change_wq); 498 init_waitqueue_head(&device->state_change_wq);
@@ -511,11 +512,12 @@ tape_alloc_device(void)
511 * increment the reference count. 512 * increment the reference count.
512 */ 513 */
513struct tape_device * 514struct tape_device *
514tape_get_device_reference(struct tape_device *device) 515tape_get_device(struct tape_device *device)
515{ 516{
516 DBF_EVENT(4, "tape_get_device_reference(%p) = %i\n", device, 517 int count;
517 atomic_inc_return(&device->ref_count));
518 518
519 count = atomic_inc_return(&device->ref_count);
520 DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
519 return device; 521 return device;
520} 522}
521 523
@@ -525,32 +527,25 @@ tape_get_device_reference(struct tape_device *device)
525 * The function returns a NULL pointer to be used by the caller 527 * The function returns a NULL pointer to be used by the caller
526 * for clearing reference pointers. 528 * for clearing reference pointers.
527 */ 529 */
528struct tape_device * 530void
529tape_put_device(struct tape_device *device) 531tape_put_device(struct tape_device *device)
530{ 532{
531 int remain; 533 int count;
532 534
533 remain = atomic_dec_return(&device->ref_count); 535 count = atomic_dec_return(&device->ref_count);
534 if (remain > 0) { 536 DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
535 DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, remain); 537 BUG_ON(count < 0);
536 } else { 538 if (count == 0) {
537 if (remain < 0) { 539 kfree(device->modeset_byte);
538 DBF_EVENT(4, "put device without reference\n"); 540 kfree(device);
539 } else {
540 DBF_EVENT(4, "tape_free_device(%p)\n", device);
541 kfree(device->modeset_byte);
542 kfree(device);
543 }
544 } 541 }
545
546 return NULL;
547} 542}
548 543
549/* 544/*
550 * Find tape device by a device index. 545 * Find tape device by a device index.
551 */ 546 */
552struct tape_device * 547struct tape_device *
553tape_get_device(int devindex) 548tape_find_device(int devindex)
554{ 549{
555 struct tape_device *device, *tmp; 550 struct tape_device *device, *tmp;
556 551
@@ -558,7 +553,7 @@ tape_get_device(int devindex)
558 read_lock(&tape_device_lock); 553 read_lock(&tape_device_lock);
559 list_for_each_entry(tmp, &tape_device_list, node) { 554 list_for_each_entry(tmp, &tape_device_list, node) {
560 if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) { 555 if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
561 device = tape_get_device_reference(tmp); 556 device = tape_get_device(tmp);
562 break; 557 break;
563 } 558 }
564 } 559 }
@@ -579,7 +574,8 @@ tape_generic_probe(struct ccw_device *cdev)
579 device = tape_alloc_device(); 574 device = tape_alloc_device();
580 if (IS_ERR(device)) 575 if (IS_ERR(device))
581 return -ENODEV; 576 return -ENODEV;
582 ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP); 577 ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
578 CCWDEV_DO_MULTIPATH);
583 ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group); 579 ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
584 if (ret) { 580 if (ret) {
585 tape_put_device(device); 581 tape_put_device(device);
@@ -606,7 +602,8 @@ __tape_discard_requests(struct tape_device *device)
606 list_del(&request->list); 602 list_del(&request->list);
607 603
608 /* Decrease ref_count for removed request. */ 604 /* Decrease ref_count for removed request. */
609 request->device = tape_put_device(device); 605 request->device = NULL;
606 tape_put_device(device);
610 request->rc = -EIO; 607 request->rc = -EIO;
611 if (request->callback != NULL) 608 if (request->callback != NULL)
612 request->callback(request, request->callback_data); 609 request->callback(request, request->callback_data);
@@ -664,9 +661,11 @@ tape_generic_remove(struct ccw_device *cdev)
664 tape_cleanup_device(device); 661 tape_cleanup_device(device);
665 } 662 }
666 663
667 if (!dev_get_drvdata(&cdev->dev)) { 664 device = dev_get_drvdata(&cdev->dev);
665 if (device) {
668 sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group); 666 sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
669 dev_set_drvdata(&cdev->dev, tape_put_device(dev_get_drvdata(&cdev->dev))); 667 dev_set_drvdata(&cdev->dev, NULL);
668 tape_put_device(device);
670 } 669 }
671} 670}
672 671
@@ -721,9 +720,8 @@ tape_free_request (struct tape_request * request)
721{ 720{
722 DBF_LH(6, "Free request %p\n", request); 721 DBF_LH(6, "Free request %p\n", request);
723 722
724 if (request->device != NULL) { 723 if (request->device)
725 request->device = tape_put_device(request->device); 724 tape_put_device(request->device);
726 }
727 kfree(request->cpdata); 725 kfree(request->cpdata);
728 kfree(request->cpaddr); 726 kfree(request->cpaddr);
729 kfree(request); 727 kfree(request);
@@ -838,7 +836,8 @@ static void tape_long_busy_timeout(unsigned long data)
838 BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY); 836 BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
839 DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id); 837 DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
840 __tape_start_next_request(device); 838 __tape_start_next_request(device);
841 device->lb_timeout.data = (unsigned long) tape_put_device(device); 839 device->lb_timeout.data = 0UL;
840 tape_put_device(device);
842 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 841 spin_unlock_irq(get_ccwdev_lock(device->cdev));
843} 842}
844 843
@@ -918,7 +917,7 @@ __tape_start_request(struct tape_device *device, struct tape_request *request)
918 } 917 }
919 918
920 /* Increase use count of device for the added request. */ 919 /* Increase use count of device for the added request. */
921 request->device = tape_get_device_reference(device); 920 request->device = tape_get_device(device);
922 921
923 if (list_empty(&device->req_queue)) { 922 if (list_empty(&device->req_queue)) {
924 /* No other requests are on the queue. Start this one. */ 923 /* No other requests are on the queue. Start this one. */
@@ -1117,8 +1116,8 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1117 if (req->status == TAPE_REQUEST_LONG_BUSY) { 1116 if (req->status == TAPE_REQUEST_LONG_BUSY) {
1118 DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id); 1117 DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
1119 if (del_timer(&device->lb_timeout)) { 1118 if (del_timer(&device->lb_timeout)) {
1120 device->lb_timeout.data = (unsigned long) 1119 device->lb_timeout.data = 0UL;
1121 tape_put_device(device); 1120 tape_put_device(device);
1122 __tape_start_next_request(device); 1121 __tape_start_next_request(device);
1123 } 1122 }
1124 return; 1123 return;
@@ -1173,7 +1172,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1173 break; 1172 break;
1174 case TAPE_IO_LONG_BUSY: 1173 case TAPE_IO_LONG_BUSY:
1175 device->lb_timeout.data = 1174 device->lb_timeout.data =
1176 (unsigned long)tape_get_device_reference(device); 1175 (unsigned long) tape_get_device(device);
1177 device->lb_timeout.expires = jiffies + 1176 device->lb_timeout.expires = jiffies +
1178 LONG_BUSY_TIMEOUT * HZ; 1177 LONG_BUSY_TIMEOUT * HZ;
1179 DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id); 1178 DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
@@ -1326,7 +1325,7 @@ EXPORT_SYMBOL(tape_generic_online);
1326EXPORT_SYMBOL(tape_generic_offline); 1325EXPORT_SYMBOL(tape_generic_offline);
1327EXPORT_SYMBOL(tape_generic_pm_suspend); 1326EXPORT_SYMBOL(tape_generic_pm_suspend);
1328EXPORT_SYMBOL(tape_put_device); 1327EXPORT_SYMBOL(tape_put_device);
1329EXPORT_SYMBOL(tape_get_device_reference); 1328EXPORT_SYMBOL(tape_get_device);
1330EXPORT_SYMBOL(tape_state_verbose); 1329EXPORT_SYMBOL(tape_state_verbose);
1331EXPORT_SYMBOL(tape_op_verbose); 1330EXPORT_SYMBOL(tape_op_verbose);
1332EXPORT_SYMBOL(tape_state_set); 1331EXPORT_SYMBOL(tape_state_set);
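
The tape reference-counting rework changes the contract, not just the names: tape_put_device() returns void now, so call sites clear their own pointers before dropping the reference, and the count reaching zero frees the device outright. A self-contained user-space model of the new get/put contract, with plain C11 atomics standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdlib.h>

struct tape_device_sketch {
	atomic_int ref_count;
};

static struct tape_device_sketch *get_device(struct tape_device_sketch *d)
{
	atomic_fetch_add(&d->ref_count, 1);
	return d;
}

static void put_device(struct tape_device_sketch *d)	/* returns void now */
{
	if (atomic_fetch_sub(&d->ref_count, 1) == 1)
		free(d);	/* last reference dropped */
}

int main(void)
{
	struct tape_device_sketch *d = calloc(1, sizeof(*d));
	struct tape_device_sketch *ref;

	atomic_init(&d->ref_count, 1);	/* allocation holds one reference */

	ref = get_device(d);		/* a user takes a second one */
	/* new contract: clear your own pointer, then drop the reference */
	ref = NULL;
	put_device(d);

	put_device(d);			/* last put frees the device */
	return 0;
}
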
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index 202f42132939..ebd820ccfb24 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -45,7 +45,7 @@ static int tape_proc_show(struct seq_file *m, void *v)
45 seq_printf(m, "TapeNo\tBusID CuType/Model\t" 45 seq_printf(m, "TapeNo\tBusID CuType/Model\t"
46 "DevType/Model\tBlkSize\tState\tOp\tMedState\n"); 46 "DevType/Model\tBlkSize\tState\tOp\tMedState\n");
47 } 47 }
48 device = tape_get_device(n); 48 device = tape_find_device(n);
49 if (IS_ERR(device)) 49 if (IS_ERR(device))
50 return 0; 50 return 0;
51 spin_lock_irq(get_ccwdev_lock(device->cdev)); 51 spin_lock_irq(get_ccwdev_lock(device->cdev));
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 38385677c653..911822db614d 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -19,6 +19,7 @@
19 19
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/bootmem.h> 21#include <linux/bootmem.h>
22#include <linux/compat.h>
22 23
23#include <asm/ccwdev.h> 24#include <asm/ccwdev.h>
24#include <asm/cio.h> 25#include <asm/cio.h>
@@ -1731,6 +1732,22 @@ tty3270_ioctl(struct tty_struct *tty, struct file *file,
1731 return kbd_ioctl(tp->kbd, file, cmd, arg); 1732 return kbd_ioctl(tp->kbd, file, cmd, arg);
1732} 1733}
1733 1734
1735#ifdef CONFIG_COMPAT
1736static long
1737tty3270_compat_ioctl(struct tty_struct *tty, struct file *file,
1738 unsigned int cmd, unsigned long arg)
1739{
1740 struct tty3270 *tp;
1741
1742 tp = tty->driver_data;
1743 if (!tp)
1744 return -ENODEV;
1745 if (tty->flags & (1 << TTY_IO_ERROR))
1746 return -EIO;
1747 return kbd_ioctl(tp->kbd, file, cmd, (unsigned long)compat_ptr(arg));
1748}
1749#endif
1750
1734static const struct tty_operations tty3270_ops = { 1751static const struct tty_operations tty3270_ops = {
1735 .open = tty3270_open, 1752 .open = tty3270_open,
1736 .close = tty3270_close, 1753 .close = tty3270_close,
@@ -1745,6 +1762,9 @@ static const struct tty_operations tty3270_ops = {
1745 .hangup = tty3270_hangup, 1762 .hangup = tty3270_hangup,
1746 .wait_until_sent = tty3270_wait_until_sent, 1763 .wait_until_sent = tty3270_wait_until_sent,
1747 .ioctl = tty3270_ioctl, 1764 .ioctl = tty3270_ioctl,
1765#ifdef CONFIG_COMPAT
1766 .compat_ioctl = tty3270_compat_ioctl,
1767#endif
1748 .set_termios = tty3270_set_termios 1768 .set_termios = tty3270_set_termios
1749}; 1769};
1750 1770
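
The tty3270 addition is the standard 31-bit compat bridge: a compat task hands over a user pointer in the low 31 bits of arg, compat_ptr() widens it, and the request then follows the same kbd_ioctl() path as a native call. The essential shape, where sketch_native_ioctl() stands in for the driver's normal handler:

#ifdef CONFIG_COMPAT
static long sketch_compat_ioctl(struct tty_struct *tty, struct file *file,
				unsigned int cmd, unsigned long arg)
{
	/* widen the 31-bit user pointer, then share the native path */
	return sketch_native_ioctl(tty, file, cmd,
				   (unsigned long)compat_ptr(arg));
}
#endif

Commands whose argument is a plain scalar must not be converted this way, which is why compat handlers remain per-driver instead of being applied blindly.
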
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index d1a142fa3eb4..899aa795bf38 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -312,11 +312,9 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
312 return -ENOSYS; 312 return -ENOSYS;
313 313
314 /* Be sure this device hasn't already been opened */ 314 /* Be sure this device hasn't already been opened */
315 lock_kernel();
316 spin_lock_bh(&logptr->priv_lock); 315 spin_lock_bh(&logptr->priv_lock);
317 if (logptr->dev_in_use) { 316 if (logptr->dev_in_use) {
318 spin_unlock_bh(&logptr->priv_lock); 317 spin_unlock_bh(&logptr->priv_lock);
319 unlock_kernel();
320 return -EBUSY; 318 return -EBUSY;
321 } 319 }
322 logptr->dev_in_use = 1; 320 logptr->dev_in_use = 1;
@@ -360,9 +358,8 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
360 || (logptr->iucv_path_severed)); 358 || (logptr->iucv_path_severed));
361 if (logptr->iucv_path_severed) 359 if (logptr->iucv_path_severed)
362 goto out_record; 360 goto out_record;
363 ret = nonseekable_open(inode, filp); 361 nonseekable_open(inode, filp);
364 unlock_kernel(); 362 return 0;
365 return ret;
366 363
367out_record: 364out_record:
368 if (logptr->autorecording) 365 if (logptr->autorecording)
@@ -372,7 +369,6 @@ out_path:
372 logptr->path = NULL; 369 logptr->path = NULL;
373out_dev: 370out_dev:
374 logptr->dev_in_use = 0; 371 logptr->dev_in_use = 0;
375 unlock_kernel();
376 return -EIO; 372 return -EIO;
377} 373}
378 374
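With the big kernel lock gone from vmlogrdr_open(), mutual exclusion rests entirely on the driver's own spinlock around the dev_in_use flag. A condensed sketch of that single-open guard, assuming a hypothetical per-device structure priv with the same priv_lock and dev_in_use members:

static int foo_open(struct inode *inode, struct file *filp)
{
	spin_lock_bh(&priv->priv_lock);
	if (priv->dev_in_use) {
		/* Somebody already holds the device. */
		spin_unlock_bh(&priv->priv_lock);
		return -EBUSY;
	}
	priv->dev_in_use = 1;	/* claim it while the lock is held */
	spin_unlock_bh(&priv->priv_lock);
	return nonseekable_open(inode, filp);
}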
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 77571b68539a..cc56fc708bae 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -695,7 +695,6 @@ static int ur_open(struct inode *inode, struct file *file)
695 695
696 if (accmode == O_RDWR) 696 if (accmode == O_RDWR)
697 return -EACCES; 697 return -EACCES;
698 lock_kernel();
699 /* 698 /*
700 * We treat the minor number as the devno of the ur device 699 * We treat the minor number as the devno of the ur device
701 * to find in the driver tree. 700 * to find in the driver tree.
@@ -749,7 +748,6 @@ static int ur_open(struct inode *inode, struct file *file)
749 goto fail_urfile_free; 748 goto fail_urfile_free;
750 urf->file_reclen = rc; 749 urf->file_reclen = rc;
751 file->private_data = urf; 750 file->private_data = urf;
752 unlock_kernel();
753 return 0; 751 return 0;
754 752
755fail_urfile_free: 753fail_urfile_free:
@@ -761,7 +759,6 @@ fail_unlock:
761fail_put: 759fail_put:
762 urdev_put(urd); 760 urdev_put(urd);
763out: 761out:
764 unlock_kernel();
765 return rc; 762 return rc;
766} 763}
767 764
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index f2bc287b69e4..c974058e48d2 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -19,7 +19,6 @@
19#include <linux/moduleparam.h> 19#include <linux/moduleparam.h>
20#include <linux/suspend.h> 20#include <linux/suspend.h>
21#include <linux/watchdog.h> 21#include <linux/watchdog.h>
22#include <linux/smp_lock.h>
23 22
24#include <asm/ebcdic.h> 23#include <asm/ebcdic.h>
25#include <asm/io.h> 24#include <asm/io.h>
@@ -49,6 +48,8 @@ static unsigned int vmwdt_interval = 60;
49static unsigned long vmwdt_is_open; 48static unsigned long vmwdt_is_open;
50static int vmwdt_expect_close; 49static int vmwdt_expect_close;
51 50
51static DEFINE_MUTEX(vmwdt_mutex);
52
52#define VMWDT_OPEN 0 /* devnode is open or suspend in progress */ 53#define VMWDT_OPEN 0 /* devnode is open or suspend in progress */
53#define VMWDT_RUNNING 1 /* The watchdog is armed */ 54#define VMWDT_RUNNING 1 /* The watchdog is armed */
54 55
@@ -133,15 +134,11 @@ static int __init vmwdt_probe(void)
133static int vmwdt_open(struct inode *i, struct file *f) 134static int vmwdt_open(struct inode *i, struct file *f)
134{ 135{
135 int ret; 136 int ret;
136 lock_kernel(); 137 if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open))
137 if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
138 unlock_kernel();
139 return -EBUSY; 138 return -EBUSY;
140 }
141 ret = vmwdt_keepalive(); 139 ret = vmwdt_keepalive();
142 if (ret) 140 if (ret)
143 clear_bit(VMWDT_OPEN, &vmwdt_is_open); 141 clear_bit(VMWDT_OPEN, &vmwdt_is_open);
144 unlock_kernel();
145 return ret ? ret : nonseekable_open(i, f); 142 return ret ? ret : nonseekable_open(i, f);
146} 143}
147 144
@@ -160,8 +157,7 @@ static struct watchdog_info vmwdt_info = {
160 .identity = "z/VM Watchdog Timer", 157 .identity = "z/VM Watchdog Timer",
161}; 158};
162 159
163static int vmwdt_ioctl(struct inode *i, struct file *f, 160static int __vmwdt_ioctl(unsigned int cmd, unsigned long arg)
164 unsigned int cmd, unsigned long arg)
165{ 161{
166 switch (cmd) { 162 switch (cmd) {
167 case WDIOC_GETSUPPORT: 163 case WDIOC_GETSUPPORT:
@@ -205,10 +201,19 @@ static int vmwdt_ioctl(struct inode *i, struct file *f,
205 case WDIOC_KEEPALIVE: 201 case WDIOC_KEEPALIVE:
206 return vmwdt_keepalive(); 202 return vmwdt_keepalive();
207 } 203 }
208
209 return -EINVAL; 204 return -EINVAL;
210} 205}
211 206
207static long vmwdt_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
208{
209 int rc;
210
211 mutex_lock(&vmwdt_mutex);
212 rc = __vmwdt_ioctl(cmd, arg);
213 mutex_unlock(&vmwdt_mutex);
214 return (long) rc;
215}
216
212static ssize_t vmwdt_write(struct file *f, const char __user *buf, 217static ssize_t vmwdt_write(struct file *f, const char __user *buf,
213 size_t count, loff_t *ppos) 218 size_t count, loff_t *ppos)
214{ 219{
@@ -288,7 +293,7 @@ static struct notifier_block vmwdt_power_notifier = {
288static const struct file_operations vmwdt_fops = { 293static const struct file_operations vmwdt_fops = {
289 .open = &vmwdt_open, 294 .open = &vmwdt_open,
290 .release = &vmwdt_close, 295 .release = &vmwdt_close,
291 .ioctl = &vmwdt_ioctl, 296 .unlocked_ioctl = &vmwdt_ioctl,
292 .write = &vmwdt_write, 297 .write = &vmwdt_write,
293 .owner = THIS_MODULE, 298 .owner = THIS_MODULE,
294}; 299};
@@ -309,6 +314,10 @@ static int __init vmwdt_init(void)
309 ret = register_pm_notifier(&vmwdt_power_notifier); 314 ret = register_pm_notifier(&vmwdt_power_notifier);
310 if (ret) 315 if (ret)
311 return ret; 316 return ret;
317 /*
318 * misc_register() has to be the last action in module_init(), because
319 * file operations will be available right after this.
320 */
312 ret = misc_register(&vmwdt_dev); 321 ret = misc_register(&vmwdt_dev);
313 if (ret) { 322 if (ret) {
314 unregister_pm_notifier(&vmwdt_power_notifier); 323 unregister_pm_notifier(&vmwdt_power_notifier);
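The vmwatchdog conversion shows the recurring shape of these BKL removals: the old .ioctl body moves into a __-prefixed helper, and a new unlocked_ioctl wrapper serializes callers with a private mutex. Sketched in general form (foo_* names are placeholders):

#include <linux/mutex.h>

static DEFINE_MUTEX(foo_mutex);

static long foo_unlocked_ioctl(struct file *f, unsigned int cmd,
			       unsigned long arg)
{
	int rc;

	mutex_lock(&foo_mutex);		/* replaces implicit BKL serialization */
	rc = __foo_ioctl(cmd, arg);	/* old handler body, unchanged */
	mutex_unlock(&foo_mutex);
	return (long) rc;
}

Note the prototype change that comes with it: unlocked_ioctl takes no inode argument and returns long, which is why the file_operations entry switches from .ioctl to .unlocked_ioctl.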
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index fa4c9662f65e..d033414f7599 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \ 5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
6 fcx.o itcw.o crw.o 6 fcx.o itcw.o crw.o ccwreq.o
7ccw_device-objs += device.o device_fsm.o device_ops.o 7ccw_device-objs += device.o device_fsm.o device_ops.o
8ccw_device-objs += device_id.o device_pgid.o device_status.o 8ccw_device-objs += device_id.o device_pgid.o device_status.o
9obj-y += ccw_device.o cmf.o 9obj-y += ccw_device.o cmf.o
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
new file mode 100644
index 000000000000..9509e3860934
--- /dev/null
+++ b/drivers/s390/cio/ccwreq.c
@@ -0,0 +1,328 @@
1/*
2 * Handling of internal CCW device requests.
3 *
4 * Copyright IBM Corp. 2009
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/types.h>
9#include <linux/err.h>
10#include <asm/ccwdev.h>
11#include <asm/cio.h>
12
13#include "io_sch.h"
14#include "cio.h"
15#include "device.h"
16#include "cio_debug.h"
17
18/**
19 * lpm_adjust - adjust path mask
20 * @lpm: path mask to adjust
21 * @mask: mask of available paths
22 *
23 * Shift @lpm right until @lpm and @mask have at least one bit in common or
24 * until @lpm is zero. Return the resulting lpm.
25 */
26int lpm_adjust(int lpm, int mask)
27{
28 while (lpm && ((lpm & mask) == 0))
29 lpm >>= 1;
30 return lpm;
31}
32
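Two illustrative calls (values chosen for this note, not taken from the patch):

/*
 * lpm_adjust(0x80, 0x40) == 0x40: one right shift reaches a path that
 *                                 is present in the availability mask.
 * lpm_adjust(0x08, 0xf0) == 0x00: only higher bits are available, so
 *                                 shifting right never finds a match
 *                                 and the caller sees an empty mask.
 */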
33/*
34 * Adjust path mask to use next path and reset retry count. Return resulting
35 * path mask.
36 */
37static u16 ccwreq_next_path(struct ccw_device *cdev)
38{
39 struct ccw_request *req = &cdev->private->req;
40
41 req->retries = req->maxretries;
 42	req->mask = lpm_adjust(req->mask >> 1, req->lpm);
43
44 return req->mask;
45}
46
47/*
48 * Clean up device state and report to callback.
49 */
50static void ccwreq_stop(struct ccw_device *cdev, int rc)
51{
52 struct subchannel *sch = to_subchannel(cdev->dev.parent);
53 struct ccw_request *req = &cdev->private->req;
54
55 if (req->done)
56 return;
57 req->done = 1;
58 ccw_device_set_timeout(cdev, 0);
59 memset(&cdev->private->irb, 0, sizeof(struct irb));
60 sch->lpm = sch->schib.pmcw.pam;
61 if (rc && rc != -ENODEV && req->drc)
62 rc = req->drc;
63 req->callback(cdev, req->data, rc);
64}
65
66/*
67 * (Re-)Start the operation until retries and paths are exhausted.
68 */
69static void ccwreq_do(struct ccw_device *cdev)
70{
71 struct ccw_request *req = &cdev->private->req;
72 struct subchannel *sch = to_subchannel(cdev->dev.parent);
73 struct ccw1 *cp = req->cp;
74 int rc = -EACCES;
75
76 while (req->mask) {
77 if (req->retries-- == 0) {
78 /* Retries exhausted, try next path. */
79 ccwreq_next_path(cdev);
80 continue;
81 }
82 /* Perform start function. */
83 sch->lpm = 0xff;
84 memset(&cdev->private->irb, 0, sizeof(struct irb));
85 rc = cio_start(sch, cp, (u8) req->mask);
86 if (rc == 0) {
87 /* I/O started successfully. */
88 ccw_device_set_timeout(cdev, req->timeout);
89 return;
90 }
91 if (rc == -ENODEV) {
92 /* Permanent device error. */
93 break;
94 }
95 if (rc == -EACCES) {
 96 /* Permanent path error. */
97 ccwreq_next_path(cdev);
98 continue;
99 }
100 /* Temporary improper status. */
101 rc = cio_clear(sch);
102 if (rc)
103 break;
104 return;
105 }
106 ccwreq_stop(cdev, rc);
107}
108
109/**
110 * ccw_request_start - perform I/O request
111 * @cdev: ccw device
112 *
113 * Perform the I/O request specified by cdev->req.
114 */
115void ccw_request_start(struct ccw_device *cdev)
116{
117 struct ccw_request *req = &cdev->private->req;
118
119 /* Try all paths twice to counter link flapping. */
120 req->mask = 0x8080;
121 req->retries = req->maxretries;
122 req->mask = lpm_adjust(req->mask, req->lpm);
123 req->drc = 0;
124 req->done = 0;
125 req->cancel = 0;
126 if (!req->mask)
127 goto out_nopath;
128 ccwreq_do(cdev);
129 return;
130
131out_nopath:
132 ccwreq_stop(cdev, -EACCES);
133}
134
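The 16-bit starting mask 0x8080 carries the 8 path bits twice, so shifting right walks every path in two rounds, which is what the "try all paths twice" comment refers to. An illustrative trace, assuming req->lpm == 0x80 (only the first path available):

/*
 * start: mask = lpm_adjust(0x8080, 0x80) = 0x8080, cio_start() gets 0x80
 * next : mask = lpm_adjust(0x4040, 0x80) = 0x0080, cio_start() gets 0x80
 * next : mask = lpm_adjust(0x0040, 0x80) = 0x0000 -> request fails -EACCES
 */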
135/**
136 * ccw_request_cancel - cancel running I/O request
137 * @cdev: ccw device
138 *
 139 * Cancel the I/O request specified by cdev->req. Return non-zero if the request
140 * has already finished, zero otherwise.
141 */
142int ccw_request_cancel(struct ccw_device *cdev)
143{
144 struct subchannel *sch = to_subchannel(cdev->dev.parent);
145 struct ccw_request *req = &cdev->private->req;
146 int rc;
147
148 if (req->done)
149 return 1;
150 req->cancel = 1;
151 rc = cio_clear(sch);
152 if (rc)
153 ccwreq_stop(cdev, rc);
154 return 0;
155}
156
157/*
158 * Return the status of the internal I/O started on the specified ccw device.
159 * Perform BASIC SENSE if required.
160 */
161static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
162{
163 struct irb *irb = &cdev->private->irb;
164 struct cmd_scsw *scsw = &irb->scsw.cmd;
165
166 /* Perform BASIC SENSE if needed. */
167 if (ccw_device_accumulate_and_sense(cdev, lcirb))
168 return IO_RUNNING;
169 /* Check for halt/clear interrupt. */
170 if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
171 return IO_KILLED;
172 /* Check for path error. */
173 if (scsw->cc == 3 || scsw->pno)
174 return IO_PATH_ERROR;
175 /* Handle BASIC SENSE data. */
176 if (irb->esw.esw0.erw.cons) {
177 CIO_TRACE_EVENT(2, "sensedata");
178 CIO_HEX_EVENT(2, &cdev->private->dev_id,
179 sizeof(struct ccw_dev_id));
180 CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
181 /* Check for command reject. */
182 if (irb->ecw[0] & SNS0_CMD_REJECT)
183 return IO_REJECTED;
184 /* Assume that unexpected SENSE data implies an error. */
185 return IO_STATUS_ERROR;
186 }
187 /* Check for channel errors. */
188 if (scsw->cstat != 0)
189 return IO_STATUS_ERROR;
190 /* Check for device errors. */
191 if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
192 return IO_STATUS_ERROR;
193 /* Check for final state. */
194 if (!(scsw->dstat & DEV_STAT_DEV_END))
195 return IO_RUNNING;
196 /* Check for other improper status. */
197 if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
198 return IO_STATUS_ERROR;
199 return IO_DONE;
200}
201
202/*
203 * Log ccw request status.
204 */
205static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
206{
207 struct ccw_request *req = &cdev->private->req;
208 struct {
209 struct ccw_dev_id dev_id;
210 u16 retries;
211 u8 lpm;
212 u8 status;
213 } __attribute__ ((packed)) data;
214 data.dev_id = cdev->private->dev_id;
215 data.retries = req->retries;
216 data.lpm = (u8) req->mask;
217 data.status = (u8) status;
218 CIO_TRACE_EVENT(2, "reqstat");
219 CIO_HEX_EVENT(2, &data, sizeof(data));
220}
221
222/**
223 * ccw_request_handler - interrupt handler for I/O request procedure.
224 * @cdev: ccw device
225 *
226 * Handle interrupt during I/O request procedure.
227 */
228void ccw_request_handler(struct ccw_device *cdev)
229{
230 struct ccw_request *req = &cdev->private->req;
231 struct irb *irb = (struct irb *) __LC_IRB;
232 enum io_status status;
233 int rc = -EOPNOTSUPP;
234
235 /* Check status of I/O request. */
236 status = ccwreq_status(cdev, irb);
237 if (req->filter)
238 status = req->filter(cdev, req->data, irb, status);
239 if (status != IO_RUNNING)
240 ccw_device_set_timeout(cdev, 0);
241 if (status != IO_DONE && status != IO_RUNNING)
242 ccwreq_log_status(cdev, status);
243 switch (status) {
244 case IO_DONE:
245 break;
246 case IO_RUNNING:
247 return;
248 case IO_REJECTED:
249 goto err;
250 case IO_PATH_ERROR:
251 goto out_next_path;
252 case IO_STATUS_ERROR:
253 goto out_restart;
254 case IO_KILLED:
255 /* Check if request was cancelled on purpose. */
256 if (req->cancel) {
257 rc = -EIO;
258 goto err;
259 }
260 goto out_restart;
261 }
262 /* Check back with request initiator. */
263 if (!req->check)
264 goto out;
265 switch (req->check(cdev, req->data)) {
266 case 0:
267 break;
268 case -EAGAIN:
269 goto out_restart;
270 case -EACCES:
271 goto out_next_path;
272 default:
273 goto err;
274 }
275out:
276 ccwreq_stop(cdev, 0);
277 return;
278
279out_next_path:
280 /* Try next path and restart I/O. */
281 if (!ccwreq_next_path(cdev)) {
282 rc = -EACCES;
283 goto err;
284 }
285out_restart:
286 /* Restart. */
287 ccwreq_do(cdev);
288 return;
289err:
290 ccwreq_stop(cdev, rc);
291}
292
293
294/**
295 * ccw_request_timeout - timeout handler for I/O request procedure
296 * @cdev: ccw device
297 *
298 * Handle timeout during I/O request procedure.
299 */
300void ccw_request_timeout(struct ccw_device *cdev)
301{
302 struct subchannel *sch = to_subchannel(cdev->dev.parent);
303 struct ccw_request *req = &cdev->private->req;
304 int rc;
305
306 if (!ccwreq_next_path(cdev)) {
 307 /* Set the final return code for this request. */
308 req->drc = -ETIME;
309 }
310 rc = cio_clear(sch);
311 if (rc)
312 goto err;
313 return;
314
315err:
316 ccwreq_stop(cdev, rc);
317}
318
319/**
320 * ccw_request_notoper - notoper handler for I/O request procedure
321 * @cdev: ccw device
322 *
 323 * Handle device not operational during I/O request procedure.
324 */
325void ccw_request_notoper(struct ccw_device *cdev)
326{
327 ccwreq_stop(cdev, -ENODEV);
328}
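Nothing in this file fills in the request itself; an initiator is expected to set up cdev->private->req before calling ccw_request_start(). A hedged sketch using only the fields visible above (the callback signature is inferred from req->callback(cdev, req->data, rc); all other names are hypothetical):

static void my_done(struct ccw_device *cdev, void *data, int rc)
{
	/* rc is 0 on success, -EACCES once retries and paths are
	 * exhausted, or the deferred code stored in req->drc. */
}

static void start_my_request(struct ccw_device *cdev, struct ccw1 *cp)
{
	struct ccw_request *req = &cdev->private->req;

	memset(req, 0, sizeof(*req));
	req->cp         = cp;		/* channel program to execute */
	req->timeout    = 10 * HZ;	/* per-start timeout */
	req->maxretries = 5;		/* retries per path */
	req->lpm        = 0xff;		/* allow all installed paths */
	req->callback   = my_done;
	ccw_request_start(cdev);
}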
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 2e43558c704b..bf7f80f5a330 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,6 +68,11 @@ struct schib {
68 __u8 mda[4]; /* model dependent area */ 68 __u8 mda[4]; /* model dependent area */
69} __attribute__ ((packed,aligned(4))); 69} __attribute__ ((packed,aligned(4)));
70 70
71enum sch_todo {
72 SCH_TODO_NOTHING,
73 SCH_TODO_UNREG,
74};
75
71/* subchannel data structure used by I/O subroutines */ 76/* subchannel data structure used by I/O subroutines */
72struct subchannel { 77struct subchannel {
73 struct subchannel_id schid; 78 struct subchannel_id schid;
@@ -95,7 +100,8 @@ struct subchannel {
95 struct device dev; /* entry in device tree */ 100 struct device dev; /* entry in device tree */
96 struct css_driver *driver; 101 struct css_driver *driver;
97 void *private; /* private per subchannel type data */ 102 void *private; /* private per subchannel type data */
98 struct work_struct work; 103 enum sch_todo todo;
104 struct work_struct todo_work;
99 struct schib_config config; 105 struct schib_config config;
100} __attribute__ ((aligned(8))); 106} __attribute__ ((aligned(8)));
101 107
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 91c25706fa83..92ff88ac1107 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -133,6 +133,8 @@ out:
133 return rc; 133 return rc;
134} 134}
135 135
136static void css_sch_todo(struct work_struct *work);
137
136static struct subchannel * 138static struct subchannel *
137css_alloc_subchannel(struct subchannel_id schid) 139css_alloc_subchannel(struct subchannel_id schid)
138{ 140{
@@ -147,6 +149,7 @@ css_alloc_subchannel(struct subchannel_id schid)
147 kfree(sch); 149 kfree(sch);
148 return ERR_PTR(ret); 150 return ERR_PTR(ret);
149 } 151 }
152 INIT_WORK(&sch->todo_work, css_sch_todo);
150 return sch; 153 return sch;
151} 154}
152 155
@@ -190,6 +193,51 @@ void css_sch_device_unregister(struct subchannel *sch)
190} 193}
191EXPORT_SYMBOL_GPL(css_sch_device_unregister); 194EXPORT_SYMBOL_GPL(css_sch_device_unregister);
192 195
196static void css_sch_todo(struct work_struct *work)
197{
198 struct subchannel *sch;
199 enum sch_todo todo;
200
201 sch = container_of(work, struct subchannel, todo_work);
202 /* Find out todo. */
203 spin_lock_irq(sch->lock);
204 todo = sch->todo;
205 CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
206 sch->schid.sch_no, todo);
207 sch->todo = SCH_TODO_NOTHING;
208 spin_unlock_irq(sch->lock);
209 /* Perform todo. */
210 if (todo == SCH_TODO_UNREG)
211 css_sch_device_unregister(sch);
212 /* Release workqueue ref. */
213 put_device(&sch->dev);
214}
215
216/**
217 * css_sched_sch_todo - schedule a subchannel operation
218 * @sch: subchannel
219 * @todo: todo
220 *
221 * Schedule the operation identified by @todo to be performed on the slow path
222 * workqueue. Do nothing if another operation with higher priority is already
 223 * workqueue. Do nothing if an operation of equal or higher priority is already
224 */
225void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
226{
227 CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
228 sch->schid.ssid, sch->schid.sch_no, todo);
229 if (sch->todo >= todo)
230 return;
231 /* Get workqueue ref. */
232 if (!get_device(&sch->dev))
233 return;
234 sch->todo = todo;
235 if (!queue_work(slow_path_wq, &sch->todo_work)) {
236 /* Already queued, release workqueue ref. */
237 put_device(&sch->dev);
238 }
239}
240
193static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) 241static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
194{ 242{
195 int i; 243 int i;
@@ -376,8 +424,8 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
376 /* Unusable - ignore. */ 424 /* Unusable - ignore. */
377 return 0; 425 return 0;
378 } 426 }
379 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, " 427 CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
380 "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER); 428 schid.sch_no);
381 429
382 return css_probe_device(schid); 430 return css_probe_device(schid);
383} 431}
@@ -394,6 +442,10 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
394 "Got subchannel machine check but " 442 "Got subchannel machine check but "
395 "no sch_event handler provided.\n"); 443 "no sch_event handler provided.\n");
396 } 444 }
445 if (ret != 0 && ret != -EAGAIN) {
446 CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
447 sch->schid.ssid, sch->schid.sch_no, ret);
448 }
397 return ret; 449 return ret;
398} 450}
399 451
@@ -684,6 +736,7 @@ static int __init setup_css(int nr)
684 css->pseudo_subchannel->dev.parent = &css->device; 736 css->pseudo_subchannel->dev.parent = &css->device;
685 css->pseudo_subchannel->dev.release = css_subchannel_release; 737 css->pseudo_subchannel->dev.release = css_subchannel_release;
686 dev_set_name(&css->pseudo_subchannel->dev, "defunct"); 738 dev_set_name(&css->pseudo_subchannel->dev, "defunct");
739 mutex_init(&css->pseudo_subchannel->reg_mutex);
687 ret = cio_create_sch_lock(css->pseudo_subchannel); 740 ret = cio_create_sch_lock(css->pseudo_subchannel);
688 if (ret) { 741 if (ret) {
689 kfree(css->pseudo_subchannel); 742 kfree(css->pseudo_subchannel);
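Callers of css_sched_sch_todo() follow the locking rule stated in its kernel-doc; the io_subchannel_probe() error path further down in this series is the canonical shape:

	/* Request deferred unregistration; subchannel lock must be held. */
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);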
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 68d6b0bf151c..fe84b92cde60 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -11,6 +11,8 @@
11#include <asm/chpid.h> 11#include <asm/chpid.h>
12#include <asm/schid.h> 12#include <asm/schid.h>
13 13
14#include "cio.h"
15
14/* 16/*
15 * path grouping stuff 17 * path grouping stuff
16 */ 18 */
@@ -151,4 +153,5 @@ int css_sch_is_valid(struct schib *);
151 153
152extern struct workqueue_struct *slow_path_wq; 154extern struct workqueue_struct *slow_path_wq;
153void css_wait_for_slow_path(void); 155void css_wait_for_slow_path(void);
156void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
154#endif 157#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 2490b741e16a..9fecfb4223a8 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -7,6 +7,10 @@
7 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
8 * Martin Schwidefsky (schwidefsky@de.ibm.com) 8 * Martin Schwidefsky (schwidefsky@de.ibm.com)
9 */ 9 */
10
11#define KMSG_COMPONENT "cio"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
10#include <linux/module.h> 14#include <linux/module.h>
11#include <linux/init.h> 15#include <linux/init.h>
12#include <linux/spinlock.h> 16#include <linux/spinlock.h>
@@ -299,53 +303,18 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
299 303
300static void ccw_device_unregister(struct ccw_device *cdev) 304static void ccw_device_unregister(struct ccw_device *cdev)
301{ 305{
302 if (test_and_clear_bit(1, &cdev->private->registered)) { 306 if (device_is_registered(&cdev->dev)) {
307 /* Undo device_add(). */
303 device_del(&cdev->dev); 308 device_del(&cdev->dev);
309 }
310 if (cdev->private->flags.initialized) {
311 cdev->private->flags.initialized = 0;
304 /* Release reference from device_initialize(). */ 312 /* Release reference from device_initialize(). */
305 put_device(&cdev->dev); 313 put_device(&cdev->dev);
306 } 314 }
307} 315}
308 316
309static void ccw_device_remove_orphan_cb(struct work_struct *work) 317static void io_subchannel_quiesce(struct subchannel *);
310{
311 struct ccw_device_private *priv;
312 struct ccw_device *cdev;
313
314 priv = container_of(work, struct ccw_device_private, kick_work);
315 cdev = priv->cdev;
316 ccw_device_unregister(cdev);
317 /* Release cdev reference for workqueue processing. */
318 put_device(&cdev->dev);
319}
320
321static void
322ccw_device_remove_disconnected(struct ccw_device *cdev)
323{
324 unsigned long flags;
325
326 /*
327 * Forced offline in disconnected state means
328 * 'throw away device'.
329 */
330 if (ccw_device_is_orphan(cdev)) {
331 /*
332 * Deregister ccw device.
333 * Unfortunately, we cannot do this directly from the
334 * attribute method.
335 */
336 /* Get cdev reference for workqueue processing. */
337 if (!get_device(&cdev->dev))
338 return;
339 spin_lock_irqsave(cdev->ccwlock, flags);
340 cdev->private->state = DEV_STATE_NOT_OPER;
341 spin_unlock_irqrestore(cdev->ccwlock, flags);
342 PREPARE_WORK(&cdev->private->kick_work,
343 ccw_device_remove_orphan_cb);
344 queue_work(slow_path_wq, &cdev->private->kick_work);
345 } else
346 /* Deregister subchannel, which will kill the ccw device. */
347 ccw_device_schedule_sch_unregister(cdev);
348}
349 318
350/** 319/**
351 * ccw_device_set_offline() - disable a ccw device for I/O 320 * ccw_device_set_offline() - disable a ccw device for I/O
@@ -360,7 +329,8 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
360 */ 329 */
361int ccw_device_set_offline(struct ccw_device *cdev) 330int ccw_device_set_offline(struct ccw_device *cdev)
362{ 331{
363 int ret; 332 struct subchannel *sch;
333 int ret, state;
364 334
365 if (!cdev) 335 if (!cdev)
366 return -ENODEV; 336 return -ENODEV;
@@ -374,6 +344,7 @@ int ccw_device_set_offline(struct ccw_device *cdev)
374 } 344 }
375 cdev->online = 0; 345 cdev->online = 0;
376 spin_lock_irq(cdev->ccwlock); 346 spin_lock_irq(cdev->ccwlock);
347 sch = to_subchannel(cdev->dev.parent);
377 /* Wait until a final state or DISCONNECTED is reached */ 348 /* Wait until a final state or DISCONNECTED is reached */
378 while (!dev_fsm_final_state(cdev) && 349 while (!dev_fsm_final_state(cdev) &&
379 cdev->private->state != DEV_STATE_DISCONNECTED) { 350 cdev->private->state != DEV_STATE_DISCONNECTED) {
@@ -382,20 +353,37 @@ int ccw_device_set_offline(struct ccw_device *cdev)
382 cdev->private->state == DEV_STATE_DISCONNECTED)); 353 cdev->private->state == DEV_STATE_DISCONNECTED));
383 spin_lock_irq(cdev->ccwlock); 354 spin_lock_irq(cdev->ccwlock);
384 } 355 }
385 ret = ccw_device_offline(cdev); 356 do {
386 if (ret) 357 ret = ccw_device_offline(cdev);
387 goto error; 358 if (!ret)
359 break;
360 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
361 "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
362 cdev->private->dev_id.devno);
363 if (ret != -EBUSY)
364 goto error;
365 state = cdev->private->state;
366 spin_unlock_irq(cdev->ccwlock);
367 io_subchannel_quiesce(sch);
368 spin_lock_irq(cdev->ccwlock);
369 cdev->private->state = state;
370 } while (ret == -EBUSY);
388 spin_unlock_irq(cdev->ccwlock); 371 spin_unlock_irq(cdev->ccwlock);
389 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) || 372 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
390 cdev->private->state == DEV_STATE_DISCONNECTED)); 373 cdev->private->state == DEV_STATE_DISCONNECTED));
374 /* Inform the user if set offline failed. */
375 if (cdev->private->state == DEV_STATE_BOXED) {
376 pr_warning("%s: The device entered boxed state while "
377 "being set offline\n", dev_name(&cdev->dev));
378 } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
379 pr_warning("%s: The device stopped operating while "
380 "being set offline\n", dev_name(&cdev->dev));
381 }
391 /* Give up reference from ccw_device_set_online(). */ 382 /* Give up reference from ccw_device_set_online(). */
392 put_device(&cdev->dev); 383 put_device(&cdev->dev);
393 return 0; 384 return 0;
394 385
395error: 386error:
396 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device 0.%x.%04x\n",
397 ret, cdev->private->dev_id.ssid,
398 cdev->private->dev_id.devno);
399 cdev->private->state = DEV_STATE_OFFLINE; 387 cdev->private->state = DEV_STATE_OFFLINE;
400 dev_fsm_event(cdev, DEV_EVENT_NOTOPER); 388 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
401 spin_unlock_irq(cdev->ccwlock); 389 spin_unlock_irq(cdev->ccwlock);
@@ -448,6 +436,16 @@ int ccw_device_set_online(struct ccw_device *cdev)
448 if ((cdev->private->state != DEV_STATE_ONLINE) && 436 if ((cdev->private->state != DEV_STATE_ONLINE) &&
449 (cdev->private->state != DEV_STATE_W4SENSE)) { 437 (cdev->private->state != DEV_STATE_W4SENSE)) {
450 spin_unlock_irq(cdev->ccwlock); 438 spin_unlock_irq(cdev->ccwlock);
439 /* Inform the user that set online failed. */
440 if (cdev->private->state == DEV_STATE_BOXED) {
441 pr_warning("%s: Setting the device online failed "
442 "because it is boxed\n",
443 dev_name(&cdev->dev));
444 } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
445 pr_warning("%s: Setting the device online failed "
446 "because it is not operational\n",
447 dev_name(&cdev->dev));
448 }
451 /* Give up online reference since onlining failed. */ 449 /* Give up online reference since onlining failed. */
452 put_device(&cdev->dev); 450 put_device(&cdev->dev);
453 return -ENODEV; 451 return -ENODEV;
@@ -494,27 +492,22 @@ error:
494 492
495static int online_store_handle_offline(struct ccw_device *cdev) 493static int online_store_handle_offline(struct ccw_device *cdev)
496{ 494{
497 if (cdev->private->state == DEV_STATE_DISCONNECTED) 495 if (cdev->private->state == DEV_STATE_DISCONNECTED) {
498 ccw_device_remove_disconnected(cdev); 496 spin_lock_irq(cdev->ccwlock);
499 else if (cdev->online && cdev->drv && cdev->drv->set_offline) 497 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
498 spin_unlock_irq(cdev->ccwlock);
499 } else if (cdev->online && cdev->drv && cdev->drv->set_offline)
500 return ccw_device_set_offline(cdev); 500 return ccw_device_set_offline(cdev);
501 return 0; 501 return 0;
502} 502}
503 503
504static int online_store_recog_and_online(struct ccw_device *cdev) 504static int online_store_recog_and_online(struct ccw_device *cdev)
505{ 505{
506 int ret;
507
508 /* Do device recognition, if needed. */ 506 /* Do device recognition, if needed. */
509 if (cdev->private->state == DEV_STATE_BOXED) { 507 if (cdev->private->state == DEV_STATE_BOXED) {
510 ret = ccw_device_recognition(cdev); 508 spin_lock_irq(cdev->ccwlock);
511 if (ret) { 509 ccw_device_recognition(cdev);
512 CIO_MSG_EVENT(0, "Couldn't start recognition " 510 spin_unlock_irq(cdev->ccwlock);
513 "for device 0.%x.%04x (ret=%d)\n",
514 cdev->private->dev_id.ssid,
515 cdev->private->dev_id.devno, ret);
516 return ret;
517 }
518 wait_event(cdev->private->wait_q, 511 wait_event(cdev->private->wait_q,
519 cdev->private->flags.recog_done); 512 cdev->private->flags.recog_done);
520 if (cdev->private->state != DEV_STATE_OFFLINE) 513 if (cdev->private->state != DEV_STATE_OFFLINE)
@@ -553,11 +546,10 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
553 int force, ret; 546 int force, ret;
554 unsigned long i; 547 unsigned long i;
555 548
556 if ((cdev->private->state != DEV_STATE_OFFLINE && 549 if (!dev_fsm_final_state(cdev) &&
557 cdev->private->state != DEV_STATE_ONLINE && 550 cdev->private->state != DEV_STATE_DISCONNECTED)
558 cdev->private->state != DEV_STATE_BOXED && 551 return -EAGAIN;
559 cdev->private->state != DEV_STATE_DISCONNECTED) || 552 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
560 atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
561 return -EAGAIN; 553 return -EAGAIN;
562 554
563 if (cdev->drv && !try_module_get(cdev->drv->owner)) { 555 if (cdev->drv && !try_module_get(cdev->drv->owner)) {
@@ -665,81 +657,31 @@ static int ccw_device_register(struct ccw_device *cdev)
665 cdev->private->dev_id.devno); 657 cdev->private->dev_id.devno);
666 if (ret) 658 if (ret)
667 return ret; 659 return ret;
668 ret = device_add(dev); 660 return device_add(dev);
669 if (ret)
670 return ret;
671
672 set_bit(1, &cdev->private->registered);
673 return ret;
674} 661}
675 662
676struct match_data { 663static int match_dev_id(struct device *dev, void *data)
677 struct ccw_dev_id dev_id;
678 struct ccw_device * sibling;
679};
680
681static int
682match_devno(struct device * dev, void * data)
683{
684 struct match_data * d = data;
685 struct ccw_device * cdev;
686
687 cdev = to_ccwdev(dev);
688 if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
689 !ccw_device_is_orphan(cdev) &&
690 ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
691 (cdev != d->sibling))
692 return 1;
693 return 0;
694}
695
696static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
697 struct ccw_device *sibling)
698{
699 struct device *dev;
700 struct match_data data;
701
702 data.dev_id = *dev_id;
703 data.sibling = sibling;
704 dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);
705
706 return dev ? to_ccwdev(dev) : NULL;
707}
708
709static int match_orphan(struct device *dev, void *data)
710{ 664{
711 struct ccw_dev_id *dev_id; 665 struct ccw_device *cdev = to_ccwdev(dev);
712 struct ccw_device *cdev; 666 struct ccw_dev_id *dev_id = data;
713 667
714 dev_id = data;
715 cdev = to_ccwdev(dev);
716 return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id); 668 return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
717} 669}
718 670
719static struct ccw_device * 671static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
720get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
721 struct ccw_dev_id *dev_id)
722{ 672{
723 struct device *dev; 673 struct device *dev;
724 674
725 dev = device_find_child(&css->pseudo_subchannel->dev, dev_id, 675 dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
726 match_orphan);
727 676
728 return dev ? to_ccwdev(dev) : NULL; 677 return dev ? to_ccwdev(dev) : NULL;
729} 678}
730 679
731void ccw_device_do_unbind_bind(struct work_struct *work) 680static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
732{ 681{
733 struct ccw_device_private *priv;
734 struct ccw_device *cdev;
735 struct subchannel *sch;
736 int ret; 682 int ret;
737 683
738 priv = container_of(work, struct ccw_device_private, kick_work); 684 if (device_is_registered(&cdev->dev)) {
739 cdev = priv->cdev;
740 sch = to_subchannel(cdev->dev.parent);
741
742 if (test_bit(1, &cdev->private->registered)) {
743 device_release_driver(&cdev->dev); 685 device_release_driver(&cdev->dev);
744 ret = device_attach(&cdev->dev); 686 ret = device_attach(&cdev->dev);
745 WARN_ON(ret == -ENODEV); 687 WARN_ON(ret == -ENODEV);
@@ -773,6 +715,8 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
773 return ERR_PTR(-ENOMEM); 715 return ERR_PTR(-ENOMEM);
774} 716}
775 717
718static void ccw_device_todo(struct work_struct *work);
719
776static int io_subchannel_initialize_dev(struct subchannel *sch, 720static int io_subchannel_initialize_dev(struct subchannel *sch,
777 struct ccw_device *cdev) 721 struct ccw_device *cdev)
778{ 722{
@@ -780,7 +724,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
780 atomic_set(&cdev->private->onoff, 0); 724 atomic_set(&cdev->private->onoff, 0);
781 cdev->dev.parent = &sch->dev; 725 cdev->dev.parent = &sch->dev;
782 cdev->dev.release = ccw_device_release; 726 cdev->dev.release = ccw_device_release;
783 INIT_WORK(&cdev->private->kick_work, NULL); 727 INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
784 cdev->dev.groups = ccwdev_attr_groups; 728 cdev->dev.groups = ccwdev_attr_groups;
785 /* Do first half of device_register. */ 729 /* Do first half of device_register. */
786 device_initialize(&cdev->dev); 730 device_initialize(&cdev->dev);
@@ -789,6 +733,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
789 put_device(&cdev->dev); 733 put_device(&cdev->dev);
790 return -ENODEV; 734 return -ENODEV;
791 } 735 }
736 cdev->private->flags.initialized = 1;
792 return 0; 737 return 0;
793} 738}
794 739
@@ -806,76 +751,7 @@ static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
806 return cdev; 751 return cdev;
807} 752}
808 753
809static int io_subchannel_recog(struct ccw_device *, struct subchannel *); 754static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
810
811static void sch_attach_device(struct subchannel *sch,
812 struct ccw_device *cdev)
813{
814 css_update_ssd_info(sch);
815 spin_lock_irq(sch->lock);
816 sch_set_cdev(sch, cdev);
817 cdev->private->schid = sch->schid;
818 cdev->ccwlock = sch->lock;
819 ccw_device_trigger_reprobe(cdev);
820 spin_unlock_irq(sch->lock);
821}
822
823static void sch_attach_disconnected_device(struct subchannel *sch,
824 struct ccw_device *cdev)
825{
826 struct subchannel *other_sch;
827 int ret;
828
829 /* Get reference for new parent. */
830 if (!get_device(&sch->dev))
831 return;
832 other_sch = to_subchannel(cdev->dev.parent);
833 /* Note: device_move() changes cdev->dev.parent */
834 ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
835 if (ret) {
836 CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
837 "(ret=%d)!\n", cdev->private->dev_id.ssid,
838 cdev->private->dev_id.devno, ret);
839 /* Put reference for new parent. */
840 put_device(&sch->dev);
841 return;
842 }
843 sch_set_cdev(other_sch, NULL);
844 /* No need to keep a subchannel without ccw device around. */
845 css_sch_device_unregister(other_sch);
846 sch_attach_device(sch, cdev);
847 /* Put reference for old parent. */
848 put_device(&other_sch->dev);
849}
850
851static void sch_attach_orphaned_device(struct subchannel *sch,
852 struct ccw_device *cdev)
853{
854 int ret;
855 struct subchannel *pseudo_sch;
856
857 /* Get reference for new parent. */
858 if (!get_device(&sch->dev))
859 return;
860 pseudo_sch = to_subchannel(cdev->dev.parent);
861 /*
862 * Try to move the ccw device to its new subchannel.
863 * Note: device_move() changes cdev->dev.parent
864 */
865 ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
866 if (ret) {
867 CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
868 "failed (ret=%d)!\n",
869 cdev->private->dev_id.ssid,
870 cdev->private->dev_id.devno, ret);
871 /* Put reference for new parent. */
872 put_device(&sch->dev);
873 return;
874 }
875 sch_attach_device(sch, cdev);
876 /* Put reference on pseudo subchannel. */
877 put_device(&pseudo_sch->dev);
878}
879 755
880static void sch_create_and_recog_new_device(struct subchannel *sch) 756static void sch_create_and_recog_new_device(struct subchannel *sch)
881{ 757{
@@ -888,100 +764,19 @@ static void sch_create_and_recog_new_device(struct subchannel *sch)
888 css_sch_device_unregister(sch); 764 css_sch_device_unregister(sch);
889 return; 765 return;
890 } 766 }
891 spin_lock_irq(sch->lock);
892 sch_set_cdev(sch, cdev);
893 spin_unlock_irq(sch->lock);
894 /* Start recognition for the new ccw device. */ 767 /* Start recognition for the new ccw device. */
895 if (io_subchannel_recog(cdev, sch)) { 768 io_subchannel_recog(cdev, sch);
896 spin_lock_irq(sch->lock);
897 sch_set_cdev(sch, NULL);
898 spin_unlock_irq(sch->lock);
899 css_sch_device_unregister(sch);
900 /* Put reference from io_subchannel_create_ccwdev(). */
901 put_device(&sch->dev);
902 /* Give up initial reference. */
903 put_device(&cdev->dev);
904 }
905}
906
907
908void ccw_device_move_to_orphanage(struct work_struct *work)
909{
910 struct ccw_device_private *priv;
911 struct ccw_device *cdev;
912 struct ccw_device *replacing_cdev;
913 struct subchannel *sch;
914 int ret;
915 struct channel_subsystem *css;
916 struct ccw_dev_id dev_id;
917
918 priv = container_of(work, struct ccw_device_private, kick_work);
919 cdev = priv->cdev;
920 sch = to_subchannel(cdev->dev.parent);
921 css = to_css(sch->dev.parent);
922 dev_id.devno = sch->schib.pmcw.dev;
923 dev_id.ssid = sch->schid.ssid;
924
925 /* Increase refcount for pseudo subchannel. */
926 get_device(&css->pseudo_subchannel->dev);
927 /*
928 * Move the orphaned ccw device to the orphanage so the replacing
929 * ccw device can take its place on the subchannel.
930 * Note: device_move() changes cdev->dev.parent
931 */
932 ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev,
933 DPM_ORDER_NONE);
934 if (ret) {
935 CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
936 "(ret=%d)!\n", cdev->private->dev_id.ssid,
937 cdev->private->dev_id.devno, ret);
938 /* Decrease refcount for pseudo subchannel again. */
939 put_device(&css->pseudo_subchannel->dev);
940 return;
941 }
942 cdev->ccwlock = css->pseudo_subchannel->lock;
943 /*
944 * Search for the replacing ccw device
945 * - among the disconnected devices
946 * - in the orphanage
947 */
948 replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
949 if (replacing_cdev) {
950 sch_attach_disconnected_device(sch, replacing_cdev);
951 /* Release reference from get_disc_ccwdev_by_dev_id() */
952 put_device(&replacing_cdev->dev);
953 /* Release reference of subchannel from old cdev. */
954 put_device(&sch->dev);
955 return;
956 }
957 replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
958 if (replacing_cdev) {
959 sch_attach_orphaned_device(sch, replacing_cdev);
960 /* Release reference from get_orphaned_ccwdev_by_dev_id() */
961 put_device(&replacing_cdev->dev);
962 /* Release reference of subchannel from old cdev. */
963 put_device(&sch->dev);
964 return;
965 }
966 sch_create_and_recog_new_device(sch);
967 /* Release reference of subchannel from old cdev. */
968 put_device(&sch->dev);
969} 769}
970 770
971/* 771/*
972 * Register recognized device. 772 * Register recognized device.
973 */ 773 */
974static void 774static void io_subchannel_register(struct ccw_device *cdev)
975io_subchannel_register(struct work_struct *work)
976{ 775{
977 struct ccw_device_private *priv;
978 struct ccw_device *cdev;
979 struct subchannel *sch; 776 struct subchannel *sch;
980 int ret; 777 int ret;
981 unsigned long flags; 778 unsigned long flags;
982 779
983 priv = container_of(work, struct ccw_device_private, kick_work);
984 cdev = priv->cdev;
985 sch = to_subchannel(cdev->dev.parent); 780 sch = to_subchannel(cdev->dev.parent);
986 /* 781 /*
987 * Check if subchannel is still registered. It may have become 782 * Check if subchannel is still registered. It may have become
@@ -1033,41 +828,23 @@ out:
1033 cdev->private->flags.recog_done = 1; 828 cdev->private->flags.recog_done = 1;
1034 wake_up(&cdev->private->wait_q); 829 wake_up(&cdev->private->wait_q);
1035out_err: 830out_err:
1036 /* Release reference for workqueue processing. */
1037 put_device(&cdev->dev);
1038 if (atomic_dec_and_test(&ccw_device_init_count)) 831 if (atomic_dec_and_test(&ccw_device_init_count))
1039 wake_up(&ccw_device_init_wq); 832 wake_up(&ccw_device_init_wq);
1040} 833}
1041 834
1042static void ccw_device_call_sch_unregister(struct work_struct *work) 835static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
1043{ 836{
1044 struct ccw_device_private *priv;
1045 struct ccw_device *cdev;
1046 struct subchannel *sch; 837 struct subchannel *sch;
1047 838
1048 priv = container_of(work, struct ccw_device_private, kick_work);
1049 cdev = priv->cdev;
1050 /* Get subchannel reference for local processing. */ 839 /* Get subchannel reference for local processing. */
1051 if (!get_device(cdev->dev.parent)) 840 if (!get_device(cdev->dev.parent))
1052 return; 841 return;
1053 sch = to_subchannel(cdev->dev.parent); 842 sch = to_subchannel(cdev->dev.parent);
1054 css_sch_device_unregister(sch); 843 css_sch_device_unregister(sch);
1055 /* Release cdev reference for workqueue processing.*/
1056 put_device(&cdev->dev);
1057 /* Release subchannel reference for local processing. */ 844 /* Release subchannel reference for local processing. */
1058 put_device(&sch->dev); 845 put_device(&sch->dev);
1059} 846}
1060 847
1061void ccw_device_schedule_sch_unregister(struct ccw_device *cdev)
1062{
1063 /* Get cdev reference for workqueue processing. */
1064 if (!get_device(&cdev->dev))
1065 return;
1066 PREPARE_WORK(&cdev->private->kick_work,
1067 ccw_device_call_sch_unregister);
1068 queue_work(slow_path_wq, &cdev->private->kick_work);
1069}
1070
1071/* 848/*
1072 * subchannel recognition done. Called from the state machine. 849 * subchannel recognition done. Called from the state machine.
1073 */ 850 */
@@ -1083,7 +860,8 @@ io_subchannel_recog_done(struct ccw_device *cdev)
1083 /* Device did not respond in time. */ 860 /* Device did not respond in time. */
1084 case DEV_STATE_NOT_OPER: 861 case DEV_STATE_NOT_OPER:
1085 cdev->private->flags.recog_done = 1; 862 cdev->private->flags.recog_done = 1;
1086 ccw_device_schedule_sch_unregister(cdev); 863 /* Remove device found not operational. */
864 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1087 if (atomic_dec_and_test(&ccw_device_init_count)) 865 if (atomic_dec_and_test(&ccw_device_init_count))
1088 wake_up(&ccw_device_init_wq); 866 wake_up(&ccw_device_init_wq);
1089 break; 867 break;
@@ -1092,22 +870,15 @@ io_subchannel_recog_done(struct ccw_device *cdev)
1092 * We can't register the device in interrupt context so 870 * We can't register the device in interrupt context so
1093 * we schedule a work item. 871 * we schedule a work item.
1094 */ 872 */
1095 if (!get_device(&cdev->dev)) 873 ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
1096 break;
1097 PREPARE_WORK(&cdev->private->kick_work,
1098 io_subchannel_register);
1099 queue_work(slow_path_wq, &cdev->private->kick_work);
1100 break; 874 break;
1101 } 875 }
1102} 876}
1103 877
1104static int 878static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1105io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1106{ 879{
1107 int rc;
1108 struct ccw_device_private *priv; 880 struct ccw_device_private *priv;
1109 881
1110 sch_set_cdev(sch, cdev);
1111 cdev->ccwlock = sch->lock; 882 cdev->ccwlock = sch->lock;
1112 883
1113 /* Init private data. */ 884 /* Init private data. */
@@ -1125,62 +896,81 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1125 896
1126 /* Start async. device sensing. */ 897 /* Start async. device sensing. */
1127 spin_lock_irq(sch->lock); 898 spin_lock_irq(sch->lock);
1128 rc = ccw_device_recognition(cdev); 899 sch_set_cdev(sch, cdev);
900 ccw_device_recognition(cdev);
1129 spin_unlock_irq(sch->lock); 901 spin_unlock_irq(sch->lock);
1130 if (rc) {
1131 if (atomic_dec_and_test(&ccw_device_init_count))
1132 wake_up(&ccw_device_init_wq);
1133 }
1134 return rc;
1135} 902}
1136 903
1137static void ccw_device_move_to_sch(struct work_struct *work) 904static int ccw_device_move_to_sch(struct ccw_device *cdev,
905 struct subchannel *sch)
1138{ 906{
1139 struct ccw_device_private *priv; 907 struct subchannel *old_sch;
1140 int rc; 908 int rc, old_enabled = 0;
1141 struct subchannel *sch;
1142 struct ccw_device *cdev;
1143 struct subchannel *former_parent;
1144 909
1145 priv = container_of(work, struct ccw_device_private, kick_work); 910 old_sch = to_subchannel(cdev->dev.parent);
1146 sch = priv->sch; 911 /* Obtain child reference for new parent. */
1147 cdev = priv->cdev;
1148 former_parent = to_subchannel(cdev->dev.parent);
1149 /* Get reference for new parent. */
1150 if (!get_device(&sch->dev)) 912 if (!get_device(&sch->dev))
1151 return; 913 return -ENODEV;
914
915 if (!sch_is_pseudo_sch(old_sch)) {
916 spin_lock_irq(old_sch->lock);
917 old_enabled = old_sch->schib.pmcw.ena;
918 rc = 0;
919 if (old_enabled)
920 rc = cio_disable_subchannel(old_sch);
921 spin_unlock_irq(old_sch->lock);
922 if (rc == -EBUSY) {
923 /* Release child reference for new parent. */
924 put_device(&sch->dev);
925 return rc;
926 }
927 }
928
1152 mutex_lock(&sch->reg_mutex); 929 mutex_lock(&sch->reg_mutex);
1153 /*
1154 * Try to move the ccw device to its new subchannel.
1155 * Note: device_move() changes cdev->dev.parent
1156 */
1157 rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV); 930 rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
1158 mutex_unlock(&sch->reg_mutex); 931 mutex_unlock(&sch->reg_mutex);
1159 if (rc) { 932 if (rc) {
1160 CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel " 933 CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
1161 "0.%x.%04x failed (ret=%d)!\n",
1162 cdev->private->dev_id.ssid, 934 cdev->private->dev_id.ssid,
1163 cdev->private->dev_id.devno, sch->schid.ssid, 935 cdev->private->dev_id.devno, sch->schid.ssid,
1164 sch->schid.sch_no, rc); 936 sch->schib.pmcw.dev, rc);
1165 css_sch_device_unregister(sch); 937 if (old_enabled) {
1166 /* Put reference for new parent again. */ 938 /* Try to reenable the old subchannel. */
939 spin_lock_irq(old_sch->lock);
940 cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
941 spin_unlock_irq(old_sch->lock);
942 }
943 /* Release child reference for new parent. */
1167 put_device(&sch->dev); 944 put_device(&sch->dev);
1168 goto out; 945 return rc;
1169 } 946 }
1170 if (!sch_is_pseudo_sch(former_parent)) { 947 /* Clean up old subchannel. */
1171 spin_lock_irq(former_parent->lock); 948 if (!sch_is_pseudo_sch(old_sch)) {
1172 sch_set_cdev(former_parent, NULL); 949 spin_lock_irq(old_sch->lock);
1173 spin_unlock_irq(former_parent->lock); 950 sch_set_cdev(old_sch, NULL);
1174 css_sch_device_unregister(former_parent); 951 spin_unlock_irq(old_sch->lock);
1175 /* Reset intparm to zeroes. */ 952 css_schedule_eval(old_sch->schid);
1176 former_parent->config.intparm = 0;
1177 cio_commit_config(former_parent);
1178 } 953 }
1179 sch_attach_device(sch, cdev); 954 /* Release child reference for old parent. */
1180out: 955 put_device(&old_sch->dev);
1181 /* Put reference for old parent. */ 956 /* Initialize new subchannel. */
1182 put_device(&former_parent->dev); 957 spin_lock_irq(sch->lock);
1183 put_device(&cdev->dev); 958 cdev->private->schid = sch->schid;
959 cdev->ccwlock = sch->lock;
960 if (!sch_is_pseudo_sch(sch))
961 sch_set_cdev(sch, cdev);
962 spin_unlock_irq(sch->lock);
963 if (!sch_is_pseudo_sch(sch))
964 css_update_ssd_info(sch);
965 return 0;
966}
967
968static int ccw_device_move_to_orph(struct ccw_device *cdev)
969{
970 struct subchannel *sch = to_subchannel(cdev->dev.parent);
971 struct channel_subsystem *css = to_css(sch->dev.parent);
972
973 return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
1184} 974}
1185 975
1186static void io_subchannel_irq(struct subchannel *sch) 976static void io_subchannel_irq(struct subchannel *sch)
@@ -1199,9 +989,6 @@ void io_subchannel_init_config(struct subchannel *sch)
1199{ 989{
1200 memset(&sch->config, 0, sizeof(sch->config)); 990 memset(&sch->config, 0, sizeof(sch->config));
1201 sch->config.csense = 1; 991 sch->config.csense = 1;
1202 /* Use subchannel mp mode when there is more than 1 installed CHPID. */
1203 if ((sch->schib.pmcw.pim & (sch->schib.pmcw.pim - 1)) != 0)
1204 sch->config.mp = 1;
1205} 992}
1206 993
1207static void io_subchannel_init_fields(struct subchannel *sch) 994static void io_subchannel_init_fields(struct subchannel *sch)
@@ -1222,23 +1009,6 @@ static void io_subchannel_init_fields(struct subchannel *sch)
1222 io_subchannel_init_config(sch); 1009 io_subchannel_init_config(sch);
1223} 1010}
1224 1011
1225static void io_subchannel_do_unreg(struct work_struct *work)
1226{
1227 struct subchannel *sch;
1228
1229 sch = container_of(work, struct subchannel, work);
1230 css_sch_device_unregister(sch);
1231 put_device(&sch->dev);
1232}
1233
1234/* Schedule unregister if we have no cdev. */
1235static void io_subchannel_schedule_removal(struct subchannel *sch)
1236{
1237 get_device(&sch->dev);
1238 INIT_WORK(&sch->work, io_subchannel_do_unreg);
1239 queue_work(slow_path_wq, &sch->work);
1240}
1241
1242/* 1012/*
1243 * Note: We always return 0 so that we bind to the device even on error. 1013 * Note: We always return 0 so that we bind to the device even on error.
1244 * This is needed so that our remove function is called on unregister. 1014 * This is needed so that our remove function is called on unregister.
@@ -1247,8 +1017,6 @@ static int io_subchannel_probe(struct subchannel *sch)
1247{ 1017{
1248 struct ccw_device *cdev; 1018 struct ccw_device *cdev;
1249 int rc; 1019 int rc;
1250 unsigned long flags;
1251 struct ccw_dev_id dev_id;
1252 1020
1253 if (cio_is_console(sch->schid)) { 1021 if (cio_is_console(sch->schid)) {
1254 rc = sysfs_create_group(&sch->dev.kobj, 1022 rc = sysfs_create_group(&sch->dev.kobj,
@@ -1268,6 +1036,7 @@ static int io_subchannel_probe(struct subchannel *sch)
1268 cdev = sch_get_cdev(sch); 1036 cdev = sch_get_cdev(sch);
1269 cdev->dev.groups = ccwdev_attr_groups; 1037 cdev->dev.groups = ccwdev_attr_groups;
1270 device_initialize(&cdev->dev); 1038 device_initialize(&cdev->dev);
1039 cdev->private->flags.initialized = 1;
1271 ccw_device_register(cdev); 1040 ccw_device_register(cdev);
1272 /* 1041 /*
1273 * Check if the device is already online. If it is 1042 * Check if the device is already online. If it is
@@ -1292,44 +1061,14 @@ static int io_subchannel_probe(struct subchannel *sch)
1292 sch->private = kzalloc(sizeof(struct io_subchannel_private), 1061 sch->private = kzalloc(sizeof(struct io_subchannel_private),
1293 GFP_KERNEL | GFP_DMA); 1062 GFP_KERNEL | GFP_DMA);
1294 if (!sch->private) 1063 if (!sch->private)
1295 goto out_err; 1064 goto out_schedule;
1296 /* 1065 css_schedule_eval(sch->schid);
1297 * First check if a fitting device may be found amongst the
1298 * disconnected devices or in the orphanage.
1299 */
1300 dev_id.devno = sch->schib.pmcw.dev;
1301 dev_id.ssid = sch->schid.ssid;
1302 cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
1303 if (!cdev)
1304 cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
1305 &dev_id);
1306 if (cdev) {
1307 /*
1308 * Schedule moving the device until when we have a registered
1309 * subchannel to move to and succeed the probe. We can
1310 * unregister later again, when the probe is through.
1311 */
1312 cdev->private->sch = sch;
1313 PREPARE_WORK(&cdev->private->kick_work,
1314 ccw_device_move_to_sch);
1315 queue_work(slow_path_wq, &cdev->private->kick_work);
1316 return 0;
1317 }
1318 cdev = io_subchannel_create_ccwdev(sch);
1319 if (IS_ERR(cdev))
1320 goto out_err;
1321 rc = io_subchannel_recog(cdev, sch);
1322 if (rc) {
1323 spin_lock_irqsave(sch->lock, flags);
1324 io_subchannel_recog_done(cdev);
1325 spin_unlock_irqrestore(sch->lock, flags);
1326 }
1327 return 0; 1066 return 0;
1328out_err: 1067
1329 kfree(sch->private);
1330 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1331out_schedule: 1068out_schedule:
1332 io_subchannel_schedule_removal(sch); 1069 spin_lock_irq(sch->lock);
1070 css_sched_sch_todo(sch, SCH_TODO_UNREG);
1071 spin_unlock_irq(sch->lock);
1333 return 0; 1072 return 0;
1334} 1073}
1335 1074
@@ -1337,32 +1076,23 @@ static int
1337io_subchannel_remove (struct subchannel *sch) 1076io_subchannel_remove (struct subchannel *sch)
1338{ 1077{
1339 struct ccw_device *cdev; 1078 struct ccw_device *cdev;
1340 unsigned long flags;
1341 1079
1342 cdev = sch_get_cdev(sch); 1080 cdev = sch_get_cdev(sch);
1343 if (!cdev) 1081 if (!cdev)
1344 return 0; 1082 goto out_free;
1083 io_subchannel_quiesce(sch);
1345 /* Set ccw device to not operational and drop reference. */ 1084 /* Set ccw device to not operational and drop reference. */
1346 spin_lock_irqsave(cdev->ccwlock, flags); 1085 spin_lock_irq(cdev->ccwlock);
1347 sch_set_cdev(sch, NULL); 1086 sch_set_cdev(sch, NULL);
1348 cdev->private->state = DEV_STATE_NOT_OPER; 1087 cdev->private->state = DEV_STATE_NOT_OPER;
1349 spin_unlock_irqrestore(cdev->ccwlock, flags); 1088 spin_unlock_irq(cdev->ccwlock);
1350 ccw_device_unregister(cdev); 1089 ccw_device_unregister(cdev);
1090out_free:
1351 kfree(sch->private); 1091 kfree(sch->private);
1352 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); 1092 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1353 return 0; 1093 return 0;
1354} 1094}
1355 1095
1356static int io_subchannel_notify(struct subchannel *sch, int event)
1357{
1358 struct ccw_device *cdev;
1359
1360 cdev = sch_get_cdev(sch);
1361 if (!cdev)
1362 return 0;
1363 return ccw_device_notify(cdev, event);
1364}
1365
1366static void io_subchannel_verify(struct subchannel *sch) 1096static void io_subchannel_verify(struct subchannel *sch)
1367{ 1097{
1368 struct ccw_device *cdev; 1098 struct ccw_device *cdev;
@@ -1372,36 +1102,6 @@ static void io_subchannel_verify(struct subchannel *sch)
1372 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 1102 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1373} 1103}
1374 1104
1375static int check_for_io_on_path(struct subchannel *sch, int mask)
1376{
1377 if (cio_update_schib(sch))
1378 return 0;
1379 if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
1380 return 1;
1381 return 0;
1382}
1383
1384static void terminate_internal_io(struct subchannel *sch,
1385 struct ccw_device *cdev)
1386{
1387 if (cio_clear(sch)) {
1388 /* Recheck device in case clear failed. */
1389 sch->lpm = 0;
1390 if (cdev->online)
1391 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1392 else
1393 css_schedule_eval(sch->schid);
1394 return;
1395 }
1396 cdev->private->state = DEV_STATE_CLEAR_VERIFY;
1397 /* Request retry of internal operation. */
1398 cdev->private->flags.intretry = 1;
1399 /* Call handler. */
1400 if (cdev->handler)
1401 cdev->handler(cdev, cdev->private->intparm,
1402 ERR_PTR(-EIO));
1403}
1404
1405static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask) 1105static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
1406{ 1106{
1407 struct ccw_device *cdev; 1107 struct ccw_device *cdev;
@@ -1409,18 +1109,24 @@ static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
1409 cdev = sch_get_cdev(sch); 1109 cdev = sch_get_cdev(sch);
1410 if (!cdev) 1110 if (!cdev)
1411 return; 1111 return;
1412 if (check_for_io_on_path(sch, mask)) { 1112 if (cio_update_schib(sch))
1413 if (cdev->private->state == DEV_STATE_ONLINE) 1113 goto err;
1414 ccw_device_kill_io(cdev); 1114 /* Check for I/O on path. */
1415 else { 1115 if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
1416 terminate_internal_io(sch, cdev); 1116 goto out;
1417 /* Re-start path verification. */ 1117 if (cdev->private->state == DEV_STATE_ONLINE) {
1418 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 1118 ccw_device_kill_io(cdev);
1419 } 1119 goto out;
1420 } else 1120 }
1421 /* trigger path verification. */ 1121 if (cio_clear(sch))
1422 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 1122 goto err;
1123out:
1124 /* Trigger path verification. */
1125 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1126 return;
1423 1127
1128err:
1129 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
1424} 1130}
1425 1131
1426static int io_subchannel_chp_event(struct subchannel *sch, 1132static int io_subchannel_chp_event(struct subchannel *sch,
@@ -1457,46 +1163,41 @@ static int io_subchannel_chp_event(struct subchannel *sch,
1457 return 0; 1163 return 0;
1458} 1164}
1459 1165
1460static void 1166static void io_subchannel_quiesce(struct subchannel *sch)
1461io_subchannel_shutdown(struct subchannel *sch)
1462{ 1167{
1463 struct ccw_device *cdev; 1168 struct ccw_device *cdev;
1464 int ret; 1169 int ret;
1465 1170
1171 spin_lock_irq(sch->lock);
1466 cdev = sch_get_cdev(sch); 1172 cdev = sch_get_cdev(sch);
1467
1468 if (cio_is_console(sch->schid)) 1173 if (cio_is_console(sch->schid))
1469 return; 1174 goto out_unlock;
1470 if (!sch->schib.pmcw.ena) 1175 if (!sch->schib.pmcw.ena)
1471 /* Nothing to do. */ 1176 goto out_unlock;
1472 return;
1473 ret = cio_disable_subchannel(sch); 1177 ret = cio_disable_subchannel(sch);
1474 if (ret != -EBUSY) 1178 if (ret != -EBUSY)
1475 /* Subchannel is disabled, we're done. */ 1179 goto out_unlock;
1476 return;
1477 cdev->private->state = DEV_STATE_QUIESCE;
1478 if (cdev->handler) 1180 if (cdev->handler)
1479 cdev->handler(cdev, cdev->private->intparm, 1181 cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
1480 ERR_PTR(-EIO)); 1182 while (ret == -EBUSY) {
1481 ret = ccw_device_cancel_halt_clear(cdev); 1183 cdev->private->state = DEV_STATE_QUIESCE;
1482 if (ret == -EBUSY) { 1184 ret = ccw_device_cancel_halt_clear(cdev);
1483 ccw_device_set_timeout(cdev, HZ/10); 1185 if (ret == -EBUSY) {
1484 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 1186 ccw_device_set_timeout(cdev, HZ/10);
1187 spin_unlock_irq(sch->lock);
1188 wait_event(cdev->private->wait_q,
1189 cdev->private->state != DEV_STATE_QUIESCE);
1190 spin_lock_irq(sch->lock);
1191 }
1192 ret = cio_disable_subchannel(sch);
1485 } 1193 }
1486 cio_disable_subchannel(sch); 1194out_unlock:
1195 spin_unlock_irq(sch->lock);
1487} 1196}
1488 1197
1489static int io_subchannel_get_status(struct subchannel *sch) 1198static void io_subchannel_shutdown(struct subchannel *sch)
1490{ 1199{
1491 struct schib schib; 1200 io_subchannel_quiesce(sch);
1492
1493 if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
1494 return CIO_GONE;
1495 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
1496 return CIO_REVALIDATE;
1497 if (!sch->lpm)
1498 return CIO_NO_PATH;
1499 return CIO_OPER;
1500} 1201}
1501 1202
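/*
 * Editor's sketch (not part of the patch): io_subchannel_quiesce()
 * above retries cancel/halt/clear for as long as it reports -EBUSY,
 * dropping the subchannel lock only while it sleeps. A condensed,
 * lock-free model of the loop, assuming stubs with the same return
 * convention as cio_disable_subchannel() and
 * ccw_device_cancel_halt_clear():
 */
#include <errno.h>

static int busy_steps = 2;	/* pretend I/O stays pending for a while */

static int try_disable(void)
{
	return busy_steps ? -EBUSY : 0;
}

static int cancel_halt_clear(void)
{
	busy_steps--;
	return busy_steps ? -EBUSY : 0;
}

static void quiesce(void)
{
	int ret = try_disable();

	while (ret == -EBUSY) {
		ret = cancel_halt_clear();
		if (ret == -EBUSY) {
			/* original: set a timeout, unlock, wait for the
			 * FSM to leave DEV_STATE_QUIESCE, then relock */
		}
		ret = try_disable();
	}
}

int main(void)
{
	quiesce();
	return 0;
}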
1502static int device_is_disconnected(struct ccw_device *cdev) 1203static int device_is_disconnected(struct ccw_device *cdev)
@@ -1575,20 +1276,16 @@ static void ccw_device_schedule_recovery(void)
1575static int purge_fn(struct device *dev, void *data) 1276static int purge_fn(struct device *dev, void *data)
1576{ 1277{
1577 struct ccw_device *cdev = to_ccwdev(dev); 1278 struct ccw_device *cdev = to_ccwdev(dev);
1578 struct ccw_device_private *priv = cdev->private; 1279 struct ccw_dev_id *id = &cdev->private->dev_id;
1579 int unreg;
1580 1280
1581 spin_lock_irq(cdev->ccwlock); 1281 spin_lock_irq(cdev->ccwlock);
1582 unreg = is_blacklisted(priv->dev_id.ssid, priv->dev_id.devno) && 1282 if (is_blacklisted(id->ssid, id->devno) &&
1583 (priv->state == DEV_STATE_OFFLINE); 1283 (cdev->private->state == DEV_STATE_OFFLINE)) {
1284 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
1285 id->devno);
1286 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1287 }
1584 spin_unlock_irq(cdev->ccwlock); 1288 spin_unlock_irq(cdev->ccwlock);
1585 if (!unreg)
1586 goto out;
1587 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
1588 priv->dev_id.devno);
1589 ccw_device_schedule_sch_unregister(cdev);
1590
1591out:
1592 /* Abort loop in case of pending signal. */ 1289 /* Abort loop in case of pending signal. */
1593 if (signal_pending(current)) 1290 if (signal_pending(current))
1594 return -EINTR; 1291 return -EINTR;
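/*
 * Editor's sketch (not part of the patch): purge_fn() has the shape of
 * a driver-model per-device callback; the walk stops as soon as the
 * callback returns non-zero, which is how the -EINTR above aborts the
 * purge when a signal is pending. A user-space analogue of that
 * contract:
 */
#include <stdio.h>
#include <errno.h>

static int for_each_dev(const int *devs, int n,
			int (*fn)(int dev, void *data), void *data)
{
	int i, rc;

	for (i = 0; i < n; i++) {
		rc = fn(devs[i], data);
		if (rc)		/* non-zero return aborts the walk */
			return rc;
	}
	return 0;
}

static int purge_one(int dev, void *data)
{
	int *stop_at = data;

	printf("visiting device %d\n", dev);
	return dev == *stop_at ? -EINTR : 0;
}

int main(void)
{
	int devs[] = { 1, 2, 3, 4 };
	int stop_at = 3;	/* devices after this one are never visited */

	return for_each_dev(devs, 4, purge_one, &stop_at) == -EINTR ? 0 : 1;
}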
@@ -1630,91 +1327,169 @@ void ccw_device_set_notoper(struct ccw_device *cdev)
1630 cdev->private->state = DEV_STATE_NOT_OPER; 1327 cdev->private->state = DEV_STATE_NOT_OPER;
1631} 1328}
1632 1329
1633static int io_subchannel_sch_event(struct subchannel *sch, int slow) 1330enum io_sch_action {
1331 IO_SCH_UNREG,
1332 IO_SCH_ORPH_UNREG,
1333 IO_SCH_ATTACH,
1334 IO_SCH_UNREG_ATTACH,
1335 IO_SCH_ORPH_ATTACH,
1336 IO_SCH_REPROBE,
1337 IO_SCH_VERIFY,
1338 IO_SCH_DISC,
1339 IO_SCH_NOP,
1340};
1341
1342static enum io_sch_action sch_get_action(struct subchannel *sch)
1343{
1344 struct ccw_device *cdev;
1345
1346 cdev = sch_get_cdev(sch);
1347 if (cio_update_schib(sch)) {
1348 /* Not operational. */
1349 if (!cdev)
1350 return IO_SCH_UNREG;
1351 if (!ccw_device_notify(cdev, CIO_GONE))
1352 return IO_SCH_UNREG;
1353 return IO_SCH_ORPH_UNREG;
1354 }
1355 /* Operational. */
1356 if (!cdev)
1357 return IO_SCH_ATTACH;
1358 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
1359 if (!ccw_device_notify(cdev, CIO_GONE))
1360 return IO_SCH_UNREG_ATTACH;
1361 return IO_SCH_ORPH_ATTACH;
1362 }
1363 if ((sch->schib.pmcw.pam & sch->opm) == 0) {
1364 if (!ccw_device_notify(cdev, CIO_NO_PATH))
1365 return IO_SCH_UNREG;
1366 return IO_SCH_DISC;
1367 }
1368 if (device_is_disconnected(cdev))
1369 return IO_SCH_REPROBE;
1370 if (cdev->online)
1371 return IO_SCH_VERIFY;
1372 return IO_SCH_NOP;
1373}
1374
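/*
 * Editor's sketch (not part of the patch): sch_get_action() separates
 * the decision - a side-effect-free classification of the current
 * subchannel/device state - from the actions, which the caller carries
 * out in switch statements. A minimal analogue of that pattern:
 */
#include <stdio.h>

enum action { ACT_NOP, ACT_VERIFY, ACT_UNREG };

struct dev_state { int operational; int online; };

static enum action get_action(const struct dev_state *d)
{
	if (!d->operational)
		return ACT_UNREG;	/* checked first, like IO_SCH_UNREG */
	if (d->online)
		return ACT_VERIFY;
	return ACT_NOP;
}

static void handle_event(struct dev_state *d)
{
	switch (get_action(d)) {
	case ACT_UNREG:
		printf("unregister device\n");
		break;
	case ACT_VERIFY:
		printf("verify paths\n");
		break;
	default:
		break;
	}
}

int main(void)
{
	struct dev_state d = { .operational = 1, .online = 1 };

	handle_event(&d);	/* prints "verify paths" */
	return 0;
}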
1375/**
1376 * io_subchannel_sch_event - process subchannel event
1377 * @sch: subchannel
1378 * @process: non-zero if function is called in process context
1379 *
1380 * An unspecified event occurred for this subchannel. Adjust data according
1381 * to the current operational state of the subchannel and device. Return
1382 * zero when the event has been handled sufficiently or -EAGAIN when this
1383 * function should be called again in process context.
1384 */
1385static int io_subchannel_sch_event(struct subchannel *sch, int process)
1634{ 1386{
1635 int event, ret, disc;
1636 unsigned long flags; 1387 unsigned long flags;
1637 enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE, DISC } action;
1638 struct ccw_device *cdev; 1388 struct ccw_device *cdev;
1389 struct ccw_dev_id dev_id;
1390 enum io_sch_action action;
1391 int rc = -EAGAIN;
1639 1392
1640 spin_lock_irqsave(sch->lock, flags); 1393 spin_lock_irqsave(sch->lock, flags);
1394 if (!device_is_registered(&sch->dev))
1395 goto out_unlock;
1396 if (work_pending(&sch->todo_work))
1397 goto out_unlock;
1641 cdev = sch_get_cdev(sch); 1398 cdev = sch_get_cdev(sch);
1642 disc = device_is_disconnected(cdev); 1399 if (cdev && work_pending(&cdev->private->todo_work))
1643 if (disc && slow) { 1400 goto out_unlock;
1644 /* Disconnected devices are evaluated directly only. */
1645 spin_unlock_irqrestore(sch->lock, flags); 1402 CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
1646 return 0; 1403 sch->schid.ssid, sch->schid.sch_no, process,
1647 } 1404 action);
1648 /* No interrupt after machine check - kill pending timers. */ 1405 /* Perform immediate actions while holding the lock. */
1649 if (cdev) 1406 switch (action) {
1650 ccw_device_set_timeout(cdev, 0); 1407 case IO_SCH_REPROBE:
1651 if (!disc && !slow) { 1408 /* Trigger device recognition. */
1652 /* Non-disconnected devices are evaluated on the slow path. */ 1409 ccw_device_trigger_reprobe(cdev);
1653 spin_unlock_irqrestore(sch->lock, flags); 1410 rc = 0;
1654 return -EAGAIN; 1411 goto out_unlock;
1412 case IO_SCH_VERIFY:
1413 /* Trigger path verification. */
1414 io_subchannel_verify(sch);
1415 rc = 0;
1416 goto out_unlock;
1417 case IO_SCH_DISC:
1418 ccw_device_set_disconnected(cdev);
1419 rc = 0;
1420 goto out_unlock;
1421 case IO_SCH_ORPH_UNREG:
1422 case IO_SCH_ORPH_ATTACH:
1423 ccw_device_set_disconnected(cdev);
1424 break;
1425 case IO_SCH_UNREG_ATTACH:
1426 case IO_SCH_UNREG:
1427 if (cdev)
1428 ccw_device_set_notoper(cdev);
1429 break;
1430 case IO_SCH_NOP:
1431 rc = 0;
1432 goto out_unlock;
1433 default:
1434 break;
1655 } 1435 }
1656 event = io_subchannel_get_status(sch); 1436 spin_unlock_irqrestore(sch->lock, flags);
1657 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", 1437 /* All other actions require process context. */
1658 sch->schid.ssid, sch->schid.sch_no, event, 1438 if (!process)
1659 disc ? "disconnected" : "normal", 1439 goto out;
1660 slow ? "slow" : "fast"); 1440 /* Handle attached ccw device. */
1661 /* Analyze subchannel status. */ 1441 switch (action) {
1662 action = NONE; 1442 case IO_SCH_ORPH_UNREG:
1663 switch (event) { 1443 case IO_SCH_ORPH_ATTACH:
1664 case CIO_NO_PATH: 1444 /* Move ccw device to orphanage. */
1665 if (disc) { 1445 rc = ccw_device_move_to_orph(cdev);
1666 /* Check if paths have become available. */ 1446 if (rc)
1667 action = REPROBE; 1447 goto out;
1668 break;
1669 }
1670 /* fall through */
1671 case CIO_GONE:
1672 /* Ask driver what to do with device. */
1673 if (io_subchannel_notify(sch, event))
1674 action = DISC;
1675 else
1676 action = UNREGISTER;
1677 break; 1448 break;
1678 case CIO_REVALIDATE: 1449 case IO_SCH_UNREG_ATTACH:
1679 /* Device will be removed, so no notify necessary. */ 1450 /* Unregister ccw device. */
1680 if (disc) 1451 ccw_device_unregister(cdev);
1681 /* Reprobe because immediate unregister might block. */
1682 action = REPROBE;
1683 else
1684 action = UNREGISTER_PROBE;
1685 break; 1452 break;
1686 case CIO_OPER: 1453 default:
1687 if (disc)
1688 /* Get device operational again. */
1689 action = REPROBE;
1690 break; 1454 break;
1691 } 1455 }
1692 /* Perform action. */ 1456 /* Handle subchannel. */
1693 ret = 0;
1694 switch (action) { 1457 switch (action) {
1695 case UNREGISTER: 1458 case IO_SCH_ORPH_UNREG:
1696 case UNREGISTER_PROBE: 1459 case IO_SCH_UNREG:
1697 ccw_device_set_notoper(cdev);
1698 /* Unregister device (will use subchannel lock). */
1699 spin_unlock_irqrestore(sch->lock, flags);
1700 css_sch_device_unregister(sch); 1460 css_sch_device_unregister(sch);
1701 spin_lock_irqsave(sch->lock, flags);
1702 break; 1461 break;
1703 case REPROBE: 1462 case IO_SCH_ORPH_ATTACH:
1463 case IO_SCH_UNREG_ATTACH:
1464 case IO_SCH_ATTACH:
1465 dev_id.ssid = sch->schid.ssid;
1466 dev_id.devno = sch->schib.pmcw.dev;
1467 cdev = get_ccwdev_by_dev_id(&dev_id);
1468 if (!cdev) {
1469 sch_create_and_recog_new_device(sch);
1470 break;
1471 }
1472 rc = ccw_device_move_to_sch(cdev, sch);
1473 if (rc) {
1474 /* Release reference from get_ccwdev_by_dev_id() */
1475 put_device(&cdev->dev);
1476 goto out;
1477 }
1478 spin_lock_irqsave(sch->lock, flags);
1704 ccw_device_trigger_reprobe(cdev); 1479 ccw_device_trigger_reprobe(cdev);
1705 break; 1480 spin_unlock_irqrestore(sch->lock, flags);
1706 case DISC: 1481 /* Release reference from get_ccwdev_by_dev_id() */
1707 ccw_device_set_disconnected(cdev); 1482 put_device(&cdev->dev);
1708 break; 1483 break;
1709 default: 1484 default:
1710 break; 1485 break;
1711 } 1486 }
1712 spin_unlock_irqrestore(sch->lock, flags); 1487 return 0;
1713 /* Probe if necessary. */
1714 if (action == UNREGISTER_PROBE)
1715 ret = css_probe_device(sch->schid);
1716 1488
1717 return ret; 1489out_unlock:
1490 spin_unlock_irqrestore(sch->lock, flags);
1491out:
1492 return rc;
1718} 1493}
1719 1494
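/*
 * Editor's sketch (not part of the patch): per the kernel-doc above,
 * io_subchannel_sch_event() does cheap work under the lock and returns
 * -EAGAIN when the remaining work needs process context, so the caller
 * re-invokes it later from a workqueue. A minimal model of that
 * contract:
 */
#include <stdio.h>
#include <errno.h>

static int handle_event(int needs_blocking_work, int process)
{
	/* phase 1: immediate actions, in the original under sch->lock */
	if (!needs_blocking_work)
		return 0;
	/* phase 2: blocking actions, only legal in process context */
	if (!process)
		return -EAGAIN;	/* caller must retry in process context */
	printf("doing blocking work\n");
	return 0;
}

int main(void)
{
	if (handle_event(1, 0) == -EAGAIN)
		return handle_event(1, 1);	/* the workqueue retry */
	return 0;
}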
1720#ifdef CONFIG_CCW_CONSOLE 1495#ifdef CONFIG_CCW_CONSOLE
@@ -1744,10 +1519,7 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
1744 sch->driver = &io_subchannel_driver; 1519 sch->driver = &io_subchannel_driver;
1745 /* Initialize the ccw_device structure. */ 1520 /* Initialize the ccw_device structure. */
1746 cdev->dev.parent= &sch->dev; 1521 cdev->dev.parent= &sch->dev;
1747 rc = io_subchannel_recog(cdev, sch); 1522 io_subchannel_recog(cdev, sch);
1748 if (rc)
1749 return rc;
1750
1751 /* Now wait for the async. recognition to come to an end. */ 1523 /* Now wait for the async. recognition to come to an end. */
1752 spin_lock_irq(cdev->ccwlock); 1524 spin_lock_irq(cdev->ccwlock);
1753 while (!dev_fsm_final_state(cdev)) 1525 while (!dev_fsm_final_state(cdev))
@@ -1763,7 +1535,7 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
1763 rc = 0; 1535 rc = 0;
1764out_unlock: 1536out_unlock:
1765 spin_unlock_irq(cdev->ccwlock); 1537 spin_unlock_irq(cdev->ccwlock);
1766 return 0; 1538 return rc;
1767} 1539}
1768 1540
1769struct ccw_device * 1541struct ccw_device *
@@ -1919,7 +1691,7 @@ static int ccw_device_pm_prepare(struct device *dev)
1919{ 1691{
1920 struct ccw_device *cdev = to_ccwdev(dev); 1692 struct ccw_device *cdev = to_ccwdev(dev);
1921 1693
1922 if (work_pending(&cdev->private->kick_work)) 1694 if (work_pending(&cdev->private->todo_work))
1923 return -EAGAIN; 1695 return -EAGAIN;
1924 /* Fail while device is being set online/offline. */ 1696 /* Fail while device is being set online/offline. */
1925 if (atomic_read(&cdev->private->onoff)) 1697 if (atomic_read(&cdev->private->onoff))
@@ -2005,7 +1777,6 @@ static int ccw_device_pm_thaw(struct device *dev)
2005static void __ccw_device_pm_restore(struct ccw_device *cdev) 1777static void __ccw_device_pm_restore(struct ccw_device *cdev)
2006{ 1778{
2007 struct subchannel *sch = to_subchannel(cdev->dev.parent); 1779 struct subchannel *sch = to_subchannel(cdev->dev.parent);
2008 int ret;
2009 1780
2010 if (cio_is_console(sch->schid)) 1781 if (cio_is_console(sch->schid))
2011 goto out; 1782 goto out;
@@ -2015,22 +1786,10 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
2015 */ 1786 */
2016 spin_lock_irq(sch->lock); 1787 spin_lock_irq(sch->lock);
2017 cdev->private->flags.resuming = 1; 1788 cdev->private->flags.resuming = 1;
2018 ret = ccw_device_recognition(cdev); 1789 ccw_device_recognition(cdev);
2019 spin_unlock_irq(sch->lock); 1790 spin_unlock_irq(sch->lock);
2020 if (ret) {
2021 CIO_MSG_EVENT(0, "Couldn't start recognition for device "
2022 "0.%x.%04x (ret=%d)\n",
2023 cdev->private->dev_id.ssid,
2024 cdev->private->dev_id.devno, ret);
2025 spin_lock_irq(sch->lock);
2026 cdev->private->state = DEV_STATE_DISCONNECTED;
2027 spin_unlock_irq(sch->lock);
2028 /* notify driver after the resume cb */
2029 goto out;
2030 }
2031 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) || 1791 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
2032 cdev->private->state == DEV_STATE_DISCONNECTED); 1792 cdev->private->state == DEV_STATE_DISCONNECTED);
2033
2034out: 1793out:
2035 cdev->private->flags.resuming = 0; 1794 cdev->private->flags.resuming = 0;
2036} 1795}
@@ -2040,7 +1799,7 @@ static int resume_handle_boxed(struct ccw_device *cdev)
2040 cdev->private->state = DEV_STATE_BOXED; 1799 cdev->private->state = DEV_STATE_BOXED;
2041 if (ccw_device_notify(cdev, CIO_BOXED)) 1800 if (ccw_device_notify(cdev, CIO_BOXED))
2042 return 0; 1801 return 0;
2043 ccw_device_schedule_sch_unregister(cdev); 1802 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
2044 return -ENODEV; 1803 return -ENODEV;
2045} 1804}
2046 1805
@@ -2049,7 +1808,7 @@ static int resume_handle_disc(struct ccw_device *cdev)
2049 cdev->private->state = DEV_STATE_DISCONNECTED; 1808 cdev->private->state = DEV_STATE_DISCONNECTED;
2050 if (ccw_device_notify(cdev, CIO_GONE)) 1809 if (ccw_device_notify(cdev, CIO_GONE))
2051 return 0; 1810 return 0;
2052 ccw_device_schedule_sch_unregister(cdev); 1811 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
2053 return -ENODEV; 1812 return -ENODEV;
2054} 1813}
2055 1814
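/*
 * Editor's sketch (not part of the patch): resume_handle_boxed() and
 * resume_handle_disc() share one idiom - set the new state, ask the
 * driver via notify, and if the driver declines to keep the device,
 * schedule the unregister todo and report -ENODEV. The common shape:
 */
#include <errno.h>

static int notify(int event) { (void)event; return 0; }	/* 0: give up dev */
static void sched_unreg_todo(void) { }

static int resume_handle(int event)
{
	if (notify(event))
		return 0;	/* driver keeps the device */
	sched_unreg_todo();
	return -ENODEV;
}

int main(void)
{
	return resume_handle(1) == -ENODEV ? 0 : 1;
}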
@@ -2094,9 +1853,7 @@ static int ccw_device_pm_restore(struct device *dev)
2094 /* check if the device type has changed */ 1853 /* check if the device type has changed */
2095 if (!ccw_device_test_sense_data(cdev)) { 1854 if (!ccw_device_test_sense_data(cdev)) {
2096 ccw_device_update_sense_data(cdev); 1855 ccw_device_update_sense_data(cdev);
2097 PREPARE_WORK(&cdev->private->kick_work, 1856 ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
2098 ccw_device_do_unbind_bind);
2099 queue_work(ccw_device_work, &cdev->private->kick_work);
2100 ret = -ENODEV; 1857 ret = -ENODEV;
2101 goto out_unlock; 1858 goto out_unlock;
2102 } 1859 }
@@ -2140,7 +1897,7 @@ out_disc_unlock:
2140 goto out_restore; 1897 goto out_restore;
2141 1898
2142out_unreg_unlock: 1899out_unreg_unlock:
2143 ccw_device_schedule_sch_unregister(cdev); 1900 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
2144 ret = -ENODEV; 1901 ret = -ENODEV;
2145out_unlock: 1902out_unlock:
2146 spin_unlock_irq(sch->lock); 1903 spin_unlock_irq(sch->lock);
@@ -2205,6 +1962,77 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev)
2205 return sch->schid; 1962 return sch->schid;
2206} 1963}
2207 1964
1965static void ccw_device_todo(struct work_struct *work)
1966{
1967 struct ccw_device_private *priv;
1968 struct ccw_device *cdev;
1969 struct subchannel *sch;
1970 enum cdev_todo todo;
1971
1972 priv = container_of(work, struct ccw_device_private, todo_work);
1973 cdev = priv->cdev;
1974 sch = to_subchannel(cdev->dev.parent);
1975 /* Find out todo. */
1976 spin_lock_irq(cdev->ccwlock);
1977 todo = priv->todo;
1978 priv->todo = CDEV_TODO_NOTHING;
1979 CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
1980 priv->dev_id.ssid, priv->dev_id.devno, todo);
1981 spin_unlock_irq(cdev->ccwlock);
1982 /* Perform todo. */
1983 switch (todo) {
1984 case CDEV_TODO_ENABLE_CMF:
1985 cmf_reenable(cdev);
1986 break;
1987 case CDEV_TODO_REBIND:
1988 ccw_device_do_unbind_bind(cdev);
1989 break;
1990 case CDEV_TODO_REGISTER:
1991 io_subchannel_register(cdev);
1992 break;
1993 case CDEV_TODO_UNREG_EVAL:
1994 if (!sch_is_pseudo_sch(sch))
1995 css_schedule_eval(sch->schid);
1996 /* fall-through */
1997 case CDEV_TODO_UNREG:
1998 if (sch_is_pseudo_sch(sch))
1999 ccw_device_unregister(cdev);
2000 else
2001 ccw_device_call_sch_unregister(cdev);
2002 break;
2003 default:
2004 break;
2005 }
2006 /* Release workqueue ref. */
2007 put_device(&cdev->dev);
2008}
2009
2010/**
2011 * ccw_device_sched_todo - schedule ccw device operation
2012 * @cdev: ccw device
2013 * @todo: todo
2014 *
2015 * Schedule the operation identified by @todo to be performed on the slow path
2016 * workqueue. Do nothing if another operation with higher priority is already
2017 * scheduled. Needs to be called with ccwdev lock held.
2018 */
2019void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
2020{
2021 CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
2022 cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
2023 todo);
2024 if (cdev->private->todo >= todo)
2025 return;
2026 cdev->private->todo = todo;
2027 /* Get workqueue ref. */
2028 if (!get_device(&cdev->dev))
2029 return;
2030 if (!queue_work(slow_path_wq, &cdev->private->todo_work)) {
2031 /* Already queued, release workqueue ref. */
2032 put_device(&cdev->dev);
2033 }
2034}
2035
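/*
 * Editor's sketch (not part of the patch): the
 * "cdev->private->todo >= todo" check in ccw_device_sched_todo() only
 * works because the enum values encode priority. The names below
 * mirror the patch, but their exact order is an assumption inferred
 * from that comparison (the enum itself is defined outside this hunk):
 */
#include <stdio.h>

enum cdev_todo {		/* assumed: lowest to highest priority */
	CDEV_TODO_NOTHING,
	CDEV_TODO_ENABLE_CMF,
	CDEV_TODO_REBIND,
	CDEV_TODO_REGISTER,
	CDEV_TODO_UNREG_EVAL,
	CDEV_TODO_UNREG,
};

static enum cdev_todo pending = CDEV_TODO_NOTHING;

static void sched_todo(enum cdev_todo todo)
{
	if (pending >= todo)	/* keep the higher-priority request */
		return;
	pending = todo;
}

int main(void)
{
	sched_todo(CDEV_TODO_UNREG);
	sched_todo(CDEV_TODO_REBIND);	/* ignored: UNREG outranks it */
	printf("pending = %d\n", pending);	/* 5, CDEV_TODO_UNREG */
	return 0;
}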
2208MODULE_LICENSE("GPL"); 2036MODULE_LICENSE("GPL");
2209EXPORT_SYMBOL(ccw_device_set_online); 2037EXPORT_SYMBOL(ccw_device_set_online);
2210EXPORT_SYMBOL(ccw_device_set_offline); 2038EXPORT_SYMBOL(ccw_device_set_offline);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 246c6482842c..bcfe13e42638 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -21,7 +21,6 @@ enum dev_state {
21 DEV_STATE_DISBAND_PGID, 21 DEV_STATE_DISBAND_PGID,
22 DEV_STATE_BOXED, 22 DEV_STATE_BOXED,
23 /* states to wait for i/o completion before doing something */ 23 /* states to wait for i/o completion before doing something */
24 DEV_STATE_CLEAR_VERIFY,
25 DEV_STATE_TIMEOUT_KILL, 24 DEV_STATE_TIMEOUT_KILL,
26 DEV_STATE_QUIESCE, 25 DEV_STATE_QUIESCE,
27 /* special states for devices gone not operational */ 26 /* special states for devices gone not operational */
@@ -29,6 +28,7 @@ enum dev_state {
29 DEV_STATE_DISCONNECTED_SENSE_ID, 28 DEV_STATE_DISCONNECTED_SENSE_ID,
30 DEV_STATE_CMFCHANGE, 29 DEV_STATE_CMFCHANGE,
31 DEV_STATE_CMFUPDATE, 30 DEV_STATE_CMFUPDATE,
31 DEV_STATE_STEAL_LOCK,
32 /* last element! */ 32 /* last element! */
33 NR_DEV_STATES 33 NR_DEV_STATES
34}; 34};
@@ -81,17 +81,16 @@ void io_subchannel_init_config(struct subchannel *sch);
81 81
82int ccw_device_cancel_halt_clear(struct ccw_device *); 82int ccw_device_cancel_halt_clear(struct ccw_device *);
83 83
84void ccw_device_do_unbind_bind(struct work_struct *);
85void ccw_device_move_to_orphanage(struct work_struct *);
86int ccw_device_is_orphan(struct ccw_device *); 84int ccw_device_is_orphan(struct ccw_device *);
87 85
88int ccw_device_recognition(struct ccw_device *); 86void ccw_device_recognition(struct ccw_device *);
89int ccw_device_online(struct ccw_device *); 87int ccw_device_online(struct ccw_device *);
90int ccw_device_offline(struct ccw_device *); 88int ccw_device_offline(struct ccw_device *);
91void ccw_device_update_sense_data(struct ccw_device *); 89void ccw_device_update_sense_data(struct ccw_device *);
92int ccw_device_test_sense_data(struct ccw_device *); 90int ccw_device_test_sense_data(struct ccw_device *);
93void ccw_device_schedule_sch_unregister(struct ccw_device *); 91void ccw_device_schedule_sch_unregister(struct ccw_device *);
94int ccw_purge_blacklisted(void); 92int ccw_purge_blacklisted(void);
93void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
95 94
96/* Function prototypes for device status and basic sense stuff. */ 95/* Function prototypes for device status and basic sense stuff. */
97void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); 96void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
@@ -99,24 +98,28 @@ void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
99int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *); 98int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
100int ccw_device_do_sense(struct ccw_device *, struct irb *); 99int ccw_device_do_sense(struct ccw_device *, struct irb *);
101 100
101/* Function prototype for internal request handling. */
102int lpm_adjust(int lpm, int mask);
103void ccw_request_start(struct ccw_device *);
104int ccw_request_cancel(struct ccw_device *cdev);
105void ccw_request_handler(struct ccw_device *cdev);
106void ccw_request_timeout(struct ccw_device *cdev);
107void ccw_request_notoper(struct ccw_device *cdev);
108
102/* Function prototypes for sense id stuff. */ 109/* Function prototypes for sense id stuff. */
103void ccw_device_sense_id_start(struct ccw_device *); 110void ccw_device_sense_id_start(struct ccw_device *);
104void ccw_device_sense_id_irq(struct ccw_device *, enum dev_event);
105void ccw_device_sense_id_done(struct ccw_device *, int); 111void ccw_device_sense_id_done(struct ccw_device *, int);
106 112
107/* Function prototypes for path grouping stuff. */ 113/* Function prototypes for path grouping stuff. */
108void ccw_device_sense_pgid_start(struct ccw_device *);
109void ccw_device_sense_pgid_irq(struct ccw_device *, enum dev_event);
110void ccw_device_sense_pgid_done(struct ccw_device *, int);
111
112void ccw_device_verify_start(struct ccw_device *); 114void ccw_device_verify_start(struct ccw_device *);
113void ccw_device_verify_irq(struct ccw_device *, enum dev_event);
114void ccw_device_verify_done(struct ccw_device *, int); 115void ccw_device_verify_done(struct ccw_device *, int);
115 116
116void ccw_device_disband_start(struct ccw_device *); 117void ccw_device_disband_start(struct ccw_device *);
117void ccw_device_disband_irq(struct ccw_device *, enum dev_event);
118void ccw_device_disband_done(struct ccw_device *, int); 118void ccw_device_disband_done(struct ccw_device *, int);
119 119
120void ccw_device_stlck_start(struct ccw_device *, void *, void *, void *);
121void ccw_device_stlck_done(struct ccw_device *, void *, int);
122
120int ccw_device_call_handler(struct ccw_device *); 123int ccw_device_call_handler(struct ccw_device *);
121 124
122int ccw_device_stlck(struct ccw_device *); 125int ccw_device_stlck(struct ccw_device *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index b9613d7df9ef..ae760658a131 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -229,8 +229,8 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
229 229
230 sch = to_subchannel(cdev->dev.parent); 230 sch = to_subchannel(cdev->dev.parent);
231 231
232 ccw_device_set_timeout(cdev, 0); 232 if (cio_disable_subchannel(sch))
233 cio_disable_subchannel(sch); 233 state = DEV_STATE_NOT_OPER;
234 /* 234 /*
235 * Now that we tried recognition, we have performed device selection 235 * Now that we tried recognition, we have performed device selection
236 * through ssch() and the path information is up to date. 236 * through ssch() and the path information is up to date.
@@ -263,22 +263,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
263 } 263 }
264 switch (state) { 264 switch (state) {
265 case DEV_STATE_NOT_OPER: 265 case DEV_STATE_NOT_OPER:
266 CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
267 "subchannel 0.%x.%04x\n",
268 cdev->private->dev_id.devno,
269 sch->schid.ssid, sch->schid.sch_no);
270 break; 266 break;
271 case DEV_STATE_OFFLINE: 267 case DEV_STATE_OFFLINE:
272 if (!cdev->online) { 268 if (!cdev->online) {
273 ccw_device_update_sense_data(cdev); 269 ccw_device_update_sense_data(cdev);
274 /* Issue device info message. */
275 CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
276 "CU Type/Mod = %04X/%02X, Dev Type/Mod "
277 "= %04X/%02X\n",
278 cdev->private->dev_id.ssid,
279 cdev->private->dev_id.devno,
280 cdev->id.cu_type, cdev->id.cu_model,
281 cdev->id.dev_type, cdev->id.dev_model);
282 break; 270 break;
283 } 271 }
284 cdev->private->state = DEV_STATE_OFFLINE; 272 cdev->private->state = DEV_STATE_OFFLINE;
@@ -289,16 +277,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
289 wake_up(&cdev->private->wait_q); 277 wake_up(&cdev->private->wait_q);
290 } else { 278 } else {
291 ccw_device_update_sense_data(cdev); 279 ccw_device_update_sense_data(cdev);
292 PREPARE_WORK(&cdev->private->kick_work, 280 ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
293 ccw_device_do_unbind_bind);
294 queue_work(ccw_device_work, &cdev->private->kick_work);
295 } 281 }
296 return; 282 return;
297 case DEV_STATE_BOXED: 283 case DEV_STATE_BOXED:
298 CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
299 " subchannel 0.%x.%04x\n",
300 cdev->private->dev_id.devno,
301 sch->schid.ssid, sch->schid.sch_no);
302 if (cdev->id.cu_type != 0) { /* device was recognized before */ 284 if (cdev->id.cu_type != 0) { /* device was recognized before */
303 cdev->private->flags.recog_done = 1; 285 cdev->private->flags.recog_done = 1;
304 cdev->private->state = DEV_STATE_BOXED; 286 cdev->private->state = DEV_STATE_BOXED;
@@ -343,28 +325,16 @@ int ccw_device_notify(struct ccw_device *cdev, int event)
343 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; 325 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
344} 326}
345 327
346static void cmf_reenable_delayed(struct work_struct *work)
347{
348 struct ccw_device_private *priv;
349 struct ccw_device *cdev;
350
351 priv = container_of(work, struct ccw_device_private, kick_work);
352 cdev = priv->cdev;
353 cmf_reenable(cdev);
354}
355
356static void ccw_device_oper_notify(struct ccw_device *cdev) 328static void ccw_device_oper_notify(struct ccw_device *cdev)
357{ 329{
358 if (ccw_device_notify(cdev, CIO_OPER)) { 330 if (ccw_device_notify(cdev, CIO_OPER)) {
359 /* Reenable channel measurements, if needed. */ 331 /* Reenable channel measurements, if needed. */
360 PREPARE_WORK(&cdev->private->kick_work, cmf_reenable_delayed); 332 ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
361 queue_work(ccw_device_work, &cdev->private->kick_work);
362 return; 333 return;
363 } 334 }
364 /* Driver doesn't want device back. */ 335 /* Driver doesn't want device back. */
365 ccw_device_set_notoper(cdev); 336 ccw_device_set_notoper(cdev);
366 PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unbind_bind); 337 ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
367 queue_work(ccw_device_work, &cdev->private->kick_work);
368} 338}
369 339
370/* 340/*
@@ -392,14 +362,14 @@ ccw_device_done(struct ccw_device *cdev, int state)
392 CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n", 362 CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
393 cdev->private->dev_id.devno, sch->schid.sch_no); 363 cdev->private->dev_id.devno, sch->schid.sch_no);
394 if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED)) 364 if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED))
395 ccw_device_schedule_sch_unregister(cdev); 365 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
396 cdev->private->flags.donotify = 0; 366 cdev->private->flags.donotify = 0;
397 break; 367 break;
398 case DEV_STATE_NOT_OPER: 368 case DEV_STATE_NOT_OPER:
399 CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n", 369 CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
400 cdev->private->dev_id.devno, sch->schid.sch_no); 370 cdev->private->dev_id.devno, sch->schid.sch_no);
401 if (!ccw_device_notify(cdev, CIO_GONE)) 371 if (!ccw_device_notify(cdev, CIO_GONE))
402 ccw_device_schedule_sch_unregister(cdev); 372 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
403 else 373 else
404 ccw_device_set_disconnected(cdev); 374 ccw_device_set_disconnected(cdev);
405 cdev->private->flags.donotify = 0; 375 cdev->private->flags.donotify = 0;
@@ -409,7 +379,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
409 "%04x\n", cdev->private->dev_id.devno, 379 "%04x\n", cdev->private->dev_id.devno,
410 sch->schid.sch_no); 380 sch->schid.sch_no);
411 if (!ccw_device_notify(cdev, CIO_NO_PATH)) 381 if (!ccw_device_notify(cdev, CIO_NO_PATH))
412 ccw_device_schedule_sch_unregister(cdev); 382 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
413 else 383 else
414 ccw_device_set_disconnected(cdev); 384 ccw_device_set_disconnected(cdev);
415 cdev->private->flags.donotify = 0; 385 cdev->private->flags.donotify = 0;
@@ -425,107 +395,12 @@ ccw_device_done(struct ccw_device *cdev, int state)
425 wake_up(&cdev->private->wait_q); 395 wake_up(&cdev->private->wait_q);
426} 396}
427 397
428static int cmp_pgid(struct pgid *p1, struct pgid *p2)
429{
430 char *c1;
431 char *c2;
432
433 c1 = (char *)p1;
434 c2 = (char *)p2;
435
436 return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
437}
438
439static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
440{
441 int i;
442 int last;
443
444 last = 0;
445 for (i = 0; i < 8; i++) {
446 if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
447 /* No PGID yet */
448 continue;
449 if (cdev->private->pgid[last].inf.ps.state1 ==
450 SNID_STATE1_RESET) {
451 /* First non-zero PGID */
452 last = i;
453 continue;
454 }
455 if (cmp_pgid(&cdev->private->pgid[i],
456 &cdev->private->pgid[last]) == 0)
457 /* Non-conflicting PGIDs */
458 continue;
459
460 /* PGID mismatch, can't pathgroup. */
461 CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
462 "0.%x.%04x, can't pathgroup\n",
463 cdev->private->dev_id.ssid,
464 cdev->private->dev_id.devno);
465 cdev->private->options.pgroup = 0;
466 return;
467 }
468 if (cdev->private->pgid[last].inf.ps.state1 ==
469 SNID_STATE1_RESET)
470 /* No previous pgid found */
471 memcpy(&cdev->private->pgid[0],
472 &channel_subsystems[0]->global_pgid,
473 sizeof(struct pgid));
474 else
475 /* Use existing pgid */
476 memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
477 sizeof(struct pgid));
478}
479
480/*
481 * Function called from device_pgid.c after sense path group id has completed.
482 */
483void
484ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
485{
486 struct subchannel *sch;
487
488 sch = to_subchannel(cdev->dev.parent);
489 switch (err) {
490 case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
491 cdev->private->options.pgroup = 0;
492 break;
493 case 0: /* success */
494 case -EACCES: /* partial success, some paths not operational */
495 /* Check if all pgids are equal or 0. */
496 __ccw_device_get_common_pgid(cdev);
497 break;
498 case -ETIME: /* Sense path group id stopped by timeout. */
499 case -EUSERS: /* device is reserved for someone else. */
500 ccw_device_done(cdev, DEV_STATE_BOXED);
501 return;
502 default:
503 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
504 return;
505 }
506 /* Start Path Group verification. */
507 cdev->private->state = DEV_STATE_VERIFY;
508 cdev->private->flags.doverify = 0;
509 ccw_device_verify_start(cdev);
510}
511
512/* 398/*
513 * Start device recognition. 399 * Start device recognition.
514 */ 400 */
515int 401void ccw_device_recognition(struct ccw_device *cdev)
516ccw_device_recognition(struct ccw_device *cdev)
517{ 402{
518 struct subchannel *sch; 403 struct subchannel *sch = to_subchannel(cdev->dev.parent);
519 int ret;
520
521 sch = to_subchannel(cdev->dev.parent);
522 ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
523 if (ret != 0)
524 /* Couldn't enable the subchannel for i/o. Sick device. */
525 return ret;
526
527 /* After 60s the device recognition is considered to have failed. */
528 ccw_device_set_timeout(cdev, 60*HZ);
529 404
530 /* 405 /*
531 * We used to start here with a sense pgid to find out whether a device 406 * We used to start here with a sense pgid to find out whether a device
@@ -537,32 +412,33 @@ ccw_device_recognition(struct ccw_device *cdev)
537 */ 412 */
538 cdev->private->flags.recog_done = 0; 413 cdev->private->flags.recog_done = 0;
539 cdev->private->state = DEV_STATE_SENSE_ID; 414 cdev->private->state = DEV_STATE_SENSE_ID;
415 if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
416 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
417 return;
418 }
540 ccw_device_sense_id_start(cdev); 419 ccw_device_sense_id_start(cdev);
541 return 0;
542} 420}
543 421
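/*
 * Editor's sketch (not part of the patch): ccw_device_recognition()
 * now returns void - an enable failure is funneled into the same
 * completion callback (ccw_device_recog_done) as every other
 * recognition outcome, so callers no longer handle an error return.
 * The shape of that style, with the I/O stubbed out:
 */
#include <stdio.h>

static void recog_done(int state)
{
	printf("recognition finished, state=%d\n", state);
}

static int enable_subchannel(void)
{
	return -1;	/* pretend the enable fails */
}

static void start_recognition(void)
{
	if (enable_subchannel()) {
		recog_done(-1);	/* failure reported via the normal path */
		return;
	}
	/* start SENSE ID I/O here; recog_done() runs on completion */
}

int main(void)
{
	start_recognition();
	return 0;
}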
544/* 422/*
545 * Handle timeout in device recognition. 423 * Handle events for states that use the ccw request infrastructure.
546 */ 424 */
547static void 425static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
548ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
549{ 426{
550 int ret; 427 switch (e) {
551 428 case DEV_EVENT_NOTOPER:
552 ret = ccw_device_cancel_halt_clear(cdev); 429 ccw_request_notoper(cdev);
553 switch (ret) {
554 case 0:
555 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
556 break; 430 break;
557 case -ENODEV: 431 case DEV_EVENT_INTERRUPT:
558 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER); 432 ccw_request_handler(cdev);
433 break;
434 case DEV_EVENT_TIMEOUT:
435 ccw_request_timeout(cdev);
559 break; 436 break;
560 default: 437 default:
561 ccw_device_set_timeout(cdev, 3*HZ); 438 break;
562 } 439 }
563} 440}
564 441
565
566void 442void
567ccw_device_verify_done(struct ccw_device *cdev, int err) 443ccw_device_verify_done(struct ccw_device *cdev, int err)
568{ 444{
@@ -571,21 +447,18 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
571 sch = to_subchannel(cdev->dev.parent); 447 sch = to_subchannel(cdev->dev.parent);
572 /* Update schib - pom may have changed. */ 448 /* Update schib - pom may have changed. */
573 if (cio_update_schib(sch)) { 449 if (cio_update_schib(sch)) {
574 cdev->private->flags.donotify = 0; 450 err = -ENODEV;
575 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 451 goto callback;
576 return;
577 } 452 }
578 /* Update lpm with verified path mask. */ 453 /* Update lpm with verified path mask. */
579 sch->lpm = sch->vpm; 454 sch->lpm = sch->vpm;
580 /* Repeat path verification? */ 455 /* Repeat path verification? */
581 if (cdev->private->flags.doverify) { 456 if (cdev->private->flags.doverify) {
582 cdev->private->flags.doverify = 0;
583 ccw_device_verify_start(cdev); 457 ccw_device_verify_start(cdev);
584 return; 458 return;
585 } 459 }
460callback:
586 switch (err) { 461 switch (err) {
587 case -EOPNOTSUPP: /* path grouping not supported, just set online. */
588 cdev->private->options.pgroup = 0;
589 case 0: 462 case 0:
590 ccw_device_done(cdev, DEV_STATE_ONLINE); 463 ccw_device_done(cdev, DEV_STATE_ONLINE);
591 /* Deliver fake irb to device driver, if needed. */ 464 /* Deliver fake irb to device driver, if needed. */
@@ -604,18 +477,20 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
604 } 477 }
605 break; 478 break;
606 case -ETIME: 479 case -ETIME:
480 case -EUSERS:
607 /* Reset oper notify indication after verify error. */ 481 /* Reset oper notify indication after verify error. */
608 cdev->private->flags.donotify = 0; 482 cdev->private->flags.donotify = 0;
609 ccw_device_done(cdev, DEV_STATE_BOXED); 483 ccw_device_done(cdev, DEV_STATE_BOXED);
610 break; 484 break;
485 case -EACCES:
486 /* Reset oper notify indication after verify error. */
487 cdev->private->flags.donotify = 0;
488 ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
489 break;
611 default: 490 default:
612 /* Reset oper notify indication after verify error. */ 491 /* Reset oper notify indication after verify error. */
613 cdev->private->flags.donotify = 0; 492 cdev->private->flags.donotify = 0;
614 if (cdev->online) { 493 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
615 ccw_device_set_timeout(cdev, 0);
616 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
617 } else
618 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
619 break; 494 break;
620 } 495 }
621} 496}
@@ -640,17 +515,9 @@ ccw_device_online(struct ccw_device *cdev)
640 dev_fsm_event(cdev, DEV_EVENT_NOTOPER); 515 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
641 return ret; 516 return ret;
642 } 517 }
643 /* Do we want to do path grouping? */ 518 /* Start initial path verification. */
644 if (!cdev->private->options.pgroup) { 519 cdev->private->state = DEV_STATE_VERIFY;
645 /* Start initial path verification. */ 520 ccw_device_verify_start(cdev);
646 cdev->private->state = DEV_STATE_VERIFY;
647 cdev->private->flags.doverify = 0;
648 ccw_device_verify_start(cdev);
649 return 0;
650 }
651 /* Do a SensePGID first. */
652 cdev->private->state = DEV_STATE_SENSE_PGID;
653 ccw_device_sense_pgid_start(cdev);
654 return 0; 521 return 0;
655} 522}
656 523
@@ -666,7 +533,6 @@ ccw_device_disband_done(struct ccw_device *cdev, int err)
666 break; 533 break;
667 default: 534 default:
668 cdev->private->flags.donotify = 0; 535 cdev->private->flags.donotify = 0;
669 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
670 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 536 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
671 break; 537 break;
672 } 538 }
@@ -703,7 +569,7 @@ ccw_device_offline(struct ccw_device *cdev)
703 if (cdev->private->state != DEV_STATE_ONLINE) 569 if (cdev->private->state != DEV_STATE_ONLINE)
704 return -EINVAL; 570 return -EINVAL;
705 /* Are we doing path grouping? */ 571 /* Are we doing path grouping? */
706 if (!cdev->private->options.pgroup) { 572 if (!cdev->private->flags.pgroup) {
707 /* No, set state offline immediately. */ 573 /* No, set state offline immediately. */
708 ccw_device_done(cdev, DEV_STATE_OFFLINE); 574 ccw_device_done(cdev, DEV_STATE_OFFLINE);
709 return 0; 575 return 0;
@@ -715,43 +581,13 @@ ccw_device_offline(struct ccw_device *cdev)
715} 581}
716 582
717/* 583/*
718 * Handle timeout in device online/offline process.
719 */
720static void
721ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
722{
723 int ret;
724
725 ret = ccw_device_cancel_halt_clear(cdev);
726 switch (ret) {
727 case 0:
728 ccw_device_done(cdev, DEV_STATE_BOXED);
729 break;
730 case -ENODEV:
731 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
732 break;
733 default:
734 ccw_device_set_timeout(cdev, 3*HZ);
735 }
736}
737
738/*
739 * Handle not oper event in device recognition.
740 */
741static void
742ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
743{
744 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
745}
746
747/*
748 * Handle not operational event in non-special state. 584 * Handle not operational event in non-special state.
749 */ 585 */
750static void ccw_device_generic_notoper(struct ccw_device *cdev, 586static void ccw_device_generic_notoper(struct ccw_device *cdev,
751 enum dev_event dev_event) 587 enum dev_event dev_event)
752{ 588{
753 if (!ccw_device_notify(cdev, CIO_GONE)) 589 if (!ccw_device_notify(cdev, CIO_GONE))
754 ccw_device_schedule_sch_unregister(cdev); 590 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
755 else 591 else
756 ccw_device_set_disconnected(cdev); 592 ccw_device_set_disconnected(cdev);
757} 593}
@@ -802,11 +638,27 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
802 } 638 }
803 /* Device is idle, we can do the path verification. */ 639 /* Device is idle, we can do the path verification. */
804 cdev->private->state = DEV_STATE_VERIFY; 640 cdev->private->state = DEV_STATE_VERIFY;
805 cdev->private->flags.doverify = 0;
806 ccw_device_verify_start(cdev); 641 ccw_device_verify_start(cdev);
807} 642}
808 643
809/* 644/*
645 * Handle path verification event in boxed state.
646 */
647static void ccw_device_boxed_verify(struct ccw_device *cdev,
648 enum dev_event dev_event)
649{
650 struct subchannel *sch = to_subchannel(cdev->dev.parent);
651
652 if (cdev->online) {
653 if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
654 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
655 else
656 ccw_device_online_verify(cdev, dev_event);
657 } else
658 css_schedule_eval(sch->schid);
659}
660
661/*
810 * Got an interrupt for a normal io (state online). 662 * Got an interrupt for a normal io (state online).
811 */ 663 */
812static void 664static void
@@ -904,12 +756,6 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
904 */ 756 */
905 if (scsw_fctl(&irb->scsw) & 757 if (scsw_fctl(&irb->scsw) &
906 (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { 758 (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
907 /* Retry Basic Sense if requested. */
908 if (cdev->private->flags.intretry) {
909 cdev->private->flags.intretry = 0;
910 ccw_device_do_sense(cdev, irb);
911 return;
912 }
913 cdev->private->flags.dosense = 0; 759 cdev->private->flags.dosense = 0;
914 memset(&cdev->private->irb, 0, sizeof(struct irb)); 760 memset(&cdev->private->irb, 0, sizeof(struct irb));
915 ccw_device_accumulate_irb(cdev, irb); 761 ccw_device_accumulate_irb(cdev, irb);
@@ -933,21 +779,6 @@ call_handler:
933} 779}
934 780
935static void 781static void
936ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
937{
938 struct irb *irb;
939
940 irb = (struct irb *) __LC_IRB;
941 /* Accumulate status. We don't do basic sense. */
942 ccw_device_accumulate_irb(cdev, irb);
943 /* Remember to clear irb to avoid residuals. */
944 memset(&cdev->private->irb, 0, sizeof(struct irb));
945 /* Try to start delayed device verification. */
946 ccw_device_online_verify(cdev, 0);
947 /* Note: Don't call handler for cio initiated clear! */
948}
949
950static void
951ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) 782ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
952{ 783{
953 struct subchannel *sch; 784 struct subchannel *sch;
@@ -1004,32 +835,6 @@ ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
1004} 835}
1005 836
1006static void 837static void
1007ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
1008{
1009 struct irb *irb;
1010
1011 switch (dev_event) {
1012 case DEV_EVENT_INTERRUPT:
1013 irb = (struct irb *) __LC_IRB;
1014 /* Check for unsolicited interrupt. */
1015 if ((scsw_stctl(&irb->scsw) ==
1016 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
1017 (!scsw_cc(&irb->scsw)))
1018 /* FIXME: we should restart stlck here, but this
1019 * is extremely unlikely ... */
1020 goto out_wakeup;
1021
1022 ccw_device_accumulate_irb(cdev, irb);
1023 /* We don't care about basic sense etc. */
1024 break;
1025 default: /* timeout */
1026 break;
1027 }
1028out_wakeup:
1029 wake_up(&cdev->private->wait_q);
1030}
1031
1032static void
1033ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event) 838ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1034{ 839{
1035 struct subchannel *sch; 840 struct subchannel *sch;
@@ -1038,10 +843,6 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1038 if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0) 843 if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
1039 /* Couldn't enable the subchannel for i/o. Sick device. */ 844 /* Couldn't enable the subchannel for i/o. Sick device. */
1040 return; 845 return;
1041
1042 /* After 60s the device recognition is considered to have failed. */
1043 ccw_device_set_timeout(cdev, 60*HZ);
1044
1045 cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID; 846 cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
1046 ccw_device_sense_id_start(cdev); 847 ccw_device_sense_id_start(cdev);
1047} 848}
@@ -1072,22 +873,20 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev)
1072 873
1073 /* We should also update ssd info, but this has to wait. */ 874
1074 /* Check if this is another device which appeared on the same sch. */ 875 /* Check if this is another device which appeared on the same sch. */
1075 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { 876 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
1076 PREPARE_WORK(&cdev->private->kick_work, 877 css_schedule_eval(sch->schid);
1077 ccw_device_move_to_orphanage); 878 else
1078 queue_work(slow_path_wq, &cdev->private->kick_work);
1079 } else
1080 ccw_device_start_id(cdev, 0); 879 ccw_device_start_id(cdev, 0);
1081} 880}
1082 881
1083static void 882static void ccw_device_disabled_irq(struct ccw_device *cdev,
1084ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event) 883 enum dev_event dev_event)
1085{ 884{
1086 struct subchannel *sch; 885 struct subchannel *sch;
1087 886
1088 sch = to_subchannel(cdev->dev.parent); 887 sch = to_subchannel(cdev->dev.parent);
1089 /* 888 /*
1090 * An interrupt in state offline means a previous disable was not 889 * An interrupt in a disabled state means a previous disable was not
1091 * successful - should not happen, but we try to disable again. 890 * successful - should not happen, but we try to disable again.
1092 */ 891 */
1093 cio_disable_subchannel(sch); 892 cio_disable_subchannel(sch);
@@ -1113,10 +912,7 @@ static void
1113ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event) 912ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1114{ 913{
1115 ccw_device_set_timeout(cdev, 0); 914 ccw_device_set_timeout(cdev, 0);
1116 if (dev_event == DEV_EVENT_NOTOPER) 915 cdev->private->state = DEV_STATE_NOT_OPER;
1117 cdev->private->state = DEV_STATE_NOT_OPER;
1118 else
1119 cdev->private->state = DEV_STATE_OFFLINE;
1120 wake_up(&cdev->private->wait_q); 916 wake_up(&cdev->private->wait_q);
1121} 917}
1122 918
@@ -1126,17 +922,11 @@ ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1126 int ret; 922 int ret;
1127 923
1128 ret = ccw_device_cancel_halt_clear(cdev); 924 ret = ccw_device_cancel_halt_clear(cdev);
1129 switch (ret) { 925 if (ret == -EBUSY) {
1130 case 0: 926 ccw_device_set_timeout(cdev, HZ/10);
1131 cdev->private->state = DEV_STATE_OFFLINE; 927 } else {
1132 wake_up(&cdev->private->wait_q);
1133 break;
1134 case -ENODEV:
1135 cdev->private->state = DEV_STATE_NOT_OPER; 928 cdev->private->state = DEV_STATE_NOT_OPER;
1136 wake_up(&cdev->private->wait_q); 929 wake_up(&cdev->private->wait_q);
1137 break;
1138 default:
1139 ccw_device_set_timeout(cdev, HZ/10);
1140 } 930 }
1141} 931}
1142 932
@@ -1150,50 +940,37 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
1150} 940}
1151 941
1152/* 942/*
1153 * Bug operation action.
1154 */
1155static void
1156ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
1157{
1158 CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
1159 "0.%x.%04x\n", cdev->private->state, dev_event,
1160 cdev->private->dev_id.ssid,
1161 cdev->private->dev_id.devno);
1162 BUG();
1163}
1164
1165/*
1166 * device state machine 943 * device state machine
1167 */ 944 */
1168fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { 945fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1169 [DEV_STATE_NOT_OPER] = { 946 [DEV_STATE_NOT_OPER] = {
1170 [DEV_EVENT_NOTOPER] = ccw_device_nop, 947 [DEV_EVENT_NOTOPER] = ccw_device_nop,
1171 [DEV_EVENT_INTERRUPT] = ccw_device_bug, 948 [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
1172 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 949 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1173 [DEV_EVENT_VERIFY] = ccw_device_nop, 950 [DEV_EVENT_VERIFY] = ccw_device_nop,
1174 }, 951 },
1175 [DEV_STATE_SENSE_PGID] = { 952 [DEV_STATE_SENSE_PGID] = {
1176 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 953 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1177 [DEV_EVENT_INTERRUPT] = ccw_device_sense_pgid_irq, 954 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1178 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, 955 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1179 [DEV_EVENT_VERIFY] = ccw_device_nop, 956 [DEV_EVENT_VERIFY] = ccw_device_nop,
1180 }, 957 },
1181 [DEV_STATE_SENSE_ID] = { 958 [DEV_STATE_SENSE_ID] = {
1182 [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper, 959 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1183 [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq, 960 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1184 [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout, 961 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1185 [DEV_EVENT_VERIFY] = ccw_device_nop, 962 [DEV_EVENT_VERIFY] = ccw_device_nop,
1186 }, 963 },
1187 [DEV_STATE_OFFLINE] = { 964 [DEV_STATE_OFFLINE] = {
1188 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 965 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1189 [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq, 966 [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
1190 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 967 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1191 [DEV_EVENT_VERIFY] = ccw_device_offline_verify, 968 [DEV_EVENT_VERIFY] = ccw_device_offline_verify,
1192 }, 969 },
1193 [DEV_STATE_VERIFY] = { 970 [DEV_STATE_VERIFY] = {
1194 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 971 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1195 [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq, 972 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1196 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, 973 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1197 [DEV_EVENT_VERIFY] = ccw_device_delay_verify, 974 [DEV_EVENT_VERIFY] = ccw_device_delay_verify,
1198 }, 975 },
1199 [DEV_STATE_ONLINE] = { 976 [DEV_STATE_ONLINE] = {
@@ -1209,24 +986,18 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1209 [DEV_EVENT_VERIFY] = ccw_device_online_verify, 986 [DEV_EVENT_VERIFY] = ccw_device_online_verify,
1210 }, 987 },
1211 [DEV_STATE_DISBAND_PGID] = { 988 [DEV_STATE_DISBAND_PGID] = {
1212 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 989 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1213 [DEV_EVENT_INTERRUPT] = ccw_device_disband_irq, 990 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1214 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, 991 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1215 [DEV_EVENT_VERIFY] = ccw_device_nop, 992 [DEV_EVENT_VERIFY] = ccw_device_nop,
1216 }, 993 },
1217 [DEV_STATE_BOXED] = { 994 [DEV_STATE_BOXED] = {
1218 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 995 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1219 [DEV_EVENT_INTERRUPT] = ccw_device_stlck_done, 996 [DEV_EVENT_INTERRUPT] = ccw_device_nop,
1220 [DEV_EVENT_TIMEOUT] = ccw_device_stlck_done,
1221 [DEV_EVENT_VERIFY] = ccw_device_nop,
1222 },
1223 /* states to wait for i/o completion before doing something */
1224 [DEV_STATE_CLEAR_VERIFY] = {
1225 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1226 [DEV_EVENT_INTERRUPT] = ccw_device_clear_verify,
1227 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 997 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1228 [DEV_EVENT_VERIFY] = ccw_device_nop, 998 [DEV_EVENT_VERIFY] = ccw_device_boxed_verify,
1229 }, 999 },
1000 /* states to wait for i/o completion before doing something */
1230 [DEV_STATE_TIMEOUT_KILL] = { 1001 [DEV_STATE_TIMEOUT_KILL] = {
1231 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1002 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1232 [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq, 1003 [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
@@ -1243,13 +1014,13 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1243 [DEV_STATE_DISCONNECTED] = { 1014 [DEV_STATE_DISCONNECTED] = {
1244 [DEV_EVENT_NOTOPER] = ccw_device_nop, 1015 [DEV_EVENT_NOTOPER] = ccw_device_nop,
1245 [DEV_EVENT_INTERRUPT] = ccw_device_start_id, 1016 [DEV_EVENT_INTERRUPT] = ccw_device_start_id,
1246 [DEV_EVENT_TIMEOUT] = ccw_device_bug, 1017 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1247 [DEV_EVENT_VERIFY] = ccw_device_start_id, 1018 [DEV_EVENT_VERIFY] = ccw_device_start_id,
1248 }, 1019 },
1249 [DEV_STATE_DISCONNECTED_SENSE_ID] = { 1020 [DEV_STATE_DISCONNECTED_SENSE_ID] = {
1250 [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper, 1021 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1251 [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq, 1022 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1252 [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout, 1023 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1253 [DEV_EVENT_VERIFY] = ccw_device_nop, 1024 [DEV_EVENT_VERIFY] = ccw_device_nop,
1254 }, 1025 },
1255 [DEV_STATE_CMFCHANGE] = { 1026 [DEV_STATE_CMFCHANGE] = {
@@ -1264,6 +1035,12 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1264 [DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock, 1035 [DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
1265 [DEV_EVENT_VERIFY] = ccw_device_update_cmfblock, 1036 [DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
1266 }, 1037 },
1038 [DEV_STATE_STEAL_LOCK] = {
1039 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1040 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1041 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1042 [DEV_EVENT_VERIFY] = ccw_device_nop,
1043 },
1267}; 1044};
1268 1045
1269EXPORT_SYMBOL_GPL(ccw_device_set_timeout); 1046EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
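/*
 * Editor's sketch (not part of the patch): dev_jumptable is a
 * state x event table of function pointers. The dispatcher itself
 * (dev_fsm_event) is defined outside this diff; it is assumed to do a
 * single table-indexed call. A self-contained analogue:
 */
#include <stdio.h>

enum state { ST_IDLE, ST_BUSY, NR_STATES };
enum event { EV_GO, EV_STOP, NR_EVENTS };

struct ctx { enum state state; };

typedef void fsm_func_t(struct ctx *, enum event);

static void nop(struct ctx *c, enum event e)   { (void)c; (void)e; }
static void start(struct ctx *c, enum event e) { (void)e; c->state = ST_BUSY; }
static void stop(struct ctx *c, enum event e)  { (void)e; c->state = ST_IDLE; }

static fsm_func_t *jumptable[NR_STATES][NR_EVENTS] = {
	[ST_IDLE] = { [EV_GO] = start, [EV_STOP] = nop  },
	[ST_BUSY] = { [EV_GO] = nop,   [EV_STOP] = stop },
};

static void fsm_event(struct ctx *c, enum event e)
{
	jumptable[c->state][e](c, e);	/* one indirect call, no if-chains */
}

int main(void)
{
	struct ctx c = { ST_IDLE };

	fsm_event(&c, EV_GO);
	printf("state = %d\n", c.state);	/* 1, ST_BUSY */
	return 0;
}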
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 1bdaa614e34f..78a0b43862c5 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -1,40 +1,39 @@
1/* 1/*
2 * drivers/s390/cio/device_id.c 2 * CCW device SENSE ID I/O handling.
3 * 3 *
4 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 4 * Copyright IBM Corp. 2002,2009
5 * IBM Corporation 5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
8 *
9 * Sense ID functions.
10 */ 8 */
11 9
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/kernel.h> 10#include <linux/kernel.h>
15 11#include <linux/string.h>
12#include <linux/types.h>
13#include <linux/errno.h>
16#include <asm/ccwdev.h> 14#include <asm/ccwdev.h>
17#include <asm/delay.h> 15#include <asm/setup.h>
18#include <asm/cio.h> 16#include <asm/cio.h>
19#include <asm/lowcore.h>
20#include <asm/diag.h> 17#include <asm/diag.h>
21 18
22#include "cio.h" 19#include "cio.h"
23#include "cio_debug.h" 20#include "cio_debug.h"
24#include "css.h"
25#include "device.h" 21#include "device.h"
26#include "ioasm.h"
27#include "io_sch.h" 22#include "io_sch.h"
28 23
24#define SENSE_ID_RETRIES 256
25#define SENSE_ID_TIMEOUT (10 * HZ)
26#define SENSE_ID_MIN_LEN 4
27#define SENSE_ID_BASIC_LEN 7
28
29/** 29/**
30 * vm_vdev_to_cu_type - Convert vm virtual device into control unit type 30 * diag210_to_senseid - convert diag 0x210 data to sense id information
31 * for certain devices. 31 * @senseid: sense id
32 * @class: virtual device class 32 * @diag: diag 0x210 data
33 * @type: virtual device type
34 * 33 *
35 * Returns control unit type if a match was made or %0xffff otherwise. 34 * Return 0 on success, non-zero otherwise.
36 */ 35 */
37static int vm_vdev_to_cu_type(int class, int type) 36static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
38{ 37{
39 static struct { 38 static struct {
40 int class, type, cu_type; 39 int class, type, cu_type;
@@ -71,253 +70,153 @@ static int vm_vdev_to_cu_type(int class, int type)
71 }; 70 };
72 int i; 71 int i;
73 72
74 for (i = 0; i < ARRAY_SIZE(vm_devices); i++) 73 /* Special case for osa devices. */
75 if (class == vm_devices[i].class && type == vm_devices[i].type) 74 if (diag->vrdcvcla == 0x02 && diag->vrdcvtyp == 0x20) {
76 return vm_devices[i].cu_type; 75 senseid->cu_type = 0x3088;
76 senseid->cu_model = 0x60;
77 senseid->reserved = 0xff;
78 return 0;
79 }
80 for (i = 0; i < ARRAY_SIZE(vm_devices); i++) {
81 if (diag->vrdcvcla == vm_devices[i].class &&
82 diag->vrdcvtyp == vm_devices[i].type) {
83 senseid->cu_type = vm_devices[i].cu_type;
84 senseid->reserved = 0xff;
85 return 0;
86 }
87 }
77 88
78 return 0xffff; 89 return -ENODEV;
79} 90}
80 91
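/*
 * Editor's sketch (not part of the patch): diag210_to_senseid() is a
 * table lookup with one hard-coded special case checked first. The
 * pattern, self-contained; the table entries here are illustrative
 * only (the real vm_devices table is elided from this hunk):
 */
#include <stdio.h>

struct mapping { int class, type, cu_type; };

static const struct mapping table[] = {
	{ 0x08, 0x01, 0x3480 },	/* made-up entries for illustration */
	{ 0x08, 0x02, 0x3490 },
};

static int lookup_cu_type(int class, int type, int *cu_type)
{
	unsigned int i;

	if (class == 0x02 && type == 0x20) {	/* the osa special case */
		*cu_type = 0x3088;
		return 0;
	}
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].class == class && table[i].type == type) {
			*cu_type = table[i].cu_type;
			return 0;
		}
	}
	return -1;	/* unknown device */
}

int main(void)
{
	int cu;

	if (lookup_cu_type(0x02, 0x20, &cu) == 0)
		printf("cu_type=%04x\n", cu);	/* 3088 */
	return 0;
}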
81/** 92/**
82 * diag_get_dev_info - retrieve device information via DIAG X'210' 93 * diag_get_dev_info - retrieve device information via diag 0x210
83 * @devno: device number 94 * @cdev: ccw device
84 * @ps: pointer to sense ID data area
85 * 95 *
86 * Returns zero on success, non-zero otherwise. 96 * Returns zero on success, non-zero otherwise.
87 */ 97 */
88static int diag_get_dev_info(u16 devno, struct senseid *ps) 98static int diag210_get_dev_info(struct ccw_device *cdev)
89{ 99{
100 struct ccw_dev_id *dev_id = &cdev->private->dev_id;
101 struct senseid *senseid = &cdev->private->senseid;
90 struct diag210 diag_data; 102 struct diag210 diag_data;
91 int ccode; 103 int rc;
92 104
93 CIO_TRACE_EVENT (4, "VMvdinf"); 105 if (dev_id->ssid != 0)
94 106 return -ENODEV;
95 diag_data = (struct diag210) { 107 memset(&diag_data, 0, sizeof(diag_data));
96 .vrdcdvno = devno, 108 diag_data.vrdcdvno = dev_id->devno;
97 .vrdclen = sizeof (diag_data), 109 diag_data.vrdclen = sizeof(diag_data);
98 }; 110 rc = diag210(&diag_data);
99 111 CIO_TRACE_EVENT(4, "diag210");
100 ccode = diag210 (&diag_data); 112 CIO_HEX_EVENT(4, &rc, sizeof(rc));
101 if ((ccode == 0) || (ccode == 2)) { 113 CIO_HEX_EVENT(4, &diag_data, sizeof(diag_data));
102 ps->reserved = 0xff; 114 if (rc != 0 && rc != 2)
103 115 goto err_failed;
104 /* Special case for osa devices. */ 116 if (diag210_to_senseid(senseid, &diag_data))
105 if (diag_data.vrdcvcla == 0x02 && diag_data.vrdcvtyp == 0x20) { 117 goto err_unknown;
106 ps->cu_type = 0x3088; 118 return 0;
107 ps->cu_model = 0x60; 119
108 return 0; 120err_unknown:
109 } 121 CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: unknown diag210 data\n",
110 ps->cu_type = vm_vdev_to_cu_type(diag_data.vrdcvcla, 122 dev_id->ssid, dev_id->devno);
111 diag_data.vrdcvtyp); 123 return -ENODEV;
112 if (ps->cu_type != 0xffff) 124err_failed:
113 return 0; 125 CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: diag210 failed (rc=%d)\n",
114 } 126 dev_id->ssid, dev_id->devno, rc);
115
116 CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d):"
117 "vdev class : %02X, vdev type : %04X \n ... "
118 "rdev class : %02X, rdev type : %04X, "
119 "rdev model: %02X\n",
120 devno, ccode,
121 diag_data.vrdcvcla, diag_data.vrdcvtyp,
122 diag_data.vrdcrccl, diag_data.vrdccrty,
123 diag_data.vrdccrmd);
124
125 return -ENODEV; 127 return -ENODEV;
126} 128}
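
The rewritten diag210_get_dev_info refuses devices outside subchannel set 0 up front (diag 0x210 identifies devices by number alone) and funnels its two failure modes through goto labels, so each failure gets exactly one log message and a single -ENODEV exit. A generic sketch of that error-label idiom, with hypothetical names:

#include <stdio.h>
#include <errno.h>

static int get_dev_info(int ssid, int devno, int hw_rc, int convertible)
{
	if (ssid != 0)
		return -ENODEV;		/* interface cannot address this device */
	if (hw_rc != 0 && hw_rc != 2)
		goto err_failed;	/* the call itself failed */
	if (!convertible)
		goto err_unknown;	/* call worked, data not usable */
	return 0;

err_unknown:
	fprintf(stderr, "device 0.%x.%04x: unknown data\n", ssid, devno);
	return -ENODEV;
err_failed:
	fprintf(stderr, "device 0.%x.%04x: call failed (rc=%d)\n",
		ssid, devno, hw_rc);
	return -ENODEV;
}
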
127 129
128/* 130/*
129 * Start Sense ID helper function. 131 * Initialize SENSE ID data.
130 * Try to obtain the 'control unit'/'device type' information
131 * associated with the subchannel.
132 */ 132 */
133static int 133static void snsid_init(struct ccw_device *cdev)
134__ccw_device_sense_id_start(struct ccw_device *cdev)
135{
136 struct subchannel *sch;
137 struct ccw1 *ccw;
138 int ret;
139
140 sch = to_subchannel(cdev->dev.parent);
141 /* Setup sense channel program. */
142 ccw = cdev->private->iccws;
143 ccw->cmd_code = CCW_CMD_SENSE_ID;
144 ccw->cda = (__u32) __pa (&cdev->private->senseid);
145 ccw->count = sizeof (struct senseid);
146 ccw->flags = CCW_FLAG_SLI;
147
148 /* Reset device status. */
149 memset(&cdev->private->irb, 0, sizeof(struct irb));
150
151 /* Try on every path. */
152 ret = -ENODEV;
153 while (cdev->private->imask != 0) {
154 cdev->private->senseid.cu_type = 0xFFFF;
155 if ((sch->opm & cdev->private->imask) != 0 &&
156 cdev->private->iretry > 0) {
157 cdev->private->iretry--;
158 /* Reset internal retry indication. */
159 cdev->private->flags.intretry = 0;
160 ret = cio_start (sch, cdev->private->iccws,
161 cdev->private->imask);
162 /* ret is 0, -EBUSY, -EACCES or -ENODEV */
163 if (ret != -EACCES)
164 return ret;
165 }
166 cdev->private->imask >>= 1;
167 cdev->private->iretry = 5;
168 }
169 return ret;
170}
171
172void
173ccw_device_sense_id_start(struct ccw_device *cdev)
174{ 134{
175 int ret; 135 cdev->private->flags.esid = 0;
176 136 memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
177 memset (&cdev->private->senseid, 0, sizeof (struct senseid)); 137 cdev->private->senseid.cu_type = 0xffff;
178 cdev->private->imask = 0x80;
179 cdev->private->iretry = 5;
180 ret = __ccw_device_sense_id_start(cdev);
181 if (ret && ret != -EBUSY)
182 ccw_device_sense_id_done(cdev, ret);
183} 138}
184 139
185/* 140/*
186 * Called from interrupt context to check if a valid answer 141 * Check for complete SENSE ID data.
187 * to Sense ID was received.
188 */ 142 */
189static int 143static int snsid_check(struct ccw_device *cdev, void *data)
190ccw_device_check_sense_id(struct ccw_device *cdev)
191{ 144{
192 struct subchannel *sch; 145 struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
193 struct irb *irb; 146 int len = sizeof(struct senseid) - scsw->count;
194 147
195 sch = to_subchannel(cdev->dev.parent); 148 /* Check for incomplete SENSE ID data. */
196 irb = &cdev->private->irb; 149 if (len < SENSE_ID_MIN_LEN)
197 150 goto out_restart;
198 /* Check the error cases. */ 151 if (cdev->private->senseid.cu_type == 0xffff)
199 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 152 goto out_restart;
200 /* Retry Sense ID if requested. */ 153 /* Check for incompatible SENSE ID data. */
201 if (cdev->private->flags.intretry) { 154 if (cdev->private->senseid.reserved != 0xff)
202 cdev->private->flags.intretry = 0;
203 return -EAGAIN;
204 }
205 return -ETIME;
206 }
207 if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) {
208 /*
209 * if the device doesn't support the SenseID
210 * command further retries wouldn't help ...
211 * NB: We don't check here for intervention required like we
212 * did before, because tape devices with no tape inserted
213 * may present this status *in conjunction with* the
214 * sense id information. So, for intervention required,
215 * we use the "whack it until it talks" strategy...
216 */
217 CIO_MSG_EVENT(0, "SenseID : device %04x on Subchannel "
218 "0.%x.%04x reports cmd reject\n",
219 cdev->private->dev_id.devno, sch->schid.ssid,
220 sch->schid.sch_no);
221 return -EOPNOTSUPP; 155 return -EOPNOTSUPP;
222 } 156 /* Check for extended-identification information. */
223 if (irb->esw.esw0.erw.cons) { 157 if (len > SENSE_ID_BASIC_LEN)
224 CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, " 158 cdev->private->flags.esid = 1;
225 "lpum %02X, cnt %02d, sns :" 159 return 0;
226 " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
227 cdev->private->dev_id.ssid,
228 cdev->private->dev_id.devno,
229 irb->esw.esw0.sublog.lpum,
230 irb->esw.esw0.erw.scnt,
231 irb->ecw[0], irb->ecw[1],
232 irb->ecw[2], irb->ecw[3],
233 irb->ecw[4], irb->ecw[5],
234 irb->ecw[6], irb->ecw[7]);
235 return -EAGAIN;
236 }
237 if (irb->scsw.cmd.cc == 3) {
238 u8 lpm;
239 160
240 lpm = to_io_private(sch)->orb.cmd.lpm; 161out_restart:
241 if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) 162 snsid_init(cdev);
242 CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x "
243 "on subchannel 0.%x.%04x is "
244 "'not operational'\n", lpm,
245 cdev->private->dev_id.devno,
246 sch->schid.ssid, sch->schid.sch_no);
247 return -EACCES;
248 }
249
 250 /* Did we get a proper answer? */
251 if (irb->scsw.cmd.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF &&
252 cdev->private->senseid.reserved == 0xFF) {
253 if (irb->scsw.cmd.count < sizeof(struct senseid) - 8)
254 cdev->private->flags.esid = 1;
255 return 0; /* Success */
256 }
257
258 /* Hmm, whatever happened, try again. */
259 CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
260 "subchannel 0.%x.%04x returns status %02X%02X\n",
261 cdev->private->dev_id.devno, sch->schid.ssid,
262 sch->schid.sch_no,
263 irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
264 return -EAGAIN; 163 return -EAGAIN;
265} 164}
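
snsid_check recovers the amount of SENSE ID data actually transferred from the SCSW residual count: the channel counts scsw->count down from the requested length, so sizeof(struct senseid) - scsw->count is the number of bytes received. Fewer than SENSE_ID_MIN_LEN bytes means no usable control-unit type yet; more than SENSE_ID_BASIC_LEN means the extended part (the CIW list) arrived and the esid flag can be set. A small user-space sketch of that classification:

#include <stdio.h>

#define SENSE_ID_MIN_LEN   4
#define SENSE_ID_BASIC_LEN 7

/* Classify a SENSE ID transfer from buffer size and SCSW residual count. */
static const char *classify(unsigned int bufsize, unsigned int residual)
{
	unsigned int len = bufsize - residual;	/* bytes actually received */

	if (len < SENSE_ID_MIN_LEN)
		return "incomplete - restart";
	if (len > SENSE_ID_BASIC_LEN)
		return "basic + extended (set esid)";
	return "basic only";
}

int main(void)
{
	printf("%s\n", classify(256, 256));	/* nothing arrived */
	printf("%s\n", classify(256, 249));	/* 7 bytes: basic data */
	printf("%s\n", classify(256, 200));	/* 56 bytes: extended data */
	return 0;
}
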
266 165
267/* 166/*
268 * Got interrupt for Sense ID. 167 * Process SENSE ID request result.
269 */ 168 */
270void 169static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
271ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
272{ 170{
273 struct subchannel *sch; 171 struct ccw_dev_id *id = &cdev->private->dev_id;
274 struct irb *irb; 172 struct senseid *senseid = &cdev->private->senseid;
275 int ret; 173 int vm = 0;
276 174
277 sch = to_subchannel(cdev->dev.parent); 175 if (rc && MACHINE_IS_VM) {
278 irb = (struct irb *) __LC_IRB; 176 /* Try diag 0x210 fallback on z/VM. */
279 /* Retry sense id, if needed. */ 177 snsid_init(cdev);
280 if (irb->scsw.cmd.stctl == 178 if (diag210_get_dev_info(cdev) == 0) {
281 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 179 rc = 0;
282 if ((irb->scsw.cmd.cc == 1) || !irb->scsw.cmd.actl) { 180 vm = 1;
283 ret = __ccw_device_sense_id_start(cdev);
284 if (ret && ret != -EBUSY)
285 ccw_device_sense_id_done(cdev, ret);
286 } 181 }
287 return;
288 } 182 }
289 if (ccw_device_accumulate_and_sense(cdev, irb) != 0) 183 CIO_MSG_EVENT(2, "snsid: device 0.%x.%04x: rc=%d %04x/%02x "
290 return; 184 "%04x/%02x%s\n", id->ssid, id->devno, rc,
291 ret = ccw_device_check_sense_id(cdev); 185 senseid->cu_type, senseid->cu_model, senseid->dev_type,
292 memset(&cdev->private->irb, 0, sizeof(struct irb)); 186 senseid->dev_model, vm ? " (diag210)" : "");
293 switch (ret) { 187 ccw_device_sense_id_done(cdev, rc);
294 /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN or -EACCES */ 188}
295 case 0: /* Sense id succeeded. */
296 case -ETIME: /* Sense id stopped by timeout. */
297 ccw_device_sense_id_done(cdev, ret);
298 break;
299 case -EACCES: /* channel is not operational. */
300 sch->lpm &= ~cdev->private->imask;
301 cdev->private->imask >>= 1;
302 cdev->private->iretry = 5;
303 /* fall through. */
304 case -EAGAIN: /* try again. */
305 ret = __ccw_device_sense_id_start(cdev);
306 if (ret == 0 || ret == -EBUSY)
307 break;
308 /* fall through. */
309 default: /* Sense ID failed. Try asking VM. */
310 if (MACHINE_IS_VM)
311 ret = diag_get_dev_info(cdev->private->dev_id.devno,
312 &cdev->private->senseid);
313 else
314 /*
 315 * If we couldn't identify the device type we
316 * consider the device "not operational".
317 */
318 ret = -ENODEV;
319 189
320 ccw_device_sense_id_done(cdev, ret); 190/**
321 break; 191 * ccw_device_sense_id_start - perform SENSE ID
322 } 192 * @cdev: ccw device
193 *
194 * Execute a SENSE ID channel program on @cdev to update its sense id
195 * information. When finished, call ccw_device_sense_id_done with a
196 * return code specifying the result.
197 */
198void ccw_device_sense_id_start(struct ccw_device *cdev)
199{
200 struct subchannel *sch = to_subchannel(cdev->dev.parent);
201 struct ccw_request *req = &cdev->private->req;
202 struct ccw1 *cp = cdev->private->iccws;
203
204 CIO_TRACE_EVENT(4, "snsid");
205 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
206 /* Data setup. */
207 snsid_init(cdev);
208 /* Channel program setup. */
209 cp->cmd_code = CCW_CMD_SENSE_ID;
210 cp->cda = (u32) (addr_t) &cdev->private->senseid;
211 cp->count = sizeof(struct senseid);
212 cp->flags = CCW_FLAG_SLI;
213 /* Request setup. */
214 memset(req, 0, sizeof(*req));
215 req->cp = cp;
216 req->timeout = SENSE_ID_TIMEOUT;
217 req->maxretries = SENSE_ID_RETRIES;
218 req->lpm = sch->schib.pmcw.pam & sch->opm;
219 req->check = snsid_check;
220 req->callback = snsid_callback;
221 ccw_request_start(cdev);
323} 222}
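
Taken as a whole, the new device_id.c reduces SENSE ID to: reset the data area, build a one-CCW channel program, fill the shared per-device request with timeout, retry budget, candidate path mask and callbacks, and hand it to the generic request engine. A rough mirror of that setup sequence with a simplified request type; struct ccw_request itself appears in the io_sch.h hunk further down, and the constants here only stand in for the real ones:

#include <string.h>

/* Simplified mirror of the request setup in ccw_device_sense_id_start(). */
struct req_sketch {
	void *cp;			/* channel program */
	unsigned long timeout;		/* per-I/O timeout */
	unsigned short maxretries;	/* retry budget per path */
	unsigned char lpm;		/* candidate path mask */
	int  (*check)(void *cdev, void *data);
	void (*callback)(void *cdev, void *data, int rc);
};

static void sense_id_setup(struct req_sketch *req, void *cp,
			   unsigned char pam, unsigned char opm)
{
	memset(req, 0, sizeof(*req));
	req->cp = cp;
	req->timeout = 10 * 100;	/* stand-in for SENSE_ID_TIMEOUT */
	req->maxretries = 256;		/* stand-in for SENSE_ID_RETRIES */
	req->lpm = pam & opm;		/* only paths both available (pam)
					 * and operational (opm) */
	/* check/callback would point at snsid_check()/snsid_callback() */
}
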
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 2d0efee8a290..6da84543dfe9 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -11,6 +11,7 @@
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/device.h> 12#include <linux/device.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/completion.h>
14 15
15#include <asm/ccwdev.h> 16#include <asm/ccwdev.h>
16#include <asm/idals.h> 17#include <asm/idals.h>
@@ -46,6 +47,7 @@ int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
46 cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0; 47 cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
47 cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0; 48 cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
48 cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0; 49 cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
50 cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
49 return 0; 51 return 0;
50} 52}
51 53
@@ -74,6 +76,7 @@ int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
74 cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0; 76 cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
75 cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0; 77 cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
76 cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0; 78 cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
79 cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
77 return 0; 80 return 0;
78} 81}
79 82
@@ -90,9 +93,34 @@ void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
90 cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0; 93 cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
91 cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0; 94 cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
92 cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0; 95 cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
96 cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
93} 97}
94 98
95/** 99/**
100 * ccw_device_is_pathgroup - determine if paths to this device are grouped
101 * @cdev: ccw device
102 *
103 * Return non-zero if there is a path group, zero otherwise.
104 */
105int ccw_device_is_pathgroup(struct ccw_device *cdev)
106{
107 return cdev->private->flags.pgroup;
108}
109EXPORT_SYMBOL(ccw_device_is_pathgroup);
110
111/**
112 * ccw_device_is_multipath - determine if device is operating in multipath mode
113 * @cdev: ccw device
114 *
115 * Return non-zero if device is operating in multipath mode, zero otherwise.
116 */
117int ccw_device_is_multipath(struct ccw_device *cdev)
118{
119 return cdev->private->flags.mpath;
120}
121EXPORT_SYMBOL(ccw_device_is_multipath);
122
123/**
96 * ccw_device_clear() - terminate I/O request processing 124 * ccw_device_clear() - terminate I/O request processing
97 * @cdev: target ccw device 125 * @cdev: target ccw device
98 * @intparm: interruption parameter; value is only used if no I/O is 126 * @intparm: interruption parameter; value is only used if no I/O is
@@ -167,8 +195,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
167 return -EINVAL; 195 return -EINVAL;
168 if (cdev->private->state == DEV_STATE_NOT_OPER) 196 if (cdev->private->state == DEV_STATE_NOT_OPER)
169 return -ENODEV; 197 return -ENODEV;
170 if (cdev->private->state == DEV_STATE_VERIFY || 198 if (cdev->private->state == DEV_STATE_VERIFY) {
171 cdev->private->state == DEV_STATE_CLEAR_VERIFY) {
172 /* Remember to fake irb when finished. */ 199 /* Remember to fake irb when finished. */
173 if (!cdev->private->flags.fake_irb) { 200 if (!cdev->private->flags.fake_irb) {
174 cdev->private->flags.fake_irb = 1; 201 cdev->private->flags.fake_irb = 1;
@@ -478,74 +505,65 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
478 return sch->lpm; 505 return sch->lpm;
479} 506}
480 507
481/* 508struct stlck_data {
482 * Try to break the lock on a boxed device. 509 struct completion done;
483 */ 510 int rc;
484int 511};
485ccw_device_stlck(struct ccw_device *cdev)
486{
487 void *buf, *buf2;
488 unsigned long flags;
489 struct subchannel *sch;
490 int ret;
491 512
492 if (!cdev) 513void ccw_device_stlck_done(struct ccw_device *cdev, void *data, int rc)
493 return -ENODEV; 514{
515 struct stlck_data *sdata = data;
494 516
495 if (cdev->drv && !cdev->private->options.force) 517 sdata->rc = rc;
496 return -EINVAL; 518 complete(&sdata->done);
519}
497 520
498 sch = to_subchannel(cdev->dev.parent); 521/*
499 522 * Perform unconditional reserve + release.
500 CIO_TRACE_EVENT(2, "stl lock"); 523 */
501 CIO_TRACE_EVENT(2, dev_name(&cdev->dev)); 524int ccw_device_stlck(struct ccw_device *cdev)
525{
526 struct subchannel *sch = to_subchannel(cdev->dev.parent);
527 struct stlck_data data;
528 u8 *buffer;
529 int rc;
502 530
503 buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL); 531 /* Check if steal lock operation is valid for this device. */
504 if (!buf) 532 if (cdev->drv) {
505 return -ENOMEM; 533 if (!cdev->private->options.force)
506 buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL); 534 return -EINVAL;
507 if (!buf2) {
508 kfree(buf);
509 return -ENOMEM;
510 } 535 }
511 spin_lock_irqsave(sch->lock, flags); 536 buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
512 ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); 537 if (!buffer)
513 if (ret) 538 return -ENOMEM;
514 goto out_unlock; 539 init_completion(&data.done);
515 /* 540 data.rc = -EIO;
516 * Setup ccw. We chain an unconditional reserve and a release so we 541 spin_lock_irq(sch->lock);
517 * only break the lock. 542 rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
518 */ 543 if (rc)
519 cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
520 cdev->private->iccws[0].cda = (__u32) __pa(buf);
521 cdev->private->iccws[0].count = 32;
522 cdev->private->iccws[0].flags = CCW_FLAG_CC;
523 cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
524 cdev->private->iccws[1].cda = (__u32) __pa(buf2);
525 cdev->private->iccws[1].count = 32;
526 cdev->private->iccws[1].flags = 0;
527 ret = cio_start(sch, cdev->private->iccws, 0);
528 if (ret) {
529 cio_disable_subchannel(sch); //FIXME: return code?
530 goto out_unlock; 544 goto out_unlock;
545 /* Perform operation. */
 546 cdev->private->state = DEV_STATE_STEAL_LOCK;
547 ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
548 spin_unlock_irq(sch->lock);
549 /* Wait for operation to finish. */
550 if (wait_for_completion_interruptible(&data.done)) {
551 /* Got a signal. */
552 spin_lock_irq(sch->lock);
553 ccw_request_cancel(cdev);
554 spin_unlock_irq(sch->lock);
555 wait_for_completion(&data.done);
531 } 556 }
532 cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND; 557 rc = data.rc;
533 spin_unlock_irqrestore(sch->lock, flags); 558 /* Check results. */
534 wait_event(cdev->private->wait_q, 559 spin_lock_irq(sch->lock);
535 cdev->private->irb.scsw.cmd.actl == 0); 560 cio_disable_subchannel(sch);
536 spin_lock_irqsave(sch->lock, flags); 561 cdev->private->state = DEV_STATE_BOXED;
537 cio_disable_subchannel(sch); //FIXME: return code?
538 if ((cdev->private->irb.scsw.cmd.dstat !=
539 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
540 (cdev->private->irb.scsw.cmd.cstat != 0))
541 ret = -EIO;
542 /* Clear irb. */
543 memset(&cdev->private->irb, 0, sizeof(struct irb));
544out_unlock: 562out_unlock:
545 kfree(buf); 563 spin_unlock_irq(sch->lock);
546 kfree(buf2); 564 kfree(buffer);
547 spin_unlock_irqrestore(sch->lock, flags); 565
548 return ret; 566 return rc;
549} 567}
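
ccw_device_stlck is now a synchronous wrapper around the asynchronous request machinery: it arms a completion, starts the operation under the subchannel lock, waits interruptibly, and on a signal cancels the request and then waits once more so the callback can never fire after the on-stack completion is gone. A user-space approximation of the completion half with POSIX threads; struct completion is modeled here, not the kernel's:

#include <errno.h>
#include <pthread.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
	int rc;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
	c->rc = -EIO;			/* pessimistic default, as above */
}

/* Called from the asynchronous callback, like ccw_device_stlck_done(). */
static void complete_op(struct completion *c, int rc)
{
	pthread_mutex_lock(&c->lock);
	c->rc = rc;
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

/* Called from the synchronous wrapper; blocks until complete_op() runs.
 * The kernel variant additionally supports interruption by a signal,
 * after which it cancels the request and waits again uninterruptibly. */
static int wait_for_op(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
	return c->rc;
}
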
550 568
551void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) 569void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index fc5ca1dd52b3..aad188e43b4f 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -1,594 +1,561 @@
1/* 1/*
2 * drivers/s390/cio/device_pgid.c 2 * CCW device PGID and path verification I/O handling.
3 * 3 *
4 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 4 * Copyright IBM Corp. 2002,2009
5 * IBM Corporation 5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
8 *
9 * Path Group ID functions.
10 */ 8 */
11 9
12#include <linux/module.h> 10#include <linux/kernel.h>
13#include <linux/init.h> 11#include <linux/string.h>
14 12#include <linux/types.h>
13#include <linux/errno.h>
14#include <linux/bitops.h>
15#include <asm/ccwdev.h> 15#include <asm/ccwdev.h>
16#include <asm/cio.h> 16#include <asm/cio.h>
17#include <asm/delay.h>
18#include <asm/lowcore.h>
19 17
20#include "cio.h" 18#include "cio.h"
21#include "cio_debug.h" 19#include "cio_debug.h"
22#include "css.h"
23#include "device.h" 20#include "device.h"
24#include "ioasm.h"
25#include "io_sch.h" 21#include "io_sch.h"
26 22
23#define PGID_RETRIES 256
24#define PGID_TIMEOUT (10 * HZ)
25
27/* 26/*
28 * Helper function called from interrupt context to decide whether an 27 * Process path verification data and report result.
29 * operation should be tried again.
30 */ 28 */
31static int __ccw_device_should_retry(union scsw *scsw) 29static void verify_done(struct ccw_device *cdev, int rc)
32{ 30{
33 /* CC is only valid if start function bit is set. */ 31 struct subchannel *sch = to_subchannel(cdev->dev.parent);
34 if ((scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && scsw->cmd.cc == 1) 32 struct ccw_dev_id *id = &cdev->private->dev_id;
35 return 1; 33 int mpath = cdev->private->flags.mpath;
36 /* No more activity. For sense and set PGID we stubbornly try again. */ 34 int pgroup = cdev->private->flags.pgroup;
37 if (!scsw->cmd.actl) 35
38 return 1; 36 if (rc)
39 return 0; 37 goto out;
38 /* Ensure consistent multipathing state at device and channel. */
39 if (sch->config.mp != mpath) {
40 sch->config.mp = mpath;
41 rc = cio_commit_config(sch);
42 }
43out:
44 CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
45 "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
46 sch->vpm);
47 ccw_device_verify_done(cdev, rc);
40} 48}
41 49
42/* 50/*
43 * Start Sense Path Group ID helper function. Used in ccw_device_recog 51 * Create channel program to perform a NOOP.
44 * and ccw_device_sense_pgid.
45 */ 52 */
46static int 53static void nop_build_cp(struct ccw_device *cdev)
47__ccw_device_sense_pgid_start(struct ccw_device *cdev)
48{ 54{
49 struct subchannel *sch; 55 struct ccw_request *req = &cdev->private->req;
50 struct ccw1 *ccw; 56 struct ccw1 *cp = cdev->private->iccws;
51 int ret; 57
52 int i; 58 cp->cmd_code = CCW_CMD_NOOP;
53 59 cp->cda = 0;
54 sch = to_subchannel(cdev->dev.parent); 60 cp->count = 0;
55 /* Return if we already checked on all paths. */ 61 cp->flags = CCW_FLAG_SLI;
56 if (cdev->private->imask == 0) 62 req->cp = cp;
57 return (sch->lpm == 0) ? -ENODEV : -EACCES;
58 i = 8 - ffs(cdev->private->imask);
59
60 /* Setup sense path group id channel program. */
61 ccw = cdev->private->iccws;
62 ccw->cmd_code = CCW_CMD_SENSE_PGID;
63 ccw->count = sizeof (struct pgid);
64 ccw->flags = CCW_FLAG_SLI;
65
66 /* Reset device status. */
67 memset(&cdev->private->irb, 0, sizeof(struct irb));
68 /* Try on every path. */
69 ret = -ENODEV;
70 while (cdev->private->imask != 0) {
71 /* Try every path multiple times. */
72 ccw->cda = (__u32) __pa (&cdev->private->pgid[i]);
73 if (cdev->private->iretry > 0) {
74 cdev->private->iretry--;
75 /* Reset internal retry indication. */
76 cdev->private->flags.intretry = 0;
77 ret = cio_start (sch, cdev->private->iccws,
78 cdev->private->imask);
79 /* ret is 0, -EBUSY, -EACCES or -ENODEV */
80 if (ret != -EACCES)
81 return ret;
82 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel "
83 "0.%x.%04x, lpm %02X, became 'not "
84 "operational'\n",
85 cdev->private->dev_id.devno,
86 sch->schid.ssid,
87 sch->schid.sch_no, cdev->private->imask);
88
89 }
90 cdev->private->imask >>= 1;
91 cdev->private->iretry = 5;
92 i++;
93 }
94
95 return ret;
96} 63}
97 64
98void 65/*
99ccw_device_sense_pgid_start(struct ccw_device *cdev) 66 * Perform NOOP on a single path.
67 */
68static void nop_do(struct ccw_device *cdev)
100{ 69{
101 int ret; 70 struct subchannel *sch = to_subchannel(cdev->dev.parent);
102 71 struct ccw_request *req = &cdev->private->req;
103 /* Set a timeout of 60s */ 72
104 ccw_device_set_timeout(cdev, 60*HZ); 73 /* Adjust lpm. */
105 74 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm);
106 cdev->private->state = DEV_STATE_SENSE_PGID; 75 if (!req->lpm)
107 cdev->private->imask = 0x80; 76 goto out_nopath;
108 cdev->private->iretry = 5; 77 nop_build_cp(cdev);
109 memset (&cdev->private->pgid, 0, sizeof (cdev->private->pgid)); 78 ccw_request_start(cdev);
110 ret = __ccw_device_sense_pgid_start(cdev); 79 return;
111 if (ret && ret != -EBUSY) 80
112 ccw_device_sense_pgid_done(cdev, ret); 81out_nopath:
82 verify_done(cdev, sch->vpm ? 0 : -EACCES);
113} 83}
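
nop_do walks the channel paths from the most significant mask bit downward: lpm_adjust (defined elsewhere in this series) moves the single-bit cursor right until it lands on a path present in the given mask, and a cursor of zero means every path has been tried. A plausible user-space model of that helper and of the shift-and-retry loop the callbacks implement, assuming lpm_adjust's semantics from its uses here:

#include <stdio.h>

/* Move the single-bit path cursor right until it hits a usable path, or 0. */
static unsigned char lpm_adjust(unsigned char lpm, unsigned char mask)
{
	while (lpm && !(lpm & mask))
		lpm >>= 1;
	return lpm;
}

int main(void)
{
	unsigned char usable = 0x8a;	/* paths 0, 4 and 6 usable */
	unsigned char lpm;

	/* Per-path loop: do one I/O, then shift right and re-adjust,
	 * mirroring nop_callback()'s "req->lpm >>= 1; nop_do()" step. */
	for (lpm = lpm_adjust(0x80, usable); lpm;
	     lpm = lpm_adjust(lpm >> 1, usable))
		printf("trying path mask %02x\n", lpm);
	return 0;
}
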
114 84
115/* 85/*
116 * Called from interrupt context to check if a valid answer 86 * Adjust NOOP I/O status.
117 * to Sense Path Group ID was received.
118 */ 87 */
119static int 88static enum io_status nop_filter(struct ccw_device *cdev, void *data,
120__ccw_device_check_sense_pgid(struct ccw_device *cdev) 89 struct irb *irb, enum io_status status)
121{ 90{
122 struct subchannel *sch; 91 /* Only subchannel status might indicate a path error. */
123 struct irb *irb; 92 if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
124 int i; 93 return IO_DONE;
125 94 return status;
126 sch = to_subchannel(cdev->dev.parent);
127 irb = &cdev->private->irb;
128 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
129 /* Retry Sense PGID if requested. */
130 if (cdev->private->flags.intretry) {
131 cdev->private->flags.intretry = 0;
132 return -EAGAIN;
133 }
134 return -ETIME;
135 }
136 if (irb->esw.esw0.erw.cons &&
137 (irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) {
138 /*
139 * If the device doesn't support the Sense Path Group ID
140 * command further retries wouldn't help ...
141 */
142 return -EOPNOTSUPP;
143 }
144 if (irb->esw.esw0.erw.cons) {
145 CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, "
146 "lpum %02X, cnt %02d, sns : "
147 "%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
148 cdev->private->dev_id.ssid,
149 cdev->private->dev_id.devno,
150 irb->esw.esw0.sublog.lpum,
151 irb->esw.esw0.erw.scnt,
152 irb->ecw[0], irb->ecw[1],
153 irb->ecw[2], irb->ecw[3],
154 irb->ecw[4], irb->ecw[5],
155 irb->ecw[6], irb->ecw[7]);
156 return -EAGAIN;
157 }
158 if (irb->scsw.cmd.cc == 3) {
159 u8 lpm;
160
161 lpm = to_io_private(sch)->orb.cmd.lpm;
162 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x,"
163 " lpm %02X, became 'not operational'\n",
164 cdev->private->dev_id.devno, sch->schid.ssid,
165 sch->schid.sch_no, lpm);
166 return -EACCES;
167 }
168 i = 8 - ffs(cdev->private->imask);
169 if (cdev->private->pgid[i].inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
170 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x "
171 "is reserved by someone else\n",
172 cdev->private->dev_id.devno, sch->schid.ssid,
173 sch->schid.sch_no);
174 return -EUSERS;
175 }
176 return 0;
177} 95}
178 96
179/* 97/*
180 * Got interrupt for Sense Path Group ID. 98 * Process NOOP request result for a single path.
181 */ 99 */
182void 100static void nop_callback(struct ccw_device *cdev, void *data, int rc)
183ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
184{ 101{
185 struct subchannel *sch; 102 struct subchannel *sch = to_subchannel(cdev->dev.parent);
186 struct irb *irb; 103 struct ccw_request *req = &cdev->private->req;
187 int ret; 104
188 105 if (rc == 0)
189 irb = (struct irb *) __LC_IRB; 106 sch->vpm |= req->lpm;
190 107 else if (rc != -EACCES)
191 if (irb->scsw.cmd.stctl == 108 goto err;
192 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 109 req->lpm >>= 1;
193 if (__ccw_device_should_retry(&irb->scsw)) { 110 nop_do(cdev);
194 ret = __ccw_device_sense_pgid_start(cdev); 111 return;
195 if (ret && ret != -EBUSY) 112
196 ccw_device_sense_pgid_done(cdev, ret); 113err:
197 } 114 verify_done(cdev, rc);
198 return;
199 }
200 if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
201 return;
202 sch = to_subchannel(cdev->dev.parent);
203 ret = __ccw_device_check_sense_pgid(cdev);
204 memset(&cdev->private->irb, 0, sizeof(struct irb));
205 switch (ret) {
206 /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
207 case -EOPNOTSUPP: /* Sense Path Group ID not supported */
208 ccw_device_sense_pgid_done(cdev, -EOPNOTSUPP);
209 break;
210 case -ETIME: /* Sense path group id stopped by timeout. */
211 ccw_device_sense_pgid_done(cdev, -ETIME);
212 break;
213 case -EACCES: /* channel is not operational. */
214 sch->lpm &= ~cdev->private->imask;
215 /* Fall through. */
216 case 0: /* Sense Path Group ID successful. */
217 cdev->private->imask >>= 1;
218 cdev->private->iretry = 5;
219 /* Fall through. */
220 case -EAGAIN: /* Try again. */
221 ret = __ccw_device_sense_pgid_start(cdev);
222 if (ret != 0 && ret != -EBUSY)
223 ccw_device_sense_pgid_done(cdev, ret);
224 break;
225 case -EUSERS: /* device is reserved for someone else. */
226 ccw_device_sense_pgid_done(cdev, -EUSERS);
227 break;
228 }
229} 115}
230 116
231/* 117/*
232 * Path Group ID helper function. 118 * Create channel program to perform SET PGID on a single path.
233 */ 119 */
234static int 120static void spid_build_cp(struct ccw_device *cdev, u8 fn)
235__ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
236{ 121{
237 struct subchannel *sch; 122 struct ccw_request *req = &cdev->private->req;
238 struct ccw1 *ccw; 123 struct ccw1 *cp = cdev->private->iccws;
239 int ret; 124 int i = 8 - ffs(req->lpm);
240 125 struct pgid *pgid = &cdev->private->pgid[i];
241 sch = to_subchannel(cdev->dev.parent); 126
242 127 pgid->inf.fc = fn;
243 /* Setup sense path group id channel program. */ 128 cp->cmd_code = CCW_CMD_SET_PGID;
244 cdev->private->pgid[0].inf.fc = func; 129 cp->cda = (u32) (addr_t) pgid;
245 ccw = cdev->private->iccws; 130 cp->count = sizeof(*pgid);
246 if (cdev->private->flags.pgid_single) 131 cp->flags = CCW_FLAG_SLI;
247 cdev->private->pgid[0].inf.fc |= SPID_FUNC_SINGLE_PATH; 132 req->cp = cp;
248 else
249 cdev->private->pgid[0].inf.fc |= SPID_FUNC_MULTI_PATH;
250 ccw->cmd_code = CCW_CMD_SET_PGID;
251 ccw->cda = (__u32) __pa (&cdev->private->pgid[0]);
252 ccw->count = sizeof (struct pgid);
253 ccw->flags = CCW_FLAG_SLI;
254
255 /* Reset device status. */
256 memset(&cdev->private->irb, 0, sizeof(struct irb));
257
258 /* Try multiple times. */
259 ret = -EACCES;
260 if (cdev->private->iretry > 0) {
261 cdev->private->iretry--;
262 /* Reset internal retry indication. */
263 cdev->private->flags.intretry = 0;
264 ret = cio_start (sch, cdev->private->iccws,
265 cdev->private->imask);
266 /* We expect an interrupt in case of success or busy
267 * indication. */
268 if ((ret == 0) || (ret == -EBUSY))
269 return ret;
270 }
271 /* PGID command failed on this path. */
272 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel "
273 "0.%x.%04x, lpm %02X, became 'not operational'\n",
274 cdev->private->dev_id.devno, sch->schid.ssid,
275 sch->schid.sch_no, cdev->private->imask);
276 return ret;
277} 133}
278 134
279/* 135/*
280 * Helper function to send a nop ccw down a path. 136 * Perform establish/resign SET PGID on a single path.
281 */ 137 */
282static int __ccw_device_do_nop(struct ccw_device *cdev) 138static void spid_do(struct ccw_device *cdev)
283{ 139{
284 struct subchannel *sch; 140 struct subchannel *sch = to_subchannel(cdev->dev.parent);
285 struct ccw1 *ccw; 141 struct ccw_request *req = &cdev->private->req;
286 int ret; 142 u8 fn;
287 143
288 sch = to_subchannel(cdev->dev.parent); 144 /* Use next available path that is not already in correct state. */
289 145 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & ~sch->vpm);
290 /* Setup nop channel program. */ 146 if (!req->lpm)
291 ccw = cdev->private->iccws; 147 goto out_nopath;
292 ccw->cmd_code = CCW_CMD_NOOP; 148 /* Channel program setup. */
293 ccw->cda = 0; 149 if (req->lpm & sch->opm)
294 ccw->count = 0; 150 fn = SPID_FUNC_ESTABLISH;
295 ccw->flags = CCW_FLAG_SLI; 151 else
296 152 fn = SPID_FUNC_RESIGN;
297 /* Reset device status. */ 153 if (cdev->private->flags.mpath)
298 memset(&cdev->private->irb, 0, sizeof(struct irb)); 154 fn |= SPID_FUNC_MULTI_PATH;
299 155 spid_build_cp(cdev, fn);
300 /* Try multiple times. */ 156 ccw_request_start(cdev);
301 ret = -EACCES; 157 return;
302 if (cdev->private->iretry > 0) { 158
303 cdev->private->iretry--; 159out_nopath:
304 /* Reset internal retry indication. */ 160 verify_done(cdev, sch->vpm ? 0 : -EACCES);
305 cdev->private->flags.intretry = 0;
306 ret = cio_start (sch, cdev->private->iccws,
307 cdev->private->imask);
308 /* We expect an interrupt in case of success or busy
309 * indication. */
310 if ((ret == 0) || (ret == -EBUSY))
311 return ret;
312 }
313 /* nop command failed on this path. */
314 CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel "
315 "0.%x.%04x, lpm %02X, became 'not operational'\n",
316 cdev->private->dev_id.devno, sch->schid.ssid,
317 sch->schid.sch_no, cdev->private->imask);
318 return ret;
319} 161}
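
spid_do picks the SET PGID function code per path: paths inside the operational mask get ESTABLISH, paths outside it get RESIGN, and the multipath bit is ORed in while multipathing is still enabled. A small sketch of that decision; the SPID_FUNC_* values below are made-up stand-ins, not the architected encodings:

#define SPID_FUNC_ESTABLISH	0x01	/* stand-in value */
#define SPID_FUNC_RESIGN	0x02	/* stand-in value */
#define SPID_FUNC_MULTI_PATH	0x80	/* stand-in value */

static unsigned char spid_func(unsigned char lpm, unsigned char opm, int mpath)
{
	unsigned char fn;

	/* Operational paths join the group; dead ones resign from it. */
	fn = (lpm & opm) ? SPID_FUNC_ESTABLISH : SPID_FUNC_RESIGN;
	if (mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	return fn;
}
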
320 162
163static void verify_start(struct ccw_device *cdev);
321 164
322/* 165/*
323 * Called from interrupt context to check if a valid answer 166 * Process SET PGID request result for a single path.
324 * to Set Path Group ID was received.
325 */ 167 */
326static int 168static void spid_callback(struct ccw_device *cdev, void *data, int rc)
327__ccw_device_check_pgid(struct ccw_device *cdev)
328{ 169{
329 struct subchannel *sch; 170 struct subchannel *sch = to_subchannel(cdev->dev.parent);
330 struct irb *irb; 171 struct ccw_request *req = &cdev->private->req;
331 172
332 sch = to_subchannel(cdev->dev.parent); 173 switch (rc) {
333 irb = &cdev->private->irb; 174 case 0:
334 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 175 sch->vpm |= req->lpm & sch->opm;
335 /* Retry Set PGID if requested. */ 176 break;
336 if (cdev->private->flags.intretry) { 177 case -EACCES:
337 cdev->private->flags.intretry = 0; 178 break;
338 return -EAGAIN; 179 case -EOPNOTSUPP:
180 if (cdev->private->flags.mpath) {
181 /* Try without multipathing. */
182 cdev->private->flags.mpath = 0;
183 goto out_restart;
339 } 184 }
340 return -ETIME; 185 /* Try without pathgrouping. */
186 cdev->private->flags.pgroup = 0;
187 goto out_restart;
188 default:
189 goto err;
341 } 190 }
342 if (irb->esw.esw0.erw.cons) { 191 req->lpm >>= 1;
343 if (irb->ecw[0] & SNS0_CMD_REJECT) 192 spid_do(cdev);
344 return -EOPNOTSUPP; 193 return;
345 /* Hmm, whatever happened, try again. */ 194
346 CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, " 195out_restart:
347 "cnt %02d, " 196 verify_start(cdev);
348 "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n", 197 return;
349 cdev->private->dev_id.ssid, 198err:
350 cdev->private->dev_id.devno, 199 verify_done(cdev, rc);
351 irb->esw.esw0.erw.scnt, 200}
352 irb->ecw[0], irb->ecw[1], 201
353 irb->ecw[2], irb->ecw[3], 202static void spid_start(struct ccw_device *cdev)
354 irb->ecw[4], irb->ecw[5], 203{
355 irb->ecw[6], irb->ecw[7]); 204 struct ccw_request *req = &cdev->private->req;
356 return -EAGAIN; 205
357 } 206 /* Initialize request data. */
358 if (irb->scsw.cmd.cc == 3) { 207 memset(req, 0, sizeof(*req));
359 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x," 208 req->timeout = PGID_TIMEOUT;
360 " lpm %02X, became 'not operational'\n", 209 req->maxretries = PGID_RETRIES;
361 cdev->private->dev_id.devno, sch->schid.ssid, 210 req->lpm = 0x80;
362 sch->schid.sch_no, cdev->private->imask); 211 req->callback = spid_callback;
363 return -EACCES; 212 spid_do(cdev);
364 } 213}
365 return 0; 214
215static int pgid_cmp(struct pgid *p1, struct pgid *p2)
216{
217 return memcmp((char *) p1 + 1, (char *) p2 + 1,
218 sizeof(struct pgid) - 1);
366} 219}
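
pgid_cmp deliberately starts comparing one byte in: the first byte of a PGID carries the function/state information written by SET PGID rather than the group identity, so two PGIDs that differ only there still name the same path group. A sketch of that offset compare, with the record layout assumed:

#include <string.h>

struct pgid_s {
	unsigned char inf;	/* function/state byte - ignored in compare */
	unsigned char id[10];	/* group identity (layout assumed) */
};

/* Equal iff everything past the state byte matches. */
static int pgid_cmp_sketch(struct pgid_s *p1, struct pgid_s *p2)
{
	return memcmp((char *)p1 + 1, (char *)p2 + 1, sizeof(*p1) - 1);
}
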
367 220
368/* 221/*
369 * Called from interrupt context to check the path status after a nop has 222 * Determine pathgroup state from PGID data.
370 * been send.
371 */ 223 */
372static int __ccw_device_check_nop(struct ccw_device *cdev) 224static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
225 int *mismatch, int *reserved, int *reset)
373{ 226{
374 struct subchannel *sch; 227 struct pgid *pgid = &cdev->private->pgid[0];
375 struct irb *irb; 228 struct pgid *first = NULL;
376 229 int lpm;
377 sch = to_subchannel(cdev->dev.parent); 230 int i;
378 irb = &cdev->private->irb; 231
379 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 232 *mismatch = 0;
380 /* Retry NOP if requested. */ 233 *reserved = 0;
381 if (cdev->private->flags.intretry) { 234 *reset = 0;
382 cdev->private->flags.intretry = 0; 235 for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
383 return -EAGAIN; 236 if ((cdev->private->pgid_valid_mask & lpm) == 0)
237 continue;
238 if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
239 *reserved = 1;
240 if (pgid->inf.ps.state1 == SNID_STATE1_RESET) {
241 /* A PGID was reset. */
242 *reset = 1;
243 continue;
384 } 244 }
385 return -ETIME; 245 if (!first) {
386 } 246 first = pgid;
387 if (irb->scsw.cmd.cc == 3) { 247 continue;
388 CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x," 248 }
389 " lpm %02X, became 'not operational'\n", 249 if (pgid_cmp(pgid, first) != 0)
390 cdev->private->dev_id.devno, sch->schid.ssid, 250 *mismatch = 1;
391 sch->schid.sch_no, cdev->private->imask);
392 return -EACCES;
393 } 251 }
394 return 0; 252 if (!first)
253 first = &channel_subsystems[0]->global_pgid;
254 *p = first;
395} 255}
396 256
397static void 257static u8 pgid_to_vpm(struct ccw_device *cdev)
398__ccw_device_verify_start(struct ccw_device *cdev)
399{ 258{
400 struct subchannel *sch; 259 struct subchannel *sch = to_subchannel(cdev->dev.parent);
401 __u8 func; 260 struct pgid *pgid;
402 int ret; 261 int i;
403 262 int lpm;
404 sch = to_subchannel(cdev->dev.parent); 263 u8 vpm = 0;
405 /* Repeat for all paths. */ 264
406 for (; cdev->private->imask; cdev->private->imask >>= 1, 265 /* Set VPM bits for paths which are already in the target state. */
407 cdev->private->iretry = 5) { 266 for (i = 0; i < 8; i++) {
408 if ((cdev->private->imask & sch->schib.pmcw.pam) == 0) 267 lpm = 0x80 >> i;
409 /* Path not available, try next. */ 268 if ((cdev->private->pgid_valid_mask & lpm) == 0)
410 continue; 269 continue;
411 if (cdev->private->options.pgroup) { 270 pgid = &cdev->private->pgid[i];
412 if (sch->opm & cdev->private->imask) 271 if (sch->opm & lpm) {
413 func = SPID_FUNC_ESTABLISH; 272 if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
414 else 273 continue;
415 func = SPID_FUNC_RESIGN; 274 } else {
416 ret = __ccw_device_do_pgid(cdev, func); 275 if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
417 } else 276 continue;
418 ret = __ccw_device_do_nop(cdev); 277 }
419 /* We expect an interrupt in case of success or busy 278 if (cdev->private->flags.mpath) {
420 * indication. */ 279 if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
421 if (ret == 0 || ret == -EBUSY) 280 continue;
422 return; 281 } else {
423 /* Permanent path failure, try next. */ 282 if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
283 continue;
284 }
285 vpm |= lpm;
424 } 286 }
425 /* Done with all paths. */ 287
426 ccw_device_verify_done(cdev, (sch->vpm != 0) ? 0 : -ENODEV); 288 return vpm;
427} 289}
428 290
429/* 291static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
430 * Got interrupt for Set Path Group ID.
431 */
432void
433ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
434{ 292{
435 struct subchannel *sch; 293 int i;
436 struct irb *irb;
437 int ret;
438 294
439 irb = (struct irb *) __LC_IRB; 295 for (i = 0; i < 8; i++)
296 memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
297}
440 298
441 if (irb->scsw.cmd.stctl == 299/*
442 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 300 * Process SENSE PGID data and report result.
443 if (__ccw_device_should_retry(&irb->scsw)) 301 */
444 __ccw_device_verify_start(cdev); 302static void snid_done(struct ccw_device *cdev, int rc)
445 return; 303{
304 struct ccw_dev_id *id = &cdev->private->dev_id;
305 struct subchannel *sch = to_subchannel(cdev->dev.parent);
306 struct pgid *pgid;
307 int mismatch = 0;
308 int reserved = 0;
309 int reset = 0;
310
311 if (rc)
312 goto out;
313 pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
314 if (reserved)
315 rc = -EUSERS;
316 else if (mismatch)
317 rc = -EOPNOTSUPP;
318 else {
319 sch->vpm = pgid_to_vpm(cdev);
320 pgid_fill(cdev, pgid);
446 } 321 }
447 if (ccw_device_accumulate_and_sense(cdev, irb) != 0) 322out:
448 return; 323 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
449 sch = to_subchannel(cdev->dev.parent); 324 "mism=%d rsvd=%d reset=%d\n", id->ssid, id->devno, rc,
450 if (cdev->private->options.pgroup) 325 cdev->private->pgid_valid_mask, sch->vpm, mismatch,
451 ret = __ccw_device_check_pgid(cdev); 326 reserved, reset);
452 else 327 switch (rc) {
453 ret = __ccw_device_check_nop(cdev);
454 memset(&cdev->private->irb, 0, sizeof(struct irb));
455
456 switch (ret) {
457 /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
458 case 0: 328 case 0:
459 /* Path verification ccw finished successfully, update lpm. */ 329 /* Anything left to do? */
460 sch->vpm |= sch->opm & cdev->private->imask; 330 if (sch->vpm == sch->schib.pmcw.pam) {
461 /* Go on with next path. */ 331 verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
462 cdev->private->imask >>= 1; 332 return;
463 cdev->private->iretry = 5; 333 }
464 __ccw_device_verify_start(cdev); 334 /* Perform path-grouping. */
335 spid_start(cdev);
465 break; 336 break;
466 case -EOPNOTSUPP: 337 case -EOPNOTSUPP:
467 /* 338 /* Path-grouping not supported. */
468 * One of those strange devices which claim to be able 339 cdev->private->flags.pgroup = 0;
469 * to do multipathing but not for Set Path Group ID. 340 cdev->private->flags.mpath = 0;
470 */ 341 verify_start(cdev);
471 if (cdev->private->flags.pgid_single)
472 cdev->private->options.pgroup = 0;
473 else
474 cdev->private->flags.pgid_single = 1;
475 /* Retry */
476 sch->vpm = 0;
477 cdev->private->imask = 0x80;
478 cdev->private->iretry = 5;
479 /* fall through. */
480 case -EAGAIN: /* Try again. */
481 __ccw_device_verify_start(cdev);
482 break;
483 case -ETIME: /* Set path group id stopped by timeout. */
484 ccw_device_verify_done(cdev, -ETIME);
485 break;
486 case -EACCES: /* channel is not operational. */
487 cdev->private->imask >>= 1;
488 cdev->private->iretry = 5;
489 __ccw_device_verify_start(cdev);
490 break; 342 break;
343 default:
344 verify_done(cdev, rc);
491 } 345 }
492} 346}
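
snid_done turns the analyzed SENSE PGID data into a verdict: a reservation held elsewhere maps to -EUSERS, inconsistent PGIDs across paths to -EOPNOTSUPP, and on success it either finishes immediately (all paths already in the target state, vpm == pam) or proceeds to SET PGID. A condensed sketch of that decision ladder, with the analysis results passed in directly:

#include <errno.h>

/* Verdict from the pgid_analyze() results; rc carries any prior error. */
static int snid_verdict(int rc, int reserved, int mismatch)
{
	if (rc)
		return rc;		/* the I/O itself already failed */
	if (reserved)
		return -EUSERS;		/* device reserved by someone else */
	if (mismatch)
		return -EOPNOTSUPP;	/* inconsistent PGIDs across paths */
	return 0;			/* usable: finish, or go on to SET PGID */
}
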
493 347
494void 348/*
495ccw_device_verify_start(struct ccw_device *cdev) 349 * Create channel program to perform a SENSE PGID on a single path.
350 */
351static void snid_build_cp(struct ccw_device *cdev)
352{
353 struct ccw_request *req = &cdev->private->req;
354 struct ccw1 *cp = cdev->private->iccws;
355 int i = 8 - ffs(req->lpm);
356
357 /* Channel program setup. */
358 cp->cmd_code = CCW_CMD_SENSE_PGID;
359 cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
360 cp->count = sizeof(struct pgid);
361 cp->flags = CCW_FLAG_SLI;
362 req->cp = cp;
363}
364
365/*
366 * Perform SENSE PGID on a single path.
367 */
368static void snid_do(struct ccw_device *cdev)
496{ 369{
497 struct subchannel *sch = to_subchannel(cdev->dev.parent); 370 struct subchannel *sch = to_subchannel(cdev->dev.parent);
371 struct ccw_request *req = &cdev->private->req;
372
373 /* Adjust lpm if paths are not set in pam. */
374 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam);
375 if (!req->lpm)
376 goto out_nopath;
377 snid_build_cp(cdev);
378 ccw_request_start(cdev);
379 return;
380
381out_nopath:
382 snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES);
383}
498 384
499 cdev->private->flags.pgid_single = 0; 385/*
500 cdev->private->imask = 0x80; 386 * Process SENSE PGID request result for single path.
501 cdev->private->iretry = 5; 387 */
388static void snid_callback(struct ccw_device *cdev, void *data, int rc)
389{
390 struct ccw_request *req = &cdev->private->req;
391
392 if (rc == 0)
393 cdev->private->pgid_valid_mask |= req->lpm;
394 else if (rc != -EACCES)
395 goto err;
396 req->lpm >>= 1;
397 snid_do(cdev);
398 return;
399
400err:
401 snid_done(cdev, rc);
402}
502 403
503 /* Start with empty vpm. */ 404/*
504 sch->vpm = 0; 405 * Perform path verification.
406 */
407static void verify_start(struct ccw_device *cdev)
408{
409 struct subchannel *sch = to_subchannel(cdev->dev.parent);
410 struct ccw_request *req = &cdev->private->req;
411 struct ccw_dev_id *devid = &cdev->private->dev_id;
505 412
506 /* Get current pam. */ 413 sch->vpm = 0;
507 if (cio_update_schib(sch)) { 414 /* Initialize request data. */
508 ccw_device_verify_done(cdev, -ENODEV); 415 memset(req, 0, sizeof(*req));
509 return; 416 req->timeout = PGID_TIMEOUT;
417 req->maxretries = PGID_RETRIES;
418 req->lpm = 0x80;
419 if (cdev->private->flags.pgroup) {
420 CIO_TRACE_EVENT(4, "snid");
421 CIO_HEX_EVENT(4, devid, sizeof(*devid));
422 req->callback = snid_callback;
423 snid_do(cdev);
424 } else {
425 CIO_TRACE_EVENT(4, "nop");
426 CIO_HEX_EVENT(4, devid, sizeof(*devid));
427 req->filter = nop_filter;
428 req->callback = nop_callback;
429 nop_do(cdev);
510 } 430 }
511 /* After 60s path verification is considered to have failed. */
512 ccw_device_set_timeout(cdev, 60*HZ);
513 __ccw_device_verify_start(cdev);
514} 431}
515 432
516static void 433/**
517__ccw_device_disband_start(struct ccw_device *cdev) 434 * ccw_device_verify_start - perform path verification
435 * @cdev: ccw device
436 *
437 * Perform an I/O on each available channel path to @cdev to determine which
438 * paths are operational. The resulting path mask is stored in sch->vpm.
439 * If device options specify pathgrouping, establish a pathgroup for the
440 * operational paths. When finished, call ccw_device_verify_done with a
441 * return code specifying the result.
442 */
443void ccw_device_verify_start(struct ccw_device *cdev)
518{ 444{
519 struct subchannel *sch; 445 CIO_TRACE_EVENT(4, "vrfy");
520 int ret; 446 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
521 447 /* Initialize PGID data. */
522 sch = to_subchannel(cdev->dev.parent); 448 memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
523 while (cdev->private->imask != 0) { 449 cdev->private->pgid_valid_mask = 0;
524 if (sch->lpm & cdev->private->imask) { 450 /*
525 ret = __ccw_device_do_pgid(cdev, SPID_FUNC_DISBAND); 451 * Initialize pathgroup and multipath state with target values.
526 if (ret == 0) 452 * They may change in the course of path verification.
527 return; 453 */
528 } 454 cdev->private->flags.pgroup = cdev->private->options.pgroup;
529 cdev->private->iretry = 5; 455 cdev->private->flags.mpath = cdev->private->options.mpath;
530 cdev->private->imask >>= 1; 456 cdev->private->flags.doverify = 0;
531 } 457 verify_start(cdev);
532 ccw_device_disband_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
533} 458}
534 459
535/* 460/*
536 * Got interrupt for Unset Path Group ID. 461 * Process disband SET PGID request result.
537 */ 462 */
538void 463static void disband_callback(struct ccw_device *cdev, void *data, int rc)
539ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
540{ 464{
541 struct subchannel *sch; 465 struct subchannel *sch = to_subchannel(cdev->dev.parent);
542 struct irb *irb; 466 struct ccw_dev_id *id = &cdev->private->dev_id;
543 int ret; 467
468 if (rc)
469 goto out;
470 /* Ensure consistent multipathing state at device and channel. */
471 cdev->private->flags.mpath = 0;
472 if (sch->config.mp) {
473 sch->config.mp = 0;
474 rc = cio_commit_config(sch);
475 }
476out:
477 CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
478 rc);
479 ccw_device_disband_done(cdev, rc);
480}
544 481
545 irb = (struct irb *) __LC_IRB; 482/**
483 * ccw_device_disband_start - disband pathgroup
484 * @cdev: ccw device
485 *
486 * Execute a SET PGID channel program on @cdev to disband a previously
487 * established pathgroup. When finished, call ccw_device_disband_done with
488 * a return code specifying the result.
489 */
490void ccw_device_disband_start(struct ccw_device *cdev)
491{
492 struct subchannel *sch = to_subchannel(cdev->dev.parent);
493 struct ccw_request *req = &cdev->private->req;
494 u8 fn;
495
496 CIO_TRACE_EVENT(4, "disb");
497 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
498 /* Request setup. */
499 memset(req, 0, sizeof(*req));
500 req->timeout = PGID_TIMEOUT;
501 req->maxretries = PGID_RETRIES;
502 req->lpm = sch->schib.pmcw.pam & sch->opm;
503 req->callback = disband_callback;
504 fn = SPID_FUNC_DISBAND;
505 if (cdev->private->flags.mpath)
506 fn |= SPID_FUNC_MULTI_PATH;
507 spid_build_cp(cdev, fn);
508 ccw_request_start(cdev);
509}
546 510
547 if (irb->scsw.cmd.stctl == 511static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
548 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 512{
549 if (__ccw_device_should_retry(&irb->scsw)) 513 struct ccw_request *req = &cdev->private->req;
550 __ccw_device_disband_start(cdev); 514 struct ccw1 *cp = cdev->private->iccws;
551 return; 515
552 } 516 cp[0].cmd_code = CCW_CMD_STLCK;
553 if (ccw_device_accumulate_and_sense(cdev, irb) != 0) 517 cp[0].cda = (u32) (addr_t) buf1;
554 return; 518 cp[0].count = 32;
555 sch = to_subchannel(cdev->dev.parent); 519 cp[0].flags = CCW_FLAG_CC;
556 ret = __ccw_device_check_pgid(cdev); 520 cp[1].cmd_code = CCW_CMD_RELEASE;
557 memset(&cdev->private->irb, 0, sizeof(struct irb)); 521 cp[1].cda = (u32) (addr_t) buf2;
558 switch (ret) { 522 cp[1].count = 32;
559 /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */ 523 cp[1].flags = 0;
560 case 0: /* disband successful. */ 524 req->cp = cp;
561 ccw_device_disband_done(cdev, ret);
562 break;
563 case -EOPNOTSUPP:
564 /*
565 * One of those strange devices which claim to be able
566 * to do multipathing but not for Unset Path Group ID.
567 */
568 cdev->private->flags.pgid_single = 1;
569 /* fall through. */
570 case -EAGAIN: /* Try again. */
571 __ccw_device_disband_start(cdev);
572 break;
573 case -ETIME: /* Set path group id stopped by timeout. */
574 ccw_device_disband_done(cdev, -ETIME);
575 break;
576 case -EACCES: /* channel is not operational. */
577 cdev->private->imask >>= 1;
578 cdev->private->iretry = 5;
579 __ccw_device_disband_start(cdev);
580 break;
581 }
582} 525}
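
stlck_build_cp is a textbook example of command chaining: the first CCW carries CCW_FLAG_CC, so the channel proceeds directly to the RELEASE CCW stored after it, turning "unconditional reserve" plus "release" into one uninterrupted program that merely breaks the lock. A sketch of building such a two-CCW chain; the ccw1 layout and flag value follow the conventional format-1 definition but should be treated as assumed here:

#include <stdint.h>

#define CCW_FLAG_CC 0x40	/* command chaining (assumed encoding) */

struct ccw1 {			/* format-1 CCW, layout assumed */
	uint8_t  cmd_code;
	uint8_t  flags;
	uint16_t count;
	uint32_t cda;		/* data address */
};

static void build_chain(struct ccw1 cp[2], uint32_t buf1, uint32_t buf2,
			uint8_t cmd1, uint8_t cmd2)
{
	cp[0].cmd_code = cmd1;
	cp[0].cda = buf1;
	cp[0].count = 32;
	cp[0].flags = CCW_FLAG_CC;	/* chain: channel continues with cp[1] */
	cp[1].cmd_code = cmd2;
	cp[1].cda = buf2;
	cp[1].count = 32;
	cp[1].flags = 0;		/* last CCW ends the program */
}
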
583 526
584void 527static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
585ccw_device_disband_start(struct ccw_device *cdev)
586{ 528{
587 /* After 60s disbanding is considered to have failed. */ 529 ccw_device_stlck_done(cdev, data, rc);
588 ccw_device_set_timeout(cdev, 60*HZ); 530}
589 531
590 cdev->private->flags.pgid_single = 0; 532/**
591 cdev->private->iretry = 5; 533 * ccw_device_stlck_start - perform unconditional release
592 cdev->private->imask = 0x80; 534 * @cdev: ccw device
593 __ccw_device_disband_start(cdev); 535 * @data: data pointer to be passed to ccw_device_stlck_done
536 * @buf1: data pointer used in channel program
537 * @buf2: data pointer used in channel program
538 *
539 * Execute a channel program on @cdev to release an existing PGID reservation.
540 * When finished, call ccw_device_stlck_done with a return code specifying the
541 * result.
542 */
543void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1,
544 void *buf2)
545{
546 struct subchannel *sch = to_subchannel(cdev->dev.parent);
547 struct ccw_request *req = &cdev->private->req;
548
549 CIO_TRACE_EVENT(4, "stlck");
550 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
551 /* Request setup. */
552 memset(req, 0, sizeof(*req));
553 req->timeout = PGID_TIMEOUT;
554 req->maxretries = PGID_RETRIES;
555 req->lpm = sch->schib.pmcw.pam & sch->opm;
556 req->data = data;
557 req->callback = stlck_callback;
558 stlck_build_cp(cdev, buf1, buf2);
559 ccw_request_start(cdev);
594} 560}
561
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 5814dbee2410..66d8066ef22a 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -336,9 +336,6 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
336 sense_ccw->count = SENSE_MAX_COUNT; 336 sense_ccw->count = SENSE_MAX_COUNT;
337 sense_ccw->flags = CCW_FLAG_SLI; 337 sense_ccw->flags = CCW_FLAG_SLI;
338 338
339 /* Reset internal retry indication. */
340 cdev->private->flags.intretry = 0;
341
342 rc = cio_start(sch, sense_ccw, 0xff); 339 rc = cio_start(sch, sense_ccw, 0xff);
343 if (rc == -ENODEV || rc == -EACCES) 340 if (rc == -ENODEV || rc == -EACCES)
344 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 341 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 0b8f381bd20e..d72ae4c93af9 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -1,7 +1,10 @@
1#ifndef S390_IO_SCH_H 1#ifndef S390_IO_SCH_H
2#define S390_IO_SCH_H 2#define S390_IO_SCH_H
3 3
4#include <linux/types.h>
4#include <asm/schid.h> 5#include <asm/schid.h>
6#include <asm/ccwdev.h>
7#include "css.h"
5 8
6/* 9/*
7 * command-mode operation request block 10 * command-mode operation request block
@@ -68,6 +71,52 @@ struct io_subchannel_private {
68#define MAX_CIWS 8 71#define MAX_CIWS 8
69 72
70/* 73/*
74 * Possible status values for a CCW request's I/O.
75 */
76enum io_status {
77 IO_DONE,
78 IO_RUNNING,
79 IO_STATUS_ERROR,
80 IO_PATH_ERROR,
81 IO_REJECTED,
82 IO_KILLED
83};
84
85/**
86 * ccw_request - Internal CCW request.
87 * @cp: channel program to start
88 * @timeout: maximum allowable time in jiffies between start I/O and interrupt
89 * @maxretries: number of retries per I/O operation and path
90 * @lpm: mask of paths to use
91 * @check: optional callback that determines if results are final
92 * @filter: optional callback to adjust request status based on IRB data
93 * @callback: final callback
94 * @data: user-defined pointer passed to all callbacks
95 * @mask: current path mask
96 * @retries: current number of retries
97 * @drc: delayed return code
98 * @cancel: non-zero if request was cancelled
99 * @done: non-zero if request was finished
100 */
101struct ccw_request {
102 struct ccw1 *cp;
103 unsigned long timeout;
104 u16 maxretries;
105 u8 lpm;
106 int (*check)(struct ccw_device *, void *);
107 enum io_status (*filter)(struct ccw_device *, void *, struct irb *,
108 enum io_status);
109 void (*callback)(struct ccw_device *, void *, int);
110 void *data;
111 /* These fields are used internally. */
112 u16 mask;
113 u16 retries;
114 int drc;
115 int cancel:1;
116 int done:1;
117} __attribute__((packed));
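
struct ccw_request is the centerpiece of this series: every SENSE ID, SENSE PGID, SET PGID, NOP and STLCK operation above is expressed as one of these and driven by a shared engine that owns starting, retrying, path selection and timeouts. The following is a schematic of that engine's per-interrupt decision loop as it can be inferred from the field documentation, an interpretation rather than the actual ccw_request_handler:

enum io_status { IO_DONE, IO_RUNNING, IO_STATUS_ERROR, IO_PATH_ERROR,
		 IO_REJECTED, IO_KILLED };

struct request {
	unsigned short maxretries;
	unsigned short retries;
	unsigned char lpm;		/* single-bit current-path cursor */
	int  (*check)(void *data);	/* nonzero: result not final yet */
	void (*callback)(void *data, int rc);
	void *data;
	int done;
};

/* One interrupt's worth of engine logic, inferred from the field docs. */
static void handle_status(struct request *req, enum io_status status)
{
	switch (status) {
	case IO_RUNNING:
		return;			/* wait for further interrupts */
	case IO_DONE:
		if (req->check && req->check(req->data)) {
			if (req->retries++ < req->maxretries)
				return;	/* restart on the same path */
			goto next_path;	/* retry budget exhausted */
		}
		req->done = 1;
		req->callback(req->data, 0);
		return;
	case IO_PATH_ERROR:
next_path:
		req->lpm >>= 1;		/* give up on this path */
		req->retries = 0;
		if (!req->lpm) {	/* no paths left at all */
			req->done = 1;
			req->callback(req->data, -1);
		}
		return;
	default:			/* rejected/killed/status error */
		req->done = 1;
		req->callback(req->data, -1);
	}
}
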
118
119/*
71 * sense-id response buffer layout 120 * sense-id response buffer layout
72 */ 121 */
73struct senseid { 122struct senseid {
@@ -82,32 +131,43 @@ struct senseid {
82 struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */ 131 struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */
83} __attribute__ ((packed, aligned(4))); 132} __attribute__ ((packed, aligned(4)));
84 133
134enum cdev_todo {
135 CDEV_TODO_NOTHING,
136 CDEV_TODO_ENABLE_CMF,
137 CDEV_TODO_REBIND,
138 CDEV_TODO_REGISTER,
139 CDEV_TODO_UNREG,
140 CDEV_TODO_UNREG_EVAL,
141};
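
enum cdev_todo replaces the old kick_work scheme with a single pending-action slot per device; the enumerators are ordered by severity, so a stronger action (say, UNREG) may overwrite a weaker one that has not yet run, but never the reverse. A sketch of that sticky-maximum scheduling, assuming exactly this ordering convention:

enum todo { TODO_NOTHING, TODO_ENABLE_CMF, TODO_REBIND, TODO_REGISTER,
	    TODO_UNREG, TODO_UNREG_EVAL };

/* Record deferred work; a stronger request overwrites a weaker one. */
static void sched_todo(enum todo *slot, enum todo new)
{
	if (new <= *slot)
		return;		/* weaker or duplicate: keep what is queued */
	*slot = new;
	/* ...then queue the device's single worker exactly once... */
}
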
142
85struct ccw_device_private { 143struct ccw_device_private {
86 struct ccw_device *cdev; 144 struct ccw_device *cdev;
87 struct subchannel *sch; 145 struct subchannel *sch;
88 int state; /* device state */ 146 int state; /* device state */
89 atomic_t onoff; 147 atomic_t onoff;
90 unsigned long registered;
91 struct ccw_dev_id dev_id; /* device id */ 148 struct ccw_dev_id dev_id; /* device id */
92 struct subchannel_id schid; /* subchannel number */ 149 struct subchannel_id schid; /* subchannel number */
93 u8 imask; /* lpm mask for SNID/SID/SPGID */ 150 struct ccw_request req; /* internal I/O request */
94 int iretry; /* retry counter SNID/SID/SPGID */ 151 int iretry;
152 u8 pgid_valid_mask; /* mask of valid PGIDs */
95 struct { 153 struct {
96 unsigned int fast:1; /* post with "channel end" */ 154 unsigned int fast:1; /* post with "channel end" */
97 unsigned int repall:1; /* report every interrupt status */ 155 unsigned int repall:1; /* report every interrupt status */
98 unsigned int pgroup:1; /* do path grouping */ 156 unsigned int pgroup:1; /* do path grouping */
99 unsigned int force:1; /* allow forced online */ 157 unsigned int force:1; /* allow forced online */
158 unsigned int mpath:1; /* do multipathing */
100 } __attribute__ ((packed)) options; 159 } __attribute__ ((packed)) options;
101 struct { 160 struct {
102 unsigned int pgid_single:1; /* use single path for Set PGID */
103 unsigned int esid:1; /* Ext. SenseID supported by HW */ 161 unsigned int esid:1; /* Ext. SenseID supported by HW */
104 unsigned int dosense:1; /* delayed SENSE required */ 162 unsigned int dosense:1; /* delayed SENSE required */
105 unsigned int doverify:1; /* delayed path verification */ 163 unsigned int doverify:1; /* delayed path verification */
106 unsigned int donotify:1; /* call notify function */ 164 unsigned int donotify:1; /* call notify function */
107 unsigned int recog_done:1; /* dev. recog. complete */ 165 unsigned int recog_done:1; /* dev. recog. complete */
108 unsigned int fake_irb:1; /* deliver faked irb */ 166 unsigned int fake_irb:1; /* deliver faked irb */
109 unsigned int intretry:1; /* retry internal operation */
110 unsigned int resuming:1; /* recognition while resume */ 167 unsigned int resuming:1; /* recognition while resume */
168 unsigned int pgroup:1; /* pathgroup is set up */
169 unsigned int mpath:1; /* multipathing is set up */
170 unsigned int initialized:1; /* set if initial reference held */
111 } __attribute__((packed)) flags; 171 } __attribute__((packed)) flags;
112 unsigned long intparm; /* user interruption parameter */ 172 unsigned long intparm; /* user interruption parameter */
113 struct qdio_irq *qdio_data; 173 struct qdio_irq *qdio_data;
@@ -115,7 +175,8 @@ struct ccw_device_private {
115 struct senseid senseid; /* SenseID info */ 175 struct senseid senseid; /* SenseID info */
116 struct pgid pgid[8]; /* path group IDs per chpid*/ 176 struct pgid pgid[8]; /* path group IDs per chpid*/
117 struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ 177 struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
118 struct work_struct kick_work; 178 struct work_struct todo_work;
179 enum cdev_todo todo;
119 wait_queue_head_t wait_q; 180 wait_queue_head_t wait_q;
120 struct timer_list timer; 181 struct timer_list timer;
121 void *cmb; /* measurement information */ 182 void *cmb; /* measurement information */
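
The struct ccw_request added above is the heart of the new common I/O request infrastructure: a driver describes a channel program plus timeout/retry policy once, and the cio layer drives it across paths, reporting the outcome through the callback. A minimal sketch of how the embedded cdev->private->req might be filled in before submission (values and callback are hypothetical; the real users are the sense-id and path-verification state machines introduced by this series, and the helper that actually starts the request is not shown in this hunk):

/* Sketch only: field names follow struct ccw_request above. */
static void my_callback(struct ccw_device *cdev, void *data, int rc)
{
	/* rc is 0 on success or the delayed return code (drc) */
}

static void my_start(struct ccw_device *cdev, struct ccw1 *cp)
{
	struct ccw_request *req = &cdev->private->req;

	memset(req, 0, sizeof(*req));
	req->cp		= cp;		/* channel program to execute */
	req->timeout	= 10 * HZ;	/* per-try timeout, hypothetical */
	req->maxretries	= 5;		/* retries before giving up */
	req->lpm	= 0x80;		/* first logical path to try */
	req->callback	= my_callback;	/* final completion notification */
	req->data	= NULL;		/* passed back to all callbacks */
}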
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 1294876bf7b4..20836eff88c5 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -102,6 +102,7 @@ static atomic_t ap_poll_requests = ATOMIC_INIT(0);
 static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
 static struct task_struct *ap_poll_kthread = NULL;
 static DEFINE_MUTEX(ap_poll_thread_mutex);
+static DEFINE_SPINLOCK(ap_poll_timer_lock);
 static void *ap_interrupt_indicator;
 static struct hrtimer ap_poll_timer;
 /* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
@@ -282,6 +283,7 @@ static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
  * @psmid: The program supplied message identifier
  * @msg: The message text
  * @length: The message length
+ * @special: Special Bit
  *
  * Returns AP queue status structure.
  * Condition code 1 on NQAP can't happen because the L bit is 1.
@@ -289,7 +291,8 @@ static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
  * because a segment boundary was reached. The NQAP is repeated.
  */
 static inline struct ap_queue_status
-__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
+__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
+	  unsigned int special)
 {
 	typedef struct { char _[length]; } msgblock;
 	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
@@ -299,6 +302,9 @@ __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
 	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
 	register unsigned long reg5 asm ("5") = (unsigned int) psmid;
 
+	if (special == 1)
+		reg0 |= 0x400000UL;
+
 	asm volatile (
 		"0: .long 0xb2ad0042\n"		/* DQAP */
 		"   brc   2,0b"
@@ -312,13 +318,15 @@ int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
 {
 	struct ap_queue_status status;
 
-	status = __ap_send(qid, psmid, msg, length);
+	status = __ap_send(qid, psmid, msg, length, 0);
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 		return 0;
 	case AP_RESPONSE_Q_FULL:
 	case AP_RESPONSE_RESET_IN_PROGRESS:
 		return -EBUSY;
+	case AP_RESPONSE_REQ_FAC_NOT_INST:
+		return -EINVAL;
 	default:	/* Device is gone. */
 		return -ENODEV;
 	}
@@ -1008,7 +1016,7 @@ static int ap_probe_device_type(struct ap_device *ap_dev)
 	}
 
 	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
-			   msg, sizeof(msg));
+			   msg, sizeof(msg), 0);
 	if (status.response_code != AP_RESPONSE_NORMAL) {
 		rc = -ENODEV;
 		goto out_free;
@@ -1163,16 +1171,19 @@ ap_config_timeout(unsigned long ptr)
 static inline void ap_schedule_poll_timer(void)
 {
 	ktime_t hr_time;
+
+	spin_lock_bh(&ap_poll_timer_lock);
 	if (ap_using_interrupts() || ap_suspend_flag)
-		return;
+		goto out;
 	if (hrtimer_is_queued(&ap_poll_timer))
-		return;
+		goto out;
 	if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
 		hr_time = ktime_set(0, poll_timeout);
 		hrtimer_forward_now(&ap_poll_timer, hr_time);
 		hrtimer_restart(&ap_poll_timer);
 	}
-	return;
+out:
+	spin_unlock_bh(&ap_poll_timer_lock);
 }
 
 /**
@@ -1243,7 +1254,7 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
 	/* Start the next request on the queue. */
 	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
 	status = __ap_send(ap_dev->qid, ap_msg->psmid,
-			   ap_msg->message, ap_msg->length);
+			   ap_msg->message, ap_msg->length, ap_msg->special);
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 		atomic_inc(&ap_poll_requests);
@@ -1261,6 +1272,7 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
 		*flags |= 2;
 		break;
 	case AP_RESPONSE_MESSAGE_TOO_BIG:
+	case AP_RESPONSE_REQ_FAC_NOT_INST:
 		return -EINVAL;
 	default:
 		return -ENODEV;
@@ -1302,7 +1314,8 @@ static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_ms
 	if (list_empty(&ap_dev->requestq) &&
 	    ap_dev->queue_count < ap_dev->queue_depth) {
 		status = __ap_send(ap_dev->qid, ap_msg->psmid,
-				   ap_msg->message, ap_msg->length);
+				   ap_msg->message, ap_msg->length,
+				   ap_msg->special);
 		switch (status.response_code) {
 		case AP_RESPONSE_NORMAL:
 			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
@@ -1317,6 +1330,7 @@ static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_ms
 			ap_dev->requestq_count++;
 			ap_dev->total_request_count++;
 			return -EBUSY;
+		case AP_RESPONSE_REQ_FAC_NOT_INST:
 		case AP_RESPONSE_MESSAGE_TOO_BIG:
 			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
 			return -EINVAL;
@@ -1658,6 +1672,7 @@ int __init ap_module_init(void)
 	 */
 	if (MACHINE_IS_VM)
 		poll_timeout = 1500000;
+	spin_lock_init(&ap_poll_timer_lock);
 	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	ap_poll_timer.function = ap_poll_timeout;
 
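
Two independent fixes land in ap_bus.c above. First, the new special argument is threaded through __ap_send() so that requests flagged as "special" set bit 0x400000 in register 0 of the queueing instruction. Second, ap_schedule_poll_timer() now serializes its check-then-arm sequence on ap_poll_timer_lock: without the lock, two concurrent callers could both observe the hrtimer as idle and race to restart it. The locking pattern, reduced to its essentials (a generic sketch; timer_armed and arm_timer() are illustrative stand-ins, not driver symbols):

static DEFINE_SPINLOCK(timer_lock);
static bool timer_armed;

static void schedule_once(void)
{
	spin_lock_bh(&timer_lock);	/* _bh: callers may run in softirq context */
	if (timer_armed)
		goto out;		/* another CPU already armed the timer */
	timer_armed = true;
	arm_timer();			/* hypothetical helper that starts the timer */
out:
	spin_unlock_bh(&timer_lock);
}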
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index a35362241805..4785d07cd447 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -87,6 +87,7 @@ struct ap_queue_status {
87#define AP_RESPONSE_INDEX_TOO_BIG 0x11 87#define AP_RESPONSE_INDEX_TOO_BIG 0x11
88#define AP_RESPONSE_NO_FIRST_PART 0x13 88#define AP_RESPONSE_NO_FIRST_PART 0x13
89#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15 89#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
90#define AP_RESPONSE_REQ_FAC_NOT_INST 0x16
90 91
91/* 92/*
92 * Known device types 93 * Known device types
@@ -96,8 +97,8 @@ struct ap_queue_status {
96#define AP_DEVICE_TYPE_PCIXCC 5 97#define AP_DEVICE_TYPE_PCIXCC 5
97#define AP_DEVICE_TYPE_CEX2A 6 98#define AP_DEVICE_TYPE_CEX2A 6
98#define AP_DEVICE_TYPE_CEX2C 7 99#define AP_DEVICE_TYPE_CEX2C 7
99#define AP_DEVICE_TYPE_CEX2A2 8 100#define AP_DEVICE_TYPE_CEX3A 8
100#define AP_DEVICE_TYPE_CEX2C2 9 101#define AP_DEVICE_TYPE_CEX3C 9
101 102
102/* 103/*
103 * AP reset flag states 104 * AP reset flag states
@@ -161,12 +162,25 @@ struct ap_message {
161 size_t length; /* Message length. */ 162 size_t length; /* Message length. */
162 163
163 void *private; /* ap driver private pointer. */ 164 void *private; /* ap driver private pointer. */
165 unsigned int special:1; /* Used for special commands. */
164}; 166};
165 167
166#define AP_DEVICE(dt) \ 168#define AP_DEVICE(dt) \
167 .dev_type=(dt), \ 169 .dev_type=(dt), \
168 .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE, 170 .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
169 171
172/**
173 * ap_init_message() - Initialize ap_message.
174 * Initialize a message before using. Otherwise this might result in
175 * unexpected behaviour.
176 */
177static inline void ap_init_message(struct ap_message *ap_msg)
178{
179 ap_msg->psmid = 0;
180 ap_msg->length = 0;
181 ap_msg->special = 0;
182}
183
170/* 184/*
171 * Note: don't use ap_send/ap_recv after using ap_queue_message 185 * Note: don't use ap_send/ap_recv after using ap_queue_message
172 * for the first time. Otherwise the ap message queue will get 186 * for the first time. Otherwise the ap message queue will get
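
With special:1 added to struct ap_message, any stack-allocated message that is not fully initialized would hand stack garbage to __ap_send(), which is why ap_init_message() exists and why every request path in the zcrypt drivers below gains a call to it before allocating the message buffer. The intended calling convention, in outline (a sketch with error handling trimmed; CEX2A_MAX_MESSAGE_SIZE is just one example size):

	struct ap_message ap_msg;

	ap_init_message(&ap_msg);	/* zero psmid, length and the special bit */
	ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
	if (!ap_msg.message)
		return -ENOMEM;
	/* ... build the request in ap_msg.message, set ap_msg.length ... */
	ap_queue_message(ap_dev, &ap_msg);	/* hand it to the AP bus */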
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 65b6a96afe6b..0d4d18bdd45c 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -299,9 +299,7 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
  */
 static int zcrypt_open(struct inode *inode, struct file *filp)
 {
-	lock_kernel();
 	atomic_inc(&zcrypt_open_count);
-	unlock_kernel();
 	return 0;
 }
 
@@ -1009,6 +1007,10 @@ static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
 		       zcrypt_count_type(ZCRYPT_CEX2C));
 	len += sprintf(resp_buff + len, "CEX2A count: %d\n",
 		       zcrypt_count_type(ZCRYPT_CEX2A));
+	len += sprintf(resp_buff + len, "CEX3C count: %d\n",
+		       zcrypt_count_type(ZCRYPT_CEX3C));
+	len += sprintf(resp_buff + len, "CEX3A count: %d\n",
+		       zcrypt_count_type(ZCRYPT_CEX3A));
 	len += sprintf(resp_buff + len, "requestq count: %d\n",
 		       zcrypt_requestq_count());
 	len += sprintf(resp_buff + len, "pendingq count: %d\n",
@@ -1017,7 +1019,7 @@ static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
 		       atomic_read(&zcrypt_open_count));
 	zcrypt_status_mask(workarea);
 	len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
-			"4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
+			"4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
			resp_buff+len, workarea, AP_DEVICES);
 	zcrypt_qdepth_mask(workarea);
 	len += sprinthx("Waiting work element counts",
@@ -1095,8 +1097,9 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer,
 	 * '0' for no device, '1' for PCICA, '2' for PCICC,
 	 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
 	 * '5' for CEX2C and '6' for CEX2A'
+	 * '7' for CEX3C and '8' for CEX3A
 	 */
-	if (*ptr >= '0' && *ptr <= '6')
+	if (*ptr >= '0' && *ptr <= '8')
 		j++;
 	else if (*ptr == 'd' || *ptr == 'D')
 		zcrypt_disable_card(j++);
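
The zcrypt /proc status write handler above consumes one control character per card index, so admitting the new device types only requires widening the accepted digit range from '6' to '8'. For reference, the loop has roughly this shape (a condensed sketch; the surrounding buffer handling and bounds checks are omitted):

	/* Condensed sketch of the per-card control characters:
	 * a digit steps to the next card, 'd'/'D' disables it, and
	 * 'e'/'E' (handled in the same loop) re-enables it. */
	for (j = 0; j < AP_DEVICES && *ptr; ptr++) {
		if (*ptr >= '0' && *ptr <= '8')
			j++;
		else if (*ptr == 'd' || *ptr == 'D')
			zcrypt_disable_card(j++);
		else if (*ptr == 'e' || *ptr == 'E')
			zcrypt_enable_card(j++);
		else
			break;
	}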
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 1d1ec74dadb2..8e7ffbf2466c 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -71,6 +71,8 @@ struct ica_z90_status {
 #define ZCRYPT_PCIXCC_MCL3	4
 #define ZCRYPT_CEX2C		5
 #define ZCRYPT_CEX2A		6
+#define ZCRYPT_CEX3C		7
+#define ZCRYPT_CEX3A		8
 
 /**
  * Large random numbers are pulled in 4096 byte chunks from the crypto cards
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 326ea08f67c9..c6fb0aa89507 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -39,17 +39,24 @@
 
 #define CEX2A_MIN_MOD_SIZE	  1	/*    8 bits	*/
 #define CEX2A_MAX_MOD_SIZE	256	/* 2048 bits	*/
+#define CEX3A_MIN_MOD_SIZE	CEX2A_MIN_MOD_SIZE
+#define CEX3A_MAX_MOD_SIZE	CEX2A_MAX_MOD_SIZE
 
 #define CEX2A_SPEED_RATING	970
+#define CEX3A_SPEED_RATING	900	/* Fixme: Needs finetuning */
 
 #define CEX2A_MAX_MESSAGE_SIZE	0x390	/* sizeof(struct type50_crb2_msg)    */
 #define CEX2A_MAX_RESPONSE_SIZE 0x110	/* max outputdatalength + type80_hdr */
 
+#define CEX3A_MAX_MESSAGE_SIZE	CEX2A_MAX_MESSAGE_SIZE
+#define CEX3A_MAX_RESPONSE_SIZE	CEX2A_MAX_RESPONSE_SIZE
+
 #define CEX2A_CLEANUP_TIME	(15*HZ)
+#define CEX3A_CLEANUP_TIME	CEX2A_CLEANUP_TIME
 
 static struct ap_device_id zcrypt_cex2a_ids[] = {
 	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
-	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A2) },
+	{ AP_DEVICE(AP_DEVICE_TYPE_CEX3A) },
 	{ /* end of list */ },
 };
 
@@ -298,6 +305,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -335,6 +343,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -373,31 +382,45 @@ static struct zcrypt_ops zcrypt_cex2a_ops = {
  */
 static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
 {
-	struct zcrypt_device *zdev;
-	int rc;
+	struct zcrypt_device *zdev = NULL;
+	int rc = 0;
 
-	zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
-	if (!zdev)
-		return -ENOMEM;
-	zdev->ap_dev = ap_dev;
-	zdev->ops = &zcrypt_cex2a_ops;
-	zdev->online = 1;
-	zdev->user_space_type = ZCRYPT_CEX2A;
-	zdev->type_string = "CEX2A";
-	zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
-	zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
-	zdev->short_crt = 1;
-	zdev->speed_rating = CEX2A_SPEED_RATING;
-	ap_dev->reply = &zdev->reply;
-	ap_dev->private = zdev;
-	rc = zcrypt_device_register(zdev);
-	if (rc)
-		goto out_free;
-	return 0;
-
-out_free:
-	ap_dev->private = NULL;
-	zcrypt_device_free(zdev);
+	switch (ap_dev->device_type) {
+	case AP_DEVICE_TYPE_CEX2A:
+		zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
+		if (!zdev)
+			return -ENOMEM;
+		zdev->user_space_type = ZCRYPT_CEX2A;
+		zdev->type_string = "CEX2A";
+		zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
+		zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
+		zdev->short_crt = 1;
+		zdev->speed_rating = CEX2A_SPEED_RATING;
+		break;
+	case AP_DEVICE_TYPE_CEX3A:
+		zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE);
+		if (!zdev)
+			return -ENOMEM;
+		zdev->user_space_type = ZCRYPT_CEX3A;
+		zdev->type_string = "CEX3A";
+		zdev->min_mod_size = CEX3A_MIN_MOD_SIZE;
+		zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
+		zdev->short_crt = 1;
+		zdev->speed_rating = CEX3A_SPEED_RATING;
+		break;
+	}
+	if (zdev != NULL) {
+		zdev->ap_dev = ap_dev;
+		zdev->ops = &zcrypt_cex2a_ops;
+		zdev->online = 1;
+		ap_dev->reply = &zdev->reply;
+		ap_dev->private = zdev;
+		rc = zcrypt_device_register(zdev);
+	}
+	if (rc) {
+		ap_dev->private = NULL;
+		zcrypt_device_free(zdev);
+	}
 	return rc;
 }
 
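
The rewritten zcrypt_cex2a_probe() above replaces early-return error handling with a device-type switch followed by a shared tail: each case fills in only the type-specific fields, while the common wiring (ops, online flag, registration, cleanup on failure) is written once. The same pattern generalizes to any probe routine serving several hardware variants, roughly as follows (a generic sketch under hypothetical names like my_dev and my_ctx_alloc, not driver code):

/* Sketch of the switch-plus-shared-tail probe pattern. */
static int my_probe(struct my_dev *dev)
{
	struct my_ctx *ctx = NULL;
	int rc = 0;

	switch (dev->type) {			/* variant-specific setup only */
	case MY_TYPE_A:
		ctx = my_ctx_alloc(MY_A_BUFSIZE);
		if (!ctx)
			return -ENOMEM;
		ctx->rating = MY_A_RATING;
		break;
	case MY_TYPE_B:
		ctx = my_ctx_alloc(MY_B_BUFSIZE);
		if (!ctx)
			return -ENOMEM;
		ctx->rating = MY_B_RATING;
		break;
	}
	if (ctx != NULL) {			/* shared tail: wire up and register once */
		ctx->dev = dev;
		rc = my_register(ctx);
	}
	if (rc)
		my_ctx_free(ctx);		/* single cleanup path on failure */
	return rc;				/* unknown type: ctx NULL, rc 0 */
}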
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 17ba81b58c78..e78df3671caf 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -281,6 +281,7 @@ static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -318,6 +319,7 @@ static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index f4b0c4795434..a23726a0735c 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -483,6 +483,7 @@ static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -521,6 +522,7 @@ static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 5677b40e4ac0..79c120578e61 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -43,10 +43,13 @@
 #define PCIXCC_MIN_MOD_SIZE	 16	/*  128 bits	*/
 #define PCIXCC_MIN_MOD_SIZE_OLD	 64	/*  512 bits	*/
 #define PCIXCC_MAX_MOD_SIZE	256	/* 2048 bits	*/
+#define CEX3C_MIN_MOD_SIZE	PCIXCC_MIN_MOD_SIZE
+#define CEX3C_MAX_MOD_SIZE	PCIXCC_MAX_MOD_SIZE
 
-#define PCIXCC_MCL2_SPEED_RATING	7870	/* FIXME: needs finetuning */
+#define PCIXCC_MCL2_SPEED_RATING	7870
 #define PCIXCC_MCL3_SPEED_RATING	7870
-#define CEX2C_SPEED_RATING		8540
+#define CEX2C_SPEED_RATING		7000
+#define CEX3C_SPEED_RATING		6500	/* FIXME: needs finetuning */
 
 #define PCIXCC_MAX_ICA_MESSAGE_SIZE	0x77c	/* max size type6 v2 crt message */
 #define PCIXCC_MAX_ICA_RESPONSE_SIZE	0x77c	/* max size type86 v2 reply	 */
@@ -72,7 +75,7 @@ struct response_type {
 static struct ap_device_id zcrypt_pcixcc_ids[] = {
 	{ AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
 	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
-	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C2) },
+	{ AP_DEVICE(AP_DEVICE_TYPE_CEX3C) },
 	{ /* end of list */ },
 };
 
@@ -326,6 +329,11 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
 	function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
 	memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
 
+	if (memcmp(function_code, "US", 2) == 0)
+		ap_msg->special = 1;
+	else
+		ap_msg->special = 0;
+
 	/* copy data block */
 	if (xcRB->request_data_length &&
 	    copy_from_user(req_data, xcRB->request_data_address,
@@ -688,6 +696,7 @@ static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev,
 	};
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -727,6 +736,7 @@ static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev,
 	};
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -766,6 +776,7 @@ static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
 	};
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -805,6 +816,7 @@ static long zcrypt_pcixcc_rng(struct zcrypt_device *zdev,
 	};
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -972,6 +984,7 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
 	} __attribute__((packed)) *reply;
 	int rc, i;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -1016,14 +1029,15 @@ out_free:
 static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
 {
 	struct zcrypt_device *zdev;
-	int rc;
+	int rc = 0;
 
 	zdev = zcrypt_device_alloc(PCIXCC_MAX_RESPONSE_SIZE);
 	if (!zdev)
 		return -ENOMEM;
 	zdev->ap_dev = ap_dev;
 	zdev->online = 1;
-	if (ap_dev->device_type == AP_DEVICE_TYPE_PCIXCC) {
+	switch (ap_dev->device_type) {
+	case AP_DEVICE_TYPE_PCIXCC:
 		rc = zcrypt_pcixcc_mcl(ap_dev);
 		if (rc < 0) {
 			zcrypt_device_free(zdev);
@@ -1041,13 +1055,25 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
 			zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
 			zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
 		}
-	} else {
+		break;
+	case AP_DEVICE_TYPE_CEX2C:
 		zdev->user_space_type = ZCRYPT_CEX2C;
 		zdev->type_string = "CEX2C";
 		zdev->speed_rating = CEX2C_SPEED_RATING;
 		zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
 		zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+		break;
+	case AP_DEVICE_TYPE_CEX3C:
+		zdev->user_space_type = ZCRYPT_CEX3C;
+		zdev->type_string = "CEX3C";
+		zdev->speed_rating = CEX3C_SPEED_RATING;
+		zdev->min_mod_size = CEX3C_MIN_MOD_SIZE;
+		zdev->max_mod_size = CEX3C_MAX_MOD_SIZE;
+		break;
+	default:
+		goto out_free;
 	}
+
 	rc = zcrypt_pcixcc_rng_supported(ap_dev);
 	if (rc < 0) {
 		zcrypt_device_free(zdev);