Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/Kconfig | 209
-rw-r--r--  drivers/s390/Makefile | 9
-rw-r--r--  drivers/s390/block/Kconfig | 68
-rw-r--r--  drivers/s390/block/Makefile | 17
-rw-r--r--  drivers/s390/block/dasd.c | 2065
-rw-r--r--  drivers/s390/block/dasd_3370_erp.c | 104
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 2742
-rw-r--r--  drivers/s390/block/dasd_9336_erp.c | 61
-rw-r--r--  drivers/s390/block/dasd_9343_erp.c | 22
-rw-r--r--  drivers/s390/block/dasd_cmb.c | 145
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 772
-rw-r--r--  drivers/s390/block/dasd_diag.c | 541
-rw-r--r--  drivers/s390/block/dasd_diag.h | 66
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 1722
-rw-r--r--  drivers/s390/block/dasd_eckd.h | 346
-rw-r--r--  drivers/s390/block/dasd_erp.c | 254
-rw-r--r--  drivers/s390/block/dasd_fba.c | 607
-rw-r--r--  drivers/s390/block/dasd_fba.h | 73
-rw-r--r--  drivers/s390/block/dasd_genhd.c | 185
-rw-r--r--  drivers/s390/block/dasd_int.h | 576
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 554
-rw-r--r--  drivers/s390/block/dasd_proc.c | 319
-rw-r--r--  drivers/s390/block/dcssblk.c | 775
-rw-r--r--  drivers/s390/block/xpram.c | 539
-rw-r--r--  drivers/s390/char/Makefile | 28
-rw-r--r--  drivers/s390/char/con3215.c | 1192
-rw-r--r--  drivers/s390/char/con3270.c | 638
-rw-r--r--  drivers/s390/char/ctrlchar.c | 75
-rw-r--r--  drivers/s390/char/ctrlchar.h | 20
-rw-r--r--  drivers/s390/char/defkeymap.c | 156
-rw-r--r--  drivers/s390/char/defkeymap.map | 191
-rw-r--r--  drivers/s390/char/fs3270.c | 373
-rw-r--r--  drivers/s390/char/keyboard.c | 519
-rw-r--r--  drivers/s390/char/keyboard.h | 57
-rw-r--r--  drivers/s390/char/monreader.c | 662
-rw-r--r--  drivers/s390/char/raw3270.c | 1335
-rw-r--r--  drivers/s390/char/raw3270.h | 274
-rw-r--r--  drivers/s390/char/sclp.c | 915
-rw-r--r--  drivers/s390/char/sclp.h | 159
-rw-r--r--  drivers/s390/char/sclp_con.c | 252
-rw-r--r--  drivers/s390/char/sclp_cpi.c | 254
-rw-r--r--  drivers/s390/char/sclp_quiesce.c | 99
-rw-r--r--  drivers/s390/char/sclp_rw.c | 471
-rw-r--r--  drivers/s390/char/sclp_rw.h | 96
-rw-r--r--  drivers/s390/char/sclp_tty.c | 813
-rw-r--r--  drivers/s390/char/sclp_tty.h | 71
-rw-r--r--  drivers/s390/char/sclp_vt220.c | 785
-rw-r--r--  drivers/s390/char/tape.h | 384
-rw-r--r--  drivers/s390/char/tape_34xx.c | 1385
-rw-r--r--  drivers/s390/char/tape_block.c | 492
-rw-r--r--  drivers/s390/char/tape_char.c | 492
-rw-r--r--  drivers/s390/char/tape_class.c | 126
-rw-r--r--  drivers/s390/char/tape_class.h | 61
-rw-r--r--  drivers/s390/char/tape_core.c | 1242
-rw-r--r--  drivers/s390/char/tape_proc.c | 145
-rw-r--r--  drivers/s390/char/tape_std.c | 765
-rw-r--r--  drivers/s390/char/tape_std.h | 152
-rw-r--r--  drivers/s390/char/tty3270.c | 1836
-rw-r--r--  drivers/s390/char/vmlogrdr.c | 920
-rw-r--r--  drivers/s390/char/vmwatchdog.c | 292
-rw-r--r--  drivers/s390/cio/Makefile | 10
-rw-r--r--  drivers/s390/cio/airq.c | 87
-rw-r--r--  drivers/s390/cio/airq.h | 10
-rw-r--r--  drivers/s390/cio/blacklist.c | 351
-rw-r--r--  drivers/s390/cio/blacklist.h | 6
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 482
-rw-r--r--  drivers/s390/cio/chsc.c | 1114
-rw-r--r--  drivers/s390/cio/chsc.h | 66
-rw-r--r--  drivers/s390/cio/cio.c | 860
-rw-r--r--  drivers/s390/cio/cio.h | 143
-rw-r--r--  drivers/s390/cio/cio_debug.h | 32
-rw-r--r--  drivers/s390/cio/cmf.c | 1042
-rw-r--r--  drivers/s390/cio/css.c | 575
-rw-r--r--  drivers/s390/cio/css.h | 155
-rw-r--r--  drivers/s390/cio/device.c | 1135
-rw-r--r--  drivers/s390/cio/device.h | 115
-rw-r--r--  drivers/s390/cio/device_fsm.c | 1250
-rw-r--r--  drivers/s390/cio/device_id.c | 355
-rw-r--r--  drivers/s390/cio/device_ops.c | 603
-rw-r--r--  drivers/s390/cio/device_pgid.c | 448
-rw-r--r--  drivers/s390/cio/device_status.c | 385
-rw-r--r--  drivers/s390/cio/ioasm.h | 228
-rw-r--r--  drivers/s390/cio/qdio.c | 3468
-rw-r--r--  drivers/s390/cio/qdio.h | 648
-rw-r--r--  drivers/s390/crypto/Makefile | 6
-rw-r--r--  drivers/s390/crypto/z90common.h | 168
-rw-r--r--  drivers/s390/crypto/z90crypt.h | 258
-rw-r--r--  drivers/s390/crypto/z90hardware.c | 2243
-rw-r--r--  drivers/s390/crypto/z90main.c | 3563
-rw-r--r--  drivers/s390/ebcdic.c | 246
-rw-r--r--  drivers/s390/net/Kconfig | 108
-rw-r--r--  drivers/s390/net/Makefile | 14
-rw-r--r--  drivers/s390/net/claw.c | 4447
-rw-r--r--  drivers/s390/net/claw.h | 335
-rw-r--r--  drivers/s390/net/ctcdbug.c | 83
-rw-r--r--  drivers/s390/net/ctcdbug.h | 123
-rw-r--r--  drivers/s390/net/ctcmain.c | 3304
-rw-r--r--  drivers/s390/net/ctctty.c | 1276
-rw-r--r--  drivers/s390/net/ctctty.h | 37
-rw-r--r--  drivers/s390/net/cu3088.c | 166
-rw-r--r--  drivers/s390/net/cu3088.h | 41
-rw-r--r--  drivers/s390/net/fsm.c | 220
-rw-r--r--  drivers/s390/net/fsm.h | 265
-rw-r--r--  drivers/s390/net/iucv.c | 2567
-rw-r--r--  drivers/s390/net/iucv.h | 849
-rw-r--r--  drivers/s390/net/lcs.c | 2347
-rw-r--r--  drivers/s390/net/lcs.h | 321
-rw-r--r--  drivers/s390/net/netiucv.c | 2149
-rw-r--r--  drivers/s390/net/qeth.h | 1162
-rw-r--r--  drivers/s390/net/qeth_eddp.c | 643
-rw-r--r--  drivers/s390/net/qeth_eddp.h | 85
-rw-r--r--  drivers/s390/net/qeth_fs.h | 163
-rw-r--r--  drivers/s390/net/qeth_main.c | 8236
-rw-r--r--  drivers/s390/net/qeth_mpc.c | 168
-rw-r--r--  drivers/s390/net/qeth_mpc.h | 538
-rw-r--r--  drivers/s390/net/qeth_proc.c | 495
-rw-r--r--  drivers/s390/net/qeth_sys.c | 1788
-rw-r--r--  drivers/s390/net/qeth_tso.c | 285
-rw-r--r--  drivers/s390/net/qeth_tso.h | 58
-rw-r--r--  drivers/s390/net/smsgiucv.c | 180
-rw-r--r--  drivers/s390/net/smsgiucv.h | 10
-rw-r--r--  drivers/s390/s390mach.c | 219
-rw-r--r--  drivers/s390/s390mach.h | 79
-rw-r--r--  drivers/s390/scsi/Makefile | 9
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 1977
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 312
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 1121
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 3585
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 186
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 5087
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h | 472
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 868
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 949
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs_adapter.c | 298
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs_driver.c | 135
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs_port.c | 311
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs_unit.c | 179
-rw-r--r--  drivers/s390/sysinfo.c | 347
138 files changed, 99147 insertions, 0 deletions
diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig
new file mode 100644
index 000000000000..96413c2cd1ad
--- /dev/null
+++ b/drivers/s390/Kconfig
@@ -0,0 +1,209 @@
config CCW
	bool
	default y

source "drivers/block/Kconfig"

source "drivers/md/Kconfig"


menu "Character device drivers"

config UNIX98_PTYS
	bool "Unix98 PTY support"
	---help---
	  A pseudo terminal (PTY) is a software device consisting of two
	  halves: a master and a slave. The slave device behaves identically
	  to a physical terminal; the master device is used by a process to
	  read data from and write data to the slave, thereby emulating a
	  terminal. Typical programs for the master side are telnet servers
	  and xterms.

	  Linux has traditionally used the BSD-like names /dev/ptyxx for
	  masters and /dev/ttyxx for slaves of pseudo terminals. This scheme
	  has a number of problems. The GNU C library glibc 2.1 and later,
	  however, supports the Unix98 naming standard: in order to acquire a
	  pseudo terminal, a process opens /dev/ptmx; the number of the pseudo
	  terminal is then made available to the process and the pseudo
	  terminal slave can be accessed as /dev/pts/<number>. What was
	  traditionally /dev/ttyp2 will then be /dev/pts/2, for example.

	  The entries in /dev/pts/ are created on the fly by a virtual
	  file system; therefore, if you say Y here you should say Y to
	  "/dev/pts file system for Unix98 PTYs" as well.

	  If you want to say Y here, you need to have the C library glibc 2.1
	  or later (equal to libc-6.1, check with "ls -l /lib/libc.so.*").
	  Read the instructions in <file:Documentation/Changes> pertaining to
	  pseudo terminals. It's safe to say N.

config UNIX98_PTY_COUNT
	int "Maximum number of Unix98 PTYs in use (0-2048)"
	depends on UNIX98_PTYS
	default "256"
	help
	  The maximum number of Unix98 PTYs that can be used at any one time.
	  The default is 256, and should be enough for desktop systems. Server
	  machines which support incoming telnet/rlogin/ssh connections and/or
	  serve several X terminals may want to increase this: every incoming
	  connection and every xterm uses up one PTY.

	  When not in use, each additional set of 256 PTYs occupies
	  approximately 8 KB of kernel memory on 32-bit architectures.

source "drivers/char/watchdog/Kconfig"

comment "S/390 character device drivers"

config TN3270
	tristate "Support for locally attached 3270 terminals"
	help
	  Include support for IBM 3270 terminals.

config TN3270_TTY
	tristate "Support for tty input/output on 3270 terminals"
	depends on TN3270
	help
	  Include support for using an IBM 3270 terminal as a Linux tty.

config TN3270_FS
	tristate "Support for fullscreen applications on 3270 terminals"
	depends on TN3270
	help
	  Include support for fullscreen applications on an IBM 3270 terminal.

config TN3270_CONSOLE
	bool "Support for console on 3270 terminal"
	depends on TN3270=y && TN3270_TTY=y
	help
	  Include support for using an IBM 3270 terminal as a Linux system
	  console. Available only if 3270 support is compiled in statically.

config TN3215
	bool "Support for 3215 line mode terminal"
	help
	  Include support for IBM 3215 line-mode terminals.

config TN3215_CONSOLE
	bool "Support for console on 3215 line mode terminal"
	depends on TN3215
	help
	  Include support for using an IBM 3215 line-mode terminal as a
	  Linux system console.

config CCW_CONSOLE
	bool
	depends on TN3215_CONSOLE || TN3270_CONSOLE
	default y

config SCLP
	bool "Support for SCLP"
	help
	  Include support for the SCLP interface to the service element.

config SCLP_TTY
	bool "Support for SCLP line mode terminal"
	depends on SCLP
	help
	  Include support for IBM SCLP line-mode terminals.

config SCLP_CONSOLE
	bool "Support for console on SCLP line mode terminal"
	depends on SCLP_TTY
	help
	  Include support for using an IBM HWC line-mode terminal as the Linux
	  system console.

config SCLP_VT220_TTY
	bool "Support for SCLP VT220-compatible terminal"
	depends on SCLP
	help
	  Include support for an IBM SCLP VT220-compatible terminal.

config SCLP_VT220_CONSOLE
	bool "Support for console on SCLP VT220-compatible terminal"
	depends on SCLP_VT220_TTY
	help
	  Include support for using an IBM SCLP VT220-compatible terminal as a
	  Linux system console.

config SCLP_CPI
	tristate "Control-Program Identification"
	depends on SCLP
	help
	  This option enables the hardware console interface for system
	  identification. This is commonly used for workload management and
	  gives you a nice name for the system on the service element.
	  Please select this option as a module since built-in operation is
	  completely untested.
	  You should only select this option if you know what you are doing,
	  need this feature and intend to run your kernel in LPAR.

config S390_TAPE
	tristate "S/390 tape device support"
	help
	  Select this option if you want to access channel-attached tape
	  devices on IBM S/390 or zSeries.
	  If you select this option you will also want to select at
	  least one of the tape interface options and one of the tape
	  hardware options in order to access a tape device.
	  This option is also available as a module. The module will be
	  called tape390 and include all selected interfaces and
	  hardware drivers.

comment "S/390 tape interface support"
	depends on S390_TAPE

config S390_TAPE_BLOCK
	bool "Support for tape block devices"
	depends on S390_TAPE
	help
	  Select this option if you want to access your channel-attached tape
	  devices using the block device interface. This interface is similar
	  to CD-ROM devices on other platforms. The tapes can only be
	  accessed read-only when using this interface. Have a look at
	  <file:Documentation/s390/TAPE> for further information about creating
	  volumes for and using this interface. It is safe to say "Y" here.

comment "S/390 tape hardware support"
	depends on S390_TAPE

config S390_TAPE_34XX
	tristate "Support for 3480/3490 tape hardware"
	depends on S390_TAPE
	help
	  Select this option if you want to access IBM 3480/3490 magnetic
	  tape subsystems and 100% compatibles.
	  It is safe to say "Y" here.

config VMLOGRDR
	tristate "Support for the z/VM recording system services (VM only)"
	depends on IUCV
	help
	  Select this option if you want to be able to receive records collected
	  by the z/VM recording system services, e.g. from *LOGREC, *ACCOUNT or
	  *SYMPTOM.
	  This driver depends on the IUCV support driver.

config MONREADER
	tristate "API for reading z/VM monitor service records"
	depends on IUCV
	help
	  Character device driver for reading z/VM monitor service records.

endmenu

menu "Cryptographic devices"

config Z90CRYPT
	tristate "Support for PCI-attached cryptographic adapters"
	default "m"
	help
	  Select this option if you want to use a PCI-attached cryptographic
	  adapter like the PCI Cryptographic Accelerator (PCICA) or the PCI
	  Cryptographic Coprocessor (PCICC). This option is also available
	  as a module called z90crypt.ko.

endmenu
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
new file mode 100644
index 000000000000..c99a2fe92fb0
--- /dev/null
+++ b/drivers/s390/Makefile
@@ -0,0 +1,9 @@
#
# Makefile for the S/390 specific device drivers
#

obj-y += s390mach.o sysinfo.o
obj-y += cio/ block/ char/ crypto/ net/ scsi/

drivers-y += drivers/s390/built-in.o
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
new file mode 100644
index 000000000000..dc1c89dbdb8f
--- /dev/null
+++ b/drivers/s390/block/Kconfig
@@ -0,0 +1,68 @@
if ARCH_S390

comment "S/390 block device drivers"
	depends on ARCH_S390

config BLK_DEV_XPRAM
	tristate "XPRAM disk support"
	depends on ARCH_S390
	help
	  Select this option if you want to use your expanded storage on S/390
	  or zSeries as a disk. This is useful as a _fast_ swap device if you
	  want to access more than 2G of memory when running in 31 bit mode.
	  This option is also available as a module which will be called
	  xpram. If unsure, say "N".

config DCSSBLK
	tristate "DCSSBLK support"
	help
	  Support for DCSS block devices.

config DASD
	tristate "Support for DASD devices"
	depends on CCW
	help
	  Enable this option if you want to access DASDs directly utilizing
	  the S/390's channel subsystem commands. This is necessary for
	  running natively on a single image or an LPAR.

config DASD_PROFILE
	bool "Profiling support for dasd devices"
	depends on DASD
	help
	  Enable this option if you want to see profiling information
	  in /proc/dasd/statistics.

config DASD_ECKD
	tristate "Support for ECKD Disks"
	depends on DASD
	help
	  ECKD devices are the most commonly used devices. You should enable
	  this option unless you are very sure you have no ECKD device.

config DASD_FBA
	tristate "Support for FBA Disks"
	depends on DASD
	help
	  Select this option to be able to access FBA devices. It is safe to
	  say "Y".

config DASD_DIAG
	tristate "Support for DIAG access to Disks"
	depends on DASD && ARCH_S390X = 'n'
	help
	  Select this option if you want to use the Diagnose 250 command to
	  access disks under VM. If you are not running under VM or are
	  unsure what it is, say "N".

config DASD_CMB
	tristate "Compatibility interface for DASD channel measurement blocks"
	depends on DASD
	help
	  This driver provides an additional interface to the channel
	  measurement facility, which is normally accessed through sysfs,
	  with a set of ioctl functions specific to the dasd driver.
	  This is only needed if you want to use applications written for
	  the linux-2.4 dasd channel measurement facility interface.

endif
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
new file mode 100644
index 000000000000..58c6780134f7
--- /dev/null
+++ b/drivers/s390/block/Makefile
@@ -0,0 +1,17 @@
#
# S/390 block devices
#

dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o
dasd_fba_mod-objs  := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o
dasd_diag_mod-objs := dasd_diag.o
dasd_mod-objs      := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \
		      dasd_genhd.o dasd_erp.o

obj-$(CONFIG_DASD)		+= dasd_mod.o
obj-$(CONFIG_DASD_DIAG)		+= dasd_diag_mod.o
obj-$(CONFIG_DASD_ECKD)		+= dasd_eckd_mod.o
obj-$(CONFIG_DASD_FBA)		+= dasd_fba_mod.o
obj-$(CONFIG_DASD_CMB)		+= dasd_cmb.o
obj-$(CONFIG_BLK_DEV_XPRAM)	+= xpram.o
obj-$(CONFIG_DCSSBLK)		+= dcssblk.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
new file mode 100644
index 000000000000..b755bac6ccbc
--- /dev/null
+++ b/drivers/s390/block/dasd.c
@@ -0,0 +1,2065 @@
/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
 *
 * $Revision: 1.158 $
 */

#include <linux/config.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/todclk.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_PARM(dasd, "1-" __MODULE_STRING(256) "s");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_device * device);
static void dasd_setup_queue(struct dasd_device * device);
static void dasd_free_queue(struct dasd_device * device);
static void dasd_flush_request_queue(struct dasd_device *);
static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
static void dasd_flush_ccw_queue(struct dasd_device *, int);
static void dasd_tasklet(struct dasd_device *);
static void do_kick_device(void *data);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *
dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kmalloc(sizeof (struct dasd_device), GFP_ATOMIC);
	if (device == NULL)
		return ERR_PTR(-ENOMEM);
	memset(device, 0, sizeof (struct dasd_device));
	/* open_count = 0 means device online but not in use */
	atomic_set(&device->open_count, -1);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (device->ccw_mem == NULL) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (device->erp_mem == NULL) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	spin_lock_init(&device->request_queue_lock);
	atomic_set (&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	INIT_WORK(&device->kick_work, do_kick_device, device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;

	return device;
}

/*
 * Free memory of a device structure.
 */
void
dasd_free_device(struct dasd_device *device)
{
	if (device->private)
		kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

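/*
 * Illustrative sketch (editorial, not part of the original patch):
 * dasd_alloc_device reports failure via the ERR_PTR/PTR_ERR convention
 * rather than returning NULL, so a hypothetical caller would check the
 * result like this:
 *
 *	struct dasd_device *device = dasd_alloc_device();
 *
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);	 (-ENOMEM on allocation failure)
 *	...
 *	dasd_free_device(device);	 (frees private data and both pools)
 */
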
/*
 * Make a new device known to the system.
 */
static inline int
dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	rc = dasd_alloc_queue(device);
	if (rc) {
		dasd_put_device(device);
		return rc;
	}

	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static inline void
dasd_state_known_to_new(struct dasd_device * device)
{
	/* Forget the discipline information. */
	device->discipline = NULL;
	device->state = DASD_STATE_NEW;

	dasd_free_queue(device);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
}

/*
 * Request the irq line for the device.
 */
static inline int
dasd_state_known_to_basic(struct dasd_device * device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	rc = dasd_gendisk_alloc(device);
	if (rc)
		return rc;

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(device->cdev->dev.bus_id, 0, 2,
					    8 * sizeof (long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_EMERG);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static inline void
dasd_state_basic_to_known(struct dasd_device * device)
{
	dasd_gendisk_free(device);
	dasd_flush_ccw_queue(device, 1);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0 or error)
 * the block device is set up. Either a fake disk is added to allow
 * formatting or a proper device request queue is created.
 */
static inline int
dasd_state_basic_to_ready(struct dasd_device * device)
{
	int rc;

	rc = 0;
	if (device->discipline->do_analysis != NULL)
		rc = device->discipline->do_analysis(device);
	if (rc)
		return rc;
	dasd_setup_queue(device);
	device->state = DASD_STATE_READY;
	if (dasd_scan_partitions(device) != 0)
		device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create a fake disk for formatting.
 */
static inline void
dasd_state_ready_to_basic(struct dasd_device * device)
{
	dasd_flush_ccw_queue(device, 0);
	dasd_destroy_partitions(device);
	dasd_flush_request_queue(device);
	device->blocks = 0;
	device->bp_block = 0;
	device->s2b_shift = 0;
	device->state = DASD_STATE_BASIC;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static inline int
dasd_state_ready_to_online(struct dasd_device * device)
{
	device->state = DASD_STATE_ONLINE;
	dasd_schedule_bh(device);
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static inline void
dasd_state_online_to_ready(struct dasd_device * device)
{
	device->state = DASD_STATE_READY;
}

/*
 * Device startup state changes.
 */
static inline int
dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static inline int
dasd_decrease_state(struct dasd_device *device)
{
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		dasd_state_online_to_ready(device);

	if (device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		dasd_state_ready_to_basic(device);

	if (device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		dasd_state_basic_to_known(device);

	if (device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		dasd_state_known_to_new(device);

	return 0;
}

/*
 * This is the main startup/shutdown routine.
 */
static void
dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc && rc != -EAGAIN)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

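/*
 * Editorial summary (not part of the original patch): the state ladder
 * walked by dasd_increase_state and dasd_decrease_state above is, in
 * ascending order:
 *
 *	DASD_STATE_NEW   -> DASD_STATE_KNOWN   allocate request queue
 *	DASD_STATE_KNOWN -> DASD_STATE_BASIC   gendisk and debug area
 *	DASD_STATE_BASIC -> DASD_STATE_READY   analysis, block queue setup
 *	DASD_STATE_READY -> DASD_STATE_ONLINE  start request processing
 *
 * dasd_change_state walks as many rungs as it can toward device->target
 * and gives up (pinning the target to the current state) on any error
 * other than -EAGAIN.
 */
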
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void
do_kick_device(void *data)
{
	struct dasd_device *device;

	device = (struct dasd_device *) data;
	dasd_change_state(device);
	dasd_schedule_bh(device);
	dasd_put_device(device);
}

void
dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}

/*
 * Set the target state for a device and start the state change.
 */
void
dasd_set_target_state(struct dasd_device *device, int target)
{
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
}

/*
 * Enable a device and wait until it has reached its target state.
 */
static inline int
_wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void
dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, device) \
{ \
	int index; \
	for (index = 0; index < 31 && value >> (2+index); index++); \
	dasd_global_profile.counter[index]++; \
	device->profile.counter[index]++; \
}

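/*
 * Editorial worked example (not part of the original patch): the loop in
 * dasd_profile_counter picks a logarithmic histogram bucket. The final
 * index is the smallest i with (value >> (2 + i)) == 0, capped at 31:
 *
 *	value = 1    -> index = 0
 *	value = 8    -> index = 2	(8 >> 2 = 2, 8 >> 3 = 1, 8 >> 4 = 0)
 *	value = 4096 -> index = 11	(4096 = 2^12, so 4096 >> 13 == 0)
 */
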
/*
 * Add profiling information for cqr before execution.
 */
static inline void
dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
		   struct request *req)
{
	struct list_head *l;
	unsigned int counter;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	/* count the length of the chanq for statistics */
	counter = 0;
	list_for_each(l, &device->ccw_queue)
		if (++counter >= 31)
			break;
	dasd_global_profile.dasd_io_nr_req[counter]++;
	device->profile.dasd_io_nr_req[counter]++;
}

/*
 * Add profiling information for cqr after execution.
 */
static inline void
dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
		 struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = req->nr_sectors;
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof (struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!device->profile.dasd_io_reqs)
		memset(&device->profile, 0,
		       sizeof (struct dasd_profile_info_t));
	device->profile.dasd_io_reqs++;
	device->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, device);
	dasd_profile_counter(tottime, dasd_io_times, device);
	dasd_profile_counter(tottimeps, dasd_io_timps, device);
	dasd_profile_counter(strtime, dasd_io_time1, device);
	dasd_profile_counter(irqtime, dasd_io_time2, device);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
	dasd_profile_counter(endtime, dasd_io_time3, device);
}
#else
#define dasd_profile_start(device, cqr, req) do {} while (0)
#define dasd_profile_end(device, cqr, req) do {} while (0)
#endif				/* CONFIG_DASD_PROFILE */

/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *
dasd_kmalloc_request(char *magic, int cplength, int datasize,
		     struct dasd_device * device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	if ( magic == NULL || datasize > PAGE_SIZE ||
	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
		BUG();

	cqr = kmalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kmalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			if (cqr->cpaddr != NULL)
				kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
		memset(cqr->data, 0, datasize);
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

struct dasd_ccw_req *
dasd_smalloc_request(char *magic, int cplength, int datasize,
		     struct dasd_device * device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	if ( magic == NULL || datasize > PAGE_SIZE ||
	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
		BUG();

	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

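/*
 * Illustrative sketch (editorial, not part of the original patch): a
 * hypothetical discipline would build a trivial one-CCW request from the
 * per-device chunk pool roughly like this; the "XYZ " magic is made up
 * for illustration:
 *
 *	struct dasd_ccw_req *cqr;
 *
 *	cqr = dasd_smalloc_request("XYZ ", 1, 0, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	cqr->cpaddr->cmd_code = CCW_CMD_NOOP;
 *	cqr->device = device;
 *	cqr->retries = 2;
 *	cqr->buildclk = get_clock();
 *	cqr->status = DASD_CQR_FILLED;
 *	...
 *	dasd_sfree_request(cqr, device);  (must match the s-variant)
 */
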
/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void
dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
#ifdef CONFIG_ARCH_S390X
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	if (cqr->cpaddr != NULL)
		kfree(cqr->cpaddr);
	if (cqr->data != NULL)
		kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}

void
dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}

/*
 * Check discipline magic in cqr.
 */
static inline int
dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->device;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DEV_MESSAGE(KERN_WARNING, device,
			    " dasd_ccw_req 0x%08x magic doesn't match"
			    " discipline 0x%08x",
			    cqr->magic,
			    *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int
dasd_term_IO(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	int retries, rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->device;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			if (cqr->retries > 0) {
				cqr->retries--;
				cqr->status = DASD_CQR_CLEAR;
			} else
				cqr->status = DASD_CQR_FAILED;
			cqr->stopclk = get_clock();
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			DEV_MESSAGE(KERN_ERR, device,
				    "line %d unknown RC=%d, please "
				    "report to linux390@de.ibm.com",
				    __LINE__, rc);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_bh(device);
	return rc;
}

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int
dasd_start_IO(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	int rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	device = (struct dasd_device *) cqr->device;
	if (cqr->retries < 0) {
		DEV_MESSAGE(KERN_DEBUG, device,
			    "start_IO: request %p (%02x/%i) - no retry left.",
			    cqr, cqr->status, cqr->retries);
		cqr->status = DASD_CQR_FAILED;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
			      cqr->lpm, 0);
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		DBF_DEV_EVENT(DBF_DEBUG, device,
			      "start_IO: request %p started successful",
			      cqr);
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: selected paths gone,"
			      " retry on all paths");
		break;
	case -ENODEV:
	case -EIO:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device gone, retry");
		break;
	default:
		DEV_MESSAGE(KERN_ERR, device,
			    "line %d unknown RC=%d, please report"
			    " to linux390@de.ibm.com", __LINE__, rc);
		BUG();
		break;
	}
	return rc;
}

/*
 * Timeout function for dasd devices. This is used for different purposes
 *  1) missing interrupt handler for normal operation
 *  2) delayed start of request where start_IO failed with -EBUSY
 *  3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void
dasd_timeout_device(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	device->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void
dasd_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0) {
		if (timer_pending(&device->timer))
			del_timer(&device->timer);
		return;
	}
	if (timer_pending(&device->timer)) {
		if (mod_timer(&device->timer, jiffies + expires))
			return;
	}
	device->timer.function = dasd_timeout_device;
	device->timer.data = (unsigned long) device;
	device->timer.expires = jiffies + expires;
	add_timer(&device->timer);
}

/*
 * Clear timeout for a device.
 */
void
dasd_clear_timer(struct dasd_device *device)
{
	if (timer_pending(&device->timer))
		del_timer(&device->timer);
}

static void
dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status in handle_killed_request: "
			"bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}

	device = (struct dasd_device *) cqr->device;
	if (device == NULL ||
	    device != dasd_device_from_cdev(cdev) ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_clear_timer(device);
	dasd_schedule_bh(device);
	dasd_put_device(device);
}

static void
dasd_handle_state_change_pending(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	struct list_head *l, *n;

	device->stopped &= ~DASD_STOPPED_PENDING;

	/* restart all 'running' IO on queue */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		if (cqr->status == DASD_CQR_IN_IO) {
			cqr->status = DASD_CQR_QUEUED;
		}
	}
	dasd_clear_timer(device);
	dasd_schedule_bh(device);
}

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void
dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		 struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;
	dasd_era_t era;
	char mask;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			dasd_handle_killed_request(cdev, intparm);
			break;
		case -ETIMEDOUT:
			printk(KERN_WARNING"%s(%s): request timed out\n",
			       __FUNCTION__, cdev->dev.bus_id);
			//FIXME - dasd uses own timeout interface...
			break;
		default:
			printk(KERN_WARNING"%s(%s): unknown error %ld\n",
			       __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
		}
		return;
	}

	now = get_clock();

	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
		  cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
		  (unsigned int) intparm);

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((irb->scsw.dstat & mask) == mask) {
		device = dasd_device_from_cdev(cdev);
		if (!IS_ERR(device)) {
			dasd_handle_state_change_pending(device);
			dasd_put_device(device);
		}
		return;
	}

	cqr = (struct dasd_ccw_req *) intparm;

	/* check for unsolicited interrupts */
	if (cqr == NULL) {
		MESSAGE(KERN_DEBUG,
			"unsolicited interrupt received: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	device = (struct dasd_device *) cqr->device;
	if (device == NULL ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR &&
	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_QUEUED;
		dasd_clear_timer(device);
		dasd_schedule_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status: bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}
	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);

	/* Find out the appropriate era_action. */
	if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
		era = dasd_era_fatal;
	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
		 irb->scsw.cstat == 0 &&
		 !irb->esw.esw0.erw.cons)
		era = dasd_era_none;
	else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
		era = dasd_era_fatal; /* don't recover this request */
	else if (irb->esw.esw0.erw.cons)
		era = device->discipline->examine_error(cqr, irb);
	else
		era = dasd_era_recover;

	DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
	expires = 0;
	if (era == dasd_era_none) {
		cqr->status = DASD_CQR_DONE;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->list.next != &device->ccw_queue) {
			next = list_entry(cqr->list.next,
					  struct dasd_ccw_req, list);
			if ((next->status == DASD_CQR_QUEUED) &&
			    (!device->stopped)) {
				if (device->discipline->start_IO(next) == 0)
					expires = next->expires;
				else
					DEV_MESSAGE(KERN_DEBUG, device, "%s",
						    "Interrupt fastpath "
						    "failed!");
			}
		}
	} else {		/* error */
		memcpy(&cqr->irb, irb, sizeof (struct irb));
#ifdef ERP_DEBUG
		/* dump sense data */
		dasd_log_sense(cqr, irb);
#endif
		switch (era) {
		case dasd_era_fatal:
			cqr->status = DASD_CQR_FAILED;
			cqr->stopclk = now;
			break;
		case dasd_era_recover:
			cqr->status = DASD_CQR_ERROR;
			break;
		default:
			BUG();
		}
	}
	if (expires != 0)
		dasd_set_timer(device, expires);
	else
		dasd_clear_timer(device);
	dasd_schedule_bh(device);
}

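/*
 * Editorial summary (not part of the original patch): dasd_int_handler
 * above deals with the possible interrupt sources in this order:
 *  1. error pointers in 'irb' (killed or timed-out requests),
 *  2. state change pending (attention + device end + unit exception set
 *     together), which requeues all running requests,
 *  3. unsolicited interrupts (intparm == 0),
 *  4. completion of a clear function for a terminated request,
 *  5. normal completion, classified into an era code that either finishes
 *     the request (starting the next queued one as a fastpath) or hands
 *     it to error recovery via the tasklet.
 */
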
1033/*
1034 * posts the buffer_cache about a finalized request
1035 */
1036static inline void
1037dasd_end_request(struct request *req, int uptodate)
1038{
1039 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
1040 BUG();
1041 add_disk_randomness(req->rq_disk);
1042 end_that_request_last(req);
1043}
1044
1045/*
1046 * Process finished error recovery ccw.
1047 */
1048static inline void
1049__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
1050{
1051 dasd_erp_fn_t erp_fn;
1052
1053 if (cqr->status == DASD_CQR_DONE)
1054 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1055 else
1056 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
1057 erp_fn = device->discipline->erp_postaction(cqr);
1058 erp_fn(cqr);
1059}
1060
1061/*
1062 * Process ccw request queue.
1063 */
1064static inline void
1065__dasd_process_ccw_queue(struct dasd_device * device,
1066 struct list_head *final_queue)
1067{
1068 struct list_head *l, *n;
1069 struct dasd_ccw_req *cqr;
1070 dasd_erp_fn_t erp_fn;
1071
1072restart:
1073 /* Process request with final status. */
1074 list_for_each_safe(l, n, &device->ccw_queue) {
1075 cqr = list_entry(l, struct dasd_ccw_req, list);
1076 /* Stop list processing at the first non-final request. */
1077 if (cqr->status != DASD_CQR_DONE &&
1078 cqr->status != DASD_CQR_FAILED &&
1079 cqr->status != DASD_CQR_ERROR)
1080 break;
1081 /* Process requests with DASD_CQR_ERROR */
1082 if (cqr->status == DASD_CQR_ERROR) {
1083 if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
1084 cqr->status = DASD_CQR_FAILED;
1085 cqr->stopclk = get_clock();
1086 } else {
1087 if (cqr->irb.esw.esw0.erw.cons) {
1088 erp_fn = device->discipline->
1089 erp_action(cqr);
1090 erp_fn(cqr);
1091 } else
1092 dasd_default_erp_action(cqr);
1093 }
1094 goto restart;
1095 }
1096 /* Process finished ERP request. */
1097 if (cqr->refers) {
1098 __dasd_process_erp(device, cqr);
1099 goto restart;
1100 }
1101
1102 /* Rechain finished requests to final queue */
1103 cqr->endclk = get_clock();
1104 list_move_tail(&cqr->list, final_queue);
1105 }
1106}
1107
1108static void
1109dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
1110{
1111 struct request *req;
1112 struct dasd_device *device;
1113 int status;
1114
1115 req = (struct request *) data;
1116 device = cqr->device;
1117 dasd_profile_end(device, cqr, req);
1118 status = cqr->device->discipline->free_cp(cqr,req);
1119 spin_lock_irq(&device->request_queue_lock);
1120 dasd_end_request(req, status);
1121 spin_unlock_irq(&device->request_queue_lock);
1122}
1123
1124
1125/*
1126 * Fetch requests from the block device queue.
1127 */
1128static inline void
1129__dasd_process_blk_queue(struct dasd_device * device)
1130{
1131 request_queue_t *queue;
1132 struct request *req;
1133 struct dasd_ccw_req *cqr;
1134 int nr_queued;
1135
1136 queue = device->request_queue;
1137 /* No queue ? Then there is nothing to do. */
1138 if (queue == NULL)
1139 return;
1140
1141 /*
1142 * We requeue request from the block device queue to the ccw
1143 * queue only in two states. In state DASD_STATE_READY the
1144 * partition detection is done and we need to requeue requests
1145 * for that. State DASD_STATE_ONLINE is normal block device
1146 * operation.
1147 */
1148 if (device->state != DASD_STATE_READY &&
1149 device->state != DASD_STATE_ONLINE)
1150 return;
1151 nr_queued = 0;
1152 /* Now we try to fetch requests from the request queue */
1153 list_for_each_entry(cqr, &device->ccw_queue, list)
1154 if (cqr->status == DASD_CQR_QUEUED)
1155 nr_queued++;
1156 while (!blk_queue_plugged(queue) &&
1157 elv_next_request(queue) &&
1158 nr_queued < DASD_CHANQ_MAX_SIZE) {
1159 req = elv_next_request(queue);
1160 if (test_bit(DASD_FLAG_RO, &device->flags) &&
1161 rq_data_dir(req) == WRITE) {
1162 DBF_DEV_EVENT(DBF_ERR, device,
1163 "Rejecting write request %p",
1164 req);
1165 blkdev_dequeue_request(req);
1166 dasd_end_request(req, 0);
1167 continue;
1168 }
1169 if (device->stopped & DASD_STOPPED_DC_EIO) {
1170 blkdev_dequeue_request(req);
1171 dasd_end_request(req, 0);
1172 continue;
1173 }
1174 cqr = device->discipline->build_cp(device, req);
1175 if (IS_ERR(cqr)) {
1176 if (PTR_ERR(cqr) == -ENOMEM)
1177 break; /* terminate request queue loop */
1178 DBF_DEV_EVENT(DBF_ERR, device,
1179 "CCW creation failed (rc=%ld) "
1180 "on request %p",
1181 PTR_ERR(cqr), req);
1182 blkdev_dequeue_request(req);
1183 dasd_end_request(req, 0);
1184 continue;
1185 }
1186 cqr->callback = dasd_end_request_cb;
1187 cqr->callback_data = (void *) req;
1188 cqr->status = DASD_CQR_QUEUED;
1189 blkdev_dequeue_request(req);
1190 list_add_tail(&cqr->list, &device->ccw_queue);
1191 dasd_profile_start(device, cqr, req);
1192 nr_queued++;
1193 }
1194}
1195
1196/*
1197 * Take a look at the first request on the ccw queue and check
1198 * if it reached its expire time. If so, terminate the IO.
1199 */
1200static inline void
1201__dasd_check_expire(struct dasd_device * device)
1202{
1203 struct dasd_ccw_req *cqr;
1204
1205 if (list_empty(&device->ccw_queue))
1206 return;
1207 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1208 if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
1209 if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
1210 if (device->discipline->term_IO(cqr) != 0)
1211 /* Hmpf, try again in 1/10 sec */
1212 dasd_set_timer(device, 10);
1213 }
1214 }
1215}
1216
1217/*
1218 * Take a look at the first request on the ccw queue and check
1219 * if it needs to be started.
1220 */
1221static inline void
1222__dasd_start_head(struct dasd_device * device)
1223{
1224 struct dasd_ccw_req *cqr;
1225 int rc;
1226
1227 if (list_empty(&device->ccw_queue))
1228 return;
1229 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1230 if ((cqr->status == DASD_CQR_QUEUED) &&
1231 (!device->stopped)) {
1232 /* try to start the first I/O that can be started */
1233 rc = device->discipline->start_IO(cqr);
1234 if (rc == 0)
1235 dasd_set_timer(device, cqr->expires);
1236 else if (rc == -EACCES) {
1237 dasd_schedule_bh(device);
1238 } else
1239 /* Hmpf, try again in 1/2 sec */
1240 dasd_set_timer(device, 50);
1241 }
1242}
1243
1244/*
1245 * Remove requests from the ccw queue.
1246 */
1247static void
1248dasd_flush_ccw_queue(struct dasd_device * device, int all)
1249{
1250 struct list_head flush_queue;
1251 struct list_head *l, *n;
1252 struct dasd_ccw_req *cqr;
1253
1254 INIT_LIST_HEAD(&flush_queue);
1255 spin_lock_irq(get_ccwdev_lock(device->cdev));
1256 list_for_each_safe(l, n, &device->ccw_queue) {
1257 cqr = list_entry(l, struct dasd_ccw_req, list);
1258 /* Flush all request or only block device requests? */
1259 if (all == 0 && cqr->callback == dasd_end_request_cb)
1260 continue;
1261 if (cqr->status == DASD_CQR_IN_IO)
1262 device->discipline->term_IO(cqr);
1263 if (cqr->status != DASD_CQR_DONE ||
1264 cqr->status != DASD_CQR_FAILED) {
1265 cqr->status = DASD_CQR_FAILED;
1266 cqr->stopclk = get_clock();
1267 }
1268 /* Process finished ERP request. */
1269 if (cqr->refers) {
1270 __dasd_process_erp(device, cqr);
1271 continue;
1272 }
1273 /* Rechain request on device request queue */
1274 cqr->endclk = get_clock();
1275 list_move_tail(&cqr->list, &flush_queue);
1276 }
1277 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1278 /* Now call the callback function of flushed requests */
1279 list_for_each_safe(l, n, &flush_queue) {
1280 cqr = list_entry(l, struct dasd_ccw_req, list);
1281 if (cqr->callback != NULL)
1282 (cqr->callback)(cqr, cqr->callback_data);
1283 }
1284}
1285
1286/*
1287 * Acquire the device lock and process queues for the device.
1288 */
1289static void
1290dasd_tasklet(struct dasd_device * device)
1291{
1292 struct list_head final_queue;
1293 struct list_head *l, *n;
1294 struct dasd_ccw_req *cqr;
1295
1296 atomic_set (&device->tasklet_scheduled, 0);
1297 INIT_LIST_HEAD(&final_queue);
1298 spin_lock_irq(get_ccwdev_lock(device->cdev));
1299 /* Check expire time of first request on the ccw queue. */
1300 __dasd_check_expire(device);
1301 /* Finish off requests on ccw queue */
1302 __dasd_process_ccw_queue(device, &final_queue);
1303 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1304 /* Now call the callback function of requests with final status */
1305 list_for_each_safe(l, n, &final_queue) {
1306 cqr = list_entry(l, struct dasd_ccw_req, list);
1307 list_del(&cqr->list);
1308 if (cqr->callback != NULL)
1309 (cqr->callback)(cqr, cqr->callback_data);
1310 }
1311 spin_lock_irq(&device->request_queue_lock);
1312 spin_lock(get_ccwdev_lock(device->cdev));
1313 /* Get new request from the block device request queue */
1314 __dasd_process_blk_queue(device);
1315 /* Now check if the head of the ccw queue needs to be started. */
1316 __dasd_start_head(device);
1317 spin_unlock(get_ccwdev_lock(device->cdev));
1318 spin_unlock_irq(&device->request_queue_lock);
1319 dasd_put_device(device);
1320}
1321
1322/*
1323 * Schedules a call to dasd_tasklet over the device tasklet.
1324 */
1325void
1326dasd_schedule_bh(struct dasd_device * device)
1327{
1328 /* Protect against rescheduling. */
1329 if (atomic_compare_and_swap (0, 1, &device->tasklet_scheduled))
1330 return;
1331 dasd_get_device(device);
1332 tasklet_hi_schedule(&device->tasklet);
1333}
1334
1335/*
1336 * Queue a request to the head of the ccw_queue. Start the I/O if
1337 * possible.
1338 */
1339void
1340dasd_add_request_head(struct dasd_ccw_req *req)
1341{
1342 struct dasd_device *device;
1343 unsigned long flags;
1344
1345 device = req->device;
1346 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1347 req->status = DASD_CQR_QUEUED;
1348 req->device = device;
1349 list_add(&req->list, &device->ccw_queue);
1350 /* let the bh start the request to keep them in order */
1351 dasd_schedule_bh(device);
1352 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1353}
1354
1355/*
1356 * Queue a request to the tail of the ccw_queue. Start the I/O if
1357 * possible.
1358 */
1359void
1360dasd_add_request_tail(struct dasd_ccw_req *req)
1361{
1362 struct dasd_device *device;
1363 unsigned long flags;
1364
1365 device = req->device;
1366 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1367 req->status = DASD_CQR_QUEUED;
1368 req->device = device;
1369 list_add_tail(&req->list, &device->ccw_queue);
1370 /* let the bh start the request to keep them in order */
1371 dasd_schedule_bh(device);
1372 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1373}
1374
1375/*
1376 * Wakeup callback.
1377 */
1378static void
1379dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1380{
1381 wake_up((wait_queue_head_t *) data);
1382}
1383
1384static inline int
1385_wait_for_wakeup(struct dasd_ccw_req *cqr)
1386{
1387 struct dasd_device *device;
1388 int rc;
1389
1390 device = cqr->device;
1391 spin_lock_irq(get_ccwdev_lock(device->cdev));
1392 rc = cqr->status == DASD_CQR_DONE || cqr->status == DASD_CQR_FAILED;
1393 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1394 return rc;
1395}
1396
1397/*
1398 * Attempts to start a special ccw queue and waits for its completion.
1399 */
1400int
1401dasd_sleep_on(struct dasd_ccw_req * cqr)
1402{
1403 wait_queue_head_t wait_q;
1404 struct dasd_device *device;
1405 int rc;
1406
1407 device = cqr->device;
1408 spin_lock_irq(get_ccwdev_lock(device->cdev));
1409
1410 init_waitqueue_head (&wait_q);
1411 cqr->callback = dasd_wakeup_cb;
1412 cqr->callback_data = (void *) &wait_q;
1413 cqr->status = DASD_CQR_QUEUED;
1414 list_add_tail(&cqr->list, &device->ccw_queue);
1415
1416 /* let the bh start the request to keep them in order */
1417 dasd_schedule_bh(device);
1418
1419 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1420
1421 wait_event(wait_q, _wait_for_wakeup(cqr));
1422
1423 /* Request status is either done or failed. */
1424 rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
1425 return rc;
1426}
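/*
 * Hedged usage sketch (assumption, not taken from this file): a
 * caller that has allocated and filled a cqr waits for it like this;
 * error handling is elided and the names are placeholders:
 *
 *	cqr = dasd_kmalloc_request(magic, cplength, datasize, device);
 *	... build the channel program in cqr->cpaddr ...
 *	rc = dasd_sleep_on(cqr);	returns -EIO if the cqr failed
 *	dasd_kfree_request(cqr, device);
 *
 * dasd_sleep_on overwrites cqr->callback and cqr->callback_data, so
 * the cqr must not have another waiter at the same time.
 */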
1427
1428/*
1429 * Attempts to start a special ccw queue and wait interruptible
1430 * for its completion.
1431 */
1432int
1433dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
1434{
1435 wait_queue_head_t wait_q;
1436 struct dasd_device *device;
1437 int rc, finished;
1438
1439 device = cqr->device;
1440 spin_lock_irq(get_ccwdev_lock(device->cdev));
1441
1442 init_waitqueue_head (&wait_q);
1443 cqr->callback = dasd_wakeup_cb;
1444 cqr->callback_data = (void *) &wait_q;
1445 cqr->status = DASD_CQR_QUEUED;
1446 list_add_tail(&cqr->list, &device->ccw_queue);
1447
1448 /* let the bh start the request to keep them in order */
1449 dasd_schedule_bh(device);
1450 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1451
1452 finished = 0;
1453 while (!finished) {
1454 rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
1455 if (rc != -ERESTARTSYS) {
1456 /* Request status is either done or failed. */
1457 rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
1458 break;
1459 }
1460 spin_lock_irq(get_ccwdev_lock(device->cdev));
1461 if (cqr->status == DASD_CQR_IN_IO &&
1462 device->discipline->term_IO(cqr) == 0) {
1463 list_del(&cqr->list);
1464 finished = 1;
1465 }
1466 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1467 }
1468 return rc;
1469}
1470
1471/*
1472 * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
1473 * for eckd devices) the currently running request has to be terminated and
1474 * put back to status queued before the special request is added to the head
1475 * of the queue. Then the special request is waited on normally.
1476 */
1477static inline int
1478_dasd_term_running_cqr(struct dasd_device *device)
1479{
1480 struct dasd_ccw_req *cqr;
1481 int rc;
1482
1483 if (list_empty(&device->ccw_queue))
1484 return 0;
1485 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1486 rc = device->discipline->term_IO(cqr);
1487 if (rc == 0) {
1488 /* termination successful */
1489 cqr->status = DASD_CQR_QUEUED;
1490 cqr->startclk = cqr->stopclk = 0;
1491 cqr->starttime = 0;
1492 }
1493 return rc;
1494}
1495
1496int
1497dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
1498{
1499 wait_queue_head_t wait_q;
1500 struct dasd_device *device;
1501 int rc;
1502
1503 device = cqr->device;
1504 spin_lock_irq(get_ccwdev_lock(device->cdev));
1505 rc = _dasd_term_running_cqr(device);
1506 if (rc) {
1507 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1508 return rc;
1509 }
1510
1511 init_waitqueue_head (&wait_q);
1512 cqr->callback = dasd_wakeup_cb;
1513 cqr->callback_data = (void *) &wait_q;
1514 cqr->status = DASD_CQR_QUEUED;
1515 list_add(&cqr->list, &device->ccw_queue);
1516
1517 /* let the bh start the request to keep them in order */
1518 dasd_schedule_bh(device);
1519
1520 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1521
1522 wait_event(wait_q, _wait_for_wakeup(cqr));
1523
1524 /* Request status is either done or failed. */
1525 rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
1526 return rc;
1527}
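/*
 * Usage note (descriptive): this variant is intended for requests that
 * must run immediately, such as the eckd steal-lock case named above.
 * The terminated request is left on the queue with status
 * DASD_CQR_QUEUED and is restarted by the bh once the immediate
 * request has completed.
 */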
1528
1529/*
1530 * Cancels a request that was started with dasd_sleep_on_req.
1531 * This is useful for timing out requests. The request will be
1532 * terminated if it is currently in I/O.
1533 * Returns 1 if the request has been terminated.
1534 */
1535int
1536dasd_cancel_req(struct dasd_ccw_req *cqr)
1537{
1538 struct dasd_device *device = cqr->device;
1539 unsigned long flags;
1540 int rc;
1541
1542 rc = 0;
1543 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1544 switch (cqr->status) {
1545 case DASD_CQR_QUEUED:
1546 /* request was not started - just set to failed */
1547 cqr->status = DASD_CQR_FAILED;
1548 break;
1549 case DASD_CQR_IN_IO:
1550 /* request in IO - terminate IO and release again */
1551 if (device->discipline->term_IO(cqr) != 0)
1552 /* what to do if unable to terminate ??????
1553 e.g. not _IN_IO */
1554 cqr->status = DASD_CQR_FAILED;
1555 cqr->stopclk = get_clock();
1556 rc = 1;
1557 break;
1558 case DASD_CQR_DONE:
1559 case DASD_CQR_FAILED:
1560 /* already finished - do nothing */
1561 break;
1562 default:
1563 DEV_MESSAGE(KERN_ALERT, device,
1564 "invalid status %02x in request",
1565 cqr->status);
1566 BUG();
1567
1568 }
1569 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1570 dasd_schedule_bh(device);
1571 return rc;
1572}
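/*
 * Hedged timeout sketch (assumption; the wait queue wiring mirrors
 * dasd_sleep_on above, the names are placeholders):
 *
 *	dasd_add_request_tail(cqr);
 *	if (!wait_event_timeout(wait_q, _wait_for_wakeup(cqr), timeout))
 *		dasd_cancel_req(cqr);
 *
 * dasd_cancel_req returns 1 when a request that was already in i/o
 * had to be terminated.
 */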
1573
1574/*
1575 * SECTION: Block device operations (request queue, partitions, open, release).
1576 */
1577
1578/*
1579 * Dasd request queue function. Called from ll_rw_blk.c
1580 */
1581static void
1582do_dasd_request(request_queue_t * queue)
1583{
1584 struct dasd_device *device;
1585
1586 device = (struct dasd_device *) queue->queuedata;
1587 spin_lock(get_ccwdev_lock(device->cdev));
1588 /* Get new request from the block device request queue */
1589 __dasd_process_blk_queue(device);
1590 /* Now check if the head of the ccw queue needs to be started. */
1591 __dasd_start_head(device);
1592 spin_unlock(get_ccwdev_lock(device->cdev));
1593}
1594
1595/*
1596 * Allocate and initialize request queue and default I/O scheduler.
1597 */
1598static int
1599dasd_alloc_queue(struct dasd_device * device)
1600{
1601 int rc;
1602
1603 device->request_queue = blk_init_queue(do_dasd_request,
1604 &device->request_queue_lock);
1605 if (device->request_queue == NULL)
1606 return -ENOMEM;
1607
1608 device->request_queue->queuedata = device;
1609
1610 elevator_exit(device->request_queue->elevator);
1611 rc = elevator_init(device->request_queue, "deadline");
1612 if (rc) {
1613 blk_cleanup_queue(device->request_queue);
1614 return rc;
1615 }
1616 return 0;
1617}
1618
1619/*
1620 * Set up the request queue limits and block size.
1621 */
1622static void
1623dasd_setup_queue(struct dasd_device * device)
1624{
1625 int max;
1626
1627 blk_queue_hardsect_size(device->request_queue, device->bp_block);
1628 max = device->discipline->max_blocks << device->s2b_shift;
1629 blk_queue_max_sectors(device->request_queue, max);
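	/*
	 * Passing -1L below asks the block layer for the largest value
	 * it accepts for each limit (effectively "no limit"). This is a
	 * reading of the block layer conventions of this kernel
	 * generation, not something stated in this file.
	 */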
1630 blk_queue_max_phys_segments(device->request_queue, -1L);
1631 blk_queue_max_hw_segments(device->request_queue, -1L);
1632 blk_queue_max_segment_size(device->request_queue, -1L);
1633 blk_queue_segment_boundary(device->request_queue, -1L);
1634}
1635
1636/*
1637 * Deactivate and free request queue.
1638 */
1639static void
1640dasd_free_queue(struct dasd_device * device)
1641{
1642 if (device->request_queue) {
1643 blk_cleanup_queue(device->request_queue);
1644 device->request_queue = NULL;
1645 }
1646}
1647
1648/*
1649 * Flush requests on the request queue.
1650 */
1651static void
1652dasd_flush_request_queue(struct dasd_device * device)
1653{
1654 struct request *req;
1655
1656 if (!device->request_queue)
1657 return;
1658
1659 spin_lock_irq(&device->request_queue_lock);
1660 while (!list_empty(&device->request_queue->queue_head)) {
1661 req = elv_next_request(device->request_queue);
1662 if (req == NULL)
1663 break;
1664			blkdev_dequeue_request(req);
1665			dasd_end_request(req, 0);
1666 }
1667 spin_unlock_irq(&device->request_queue_lock);
1668}
1669
1670static int
1671dasd_open(struct inode *inp, struct file *filp)
1672{
1673 struct gendisk *disk = inp->i_bdev->bd_disk;
1674 struct dasd_device *device = disk->private_data;
1675 int rc;
1676
1677 atomic_inc(&device->open_count);
1678 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
1679 rc = -ENODEV;
1680 goto unlock;
1681 }
1682
1683 if (!try_module_get(device->discipline->owner)) {
1684 rc = -EINVAL;
1685 goto unlock;
1686 }
1687
1688 if (dasd_probeonly) {
1689 DEV_MESSAGE(KERN_INFO, device, "%s",
1690 "No access to device due to probeonly mode");
1691 rc = -EPERM;
1692 goto out;
1693 }
1694
1695 if (device->state < DASD_STATE_BASIC) {
1696 DBF_DEV_EVENT(DBF_ERR, device, " %s",
1697 " Cannot open unrecognized device");
1698 rc = -ENODEV;
1699 goto out;
1700 }
1701
1702 return 0;
1703
1704out:
1705 module_put(device->discipline->owner);
1706unlock:
1707 atomic_dec(&device->open_count);
1708 return rc;
1709}
1710
1711static int
1712dasd_release(struct inode *inp, struct file *filp)
1713{
1714 struct gendisk *disk = inp->i_bdev->bd_disk;
1715 struct dasd_device *device = disk->private_data;
1716
1717 atomic_dec(&device->open_count);
1718 module_put(device->discipline->owner);
1719 return 0;
1720}
1721
1722struct block_device_operations
1723dasd_device_operations = {
1724 .owner = THIS_MODULE,
1725 .open = dasd_open,
1726 .release = dasd_release,
1727 .ioctl = dasd_ioctl,
1728};
1729
1730
1731static void
1732dasd_exit(void)
1733{
1734#ifdef CONFIG_PROC_FS
1735 dasd_proc_exit();
1736#endif
1737 dasd_ioctl_exit();
1738 dasd_gendisk_exit();
1739 dasd_devmap_exit();
1740 devfs_remove("dasd");
1741 if (dasd_debug_area != NULL) {
1742 debug_unregister(dasd_debug_area);
1743 dasd_debug_area = NULL;
1744 }
1745}
1746
1747/*
1748 * SECTION: common functions for ccw_driver use
1749 */
1750
1751/* initial attempt at a probe function. this can be simplified once
1752 * the other detection code is gone */
1753int
1754dasd_generic_probe (struct ccw_device *cdev,
1755 struct dasd_discipline *discipline)
1756{
1757 int ret;
1758
1759 ret = dasd_add_sysfs_files(cdev);
1760 if (ret) {
1761 printk(KERN_WARNING
1762 "dasd_generic_probe: could not add sysfs entries "
1763 "for %s\n", cdev->dev.bus_id);
1764 }
1765
1766 cdev->handler = &dasd_int_handler;
1767
1768 return ret;
1769}
1770
1771/* this will one day be called from a global not_oper handler.
1772 * It is also used by driver_unregister during module unload */
1773void
1774dasd_generic_remove (struct ccw_device *cdev)
1775{
1776 struct dasd_device *device;
1777
1778 dasd_remove_sysfs_files(cdev);
1779 device = dasd_device_from_cdev(cdev);
1780 if (IS_ERR(device))
1781 return;
1782 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
1783 /* Already doing offline processing */
1784 dasd_put_device(device);
1785 return;
1786 }
1787 /*
1788 * This device is removed unconditionally. Set offline
1789 * flag to prevent dasd_open from opening it while it is
1790	 * not quite down yet.
1791 */
1792 dasd_set_target_state(device, DASD_STATE_NEW);
1793 /* dasd_delete_device destroys the device reference. */
1794 dasd_delete_device(device);
1795}
1796
1797/* activate a device. This is called from dasd_{eckd,fba}_probe() when either
1798 * the device is detected for the first time and is supposed to be used
1799 * or the user has started activation through sysfs */
1800int
1801dasd_generic_set_online (struct ccw_device *cdev,
1802 struct dasd_discipline *discipline)
1803
1804{
1805 struct dasd_device *device;
1806 int rc;
1807
1808 device = dasd_create_device(cdev);
1809 if (IS_ERR(device))
1810 return PTR_ERR(device);
1811
1812 if (test_bit(DASD_FLAG_USE_DIAG, &device->flags)) {
1813 if (!dasd_diag_discipline_pointer) {
1814 printk (KERN_WARNING
1815 "dasd_generic couldn't online device %s "
1816 "- discipline DIAG not available\n",
1817 cdev->dev.bus_id);
1818 dasd_delete_device(device);
1819 return -ENODEV;
1820 }
1821 discipline = dasd_diag_discipline_pointer;
1822 }
1823 device->discipline = discipline;
1824
1825 rc = discipline->check_device(device);
1826 if (rc) {
1827 printk (KERN_WARNING
1828 "dasd_generic couldn't online device %s "
1829 "with discipline %s rc=%i\n",
1830 cdev->dev.bus_id, discipline->name, rc);
1831 dasd_delete_device(device);
1832 return rc;
1833 }
1834
1835 dasd_set_target_state(device, DASD_STATE_ONLINE);
1836 if (device->state <= DASD_STATE_KNOWN) {
1837 printk (KERN_WARNING
1838 "dasd_generic discipline not found for %s\n",
1839 cdev->dev.bus_id);
1840 rc = -ENODEV;
1841 dasd_set_target_state(device, DASD_STATE_NEW);
1842 dasd_delete_device(device);
1843 } else
1844 pr_debug("dasd_generic device %s found\n",
1845 cdev->dev.bus_id);
1846
1847	/* FIXME: we have to wait for the root device but we don't want
1848	 * to wait for each single device; we want to wait for all of them at once. */
1849 wait_event(dasd_init_waitq, _wait_for_device(device));
1850
1851 dasd_put_device(device);
1852
1853 return rc;
1854}
1855
1856int
1857dasd_generic_set_offline (struct ccw_device *cdev)
1858{
1859 struct dasd_device *device;
1860 int max_count;
1861
1862 device = dasd_device_from_cdev(cdev);
1863 if (IS_ERR(device))
1864 return PTR_ERR(device);
1865 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
1866 /* Already doing offline processing */
1867 dasd_put_device(device);
1868 return 0;
1869 }
1870 /*
1871 * We must make sure that this device is currently not in use.
1872	 * The open_count is increased for every opener, which includes
1873 * the blkdev_get in dasd_scan_partitions. We are only interested
1874 * in the other openers.
1875 */
1876 max_count = device->bdev ? 0 : -1;
1877 if (atomic_read(&device->open_count) > max_count) {
1878 printk (KERN_WARNING "Can't offline dasd device with open"
1879 " count = %i.\n",
1880 atomic_read(&device->open_count));
1881 clear_bit(DASD_FLAG_OFFLINE, &device->flags);
1882 dasd_put_device(device);
1883 return -EBUSY;
1884 }
1885 dasd_set_target_state(device, DASD_STATE_NEW);
1886 /* dasd_delete_device destroys the device reference. */
1887 dasd_delete_device(device);
1888
1889 return 0;
1890}
1891
1892int
1893dasd_generic_notify(struct ccw_device *cdev, int event)
1894{
1895 struct dasd_device *device;
1896 struct dasd_ccw_req *cqr;
1897 unsigned long flags;
1898 int ret;
1899
1900 device = dasd_device_from_cdev(cdev);
1901 if (IS_ERR(device))
1902 return 0;
1903 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1904 ret = 0;
1905 switch (event) {
1906 case CIO_GONE:
1907 case CIO_NO_PATH:
1908 if (device->state < DASD_STATE_BASIC)
1909 break;
1910 /* Device is active. We want to keep it. */
1911 if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
1912 list_for_each_entry(cqr, &device->ccw_queue, list)
1913 if (cqr->status == DASD_CQR_IN_IO)
1914 cqr->status = DASD_CQR_FAILED;
1915 device->stopped |= DASD_STOPPED_DC_EIO;
1916 dasd_schedule_bh(device);
1917 } else {
1918 list_for_each_entry(cqr, &device->ccw_queue, list)
1919 if (cqr->status == DASD_CQR_IN_IO) {
1920 cqr->status = DASD_CQR_QUEUED;
1921 cqr->retries++;
1922 }
1923 device->stopped |= DASD_STOPPED_DC_WAIT;
1924 dasd_set_timer(device, 0);
1925 }
1926 ret = 1;
1927 break;
1928 case CIO_OPER:
1929 /* FIXME: add a sanity check. */
1930 device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
1931 dasd_schedule_bh(device);
1932 ret = 1;
1933 break;
1934 }
1935 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1936 dasd_put_device(device);
1937 return ret;
1938}
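/*
 * Hedged wiring sketch (assumption, not shown in this file): a
 * discipline driver would route path events here from its ccw_driver:
 *
 *	static struct ccw_driver dasd_xxx_driver = {
 *		...
 *		.notify = dasd_generic_notify,
 *	};
 */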
1939
1940/*
1941 * Automatically online either all dasd devices (dasd_autodetect) or
1942 * all devices specified with dasd= parameters.
1943 */
1944void
1945dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver)
1946{
1947 struct device_driver *drv;
1948 struct device *d, *dev;
1949 struct ccw_device *cdev;
1950
1951 drv = get_driver(&dasd_discipline_driver->driver);
1952 down_read(&drv->bus->subsys.rwsem);
1953 dev = NULL;
1954 list_for_each_entry(d, &drv->devices, driver_list) {
1955 dev = get_device(d);
1956 if (!dev)
1957 continue;
1958 cdev = to_ccwdev(dev);
1959 if (dasd_autodetect || dasd_busid_known(cdev->dev.bus_id) == 0)
1960 ccw_device_set_online(cdev);
1961 put_device(dev);
1962 }
1963 up_read(&drv->bus->subsys.rwsem);
1964 put_driver(drv);
1965}
1966
1967static int __init
1968dasd_init(void)
1969{
1970 int rc;
1971
1972 init_waitqueue_head(&dasd_init_waitq);
1973
1974 /* register 'common' DASD debug area, used for all DBF_XXX calls */
1975 dasd_debug_area = debug_register("dasd", 0, 2, 8 * sizeof (long));
1976 if (dasd_debug_area == NULL) {
1977 rc = -ENOMEM;
1978 goto failed;
1979 }
1980 debug_register_view(dasd_debug_area, &debug_sprintf_view);
1981 debug_set_level(dasd_debug_area, DBF_EMERG);
1982
1983 DBF_EVENT(DBF_EMERG, "%s", "debug area created");
1984
1985 dasd_diag_discipline_pointer = NULL;
1986
1987 rc = devfs_mk_dir("dasd");
1988 if (rc)
1989 goto failed;
1990 rc = dasd_devmap_init();
1991 if (rc)
1992 goto failed;
1993 rc = dasd_gendisk_init();
1994 if (rc)
1995 goto failed;
1996 rc = dasd_parse();
1997 if (rc)
1998 goto failed;
1999 rc = dasd_ioctl_init();
2000 if (rc)
2001 goto failed;
2002#ifdef CONFIG_PROC_FS
2003 rc = dasd_proc_init();
2004 if (rc)
2005 goto failed;
2006#endif
2007
2008 return 0;
2009failed:
2010 MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
2011 dasd_exit();
2012 return rc;
2013}
2014
2015module_init(dasd_init);
2016module_exit(dasd_exit);
2017
2018EXPORT_SYMBOL(dasd_debug_area);
2019EXPORT_SYMBOL(dasd_diag_discipline_pointer);
2020
2021EXPORT_SYMBOL(dasd_add_request_head);
2022EXPORT_SYMBOL(dasd_add_request_tail);
2023EXPORT_SYMBOL(dasd_cancel_req);
2024EXPORT_SYMBOL(dasd_clear_timer);
2025EXPORT_SYMBOL(dasd_enable_device);
2026EXPORT_SYMBOL(dasd_int_handler);
2027EXPORT_SYMBOL(dasd_kfree_request);
2028EXPORT_SYMBOL(dasd_kick_device);
2029EXPORT_SYMBOL(dasd_kmalloc_request);
2030EXPORT_SYMBOL(dasd_schedule_bh);
2031EXPORT_SYMBOL(dasd_set_target_state);
2032EXPORT_SYMBOL(dasd_set_timer);
2033EXPORT_SYMBOL(dasd_sfree_request);
2034EXPORT_SYMBOL(dasd_sleep_on);
2035EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2036EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2037EXPORT_SYMBOL(dasd_smalloc_request);
2038EXPORT_SYMBOL(dasd_start_IO);
2039EXPORT_SYMBOL(dasd_term_IO);
2040
2041EXPORT_SYMBOL_GPL(dasd_generic_probe);
2042EXPORT_SYMBOL_GPL(dasd_generic_remove);
2043EXPORT_SYMBOL_GPL(dasd_generic_notify);
2044EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2045EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
2046EXPORT_SYMBOL_GPL(dasd_generic_auto_online);
2047
2048/*
2049 * Overrides for Emacs so that we follow Linus's tabbing style.
2050 * Emacs will notice this stuff at the end of the file and automatically
2051 * adjust the settings for this buffer only. This must remain at the end
2052 * of the file.
2053 * ---------------------------------------------------------------------------
2054 * Local variables:
2055 * c-indent-level: 4
2056 * c-brace-imaginary-offset: 0
2057 * c-brace-offset: -4
2058 * c-argdecl-indent: 4
2059 * c-label-offset: -4
2060 * c-continued-statement-offset: 4
2061 * c-continued-brace-offset: 0
2062 * indent-tabs-mode: 1
2063 * tab-width: 8
2064 * End:
2065 */
diff --git a/drivers/s390/block/dasd_3370_erp.c b/drivers/s390/block/dasd_3370_erp.c
new file mode 100644
index 000000000000..84565c8f584e
--- /dev/null
+++ b/drivers/s390/block/dasd_3370_erp.c
@@ -0,0 +1,104 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_3370_erp.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
6 *
7 * $Revision: 1.9 $
8 */
9
10#define PRINTK_HEADER "dasd_erp(3370)"
11
12#include "dasd_int.h"
13
14
15/*
16 * DASD_3370_ERP_EXAMINE
17 *
18 * DESCRIPTION
19 * Checks only for fatal/no/recover error.
20 * A detailed examination of the sense data is done later outside
21 * the interrupt handler.
22 *
23 * The logic is based on the 'IBM 3880 Storage Control Reference' manual
24 * 'Chapter 7. 3370 Sense Data'.
25 *
26 * RETURN VALUES
27 * dasd_era_none no error
28 * dasd_era_fatal for all fatal (unrecoverable) errors
29 * dasd_era_recover for all others.
30 */
31dasd_era_t
32dasd_3370_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
33{
34 char *sense = irb->ecw;
35
36 /* check for successful execution first */
37 if (irb->scsw.cstat == 0x00 &&
38 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
39 return dasd_era_none;
40 if (sense[0] & 0x80) { /* CMD reject */
41 return dasd_era_fatal;
42 }
43 if (sense[0] & 0x40) { /* Drive offline */
44 return dasd_era_recover;
45 }
46 if (sense[0] & 0x20) { /* Bus out parity */
47 return dasd_era_recover;
48 }
49 if (sense[0] & 0x10) { /* equipment check */
50 if (sense[1] & 0x80) {
51 return dasd_era_fatal;
52 }
53 return dasd_era_recover;
54 }
55 if (sense[0] & 0x08) { /* data check */
56 if (sense[1] & 0x80) {
57 return dasd_era_fatal;
58 }
59 return dasd_era_recover;
60 }
61 if (sense[0] & 0x04) { /* overrun */
62 if (sense[1] & 0x80) {
63 return dasd_era_fatal;
64 }
65 return dasd_era_recover;
66 }
67 if (sense[1] & 0x40) { /* invalid blocksize */
68 return dasd_era_fatal;
69 }
70 if (sense[1] & 0x04) { /* file protected */
71 return dasd_era_recover;
72 }
73 if (sense[1] & 0x01) { /* operation incomplete */
74 return dasd_era_recover;
75 }
76	if (sense[2] & 0x80) {	/* check data error */
77 return dasd_era_recover;
78 }
79 if (sense[2] & 0x10) { /* Env. data present */
80 return dasd_era_recover;
81 }
82 /* examine the 24 byte sense data */
83 return dasd_era_recover;
84
85} /* END dasd_3370_erp_examine */
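/*
 * Hedged usage note: a discipline makes this examine routine known to
 * the common code through its erp hook; field name as in dasd_int.h
 * of this tree (treat the exact wiring as an assumption):
 *
 *	discipline->examine_error = dasd_3370_erp_examine;
 */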
86
87/*
88 * Overrides for Emacs so that we follow Linus's tabbing style.
89 * Emacs will notice this stuff at the end of the file and automatically
90 * adjust the settings for this buffer only. This must remain at the end
91 * of the file.
92 * ---------------------------------------------------------------------------
93 * Local variables:
94 * c-indent-level: 4
95 * c-brace-imaginary-offset: 0
96 * c-brace-offset: -4
97 * c-argdecl-indent: 4
98 * c-label-offset: -4
99 * c-continued-statement-offset: 4
100 * c-continued-brace-offset: 0
101 * indent-tabs-mode: 1
102 * tab-width: 8
103 * End:
104 */
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
new file mode 100644
index 000000000000..c143ecb53d9d
--- /dev/null
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -0,0 +1,2742 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_3990_erp.c
3 * Author(s)......: Horst Hummel <Horst.Hummel@de.ibm.com>
4 * Holger Smolinski <Holger.Smolinski@de.ibm.com>
5 * Bugreports.to..: <Linux390@de.ibm.com>
6 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000, 2001
7 *
8 * $Revision: 1.36 $
9 */
10
11#include <linux/timer.h>
12#include <linux/slab.h>
13#include <asm/idals.h>
14#include <asm/todclk.h>
15
16#define PRINTK_HEADER "dasd_erp(3990): "
17
18#include "dasd_int.h"
19#include "dasd_eckd.h"
20
21
22struct DCTL_data {
23 unsigned char subcommand; /* e.g Inhibit Write, Enable Write,... */
24 unsigned char modifier; /* Subcommand modifier */
25 unsigned short res; /* reserved */
26} __attribute__ ((packed));
27
28/*
29 *****************************************************************************
30 * SECTION ERP EXAMINATION
31 *****************************************************************************
32 */
33
34/*
35 * DASD_3990_ERP_EXAMINE_24
36 *
37 * DESCRIPTION
38 * Checks only for fatal (unrecoverable) error.
39 * A detailed examination of the sense data is done later outside
40 * the interrupt handler.
41 *
42 * Each bit configuration leading to an action code 2 (Exit with
43 * programming error or unusual condition indication)
44 * is handled as a fatal error.
45 *
46 * All other configurations are handled as recoverable errors.
47 *
48 * RETURN VALUES
49 * dasd_era_fatal for all fatal (unrecoverable) errors
50 * dasd_era_recover for all others.
51 */
52static dasd_era_t
53dasd_3990_erp_examine_24(struct dasd_ccw_req * cqr, char *sense)
54{
55
56 struct dasd_device *device = cqr->device;
57
58 /* check for 'Command Reject' */
59 if ((sense[0] & SNS0_CMD_REJECT) &&
60 (!(sense[2] & SNS2_ENV_DATA_PRESENT))) {
61
62 DEV_MESSAGE(KERN_ERR, device, "%s",
63 "EXAMINE 24: Command Reject detected - "
64 "fatal error");
65
66 return dasd_era_fatal;
67 }
68
69 /* check for 'Invalid Track Format' */
70 if ((sense[1] & SNS1_INV_TRACK_FORMAT) &&
71 (!(sense[2] & SNS2_ENV_DATA_PRESENT))) {
72
73 DEV_MESSAGE(KERN_ERR, device, "%s",
74 "EXAMINE 24: Invalid Track Format detected "
75 "- fatal error");
76
77 return dasd_era_fatal;
78 }
79
80 /* check for 'No Record Found' */
81 if (sense[1] & SNS1_NO_REC_FOUND) {
82
83 /* FIXME: fatal error ?!? */
84 DEV_MESSAGE(KERN_ERR, device,
85 "EXAMINE 24: No Record Found detected %s",
86 device->state <= DASD_STATE_BASIC ?
87 " " : "- fatal error");
88
89 return dasd_era_fatal;
90 }
91
92 /* return recoverable for all others */
93 return dasd_era_recover;
94} /* END dasd_3990_erp_examine_24 */
95
96/*
97 * DASD_3990_ERP_EXAMINE_32
98 *
99 * DESCRIPTION
100 * Checks only for fatal/no/recoverable error.
101 * A detailed examination of the sense data is done later outside
102 * the interrupt handler.
103 *
104 * RETURN VALUES
105 * dasd_era_none no error
106 * dasd_era_fatal for all fatal (unrecoverable) errors
107 * dasd_era_recover for all others.
108 */
109static dasd_era_t
110dasd_3990_erp_examine_32(struct dasd_ccw_req * cqr, char *sense)
111{
112
113 struct dasd_device *device = cqr->device;
114
115 switch (sense[25]) {
116 case 0x00:
117 return dasd_era_none;
118
119 case 0x01:
120 DEV_MESSAGE(KERN_ERR, device, "%s", "EXAMINE 32: fatal error");
121
122 return dasd_era_fatal;
123
124 default:
125
126 return dasd_era_recover;
127 }
128
129} /* end dasd_3990_erp_examine_32 */
130
131/*
132 * DASD_3990_ERP_EXAMINE
133 *
134 * DESCRIPTION
135 * Checks only for fatal/no/recover error.
136 * A detailed examination of the sense data is done later outside
137 * the interrupt handler.
138 *
139 * The logic is based on the 'IBM 3990 Storage Control Reference' manual
140 * 'Chapter 7. Error Recovery Procedures'.
141 *
142 * RETURN VALUES
143 * dasd_era_none no error
144 * dasd_era_fatal for all fatal (unrecoverable) errors
145 * dasd_era_recover for all others.
146 */
147dasd_era_t
148dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
149{
150
151 char *sense = irb->ecw;
152 dasd_era_t era = dasd_era_recover;
153 struct dasd_device *device = cqr->device;
154
155 /* check for successful execution first */
156 if (irb->scsw.cstat == 0x00 &&
157 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
158 return dasd_era_none;
159
160 /* distinguish between 24 and 32 byte sense data */
161 if (sense[27] & DASD_SENSE_BIT_0) {
162
163 era = dasd_3990_erp_examine_24(cqr, sense);
164
165 } else {
166
167 era = dasd_3990_erp_examine_32(cqr, sense);
168
169 }
170
171 /* log the erp chain if fatal error occurred */
172 if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) {
173 dasd_log_sense(cqr, irb);
174 dasd_log_ccw(cqr, 0, irb->scsw.cpa);
175 }
176
177 return era;
178
179} /* END dasd_3990_erp_examine */
180
181/*
182 *****************************************************************************
183 * SECTION ERP HANDLING
184 *****************************************************************************
185 */
186/*
187 *****************************************************************************
188 * 24 and 32 byte sense ERP functions
189 *****************************************************************************
190 */
191
192/*
193 * DASD_3990_ERP_CLEANUP
194 *
195 * DESCRIPTION
196 * Removes the already built but no longer needed ERP request and sets
197 * the status of the original cqr / erp to the given (final) status
198 *
199 * PARAMETER
200 * erp request to be blocked
201 * final_status either DASD_CQR_DONE or DASD_CQR_FAILED
202 *
203 * RETURN VALUES
204 * cqr original cqr
205 */
206static struct dasd_ccw_req *
207dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
208{
209 struct dasd_ccw_req *cqr = erp->refers;
210
211 dasd_free_erp_request(erp, erp->device);
212 cqr->status = final_status;
213 return cqr;
214
215} /* end dasd_3990_erp_cleanup */
216
217/*
218 * DASD_3990_ERP_BLOCK_QUEUE
219 *
220 * DESCRIPTION
221 * Block the given device request queue to prevent further
222 * processing until the started timer has expired or a related
223 * interrupt has been received.
224 */
225static void
226dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
227{
228
229 struct dasd_device *device = erp->device;
230
231 DEV_MESSAGE(KERN_INFO, device,
232 "blocking request queue for %is", expires/HZ);
233
234 device->stopped |= DASD_STOPPED_PENDING;
235 erp->status = DASD_CQR_QUEUED;
236
237 dasd_set_timer(device, expires);
238}
239
240/*
241 * DASD_3990_ERP_INT_REQ
242 *
243 * DESCRIPTION
244 * Handles 'Intervention Required' error.
245 * This means the device is either offline or not installed.
246 *
247 * PARAMETER
248 * erp current erp
249 * RETURN VALUES
250 * erp modified erp
251 */
252static struct dasd_ccw_req *
253dasd_3990_erp_int_req(struct dasd_ccw_req * erp)
254{
255
256 struct dasd_device *device = erp->device;
257
258 /* first time set initial retry counter and erp_function */
259 /* and retry once without blocking queue */
260	/* (this enables easier enqueueing of the cqr)                   */
261 if (erp->function != dasd_3990_erp_int_req) {
262
263 erp->retries = 256;
264 erp->function = dasd_3990_erp_int_req;
265
266 } else {
267
268 /* issue a message and wait for 'device ready' interrupt */
269 DEV_MESSAGE(KERN_ERR, device, "%s",
270 "is offline or not installed - "
271 "INTERVENTION REQUIRED!!");
272
273 dasd_3990_erp_block_queue(erp, 60*HZ);
274 }
275
276 return erp;
277
278} /* end dasd_3990_erp_int_req */
279
280/*
281 * DASD_3990_ERP_ALTERNATE_PATH
282 *
283 * DESCRIPTION
284 * Repeat the operation on a different channel path.
285 * If all alternate paths have been tried, the request is posted with a
286 * permanent error.
287 *
288 * PARAMETER
289 * erp pointer to the current ERP
290 *
291 * RETURN VALUES
292 * erp modified pointer to the ERP
293 */
294static void
295dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
296{
297 struct dasd_device *device = erp->device;
298 __u8 opm;
299
300 /* try alternate valid path */
301 opm = ccw_device_get_path_mask(device->cdev);
302 //FIXME: start with get_opm ?
303 if (erp->lpm == 0)
304 erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum);
305 else
306 erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
307
308 if ((erp->lpm & opm) != 0x00) {
309
310 DEV_MESSAGE(KERN_DEBUG, device,
311 "try alternate lpm=%x (lpum=%x / opm=%x)",
312 erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm);
313
314 /* reset status to queued to handle the request again... */
315 if (erp->status > DASD_CQR_QUEUED)
316 erp->status = DASD_CQR_QUEUED;
317 erp->retries = 1;
318 } else {
319 DEV_MESSAGE(KERN_ERR, device,
320 "No alternate channel path left (lpum=%x / "
321 "opm=%x) -> permanent error",
322 erp->irb.esw.esw0.sublog.lpum, opm);
323
324 /* post request with permanent error */
325 if (erp->status > DASD_CQR_QUEUED)
326 erp->status = DASD_CQR_FAILED;
327 }
328} /* end dasd_3990_erp_alternate_path */
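/*
 * Path mask note (descriptive, hedged): lpm is an 8-bit mask with one
 * bit per channel path. Removing the last-path-used mask (lpum) from
 * lpm and ANDing with the operational path mask (opm) leaves the
 * untried usable paths; an empty result means every alternate path
 * has already failed, hence the permanent error above.
 */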
329
330/*
331 * DASD_3990_ERP_DCTL
332 *
333 * DESCRIPTION
334 * Setup cqr to do the Diagnostic Control (DCTL) command with an
335 * Inhibit Write subcommand (0x20) and the given modifier.
336 *
337 * PARAMETER
338 * erp pointer to the current (failed) ERP
339 * modifier subcommand modifier
340 *
341 * RETURN VALUES
342 * dctl_cqr pointer to NEW dctl_cqr
343 *
344 */
345static struct dasd_ccw_req *
346dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
347{
348
349 struct dasd_device *device = erp->device;
350 struct DCTL_data *DCTL_data;
351 struct ccw1 *ccw;
352 struct dasd_ccw_req *dctl_cqr;
353
354 dctl_cqr = dasd_alloc_erp_request((char *) &erp->magic, 1,
355 sizeof (struct DCTL_data),
356 erp->device);
357 if (IS_ERR(dctl_cqr)) {
358 DEV_MESSAGE(KERN_ERR, device, "%s",
359 "Unable to allocate DCTL-CQR");
360 erp->status = DASD_CQR_FAILED;
361 return erp;
362 }
363
364 DCTL_data = dctl_cqr->data;
365
366 DCTL_data->subcommand = 0x02; /* Inhibit Write */
367 DCTL_data->modifier = modifier;
368
369 ccw = dctl_cqr->cpaddr;
370 memset(ccw, 0, sizeof (struct ccw1));
371 ccw->cmd_code = CCW_CMD_DCTL;
372 ccw->count = 4;
373 ccw->cda = (__u32)(addr_t) DCTL_data;
374 dctl_cqr->function = dasd_3990_erp_DCTL;
375 dctl_cqr->refers = erp;
376 dctl_cqr->device = erp->device;
377 dctl_cqr->magic = erp->magic;
378 dctl_cqr->expires = 5 * 60 * HZ;
379 dctl_cqr->retries = 2;
380
381 dctl_cqr->buildclk = get_clock();
382
383 dctl_cqr->status = DASD_CQR_FILLED;
384
385 return dctl_cqr;
386
387} /* end dasd_3990_erp_DCTL */
388
389/*
390 * DASD_3990_ERP_ACTION_1
391 *
392 * DESCRIPTION
393 * Setup ERP to do the ERP action 1 (see Reference manual).
394 * Repeat the operation on a different channel path.
395 * If all alternate paths have been tried, the request is posted with a
396 * permanent error.
397 * Note: duplex handling is not implemented (yet).
398 *
399 * PARAMETER
400 * erp pointer to the current ERP
401 *
402 * RETURN VALUES
403 * erp pointer to the ERP
404 *
405 */
406static struct dasd_ccw_req *
407dasd_3990_erp_action_1(struct dasd_ccw_req * erp)
408{
409
410 erp->function = dasd_3990_erp_action_1;
411
412 dasd_3990_erp_alternate_path(erp);
413
414 return erp;
415
416} /* end dasd_3990_erp_action_1 */
417
418/*
419 * DASD_3990_ERP_ACTION_4
420 *
421 * DESCRIPTION
422 * Setup ERP to do the ERP action 4 (see Reference manual).
423 * Set the current request to PENDING to block the CQR queue for that device
424 * until the state change interrupt appears.
425 * Use a timer (20 seconds) to retry the cqr if the interrupt is still
426 * missing.
427 *
428 * PARAMETER
429 * sense sense data of the actual error
430 * erp pointer to the current ERP
431 *
432 * RETURN VALUES
433 * erp pointer to the ERP
434 *
435 */
436static struct dasd_ccw_req *
437dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
438{
439
440 struct dasd_device *device = erp->device;
441
442 /* first time set initial retry counter and erp_function */
443 /* and retry once without waiting for state change pending */
444	/* interrupt (this enables easier enqueueing of the cqr)      */
445 if (erp->function != dasd_3990_erp_action_4) {
446
447 DEV_MESSAGE(KERN_INFO, device, "%s",
448 "dasd_3990_erp_action_4: first time retry");
449
450 erp->retries = 256;
451 erp->function = dasd_3990_erp_action_4;
452
453 } else {
454
455 if (sense[25] == 0x1D) { /* state change pending */
456
457 DEV_MESSAGE(KERN_INFO, device,
458 "waiting for state change pending "
459 "interrupt, %d retries left",
460 erp->retries);
461
462 dasd_3990_erp_block_queue(erp, 30*HZ);
463
464 } else if (sense[25] == 0x1E) { /* busy */
465 DEV_MESSAGE(KERN_INFO, device,
466 "busy - redriving request later, "
467 "%d retries left",
468 erp->retries);
469 dasd_3990_erp_block_queue(erp, HZ);
470 } else {
471
472 /* no state change pending - retry */
473 DEV_MESSAGE (KERN_INFO, device,
474 "redriving request immediately, "
475 "%d retries left",
476 erp->retries);
477 erp->status = DASD_CQR_QUEUED;
478 }
479 }
480
481 return erp;
482
483} /* end dasd_3990_erp_action_4 */
484
485/*
486 *****************************************************************************
487 * 24 byte sense ERP functions (only)
488 *****************************************************************************
489 */
490
491/*
492 * DASD_3990_ERP_ACTION_5
493 *
494 * DESCRIPTION
495 * Setup ERP to do the ERP action 5 (see Reference manual).
496 * NOTE: Further handling is done in xxx_further_erp after the retries.
497 *
498 * PARAMETER
499 * erp pointer to the current ERP
500 *
501 * RETURN VALUES
502 * erp pointer to the ERP
503 *
504 */
505static struct dasd_ccw_req *
506dasd_3990_erp_action_5(struct dasd_ccw_req * erp)
507{
508
509 /* first of all retry */
510 erp->retries = 10;
511 erp->function = dasd_3990_erp_action_5;
512
513 return erp;
514
515} /* end dasd_3990_erp_action_5 */
516
517/*
518 * DASD_3990_HANDLE_ENV_DATA
519 *
520 * DESCRIPTION
521 * Handles 24 byte 'Environmental data present'.
522 *	Does an analysis of the sense data (message format)
523 * and prints the error messages.
524 *
525 * PARAMETER
526 * sense current sense data
527 *
528 * RETURN VALUES
529 * void
530 */
531static void
532dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
533{
534
535 struct dasd_device *device = erp->device;
536 char msg_format = (sense[7] & 0xF0);
537 char msg_no = (sense[7] & 0x0F);
538
539 switch (msg_format) {
540 case 0x00: /* Format 0 - Program or System Checks */
541
542 if (sense[1] & 0x10) { /* check message to operator bit */
543
544 switch (msg_no) {
545 case 0x00: /* No Message */
546 break;
547 case 0x01:
548 DEV_MESSAGE(KERN_WARNING, device, "%s",
549 "FORMAT 0 - Invalid Command");
550 break;
551 case 0x02:
552 DEV_MESSAGE(KERN_WARNING, device, "%s",
553 "FORMAT 0 - Invalid Command "
554 "Sequence");
555 break;
556 case 0x03:
557 DEV_MESSAGE(KERN_WARNING, device, "%s",
558 "FORMAT 0 - CCW Count less than "
559 "required");
560 break;
561 case 0x04:
562 DEV_MESSAGE(KERN_WARNING, device, "%s",
563 "FORMAT 0 - Invalid Parameter");
564 break;
565 case 0x05:
566 DEV_MESSAGE(KERN_WARNING, device, "%s",
567 "FORMAT 0 - Diagnostic of Sepecial"
568 " Command Violates File Mask");
569 break;
570 case 0x07:
571 DEV_MESSAGE(KERN_WARNING, device, "%s",
572 "FORMAT 0 - Channel Returned with "
573 "Incorrect retry CCW");
574 break;
575 case 0x08:
576 DEV_MESSAGE(KERN_WARNING, device, "%s",
577 "FORMAT 0 - Reset Notification");
578 break;
579 case 0x09:
580 DEV_MESSAGE(KERN_WARNING, device, "%s",
581 "FORMAT 0 - Storage Path Restart");
582 break;
583 case 0x0A:
584 DEV_MESSAGE(KERN_WARNING, device,
585 "FORMAT 0 - Channel requested "
586 "... %02x", sense[8]);
587 break;
588 case 0x0B:
589 DEV_MESSAGE(KERN_WARNING, device, "%s",
590 "FORMAT 0 - Invalid Defective/"
591 "Alternate Track Pointer");
592 break;
593 case 0x0C:
594 DEV_MESSAGE(KERN_WARNING, device, "%s",
595 "FORMAT 0 - DPS Installation "
596 "Check");
597 break;
598 case 0x0E:
599 DEV_MESSAGE(KERN_WARNING, device, "%s",
600 "FORMAT 0 - Command Invalid on "
601 "Secondary Address");
602 break;
603 case 0x0F:
604 DEV_MESSAGE(KERN_WARNING, device,
605 "FORMAT 0 - Status Not As "
606 "Required: reason %02x", sense[8]);
607 break;
608 default:
609 DEV_MESSAGE(KERN_WARNING, device, "%s",
610 "FORMAT 0 - Reseved");
611 }
612 } else {
613 switch (msg_no) {
614 case 0x00: /* No Message */
615 break;
616 case 0x01:
617 DEV_MESSAGE(KERN_WARNING, device, "%s",
618 "FORMAT 0 - Device Error Source");
619 break;
620 case 0x02:
621 DEV_MESSAGE(KERN_WARNING, device, "%s",
622 "FORMAT 0 - Reserved");
623 break;
624 case 0x03:
625 DEV_MESSAGE(KERN_WARNING, device,
626 "FORMAT 0 - Device Fenced - "
627 "device = %02x", sense[4]);
628 break;
629 case 0x04:
630 DEV_MESSAGE(KERN_WARNING, device, "%s",
631 "FORMAT 0 - Data Pinned for "
632 "Device");
633 break;
634 default:
635 DEV_MESSAGE(KERN_WARNING, device, "%s",
636 "FORMAT 0 - Reserved");
637 }
638 }
639 break;
640
641 case 0x10: /* Format 1 - Device Equipment Checks */
642 switch (msg_no) {
643 case 0x00: /* No Message */
644 break;
645 case 0x01:
646 DEV_MESSAGE(KERN_WARNING, device, "%s",
647 "FORMAT 1 - Device Status 1 not as "
648 "expected");
649 break;
650 case 0x03:
651 DEV_MESSAGE(KERN_WARNING, device, "%s",
652 "FORMAT 1 - Index missing");
653 break;
654 case 0x04:
655 DEV_MESSAGE(KERN_WARNING, device, "%s",
656 "FORMAT 1 - Interruption cannot be reset");
657 break;
658 case 0x05:
659 DEV_MESSAGE(KERN_WARNING, device, "%s",
660 "FORMAT 1 - Device did not respond to "
661 "selection");
662 break;
663 case 0x06:
664 DEV_MESSAGE(KERN_WARNING, device, "%s",
665 "FORMAT 1 - Device check-2 error or Set "
666 "Sector is not complete");
667 break;
668 case 0x07:
669 DEV_MESSAGE(KERN_WARNING, device, "%s",
670 "FORMAT 1 - Head address does not "
671 "compare");
672 break;
673 case 0x08:
674 DEV_MESSAGE(KERN_WARNING, device, "%s",
675 "FORMAT 1 - Device status 1 not valid");
676 break;
677 case 0x09:
678 DEV_MESSAGE(KERN_WARNING, device, "%s",
679 "FORMAT 1 - Device not ready");
680 break;
681 case 0x0A:
682 DEV_MESSAGE(KERN_WARNING, device, "%s",
683 "FORMAT 1 - Track physical address did "
684 "not compare");
685 break;
686 case 0x0B:
687 DEV_MESSAGE(KERN_WARNING, device, "%s",
688 "FORMAT 1 - Missing device address bit");
689 break;
690 case 0x0C:
691 DEV_MESSAGE(KERN_WARNING, device, "%s",
692 "FORMAT 1 - Drive motor switch is off");
693 break;
694 case 0x0D:
695 DEV_MESSAGE(KERN_WARNING, device, "%s",
696 "FORMAT 1 - Seek incomplete");
697 break;
698 case 0x0E:
699 DEV_MESSAGE(KERN_WARNING, device, "%s",
700 "FORMAT 1 - Cylinder address did not "
701 "compare");
702 break;
703 case 0x0F:
704 DEV_MESSAGE(KERN_WARNING, device, "%s",
705 "FORMAT 1 - Offset active cannot be "
706 "reset");
707 break;
708 default:
709 DEV_MESSAGE(KERN_WARNING, device, "%s",
710 "FORMAT 1 - Reserved");
711 }
712 break;
713
714 case 0x20: /* Format 2 - 3990 Equipment Checks */
715 switch (msg_no) {
716 case 0x08:
717 DEV_MESSAGE(KERN_WARNING, device, "%s",
718 "FORMAT 2 - 3990 check-2 error");
719 break;
720 case 0x0E:
721 DEV_MESSAGE(KERN_WARNING, device, "%s",
722 "FORMAT 2 - Support facility errors");
723 break;
724 case 0x0F:
725 DEV_MESSAGE(KERN_WARNING, device,
726 "FORMAT 2 - Microcode detected error %02x",
727 sense[8]);
728 break;
729 default:
730 DEV_MESSAGE(KERN_WARNING, device, "%s",
731 "FORMAT 2 - Reserved");
732 }
733 break;
734
735 case 0x30: /* Format 3 - 3990 Control Checks */
736 switch (msg_no) {
737 case 0x0F:
738 DEV_MESSAGE(KERN_WARNING, device, "%s",
739 "FORMAT 3 - Allegiance terminated");
740 break;
741 default:
742 DEV_MESSAGE(KERN_WARNING, device, "%s",
743 "FORMAT 3 - Reserved");
744 }
745 break;
746
747 case 0x40: /* Format 4 - Data Checks */
748 switch (msg_no) {
749 case 0x00:
750 DEV_MESSAGE(KERN_WARNING, device, "%s",
751 "FORMAT 4 - Home address area error");
752 break;
753 case 0x01:
754 DEV_MESSAGE(KERN_WARNING, device, "%s",
755 "FORMAT 4 - Count area error");
756 break;
757 case 0x02:
758 DEV_MESSAGE(KERN_WARNING, device, "%s",
759 "FORMAT 4 - Key area error");
760 break;
761 case 0x03:
762 DEV_MESSAGE(KERN_WARNING, device, "%s",
763 "FORMAT 4 - Data area error");
764 break;
765 case 0x04:
766 DEV_MESSAGE(KERN_WARNING, device, "%s",
767 "FORMAT 4 - No sync byte in home address "
768 "area");
769 break;
770 case 0x05:
771 DEV_MESSAGE(KERN_WARNING, device, "%s",
772 "FORMAT 4 - No sync byte in count address "
773 "area");
774 break;
775 case 0x06:
776 DEV_MESSAGE(KERN_WARNING, device, "%s",
777 "FORMAT 4 - No sync byte in key area");
778 break;
779 case 0x07:
780 DEV_MESSAGE(KERN_WARNING, device, "%s",
781 "FORMAT 4 - No sync byte in data area");
782 break;
783 case 0x08:
784 DEV_MESSAGE(KERN_WARNING, device, "%s",
785 "FORMAT 4 - Home address area error; "
786 "offset active");
787 break;
788 case 0x09:
789 DEV_MESSAGE(KERN_WARNING, device, "%s",
790 "FORMAT 4 - Count area error; offset "
791 "active");
792 break;
793 case 0x0A:
794 DEV_MESSAGE(KERN_WARNING, device, "%s",
795 "FORMAT 4 - Key area error; offset "
796 "active");
797 break;
798 case 0x0B:
799 DEV_MESSAGE(KERN_WARNING, device, "%s",
800 "FORMAT 4 - Data area error; "
801 "offset active");
802 break;
803 case 0x0C:
804 DEV_MESSAGE(KERN_WARNING, device, "%s",
805 "FORMAT 4 - No sync byte in home "
806 "address area; offset active");
807 break;
808 case 0x0D:
809 DEV_MESSAGE(KERN_WARNING, device, "%s",
810 "FORMAT 4 - No syn byte in count "
811 "address area; offset active");
812 break;
813 case 0x0E:
814 DEV_MESSAGE(KERN_WARNING, device, "%s",
815 "FORMAT 4 - No sync byte in key area; "
816 "offset active");
817 break;
818 case 0x0F:
819 DEV_MESSAGE(KERN_WARNING, device, "%s",
820 "FORMAT 4 - No syn byte in data area; "
821 "offset active");
822 break;
823 default:
824 DEV_MESSAGE(KERN_WARNING, device, "%s",
825 "FORMAT 4 - Reserved");
826 }
827 break;
828
829 case 0x50: /* Format 5 - Data Check with displacement information */
830 switch (msg_no) {
831 case 0x00:
832 DEV_MESSAGE(KERN_WARNING, device, "%s",
833 "FORMAT 5 - Data Check in the "
834 "home address area");
835 break;
836 case 0x01:
837 DEV_MESSAGE(KERN_WARNING, device, "%s",
838 "FORMAT 5 - Data Check in the count area");
839 break;
840 case 0x02:
841 DEV_MESSAGE(KERN_WARNING, device, "%s",
842 "FORMAT 5 - Data Check in the key area");
843 break;
844 case 0x03:
845 DEV_MESSAGE(KERN_WARNING, device, "%s",
846 "FORMAT 5 - Data Check in the data area");
847 break;
848 case 0x08:
849 DEV_MESSAGE(KERN_WARNING, device, "%s",
850 "FORMAT 5 - Data Check in the "
851 "home address area; offset active");
852 break;
853 case 0x09:
854 DEV_MESSAGE(KERN_WARNING, device, "%s",
855 "FORMAT 5 - Data Check in the count area; "
856 "offset active");
857 break;
858 case 0x0A:
859 DEV_MESSAGE(KERN_WARNING, device, "%s",
860 "FORMAT 5 - Data Check in the key area; "
861 "offset active");
862 break;
863 case 0x0B:
864 DEV_MESSAGE(KERN_WARNING, device, "%s",
865 "FORMAT 5 - Data Check in the data area; "
866 "offset active");
867 break;
868 default:
869 DEV_MESSAGE(KERN_WARNING, device, "%s",
870 "FORMAT 5 - Reserved");
871 }
872 break;
873
874 case 0x60: /* Format 6 - Usage Statistics/Overrun Errors */
875 switch (msg_no) {
876 case 0x00:
877 DEV_MESSAGE(KERN_WARNING, device, "%s",
878 "FORMAT 6 - Overrun on channel A");
879 break;
880 case 0x01:
881 DEV_MESSAGE(KERN_WARNING, device, "%s",
882 "FORMAT 6 - Overrun on channel B");
883 break;
884 case 0x02:
885 DEV_MESSAGE(KERN_WARNING, device, "%s",
886 "FORMAT 6 - Overrun on channel C");
887 break;
888 case 0x03:
889 DEV_MESSAGE(KERN_WARNING, device, "%s",
890 "FORMAT 6 - Overrun on channel D");
891 break;
892 case 0x04:
893 DEV_MESSAGE(KERN_WARNING, device, "%s",
894 "FORMAT 6 - Overrun on channel E");
895 break;
896 case 0x05:
897 DEV_MESSAGE(KERN_WARNING, device, "%s",
898 "FORMAT 6 - Overrun on channel F");
899 break;
900 case 0x06:
901 DEV_MESSAGE(KERN_WARNING, device, "%s",
902 "FORMAT 6 - Overrun on channel G");
903 break;
904 case 0x07:
905 DEV_MESSAGE(KERN_WARNING, device, "%s",
906 "FORMAT 6 - Overrun on channel H");
907 break;
908 default:
909 DEV_MESSAGE(KERN_WARNING, device, "%s",
910 "FORMAT 6 - Reserved");
911 }
912 break;
913
914 case 0x70: /* Format 7 - Device Connection Control Checks */
915 switch (msg_no) {
916 case 0x00:
917 DEV_MESSAGE(KERN_WARNING, device, "%s",
918 "FORMAT 7 - RCC initiated by a connection "
919 "check alert");
920 break;
921 case 0x01:
922 DEV_MESSAGE(KERN_WARNING, device, "%s",
923 "FORMAT 7 - RCC 1 sequence not "
924 "successful");
925 break;
926 case 0x02:
927 DEV_MESSAGE(KERN_WARNING, device, "%s",
928 "FORMAT 7 - RCC 1 and RCC 2 sequences not "
929 "successful");
930 break;
931 case 0x03:
932 DEV_MESSAGE(KERN_WARNING, device, "%s",
933 "FORMAT 7 - Invalid tag-in during "
934 "selection sequence");
935 break;
936 case 0x04:
937 DEV_MESSAGE(KERN_WARNING, device, "%s",
938 "FORMAT 7 - extra RCC required");
939 break;
940 case 0x05:
941 DEV_MESSAGE(KERN_WARNING, device, "%s",
942 "FORMAT 7 - Invalid DCC selection "
943 "response or timeout");
944 break;
945 case 0x06:
946 DEV_MESSAGE(KERN_WARNING, device, "%s",
947 "FORMAT 7 - Missing end operation; device "
948 "transfer complete");
949 break;
950 case 0x07:
951 DEV_MESSAGE(KERN_WARNING, device, "%s",
952 "FORMAT 7 - Missing end operation; device "
953 "transfer incomplete");
954 break;
955 case 0x08:
956 DEV_MESSAGE(KERN_WARNING, device, "%s",
957 "FORMAT 7 - Invalid tag-in for an "
958 "immediate command sequence");
959 break;
960 case 0x09:
961 DEV_MESSAGE(KERN_WARNING, device, "%s",
962 "FORMAT 7 - Invalid tag-in for an "
963 "extended command sequence");
964 break;
965 case 0x0A:
966 DEV_MESSAGE(KERN_WARNING, device, "%s",
967 "FORMAT 7 - 3990 microcode time out when "
968 "stopping selection");
969 break;
970 case 0x0B:
971 DEV_MESSAGE(KERN_WARNING, device, "%s",
972 "FORMAT 7 - No response to selection "
973 "after a poll interruption");
974 break;
975 case 0x0C:
976 DEV_MESSAGE(KERN_WARNING, device, "%s",
977 "FORMAT 7 - Permanent path error (DASD "
978 "controller not available)");
979 break;
980 case 0x0D:
981 DEV_MESSAGE(KERN_WARNING, device, "%s",
982 "FORMAT 7 - DASD controller not available"
983 " on disconnected command chain");
984 break;
985 default:
986 DEV_MESSAGE(KERN_WARNING, device, "%s",
987 "FORMAT 7 - Reserved");
988 }
989 break;
990
991 case 0x80: /* Format 8 - Additional Device Equipment Checks */
992 switch (msg_no) {
993 case 0x00: /* No Message */
994 case 0x01:
995 DEV_MESSAGE(KERN_WARNING, device, "%s",
996 "FORMAT 8 - Error correction code "
997 "hardware fault");
998 break;
999 case 0x03:
1000 DEV_MESSAGE(KERN_WARNING, device, "%s",
1001 "FORMAT 8 - Unexpected end operation "
1002 "response code");
1003 break;
1004 case 0x04:
1005 DEV_MESSAGE(KERN_WARNING, device, "%s",
1006 "FORMAT 8 - End operation with transfer "
1007 "count not zero");
1008 break;
1009 case 0x05:
1010 DEV_MESSAGE(KERN_WARNING, device, "%s",
1011 "FORMAT 8 - End operation with transfer "
1012 "count zero");
1013 break;
1014 case 0x06:
1015 DEV_MESSAGE(KERN_WARNING, device, "%s",
1016 "FORMAT 8 - DPS checks after a system "
1017 "reset or selective reset");
1018 break;
1019 case 0x07:
1020 DEV_MESSAGE(KERN_WARNING, device, "%s",
1021 "FORMAT 8 - DPS cannot be filled");
1022 break;
1023 case 0x08:
1024 DEV_MESSAGE(KERN_WARNING, device, "%s",
1025 "FORMAT 8 - Short busy time-out during "
1026 "device selection");
1027 break;
1028 case 0x09:
1029 DEV_MESSAGE(KERN_WARNING, device, "%s",
1030 "FORMAT 8 - DASD controller failed to "
1031 "set or reset the long busy latch");
1032 break;
1033 case 0x0A:
1034 DEV_MESSAGE(KERN_WARNING, device, "%s",
1035 "FORMAT 8 - No interruption from device "
1036 "during a command chain");
1037 break;
1038 default:
1039 DEV_MESSAGE(KERN_WARNING, device, "%s",
1040 "FORMAT 8 - Reserved");
1041 }
1042 break;
1043
1044 case 0x90: /* Format 9 - Device Read, Write, and Seek Checks */
1045 switch (msg_no) {
1046 case 0x00:
1047 break; /* No Message */
1048 case 0x06:
1049 DEV_MESSAGE(KERN_WARNING, device, "%s",
1050 "FORMAT 9 - Device check-2 error");
1051 break;
1052 case 0x07:
1053 DEV_MESSAGE(KERN_WARNING, device, "%s",
1054 "FORMAT 9 - Head address did not compare");
1055 break;
1056 case 0x0A:
1057 DEV_MESSAGE(KERN_WARNING, device, "%s",
1058 "FORMAT 9 - Track physical address did "
1059 "not compare while oriented");
1060 break;
1061 case 0x0E:
1062 DEV_MESSAGE(KERN_WARNING, device, "%s",
1063 "FORMAT 9 - Cylinder address did not "
1064 "compare");
1065 break;
1066 default:
1067 DEV_MESSAGE(KERN_WARNING, device, "%s",
1068 "FORMAT 9 - Reserved");
1069 }
1070 break;
1071
1072 case 0xF0: /* Format F - Cache Storage Checks */
1073 switch (msg_no) {
1074 case 0x00:
1075 DEV_MESSAGE(KERN_WARNING, device, "%s",
1076 "FORMAT F - Operation Terminated");
1077 break;
1078 case 0x01:
1079 DEV_MESSAGE(KERN_WARNING, device, "%s",
1080 "FORMAT F - Subsystem Processing Error");
1081 break;
1082 case 0x02:
1083 DEV_MESSAGE(KERN_WARNING, device, "%s",
1084 "FORMAT F - Cache or nonvolatile storage "
1085 "equipment failure");
1086 break;
1087 case 0x04:
1088 DEV_MESSAGE(KERN_WARNING, device, "%s",
1089 "FORMAT F - Caching terminated");
1090 break;
1091 case 0x06:
1092 DEV_MESSAGE(KERN_WARNING, device, "%s",
1093 "FORMAT F - Cache fast write access not "
1094 "authorized");
1095 break;
1096 case 0x07:
1097 DEV_MESSAGE(KERN_WARNING, device, "%s",
1098 "FORMAT F - Track format incorrect");
1099 break;
1100 case 0x09:
1101 DEV_MESSAGE(KERN_WARNING, device, "%s",
1102 "FORMAT F - Caching reinitiated");
1103 break;
1104 case 0x0A:
1105 DEV_MESSAGE(KERN_WARNING, device, "%s",
1106 "FORMAT F - Nonvolatile storage "
1107 "terminated");
1108 break;
1109 case 0x0B:
1110 DEV_MESSAGE(KERN_WARNING, device, "%s",
1111 "FORMAT F - Volume is suspended duplex");
1112 break;
1113 case 0x0C:
1114 DEV_MESSAGE(KERN_WARNING, device, "%s",
1115 "FORMAT F - Subsystem status connot be "
1116 "determined");
1117 break;
1118 case 0x0D:
1119 DEV_MESSAGE(KERN_WARNING, device, "%s",
1120 "FORMAT F - Caching status reset to "
1121 "default");
1122 break;
1123 case 0x0E:
1124 DEV_MESSAGE(KERN_WARNING, device, "%s",
1125 "FORMAT F - DASD Fast Write inhibited");
1126 break;
1127 default:
1128 DEV_MESSAGE(KERN_WARNING, device, "%s",
1129 "FORMAT D - Reserved");
1130 }
1131 break;
1132
1133 default: /* unknown message format - should not happen */
1134 DEV_MESSAGE (KERN_WARNING, device,
1135 "unknown message format %02x",
1136 msg_format);
1137 break;
1138 } /* end switch message format */
1139
1140} /* end dasd_3990_handle_env_data */
1141
1142/*
1143 * DASD_3990_ERP_COM_REJ
1144 *
1145 * DESCRIPTION
1146 * Handles 24 byte 'Command Reject' error.
1147 *
1148 * PARAMETER
1149 * erp current erp_head
1150 * sense current sense data
1151 *
1152 * RETURN VALUES
1153 * erp 'new' erp_head - pointer to new ERP
1154 */
1155static struct dasd_ccw_req *
1156dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
1157{
1158
1159 struct dasd_device *device = erp->device;
1160
1161 erp->function = dasd_3990_erp_com_rej;
1162
1163 /* env data present (ACTION 10 - retry should work) */
1164 if (sense[2] & SNS2_ENV_DATA_PRESENT) {
1165
1166 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1167 "Command Reject - environmental data present");
1168
1169 dasd_3990_handle_env_data(erp, sense);
1170
1171 erp->retries = 5;
1172
1173 } else {
1174 /* fatal error - set status to FAILED */
1175 DEV_MESSAGE(KERN_ERR, device, "%s",
1176 "Command Reject - Fatal error");
1177
1178 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
1179 }
1180
1181 return erp;
1182
1183} /* end dasd_3990_erp_com_rej */
1184
1185/*
1186 * DASD_3990_ERP_BUS_OUT
1187 *
1188 * DESCRIPTION
1189 * Handles 24 byte 'Bus Out Parity Check' error.
1190 *
1191 * PARAMETER
1192 * erp current erp_head
1193 * RETURN VALUES
1194 * erp new erp_head - pointer to new ERP
1195 */
1196static struct dasd_ccw_req *
1197dasd_3990_erp_bus_out(struct dasd_ccw_req * erp)
1198{
1199
1200 struct dasd_device *device = erp->device;
1201
1202 /* first time set initial retry counter and erp_function */
1203 /* and retry once without blocking queue */
1204	/* (this enables easier enqueueing of the cqr)                   */
1205 if (erp->function != dasd_3990_erp_bus_out) {
1206 erp->retries = 256;
1207 erp->function = dasd_3990_erp_bus_out;
1208
1209 } else {
1210
1211 /* issue a message and wait for 'device ready' interrupt */
1212 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1213 "bus out parity error or BOPC requested by "
1214 "channel");
1215
1216 dasd_3990_erp_block_queue(erp, 60*HZ);
1217
1218 }
1219
1220 return erp;
1221
1222} /* end dasd_3990_erp_bus_out */
1223
1224/*
1225 * DASD_3990_ERP_EQUIP_CHECK
1226 *
1227 * DESCRIPTION
1228 * Handles 24 byte 'Equipment Check' error.
1229 *
1230 * PARAMETER
1231 * erp current erp_head
1232 * RETURN VALUES
1233 * erp new erp_head - pointer to new ERP
1234 */
1235static struct dasd_ccw_req *
1236dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
1237{
1238
1239 struct dasd_device *device = erp->device;
1240
1241 erp->function = dasd_3990_erp_equip_check;
1242
1243 if (sense[1] & SNS1_WRITE_INHIBITED) {
1244
1245 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1246 "Write inhibited path encountered");
1247
1248 /* vary path offline */
1249 DEV_MESSAGE(KERN_ERR, device, "%s",
1250 "Path should be varied off-line. "
1251 "This is not implemented yet \n - please report "
1252 "to linux390@de.ibm.com");
1253
1254 erp = dasd_3990_erp_action_1(erp);
1255
1256 } else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
1257
1258 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1259 "Equipment Check - " "environmental data present");
1260
1261 dasd_3990_handle_env_data(erp, sense);
1262
1263 erp = dasd_3990_erp_action_4(erp, sense);
1264
1265 } else if (sense[1] & SNS1_PERM_ERR) {
1266
1267 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1268 "Equipment Check - retry exhausted or "
1269 "undesirable");
1270
1271 erp = dasd_3990_erp_action_1(erp);
1272
1273 } else {
1274 /* all other equipment checks - Action 5 */
1275 /* rest is done when retries == 0 */
1276 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1277 "Equipment check or processing error");
1278
1279 erp = dasd_3990_erp_action_5(erp);
1280 }
1281
1282 return erp;
1283
1284} /* end dasd_3990_erp_equip_check */
1285
1286/*
1287 * DASD_3990_ERP_DATA_CHECK
1288 *
1289 * DESCRIPTION
1290 * Handles 24 byte 'Data Check' error.
1291 *
1292 * PARAMETER
1293 * erp current erp_head
1294 * RETURN VALUES
1295 * erp new erp_head - pointer to new ERP
1296 */
1297static struct dasd_ccw_req *
1298dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
1299{
1300
1301 struct dasd_device *device = erp->device;
1302
1303 erp->function = dasd_3990_erp_data_check;
1304
1305 if (sense[2] & SNS2_CORRECTABLE) { /* correctable data check */
1306
1307 /* issue message that the data has been corrected */
1308 DEV_MESSAGE(KERN_EMERG, device, "%s",
1309 "Data recovered during retry with PCI "
1310 "fetch mode active");
1311
1312 /* not possible to handle this situation in Linux */
1313 panic("No way to inform application about the possibly "
1314 "incorrect data");
1315
1316 } else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
1317
1318 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1319 "Uncorrectable data check recovered secondary "
1320 "addr of duplex pair");
1321
1322 erp = dasd_3990_erp_action_4(erp, sense);
1323
1324 } else if (sense[1] & SNS1_PERM_ERR) {
1325
1326 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1327 "Uncorrectable data check with internal "
1328 "retry exhausted");
1329
1330 erp = dasd_3990_erp_action_1(erp);
1331
1332 } else {
1333 /* all other data checks */
1334 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1335 "Uncorrectable data check with retry count "
1336 "exhausted...");
1337
1338 erp = dasd_3990_erp_action_5(erp);
1339 }
1340
1341 return erp;
1342
1343} /* end dasd_3990_erp_data_check */
1344
1345/*
1346 * DASD_3990_ERP_OVERRUN
1347 *
1348 * DESCRIPTION
1349 * Handles 24 byte 'Overrun' error.
1350 *
1351 * PARAMETER
1352 * erp current erp_head
1353 * RETURN VALUES
1354 * erp new erp_head - pointer to new ERP
1355 */
1356static struct dasd_ccw_req *
1357dasd_3990_erp_overrun(struct dasd_ccw_req * erp, char *sense)
1358{
1359
1360 struct dasd_device *device = erp->device;
1361
1362 erp->function = dasd_3990_erp_overrun;
1363
1364 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1365 "Overrun - service overrun or overrun"
1366 " error requested by channel");
1367
1368 erp = dasd_3990_erp_action_5(erp);
1369
1370 return erp;
1371
1372} /* end dasd_3990_erp_overrun */
1373
1374/*
1375 * DASD_3990_ERP_INV_FORMAT
1376 *
1377 * DESCRIPTION
1378 * Handles 24 byte 'Invalid Track Format' error.
1379 *
1380 * PARAMETER
1381 * erp current erp_head
1382 * RETURN VALUES
1383 * erp new erp_head - pointer to new ERP
1384 */
1385static struct dasd_ccw_req *
1386dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
1387{
1388
1389 struct dasd_device *device = erp->device;
1390
1391 erp->function = dasd_3990_erp_inv_format;
1392
1393 if (sense[2] & SNS2_ENV_DATA_PRESENT) {
1394
1395 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1396 "Track format error when destaging or "
1397 "staging data");
1398
1399 dasd_3990_handle_env_data(erp, sense);
1400
1401 erp = dasd_3990_erp_action_4(erp, sense);
1402
1403 } else {
1404 DEV_MESSAGE(KERN_ERR, device, "%s",
1405 "Invalid Track Format - Fatal error should have "
1406 "been handled within the interrupt handler");
1407
1408 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
1409 }
1410
1411 return erp;
1412
1413} /* end dasd_3990_erp_inv_format */
1414
1415/*
1416 * DASD_3990_ERP_EOC
1417 *
1418 * DESCRIPTION
1419 * Handles 24 byte 'End-of-Cylinder' error.
1420 *
1421 * PARAMETER
1422 * erp already added default erp
1423 * RETURN VALUES
1424 * erp pointer to original (failed) cqr.
1425 */
1426static struct dasd_ccw_req *
1427dasd_3990_erp_EOC(struct dasd_ccw_req * default_erp, char *sense)
1428{
1429
1430 struct dasd_device *device = default_erp->device;
1431
1432 DEV_MESSAGE(KERN_ERR, device, "%s",
1433 "End-of-Cylinder - must never happen");
1434
1435 /* implement action 7 - BUG */
1436 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
1437
1438} /* end dasd_3990_erp_EOC */
1439
1440/*
1441 * DASD_3990_ERP_ENV_DATA
1442 *
1443 * DESCRIPTION
1444 * Handles 24 byte 'Environmental-Data Present' error.
1445 *
1446 * PARAMETER
1447 * erp current erp_head
1448 * RETURN VALUES
1449 * erp new erp_head - pointer to new ERP
1450 */
1451static struct dasd_ccw_req *
1452dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense)
1453{
1454
1455 struct dasd_device *device = erp->device;
1456
1457 erp->function = dasd_3990_erp_env_data;
1458
1459 DEV_MESSAGE(KERN_DEBUG, device, "%s", "Environmental data present");
1460
1461 dasd_3990_handle_env_data(erp, sense);
1462
1463 /* don't retry on disabled interface */
1464 if (sense[7] != 0x0F) {
1465
1466 erp = dasd_3990_erp_action_4(erp, sense);
1467 } else {
1468
1469 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_IN_IO);
1470 }
1471
1472 return erp;
1473
1474} /* end dasd_3990_erp_env_data */
1475
1476/*
1477 * DASD_3990_ERP_NO_REC
1478 *
1479 * DESCRIPTION
1480 * Handles 24 byte 'No Record Found' error.
1481 *
1482 * PARAMETER
1483 * erp already added default ERP
1484 *
1485 * RETURN VALUES
1486 * erp new erp_head - pointer to new ERP
1487 */
1488static struct dasd_ccw_req *
1489dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense)
1490{
1491
1492 struct dasd_device *device = default_erp->device;
1493
1494 DEV_MESSAGE(KERN_ERR, device, "%s",
1495 "No Record Found - Fatal error should "
1496 "have been handled within the interrupt handler");
1497
1498 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
1499
1500} /* end dasd_3990_erp_no_rec */
1501
1502/*
1503 * DASD_3990_ERP_FILE_PROT
1504 *
1505 * DESCRIPTION
1506 * Handles 24 byte 'File Protected' error.
1507 * Note: Seek related recovery is not implemented because
1508 * we don't use the seek command yet.
1509 *
1510 * PARAMETER
1511 * erp current erp_head
1512 * RETURN VALUES
1513 * erp new erp_head - pointer to new ERP
1514 */
1515static struct dasd_ccw_req *
1516dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
1517{
1518
1519 struct dasd_device *device = erp->device;
1520
1521 DEV_MESSAGE(KERN_ERR, device, "%s", "File Protected");
1522
1523 return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
1524
1525} /* end dasd_3990_erp_file_prot */
1526
1527/*
1528 * DASD_3990_ERP_INSPECT_24
1529 *
1530 * DESCRIPTION
1531 * Does a detailed inspection of the 24 byte sense data
1532 * and sets up a related error recovery action.
1533 *
1534 * PARAMETER
1535 * sense sense data of the actual error
1536 * erp pointer to the currently created default ERP
1537 *
1538 * RETURN VALUES
1539 * erp pointer to the (additional) ERP
1540 */
1541static struct dasd_ccw_req *
1542dasd_3990_erp_inspect_24(struct dasd_ccw_req * erp, char *sense)
1543{
1544
1545 struct dasd_ccw_req *erp_filled = NULL;
1546
1547 /* Check sense for .... */
1548 /* 'Command Reject' */
1549 if ((erp_filled == NULL) && (sense[0] & SNS0_CMD_REJECT)) {
1550 erp_filled = dasd_3990_erp_com_rej(erp, sense);
1551 }
1552 /* 'Intervention Required' */
1553 if ((erp_filled == NULL) && (sense[0] & SNS0_INTERVENTION_REQ)) {
1554 erp_filled = dasd_3990_erp_int_req(erp);
1555 }
1556 /* 'Bus Out Parity Check' */
1557 if ((erp_filled == NULL) && (sense[0] & SNS0_BUS_OUT_CHECK)) {
1558 erp_filled = dasd_3990_erp_bus_out(erp);
1559 }
1560 /* 'Equipment Check' */
1561 if ((erp_filled == NULL) && (sense[0] & SNS0_EQUIPMENT_CHECK)) {
1562 erp_filled = dasd_3990_erp_equip_check(erp, sense);
1563 }
1564 /* 'Data Check' */
1565 if ((erp_filled == NULL) && (sense[0] & SNS0_DATA_CHECK)) {
1566 erp_filled = dasd_3990_erp_data_check(erp, sense);
1567 }
1568 /* 'Overrun' */
1569 if ((erp_filled == NULL) && (sense[0] & SNS0_OVERRUN)) {
1570 erp_filled = dasd_3990_erp_overrun(erp, sense);
1571 }
1572 /* 'Invalid Track Format' */
1573 if ((erp_filled == NULL) && (sense[1] & SNS1_INV_TRACK_FORMAT)) {
1574 erp_filled = dasd_3990_erp_inv_format(erp, sense);
1575 }
1576 /* 'End-of-Cylinder' */
1577 if ((erp_filled == NULL) && (sense[1] & SNS1_EOC)) {
1578 erp_filled = dasd_3990_erp_EOC(erp, sense);
1579 }
1580 /* 'Environmental Data' */
1581 if ((erp_filled == NULL) && (sense[2] & SNS2_ENV_DATA_PRESENT)) {
1582 erp_filled = dasd_3990_erp_env_data(erp, sense);
1583 }
1584 /* 'No Record Found' */
1585 if ((erp_filled == NULL) && (sense[1] & SNS1_NO_REC_FOUND)) {
1586 erp_filled = dasd_3990_erp_no_rec(erp, sense);
1587 }
1588 /* 'File Protected' */
1589 if ((erp_filled == NULL) && (sense[1] & SNS1_FILE_PROTECTED)) {
1590 erp_filled = dasd_3990_erp_file_prot(erp);
1591 }
1592 /* other (unknown) error - do default ERP */
1593 if (erp_filled == NULL) {
1594
1595 erp_filled = erp;
1596 }
1597
1598 return erp_filled;
1599
1600} /* END dasd_3990_erp_inspect_24 */
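
The routine above is a strict priority cascade: the erp_filled == NULL guard on every branch means the first matching sense condition claims the error and all later checks are skipped. Below is a minimal, self-contained sketch of that first-match dispatch; the SNS0_* mask values follow the usual 24-byte ECKD sense layout but are assumptions here, not copied from the driver headers.

#include <stdio.h>

/* byte 0 of the 24-byte sense, MSB-first bit numbering (assumed values) */
#define SNS0_CMD_REJECT         0x80
#define SNS0_INTERVENTION_REQ   0x40
#define SNS0_BUS_OUT_CHECK      0x20
#define SNS0_EQUIPMENT_CHECK    0x10
#define SNS0_DATA_CHECK         0x08
#define SNS0_OVERRUN            0x04

static const char *
classify_sense24(const unsigned char *sense)
{
        /* first matching condition wins - same order as the driver */
        if (sense[0] & SNS0_CMD_REJECT)
                return "command reject";
        if (sense[0] & SNS0_INTERVENTION_REQ)
                return "intervention required";
        if (sense[0] & SNS0_BUS_OUT_CHECK)
                return "bus out parity check";
        if (sense[0] & SNS0_EQUIPMENT_CHECK)
                return "equipment check";
        if (sense[0] & SNS0_DATA_CHECK)
                return "data check";
        if (sense[0] & SNS0_OVERRUN)
                return "overrun";
        return "default ERP (plain retry)";
}

int
main(void)
{
        /* equipment check and data check set at once: equipment check wins */
        unsigned char sense[32] = { SNS0_EQUIPMENT_CHECK | SNS0_DATA_CHECK };

        printf("%s\n", classify_sense24(sense));
        return 0;
}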
1601
1602/*
1603 *****************************************************************************
1604 * 32 byte sense ERP functions (only)
1605 *****************************************************************************
1606 */
1607
1608/*
1609 * DASD_3990_ERP_ACTION_10_32
1610 *
1611 * DESCRIPTION
1612 * Handles 32 byte 'Action 10' of Single Program Action Codes.
1613 * Just retry and if retry doesn't work, return with error.
1614 *
1615 * PARAMETER
1616 * erp current erp_head
1617 * sense current sense data
1618 * RETURN VALUES
1619 * erp modified erp_head
1620 */
1621static struct dasd_ccw_req *
1622dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense)
1623{
1624
1625 struct dasd_device *device = erp->device;
1626
1627 erp->retries = 256;
1628 erp->function = dasd_3990_erp_action_10_32;
1629
1630 DEV_MESSAGE(KERN_DEBUG, device, "%s", "Perform logging requested");
1631
1632 return erp;
1633
1634} /* end dasd_3990_erp_action_10_32 */
1635
1636/*
1637 * DASD_3990_ERP_ACTION_1B_32
1638 *
1639 * DESCRIPTION
1640 * Handles 32 byte 'Action 1B' of Single Program Action Codes.
1641 * A write operation could not be finished because of an unexpected
1642 * condition.
1643 * The already created 'default erp' is used to get the link to
1644 * the erp chain, but it cannot be used for this recovery
1645 * action because it contains no DE/LO data space.
1646 *
1647 * PARAMETER
1648 * default_erp already added default erp.
1649 * sense current sense data
1650 *
1651 * RETURN VALUES
1652 * erp new erp or
1653 * default_erp in case of imprecise ending or error
1654 */
1655static struct dasd_ccw_req *
1656dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1657{
1658
1659 struct dasd_device *device = default_erp->device;
1660 __u32 cpa = 0;
1661 struct dasd_ccw_req *cqr;
1662 struct dasd_ccw_req *erp;
1663 struct DE_eckd_data *DE_data;
1664 char *LO_data; /* LO_eckd_data_t */
1665 struct ccw1 *ccw;
1666
1667 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1668 "Write not finished because of unexpected condition");
1669
1670 default_erp->function = dasd_3990_erp_action_1B_32;
1671
1672 /* determine the original cqr */
1673 cqr = default_erp;
1674
1675 while (cqr->refers != NULL) {
1676 cqr = cqr->refers;
1677 }
1678
1679 /* for imprecise ending just do default erp */
1680 if (sense[1] & 0x01) {
1681
1682 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1683 "Imprecise ending is set - just retry");
1684
1685 return default_erp;
1686 }
1687
1688 /* determine the address of the CCW to be restarted */
1689 /* Imprecise ending is not set -> addr from IRB-SCSW */
1690 cpa = default_erp->refers->irb.scsw.cpa;
1691
1692 if (cpa == 0) {
1693
1694 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1695 "Unable to determine address of the CCW "
1696 "to be restarted");
1697
1698 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
1699 }
1700
1701 /* Build new ERP request including DE/LO */
1702 erp = dasd_alloc_erp_request((char *) &cqr->magic,
1703 2 + 1,/* DE/LO + TIC */
1704 sizeof (struct DE_eckd_data) +
1705 sizeof (struct LO_eckd_data), device);
1706
1707 if (IS_ERR(erp)) {
1708 DEV_MESSAGE(KERN_ERR, device, "%s", "Unable to allocate ERP");
1709 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
1710 }
1711
1712 /* use original DE */
1713 DE_data = erp->data;
1714 memcpy(DE_data, cqr->data, sizeof (struct DE_eckd_data));
1715
1716 /* create LO */
1717 LO_data = erp->data + sizeof (struct DE_eckd_data);
1718
1719 if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
1720
1721 DEV_MESSAGE(KERN_ERR, device, "%s",
1722 "BUG - this should not happen");
1723
1724 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
1725 }
1726
1727 if ((sense[7] & 0x3F) == 0x01) {
1728 /* operation code is WRITE DATA -> data area orientation */
1729 LO_data[0] = 0x81;
1730
1731 } else if ((sense[7] & 0x3F) == 0x03) {
1732 /* operation code is FORMAT WRITE -> index orientation */
1733 LO_data[0] = 0xC3;
1734
1735 } else {
1736 LO_data[0] = sense[7]; /* operation */
1737 }
1738
1739 LO_data[1] = sense[8]; /* auxiliary */
1740 LO_data[2] = sense[9];
1741 LO_data[3] = sense[3]; /* count */
1742 LO_data[4] = sense[29]; /* seek_addr.cyl */
1743 LO_data[5] = sense[30]; /* seek_addr.cyl 2nd byte */
1744 LO_data[7] = sense[31]; /* seek_addr.head 2nd byte */
1745
1746 memcpy(&(LO_data[8]), &(sense[11]), 8);
1747
1748 /* create DE ccw */
1749 ccw = erp->cpaddr;
1750 memset(ccw, 0, sizeof (struct ccw1));
1751 ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
1752 ccw->flags = CCW_FLAG_CC;
1753 ccw->count = 16;
1754 ccw->cda = (__u32)(addr_t) DE_data;
1755
1756 /* create LO ccw */
1757 ccw++;
1758 memset(ccw, 0, sizeof (struct ccw1));
1759 ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
1760 ccw->flags = CCW_FLAG_CC;
1761 ccw->count = 16;
1762 ccw->cda = (__u32)(addr_t) LO_data;
1763
1764 /* TIC to the failed ccw */
1765 ccw++;
1766 ccw->cmd_code = CCW_CMD_TIC;
1767 ccw->cda = cpa;
1768
1769 /* fill erp related fields */
1770 erp->function = dasd_3990_erp_action_1B_32;
1771 erp->refers = default_erp->refers;
1772 erp->device = device;
1773 erp->magic = default_erp->magic;
1774 erp->expires = 0;
1775 erp->retries = 256;
1776 erp->buildclk = get_clock();
1777 erp->status = DASD_CQR_FILLED;
1778
1779 /* remove the default erp */
1780 dasd_free_erp_request(default_erp, device);
1781
1782 return erp;
1783
1784} /* end dasd_3990_erp_action_1B_32 */
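
The rebuilt channel program always has the same three-CCW shape: Define Extent and Locate Record, both command-chained, followed by a TIC that branches back to the CCW that failed (the cpa taken from the IRB). A compilable sketch of that layout; struct ccw_sketch, the chaining flag and the command codes are simplified stand-ins, not the s390 definitions the driver uses.

#include <stdio.h>
#include <string.h>

struct ccw_sketch {                 /* simplified struct ccw1 */
        unsigned char  cmd_code;
        unsigned char  flags;
        unsigned short count;
        unsigned int   cda;         /* data / branch address */
};

#define FLAG_CC           0x40      /* command chaining (assumed value) */
#define CMD_DEFINE_EXTENT 0x63      /* assumed ECKD command codes */
#define CMD_LOCATE_RECORD 0x47
#define CMD_TIC           0x08

int
main(void)
{
        unsigned int de_data = 0x1000, lo_data = 0x2000;  /* fake buffers */
        unsigned int failed_ccw = 0x3000;                 /* cpa from IRB */
        struct ccw_sketch cp[3];

        memset(cp, 0, sizeof(cp));
        cp[0] = (struct ccw_sketch){ CMD_DEFINE_EXTENT, FLAG_CC, 16, de_data };
        cp[1] = (struct ccw_sketch){ CMD_LOCATE_RECORD, FLAG_CC, 16, lo_data };
        cp[2] = (struct ccw_sketch){ CMD_TIC, 0, 0, failed_ccw };

        printf("DE -> LO -> TIC to %#x\n", cp[2].cda);
        return 0;
}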
1785
1786/*
1787 * DASD_3990_UPDATE_1B
1788 *
1789 * DESCRIPTION
1790 * Handles the update to the 32 byte 'Action 1B' of Single Program
1791 * Action Codes in case the first action was not successful.
1792 * The already created 'previous_erp' is the currently not successful
1793 * ERP.
1794 *
1795 * PARAMETER
1796 * previous_erp already created previous erp.
1797 * sense current sense data
1798 * RETURN VALUES
1799 * erp modified erp
1800 */
1801static struct dasd_ccw_req *
1802dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
1803{
1804
1805 struct dasd_device *device = previous_erp->device;
1806 __u32 cpa = 0;
1807 struct dasd_ccw_req *cqr;
1808 struct dasd_ccw_req *erp;
1809 char *LO_data; /* struct LO_eckd_data */
1810 struct ccw1 *ccw;
1811
1812 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1813 "Write not finished because of unexpected condition"
1814 " - follow on");
1815
1816 /* determine the original cqr */
1817 cqr = previous_erp;
1818
1819 while (cqr->refers != NULL) {
1820 cqr = cqr->refers;
1821 }
1822
1823 /* for imprecise ending just do default erp */
1824 if (sense[1] & 0x01) {
1825
1826 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1827 "Imprecise ending is set - just retry");
1828
1829 previous_erp->status = DASD_CQR_QUEUED;
1830
1831 return previous_erp;
1832 }
1833
1834 /* determine the address of the CCW to be restarted */
1835 /* Imprecise ending is not set -> addr from IRB-SCSW */
1836 cpa = previous_erp->irb.scsw.cpa;
1837
1838 if (cpa == 0) {
1839
1840 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1841 "Unable to determine address of the CCW "
1842 "to be restarted");
1843
1844 previous_erp->status = DASD_CQR_FAILED;
1845
1846 return previous_erp;
1847 }
1848
1849 erp = previous_erp;
1850
1851 /* update the LO with the new returned sense data */
1852 LO_data = erp->data + sizeof (struct DE_eckd_data);
1853
1854 if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
1855
1856 DEV_MESSAGE(KERN_ERR, device, "%s",
1857 "BUG - this should not happen");
1858
1859 previous_erp->status = DASD_CQR_FAILED;
1860
1861 return previous_erp;
1862 }
1863
1864 if ((sense[7] & 0x3F) == 0x01) {
1865 /* operation code is WRITE DATA -> data area orientation */
1866 LO_data[0] = 0x81;
1867
1868 } else if ((sense[7] & 0x3F) == 0x03) {
1869 /* operation code is FORMAT WRITE -> index orientation */
1870 LO_data[0] = 0xC3;
1871
1872 } else {
1873 LO_data[0] = sense[7]; /* operation */
1874 }
1875
1876 LO_data[1] = sense[8]; /* auxiliary */
1877 LO_data[2] = sense[9];
1878 LO_data[3] = sense[3]; /* count */
1879 LO_data[4] = sense[29]; /* seek_addr.cyl */
1880 LO_data[5] = sense[30]; /* seek_addr.cyl 2nd byte */
1881 LO_data[7] = sense[31]; /* seek_addr.head 2nd byte */
1882
1883 memcpy(&(LO_data[8]), &(sense[11]), 8);
1884
1885 /* TIC to the failed ccw */
1886 ccw = erp->cpaddr; /* addr of DE ccw */
1887 ccw++; /* addr of LO ccw */
1888 ccw++; /* addr of TIC ccw */
1889 ccw->cda = cpa;
1890
1891 erp->status = DASD_CQR_QUEUED;
1892
1893 return erp;
1894
1895} /* end dasd_3990_update_1B */
1896
1897/*
1898 * DASD_3990_ERP_COMPOUND_RETRY
1899 *
1900 * DESCRIPTION
1901 * Handles the compound ERP action retry code.
1902 * NOTE: At least one retry is done even if zero is specified
1903 * by the sense data. This makes enqueueing of the request
1904 * easier.
1905 *
1906 * PARAMETER
1907 * sense sense data of the actual error
1908 * erp pointer to the currently created ERP
1909 *
1910 * RETURN VALUES
1911 * erp modified ERP pointer
1912 *
1913 */
1914static void
1915dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense)
1916{
1917
1918 switch (sense[25] & 0x03) {
1919 case 0x00: /* do not retry */
1920 erp->retries = 1;
1921 break;
1922
1923 case 0x01: /* retry 2 times */
1924 erp->retries = 2;
1925 break;
1926
1927 case 0x02: /* retry 10 times */
1928 erp->retries = 10;
1929 break;
1930
1931 case 0x03: /* retry 256 times */
1932 erp->retries = 256;
1933 break;
1934
1935 default:
1936 BUG();
1937 }
1938
1939 erp->function = dasd_3990_erp_compound_retry;
1940
1941} /* end dasd_3990_erp_compound_retry */
1942
1943/*
1944 * DASD_3990_ERP_COMPOUND_PATH
1945 *
1946 * DESCRIPTION
1947 * Handles the compound ERP action for retry on alternate
1948 * channel path.
1949 *
1950 * PARAMETER
1951 * sense sense data of the actual error
1952 * erp pointer to the currently created ERP
1953 *
1954 * RETURN VALUES
1955 * erp modified ERP pointer
1956 *
1957 */
1958static void
1959dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
1960{
1961
1962 if (sense[25] & DASD_SENSE_BIT_3) {
1963 dasd_3990_erp_alternate_path(erp);
1964
1965 if (erp->status == DASD_CQR_FAILED) {
1966 /* reset the lpm and the status to be able to
1967 * try further actions. */
1968
1969 erp->lpm = 0;
1970
1971 erp->status = DASD_CQR_ERROR;
1972
1973 }
1974 }
1975
1976 erp->function = dasd_3990_erp_compound_path;
1977
1978} /* end dasd_3990_erp_compound_path */
1979
1980/*
1981 * DASD_3990_ERP_COMPOUND_CODE
1982 *
1983 * DESCRIPTION
1984 * Handles the compound ERP action for retry code.
1985 *
1986 * PARAMETER
1987 * sense sense data of the actual error
1988 * erp pointer to the currently created ERP
1989 *
1990 * RETURN VALUES
1991 * erp NEW ERP pointer
1992 *
1993 */
1994static struct dasd_ccw_req *
1995dasd_3990_erp_compound_code(struct dasd_ccw_req * erp, char *sense)
1996{
1997
1998 if (sense[25] & DASD_SENSE_BIT_2) {
1999
2000 switch (sense[28]) {
2001 case 0x17:
2002 /* issue a Diagnostic Control command with an
2003 * Inhibit Write subcommand and controller modifier */
2004 erp = dasd_3990_erp_DCTL(erp, 0x20);
2005 break;
2006
2007 case 0x25:
2008 /* wait for 5 seconds and retry again */
2009 erp->retries = 1;
2010
2011 dasd_3990_erp_block_queue (erp, 5*HZ);
2012 break;
2013
2014 default:
2015 /* should not happen - continue */
2016 break;
2017 }
2018 }
2019
2020 erp->function = dasd_3990_erp_compound_code;
2021
2022 return erp;
2023
2024} /* end dasd_3990_erp_compound_code */
2025
2026/*
2027 * DASD_3990_ERP_COMPOUND_CONFIG
2028 *
2029 * DESCRIPTION
2030 * Handles the compound ERP action for configuration
2031 * dependent error.
2032 * Note: duplex handling is not implemented (yet).
2033 *
2034 * PARAMETER
2035 * sense sense data of the actual error
2036 * erp pointer to the currently created ERP
2037 *
2038 * RETURN VALUES
2039 * erp modified ERP pointer
2040 *
2041 */
2042static void
2043dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense)
2044{
2045
2046 if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) {
2047
2048 /* set to suspended duplex state then restart */
2049 struct dasd_device *device = erp->device;
2050
2051 DEV_MESSAGE(KERN_ERR, device, "%s",
2052 "Set device to suspended duplex state should be "
2053 "done!\n"
2054 "This is not implemented yet (for compound ERP)"
2055 " - please report to linux390@de.ibm.com");
2056
2057 }
2058
2059 erp->function = dasd_3990_erp_compound_config;
2060
2061} /* end dasd_3990_erp_compound_config */
2062
2063/*
2064 * DASD_3990_ERP_COMPOUND
2065 *
2066 * DESCRIPTION
2067 * Does the further compound program action if
2068 * compound retry was not successful.
2069 *
2070 * PARAMETER
2071 * sense sense data of the actual error
2072 * erp pointer to the current (failed) ERP
2073 *
2074 * RETURN VALUES
2075 * erp (additional) ERP pointer
2076 *
2077 */
2078static struct dasd_ccw_req *
2079dasd_3990_erp_compound(struct dasd_ccw_req * erp, char *sense)
2080{
2081
2082 if ((erp->function == dasd_3990_erp_compound_retry) &&
2083 (erp->status == DASD_CQR_ERROR)) {
2084
2085 dasd_3990_erp_compound_path(erp, sense);
2086 }
2087
2088 if ((erp->function == dasd_3990_erp_compound_path) &&
2089 (erp->status == DASD_CQR_ERROR)) {
2090
2091 erp = dasd_3990_erp_compound_code(erp, sense);
2092 }
2093
2094 if ((erp->function == dasd_3990_erp_compound_code) &&
2095 (erp->status == DASD_CQR_ERROR)) {
2096
2097 dasd_3990_erp_compound_config(erp, sense);
2098 }
2099
2100 /* if no compound action ERP specified, the request failed */
2101 if (erp->status == DASD_CQR_ERROR) {
2102
2103 erp->status = DASD_CQR_FAILED;
2104 }
2105
2106 return erp;
2107
2108} /* end dasd_3990_erp_compound */
2109
2110/*
2111 * DASD_3990_ERP_INSPECT_32
2112 *
2113 * DESCRIPTION
2114 * Does a detailed inspection of the 32 byte sense data
2115 * and sets up a related error recovery action.
2116 *
2117 * PARAMETER
2118 * sense sense data of the actual error
2119 * erp pointer to the currently created default ERP
2120 *
2121 * RETURN VALUES
2122 * erp_filled pointer to the ERP
2123 *
2124 */
2125static struct dasd_ccw_req *
2126dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
2127{
2128
2129 struct dasd_device *device = erp->device;
2130
2131 erp->function = dasd_3990_erp_inspect_32;
2132
2133 if (sense[25] & DASD_SENSE_BIT_0) {
2134
2135 /* compound program action codes (byte25 bit 0 == '1') */
2136 dasd_3990_erp_compound_retry(erp, sense);
2137
2138 } else {
2139
2140 /* single program action codes (byte25 bit 0 == '0') */
2141 switch (sense[25]) {
2142
2143 case 0x00: /* success - use default ERP for retries */
2144 DEV_MESSAGE(KERN_DEBUG, device, "%s",
2145 "ERP called for successful request"
2146 " - just retry");
2147 break;
2148
2149 case 0x01: /* fatal error */
2150 DEV_MESSAGE(KERN_ERR, device, "%s",
2151 "Fatal error should have been "
2152 "handled within the interrupt handler");
2153
2154 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
2155 break;
2156
2157 case 0x02: /* intervention required */
2158 case 0x03: /* intervention required during dual copy */
2159 erp = dasd_3990_erp_int_req(erp);
2160 break;
2161
2162 case 0x0F: /* length mismatch during update write command */
2163 DEV_MESSAGE(KERN_ERR, device, "%s",
2164 "update write command error - should not "
2165 "happen;\n"
2166 "Please send this message together with "
2167 "the above sense data to linux390@de."
2168 "ibm.com");
2169
2170 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
2171 break;
2172
2173 case 0x10: /* logging required for other channel program */
2174 erp = dasd_3990_erp_action_10_32(erp, sense);
2175 break;
2176
2177 case 0x15: /* next track outside defined extent */
2178 DEV_MESSAGE(KERN_ERR, device, "%s",
2179 "next track outside defined extent - "
2180 "should not happen;\n"
2181 "Please send this message together with "
2182 "the above sense data to linux390@de."
2183 "ibm.com");
2184
2185 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
2186 break;
2187
2188 case 0x1B: /* unexpected condition during write */
2189
2190 erp = dasd_3990_erp_action_1B_32(erp, sense);
2191 break;
2192
2193 case 0x1C: /* invalid data */
2194 DEV_MESSAGE(KERN_EMERG, device, "%s",
2195 "Data recovered during retry with PCI "
2196 "fetch mode active");
2197
2198 /* not possible to handle this situation in Linux */
2199 panic
2200 ("Invalid data - No way to inform application "
2201 "about the possibly incorrect data");
2202 break;
2203
2204 case 0x1D: /* state-change pending */
2205 DEV_MESSAGE(KERN_DEBUG, device, "%s",
2206 "A State change pending condition exists "
2207 "for the subsystem or device");
2208
2209 erp = dasd_3990_erp_action_4(erp, sense);
2210 break;
2211
2212 case 0x1E: /* busy */
2213 DEV_MESSAGE(KERN_DEBUG, device, "%s",
2214 "Busy condition exists "
2215 "for the subsystem or device");
2216 erp = dasd_3990_erp_action_4(erp, sense);
2217 break;
2218
2219 default: /* all others errors - default erp */
2220 break;
2221 }
2222 }
2223
2224 return erp;
2225
2226} /* end dasd_3990_erp_inspect_32 */
2227
2228/*
2229 *****************************************************************************
2230 * main ERP control functions (24 and 32 byte sense)
2231 *****************************************************************************
2232 */
2233
2234/*
2235 * DASD_3990_ERP_INSPECT
2236 *
2237 * DESCRIPTION
2238 * Does a detailed inspection for sense data by calling either
2239 * the 24-byte or the 32-byte inspection routine.
2240 *
2241 * PARAMETER
2242 * erp pointer to the currently created default ERP
2243 * RETURN VALUES
2244 * erp_new contents were possibly modified
2245 */
2246static struct dasd_ccw_req *
2247dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
2248{
2249
2250 struct dasd_ccw_req *erp_new = NULL;
2251 /* sense data are located in the refers record of the */
2252 /* already set up new ERP ! */
2253 char *sense = erp->refers->irb.ecw;
2254
2255 /* distinguish between 24 and 32 byte sense data */
2256 if (sense[27] & DASD_SENSE_BIT_0) {
2257
2258 /* inspect the 24 byte sense data */
2259 erp_new = dasd_3990_erp_inspect_24(erp, sense);
2260
2261 } else {
2262
2263 /* inspect the 32 byte sense data */
2264 erp_new = dasd_3990_erp_inspect_32(erp, sense);
2265
2266 } /* end distinguish between 24 and 32 byte sense data */
2267
2268 return erp_new;
2269}
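
The only discriminator between the two sense formats is byte 27 bit 0; S/390 numbers bits from the most significant end, so 'bit 0' is the 0x80 mask (DASD_SENSE_BIT_0 in the driver - the value is an assumption in this sketch):

#include <stdio.h>

#define DASD_SENSE_BIT_0 0x80   /* MSB-first bit numbering (assumed) */

static int
sense_format(const unsigned char *sense)
{
        return (sense[27] & DASD_SENSE_BIT_0) ? 24 : 32;
}

int
main(void)
{
        unsigned char sense[32] = { 0 };

        sense[27] = DASD_SENSE_BIT_0;
        printf("%d-byte sense\n", sense_format(sense));  /* prints 24 */
        return 0;
}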
2270
2271/*
2272 * DASD_3990_ERP_ADD_ERP
2273 *
2274 * DESCRIPTION
2275 * This function adds an additional request block (ERP) to the head of
2276 * the given cqr (or erp).
2277 * This erp is initialized as a default erp (retry TIC)
2278 *
2279 * PARAMETER
2280 * cqr head of the current ERP-chain (or single cqr if
2281 * first error)
2282 * RETURN VALUES
2283 * erp pointer to new ERP-chain head
2284 */
2285static struct dasd_ccw_req *
2286dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
2287{
2288
2289 struct dasd_device *device = cqr->device;
2290 struct ccw1 *ccw;
2291
2292 /* allocate additional request block */
2293 struct dasd_ccw_req *erp;
2294
2295 erp = dasd_alloc_erp_request((char *) &cqr->magic, 2, 0, cqr->device);
2296 if (IS_ERR(erp)) {
2297 if (cqr->retries <= 0) {
2298 DEV_MESSAGE(KERN_ERR, device, "%s",
2299 "Unable to allocate ERP request");
2300 cqr->status = DASD_CQR_FAILED;
2301 cqr->stopclk = get_clock ();
2302 } else {
2303 DEV_MESSAGE (KERN_ERR, device,
2304 "Unable to allocate ERP request "
2305 "(%i retries left)",
2306 cqr->retries);
2307 dasd_set_timer(device, (HZ << 3));
2308 }
2309 return cqr;
2310 }
2311
2312 /* initialize request with default TIC to current ERP/CQR */
2313 ccw = erp->cpaddr;
2314 ccw->cmd_code = CCW_CMD_NOOP;
2315 ccw->flags = CCW_FLAG_CC;
2316 ccw++;
2317 ccw->cmd_code = CCW_CMD_TIC;
2318 ccw->cda = (long)(cqr->cpaddr);
2319 erp->function = dasd_3990_erp_add_erp;
2320 erp->refers = cqr;
2321 erp->device = cqr->device;
2322 erp->magic = cqr->magic;
2323 erp->expires = 0;
2324 erp->retries = 256;
2325 erp->buildclk = get_clock();
2326
2327 erp->status = DASD_CQR_FILLED;
2328
2329 return erp;
2330}
2331
2332/*
2333 * DASD_3990_ERP_ADDITIONAL_ERP
2334 *
2335 * DESCRIPTION
2336 * An additional ERP is needed to handle the current error.
2337 * Add ERP to the head of the ERP-chain containing the ERP processing
2338 * determined based on the sense data.
2339 *
2340 * PARAMETER
2341 * cqr head of the current ERP-chain (or single cqr if
2342 * first error)
2343 *
2344 * RETURN VALUES
2345 * erp pointer to new ERP-chain head
2346 */
2347static struct dasd_ccw_req *
2348dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr)
2349{
2350
2351 struct dasd_ccw_req *erp = NULL;
2352
2353 /* add erp and initialize with default TIC */
2354 erp = dasd_3990_erp_add_erp(cqr);
2355
2356 /* inspect sense, determine specific ERP if possible */
2357 if (erp != cqr) {
2358
2359 erp = dasd_3990_erp_inspect(erp);
2360 }
2361
2362 return erp;
2363
2364} /* end dasd_3990_erp_additional_erp */
2365
2366/*
2367 * DASD_3990_ERP_ERROR_MATCH
2368 *
2369 * DESCRIPTION
2370 * Check if the device status of the given cqr is the same.
2371 * This means that the failed CCW and the relevant sense data
2372 * must match.
2373 * I don't distinguish between 24 and 32 byte sense because in the case
2374 * of 24 byte sense, bytes 25 and 27 are set as well.
2375 *
2376 * PARAMETER
2377 * cqr1 first cqr, which will be compared with the
2378 * cqr2 second cqr.
2379 *
2380 * RETURN VALUES
2381 * match 'boolean' for match found
2382 * returns 1 if match found, otherwise 0.
2383 */
2384static int
2385dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2)
2386{
2387
2388 /* check failed CCW */
2389 if (cqr1->irb.scsw.cpa != cqr2->irb.scsw.cpa) {
2390 // return 0; /* CCW doesn't match */
2391 }
2392
2393 /* check sense data; byte 0-2,25,27 */
2394 if (!((memcmp (cqr1->irb.ecw, cqr2->irb.ecw, 3) == 0) &&
2395 (cqr1->irb.ecw[27] == cqr2->irb.ecw[27]) &&
2396 (cqr1->irb.ecw[25] == cqr2->irb.ecw[25]))) {
2397
2398 return 0; /* sense doesn't match */
2399 }
2400
2401 return 1; /* match */
2402
2403} /* end dasd_3990_erp_error_match */
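
Two failures count as 'the same error' when sense bytes 0-2 and the format/action bytes 25 and 27 all agree; the commented-out cpa comparison above means the failed CCW address currently plays no part in the decision. A standalone model of the predicate:

#include <stdio.h>
#include <string.h>

static int
sense_matches(const unsigned char *ecw1, const unsigned char *ecw2)
{
        return memcmp(ecw1, ecw2, 3) == 0 &&
               ecw1[25] == ecw2[25] &&
               ecw1[27] == ecw2[27];
}

int
main(void)
{
        unsigned char a[32] = { 0x10 }, b[32] = { 0x10 };

        b[25] = 0x17;   /* different action code -> no match */
        printf("match: %d\n", sense_matches(a, b));     /* prints 0 */
        return 0;
}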
2404
2405/*
2406 * DASD_3990_ERP_IN_ERP
2407 *
2408 * DESCRIPTION
2409 * Check if the current error has already happened before.
2410 * Quick exit if the current cqr is not an ERP (cqr->refers=NULL).
2411 *
2412 * PARAMETER
2413 * cqr failed cqr (either original cqr or already an erp)
2414 *
2415 * RETURN VALUES
2416 * erp erp-pointer to the already defined error
2417 * recovery procedure OR
2418 * NULL if a 'new' error occurred.
2419 */
2420static struct dasd_ccw_req *
2421dasd_3990_erp_in_erp(struct dasd_ccw_req *cqr)
2422{
2423
2424 struct dasd_ccw_req *erp_head = cqr, /* save erp chain head */
2425 *erp_match = NULL; /* save matching erp */
2426 int match = 0; /* 'boolean' for matching error found */
2427
2428 if (cqr->refers == NULL) { /* return if not in erp */
2429 return NULL;
2430 }
2431
2432 /* check the erp/cqr chain for current error */
2433 do {
2434 match = dasd_3990_erp_error_match(erp_head, cqr->refers);
2435 erp_match = cqr; /* save possible matching erp */
2436 cqr = cqr->refers; /* check next erp/cqr in queue */
2437
2438 } while ((cqr->refers != NULL) && (!match));
2439
2440 if (!match) {
2441 return NULL; /* no match was found */
2442 }
2443
2444 return erp_match; /* return address of matching erp */
2445
2446} /* END dasd_3990_erp_in_erp */
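
Each ERP points back via refers to the request it retries, so the chain from the newest ERP down to the original cqr can be walked with no extra bookkeeping - exactly what the loop above does. A minimal sketch of such a walk:

#include <stdio.h>

struct req_sketch {
        const char *name;
        struct req_sketch *refers;  /* older request; NULL at original cqr */
};

int
main(void)
{
        struct req_sketch cqr  = { "original cqr", NULL };
        struct req_sketch erp1 = { "erp 1", &cqr };
        struct req_sketch erp2 = { "erp 2", &erp1 };
        struct req_sketch *p;

        for (p = &erp2; p != NULL; p = p->refers)
                printf("%s\n", p->name);
        return 0;
}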
2447
2448/*
2449 * DASD_3990_ERP_FURTHER_ERP (24 & 32 byte sense)
2450 *
2451 * DESCRIPTION
2452 * No retry is left for the current ERP. Check what has to be done
2453 * with the ERP.
2454 * - do further defined ERP action or
2455 * - wait for interrupt or
2456 * - exit with permanent error
2457 *
2458 * PARAMETER
2459 * erp ERP which is in progress with no retry left
2460 *
2461 * RETURN VALUES
2462 * erp modified/additional ERP
2463 */
2464static struct dasd_ccw_req *
2465dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
2466{
2467
2468 struct dasd_device *device = erp->device;
2469 char *sense = erp->irb.ecw;
2470
2471 /* check for 24 byte sense ERP */
2472 if ((erp->function == dasd_3990_erp_bus_out) ||
2473 (erp->function == dasd_3990_erp_action_1) ||
2474 (erp->function == dasd_3990_erp_action_4)) {
2475
2476 erp = dasd_3990_erp_action_1(erp);
2477
2478 } else if (erp->function == dasd_3990_erp_action_5) {
2479
2480 /* retries have not been successful */
2481 /* prepare erp for retry on different channel path */
2482 erp = dasd_3990_erp_action_1(erp);
2483
2484 if (!(sense[2] & DASD_SENSE_BIT_0)) {
2485
2486 /* issue a Diagnostic Control command with an
2487 * Inhibit Write subcommand */
2488
2489 switch (sense[25]) {
2490 case 0x17:
2491 case 0x57:{ /* controller */
2492 erp = dasd_3990_erp_DCTL(erp, 0x20);
2493 break;
2494 }
2495 case 0x18:
2496 case 0x58:{ /* channel path */
2497 erp = dasd_3990_erp_DCTL(erp, 0x40);
2498 break;
2499 }
2500 case 0x19:
2501 case 0x59:{ /* storage director */
2502 erp = dasd_3990_erp_DCTL(erp, 0x80);
2503 break;
2504 }
2505 default:
2506 DEV_MESSAGE(KERN_DEBUG, device,
2507 "invalid subcommand modifier 0x%x "
2508 "for Diagnostic Control Command",
2509 sense[25]);
2510 }
2511 }
2512
2513 /* check for 32 byte sense ERP */
2514 } else if ((erp->function == dasd_3990_erp_compound_retry) ||
2515 (erp->function == dasd_3990_erp_compound_path) ||
2516 (erp->function == dasd_3990_erp_compound_code) ||
2517 (erp->function == dasd_3990_erp_compound_config)) {
2518
2519 erp = dasd_3990_erp_compound(erp, sense);
2520
2521 } else {
2522 /* No retry left and no additional special handling */
2523 /* necessary */
2524 DEV_MESSAGE(KERN_ERR, device,
2525 "no retries left for erp %p - "
2526 "set status to FAILED", erp);
2527
2528 erp->status = DASD_CQR_FAILED;
2529 }
2530
2531 return erp;
2532
2533} /* end dasd_3990_erp_further_erp */
2534
2535/*
2536 * DASD_3990_ERP_HANDLE_MATCH_ERP
2537 *
2538 * DESCRIPTION
2539 * An error occurred again and an ERP has been detected which is already
2540 * used to handle this error (e.g. retries).
2541 * All prior ERPs are assumed to be successful and are therefore removed
2542 * from the queue.
2543 * If the retry counter of the matching erp is already 0, check whether
2544 * further action is needed (besides retry) or whether the ERP has failed.
2545 *
2546 * PARAMETER
2547 * erp_head first ERP in ERP-chain
2548 * erp ERP that handles the actual error.
2549 * (matching erp)
2550 *
2551 * RETURN VALUES
2552 * erp modified/additional ERP
2553 */
2554static struct dasd_ccw_req *
2555dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
2556 struct dasd_ccw_req *erp)
2557{
2558
2559 struct dasd_device *device = erp_head->device;
2560 struct dasd_ccw_req *erp_done = erp_head; /* finished req */
2561 struct dasd_ccw_req *erp_free = NULL; /* req to be freed */
2562
2563 /* loop over successful ERPs and remove them from chanq */
2564 while (erp_done != erp) {
2565
2566 if (erp_done == NULL) /* end of chain reached */
2567 panic(PRINTK_HEADER "Programming error in ERP! The "
2568 "original request was lost\n");
2569
2570 /* remove the request from the device queue */
2571 list_del(&erp_done->list);
2572
2573 erp_free = erp_done;
2574 erp_done = erp_done->refers;
2575
2576 /* free the finished erp request */
2577 dasd_free_erp_request(erp_free, erp_free->device);
2578
2579 } /* end while */
2580
2581 if (erp->retries > 0) {
2582
2583 char *sense = erp->refers->irb.ecw;
2584
2585 /* check for special retries */
2586 if (erp->function == dasd_3990_erp_action_4) {
2587
2588 erp = dasd_3990_erp_action_4(erp, sense);
2589
2590 } else if (erp->function == dasd_3990_erp_action_1B_32) {
2591
2592 erp = dasd_3990_update_1B(erp, sense);
2593
2594 } else if (erp->function == dasd_3990_erp_int_req) {
2595
2596 erp = dasd_3990_erp_int_req(erp);
2597
2598 } else {
2599 /* simple retry */
2600 DEV_MESSAGE(KERN_DEBUG, device,
2601 "%i retries left for erp %p",
2602 erp->retries, erp);
2603
2604 /* handle the request again... */
2605 erp->status = DASD_CQR_QUEUED;
2606 }
2607
2608 } else {
2609 /* no retry left - check for further necessary action */
2610 /* if no further actions, handle rest as permanent error */
2611 erp = dasd_3990_erp_further_erp(erp);
2612 }
2613
2614 return erp;
2615
2616} /* end dasd_3990_erp_handle_match_erp */
2617
2618/*
2619 * DASD_3990_ERP_ACTION
2620 *
2621 * DESCRIPTION
2622 * Control routine for 3990 erp actions.
2623 * Has to be called with the queue lock (namely the s390_irq_lock) acquired.
2624 *
2625 * PARAMETER
2626 * cqr failed cqr (either original cqr or already an erp)
2627 *
2628 * RETURN VALUES
2629 * erp erp-pointer to the head of the ERP action chain.
2630 * This means:
2631 * - either a ptr to an additional ERP cqr or
2632 * - the originally given cqr (whose status might
2633 * be modified)
2634 */
2635struct dasd_ccw_req *
2636dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2637{
2638
2639 struct dasd_ccw_req *erp = NULL;
2640 struct dasd_device *device = cqr->device;
2641 __u32 cpa = cqr->irb.scsw.cpa;
2642
2643#ifdef ERP_DEBUG
2644 /* print current erp_chain */
2645 DEV_MESSAGE(KERN_ERR, device, "%s",
2646 "ERP chain at BEGINNING of ERP-ACTION");
2647 {
2648 struct dasd_ccw_req *temp_erp = NULL;
2649
2650 for (temp_erp = cqr;
2651 temp_erp != NULL; temp_erp = temp_erp->refers) {
2652
2653 DEV_MESSAGE(KERN_ERR, device,
2654 " erp %p (%02x) refers to %p",
2655 temp_erp, temp_erp->status,
2656 temp_erp->refers);
2657 }
2658 }
2659#endif /* ERP_DEBUG */
2660
2661 /* double-check if current erp/cqr was successful */
2662 if ((cqr->irb.scsw.cstat == 0x00) &&
2663 (cqr->irb.scsw.dstat == (DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
2664
2665 DEV_MESSAGE(KERN_DEBUG, device,
2666 "ERP called for successful request %p"
2667 " - NO ERP necessary", cqr);
2668
2669 cqr->status = DASD_CQR_DONE;
2670
2671 return cqr;
2672 }
2673 /* check if sense data are available */
2674 if (!cqr->irb.ecw) {
2675 DEV_MESSAGE(KERN_DEBUG, device,
2676 "ERP called witout sense data avail ..."
2677 "request %p - NO ERP possible", cqr);
2678
2679 cqr->status = DASD_CQR_FAILED;
2680
2681 return cqr;
2682
2683 }
2684
2685 /* check if error happened before */
2686 erp = dasd_3990_erp_in_erp(cqr);
2687
2688 if (erp == NULL) {
2689 /* no matching erp found - set up erp */
2690 erp = dasd_3990_erp_additional_erp(cqr);
2691 } else {
2692 /* matching erp found - set all leading erp's to DONE */
2693 erp = dasd_3990_erp_handle_match_erp(cqr, erp);
2694 }
2695
2696#ifdef ERP_DEBUG
2697 /* print current erp_chain */
2698 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP chain at END of ERP-ACTION");
2699 {
2700 struct dasd_ccw_req *temp_erp = NULL;
2701 for (temp_erp = erp;
2702 temp_erp != NULL; temp_erp = temp_erp->refers) {
2703
2704 DEV_MESSAGE(KERN_ERR, device,
2705 " erp %p (%02x) refers to %p",
2706 temp_erp, temp_erp->status,
2707 temp_erp->refers);
2708 }
2709 }
2710#endif /* ERP_DEBUG */
2711
2712 if (erp->status == DASD_CQR_FAILED)
2713 dasd_log_ccw(erp, 1, cpa);
2714
2715 /* enqueue added ERP request */
2716 if (erp->status == DASD_CQR_FILLED) {
2717 erp->status = DASD_CQR_QUEUED;
2718 list_add(&erp->list, &device->ccw_queue);
2719 }
2720
2721 return erp;
2722
2723} /* end dasd_3990_erp_action */
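
The success short-circuit at the top of the routine requires a clean subchannel status and exactly channel end plus device end from the device. The DEV_STAT_* values below follow the standard S/390 SCSW device-status encoding but are stated here as assumptions:

#include <stdio.h>

#define DEV_STAT_CHN_END 0x08   /* assumed SCSW device-status bits */
#define DEV_STAT_DEV_END 0x04

static int
request_succeeded(unsigned char cstat, unsigned char dstat)
{
        return cstat == 0x00 &&
               dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END);
}

int
main(void)
{
        printf("%d\n", request_succeeded(0x00, 0x0c)); /* 1: CE+DE only */
        printf("%d\n", request_succeeded(0x00, 0x0e)); /* 0: unit check too */
        return 0;
}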
2724
2725/*
2726 * Overrides for Emacs so that we follow Linus's tabbing style.
2727 * Emacs will notice this stuff at the end of the file and automatically
2728 * adjust the settings for this buffer only. This must remain at the end
2729 * of the file.
2730 * ---------------------------------------------------------------------------
2731 * Local variables:
2732 * c-indent-level: 4
2733 * c-brace-imaginary-offset: 0
2734 * c-brace-offset: -4
2735 * c-argdecl-indent: 4
2736 * c-label-offset: -4
2737 * c-continued-statement-offset: 4
2738 * c-continued-brace-offset: 0
2739 * indent-tabs-mode: 1
2740 * tab-width: 8
2741 * End:
2742 */
diff --git a/drivers/s390/block/dasd_9336_erp.c b/drivers/s390/block/dasd_9336_erp.c
new file mode 100644
index 000000000000..01e87170a3a2
--- /dev/null
+++ b/drivers/s390/block/dasd_9336_erp.c
@@ -0,0 +1,61 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_9336_erp.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
6 *
7 * $Revision: 1.8 $
8 */
9
10#define PRINTK_HEADER "dasd_erp(9336)"
11
12#include "dasd_int.h"
13
14
15/*
16 * DASD_9336_ERP_EXAMINE
17 *
18 * DESCRIPTION
19 * Checks only whether the error is fatal, recoverable or no error.
20 * A detailed examination of the sense data is done later outside
21 * the interrupt handler.
22 *
23 * The logic is based on the 'IBM 3880 Storage Control Reference' manual
24 * 'Chapter 7. 9336 Sense Data'.
25 *
26 * RETURN VALUES
27 * dasd_era_none no error
28 * dasd_era_fatal for all fatal (unrecoverable errors)
29 * dasd_era_recover for all others.
30 */
31dasd_era_t
32dasd_9336_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
33{
34 /* check for successful execution first */
35 if (irb->scsw.cstat == 0x00 &&
36 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
37 return dasd_era_none;
38
39 /* examine the 24 byte sense data */
40 return dasd_era_recover;
41
42} /* END dasd_9336_erp_examine */
43
44/*
45 * Overrides for Emacs so that we follow Linus's tabbing style.
46 * Emacs will notice this stuff at the end of the file and automatically
47 * adjust the settings for this buffer only. This must remain at the end
48 * of the file.
49 * ---------------------------------------------------------------------------
50 * Local variables:
51 * c-indent-level: 4
52 * c-brace-imaginary-offset: 0
53 * c-brace-offset: -4
54 * c-argdecl-indent: 4
55 * c-label-offset: -4
56 * c-continued-statement-offset: 4
57 * c-continued-brace-offset: 0
58 * indent-tabs-mode: 1
59 * tab-width: 8
60 * End:
61 */
diff --git a/drivers/s390/block/dasd_9343_erp.c b/drivers/s390/block/dasd_9343_erp.c
new file mode 100644
index 000000000000..2a23b74faf3f
--- /dev/null
+++ b/drivers/s390/block/dasd_9343_erp.c
@@ -0,0 +1,22 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_9343_erp.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
6 *
7 * $Revision: 1.13 $
8 */
9
10#define PRINTK_HEADER "dasd_erp(9343)"
11
12#include "dasd_int.h"
13
14dasd_era_t
15dasd_9343_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
16{
17 if (irb->scsw.cstat == 0x00 &&
18 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
19 return dasd_era_none;
20
21 return dasd_era_recover;
22}
diff --git a/drivers/s390/block/dasd_cmb.c b/drivers/s390/block/dasd_cmb.c
new file mode 100644
index 000000000000..ed1ab474c0c6
--- /dev/null
+++ b/drivers/s390/block/dasd_cmb.c
@@ -0,0 +1,145 @@
1/*
2 * linux/drivers/s390/block/dasd_cmb.c ($Revision: 1.6 $)
3 *
4 * Linux on zSeries Channel Measurement Facility support
5 * (dasd device driver interface)
6 *
7 * Copyright 2000,2003 IBM Corporation
8 *
9 * Author: Arnd Bergmann <arndb@de.ibm.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25#include <linux/init.h>
26#include <linux/ioctl32.h>
27#include <linux/module.h>
28#include <asm/ccwdev.h>
29#include <asm/cmb.h>
30
31#include "dasd_int.h"
32
33static int
34dasd_ioctl_cmf_enable(struct block_device *bdev, int no, long args)
35{
36 struct dasd_device *device;
37
38 device = bdev->bd_disk->private_data;
39 if (!device)
40 return -EINVAL;
41
42 return enable_cmf(device->cdev);
43}
44
45static int
46dasd_ioctl_cmf_disable(struct block_device *bdev, int no, long args)
47{
48 struct dasd_device *device;
49
50 device = bdev->bd_disk->private_data;
51 if (!device)
52 return -EINVAL;
53
54 return disable_cmf(device->cdev);
55}
56
57static int
58dasd_ioctl_readall_cmb(struct block_device *bdev, int no, long args)
59{
60 struct dasd_device *device;
61 struct cmbdata __user *udata;
62 struct cmbdata data;
63 size_t size;
64 int ret;
65
66 device = bdev->bd_disk->private_data;
67 if (!device)
68 return -EINVAL;
69 udata = (void __user *) args;
70 size = _IOC_SIZE(no);
71
72 if (!access_ok(VERIFY_WRITE, udata, size))
73 return -EFAULT;
74 ret = cmf_readall(device->cdev, &data);
75 if (ret)
76 return ret;
77 if (copy_to_user(udata, &data, min(size, sizeof(*udata))))
78 return -EFAULT;
79 return 0;
80}
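
From user space these ioctls are issued against an open DASD block device. A hedged usage sketch - the ioctl names and struct cmbdata are assumed to come from <asm/dasd.h> and <asm/cmb.h> on s390 of this era, and /dev/dasda is a placeholder device node:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/dasd.h>   /* BIODASDCMFENABLE, BIODASDREADALLCMB (assumed) */
#include <asm/cmb.h>    /* struct cmbdata (assumed) */

int
main(void)
{
        struct cmbdata data;
        int fd = open("/dev/dasda", O_RDONLY);

        if (fd < 0)
                return 1;
        if (ioctl(fd, BIODASDCMFENABLE) == 0 &&
            ioctl(fd, BIODASDREADALLCMB, &data) == 0)
                printf("channel measurement block read\n");
        close(fd);
        return 0;
}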
81
82/* module initialization below here. dasd already provides a mechanism
83 * to dynamically register ioctl functions, so we simply use this. */
84static inline int
85ioctl_reg(unsigned int no, dasd_ioctl_fn_t handler)
86{
87 int ret;
88 ret = dasd_ioctl_no_register(THIS_MODULE, no, handler);
89#ifdef CONFIG_COMPAT
90 if (ret)
91 return ret;
92
93 ret = register_ioctl32_conversion(no, NULL);
94 if (ret)
95 dasd_ioctl_no_unregister(THIS_MODULE, no, handler);
96#endif
97 return ret;
98}
99
100static inline void
101ioctl_unreg(unsigned int no, dasd_ioctl_fn_t handler)
102{
103 dasd_ioctl_no_unregister(THIS_MODULE, no, handler);
104#ifdef CONFIG_COMPAT
105 unregister_ioctl32_conversion(no);
106#endif
107
108}
109
110static void
111dasd_cmf_exit(void)
112{
113 ioctl_unreg(BIODASDCMFENABLE, dasd_ioctl_cmf_enable);
114 ioctl_unreg(BIODASDCMFDISABLE, dasd_ioctl_cmf_disable);
115 ioctl_unreg(BIODASDREADALLCMB, dasd_ioctl_readall_cmb);
116}
117
118static int __init
119dasd_cmf_init(void)
120{
121 int ret;
122 ret = ioctl_reg (BIODASDCMFENABLE, dasd_ioctl_cmf_enable);
123 if (ret)
124 goto err;
125 ret = ioctl_reg (BIODASDCMFDISABLE, dasd_ioctl_cmf_disable);
126 if (ret)
127 goto err;
128 ret = ioctl_reg (BIODASDREADALLCMB, dasd_ioctl_readall_cmb);
129 if (ret)
130 goto err;
131
132 return 0;
133err:
134 dasd_cmf_exit();
135
136 return ret;
137}
138
139module_init(dasd_cmf_init);
140module_exit(dasd_cmf_exit);
141
142MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
143MODULE_LICENSE("GPL");
144MODULE_DESCRIPTION("channel measurement facility interface for dasd\n"
145 "Copyright 2003 IBM Corporation\n");
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
new file mode 100644
index 000000000000..ad1841a96c87
--- /dev/null
+++ b/drivers/s390/block/dasd_devmap.c
@@ -0,0 +1,772 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_devmap.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 *
10 * Device mapping and dasd= parameter parsing functions. All devmap
11 * functions may not be called from interrupt context. In particular
12 * dasd_get_device is a no-no from interrupt context.
13 *
14 * $Revision: 1.37 $
15 */
16
17#include <linux/config.h>
18#include <linux/ctype.h>
19#include <linux/init.h>
20
21#include <asm/debug.h>
22#include <asm/uaccess.h>
23
24/* This is ugly... */
25#define PRINTK_HEADER "dasd_devmap:"
26
27#include "dasd_int.h"
28
29kmem_cache_t *dasd_page_cache;
30EXPORT_SYMBOL(dasd_page_cache);
31
32/*
33 * dasd_devmap_t is used to store the features and the relation
34 * between device number and device index. To find a dasd_devmap_t
35 * that corresponds to a device number of a device index each
36 * dasd_devmap_t is added to two linked lists, one to search by
37 * the device number and one to search by the device index. As
38 * soon as big minor numbers are available the device index list
39 * can be removed since the device number will then be identical
40 * to the device index.
41 */
42struct dasd_devmap {
43 struct list_head list;
44 char bus_id[BUS_ID_SIZE];
45 unsigned int devindex;
46 unsigned short features;
47 struct dasd_device *device;
48};
49
50/*
51 * Parameter parsing functions for dasd= parameter. The syntax is:
52 * <devno> : (0x)?[0-9a-fA-F]+
53 * <busid> : [0-9a-f]\.[0-9a-f]\.(0x)?[0-9a-fA-F]+
54 * <feature> : ro
55 * <feature_list> : \(<feature>(:<feature>)*\)
56 * <devno-range> : <devno>(-<devno>)?<feature_list>?
57 * <busid-range> : <busid>(-<busid>)?<feature_list>?
58 * <devices> : <devno-range>|<busid-range>
59 * <dasd_module> : dasd_diag_mod|dasd_eckd_mod|dasd_fba_mod
60 *
61 * <dasd> : autodetect|probeonly|<devices>(,<devices>)*
62 */
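
Putting the grammar together, a few parameter strings the code below accepts (device numbers invented for illustration; note the parser is more lenient than the grammar line above - it also understands the 'fixedbuffers' keyword and allows keywords mixed into a device list):

dasd=autodetect
dasd=probeonly
dasd=0.0.7000-0.0.7003(ro),0.0.7100
dasd=fixedbuffers,0x0150-0x015f(diag)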
63
64int dasd_probeonly = 0; /* is true, when probeonly mode is active */
65int dasd_autodetect = 0; /* is true, when autodetection is active */
66
67/*
68 * char *dasd[] is intended to hold the ranges supplied by the dasd= statement
69 * it is named 'dasd' to directly be filled by insmod with the comma separated
70 * strings when running as a module.
71 */
72static char *dasd[256];
73/*
74 * Single spinlock to protect devmap structures and lists.
75 */
76static DEFINE_SPINLOCK(dasd_devmap_lock);
77
78/*
79 * Hash lists for devmap structures.
80 */
81static struct list_head dasd_hashlists[256];
82int dasd_max_devindex;
83
84static struct dasd_devmap *dasd_add_busid(char *, int);
85
86static inline int
87dasd_hash_busid(char *bus_id)
88{
89 int hash, i;
90
91 hash = 0;
92 for (i = 0; (i < BUS_ID_SIZE) && *bus_id; i++, bus_id++)
93 hash += *bus_id;
94 return hash & 0xff;
95}
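
The hash is simply the byte sum of the bus id truncated to eight bits - cheap, and consecutive device numbers land in different buckets. A standalone version (the BUS_ID_SIZE value is assumed; the real one comes from the driver core):

#include <stdio.h>

#define BUS_ID_SIZE 20  /* assumed */

static int
hash_busid(const char *bus_id)
{
        int hash = 0, i;

        for (i = 0; i < BUS_ID_SIZE && bus_id[i]; i++)
                hash += bus_id[i];
        return hash & 0xff;
}

int
main(void)
{
        /* neighbouring devnos differ in the last byte, so buckets differ */
        printf("%d %d\n", hash_busid("0.0.7000"), hash_busid("0.0.7001"));
        return 0;
}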
96
97#ifndef MODULE
98/*
99 * The parameter parsing functions for builtin-drivers are called
100 * before kmalloc works. Store the pointers to the parameters strings
101 * into dasd[] for later processing.
102 */
103static int __init
104dasd_call_setup(char *str)
105{
106 static int count = 0;
107
108 if (count < 256)
109 dasd[count++] = str;
110 return 1;
111}
112
113__setup ("dasd=", dasd_call_setup);
114#endif /* #ifndef MODULE */
115
116/*
117 * Read a device busid/devno from a string.
118 */
119static inline int
120dasd_busid(char **str, int *id0, int *id1, int *devno)
121{
122 int val, old_style;
123
124 /* check for leading '0x' */
125 old_style = 0;
126 if ((*str)[0] == '0' && (*str)[1] == 'x') {
127 *str += 2;
128 old_style = 1;
129 }
130 if (!isxdigit((*str)[0])) /* We require at least one hex digit */
131 return -EINVAL;
132 val = simple_strtoul(*str, str, 16);
133 if (old_style || (*str)[0] != '.') {
134 *id0 = *id1 = 0;
135 if (val < 0 || val > 0xffff)
136 return -EINVAL;
137 *devno = val;
138 return 0;
139 }
140 /* New style x.y.z busid */
141 if (val < 0 || val > 0xff)
142 return -EINVAL;
143 *id0 = val;
144 (*str)++;
145 if (!isxdigit((*str)[0])) /* We require at least one hex digit */
146 return -EINVAL;
147 val = simple_strtoul(*str, str, 16);
148 if (val < 0 || val > 0xff || (*str)++[0] != '.')
149 return -EINVAL;
150 *id1 = val;
151 if (!isxdigit((*str)[0])) /* We require at least one hex digit */
152 return -EINVAL;
153 val = simple_strtoul(*str, str, 16);
154 if (val < 0 || val > 0xffff)
155 return -EINVAL;
156 *devno = val;
157 return 0;
158}
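
The function accepts both the old-style device number ('0x0150', or plain hex like '0150') and the new css.ssid.devno form ('0.0.0150'). A compact userspace model using strtol; the range and hex-digit checks of the driver are trimmed here for brevity:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

/* fills id0.id1.devno from the string; returns 0 on success, -1 on error */
static int
parse_busid(const char **str, int *id0, int *id1, int *devno)
{
        int old_style = 0;
        char *end;
        long val;

        if ((*str)[0] == '0' && (*str)[1] == 'x') {
                *str += 2;
                old_style = 1;
        }
        if (!isxdigit((unsigned char)(*str)[0]))
                return -1;
        val = strtol(*str, &end, 16);
        *str = end;
        if (old_style || (*str)[0] != '.') {
                *id0 = *id1 = 0;        /* old-style: devno only */
                *devno = val;
                return 0;
        }
        *id0 = val;                     /* new style: x.y.devno */
        (*str)++;
        val = strtol(*str, &end, 16);
        *str = end;
        if ((*str)++[0] != '.')
                return -1;
        *id1 = val;
        val = strtol(*str, &end, 16);
        *str = end;
        *devno = val;
        return 0;
}

int
main(void)
{
        const char *s = "0.0.7000";
        int id0, id1, devno;

        if (parse_busid(&s, &id0, &id1, &devno) == 0)
                printf("%x.%x.%04x\n", id0, id1, devno);
        return 0;
}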
159
160/*
161 * Read colon separated list of dasd features. Currently there is
162 * only one: "ro" for read-only devices. The default feature set
163 * is empty (value 0).
164 */
165static inline int
166dasd_feature_list(char *str, char **endp)
167{
168 int features, len, rc;
169
170 rc = 0;
171 if (*str != '(') {
172 *endp = str;
173 return DASD_FEATURE_DEFAULT;
174 }
175 str++;
176 features = 0;
177
178 while (1) {
179 for (len = 0;
180 str[len] && str[len] != ':' && str[len] != ')'; len++);
181 if (len == 2 && !strncmp(str, "ro", 2))
182 features |= DASD_FEATURE_READONLY;
183 else if (len == 4 && !strncmp(str, "diag", 4))
184 features |= DASD_FEATURE_USEDIAG;
185 else {
186 MESSAGE(KERN_WARNING,
187 "unsupported feature: %*s, "
188 "ignoring setting", len, str);
189 rc = -EINVAL;
190 }
191 str += len;
192 if (*str != ':')
193 break;
194 str++;
195 }
196 if (*str != ')') {
197 MESSAGE(KERN_WARNING, "%s",
198 "missing ')' in dasd parameter string\n");
199 rc = -EINVAL;
200 } else
201 str++;
202 *endp = str;
203 if (rc != 0)
204 return rc;
205 return features;
206}
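
A feature list such as '(ro:diag)' is tokenized on ':' inside the parentheses and folded into a bit mask. A small standalone model; the DASD_FEATURE_* values are assumptions, the driver defines the real ones in its headers:

#include <stdio.h>
#include <string.h>

#define DASD_FEATURE_READONLY 0x01  /* assumed values */
#define DASD_FEATURE_USEDIAG  0x02

/* returns the feature mask, 0 for no list, -1 on a missing ')';
 * unknown tokens are silently ignored here, the driver warns instead */
static int
feature_list(const char *str)
{
        int features = 0;

        if (*str++ != '(')
                return 0;               /* default feature set */
        for (;;) {
                size_t len = strcspn(str, ":)");

                if (len == 2 && !strncmp(str, "ro", 2))
                        features |= DASD_FEATURE_READONLY;
                else if (len == 4 && !strncmp(str, "diag", 4))
                        features |= DASD_FEATURE_USEDIAG;
                str += len;
                if (*str != ':')
                        break;
                str++;
        }
        return (*str == ')') ? features : -1;
}

int
main(void)
{
        printf("%#x\n", feature_list("(ro:diag)"));     /* prints 0x3 */
        return 0;
}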
207
208/*
209 * Try to match the first element on the comma separated parse string
210 * with one of the known keywords. If a keyword is found, take the appropriate
211 * action and return a pointer to the residual string. If the first element
212 * could not be matched to any keyword, return an error code.
213 */
214static char *
215dasd_parse_keyword( char *parsestring ) {
216
217 char *nextcomma, *residual_str;
218 int length;
219
220 nextcomma = strchr(parsestring,',');
221 if (nextcomma) {
222 length = nextcomma - parsestring;
223 residual_str = nextcomma + 1;
224 } else {
225 length = strlen(parsestring);
226 residual_str = parsestring + length;
227 }
228 if (strncmp ("autodetect", parsestring, length) == 0) {
229 dasd_autodetect = 1;
230 MESSAGE (KERN_INFO, "%s",
231 "turning to autodetection mode");
232 return residual_str;
233 }
234 if (strncmp ("probeonly", parsestring, length) == 0) {
235 dasd_probeonly = 1;
236 MESSAGE(KERN_INFO, "%s",
237 "turning to probeonly mode");
238 return residual_str;
239 }
240 if (strncmp ("fixedbuffers", parsestring, length) == 0) {
241 if (dasd_page_cache)
242 return residual_str;
243 dasd_page_cache =
244 kmem_cache_create("dasd_page_cache", PAGE_SIZE, 0,
245 SLAB_CACHE_DMA, NULL, NULL );
246 if (!dasd_page_cache)
247 MESSAGE(KERN_WARNING, "%s", "Failed to create slab, "
248 "fixed buffer mode disabled.");
249 else
250 MESSAGE (KERN_INFO, "%s",
251 "turning on fixed buffer mode");
252 return residual_str;
253 }
254 return ERR_PTR(-EINVAL);
255}
256
257/*
258 * Try to interpret the first element on the comma separated parse string
259 * as a device number or a range of devices. If the interpretation is
260 * successful, create the matching dasd_devmap entries and return a pointer
261 * to the residual string.
262 * If interpretation fails or in case of an error, return an error code.
263 */
264static char *
265dasd_parse_range( char *parsestring ) {
266
267 struct dasd_devmap *devmap;
268 int from, from_id0, from_id1;
269 int to, to_id0, to_id1;
270 int features, rc;
271 char bus_id[BUS_ID_SIZE+1], *str;
272
273 str = parsestring;
274 rc = dasd_busid(&str, &from_id0, &from_id1, &from);
275 if (rc == 0) {
276 to = from;
277 to_id0 = from_id0;
278 to_id1 = from_id1;
279 if (*str == '-') {
280 str++;
281 rc = dasd_busid(&str, &to_id0, &to_id1, &to);
282 }
283 }
284 if (rc == 0 &&
285 (from_id0 != to_id0 || from_id1 != to_id1 || from > to))
286 rc = -EINVAL;
287 if (rc) {
288 MESSAGE(KERN_ERR, "Invalid device range %s", parsestring);
289 return ERR_PTR(rc);
290 }
291 features = dasd_feature_list(str, &str);
292 if (features < 0)
293 return ERR_PTR(-EINVAL);
294 while (from <= to) {
295 sprintf(bus_id, "%01x.%01x.%04x",
296 from_id0, from_id1, from++);
297 devmap = dasd_add_busid(bus_id, features);
298 if (IS_ERR(devmap))
299 return (char *)devmap;
300 }
301 if (*str == ',')
302 return str + 1;
303 if (*str == '\0')
304 return str;
305 MESSAGE(KERN_WARNING,
306 "junk at end of dasd parameter string: %s\n", str);
307 return ERR_PTR(-EINVAL);
308}
309
310static inline char *
311dasd_parse_next_element( char *parsestring ) {
312 char * residual_str;
313 residual_str = dasd_parse_keyword(parsestring);
314 if (!IS_ERR(residual_str))
315 return residual_str;
316 residual_str = dasd_parse_range(parsestring);
317 return residual_str;
318}
319
320/*
321 * Parse parameters stored in dasd[]
322 * The 'dasd=...' parameter allows one to specify a comma separated list of
323 * keywords and device ranges. When the dasd driver is built into the kernel,
324 * the complete list will be stored as one element of the dasd[] array.
325 * When the dasd driver is built as a module, the list is broken into
326 * its elements and each dasd[] entry contains one element.
327 */
328int
329dasd_parse(void)
330{
331 int rc, i;
332 char *parsestring;
333
334 rc = 0;
335 for (i = 0; i < 256; i++) {
336 if (dasd[i] == NULL)
337 break;
338 parsestring = dasd[i];
339 /* loop over the comma separated list in the parsestring */
340 while (*parsestring) {
341 parsestring = dasd_parse_next_element(parsestring);
342 if(IS_ERR(parsestring)) {
343 rc = PTR_ERR(parsestring);
344 break;
345 }
346 }
347 if (rc) {
348 DBF_EVENT(DBF_ALERT, "%s", "invalid range found");
349 break;
350 }
351 }
352 return rc;
353}
354
355/*
356 * Add a devmap for the device specified by busid. It is possible that
357 * the devmap already exists (dasd= parameter). The order of the devices
358 * added through this function will define the kdevs for the individual
359 * devices.
360 */
361static struct dasd_devmap *
362dasd_add_busid(char *bus_id, int features)
363{
364 struct dasd_devmap *devmap, *new, *tmp;
365 int hash;
366
367 new = kmalloc(sizeof(struct dasd_devmap), GFP_KERNEL);
369 if (!new)
370 return ERR_PTR(-ENOMEM);
371 spin_lock(&dasd_devmap_lock);
372 devmap = NULL;
373 hash = dasd_hash_busid(bus_id);
374 list_for_each_entry(tmp, &dasd_hashlists[hash], list)
375 if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) {
376 devmap = tmp;
377 break;
378 }
379 if (!devmap) {
380 /* This bus_id is new. */
381 new->devindex = dasd_max_devindex++;
382 strncpy(new->bus_id, bus_id, BUS_ID_SIZE);
383 new->features = features;
384 new->device = NULL;
385 list_add(&new->list, &dasd_hashlists[hash]);
386 devmap = new;
387 new = NULL;
388 }
389 spin_unlock(&dasd_devmap_lock);
390 kfree(new);
392 return devmap;
393}
394
395/*
396 * Find devmap for device with given bus_id.
397 */
398static struct dasd_devmap *
399dasd_find_busid(char *bus_id)
400{
401 struct dasd_devmap *devmap, *tmp;
402 int hash;
403
404 spin_lock(&dasd_devmap_lock);
405 devmap = ERR_PTR(-ENODEV);
406 hash = dasd_hash_busid(bus_id);
407 list_for_each_entry(tmp, &dasd_hashlists[hash], list) {
408 if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) {
409 devmap = tmp;
410 break;
411 }
412 }
413 spin_unlock(&dasd_devmap_lock);
414 return devmap;
415}
416
417/*
418 * Check if busid has been added to the list of dasd ranges.
419 */
420int
421dasd_busid_known(char *bus_id)
422{
423 return IS_ERR(dasd_find_busid(bus_id)) ? -ENOENT : 0;
424}
425
426/*
427 * Forget all about the device numbers added so far.
428 * This may only be called at module unload or system shutdown.
429 */
430static void
431dasd_forget_ranges(void)
432{
433 struct dasd_devmap *devmap, *n;
434 int i;
435
436 spin_lock(&dasd_devmap_lock);
437 for (i = 0; i < 256; i++) {
438 list_for_each_entry_safe(devmap, n, &dasd_hashlists[i], list) {
439 if (devmap->device != NULL)
440 BUG();
441 list_del(&devmap->list);
442 kfree(devmap);
443 }
444 }
445 spin_unlock(&dasd_devmap_lock);
446}
447
448/*
449 * Find the device struct by its device index.
450 */
451struct dasd_device *
452dasd_device_from_devindex(int devindex)
453{
454 struct dasd_devmap *devmap, *tmp;
455 struct dasd_device *device;
456 int i;
457
458 spin_lock(&dasd_devmap_lock);
459 devmap = NULL;
460 for (i = 0; (i < 256) && !devmap; i++)
461 list_for_each_entry(tmp, &dasd_hashlists[i], list)
462 if (tmp->devindex == devindex) {
463 /* Found the devmap for the device. */
464 devmap = tmp;
465 break;
466 }
467 if (devmap && devmap->device) {
468 device = devmap->device;
469 dasd_get_device(device);
470 } else
471 device = ERR_PTR(-ENODEV);
472 spin_unlock(&dasd_devmap_lock);
473 return device;
474}
475
476/*
477 * Return devmap for cdev. If no devmap exists yet, create one and
478 * connect it to the cdev.
479 */
480static struct dasd_devmap *
481dasd_devmap_from_cdev(struct ccw_device *cdev)
482{
483 struct dasd_devmap *devmap;
484
485 devmap = dasd_find_busid(cdev->dev.bus_id);
486 if (IS_ERR(devmap))
487 devmap = dasd_add_busid(cdev->dev.bus_id,
488 DASD_FEATURE_DEFAULT);
489 return devmap;
490}
491
492/*
493 * Create a dasd device structure for cdev.
494 */
495struct dasd_device *
496dasd_create_device(struct ccw_device *cdev)
497{
498 struct dasd_devmap *devmap;
499 struct dasd_device *device;
500 int rc;
501
502 devmap = dasd_devmap_from_cdev(cdev);
503 if (IS_ERR(devmap))
504 return (void *) devmap;
505 cdev->dev.driver_data = devmap;
506
507 device = dasd_alloc_device();
508 if (IS_ERR(device))
509 return device;
510 atomic_set(&device->ref_count, 2);
511
512 spin_lock(&dasd_devmap_lock);
513 if (!devmap->device) {
514 devmap->device = device;
515 device->devindex = devmap->devindex;
516 if (devmap->features & DASD_FEATURE_READONLY)
517 set_bit(DASD_FLAG_RO, &device->flags);
518 else
519 clear_bit(DASD_FLAG_RO, &device->flags);
520 if (devmap->features & DASD_FEATURE_USEDIAG)
521 set_bit(DASD_FLAG_USE_DIAG, &device->flags);
522 else
523 clear_bit(DASD_FLAG_USE_DIAG, &device->flags);
524 get_device(&cdev->dev);
525 device->cdev = cdev;
526 rc = 0;
527 } else
528 /* Someone else was faster. */
529 rc = -EBUSY;
530 spin_unlock(&dasd_devmap_lock);
531
532 if (rc) {
533 dasd_free_device(device);
534 return ERR_PTR(rc);
535 }
536 return device;
537}
538
539/*
540 * Wait queue for dasd_delete_device waits.
541 */
542static DECLARE_WAIT_QUEUE_HEAD(dasd_delete_wq);
543
544/*
545 * Remove a dasd device structure. The passed reference
546 * is destroyed.
547 */
548void
549dasd_delete_device(struct dasd_device *device)
550{
551 struct ccw_device *cdev;
552 struct dasd_devmap *devmap;
553
554 /* First remove device pointer from devmap. */
555 devmap = dasd_find_busid(device->cdev->dev.bus_id);
556 if (IS_ERR(devmap))
557 BUG();
558 spin_lock(&dasd_devmap_lock);
559 if (devmap->device != device) {
560 spin_unlock(&dasd_devmap_lock);
561 dasd_put_device(device);
562 return;
563 }
564 devmap->device = NULL;
565 spin_unlock(&dasd_devmap_lock);
566
567 /* Drop ref_count by 2, one for the devmap reference and
568 * one for the passed reference. */
569 atomic_sub(2, &device->ref_count);
570
571 /* Wait for reference counter to drop to zero. */
572 wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0);
573
574 /* Disconnect dasd_device structure from ccw_device structure. */
575 cdev = device->cdev;
576 device->cdev = NULL;
577
578 /* Disconnect dasd_devmap structure from ccw_device structure. */
579 cdev->dev.driver_data = NULL;
580
581 /* Put ccw_device structure. */
582 put_device(&cdev->dev);
583
584 /* Now the device structure can be freed. */
585 dasd_free_device(device);
586}
587
588/*
589 * Reference counter dropped to zero. Wake up waiter
590 * in dasd_delete_device.
591 */
592void
593dasd_put_device_wake(struct dasd_device *device)
594{
595 wake_up(&dasd_delete_wq);
596}
597
598/*
599 * Return dasd_device structure associated with cdev.
600 */
601struct dasd_device *
602dasd_device_from_cdev(struct ccw_device *cdev)
603{
604 struct dasd_devmap *devmap;
605 struct dasd_device *device;
606
607 device = ERR_PTR(-ENODEV);
608 spin_lock(&dasd_devmap_lock);
609 devmap = cdev->dev.driver_data;
610 if (devmap && devmap->device) {
611 device = devmap->device;
612 dasd_get_device(device);
613 }
614 spin_unlock(&dasd_devmap_lock);
615 return device;
616}
617
618/*
619 * SECTION: files in sysfs
620 */
621
622/*
623 * readonly controls the readonly status of a dasd
624 */
625static ssize_t
626dasd_ro_show(struct device *dev, char *buf)
627{
628 struct dasd_devmap *devmap;
629 int ro_flag;
630
631 devmap = dasd_find_busid(dev->bus_id);
632 if (!IS_ERR(devmap))
633 ro_flag = (devmap->features & DASD_FEATURE_READONLY) != 0;
634 else
635 ro_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_READONLY) != 0;
636 return snprintf(buf, PAGE_SIZE, ro_flag ? "1\n" : "0\n");
637}
638
639static ssize_t
640dasd_ro_store(struct device *dev, const char *buf, size_t count)
641{
642 struct dasd_devmap *devmap;
643 int ro_flag;
644
645 devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
646 if (IS_ERR(devmap))
647 return PTR_ERR(devmap);
648 ro_flag = buf[0] == '1';
649 spin_lock(&dasd_devmap_lock);
650 if (ro_flag)
651 devmap->features |= DASD_FEATURE_READONLY;
652 else
653 devmap->features &= ~DASD_FEATURE_READONLY;
654 if (devmap->device) {
655 if (devmap->device->gdp)
656 set_disk_ro(devmap->device->gdp, ro_flag);
657 if (ro_flag)
658 set_bit(DASD_FLAG_RO, &devmap->device->flags);
659 else
660 clear_bit(DASD_FLAG_RO, &devmap->device->flags);
661 }
662 spin_unlock(&dasd_devmap_lock);
663 return count;
664}
665
666static DEVICE_ATTR(readonly, 0644, dasd_ro_show, dasd_ro_store);
667
668/*
669 * use_diag controls whether the driver should use diag rather than ssch
670 * to talk to the device
671 */
672static ssize_t
673dasd_use_diag_show(struct device *dev, char *buf)
674{
675 struct dasd_devmap *devmap;
676 int use_diag;
677
678 devmap = dasd_find_busid(dev->bus_id);
679 if (!IS_ERR(devmap))
680 use_diag = (devmap->features & DASD_FEATURE_USEDIAG) != 0;
681 else
682 use_diag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USEDIAG) != 0;
683 return snprintf(buf, PAGE_SIZE, use_diag ? "1\n" : "0\n");
684}
685
686static ssize_t
687dasd_use_diag_store(struct device *dev, const char *buf, size_t count)
688{
689 struct dasd_devmap *devmap;
690 ssize_t rc;
691 int use_diag;
692
693 devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
694 if (IS_ERR(devmap))
695 return PTR_ERR(devmap);
696 use_diag = buf[0] == '1';
697 spin_lock(&dasd_devmap_lock);
698 /* Changing diag discipline flag is only allowed in offline state. */
699 rc = count;
700 if (!devmap->device) {
701 if (use_diag)
702 devmap->features |= DASD_FEATURE_USEDIAG;
703 else
704 devmap->features &= ~DASD_FEATURE_USEDIAG;
705 } else
706 rc = -EPERM;
707 spin_unlock(&dasd_devmap_lock);
708 return rc;
709}
710
711static
712DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store);
713
714static ssize_t
715dasd_discipline_show(struct device *dev, char *buf)
716{
717 struct dasd_devmap *devmap;
718 char *dname;
719
720 spin_lock(&dasd_devmap_lock);
721 dname = "none";
722 devmap = dev->driver_data;
723 if (devmap && devmap->device && devmap->device->discipline)
724 dname = devmap->device->discipline->name;
725 spin_unlock(&dasd_devmap_lock);
726 return snprintf(buf, PAGE_SIZE, "%s\n", dname);
727}
728
729static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
730
731static struct attribute * dasd_attrs[] = {
732 &dev_attr_readonly.attr,
733 &dev_attr_discipline.attr,
734 &dev_attr_use_diag.attr,
735 NULL,
736};
737
738static struct attribute_group dasd_attr_group = {
739 .attrs = dasd_attrs,
740};
741
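/*
 * Illustrative usage from user space (the bus-id is hypothetical); the
 * attribute group is created on the ccw device's sysfs directory:
 *
 *   cat /sys/bus/ccw/devices/0.0.7000/discipline
 *   echo 1 > /sys/bus/ccw/devices/0.0.7000/readonly
 */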
742int
743dasd_add_sysfs_files(struct ccw_device *cdev)
744{
745 return sysfs_create_group(&cdev->dev.kobj, &dasd_attr_group);
746}
747
748void
749dasd_remove_sysfs_files(struct ccw_device *cdev)
750{
751 sysfs_remove_group(&cdev->dev.kobj, &dasd_attr_group);
752}
753
754
755int
756dasd_devmap_init(void)
757{
758 int i;
759
760 /* Initialize devmap structures. */
761 dasd_max_devindex = 0;
762 for (i = 0; i < 256; i++)
763 INIT_LIST_HEAD(&dasd_hashlists[i]);
764 return 0;
765
766}
767
768void
769dasd_devmap_exit(void)
770{
771 dasd_forget_ranges();
772}
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
new file mode 100644
index 000000000000..127699830fa1
--- /dev/null
+++ b/drivers/s390/block/dasd_diag.c
@@ -0,0 +1,541 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_diag.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Based on.......: linux/drivers/s390/block/mdisk.c
5 * ...............: by Hartmunt Penner <hpenner@de.ibm.com>
6 * Bugreports.to..: <Linux390@de.ibm.com>
7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
8 *
9 * $Revision: 1.42 $
10 */
11
12#include <linux/config.h>
13#include <linux/stddef.h>
14#include <linux/kernel.h>
15#include <linux/slab.h>
16#include <linux/hdreg.h> /* HDIO_GETGEO */
17#include <linux/bio.h>
18#include <linux/module.h>
19#include <linux/init.h>
20
21#include <asm/dasd.h>
22#include <asm/debug.h>
23#include <asm/ebcdic.h>
24#include <asm/io.h>
25#include <asm/s390_ext.h>
26#include <asm/todclk.h>
27
28#include "dasd_int.h"
29#include "dasd_diag.h"
30
31#ifdef PRINTK_HEADER
32#undef PRINTK_HEADER
33#endif /* PRINTK_HEADER */
34#define PRINTK_HEADER "dasd(diag):"
35
36MODULE_LICENSE("GPL");
37
38struct dasd_discipline dasd_diag_discipline;
39
40struct dasd_diag_private {
41 struct dasd_diag_characteristics rdc_data;
42 struct dasd_diag_rw_io iob;
43 struct dasd_diag_init_io iib;
44 unsigned int pt_block;
45};
46
47struct dasd_diag_req {
48 int block_count;
49 struct dasd_diag_bio bio[0];
50};
51
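/*
 * Issue DIAGNOSE X'250' with the given parameter-list block and
 * subfunction code. A sketch of the semantics, as inferred from the
 * assembly below: rc is preloaded with 3 so that a faulting diag
 * (caught via the exception table) reports an error; otherwise the
 * condition code is extracted with ipm/srl and or'ed with the return
 * code that VM leaves in r1. Callers mask the result with 3.
 */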
52static __inline__ int
53dia250(void *iob, int cmd)
54{
55 int rc;
56
57 __asm__ __volatile__(" lhi %0,3\n"
58 " lr 0,%2\n"
59 " diag 0,%1,0x250\n"
60 "0: ipm %0\n"
61 " srl %0,28\n"
62 " or %0,1\n"
63 "1:\n"
64#ifndef CONFIG_ARCH_S390X
65 ".section __ex_table,\"a\"\n"
66 " .align 4\n"
67 " .long 0b,1b\n"
68 ".previous\n"
69#else
70 ".section __ex_table,\"a\"\n"
71 " .align 8\n"
72 " .quad 0b,1b\n"
73 ".previous\n"
74#endif
75 : "=&d" (rc)
76 : "d" (cmd), "d" ((void *) __pa(iob))
77 : "0", "1", "cc");
78 return rc;
79}
80
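/*
 * Initialize the DIAG 250 block-I/O environment for the device:
 * report blocksize, offset and block range to VM (INIT_BIO
 * subfunction). Returns the low two bits of the dia250 result.
 */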
81static __inline__ int
82mdsk_init_io(struct dasd_device * device, int blocksize, int offset, int size)
83{
84 struct dasd_diag_private *private;
85 struct dasd_diag_init_io *iib;
86 int rc;
87
88 private = (struct dasd_diag_private *) device->private;
89 iib = &private->iib;
90 memset(iib, 0, sizeof (struct dasd_diag_init_io));
91
92 iib->dev_nr = _ccw_device_get_device_number(device->cdev);
93 iib->block_size = blocksize;
94 iib->offset = offset;
95 iib->start_block = 0;
96 iib->end_block = size;
97
98 rc = dia250(iib, INIT_BIO);
99
100 return rc & 3;
101}
102
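/*
 * Terminate the DIAG 250 block-I/O environment for the device
 * (TERM_BIO subfunction), ending any outstanding operations.
 */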
103static __inline__ int
104mdsk_term_io(struct dasd_device * device)
105{
106 struct dasd_diag_private *private;
107 struct dasd_diag_init_io *iib;
108 int rc;
109
110 private = (struct dasd_diag_private *) device->private;
111 iib = &private->iib;
112 memset(iib, 0, sizeof (struct dasd_diag_init_io));
113 iib->dev_nr = _ccw_device_get_device_number(device->cdev);
114 rc = dia250(iib, TERM_BIO);
115 return rc & 3;
116}
117
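/*
 * Start a request: hand its prepared bio list to VM via DIAG 250 with
 * the asynchronous-I/O flag set; completion is signalled by an
 * external interrupt that dasd_ext_handler processes.
 */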
118static int
119dasd_start_diag(struct dasd_ccw_req * cqr)
120{
121 struct dasd_device *device;
122 struct dasd_diag_private *private;
123 struct dasd_diag_req *dreq;
124 int rc;
125
126 device = cqr->device;
127 private = (struct dasd_diag_private *) device->private;
128 dreq = (struct dasd_diag_req *) cqr->data;
129
130 private->iob.dev_nr = _ccw_device_get_device_number(device->cdev);
131 private->iob.key = 0;
132 private->iob.flags = 2; /* do asynchronous io */
133 private->iob.block_count = dreq->block_count;
134 private->iob.interrupt_params = (u32)(addr_t) cqr;
135 private->iob.bio_list = __pa(dreq->bio);
136
137 cqr->startclk = get_clock();
138
139 rc = dia250(&private->iob, RW_BIO);
140 if (rc > 8) {
141 DEV_MESSAGE(KERN_WARNING, device, "dia250 returned CC %d", rc);
142 cqr->status = DASD_CQR_ERROR;
143 } else if (rc == 0) {
144 cqr->status = DASD_CQR_DONE;
145 dasd_schedule_bh(device);
146 } else {
147 cqr->status = DASD_CQR_IN_IO;
148 rc = 0;
149 }
150 return rc;
151}
152
153static void
154dasd_ext_handler(struct pt_regs *regs, __u16 code)
155{
156 struct dasd_ccw_req *cqr, *next;
157 struct dasd_device *device;
158 unsigned long long expires;
159 unsigned long flags;
160 char status;
161 int ip;
162
163 /*
164 * Get the external interruption subcode. VM stores
165 * this in the 'cpu address' field associated with
166 * the external interrupt. For diag 250 the subcode
167 * needs to be 3.
168 */
169 if ((S390_lowcore.cpu_addr & 0xff00) != 0x0300)
170 return;
171 status = *((char *) &S390_lowcore.ext_params + 5);
172 ip = S390_lowcore.ext_params;
173
174 if (!ip) { /* no intparm: unsolicited interrupt */
175 MESSAGE(KERN_DEBUG, "%s", "caught unsolicited interrupt");
176 return;
177 }
178 cqr = (struct dasd_ccw_req *)(addr_t) ip;
179 device = (struct dasd_device *) cqr->device;
180 if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
181 DEV_MESSAGE(KERN_WARNING, device,
182 " magic number of dasd_ccw_req 0x%08X doesn't"
183 " match discipline 0x%08X",
184 cqr->magic, *(int *) (&device->discipline->name));
185 return;
186 }
187
188 /* get irq lock to modify request queue */
189 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
190
191 cqr->stopclk = get_clock();
192
193 expires = 0;
194 if (status == 0) {
195 cqr->status = DASD_CQR_DONE;
196 /* Start first request on queue if possible -> fast_io. */
197 if (!list_empty(&device->ccw_queue)) {
198 next = list_entry(device->ccw_queue.next,
199 struct dasd_ccw_req, list);
200 if (next->status == DASD_CQR_QUEUED) {
201 if (dasd_start_diag(next) == 0)
202 expires = next->expires;
203 else
204 DEV_MESSAGE(KERN_WARNING, device, "%s",
205 "Interrupt fastpath "
206 "failed!");
207 }
208 }
209 } else
210 cqr->status = DASD_CQR_FAILED;
211
212 if (expires != 0)
213 dasd_set_timer(device, expires);
214 else
215 dasd_clear_timer(device);
216 dasd_schedule_bh(device);
217
218 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
219}
220
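/*
 * Probe the device: read the device characteristics via DIAG 210,
 * derive the label block position from the device class, and find the
 * formatted blocksize by trying to read the label with DIAG 250.
 */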
221static int
222dasd_diag_check_device(struct dasd_device *device)
223{
224 struct dasd_diag_private *private;
225 struct dasd_diag_characteristics *rdc_data;
226 struct dasd_diag_bio bio;
227 long *label;
228 int sb, bsize;
229 int rc;
230
231 private = (struct dasd_diag_private *) device->private;
232 if (private == NULL) {
233 private = kmalloc(sizeof(struct dasd_diag_private),GFP_KERNEL);
234 if (private == NULL) {
235 DEV_MESSAGE(KERN_WARNING, device, "%s",
236 "memory allocation failed for private data");
237 return -ENOMEM;
238 }
239 device->private = (void *) private;
240 }
241 /* Read Device Characteristics */
242 rdc_data = (void *) &(private->rdc_data);
243 rdc_data->dev_nr = _ccw_device_get_device_number(device->cdev);
244 rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);
245
246 rc = diag210((struct diag210 *) rdc_data);
247 if (rc)
248 return -ENOTSUPP;
249
250 /* Figure out position of label block */
251 switch (private->rdc_data.vdev_class) {
252 case DEV_CLASS_FBA:
253 private->pt_block = 1;
254 break;
255 case DEV_CLASS_ECKD:
256 private->pt_block = 2;
257 break;
258 default:
259 return -ENOTSUPP;
260 }
261
262 DBF_DEV_EVENT(DBF_INFO, device,
263 "%04X: %04X on real %04X/%02X",
264 rdc_data->dev_nr,
265 rdc_data->vdev_type,
266 rdc_data->rdev_type, rdc_data->rdev_model);
267
268 /* terminate all outstanding operations */
269 mdsk_term_io(device);
270
271 /* figure out blocksize of device */
272 label = (long *) get_zeroed_page(GFP_KERNEL);
273 if (label == NULL) {
274 DEV_MESSAGE(KERN_WARNING, device, "%s",
275 "No memory to allocate initialization request");
276 return -ENOMEM;
277 }
278 /* try all sizes - needed for ECKD devices */
279 for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
280 mdsk_init_io(device, bsize, 0, 64);
281 memset(&bio, 0, sizeof (struct dasd_diag_bio));
282 bio.type = MDSK_READ_REQ;
283 bio.block_number = private->pt_block + 1;
284 bio.buffer = __pa(label);
285 memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io));
286 private->iob.dev_nr = rdc_data->dev_nr;
287 private->iob.key = 0;
288 private->iob.flags = 0; /* do synchronous io */
289 private->iob.block_count = 1;
290 private->iob.interrupt_params = 0;
291 private->iob.bio_list = __pa(&bio);
292 if (dia250(&private->iob, RW_BIO) == 0)
293 break;
294 mdsk_term_io(device);
295 }
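 /* 0xc3d4e2f1 is "CMS1" in EBCDIC, i.e. a CMS disk label. */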
296 if (bsize <= PAGE_SIZE && label[0] == 0xc3d4e2f1) {
297 /* get formatted blocksize from label block */
298 bsize = (int) label[3];
299 device->blocks = label[7];
300 device->bp_block = bsize;
301 device->s2b_shift = 0; /* bits to shift 512 to get a block */
302 for (sb = 512; sb < bsize; sb = sb << 1)
303 device->s2b_shift++;
304
305 DEV_MESSAGE(KERN_INFO, device,
306 "capacity (%dkB blks): %ldkB",
307 (device->bp_block >> 10),
308 (device->blocks << device->s2b_shift) >> 1);
309 rc = 0;
310 } else {
311 if (bsize > PAGE_SIZE)
312 DEV_MESSAGE(KERN_WARNING, device, "%s",
313 "DIAG access failed");
314 else
315 DEV_MESSAGE(KERN_WARNING, device, "%s",
316 "volume is not CMS formatted");
317 rc = -EMEDIUMTYPE;
318 }
319 free_page((long) label);
320 return rc;
321}
322
323static int
324dasd_diag_fill_geometry(struct dasd_device *device, struct hd_geometry *geo)
325{
326 if (dasd_check_blocksize(device->bp_block) != 0)
327 return -EINVAL;
328 geo->cylinders = (device->blocks << device->s2b_shift) >> 10;
329 geo->heads = 16;
330 geo->sectors = 128 >> device->s2b_shift;
331 return 0;
332}
333
334static dasd_era_t
335dasd_diag_examine_error(struct dasd_ccw_req * cqr, struct irb * stat)
336{
337 return dasd_era_fatal;
338}
339
340static dasd_erp_fn_t
341dasd_diag_erp_action(struct dasd_ccw_req * cqr)
342{
343 return dasd_default_erp_action;
344}
345
346static dasd_erp_fn_t
347dasd_diag_erp_postaction(struct dasd_ccw_req * cqr)
348{
349 return dasd_default_erp_postaction;
350}
351
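/*
 * Translate a block layer request into a DIAG 250 bio list: one
 * dasd_diag_bio entry per blocksize-sized piece of each segment.
 */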
352static struct dasd_ccw_req *
353dasd_diag_build_cp(struct dasd_device * device, struct request *req)
354{
355 struct dasd_ccw_req *cqr;
356 struct dasd_diag_req *dreq;
357 struct dasd_diag_bio *dbio;
358 struct bio *bio;
359 struct bio_vec *bv;
360 char *dst;
361 int count, datasize;
362 sector_t recid, first_rec, last_rec;
363 unsigned blksize, off;
364 unsigned char rw_cmd;
365 int i;
366
367 if (rq_data_dir(req) == READ)
368 rw_cmd = MDSK_READ_REQ;
369 else if (rq_data_dir(req) == WRITE)
370 rw_cmd = MDSK_WRITE_REQ;
371 else
372 return ERR_PTR(-EINVAL);
373 blksize = device->bp_block;
374 /* Calculate record id of first and last block. */
375 first_rec = req->sector >> device->s2b_shift;
376 last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
377 /* Check struct bio and count the number of blocks for the request. */
378 count = 0;
379 rq_for_each_bio(bio, req) {
380 bio_for_each_segment(bv, bio, i) {
381 if (bv->bv_len & (blksize - 1))
382 /* Fba can only do full blocks. */
383 return ERR_PTR(-EINVAL);
384 count += bv->bv_len >> (device->s2b_shift + 9);
385 }
386 }
387 /* Paranoia. */
388 if (count != last_rec - first_rec + 1)
389 return ERR_PTR(-EINVAL);
390 /* Build the request */
391 datasize = sizeof(struct dasd_diag_req) +
392 count*sizeof(struct dasd_diag_bio);
393 cqr = dasd_smalloc_request(dasd_diag_discipline.name, 0,
394 datasize, device);
395 if (IS_ERR(cqr))
396 return cqr;
397
398 dreq = (struct dasd_diag_req *) cqr->data;
399 dreq->block_count = count;
400 dbio = dreq->bio;
401 recid = first_rec;
402 rq_for_each_bio(bio, req) {
403 bio_for_each_segment(bv, bio, i) {
404 dst = page_address(bv->bv_page) + bv->bv_offset;
405 for (off = 0; off < bv->bv_len; off += blksize) {
406 memset(dbio, 0, sizeof (struct dasd_diag_bio));
407 dbio->type = rw_cmd;
408 dbio->block_number = recid + 1;
409 dbio->buffer = __pa(dst);
410 dbio++;
411 dst += blksize;
412 recid++;
413 }
414 }
415 }
416 cqr->buildclk = get_clock();
417 cqr->device = device;
418 cqr->expires = 50 * HZ; /* 50 seconds */
419 cqr->status = DASD_CQR_FILLED;
420 return cqr;
421}
422
423static int
424dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
425{
426 int status;
427
428 status = cqr->status == DASD_CQR_DONE;
429 dasd_sfree_request(cqr, cqr->device);
430 return status;
431}
432
433static int
434dasd_diag_fill_info(struct dasd_device * device,
435 struct dasd_information2_t * info)
436{
437 struct dasd_diag_private *private;
438
439 private = (struct dasd_diag_private *) device->private;
440 info->label_block = private->pt_block;
441 info->FBA_layout = 1;
442 info->format = DASD_FORMAT_LDL;
443 info->characteristics_size = sizeof (struct dasd_diag_characteristics);
444 memcpy(info->characteristics, &private->rdc_data,
445 sizeof (struct dasd_diag_characteristics));
447 info->confdata_size = 0;
448 return 0;
449}
450
451static void
452dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
453 struct irb *stat)
454{
455 DEV_MESSAGE(KERN_ERR, device, "%s",
456 "dump sense not available for DIAG data");
457}
458
459/*
460 * max_blocks is dependent on the amount of storage that is available
461 * in the static io buffer for each device. Currently each device has
462 * 8192 bytes (=2 pages). dasd diag is only relevant for 31 bit.
463 * The struct dasd_ccw_req has 96 bytes, the struct dasd_diag_req has
464 * 8 bytes and the struct dasd_diag_bio for each block has 16 bytes.
465 * That makes:
466 * (8192 - 96 - 8) / 16 = 505.5 blocks at maximum.
467 * We want to fit two into the available memory so that we can immediately
468 * start the next request when one finishes. That makes 252.75 blocks
469 * per request; allowing a little safety margin yields 240.
470 */
471struct dasd_discipline dasd_diag_discipline = {
472 .owner = THIS_MODULE,
473 .name = "DIAG",
474 .ebcname = "DIAG",
475 .max_blocks = 240,
476 .check_device = dasd_diag_check_device,
477 .fill_geometry = dasd_diag_fill_geometry,
478 .start_IO = dasd_start_diag,
479 .examine_error = dasd_diag_examine_error,
480 .erp_action = dasd_diag_erp_action,
481 .erp_postaction = dasd_diag_erp_postaction,
482 .build_cp = dasd_diag_build_cp,
483 .free_cp = dasd_diag_free_cp,
484 .dump_sense = dasd_diag_dump_sense,
485 .fill_info = dasd_diag_fill_info,
486};
487
488static int __init
489dasd_diag_init(void)
490{
491 if (!MACHINE_IS_VM) {
492 MESSAGE_LOG(KERN_INFO,
493 "Machine is not VM: %s "
494 "discipline not initializing",
495 dasd_diag_discipline.name);
496 return -EINVAL;
497 }
498 ASCEBC(dasd_diag_discipline.ebcname, 4);
499
500 ctl_set_bit(0, 9);
501 register_external_interrupt(0x2603, dasd_ext_handler);
502 dasd_diag_discipline_pointer = &dasd_diag_discipline;
503 return 0;
504}
505
506static void __exit
507dasd_diag_cleanup(void)
508{
509 if (!MACHINE_IS_VM) {
510 MESSAGE_LOG(KERN_INFO,
511 "Machine is not VM: %s "
512 "discipline not cleaned",
513 dasd_diag_discipline.name);
514 return;
515 }
516 unregister_external_interrupt(0x2603, dasd_ext_handler);
517 ctl_clear_bit(0, 9);
518 dasd_diag_discipline_pointer = NULL;
519}
520
521module_init(dasd_diag_init);
522module_exit(dasd_diag_cleanup);
523
524/*
525 * Overrides for Emacs so that we follow Linus's tabbing style.
526 * Emacs will notice this stuff at the end of the file and automatically
527 * adjust the settings for this buffer only. This must remain at the end
528 * of the file.
529 * ---------------------------------------------------------------------------
530 * Local variables:
531 * c-indent-level: 4
532 * c-brace-imaginary-offset: 0
533 * c-brace-offset: -4
534 * c-argdecl-indent: 4
535 * c-label-offset: -4
536 * c-continued-statement-offset: 4
537 * c-continued-brace-offset: 0
538 * indent-tabs-mode: 1
539 * tab-width: 8
540 * End:
541 */
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
new file mode 100644
index 000000000000..a0c38e303979
--- /dev/null
+++ b/drivers/s390/block/dasd_diag.h
@@ -0,0 +1,66 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_diag.h
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Based on.......: linux/drivers/s390/block/mdisk.h
5 * ...............: by Hartmunt Penner <hpenner@de.ibm.com>
6 * Bugreports.to..: <Linux390@de.ibm.com>
7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
8 *
9 * $Revision: 1.6 $
10 */
11
12#define MDSK_WRITE_REQ 0x01
13#define MDSK_READ_REQ 0x02
14
15#define INIT_BIO 0x00
16#define RW_BIO 0x01
17#define TERM_BIO 0x02
18
19#define DEV_CLASS_FBA 0x01
20#define DEV_CLASS_ECKD 0x04
21
22struct dasd_diag_characteristics {
23 u16 dev_nr;
24 u16 rdc_len;
25 u8 vdev_class;
26 u8 vdev_type;
27 u8 vdev_status;
28 u8 vdev_flags;
29 u8 rdev_class;
30 u8 rdev_type;
31 u8 rdev_model;
32 u8 rdev_features;
33} __attribute__ ((packed, aligned(4)));
34
35struct dasd_diag_bio {
36 u8 type;
37 u8 status;
38 u16 spare1;
39 u32 block_number;
40 u32 alet;
41 u32 buffer;
42} __attribute__ ((packed, aligned(8)));
43
44struct dasd_diag_init_io {
45 u16 dev_nr;
46 u16 spare1[11];
47 u32 block_size;
48 u32 offset;
49 u32 start_block;
50 u32 end_block;
51 u32 spare2[6];
52} __attribute__ ((packed, aligned(8)));
53
54struct dasd_diag_rw_io {
55 u16 dev_nr;
56 u16 spare1[11];
57 u8 key;
58 u8 flags;
59 u16 spare2;
60 u32 block_count;
61 u32 alet;
62 u32 bio_list;
63 u32 interrupt_params;
64 u32 spare3[5];
65} __attribute__ ((packed, aligned(8)));
66
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
new file mode 100644
index 000000000000..838aedf78a56
--- /dev/null
+++ b/drivers/s390/block/dasd_eckd.c
@@ -0,0 +1,1722 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_eckd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
9 *
10 * $Revision: 1.69 $
11 */
12
13#include <linux/config.h>
14#include <linux/stddef.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/hdreg.h> /* HDIO_GETGEO */
18#include <linux/bio.h>
19#include <linux/module.h>
20#include <linux/init.h>
21
22#include <asm/debug.h>
23#include <asm/idals.h>
24#include <asm/ebcdic.h>
25#include <asm/io.h>
26#include <asm/todclk.h>
27#include <asm/uaccess.h>
28#include <asm/ccwdev.h>
29
30#include "dasd_int.h"
31#include "dasd_eckd.h"
32
33#ifdef PRINTK_HEADER
34#undef PRINTK_HEADER
35#endif /* PRINTK_HEADER */
36#define PRINTK_HEADER "dasd(eckd):"
37
38#define ECKD_C0(i) (i->home_bytes)
39#define ECKD_F(i) (i->formula)
40#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
41 (i->factors.f_0x02.f1))
42#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
43 (i->factors.f_0x02.f2))
44#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
45 (i->factors.f_0x02.f3))
46#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
47#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
48#define ECKD_F6(i) (i->factor6)
49#define ECKD_F7(i) (i->factor7)
50#define ECKD_F8(i) (i->factor8)
51
52MODULE_LICENSE("GPL");
53
54static struct dasd_discipline dasd_eckd_discipline;
55
56struct dasd_eckd_private {
57 struct dasd_eckd_characteristics rdc_data;
58 struct dasd_eckd_confdata conf_data;
59 struct dasd_eckd_path path_data;
60 struct eckd_count count_area[5];
61 int init_cqr_status;
62 int uses_cdl;
63 struct attrib_data_t attrib; /* e.g. cache operations */
64};
65
66/* The ccw bus type uses this table to find devices that it sends to
67 * dasd_eckd_probe */
68static struct ccw_device_id dasd_eckd_ids[] = {
69 { CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0), .driver_info = 0x1},
70 { CCW_DEVICE_DEVTYPE(0x2105, 0, 0x3390, 0), .driver_info = 0x2},
71 { CCW_DEVICE_DEVTYPE(0x3880, 0, 0x3390, 0), .driver_info = 0x3},
72 { CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3380, 0), .driver_info = 0x4},
73 { CCW_DEVICE_DEVTYPE(0x2105, 0, 0x3380, 0), .driver_info = 0x5},
74 { CCW_DEVICE_DEVTYPE(0x9343, 0, 0x9345, 0), .driver_info = 0x6},
75 { CCW_DEVICE_DEVTYPE(0x2107, 0, 0x3390, 0), .driver_info = 0x7},
76 { CCW_DEVICE_DEVTYPE(0x2107, 0, 0x3380, 0), .driver_info = 0x8},
77 { CCW_DEVICE_DEVTYPE(0x1750, 0, 0x3390, 0), .driver_info = 0x9},
78 { CCW_DEVICE_DEVTYPE(0x1750, 0, 0x3380, 0), .driver_info = 0xa},
79 { /* end of list */ },
80};
81
82MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
83
84static struct ccw_driver dasd_eckd_driver; /* see below */
85
86/* Initial attempt at a probe function. This can be simplified once
87 * the other detection code is gone. */
88static int
89dasd_eckd_probe (struct ccw_device *cdev)
90{
91 int ret;
92
93 ret = dasd_generic_probe (cdev, &dasd_eckd_discipline);
94 if (ret)
95 return ret;
96 ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP | CCWDEV_ALLOW_FORCE);
97 return 0;
98}
99
100static int
101dasd_eckd_set_online(struct ccw_device *cdev)
102{
103 return dasd_generic_set_online (cdev, &dasd_eckd_discipline);
104}
105
106static struct ccw_driver dasd_eckd_driver = {
107 .name = "dasd-eckd",
108 .owner = THIS_MODULE,
109 .ids = dasd_eckd_ids,
110 .probe = dasd_eckd_probe,
111 .remove = dasd_generic_remove,
112 .set_offline = dasd_generic_set_offline,
113 .set_online = dasd_eckd_set_online,
114 .notify = dasd_generic_notify,
115};
116
117static const int sizes_trk0[] = { 28, 148, 84 };
118#define LABEL_SIZE 140
119
120static inline unsigned int
121round_up_multiple(unsigned int no, unsigned int mult)
122{
123 int rem = no % mult;
124 return (rem ? no - rem + mult : no);
125}
126
127static inline unsigned int
128ceil_quot(unsigned int d1, unsigned int d2)
129{
130 return (d1 + (d2 - 1)) / d2;
131}
132
133static inline int
134bytes_per_record(struct dasd_eckd_characteristics *rdc, int kl, int dl)
135{
136 unsigned int fl1, fl2, int1, int2;
137 int bpr;
138
139 switch (rdc->formula) {
140 case 0x01:
141 fl1 = round_up_multiple(ECKD_F2(rdc) + dl, ECKD_F1(rdc));
142 fl2 = round_up_multiple(kl ? ECKD_F2(rdc) + kl : 0,
143 ECKD_F1(rdc));
144 bpr = fl1 + fl2;
145 break;
146 case 0x02:
147 int1 = ceil_quot(dl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
148 int2 = ceil_quot(kl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
149 fl1 = round_up_multiple(ECKD_F1(rdc) * ECKD_F2(rdc) + dl +
150 ECKD_F6(rdc) + ECKD_F4(rdc) * int1,
151 ECKD_F1(rdc));
152 fl2 = round_up_multiple(ECKD_F1(rdc) * ECKD_F3(rdc) + kl +
153 ECKD_F6(rdc) + ECKD_F4(rdc) * int2,
154 ECKD_F1(rdc));
155 bpr = fl1 + fl2;
156 break;
157 default:
158 bpr = 0;
159 break;
160 }
161 return bpr;
162}
163
164static inline unsigned int
165bytes_per_track(struct dasd_eckd_characteristics *rdc)
166{
167 return *(unsigned int *) (rdc->byte_per_track) >> 8;
168}
169
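/*
 * Number of records of the given key/data length that fit on one
 * track. The constants encode the track capacity formulas of the
 * respective device geometries (3380, 3390, 9345).
 */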
170static inline unsigned int
171recs_per_track(struct dasd_eckd_characteristics * rdc,
172 unsigned int kl, unsigned int dl)
173{
174 int dn, kn;
175
176 switch (rdc->dev_type) {
177 case 0x3380:
178 if (kl)
179 return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
180 ceil_quot(dl + 12, 32));
181 else
182 return 1499 / (15 + ceil_quot(dl + 12, 32));
183 case 0x3390:
184 dn = ceil_quot(dl + 6, 232) + 1;
185 if (kl) {
186 kn = ceil_quot(kl + 6, 232) + 1;
187 return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
188 9 + ceil_quot(dl + 6 * dn, 34));
189 } else
190 return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
191 case 0x9345:
192 dn = ceil_quot(dl + 6, 232) + 1;
193 if (kl) {
194 kn = ceil_quot(kl + 6, 232) + 1;
195 return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
196 ceil_quot(dl + 6 * dn, 34));
197 } else
198 return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
199 }
200 return 0;
201}
202
203static inline void
204check_XRC (struct ccw1 *de_ccw,
205 struct DE_eckd_data *data,
206 struct dasd_device *device)
207{
208 struct dasd_eckd_private *private;
209
210 private = (struct dasd_eckd_private *) device->private;
211
212 /* switch on System Time Stamp - needed for XRC Support */
213 if (private->rdc_data.facilities.XRC_supported) {
214
215 data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
216 data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
217
218 data->ep_sys_time = get_clock ();
219
220 de_ccw->count = sizeof (struct DE_eckd_data);
221 de_ccw->flags |= CCW_FLAG_SLI;
222 }
223
224 return;
225
226} /* end check_XRC */
227
228static inline void
229define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
230 int totrk, int cmd, struct dasd_device * device)
231{
232 struct dasd_eckd_private *private;
233 struct ch_t geo, beg, end;
234
235 private = (struct dasd_eckd_private *) device->private;
236
237 ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
238 ccw->flags = 0;
239 ccw->count = 16;
240 ccw->cda = (__u32) __pa(data);
241
242 memset(data, 0, sizeof (struct DE_eckd_data));
243 switch (cmd) {
244 case DASD_ECKD_CCW_READ_HOME_ADDRESS:
245 case DASD_ECKD_CCW_READ_RECORD_ZERO:
246 case DASD_ECKD_CCW_READ:
247 case DASD_ECKD_CCW_READ_MT:
248 case DASD_ECKD_CCW_READ_CKD:
249 case DASD_ECKD_CCW_READ_CKD_MT:
250 case DASD_ECKD_CCW_READ_KD:
251 case DASD_ECKD_CCW_READ_KD_MT:
252 case DASD_ECKD_CCW_READ_COUNT:
253 data->mask.perm = 0x1;
254 data->attributes.operation = private->attrib.operation;
255 break;
256 case DASD_ECKD_CCW_WRITE:
257 case DASD_ECKD_CCW_WRITE_MT:
258 case DASD_ECKD_CCW_WRITE_KD:
259 case DASD_ECKD_CCW_WRITE_KD_MT:
260 data->mask.perm = 0x02;
261 data->attributes.operation = private->attrib.operation;
262 check_XRC (ccw, data, device);
263 break;
264 case DASD_ECKD_CCW_WRITE_CKD:
265 case DASD_ECKD_CCW_WRITE_CKD_MT:
266 data->attributes.operation = DASD_BYPASS_CACHE;
267 check_XRC (ccw, data, device);
268 break;
269 case DASD_ECKD_CCW_ERASE:
270 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
271 case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
272 data->mask.perm = 0x3;
273 data->mask.auth = 0x1;
274 data->attributes.operation = DASD_BYPASS_CACHE;
275 check_XRC (ccw, data, device);
276 break;
277 default:
278 DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
279 break;
280 }
281
282 data->attributes.mode = 0x3; /* ECKD */
283
284 if ((private->rdc_data.cu_type == 0x2105 ||
285 private->rdc_data.cu_type == 0x2107 ||
286 private->rdc_data.cu_type == 0x1750)
287 && !(private->uses_cdl && trk < 2))
288 data->ga_extended |= 0x40; /* Regular Data Format Mode */
289
290 geo.cyl = private->rdc_data.no_cyl;
291 geo.head = private->rdc_data.trk_per_cyl;
292 beg.cyl = trk / geo.head;
293 beg.head = trk % geo.head;
294 end.cyl = totrk / geo.head;
295 end.head = totrk % geo.head;
296
297 /* check for sequential prestage - enhance cylinder range */
298 if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
299 data->attributes.operation == DASD_SEQ_ACCESS) {
300
301 if (end.cyl + private->attrib.nr_cyl < geo.cyl)
302 end.cyl += private->attrib.nr_cyl;
303 else
304 end.cyl = (geo.cyl - 1);
305 }
306
307 data->beg_ext.cyl = beg.cyl;
308 data->beg_ext.head = beg.head;
309 data->end_ext.cyl = end.cyl;
310 data->end_ext.head = end.head;
311}
312
313static inline void
314locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
315 int rec_on_trk, int no_rec, int cmd,
316 struct dasd_device * device, int reclen)
317{
318 struct dasd_eckd_private *private;
319 int sector;
320 int dn, d;
321
322 private = (struct dasd_eckd_private *) device->private;
323
324 DBF_DEV_EVENT(DBF_INFO, device,
325 "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
326 trk, rec_on_trk, no_rec, cmd, reclen);
327
328 ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
329 ccw->flags = 0;
330 ccw->count = 16;
331 ccw->cda = (__u32) __pa(data);
332
333 memset(data, 0, sizeof (struct LO_eckd_data));
334 sector = 0;
335 if (rec_on_trk) {
336 switch (private->rdc_data.dev_type) {
337 case 0x3390:
338 dn = ceil_quot(reclen + 6, 232);
339 d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
340 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
341 break;
342 case 0x3380:
343 d = 7 + ceil_quot(reclen + 12, 32);
344 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
345 break;
346 }
347 }
348 data->sector = sector;
349 data->count = no_rec;
350 switch (cmd) {
351 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
352 data->operation.orientation = 0x3;
353 data->operation.operation = 0x03;
354 break;
355 case DASD_ECKD_CCW_READ_HOME_ADDRESS:
356 data->operation.orientation = 0x3;
357 data->operation.operation = 0x16;
358 break;
359 case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
360 data->operation.orientation = 0x1;
361 data->operation.operation = 0x03;
362 data->count++;
363 break;
364 case DASD_ECKD_CCW_READ_RECORD_ZERO:
365 data->operation.orientation = 0x3;
366 data->operation.operation = 0x16;
367 data->count++;
368 break;
369 case DASD_ECKD_CCW_WRITE:
370 case DASD_ECKD_CCW_WRITE_MT:
371 case DASD_ECKD_CCW_WRITE_KD:
372 case DASD_ECKD_CCW_WRITE_KD_MT:
373 data->auxiliary.last_bytes_used = 0x1;
374 data->length = reclen;
375 data->operation.operation = 0x01;
376 break;
377 case DASD_ECKD_CCW_WRITE_CKD:
378 case DASD_ECKD_CCW_WRITE_CKD_MT:
379 data->auxiliary.last_bytes_used = 0x1;
380 data->length = reclen;
381 data->operation.operation = 0x03;
382 break;
383 case DASD_ECKD_CCW_READ:
384 case DASD_ECKD_CCW_READ_MT:
385 case DASD_ECKD_CCW_READ_KD:
386 case DASD_ECKD_CCW_READ_KD_MT:
387 data->auxiliary.last_bytes_used = 0x1;
388 data->length = reclen;
389 data->operation.operation = 0x06;
390 break;
391 case DASD_ECKD_CCW_READ_CKD:
392 case DASD_ECKD_CCW_READ_CKD_MT:
393 data->auxiliary.last_bytes_used = 0x1;
394 data->length = reclen;
395 data->operation.operation = 0x16;
396 break;
397 case DASD_ECKD_CCW_READ_COUNT:
398 data->operation.operation = 0x06;
399 break;
400 case DASD_ECKD_CCW_ERASE:
401 data->length = reclen;
402 data->auxiliary.last_bytes_used = 0x1;
403 data->operation.operation = 0x0b;
404 break;
405 default:
406 DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
407 }
408 data->seek_addr.cyl = data->search_arg.cyl =
409 trk / private->rdc_data.trk_per_cyl;
410 data->seek_addr.head = data->search_arg.head =
411 trk % private->rdc_data.trk_per_cyl;
412 data->search_arg.record = rec_on_trk;
413}
414
415/*
416 * Returns 1 if the block is one of the special blocks that needs
417 * to get read/written with the KD variant of the command.
418 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
419 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
420 * Luckily the KD variants differ only by one bit (0x08) from the
421 * normal variant. So don't wonder about code like:
422 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
423 * ccw->cmd_code |= 0x8;
424 */
425static inline int
426dasd_eckd_cdl_special(int blk_per_trk, int recid)
427{
428 if (recid < 3)
429 return 1;
430 if (recid < blk_per_trk)
431 return 0;
432 if (recid < 2 * blk_per_trk)
433 return 1;
434 return 0;
435}
436
437/*
438 * Returns the record size for the special blocks of the cdl format.
439 * Only returns something useful if dasd_eckd_cdl_special is true
440 * for the recid.
441 */
442static inline int
443dasd_eckd_cdl_reclen(int recid)
444{
445 if (recid < 3)
446 return sizes_trk0[recid];
447 return LABEL_SIZE;
448}
449
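/*
 * Read the configuration data for each operational path and classify
 * the paths as preferred (ppm) or non-preferred (npm); the first valid
 * configuration data record is saved in the private area.
 */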
450static int
451dasd_eckd_read_conf(struct dasd_device *device)
452{
453 void *conf_data;
454 int conf_len, conf_data_saved;
455 int rc;
456 __u8 lpm;
457 struct dasd_eckd_private *private;
458 struct dasd_eckd_path *path_data;
459
460 private = (struct dasd_eckd_private *) device->private;
461 path_data = (struct dasd_eckd_path *) &private->path_data;
462 path_data->opm = ccw_device_get_path_mask(device->cdev);
463 lpm = 0x80;
464 conf_data_saved = 0;
465
466 /* get configuration data per operational path */
467 for (lpm = 0x80; lpm; lpm>>= 1) {
468 if (lpm & path_data->opm){
469 rc = read_conf_data_lpm(device->cdev, &conf_data,
470 &conf_len, lpm);
471 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
472 MESSAGE(KERN_WARNING,
473 "Read configuration data returned "
474 "error %d", rc);
475 return rc;
476 }
477 if (conf_data == NULL) {
478 MESSAGE(KERN_WARNING, "%s", "No configuration "
479 "data retrieved");
480 continue; /* no error */
481 }
482 if (conf_len != sizeof (struct dasd_eckd_confdata)) {
483 MESSAGE(KERN_WARNING,
484 "sizes of configuration data mismatch"
485 "%d (read) vs %ld (expected)",
486 conf_len,
487 sizeof (struct dasd_eckd_confdata));
488 kfree(conf_data);
489 continue; /* no error */
490 }
491 /* save first valid configuration data */
492 if (!conf_data_saved){
493 memcpy(&private->conf_data, conf_data,
494 sizeof (struct dasd_eckd_confdata));
495 conf_data_saved++;
496 }
497 switch (((char *)conf_data)[242] & 0x07){
498 case 0x02:
499 path_data->npm |= lpm;
500 break;
501 case 0x03:
502 path_data->ppm |= lpm;
503 break;
504 }
505 kfree(conf_data);
506 }
507 }
508 return 0;
509}
510
512static int
513dasd_eckd_check_characteristics(struct dasd_device *device)
514{
515 struct dasd_eckd_private *private;
516 void *rdc_data;
517 int rc;
518
519 private = (struct dasd_eckd_private *) device->private;
520 if (private == NULL) {
521 private = kmalloc(sizeof(struct dasd_eckd_private),
522 GFP_KERNEL | GFP_DMA);
523 if (private == NULL) {
524 DEV_MESSAGE(KERN_WARNING, device, "%s",
525 "memory allocation failed for private "
526 "data");
527 return -ENOMEM;
528 }
529 memset(private, 0, sizeof(struct dasd_eckd_private));
530 device->private = (void *) private;
531 }
532 /* Invalidate status of initial analysis. */
533 private->init_cqr_status = -1;
534 /* Set default cache operations. */
535 private->attrib.operation = DASD_NORMAL_CACHE;
536 private->attrib.nr_cyl = 0;
537
538 /* Read Device Characteristics */
539 rdc_data = (void *) &(private->rdc_data);
540 rc = read_dev_chars(device->cdev, &rdc_data, 64);
541 if (rc) {
542 DEV_MESSAGE(KERN_WARNING, device,
543 "Read device characteristics returned error %d",
544 rc);
545 return rc;
546 }
547
548 DEV_MESSAGE(KERN_INFO, device,
549 "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d",
550 private->rdc_data.dev_type,
551 private->rdc_data.dev_model,
552 private->rdc_data.cu_type,
553 private->rdc_data.cu_model.model,
554 private->rdc_data.no_cyl,
555 private->rdc_data.trk_per_cyl,
556 private->rdc_data.sec_per_trk);
557
558 /* Read Configuration Data */
559 rc = dasd_eckd_read_conf (device);
560 return rc;
561
562}
563
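/*
 * Build a channel program that reads the count fields of the first
 * four records on track 0 and the first record on track 2. The result
 * is evaluated in dasd_eckd_end_analysis to detect the disk layout
 * (cdl vs. ldl) and the formatted blocksize.
 */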
564static struct dasd_ccw_req *
565dasd_eckd_analysis_ccw(struct dasd_device *device)
566{
567 struct dasd_eckd_private *private;
568 struct eckd_count *count_data;
569 struct LO_eckd_data *LO_data;
570 struct dasd_ccw_req *cqr;
571 struct ccw1 *ccw;
572 int cplength, datasize;
573 int i;
574
575 private = (struct dasd_eckd_private *) device->private;
576
577 cplength = 8;
578 datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
579 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
580 cplength, datasize, device);
581 if (IS_ERR(cqr))
582 return cqr;
583 ccw = cqr->cpaddr;
584 /* Define extent for the first 3 tracks. */
585 define_extent(ccw++, cqr->data, 0, 2,
586 DASD_ECKD_CCW_READ_COUNT, device);
587 LO_data = cqr->data + sizeof (struct DE_eckd_data);
588 /* Locate record for the first 4 records on track 0. */
589 ccw[-1].flags |= CCW_FLAG_CC;
590 locate_record(ccw++, LO_data++, 0, 0, 4,
591 DASD_ECKD_CCW_READ_COUNT, device, 0);
592
593 count_data = private->count_area;
594 for (i = 0; i < 4; i++) {
595 ccw[-1].flags |= CCW_FLAG_CC;
596 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
597 ccw->flags = 0;
598 ccw->count = 8;
599 ccw->cda = (__u32)(addr_t) count_data;
600 ccw++;
601 count_data++;
602 }
603
604 /* Locate record for the first record on track 2. */
605 ccw[-1].flags |= CCW_FLAG_CC;
606 locate_record(ccw++, LO_data++, 2, 0, 1,
607 DASD_ECKD_CCW_READ_COUNT, device, 0);
608 /* Read count ccw. */
609 ccw[-1].flags |= CCW_FLAG_CC;
610 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
611 ccw->flags = 0;
612 ccw->count = 8;
613 ccw->cda = (__u32)(addr_t) count_data;
614
615 cqr->device = device;
616 cqr->retries = 0;
617 cqr->buildclk = get_clock();
618 cqr->status = DASD_CQR_FILLED;
619 return cqr;
620}
621
622/*
623 * This is the callback function for the init_analysis cqr. It saves
624 * the status of the initial analysis ccw before it frees it and kicks
625 * the device to continue the startup sequence. This will call
626 * dasd_eckd_do_analysis again (if the devices has not been marked
627 * for deletion in the meantime).
628 */
629static void
630dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
631{
632 struct dasd_eckd_private *private;
633 struct dasd_device *device;
634
635 device = init_cqr->device;
636 private = (struct dasd_eckd_private *) device->private;
637 private->init_cqr_status = init_cqr->status;
638 dasd_sfree_request(init_cqr, device);
639 dasd_kick_device(device);
640}
641
642static int
643dasd_eckd_start_analysis(struct dasd_device *device)
644{
645 struct dasd_eckd_private *private;
646 struct dasd_ccw_req *init_cqr;
647
648 private = (struct dasd_eckd_private *) device->private;
649 init_cqr = dasd_eckd_analysis_ccw(device);
650 if (IS_ERR(init_cqr))
651 return PTR_ERR(init_cqr);
652 init_cqr->callback = dasd_eckd_analysis_callback;
653 init_cqr->callback_data = NULL;
654 init_cqr->expires = 5*HZ;
655 dasd_add_request_head(init_cqr);
656 return -EAGAIN;
657}
658
659static int
660dasd_eckd_end_analysis(struct dasd_device *device)
661{
662 struct dasd_eckd_private *private;
663 struct eckd_count *count_area;
664 unsigned int sb, blk_per_trk;
665 int status, i;
666
667 private = (struct dasd_eckd_private *) device->private;
668 status = private->init_cqr_status;
669 private->init_cqr_status = -1;
670 if (status != DASD_CQR_DONE) {
671 DEV_MESSAGE(KERN_WARNING, device, "%s",
672 "volume analysis returned unformatted disk");
673 return -EMEDIUMTYPE;
674 }
675
676 private->uses_cdl = 1;
677 /* Calculate number of blocks/records per track. */
678 blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block);
679 /* Check Track 0 for Compatible Disk Layout */
680 count_area = NULL;
681 for (i = 0; i < 3; i++) {
682 if (private->count_area[i].kl != 4 ||
683 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
684 private->uses_cdl = 0;
685 break;
686 }
687 }
688 if (i == 3)
689 count_area = &private->count_area[4];
690
691 if (private->uses_cdl == 0) {
692 for (i = 0; i < 5; i++) {
693 if ((private->count_area[i].kl != 0) ||
694 (private->count_area[i].dl !=
695 private->count_area[0].dl))
696 break;
697 }
698 if (i == 5)
699 count_area = &private->count_area[0];
700 } else {
701 if (private->count_area[3].record == 1)
702 DEV_MESSAGE(KERN_WARNING, device, "%s",
703 "Trk 0: no records after VTOC!");
704 }
705 if (count_area != NULL && count_area->kl == 0) {
706 /* we found nothing violating our disk layout */
707 if (dasd_check_blocksize(count_area->dl) == 0)
708 device->bp_block = count_area->dl;
709 }
710 if (device->bp_block == 0) {
711 DEV_MESSAGE(KERN_WARNING, device, "%s",
712 "Volume has incompatible disk layout");
713 return -EMEDIUMTYPE;
714 }
715 device->s2b_shift = 0; /* bits to shift 512 to get a block */
716 for (sb = 512; sb < device->bp_block; sb = sb << 1)
717 device->s2b_shift++;
718
719 blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block);
720 device->blocks = (private->rdc_data.no_cyl *
721 private->rdc_data.trk_per_cyl *
722 blk_per_trk);
723
724 DEV_MESSAGE(KERN_INFO, device,
725 "(%dkB blks): %dkB at %dkB/trk %s",
726 (device->bp_block >> 10),
727 ((private->rdc_data.no_cyl *
728 private->rdc_data.trk_per_cyl *
729 blk_per_trk * (device->bp_block >> 9)) >> 1),
730 ((blk_per_trk * device->bp_block) >> 10),
731 private->uses_cdl ?
732 "compatible disk layout" : "linux disk layout");
733
734 return 0;
735}
736
737static int
738dasd_eckd_do_analysis(struct dasd_device *device)
739{
740 struct dasd_eckd_private *private;
741
742 private = (struct dasd_eckd_private *) device->private;
743 if (private->init_cqr_status < 0)
744 return dasd_eckd_start_analysis(device);
745 else
746 return dasd_eckd_end_analysis(device);
747}
748
749static int
750dasd_eckd_fill_geometry(struct dasd_device *device, struct hd_geometry *geo)
751{
752 struct dasd_eckd_private *private;
753
754 private = (struct dasd_eckd_private *) device->private;
755 if (dasd_check_blocksize(device->bp_block) == 0) {
756 geo->sectors = recs_per_track(&private->rdc_data,
757 0, device->bp_block);
758 }
759 geo->cylinders = private->rdc_data.no_cyl;
760 geo->heads = private->rdc_data.trk_per_cyl;
761 return 0;
762}
763
764static struct dasd_ccw_req *
765dasd_eckd_format_device(struct dasd_device * device,
766 struct format_data_t * fdata)
767{
768 struct dasd_eckd_private *private;
769 struct dasd_ccw_req *fcp;
770 struct eckd_count *ect;
771 struct ccw1 *ccw;
772 void *data;
773 int rpt, cyl, head;
774 int cplength, datasize;
775 int i;
776
777 private = (struct dasd_eckd_private *) device->private;
778 rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
779 cyl = fdata->start_unit / private->rdc_data.trk_per_cyl;
780 head = fdata->start_unit % private->rdc_data.trk_per_cyl;
781
782 /* Sanity checks. */
783 if (fdata->start_unit >=
784 (private->rdc_data.no_cyl * private->rdc_data.trk_per_cyl)) {
785 DEV_MESSAGE(KERN_INFO, device, "Track no %d too big!",
786 fdata->start_unit);
787 return ERR_PTR(-EINVAL);
788 }
789 if (fdata->start_unit > fdata->stop_unit) {
790 DEV_MESSAGE(KERN_INFO, device, "Track %d reached! ending.",
791 fdata->start_unit);
792 return ERR_PTR(-EINVAL);
793 }
794 if (dasd_check_blocksize(fdata->blksize) != 0) {
795 DEV_MESSAGE(KERN_WARNING, device,
796 "Invalid blocksize %d...terminating!",
797 fdata->blksize);
798 return ERR_PTR(-EINVAL);
799 }
800
801 /*
802 * fdata->intensity is a bit string that tells us what to do:
803 * Bit 0: write record zero
804 * Bit 1: write home address, currently not supported
805 * Bit 2: invalidate tracks
806 * Bit 3: use OS/390 compatible disk layout (cdl)
807 * Only some bit combinations do make sense.
808 */
809 switch (fdata->intensity) {
810 case 0x00: /* Normal format */
811 case 0x08: /* Normal format, use cdl. */
812 cplength = 2 + rpt;
813 datasize = sizeof(struct DE_eckd_data) +
814 sizeof(struct LO_eckd_data) +
815 rpt * sizeof(struct eckd_count);
816 break;
817 case 0x01: /* Write record zero and format track. */
818 case 0x09: /* Write record zero and format track, use cdl. */
819 cplength = 3 + rpt;
820 datasize = sizeof(struct DE_eckd_data) +
821 sizeof(struct LO_eckd_data) +
822 sizeof(struct eckd_count) +
823 rpt * sizeof(struct eckd_count);
824 break;
825 case 0x04: /* Invalidate track. */
826 case 0x0c: /* Invalidate track, use cdl. */
827 cplength = 3;
828 datasize = sizeof(struct DE_eckd_data) +
829 sizeof(struct LO_eckd_data) +
830 sizeof(struct eckd_count);
831 break;
832 default:
833 DEV_MESSAGE(KERN_WARNING, device, "Invalid flags 0x%x.",
834 fdata->intensity);
835 return ERR_PTR(-EINVAL);
836 }
837 /* Allocate the format ccw request. */
838 fcp = dasd_smalloc_request(dasd_eckd_discipline.name,
839 cplength, datasize, device);
840 if (IS_ERR(fcp))
841 return fcp;
842
843 data = fcp->data;
844 ccw = fcp->cpaddr;
845
846 switch (fdata->intensity & ~0x08) {
847 case 0x00: /* Normal format. */
848 define_extent(ccw++, (struct DE_eckd_data *) data,
849 fdata->start_unit, fdata->start_unit,
850 DASD_ECKD_CCW_WRITE_CKD, device);
851 data += sizeof(struct DE_eckd_data);
852 ccw[-1].flags |= CCW_FLAG_CC;
853 locate_record(ccw++, (struct LO_eckd_data *) data,
854 fdata->start_unit, 0, rpt,
855 DASD_ECKD_CCW_WRITE_CKD, device,
856 fdata->blksize);
857 data += sizeof(struct LO_eckd_data);
858 break;
859 case 0x01: /* Write record zero + format track. */
860 define_extent(ccw++, (struct DE_eckd_data *) data,
861 fdata->start_unit, fdata->start_unit,
862 DASD_ECKD_CCW_WRITE_RECORD_ZERO,
863 device);
864 data += sizeof(struct DE_eckd_data);
865 ccw[-1].flags |= CCW_FLAG_CC;
866 locate_record(ccw++, (struct LO_eckd_data *) data,
867 fdata->start_unit, 0, rpt + 1,
868 DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
869 device->bp_block);
870 data += sizeof(struct LO_eckd_data);
871 break;
872 case 0x04: /* Invalidate track. */
873 define_extent(ccw++, (struct DE_eckd_data *) data,
874 fdata->start_unit, fdata->start_unit,
875 DASD_ECKD_CCW_WRITE_CKD, device);
876 data += sizeof(struct DE_eckd_data);
877 ccw[-1].flags |= CCW_FLAG_CC;
878 locate_record(ccw++, (struct LO_eckd_data *) data,
879 fdata->start_unit, 0, 1,
880 DASD_ECKD_CCW_WRITE_CKD, device, 8);
881 data += sizeof(struct LO_eckd_data);
882 break;
883 }
884 if (fdata->intensity & 0x01) { /* write record zero */
885 ect = (struct eckd_count *) data;
886 data += sizeof(struct eckd_count);
887 ect->cyl = cyl;
888 ect->head = head;
889 ect->record = 0;
890 ect->kl = 0;
891 ect->dl = 8;
892 ccw[-1].flags |= CCW_FLAG_CC;
893 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
894 ccw->flags = CCW_FLAG_SLI;
895 ccw->count = 8;
896 ccw->cda = (__u32)(addr_t) ect;
897 ccw++;
898 }
899 if ((fdata->intensity & ~0x08) & 0x04) { /* erase track */
900 ect = (struct eckd_count *) data;
901 data += sizeof(struct eckd_count);
902 ect->cyl = cyl;
903 ect->head = head;
904 ect->record = 1;
905 ect->kl = 0;
906 ect->dl = 0;
907 ccw[-1].flags |= CCW_FLAG_CC;
908 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
909 ccw->flags = CCW_FLAG_SLI;
910 ccw->count = 8;
911 ccw->cda = (__u32)(addr_t) ect;
912 } else { /* write remaining records */
913 for (i = 0; i < rpt; i++) {
914 ect = (struct eckd_count *) data;
915 data += sizeof(struct eckd_count);
916 ect->cyl = cyl;
917 ect->head = head;
918 ect->record = i + 1;
919 ect->kl = 0;
920 ect->dl = fdata->blksize;
921 /* Check for special tracks 0-1 when formatting CDL */
922 if ((fdata->intensity & 0x08) &&
923 fdata->start_unit == 0) {
924 if (i < 3) {
925 ect->kl = 4;
926 ect->dl = sizes_trk0[i] - 4;
927 }
928 }
929 if ((fdata->intensity & 0x08) &&
930 fdata->start_unit == 1) {
931 ect->kl = 44;
932 ect->dl = LABEL_SIZE - 44;
933 }
934 ccw[-1].flags |= CCW_FLAG_CC;
935 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
936 ccw->flags = CCW_FLAG_SLI;
937 ccw->count = 8;
938 ccw->cda = (__u32)(addr_t) ect;
939 ccw++;
940 }
941 }
942 fcp->device = device;
943 fcp->retries = 2; /* set retry counter to enable ERP */
944 fcp->buildclk = get_clock();
945 fcp->status = DASD_CQR_FILLED;
946 return fcp;
947}
948
949static dasd_era_t
950dasd_eckd_examine_error(struct dasd_ccw_req * cqr, struct irb * irb)
951{
952 struct dasd_device *device = (struct dasd_device *) cqr->device;
953 struct ccw_device *cdev = device->cdev;
954
955 if (irb->scsw.cstat == 0x00 &&
956 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
957 return dasd_era_none;
958
959 switch (cdev->id.cu_type) {
960 case 0x3990:
961 case 0x2105:
962 case 0x2107:
963 case 0x1750:
964 return dasd_3990_erp_examine(cqr, irb);
965 case 0x9343:
966 return dasd_9343_erp_examine(cqr, irb);
967 case 0x3880:
968 default:
969 DEV_MESSAGE(KERN_WARNING, device, "%s",
970 "default (unknown CU type) - RECOVERABLE return");
971 return dasd_era_recover;
972 }
973}
974
975static dasd_erp_fn_t
976dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
977{
978 struct dasd_device *device = (struct dasd_device *) cqr->device;
979 struct ccw_device *cdev = device->cdev;
980
981 switch (cdev->id.cu_type) {
982 case 0x3990:
983 case 0x2105:
984 case 0x2107:
985 case 0x1750:
986 return dasd_3990_erp_action;
987 case 0x9343:
988 case 0x3880:
989 default:
990 return dasd_default_erp_action;
991 }
992}
993
994static dasd_erp_fn_t
995dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
996{
997 return dasd_default_erp_postaction;
998}
999
1000static struct dasd_ccw_req *
1001dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1002{
1003 struct dasd_eckd_private *private;
1004 unsigned long *idaws;
1005 struct LO_eckd_data *LO_data;
1006 struct dasd_ccw_req *cqr;
1007 struct ccw1 *ccw;
1008 struct bio *bio;
1009 struct bio_vec *bv;
1010 char *dst;
1011 unsigned int blksize, blk_per_trk, off;
1012 int count, cidaw, cplength, datasize;
1013 sector_t recid, first_rec, last_rec;
1014 sector_t first_trk, last_trk;
1015 unsigned int first_offs, last_offs;
1016 unsigned char cmd, rcmd;
1017 int i;
1018
1019 private = (struct dasd_eckd_private *) device->private;
1020 if (rq_data_dir(req) == READ)
1021 cmd = DASD_ECKD_CCW_READ_MT;
1022 else if (rq_data_dir(req) == WRITE)
1023 cmd = DASD_ECKD_CCW_WRITE_MT;
1024 else
1025 return ERR_PTR(-EINVAL);
1026 /* Calculate number of blocks/records per track. */
1027 blksize = device->bp_block;
1028 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
1029 /* Calculate record id of first and last block. */
1030 first_rec = first_trk = req->sector >> device->s2b_shift;
1031 first_offs = sector_div(first_trk, blk_per_trk);
1032 last_rec = last_trk =
1033 (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
1034 last_offs = sector_div(last_trk, blk_per_trk);
1035 /* Check struct bio and count the number of blocks for the request. */
1036 count = 0;
1037 cidaw = 0;
1038 rq_for_each_bio(bio, req) {
1039 bio_for_each_segment(bv, bio, i) {
1040 if (bv->bv_len & (blksize - 1))
1041 /* Eckd can only do full blocks. */
1042 return ERR_PTR(-EINVAL);
1043 count += bv->bv_len >> (device->s2b_shift + 9);
1044#if defined(CONFIG_ARCH_S390X)
1045 if (idal_is_needed (page_address(bv->bv_page),
1046 bv->bv_len))
1047 cidaw += bv->bv_len >> (device->s2b_shift + 9);
1048#endif
1049 }
1050 }
1051 /* Paranoia. */
1052 if (count != last_rec - first_rec + 1)
1053 return ERR_PTR(-EINVAL);
1054 /* 1x define extent + 1x locate record + number of blocks */
1055 cplength = 2 + count;
1056 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
1057 datasize = sizeof(struct DE_eckd_data) + sizeof(struct LO_eckd_data) +
1058 cidaw * sizeof(unsigned long);
1059 /* Find out the number of additional locate record ccws for cdl. */
1060 if (private->uses_cdl && first_rec < 2*blk_per_trk) {
1061 if (last_rec >= 2*blk_per_trk)
1062 count = 2*blk_per_trk - first_rec;
1063 cplength += count;
1064 datasize += count*sizeof(struct LO_eckd_data);
1065 }
1066 /* Allocate the ccw request. */
1067 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1068 cplength, datasize, device);
1069 if (IS_ERR(cqr))
1070 return cqr;
1071 ccw = cqr->cpaddr;
1072 /* First ccw is define extent. */
1073 define_extent(ccw++, cqr->data, first_trk, last_trk, cmd, device);
1073 /* Build locate_record + read/write ccws. */
1075 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data));
1076 LO_data = (struct LO_eckd_data *) (idaws + cidaw);
1077 recid = first_rec;
1078 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
1079 /* Only standard blocks so there is just one locate record. */
1080 ccw[-1].flags |= CCW_FLAG_CC;
1081 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
1082 last_rec - recid + 1, cmd, device, blksize);
1083 }
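	/*
	 * Note: on a CDL formatted volume the first two tracks contain
	 * blocks with special lengths, so each of those blocks gets its
	 * own locate record ccw in the loop below; the standard blocks
	 * after them share a single locate record.
	 */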
1084 rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
1085 dst = page_address(bv->bv_page) + bv->bv_offset;
1086 if (dasd_page_cache) {
1087 char *copy = kmem_cache_alloc(dasd_page_cache,
1088 SLAB_DMA | __GFP_NOWARN);
1089 if (copy && rq_data_dir(req) == WRITE)
1090 memcpy(copy + bv->bv_offset, dst, bv->bv_len);
1091 if (copy)
1092 dst = copy + bv->bv_offset;
1093 }
1094 for (off = 0; off < bv->bv_len; off += blksize) {
1095 sector_t trkid = recid;
1096 unsigned int recoffs = sector_div(trkid, blk_per_trk);
1097 rcmd = cmd;
1098 count = blksize;
1099 /* Locate record for cdl special block ? */
1100 if (private->uses_cdl && recid < 2*blk_per_trk) {
1101 if (dasd_eckd_cdl_special(blk_per_trk, recid)){
1102 rcmd |= 0x8;
1103 count = dasd_eckd_cdl_reclen(recid);
1104 if (count < blksize)
1105 memset(dst + count, 0xe5,
1106 blksize - count);
1107 }
1108 ccw[-1].flags |= CCW_FLAG_CC;
1109 locate_record(ccw++, LO_data++,
1110 trkid, recoffs + 1,
1111 1, rcmd, device, count);
1112 }
1113 /* Locate record for standard blocks ? */
1114 if (private->uses_cdl && recid == 2*blk_per_trk) {
1115 ccw[-1].flags |= CCW_FLAG_CC;
1116 locate_record(ccw++, LO_data++,
1117 trkid, recoffs + 1,
1118 last_rec - recid + 1,
1119 cmd, device, count);
1120 }
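			/*
			 * Note: ccw->cda is a 31 bit address, so buffers
			 * above 2GB are reached through an indirect data
			 * address list (idal) instead.
			 */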
1121 /* Read/write ccw. */
1122 ccw[-1].flags |= CCW_FLAG_CC;
1123 ccw->cmd_code = rcmd;
1124 ccw->count = count;
1125 if (idal_is_needed(dst, blksize)) {
1126 ccw->cda = (__u32)(addr_t) idaws;
1127 ccw->flags = CCW_FLAG_IDA;
1128 idaws = idal_create_words(idaws, dst, blksize);
1129 } else {
1130 ccw->cda = (__u32)(addr_t) dst;
1131 ccw->flags = 0;
1132 }
1133 ccw++;
1134 dst += blksize;
1135 recid++;
1136 }
1137 }
1138 cqr->device = device;
1139 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
1140 cqr->lpm = private->path_data.ppm;
1141 cqr->retries = 256;
1142 cqr->buildclk = get_clock();
1143 cqr->status = DASD_CQR_FILLED;
1144 return cqr;
1145}
1146
1147static int
1148dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
1149{
1150 struct dasd_eckd_private *private;
1151 struct ccw1 *ccw;
1152 struct bio *bio;
1153 struct bio_vec *bv;
1154 char *dst, *cda;
1155 unsigned int blksize, blk_per_trk, off;
1156 sector_t recid;
1157 int i, status;
1158
1159 if (!dasd_page_cache)
1160 goto out;
1161 private = (struct dasd_eckd_private *) cqr->device->private;
1162 blksize = cqr->device->bp_block;
1163 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
1164 recid = req->sector >> cqr->device->s2b_shift;
1165 ccw = cqr->cpaddr;
1166 /* Skip over define extent & locate record. */
1167 ccw++;
1168 if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
1169 ccw++;
1170 rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
1171 dst = page_address(bv->bv_page) + bv->bv_offset;
1172 for (off = 0; off < bv->bv_len; off += blksize) {
1173 /* Skip locate record. */
1174 if (private->uses_cdl && recid <= 2*blk_per_trk)
1175 ccw++;
1176 if (dst) {
1177 if (ccw->flags & CCW_FLAG_IDA)
1178 cda = *((char **)((addr_t) ccw->cda));
1179 else
1180 cda = (char *)((addr_t) ccw->cda);
1181 if (dst != cda) {
1182 if (rq_data_dir(req) == READ)
1183 memcpy(dst, cda, bv->bv_len);
1184 kmem_cache_free(dasd_page_cache,
1185 (void *)((addr_t)cda & PAGE_MASK));
1186 }
1187 dst = NULL;
1188 }
1189 ccw++;
1190 recid++;
1191 }
1192 }
1193out:
1194 status = cqr->status == DASD_CQR_DONE;
1195 dasd_sfree_request(cqr, cqr->device);
1196 return status;
1197}
1198
1199static int
1200dasd_eckd_fill_info(struct dasd_device * device,
1201 struct dasd_information2_t * info)
1202{
1203 struct dasd_eckd_private *private;
1204
1205 private = (struct dasd_eckd_private *) device->private;
1206 info->label_block = 2;
1207 info->FBA_layout = private->uses_cdl ? 0 : 1;
1208 info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
1209 info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
1210 memcpy(info->characteristics, &private->rdc_data,
1211 sizeof(struct dasd_eckd_characteristics));
1212 info->confdata_size = sizeof (struct dasd_eckd_confdata);
1213 memcpy(info->configuration_data, &private->conf_data,
1214 sizeof (struct dasd_eckd_confdata));
1215 return 0;
1216}
1217
1218/*
1219 * SECTION: ioctl functions for eckd devices.
1220 */
1221
1222/*
1223 * Release device ioctl.
1224 * Builds a channel program to release a previously reserved
1225 * (see dasd_eckd_reserve) device.
1226 */
1227static int
1228dasd_eckd_release(struct block_device *bdev, int no, long args)
1229{
1230 struct dasd_device *device;
1231 struct dasd_ccw_req *cqr;
1232 int rc;
1233
1234 if (!capable(CAP_SYS_ADMIN))
1235 return -EACCES;
1236
1237 device = bdev->bd_disk->private_data;
1238 if (device == NULL)
1239 return -ENODEV;
1240
1241 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1242 1, 32, device);
1243 if (IS_ERR(cqr)) {
1244 DEV_MESSAGE(KERN_WARNING, device, "%s",
1245 "Could not allocate release request");
1246 return PTR_ERR(cqr);
1247 }
1248 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RELEASE;
1249 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1250 cqr->cpaddr->count = 32;
1251 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1252 cqr->device = device;
1253 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1254 cqr->retries = 0;
1255 cqr->expires = 2 * HZ;
1256 cqr->buildclk = get_clock();
1257 cqr->status = DASD_CQR_FILLED;
1258
1259 rc = dasd_sleep_on_immediatly(cqr);
1260
1261 dasd_sfree_request(cqr, cqr->device);
1262 return rc;
1263}
1264
1265/*
1266 * Reserve device ioctl.
1267 * Options are set to 'synchronous wait for interrupt' and
1268 * 'timeout the request'. This causes the I/O to be terminated
1269 * if the interrupt is outstanding for a certain time.
1270 */
1271static int
1272dasd_eckd_reserve(struct block_device *bdev, int no, long args)
1273{
1274 struct dasd_device *device;
1275 struct dasd_ccw_req *cqr;
1276 int rc;
1277
1278 if (!capable(CAP_SYS_ADMIN))
1279 return -EACCES;
1280
1281 device = bdev->bd_disk->private_data;
1282 if (device == NULL)
1283 return -ENODEV;
1284
1285 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1286 1, 32, device);
1287 if (IS_ERR(cqr)) {
1288 DEV_MESSAGE(KERN_WARNING, device, "%s",
1289 "Could not allocate reserve request");
1290 return PTR_ERR(cqr);
1291 }
1292 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RESERVE;
1293 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1294 cqr->cpaddr->count = 32;
1295 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1296 cqr->device = device;
1297 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1298 cqr->retries = 0;
1299 cqr->expires = 2 * HZ;
1300 cqr->buildclk = get_clock();
1301 cqr->status = DASD_CQR_FILLED;
1302
1303 rc = dasd_sleep_on_immediatly(cqr);
1304
1305 dasd_sfree_request(cqr, cqr->device);
1306 return rc;
1307}
1308
1309/*
1310 * Steal lock ioctl - unconditional reserve device.
1311 * Builds a channel program to break a device's reservation.
1312 * (unconditional reserve)
1313 */
1314static int
1315dasd_eckd_steal_lock(struct block_device *bdev, int no, long args)
1316{
1317 struct dasd_device *device;
1318 struct dasd_ccw_req *cqr;
1319 int rc;
1320
1321 if (!capable(CAP_SYS_ADMIN))
1322 return -EACCES;
1323
1324 device = bdev->bd_disk->private_data;
1325 if (device == NULL)
1326 return -ENODEV;
1327
1328 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1329 1, 32, device);
1330 if (IS_ERR(cqr)) {
1331 DEV_MESSAGE(KERN_WARNING, device, "%s",
1332 "Could not allocate steal lock request");
1333 return PTR_ERR(cqr);
1334 }
1335 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SLCK;
1336 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1337 cqr->cpaddr->count = 32;
1338 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1339 cqr->device = device;
1340 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1341 cqr->retries = 0;
1342 cqr->expires = 2 * HZ;
1343 cqr->buildclk = get_clock();
1344 cqr->status = DASD_CQR_FILLED;
1345
1346 rc = dasd_sleep_on_immediatly(cqr);
1347
1348 dasd_sfree_request(cqr, cqr->device);
1349 return rc;
1350}
1351
1352/*
1353 * Read performance statistics
1354 */
1355static int
1356dasd_eckd_performance(struct block_device *bdev, int no, long args)
1357{
1358 struct dasd_device *device;
1359 struct dasd_psf_prssd_data *prssdp;
1360 struct dasd_rssd_perf_stats_t *stats;
1361 struct dasd_ccw_req *cqr;
1362 struct ccw1 *ccw;
1363 int rc;
1364
1365 device = bdev->bd_disk->private_data;
1366 if (device == NULL)
1367 return -ENODEV;
1368
1369 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1370 1 /* PSF */ + 1 /* RSSD */ ,
1371 (sizeof (struct dasd_psf_prssd_data) +
1372 sizeof (struct dasd_rssd_perf_stats_t)),
1373 device);
1374 if (IS_ERR(cqr)) {
1375 DEV_MESSAGE(KERN_WARNING, device, "%s",
1376 "Could not allocate performance statistics request");
1377 return PTR_ERR(cqr);
1378 }
1379 cqr->device = device;
1380 cqr->retries = 0;
1381 cqr->expires = 10 * HZ;
1382
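	/*
	 * Note: the statistics are fetched with a chained pair of ccws -
	 * a PSF ccw that orders the subsystem to prepare the data,
	 * followed by an RSSD ccw that reads it back.
	 */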
1383 /* Prepare for Read Subsystem Data */
1384 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1385 memset(prssdp, 0, sizeof (struct dasd_psf_prssd_data));
1386 prssdp->order = PSF_ORDER_PRSSD;
1387 prssdp->suborder = 0x01; /* Performance Statistics */
1388 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */
1389
1390 ccw = cqr->cpaddr;
1391 ccw->cmd_code = DASD_ECKD_CCW_PSF;
1392 ccw->count = sizeof (struct dasd_psf_prssd_data);
1393 ccw->flags |= CCW_FLAG_CC;
1394 ccw->cda = (__u32)(addr_t) prssdp;
1395
1396 /* Read Subsystem Data - Performance Statistics */
1397 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
1398 memset(stats, 0, sizeof (struct dasd_rssd_perf_stats_t));
1399
1400 ccw++;
1401 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1402 ccw->count = sizeof (struct dasd_rssd_perf_stats_t);
1403 ccw->cda = (__u32)(addr_t) stats;
1404
1405 cqr->buildclk = get_clock();
1406 cqr->status = DASD_CQR_FILLED;
1407 rc = dasd_sleep_on(cqr);
1408 if (rc == 0) {
1409 /* Prepare for Read Subsystem Data */
1410 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1411 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
1412 rc = copy_to_user((long __user *) args, (long *) stats,
1413 sizeof(struct dasd_rssd_perf_stats_t)) ? -EFAULT : 0;
1414 }
1415 dasd_sfree_request(cqr, cqr->device);
1416 return rc;
1417}
1418
1419/*
1420 * Get attributes (cache operations)
1421 * Returns the cache attributes used in Define Extent (DE).
1422 */
1423static int
1424dasd_eckd_get_attrib (struct block_device *bdev, int no, long args)
1425{
1426 struct dasd_device *device;
1427 struct dasd_eckd_private *private;
1428 struct attrib_data_t attrib;
1429 int rc;
1430
1431 if (!capable(CAP_SYS_ADMIN))
1432 return -EACCES;
1433 if (!args)
1434 return -EINVAL;
1435
1436 device = bdev->bd_disk->private_data;
1437 if (device == NULL)
1438 return -ENODEV;
1439
1440 private = (struct dasd_eckd_private *) device->private;
1441 attrib = private->attrib;
1442
1443 rc = copy_to_user((long __user *) args, (long *) &attrib,
1444 sizeof (struct attrib_data_t)) ? -EFAULT : 0;
1445
1446 return rc;
1447}
1448
1449/*
1450 * Set attributes (cache operations)
1451 * Stores the attributes for cache operation to be used in Define Extent (DE).
1452 */
1453static int
1454dasd_eckd_set_attrib(struct block_device *bdev, int no, long args)
1455{
1456 struct dasd_device *device;
1457 struct dasd_eckd_private *private;
1458 struct attrib_data_t attrib;
1459
1460 if (!capable(CAP_SYS_ADMIN))
1461 return -EACCES;
1462 if (!args)
1463 return -EINVAL;
1464
1465 device = bdev->bd_disk->private_data;
1466 if (device == NULL)
1467 return -ENODEV;
1468
1469 if (copy_from_user(&attrib, (void __user *) args,
1470 sizeof (struct attrib_data_t))) {
1471 return -EFAULT;
1472 }
1473 private = (struct dasd_eckd_private *) device->private;
1474 private->attrib = attrib;
1475
1476 DEV_MESSAGE(KERN_INFO, device,
1477 "cache operation mode set to %x (%i cylinder prestage)",
1478 private->attrib.operation, private->attrib.nr_cyl);
1479 return 0;
1480}
1481
1482/*
1483 * Print sense data and related channel program.
1484 * It is printed in parts because the printk buffer is only 1024 bytes.
1485 */
1486static void
1487dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
1488 struct irb *irb)
1489{
1490 char *page;
1491 struct ccw1 *act, *end, *last;
1492 int len, sl, sct, count;
1493
1494 page = (char *) get_zeroed_page(GFP_ATOMIC);
1495 if (page == NULL) {
1496 DEV_MESSAGE(KERN_ERR, device, " %s",
1497 "No memory to dump sense data");
1498 return;
1499 }
1500 len = sprintf(page, KERN_ERR PRINTK_HEADER
1501 " I/O status report for device %s:\n",
1502 device->cdev->dev.bus_id);
1503 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1504 " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
1505 irb->scsw.cstat, irb->scsw.dstat);
1506 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1507 " device %s: Failing CCW: %p\n",
1508 device->cdev->dev.bus_id,
1509 (void *) (addr_t) irb->scsw.cpa);
1510 if (irb->esw.esw0.erw.cons) {
1511 for (sl = 0; sl < 4; sl++) {
1512 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1513 " Sense(hex) %2d-%2d:",
1514 (8 * sl), ((8 * sl) + 7));
1515
1516 for (sct = 0; sct < 8; sct++) {
1517 len += sprintf(page + len, " %02x",
1518 irb->ecw[8 * sl + sct]);
1519 }
1520 len += sprintf(page + len, "\n");
1521 }
1522
1523 if (irb->ecw[27] & DASD_SENSE_BIT_0) {
1524 /* 24 Byte Sense Data */
1525 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1526 " 24 Byte: %x MSG %x, "
1527 "%s MSGb to SYSOP\n",
1528 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
1529 irb->ecw[1] & 0x10 ? "" : "no");
1530 } else {
1531 /* 32 Byte Sense Data */
1532 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1533 " 32 Byte: Format: %x "
1534 "Exception class %x\n",
1535 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
1536 }
1537 } else {
1538 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1539 " SORRY - NO VALID SENSE AVAILABLE\n");
1540 }
1541 MESSAGE_LOG(KERN_ERR, "%s",
1542 page + sizeof(KERN_ERR PRINTK_HEADER));
1543
1544 /* dump the Channel Program */
1545 /* print first CCWs (maximum 8) */
1546 act = req->cpaddr;
1547 for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
1548 end = min(act + 8, last);
1549 len = sprintf(page, KERN_ERR PRINTK_HEADER
1550 " Related CP in req: %p\n", req);
1551 while (act <= end) {
1552 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1553 " CCW %p: %08X %08X DAT:",
1554 act, ((int *) act)[0], ((int *) act)[1]);
1555 for (count = 0; count < 32 && count < act->count;
1556 count += sizeof(int))
1557 len += sprintf(page + len, " %08X",
1558 ((int *) (addr_t) act->cda)
1559 [(count>>2)]);
1560 len += sprintf(page + len, "\n");
1561 act++;
1562 }
1563 MESSAGE_LOG(KERN_ERR, "%s",
1564 page + sizeof(KERN_ERR PRINTK_HEADER));
1565
1566 /* print failing CCW area */
1567 len = 0;
1568 if (act < ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2) {
1569 act = ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2;
1570 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
1571 }
1572 end = min((struct ccw1 *)(addr_t) irb->scsw.cpa + 2, last);
1573 while (act <= end) {
1574 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1575 " CCW %p: %08X %08X DAT:",
1576 act, ((int *) act)[0], ((int *) act)[1]);
1577 for (count = 0; count < 32 && count < act->count;
1578 count += sizeof(int))
1579 len += sprintf(page + len, " %08X",
1580 ((int *) (addr_t) act->cda)
1581 [(count>>2)]);
1582 len += sprintf(page + len, "\n");
1583 act++;
1584 }
1585
1586 /* print last CCWs */
1587 if (act < last - 2) {
1588 act = last - 2;
1589 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
1590 }
1591 while (act <= last) {
1592 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1593 " CCW %p: %08X %08X DAT:",
1594 act, ((int *) act)[0], ((int *) act)[1]);
1595 for (count = 0; count < 32 && count < act->count;
1596 count += sizeof(int))
1597 len += sprintf(page + len, " %08X",
1598 ((int *) (addr_t) act->cda)
1599 [(count>>2)]);
1600 len += sprintf(page + len, "\n");
1601 act++;
1602 }
1603 if (len > 0)
1604 MESSAGE_LOG(KERN_ERR, "%s",
1605 page + sizeof(KERN_ERR PRINTK_HEADER));
1606 free_page((unsigned long) page);
1607}
1608
1609/*
1610 * max_blocks is dependent on the amount of storage that is available
1611 * in the static io buffer for each device. Currently each device has
1612 * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
1613 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
1614 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
1615 * addition we have one define extent ccw + 16 bytes of data and one
1616 * locate record ccw + 16 bytes of data. That makes:
1617 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
1618 * We want to fit two into the available memory so that we can immediately
1619 * start the next request if one finishes off. That makes 249.5 blocks
1620 * for one request. Give a little safety and the result is 240.
1621 */
1622static struct dasd_discipline dasd_eckd_discipline = {
1623 .owner = THIS_MODULE,
1624 .name = "ECKD",
1625 .ebcname = "ECKD",
1626 .max_blocks = 240,
1627 .check_device = dasd_eckd_check_characteristics,
1628 .do_analysis = dasd_eckd_do_analysis,
1629 .fill_geometry = dasd_eckd_fill_geometry,
1630 .start_IO = dasd_start_IO,
1631 .term_IO = dasd_term_IO,
1632 .format_device = dasd_eckd_format_device,
1633 .examine_error = dasd_eckd_examine_error,
1634 .erp_action = dasd_eckd_erp_action,
1635 .erp_postaction = dasd_eckd_erp_postaction,
1636 .build_cp = dasd_eckd_build_cp,
1637 .free_cp = dasd_eckd_free_cp,
1638 .dump_sense = dasd_eckd_dump_sense,
1639 .fill_info = dasd_eckd_fill_info,
1640};
1641
1642static int __init
1643dasd_eckd_init(void)
1644{
1645 int ret;
1646
1647 dasd_ioctl_no_register(THIS_MODULE, BIODASDGATTR,
1648 dasd_eckd_get_attrib);
1649 dasd_ioctl_no_register(THIS_MODULE, BIODASDSATTR,
1650 dasd_eckd_set_attrib);
1651 dasd_ioctl_no_register(THIS_MODULE, BIODASDPSRD,
1652 dasd_eckd_performance);
1653 dasd_ioctl_no_register(THIS_MODULE, BIODASDRLSE,
1654 dasd_eckd_release);
1655 dasd_ioctl_no_register(THIS_MODULE, BIODASDRSRV,
1656 dasd_eckd_reserve);
1657 dasd_ioctl_no_register(THIS_MODULE, BIODASDSLCK,
1658 dasd_eckd_steal_lock);
1659
1660 ASCEBC(dasd_eckd_discipline.ebcname, 4);
1661
1662 ret = ccw_driver_register(&dasd_eckd_driver);
1663 if (ret) {
1664 dasd_ioctl_no_unregister(THIS_MODULE, BIODASDGATTR,
1665 dasd_eckd_get_attrib);
1666 dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSATTR,
1667 dasd_eckd_set_attrib);
1668 dasd_ioctl_no_unregister(THIS_MODULE, BIODASDPSRD,
1669 dasd_eckd_performance);
1670 dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRLSE,
1671 dasd_eckd_release);
1672 dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRSRV,
1673 dasd_eckd_reserve);
1674 dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSLCK,
1675 dasd_eckd_steal_lock);
1676 return ret;
1677 }
1678
1679 dasd_generic_auto_online(&dasd_eckd_driver);
1680 return 0;
1681}
1682
1683static void __exit
1684dasd_eckd_cleanup(void)
1685{
1686 ccw_driver_unregister(&dasd_eckd_driver);
1687
1688 dasd_ioctl_no_unregister(THIS_MODULE, BIODASDGATTR,
1689 dasd_eckd_get_attrib);
1690 dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSATTR,
1691 dasd_eckd_set_attrib);
1692 dasd_ioctl_no_unregister(THIS_MODULE, BIODASDPSRD,
1693 dasd_eckd_performance);
1694 dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRLSE,
1695 dasd_eckd_release);
1696 dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRSRV,
1697 dasd_eckd_reserve);
1698 dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSLCK,
1699 dasd_eckd_steal_lock);
1700}
1701
1702module_init(dasd_eckd_init);
1703module_exit(dasd_eckd_cleanup);
1704
1705/*
1706 * Overrides for Emacs so that we follow Linus's tabbing style.
1707 * Emacs will notice this stuff at the end of the file and automatically
1708 * adjust the settings for this buffer only. This must remain at the end
1709 * of the file.
1710 * ---------------------------------------------------------------------------
1711 * Local variables:
1712 * c-indent-level: 4
1713 * c-brace-imaginary-offset: 0
1714 * c-brace-offset: -4
1715 * c-argdecl-indent: 4
1716 * c-label-offset: -4
1717 * c-continued-statement-offset: 4
1718 * c-continued-brace-offset: 0
1719 * indent-tabs-mode: 1
1720 * tab-width: 8
1721 * End:
1722 */
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
new file mode 100644
index 000000000000..b6888c68b224
--- /dev/null
+++ b/drivers/s390/block/dasd_eckd.h
@@ -0,0 +1,346 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_eckd.h
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Bugreports.to..: <Linux390@de.ibm.com>
6 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
7 *
8 * $Revision: 1.10 $
9 */
10
11#ifndef DASD_ECKD_H
12#define DASD_ECKD_H
13
14/*****************************************************************************
15 * SECTION: CCW Definitions
16 ****************************************************************************/
17#define DASD_ECKD_CCW_WRITE 0x05
18#define DASD_ECKD_CCW_READ 0x06
19#define DASD_ECKD_CCW_WRITE_HOME_ADDRESS 0x09
20#define DASD_ECKD_CCW_READ_HOME_ADDRESS 0x0a
21#define DASD_ECKD_CCW_WRITE_KD 0x0d
22#define DASD_ECKD_CCW_READ_KD 0x0e
23#define DASD_ECKD_CCW_ERASE 0x11
24#define DASD_ECKD_CCW_READ_COUNT 0x12
25#define DASD_ECKD_CCW_SLCK 0x14
26#define DASD_ECKD_CCW_WRITE_RECORD_ZERO 0x15
27#define DASD_ECKD_CCW_READ_RECORD_ZERO 0x16
28#define DASD_ECKD_CCW_WRITE_CKD 0x1d
29#define DASD_ECKD_CCW_READ_CKD 0x1e
30#define DASD_ECKD_CCW_PSF 0x27
31#define DASD_ECKD_CCW_RSSD 0x3e
32#define DASD_ECKD_CCW_LOCATE_RECORD 0x47
33#define DASD_ECKD_CCW_DEFINE_EXTENT 0x63
34#define DASD_ECKD_CCW_WRITE_MT 0x85
35#define DASD_ECKD_CCW_READ_MT 0x86
36#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d
37#define DASD_ECKD_CCW_READ_KD_MT 0x8e
38#define DASD_ECKD_CCW_RELEASE 0x94
39#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
40#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d
41#define DASD_ECKD_CCW_RESERVE 0xB4
42
43/*
 44 * Perform Subsystem Function / Sub-Orders
45 */
46#define PSF_ORDER_PRSSD 0x18
47
48/*****************************************************************************
49 * SECTION: Type Definitions
50 ****************************************************************************/
51
52struct eckd_count {
53 __u16 cyl;
54 __u16 head;
55 __u8 record;
56 __u8 kl;
57 __u16 dl;
58} __attribute__ ((packed));
59
60struct ch_t {
61 __u16 cyl;
62 __u16 head;
63} __attribute__ ((packed));
64
65struct chs_t {
66 __u16 cyl;
67 __u16 head;
68 __u32 sector;
69} __attribute__ ((packed));
70
71struct chr_t {
72 __u16 cyl;
73 __u16 head;
74 __u8 record;
75} __attribute__ ((packed));
76
77struct geom_t {
78 __u16 cyl;
79 __u16 head;
80 __u32 sector;
81} __attribute__ ((packed));
82
83struct eckd_home {
84 __u8 skip_control[14];
85 __u16 cell_number;
86 __u8 physical_addr[3];
87 __u8 flag;
88 struct ch_t track_addr;
89 __u8 reserved;
90 __u8 key_length;
91 __u8 reserved2[2];
92} __attribute__ ((packed));
93
94struct DE_eckd_data {
95 struct {
96 unsigned char perm:2; /* Permissions on this extent */
97 unsigned char reserved:1;
98 unsigned char seek:2; /* Seek control */
99 unsigned char auth:2; /* Access authorization */
100 unsigned char pci:1; /* PCI Fetch mode */
101 } __attribute__ ((packed)) mask;
102 struct {
103 unsigned char mode:2; /* Architecture mode */
104 unsigned char ckd:1; /* CKD Conversion */
105 unsigned char operation:3; /* Operation mode */
106 unsigned char cfw:1; /* Cache fast write */
107 unsigned char dfw:1; /* DASD fast write */
108 } __attribute__ ((packed)) attributes;
109 __u16 blk_size; /* Blocksize */
110 __u16 fast_write_id;
111 __u8 ga_additional; /* Global Attributes Additional */
112 __u8 ga_extended; /* Global Attributes Extended */
113 struct ch_t beg_ext;
114 struct ch_t end_ext;
115 unsigned long long ep_sys_time; /* Ext Parameter - System Time Stamp */
116 __u8 ep_format; /* Extended Parameter format byte */
117 __u8 ep_prio; /* Extended Parameter priority I/O byte */
118 __u8 ep_reserved[6]; /* Extended Parameter Reserved */
119} __attribute__ ((packed));
120
121struct LO_eckd_data {
122 struct {
123 unsigned char orientation:2;
124 unsigned char operation:6;
125 } __attribute__ ((packed)) operation;
126 struct {
127 unsigned char last_bytes_used:1;
128 unsigned char reserved:6;
129 unsigned char read_count_suffix:1;
130 } __attribute__ ((packed)) auxiliary;
131 __u8 unused;
132 __u8 count;
133 struct ch_t seek_addr;
134 struct chr_t search_arg;
135 __u8 sector;
136 __u16 length;
137} __attribute__ ((packed));
138
139struct dasd_eckd_characteristics {
140 __u16 cu_type;
141 struct {
142 unsigned char support:2;
143 unsigned char async:1;
144 unsigned char reserved:1;
145 unsigned char cache_info:1;
146 unsigned char model:3;
147 } __attribute__ ((packed)) cu_model;
148 __u16 dev_type;
149 __u8 dev_model;
150 struct {
151 unsigned char mult_burst:1;
152 unsigned char RT_in_LR:1;
153 unsigned char reserved1:1;
154 unsigned char RD_IN_LR:1;
155 unsigned char reserved2:4;
156 unsigned char reserved3:8;
157 unsigned char defect_wr:1;
158 unsigned char XRC_supported:1;
159 unsigned char reserved4:1;
160 unsigned char striping:1;
161 unsigned char reserved5:4;
162 unsigned char cfw:1;
163 unsigned char reserved6:2;
164 unsigned char cache:1;
165 unsigned char dual_copy:1;
166 unsigned char dfw:1;
167 unsigned char reset_alleg:1;
168 unsigned char sense_down:1;
169 } __attribute__ ((packed)) facilities;
170 __u8 dev_class;
171 __u8 unit_type;
172 __u16 no_cyl;
173 __u16 trk_per_cyl;
174 __u8 sec_per_trk;
175 __u8 byte_per_track[3];
176 __u16 home_bytes;
177 __u8 formula;
178 union {
179 struct {
180 __u8 f1;
181 __u16 f2;
182 __u16 f3;
183 } __attribute__ ((packed)) f_0x01;
184 struct {
185 __u8 f1;
186 __u8 f2;
187 __u8 f3;
188 __u8 f4;
189 __u8 f5;
190 } __attribute__ ((packed)) f_0x02;
191 } __attribute__ ((packed)) factors;
192 __u16 first_alt_trk;
193 __u16 no_alt_trk;
194 __u16 first_dia_trk;
195 __u16 no_dia_trk;
196 __u16 first_sup_trk;
197 __u16 no_sup_trk;
198 __u8 MDR_ID;
199 __u8 OBR_ID;
200 __u8 director;
201 __u8 rd_trk_set;
202 __u16 max_rec_zero;
203 __u8 reserved1;
204 __u8 RWANY_in_LR;
205 __u8 factor6;
206 __u8 factor7;
207 __u8 factor8;
208 __u8 reserved2[3];
209 __u8 reserved3[10];
210} __attribute__ ((packed));
211
212struct dasd_eckd_confdata {
213 struct {
214 struct {
215 unsigned char identifier:2;
216 unsigned char token_id:1;
217 unsigned char sno_valid:1;
218 unsigned char subst_sno:1;
219 unsigned char recNED:1;
220 unsigned char emuNED:1;
221 unsigned char reserved:1;
222 } __attribute__ ((packed)) flags;
223 __u8 descriptor;
224 __u8 dev_class;
225 __u8 reserved;
226 unsigned char dev_type[6];
227 unsigned char dev_model[3];
228 unsigned char HDA_manufacturer[3];
229 unsigned char HDA_location[2];
230 unsigned char HDA_seqno[12];
231 __u16 ID;
232 } __attribute__ ((packed)) ned1;
233 struct {
234 struct {
235 unsigned char identifier:2;
236 unsigned char token_id:1;
237 unsigned char sno_valid:1;
238 unsigned char subst_sno:1;
239 unsigned char recNED:1;
240 unsigned char emuNED:1;
241 unsigned char reserved:1;
242 } __attribute__ ((packed)) flags;
243 __u8 descriptor;
244 __u8 reserved[2];
245 unsigned char dev_type[6];
246 unsigned char dev_model[3];
247 unsigned char DASD_manufacturer[3];
248 unsigned char DASD_location[2];
249 unsigned char DASD_seqno[12];
250 __u16 ID;
251 } __attribute__ ((packed)) ned2;
252 struct {
253 struct {
254 unsigned char identifier:2;
255 unsigned char token_id:1;
256 unsigned char sno_valid:1;
257 unsigned char subst_sno:1;
258 unsigned char recNED:1;
259 unsigned char emuNED:1;
260 unsigned char reserved:1;
261 } __attribute__ ((packed)) flags;
262 __u8 descriptor;
263 __u8 reserved[2];
264 unsigned char cont_type[6];
265 unsigned char cont_model[3];
266 unsigned char cont_manufacturer[3];
267 unsigned char cont_location[2];
268 unsigned char cont_seqno[12];
269 __u16 ID;
270 } __attribute__ ((packed)) ned3;
271 struct {
272 struct {
273 unsigned char identifier:2;
274 unsigned char token_id:1;
275 unsigned char sno_valid:1;
276 unsigned char subst_sno:1;
277 unsigned char recNED:1;
278 unsigned char emuNED:1;
279 unsigned char reserved:1;
280 } __attribute__ ((packed)) flags;
281 __u8 descriptor;
282 __u8 reserved[2];
283 unsigned char cont_type[6];
284 unsigned char empty[3];
285 unsigned char cont_manufacturer[3];
286 unsigned char cont_location[2];
287 unsigned char cont_seqno[12];
288 __u16 ID;
289 } __attribute__ ((packed)) ned4;
290 unsigned char ned5[32];
291 unsigned char ned6[32];
292 unsigned char ned7[32];
293 struct {
294 struct {
295 unsigned char identifier:2;
296 unsigned char reserved:6;
297 } __attribute__ ((packed)) flags;
298 __u8 selector;
299 __u16 interfaceID;
300 __u32 reserved;
301 __u16 subsystemID;
302 struct {
303 unsigned char sp0:1;
304 unsigned char sp1:1;
305 unsigned char reserved:5;
306 unsigned char scluster:1;
307 } __attribute__ ((packed)) spathID;
308 __u8 unit_address;
309 __u8 dev_ID;
310 __u8 dev_address;
311 __u8 adapterID;
312 __u16 link_address;
313 struct {
314 unsigned char parallel:1;
315 unsigned char escon:1;
316 unsigned char reserved:1;
317 unsigned char ficon:1;
318 unsigned char reserved2:4;
319 } __attribute__ ((packed)) protocol_type;
320 struct {
321 unsigned char PID_in_236:1;
322 unsigned char reserved:7;
323 } __attribute__ ((packed)) format_flags;
324 __u8 log_dev_address;
325 unsigned char reserved2[12];
326 } __attribute__ ((packed)) neq;
327} __attribute__ ((packed));
328
329struct dasd_eckd_path {
330 __u8 opm;
331 __u8 ppm;
332 __u8 npm;
333};
334
335/*
336 * Perform Subsystem Function - Prepare for Read Subsystem Data
337 */
338struct dasd_psf_prssd_data {
339 unsigned char order;
340 unsigned char flags;
341 unsigned char reserved[4];
342 unsigned char suborder;
343 unsigned char varies[9];
344} __attribute__ ((packed));
345
346#endif /* DASD_ECKD_H */
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
new file mode 100644
index 000000000000..7cb98d25f341
--- /dev/null
+++ b/drivers/s390/block/dasd_erp.c
@@ -0,0 +1,254 @@
1/*
 2 * File...........: linux/drivers/s390/block/dasd_erp.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 *
10 * $Revision: 1.14 $
11 */
12
13#include <linux/config.h>
14#include <linux/ctype.h>
15#include <linux/init.h>
16
17#include <asm/debug.h>
18#include <asm/ebcdic.h>
19#include <asm/uaccess.h>
20
21/* This is ugly... */
22#define PRINTK_HEADER "dasd_erp:"
23
24#include "dasd_int.h"
25
26struct dasd_ccw_req *
27dasd_alloc_erp_request(char *magic, int cplength, int datasize,
28 struct dasd_device * device)
29{
30 unsigned long flags;
31 struct dasd_ccw_req *cqr;
32 char *data;
33 int size;
34
35 /* Sanity checks */
36 if ( magic == NULL || datasize > PAGE_SIZE ||
37 (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
38 BUG();
39
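	/* Round the size of the request header up to an 8 byte boundary. */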
40 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
41 if (cplength > 0)
42 size += cplength * sizeof(struct ccw1);
43 if (datasize > 0)
44 size += datasize;
45 spin_lock_irqsave(&device->mem_lock, flags);
46 cqr = (struct dasd_ccw_req *)
47 dasd_alloc_chunk(&device->erp_chunks, size);
48 spin_unlock_irqrestore(&device->mem_lock, flags);
49 if (cqr == NULL)
50 return ERR_PTR(-ENOMEM);
51 memset(cqr, 0, sizeof(struct dasd_ccw_req));
52 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
53 cqr->cpaddr = NULL;
54 if (cplength > 0) {
55 cqr->cpaddr = (struct ccw1 *) data;
56 data += cplength*sizeof(struct ccw1);
57 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
58 }
59 cqr->data = NULL;
60 if (datasize > 0) {
61 cqr->data = data;
62 memset(cqr->data, 0, datasize);
63 }
64 strncpy((char *) &cqr->magic, magic, 4);
65 ASCEBC((char *) &cqr->magic, 4);
66 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
67 dasd_get_device(device);
68 return cqr;
69}
70
71void
72dasd_free_erp_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
73{
74 unsigned long flags;
75
76 spin_lock_irqsave(&device->mem_lock, flags);
77 dasd_free_chunk(&device->erp_chunks, cqr);
78 spin_unlock_irqrestore(&device->mem_lock, flags);
79 atomic_dec(&device->ref_count);
80}
81
82
83/*
84 * dasd_default_erp_action just retries the current cqr
85 */
86struct dasd_ccw_req *
87dasd_default_erp_action(struct dasd_ccw_req * cqr)
88{
89 struct dasd_device *device;
90
91 device = cqr->device;
92
 93 /* Just retry - there is nothing to save since we got no sense data. */
94 if (cqr->retries > 0) {
95 DEV_MESSAGE (KERN_DEBUG, device,
96 "default ERP called (%i retries left)",
97 cqr->retries);
98 cqr->lpm = LPM_ANYPATH;
99 cqr->status = DASD_CQR_QUEUED;
100 } else {
101 DEV_MESSAGE (KERN_WARNING, device, "%s",
102 "default ERP called (NO retry left)");
103 cqr->status = DASD_CQR_FAILED;
104 cqr->stopclk = get_clock ();
105 }
106 return cqr;
107} /* end dasd_default_erp_action */
108
109/*
110 * DESCRIPTION
 111 * Frees all ERPs of the current ERP chain and sets the status
112 * of the original CQR either to DASD_CQR_DONE if ERP was successful
113 * or to DASD_CQR_FAILED if ERP was NOT successful.
114 * NOTE: This function is only called if no discipline postaction
115 * is available
116 *
117 * PARAMETER
118 * erp current erp_head
119 *
120 * RETURN VALUES
121 * cqr pointer to the original CQR
122 */
123struct dasd_ccw_req *
124dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
125{
126 struct dasd_device *device;
127 int success;
128
129 if (cqr->refers == NULL || cqr->function == NULL)
130 BUG();
131
132 device = cqr->device;
133 success = cqr->status == DASD_CQR_DONE;
134
135 /* free all ERPs - but NOT the original cqr */
136 while (cqr->refers != NULL) {
137 struct dasd_ccw_req *refers;
138
139 refers = cqr->refers;
140 /* remove the request from the device queue */
141 list_del(&cqr->list);
142 /* free the finished erp request */
143 dasd_free_erp_request(cqr, device);
144 cqr = refers;
145 }
146
147 /* set corresponding status to original cqr */
148 if (success)
149 cqr->status = DASD_CQR_DONE;
150 else {
151 cqr->status = DASD_CQR_FAILED;
152 cqr->stopclk = get_clock();
153 }
154
155 return cqr;
156
157} /* end default_erp_postaction */
158
159/*
160 * Print the hex dump of the memory used by a request. This includes
 161 * all error recovery ccws that have been chained in front of the
162 * real request.
163 */
164static inline void
165hex_dump_memory(struct dasd_device *device, void *data, int len)
166{
167 int *pint;
168
169 pint = (int *) data;
170 while (len > 0) {
171 DEV_MESSAGE(KERN_ERR, device, "%p: %08x %08x %08x %08x",
172 pint, pint[0], pint[1], pint[2], pint[3]);
173 pint += 4;
174 len -= 16;
175 }
176}
177
178void
179dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
180{
181 struct dasd_device *device;
182
183 device = cqr->device;
184 /* dump sense data */
185 if (device->discipline && device->discipline->dump_sense)
186 device->discipline->dump_sense(device, cqr, irb);
187}
188
189void
190dasd_log_ccw(struct dasd_ccw_req * cqr, int caller, __u32 cpa)
191{
192 struct dasd_device *device;
193 struct dasd_ccw_req *lcqr;
194 struct ccw1 *ccw;
195 int cplength;
196
197 device = cqr->device;
198 /* log the channel program */
199 for (lcqr = cqr; lcqr != NULL; lcqr = lcqr->refers) {
200 DEV_MESSAGE(KERN_ERR, device,
201 "(%s) ERP chain report for req: %p",
202 caller == 0 ? "EXAMINE" : "ACTION", lcqr);
203 hex_dump_memory(device, lcqr, sizeof(struct dasd_ccw_req));
204
205 cplength = 1;
206 ccw = lcqr->cpaddr;
207 while (ccw++->flags & (CCW_FLAG_DC | CCW_FLAG_CC))
208 cplength++;
209
210 if (cplength > 40) { /* log only parts of the CP */
211 DEV_MESSAGE(KERN_ERR, device, "%s",
212 "Start of channel program:");
213 hex_dump_memory(device, lcqr->cpaddr,
214 40*sizeof(struct ccw1));
215
216 DEV_MESSAGE(KERN_ERR, device, "%s",
217 "End of channel program:");
218 hex_dump_memory(device, lcqr->cpaddr + cplength - 10,
219 10*sizeof(struct ccw1));
220 } else { /* log the whole CP */
221 DEV_MESSAGE(KERN_ERR, device, "%s",
222 "Channel program (complete):");
223 hex_dump_memory(device, lcqr->cpaddr,
224 cplength*sizeof(struct ccw1));
225 }
226
227 if (lcqr != cqr)
228 continue;
229
230 /*
 231 * Log bytes around the failed CCW, but only if we did
 232 * not log the whole CP or the CCW is outside the
233 * logged CP.
234 */
235 if (cplength > 40 ||
 236 ((addr_t) cpa < (addr_t) lcqr->cpaddr ||
237 (addr_t) cpa > (addr_t) (lcqr->cpaddr + cplength + 4))) {
238
239 DEV_MESSAGE(KERN_ERR, device,
240 "Failed CCW (%p) (area):",
241 (void *) (long) cpa);
242 hex_dump_memory(device, cqr->cpaddr - 10,
243 20*sizeof(struct ccw1));
244 }
245 }
246
247} /* end log_erp_chain */
248
249EXPORT_SYMBOL(dasd_default_erp_action);
250EXPORT_SYMBOL(dasd_default_erp_postaction);
251EXPORT_SYMBOL(dasd_alloc_erp_request);
252EXPORT_SYMBOL(dasd_free_erp_request);
253EXPORT_SYMBOL(dasd_log_sense);
254EXPORT_SYMBOL(dasd_log_ccw);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
new file mode 100644
index 000000000000..7963ae343eef
--- /dev/null
+++ b/drivers/s390/block/dasd_fba.c
@@ -0,0 +1,607 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_fba.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
6 *
7 * $Revision: 1.39 $
8 */
9
10#include <linux/config.h>
11#include <linux/stddef.h>
12#include <linux/kernel.h>
13#include <asm/debug.h>
14
15#include <linux/slab.h>
16#include <linux/hdreg.h> /* HDIO_GETGEO */
17#include <linux/bio.h>
18#include <linux/module.h>
19#include <linux/init.h>
20
21#include <asm/idals.h>
22#include <asm/ebcdic.h>
23#include <asm/io.h>
24#include <asm/todclk.h>
25#include <asm/ccwdev.h>
26
27#include "dasd_int.h"
28#include "dasd_fba.h"
29
30#ifdef PRINTK_HEADER
31#undef PRINTK_HEADER
32#endif /* PRINTK_HEADER */
33#define PRINTK_HEADER "dasd(fba):"
34
35#define DASD_FBA_CCW_WRITE 0x41
36#define DASD_FBA_CCW_READ 0x42
37#define DASD_FBA_CCW_LOCATE 0x43
38#define DASD_FBA_CCW_DEFINE_EXTENT 0x63
39
40MODULE_LICENSE("GPL");
41
42static struct dasd_discipline dasd_fba_discipline;
43
44struct dasd_fba_private {
45 struct dasd_fba_characteristics rdc_data;
46};
47
48static struct ccw_device_id dasd_fba_ids[] = {
49 { CCW_DEVICE_DEVTYPE (0x6310, 0, 0x9336, 0), driver_info: 0x1},
50 { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3370, 0), driver_info: 0x2},
51 { /* end of list */ },
52};
53
54MODULE_DEVICE_TABLE(ccw, dasd_fba_ids);
55
56static struct ccw_driver dasd_fba_driver; /* see below */
57static int
58dasd_fba_probe(struct ccw_device *cdev)
59{
60 int ret;
61
62 ret = dasd_generic_probe (cdev, &dasd_fba_discipline);
63 if (ret)
64 return ret;
65 ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
66 return 0;
67}
68
69static int
70dasd_fba_set_online(struct ccw_device *cdev)
71{
72 return dasd_generic_set_online (cdev, &dasd_fba_discipline);
73}
74
75static struct ccw_driver dasd_fba_driver = {
76 .name = "dasd-fba",
77 .owner = THIS_MODULE,
78 .ids = dasd_fba_ids,
79 .probe = dasd_fba_probe,
80 .remove = dasd_generic_remove,
81 .set_offline = dasd_generic_set_offline,
82 .set_online = dasd_fba_set_online,
83 .notify = dasd_generic_notify,
84};
85
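/*
 * Note: every FBA channel program is a define extent ccw followed by
 * one or more locate record ccws and the read/write ccws proper; the
 * two helpers below fill in the data areas for the first two types.
 */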
86static inline void
87define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
88 int blksize, int beg, int nr)
89{
90 ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT;
91 ccw->flags = 0;
92 ccw->count = 16;
93 ccw->cda = (__u32) __pa(data);
94 memset(data, 0, sizeof (struct DE_fba_data));
95 if (rw == WRITE)
96 (data->mask).perm = 0x0;
97 else if (rw == READ)
98 (data->mask).perm = 0x1;
99 else
100 data->mask.perm = 0x2;
101 data->blk_size = blksize;
102 data->ext_loc = beg;
103 data->ext_end = nr - 1;
104}
105
106static inline void
107locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
108 int block_nr, int block_ct)
109{
110 ccw->cmd_code = DASD_FBA_CCW_LOCATE;
111 ccw->flags = 0;
112 ccw->count = 8;
113 ccw->cda = (__u32) __pa(data);
114 memset(data, 0, sizeof (struct LO_fba_data));
115 if (rw == WRITE)
116 data->operation.cmd = 0x5;
117 else if (rw == READ)
118 data->operation.cmd = 0x6;
119 else
120 data->operation.cmd = 0x8;
121 data->blk_nr = block_nr;
122 data->blk_ct = block_ct;
123}
124
125static int
126dasd_fba_check_characteristics(struct dasd_device *device)
127{
128 struct dasd_fba_private *private;
129 struct ccw_device *cdev = device->cdev;
130 void *rdc_data;
131 int rc;
132
133 private = (struct dasd_fba_private *) device->private;
134 if (private == NULL) {
135 private = kmalloc(sizeof(struct dasd_fba_private), GFP_KERNEL);
136 if (private == NULL) {
137 DEV_MESSAGE(KERN_WARNING, device, "%s",
138 "memory allocation failed for private "
139 "data");
140 return -ENOMEM;
141 }
142 device->private = (void *) private;
143 }
144 /* Read Device Characteristics */
145 rdc_data = (void *) &(private->rdc_data);
146 rc = read_dev_chars(device->cdev, &rdc_data, 32);
147 if (rc) {
148 DEV_MESSAGE(KERN_WARNING, device,
149 "Read device characteristics returned error %d",
150 rc);
151 return rc;
152 }
153
154 DEV_MESSAGE(KERN_INFO, device,
155 "%04X/%02X(CU:%04X/%02X) %dMB at(%d B/blk)",
156 cdev->id.dev_type,
157 cdev->id.dev_model,
158 cdev->id.cu_type,
159 cdev->id.cu_model,
160 ((private->rdc_data.blk_bdsa *
161 (private->rdc_data.blk_size >> 9)) >> 11),
162 private->rdc_data.blk_size);
163 return 0;
164}
165
166static int
167dasd_fba_do_analysis(struct dasd_device *device)
168{
169 struct dasd_fba_private *private;
170 int sb, rc;
171
172 private = (struct dasd_fba_private *) device->private;
173 rc = dasd_check_blocksize(private->rdc_data.blk_size);
174 if (rc) {
175 DEV_MESSAGE(KERN_INFO, device, "unknown blocksize %d",
176 private->rdc_data.blk_size);
177 return rc;
178 }
179 device->blocks = private->rdc_data.blk_bdsa;
180 device->bp_block = private->rdc_data.blk_size;
181 device->s2b_shift = 0; /* bits to shift 512 to get a block */
182 for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1)
183 device->s2b_shift++;
184 return 0;
185}
186
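/*
 * Note: FBA devices have no real cylinder/head geometry, so a
 * synthetic one is made up here for HDIO_GETGEO.
 */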
187static int
188dasd_fba_fill_geometry(struct dasd_device *device, struct hd_geometry *geo)
189{
190 if (dasd_check_blocksize(device->bp_block) != 0)
191 return -EINVAL;
192 geo->cylinders = (device->blocks << device->s2b_shift) >> 10;
193 geo->heads = 16;
194 geo->sectors = 128 >> device->s2b_shift;
195 return 0;
196}
197
198static dasd_era_t
199dasd_fba_examine_error(struct dasd_ccw_req * cqr, struct irb * irb)
200{
201 struct dasd_device *device;
202 struct ccw_device *cdev;
203
204 device = (struct dasd_device *) cqr->device;
205 if (irb->scsw.cstat == 0x00 &&
206 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
207 return dasd_era_none;
208
209 cdev = device->cdev;
210 switch (cdev->id.dev_type) {
211 case 0x3370:
212 return dasd_3370_erp_examine(cqr, irb);
213 case 0x9336:
214 return dasd_9336_erp_examine(cqr, irb);
215 default:
216 return dasd_era_recover;
217 }
218}
219
220static dasd_erp_fn_t
221dasd_fba_erp_action(struct dasd_ccw_req * cqr)
222{
223 return dasd_default_erp_action;
224}
225
226static dasd_erp_fn_t
227dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
228{
229 if (cqr->function == dasd_default_erp_action)
230 return dasd_default_erp_postaction;
231
232 DEV_MESSAGE(KERN_WARNING, cqr->device, "unknown ERP action %p",
233 cqr->function);
234 return NULL;
235}
236
237static struct dasd_ccw_req *
238dasd_fba_build_cp(struct dasd_device * device, struct request *req)
239{
240 struct dasd_fba_private *private;
241 unsigned long *idaws;
242 struct LO_fba_data *LO_data;
243 struct dasd_ccw_req *cqr;
244 struct ccw1 *ccw;
245 struct bio *bio;
246 struct bio_vec *bv;
247 char *dst;
248 int count, cidaw, cplength, datasize;
249 sector_t recid, first_rec, last_rec;
250 unsigned int blksize, off;
251 unsigned char cmd;
252 int i;
253
254 private = (struct dasd_fba_private *) device->private;
255 if (rq_data_dir(req) == READ) {
256 cmd = DASD_FBA_CCW_READ;
257 } else if (rq_data_dir(req) == WRITE) {
258 cmd = DASD_FBA_CCW_WRITE;
259 } else
260 return ERR_PTR(-EINVAL);
261 blksize = device->bp_block;
262 /* Calculate record id of first and last block. */
263 first_rec = req->sector >> device->s2b_shift;
264 last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
265 /* Check struct bio and count the number of blocks for the request. */
266 count = 0;
267 cidaw = 0;
268 rq_for_each_bio(bio, req) {
269 bio_for_each_segment(bv, bio, i) {
270 if (bv->bv_len & (blksize - 1))
271 /* Fba can only do full blocks. */
272 return ERR_PTR(-EINVAL);
273 count += bv->bv_len >> (device->s2b_shift + 9);
274#if defined(CONFIG_ARCH_S390X)
275 if (idal_is_needed (page_address(bv->bv_page),
276 bv->bv_len))
277 cidaw += bv->bv_len / blksize;
278#endif
279 }
280 }
281 /* Paranoia. */
282 if (count != last_rec - first_rec + 1)
283 return ERR_PTR(-EINVAL);
284 /* 1x define extent + 1x locate record + number of blocks */
285 cplength = 2 + count;
286 /* 1x define extent + 1x locate record */
287 datasize = sizeof(struct DE_fba_data) + sizeof(struct LO_fba_data) +
288 cidaw * sizeof(unsigned long);
289 /*
290 * Find out number of additional locate record ccws if the device
291 * can't do data chaining.
292 */
293 if (private->rdc_data.mode.bits.data_chain == 0) {
294 cplength += count - 1;
295 datasize += (count - 1)*sizeof(struct LO_fba_data);
296 }
297 /* Allocate the ccw request. */
298 cqr = dasd_smalloc_request(dasd_fba_discipline.name,
299 cplength, datasize, device);
300 if (IS_ERR(cqr))
301 return cqr;
302 ccw = cqr->cpaddr;
303 /* First ccw is define extent. */
304 define_extent(ccw++, cqr->data, rq_data_dir(req),
305 device->bp_block, req->sector, req->nr_sectors);
306 /* Build locate_record + read/write ccws. */
307 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
308 LO_data = (struct LO_fba_data *) (idaws + cidaw);
309 /* Locate record for all blocks for smart devices. */
310 if (private->rdc_data.mode.bits.data_chain != 0) {
311 ccw[-1].flags |= CCW_FLAG_CC;
312 locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
313 }
314 recid = first_rec;
315 rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
316 dst = page_address(bv->bv_page) + bv->bv_offset;
317 if (dasd_page_cache) {
318 char *copy = kmem_cache_alloc(dasd_page_cache,
319 SLAB_DMA | __GFP_NOWARN);
320 if (copy && rq_data_dir(req) == WRITE)
321 memcpy(copy + bv->bv_offset, dst, bv->bv_len);
322 if (copy)
323 dst = copy + bv->bv_offset;
324 }
325 for (off = 0; off < bv->bv_len; off += blksize) {
326 /* Locate record for stupid devices. */
327 if (private->rdc_data.mode.bits.data_chain == 0) {
328 ccw[-1].flags |= CCW_FLAG_CC;
329 locate_record(ccw, LO_data++,
330 rq_data_dir(req),
331 recid - first_rec, 1);
332 ccw->flags = CCW_FLAG_CC;
333 ccw++;
334 } else {
335 if (recid > first_rec)
336 ccw[-1].flags |= CCW_FLAG_DC;
337 else
338 ccw[-1].flags |= CCW_FLAG_CC;
339 }
340 ccw->cmd_code = cmd;
341 ccw->count = device->bp_block;
342 if (idal_is_needed(dst, blksize)) {
343 ccw->cda = (__u32)(addr_t) idaws;
344 ccw->flags = CCW_FLAG_IDA;
345 idaws = idal_create_words(idaws, dst, blksize);
346 } else {
347 ccw->cda = (__u32)(addr_t) dst;
348 ccw->flags = 0;
349 }
350 ccw++;
351 dst += blksize;
352 recid++;
353 }
354 }
355 cqr->device = device;
356 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
357 cqr->status = DASD_CQR_FILLED;
358 return cqr;
359}
360
361static int
362dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
363{
364 struct dasd_fba_private *private;
365 struct ccw1 *ccw;
366 struct bio *bio;
367 struct bio_vec *bv;
368 char *dst, *cda;
369 unsigned int blksize, off;
370 int i, status;
371
372 if (!dasd_page_cache)
373 goto out;
374 private = (struct dasd_fba_private *) cqr->device->private;
375 blksize = cqr->device->bp_block;
376 ccw = cqr->cpaddr;
377 /* Skip over define extent & locate record. */
378 ccw++;
379 if (private->rdc_data.mode.bits.data_chain != 0)
380 ccw++;
381 rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
382 dst = page_address(bv->bv_page) + bv->bv_offset;
383 for (off = 0; off < bv->bv_len; off += blksize) {
384 /* Skip locate record. */
385 if (private->rdc_data.mode.bits.data_chain == 0)
386 ccw++;
387 if (dst) {
388 if (ccw->flags & CCW_FLAG_IDA)
389 cda = *((char **)((addr_t) ccw->cda));
390 else
391 cda = (char *)((addr_t) ccw->cda);
392 if (dst != cda) {
393 if (rq_data_dir(req) == READ)
394 memcpy(dst, cda, bv->bv_len);
395 kmem_cache_free(dasd_page_cache,
396 (void *)((addr_t)cda & PAGE_MASK));
397 }
398 dst = NULL;
399 }
400 ccw++;
401 }
402 }
403out:
404 status = cqr->status == DASD_CQR_DONE;
405 dasd_sfree_request(cqr, cqr->device);
406 return status;
407}
408
409static int
410dasd_fba_fill_info(struct dasd_device * device,
411 struct dasd_information2_t * info)
412{
413 info->label_block = 1;
414 info->FBA_layout = 1;
415 info->format = DASD_FORMAT_LDL;
416 info->characteristics_size = sizeof(struct dasd_fba_characteristics);
417 memcpy(info->characteristics,
418 &((struct dasd_fba_private *) device->private)->rdc_data,
419 sizeof (struct dasd_fba_characteristics));
420 info->confdata_size = 0;
421 return 0;
422}
423
424static void
425dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
426 struct irb *irb)
427{
428 char *page;
429 struct ccw1 *act, *end, *last;
430 int len, sl, sct, count;
431
432 page = (char *) get_zeroed_page(GFP_ATOMIC);
433 if (page == NULL) {
434 DEV_MESSAGE(KERN_ERR, device, " %s",
435 "No memory to dump sense data");
436 return;
437 }
438 len = sprintf(page, KERN_ERR PRINTK_HEADER
439 " I/O status report for device %s:\n",
440 device->cdev->dev.bus_id);
441 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
442 " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
443 irb->scsw.cstat, irb->scsw.dstat);
444 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
445 " device %s: Failing CCW: %p\n",
446 device->cdev->dev.bus_id,
447 (void *) (addr_t) irb->scsw.cpa);
448 if (irb->esw.esw0.erw.cons) {
449 for (sl = 0; sl < 4; sl++) {
450 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
451 " Sense(hex) %2d-%2d:",
452 (8 * sl), ((8 * sl) + 7));
453
454 for (sct = 0; sct < 8; sct++) {
455 len += sprintf(page + len, " %02x",
456 irb->ecw[8 * sl + sct]);
457 }
458 len += sprintf(page + len, "\n");
459 }
460 } else {
461 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
462 " SORRY - NO VALID SENSE AVAILABLE\n");
463 }
464 MESSAGE_LOG(KERN_ERR, "%s",
465 page + sizeof(KERN_ERR PRINTK_HEADER));
466
467 /* dump the Channel Program */
468 /* print first CCWs (maximum 8) */
469 act = req->cpaddr;
470 for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
471 end = min(act + 8, last);
472 len = sprintf(page, KERN_ERR PRINTK_HEADER
473 " Related CP in req: %p\n", req);
474 while (act <= end) {
475 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
476 " CCW %p: %08X %08X DAT:",
477 act, ((int *) act)[0], ((int *) act)[1]);
478 for (count = 0; count < 32 && count < act->count;
479 count += sizeof(int))
480 len += sprintf(page + len, " %08X",
481 ((int *) (addr_t) act->cda)
482 [(count>>2)]);
483 len += sprintf(page + len, "\n");
484 act++;
485 }
486 MESSAGE_LOG(KERN_ERR, "%s",
487 page + sizeof(KERN_ERR PRINTK_HEADER));
488
489
490 /* print failing CCW area */
491 len = 0;
492 if (act < ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2) {
493 act = ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2;
494 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
495 }
496 end = min((struct ccw1 *)(addr_t) irb->scsw.cpa + 2, last);
497 while (act <= end) {
498 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
499 " CCW %p: %08X %08X DAT:",
500 act, ((int *) act)[0], ((int *) act)[1]);
501 for (count = 0; count < 32 && count < act->count;
502 count += sizeof(int))
503 len += sprintf(page + len, " %08X",
504 ((int *) (addr_t) act->cda)
505 [(count>>2)]);
506 len += sprintf(page + len, "\n");
507 act++;
508 }
509
510 /* print last CCWs */
511 if (act < last - 2) {
512 act = last - 2;
513 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
514 }
515 while (act <= last) {
516 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
517 " CCW %p: %08X %08X DAT:",
518 act, ((int *) act)[0], ((int *) act)[1]);
519 for (count = 0; count < 32 && count < act->count;
520 count += sizeof(int))
521 len += sprintf(page + len, " %08X",
522 ((int *) (addr_t) act->cda)
523 [(count>>2)]);
524 len += sprintf(page + len, "\n");
525 act++;
526 }
527 if (len > 0)
528 MESSAGE_LOG(KERN_ERR, "%s",
529 page + sizeof(KERN_ERR PRINTK_HEADER));
530 free_page((unsigned long) page);
531}
532
533/*
534 * max_blocks is dependent on the amount of storage that is available
535 * in the static io buffer for each device. Currently each device has
536 * 8192 bytes (=2 pages). For 64 bit one struct dasd_mchunk has
537 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
538 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
539 * addition we have one define extent ccw + 16 bytes of data and a
540 * locate record ccw for each block (stupid devices!) + 16 bytes of data.
541 * That makes:
542 * (8192 - 24 - 136 - 8 - 16) / 40 = 200.2 blocks at maximum.
543 * We want to fit two into the available memory so that we can immediately
544 * start the next request when one finishes. That makes 100.1 blocks
545 * for one request. Give a little safety and the result is 96.
546 */
547static struct dasd_discipline dasd_fba_discipline = {
548 .owner = THIS_MODULE,
549 .name = "FBA ",
550 .ebcname = "FBA ",
551 .max_blocks = 96,
552 .check_device = dasd_fba_check_characteristics,
553 .do_analysis = dasd_fba_do_analysis,
554 .fill_geometry = dasd_fba_fill_geometry,
555 .start_IO = dasd_start_IO,
556 .term_IO = dasd_term_IO,
557 .examine_error = dasd_fba_examine_error,
558 .erp_action = dasd_fba_erp_action,
559 .erp_postaction = dasd_fba_erp_postaction,
560 .build_cp = dasd_fba_build_cp,
561 .free_cp = dasd_fba_free_cp,
562 .dump_sense = dasd_fba_dump_sense,
563 .fill_info = dasd_fba_fill_info,
564};
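
/*
 * Editor's sketch -- the byte budget behind .max_blocks above, spelled
 * out with the counts from the comment (the macro names are invented
 * for illustration only and never compiled):
 */
#if 0
#define FBA_STATIC_BUF	8192	/* two pages of static ccw memory   */
#define FBA_CHUNK	  24	/* one struct dasd_mchunk header    */
#define FBA_CQR		 136	/* one struct dasd_ccw_req          */
#define FBA_DE	      (8 + 16)	/* define extent ccw + data         */
#define FBA_PER_BLOCK	  40	/* ccw + idal + locate record + data */
/* (8192 - 24 - 136 - 8 - 16) / 40 = 200 blocks per buffer, two
 * requests per buffer gives 100 each, 96 leaves a safety margin. */
#endif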
565
566static int __init
567dasd_fba_init(void)
568{
569 int ret;
570
571 ASCEBC(dasd_fba_discipline.ebcname, 4);
572
573 ret = ccw_driver_register(&dasd_fba_driver);
574 if (ret)
575 return ret;
576
577 dasd_generic_auto_online(&dasd_fba_driver);
578 return 0;
579}
580
581static void __exit
582dasd_fba_cleanup(void)
583{
584 ccw_driver_unregister(&dasd_fba_driver);
585}
586
587module_init(dasd_fba_init);
588module_exit(dasd_fba_cleanup);
589
590/*
591 * Overrides for Emacs so that we follow Linus's tabbing style.
592 * Emacs will notice this stuff at the end of the file and automatically
593 * adjust the settings for this buffer only. This must remain at the end
594 * of the file.
595 * ---------------------------------------------------------------------------
596 * Local variables:
597 * c-indent-level: 4
598 * c-brace-imaginary-offset: 0
599 * c-brace-offset: -4
600 * c-argdecl-indent: 4
601 * c-label-offset: -4
602 * c-continued-statement-offset: 4
603 * c-continued-brace-offset: 0
604 * indent-tabs-mode: 1
605 * tab-width: 8
606 * End:
607 */
diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h
new file mode 100644
index 000000000000..624f0402ee22
--- /dev/null
+++ b/drivers/s390/block/dasd_fba.h
@@ -0,0 +1,73 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_fba.h
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
6 *
7 * $Revision: 1.6 $
8 */
9
10#ifndef DASD_FBA_H
11#define DASD_FBA_H
12
13struct DE_fba_data {
14 struct {
15 unsigned char perm:2; /* Permissions on this extent */
16 unsigned char zero:2; /* Must be zero */
17 unsigned char da:1; /* usually zero */
18 unsigned char diag:1; /* allow diagnose */
19 unsigned char zero2:2; /* zero */
20 } __attribute__ ((packed)) mask;
21 __u8 zero; /* Must be zero */
22 __u16 blk_size; /* Blocksize */
23 __u32 ext_loc; /* Extent locator */
24 __u32 ext_beg; /* logical number of block 0 in extent */
25	__u32 ext_end;		/* logical number of last block in extent */
26} __attribute__ ((packed));
27
28struct LO_fba_data {
29 struct {
30 unsigned char zero:4;
31 unsigned char cmd:4;
32 } __attribute__ ((packed)) operation;
33 __u8 auxiliary;
34 __u16 blk_ct;
35 __u32 blk_nr;
36} __attribute__ ((packed));
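
/*
 * Editor's note: with the packed attribute the fields add up to
 * 16 bytes for struct DE_fba_data (1 + 1 + 2 + 4 + 4 + 4) and
 * 8 bytes for struct LO_fba_data (1 + 1 + 2 + 4).
 */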
37
38struct dasd_fba_characteristics {
39 union {
40 __u8 c;
41 struct {
42 unsigned char reserved:1;
43 unsigned char overrunnable:1;
44 unsigned char burst_byte:1;
45 unsigned char data_chain:1;
46 unsigned char zeros:4;
47 } __attribute__ ((packed)) bits;
48 } __attribute__ ((packed)) mode;
49 union {
50 __u8 c;
51 struct {
52 unsigned char zero0:1;
53 unsigned char removable:1;
54 unsigned char shared:1;
55 unsigned char zero1:1;
56 unsigned char mam:1;
57 unsigned char zeros:3;
58 } __attribute__ ((packed)) bits;
59 } __attribute__ ((packed)) features;
60 __u8 dev_class;
61 __u8 unit_type;
62 __u16 blk_size;
63 __u32 blk_per_cycl;
64 __u32 blk_per_bound;
65 __u32 blk_bdsa;
66 __u32 reserved0;
67 __u16 reserved1;
68 __u16 blk_ce;
69 __u32 reserved2;
70 __u16 reserved3;
71} __attribute__ ((packed));
72
73#endif /* DASD_FBA_H */
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
new file mode 100644
index 000000000000..1d52db406b2e
--- /dev/null
+++ b/drivers/s390/block/dasd_genhd.c
@@ -0,0 +1,185 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_genhd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 *
10 * gendisk related functions for the dasd driver.
11 *
12 * $Revision: 1.48 $
13 */
14
15#include <linux/config.h>
16#include <linux/interrupt.h>
17#include <linux/fs.h>
18#include <linux/blkpg.h>
19
20#include <asm/uaccess.h>
21
22/* This is ugly... */
23#define PRINTK_HEADER "dasd_gendisk:"
24
25#include "dasd_int.h"
26
27/*
28 * Allocate and register gendisk structure for device.
29 */
30int
31dasd_gendisk_alloc(struct dasd_device *device)
32{
33 struct gendisk *gdp;
34 int len;
35
36 /* Make sure the minor for this device exists. */
37 if (device->devindex >= DASD_PER_MAJOR)
38 return -EBUSY;
39
40 gdp = alloc_disk(1 << DASD_PARTN_BITS);
41 if (!gdp)
42 return -ENOMEM;
43
44 /* Initialize gendisk structure. */
45 gdp->major = DASD_MAJOR;
46 gdp->first_minor = device->devindex << DASD_PARTN_BITS;
47 gdp->fops = &dasd_device_operations;
48 gdp->driverfs_dev = &device->cdev->dev;
49
50 /*
51 * Set device name.
52 * dasda - dasdz : 26 devices
53 * dasdaa - dasdzz : 676 devices, added up = 702
54 * dasdaaa - dasdzzz : 17576 devices, added up = 18278
55	 * dasdaaaa - dasdzzzz : 456976 devices, added up = 475254
56 */
57 len = sprintf(gdp->disk_name, "dasd");
58 if (device->devindex > 25) {
59 if (device->devindex > 701) {
60 if (device->devindex > 18277)
61 len += sprintf(gdp->disk_name + len, "%c",
62 'a'+(((device->devindex-18278)
63 /17576)%26));
64 len += sprintf(gdp->disk_name + len, "%c",
65 'a'+(((device->devindex-702)/676)%26));
66 }
67 len += sprintf(gdp->disk_name + len, "%c",
68 'a'+(((device->devindex-26)/26)%26));
69 }
70 len += sprintf(gdp->disk_name + len, "%c", 'a'+(device->devindex%26));
71
72 sprintf(gdp->devfs_name, "dasd/%s", device->cdev->dev.bus_id);
73
74 if (test_bit(DASD_FLAG_RO, &device->flags))
75 set_disk_ro(gdp, 1);
76 gdp->private_data = device;
77 gdp->queue = device->request_queue;
78 device->gdp = gdp;
79 set_capacity(device->gdp, 0);
80 add_disk(device->gdp);
81 return 0;
82}
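
/*
 * Editor's note -- worked examples of the devindex -> name mapping
 * implemented above:
 *
 *	devindex     0 -> dasda
 *	devindex    25 -> dasdz
 *	devindex    26 -> dasdaa
 *	devindex   701 -> dasdzz
 *	devindex   702 -> dasdaaa
 *	devindex 18278 -> dasdaaaa
 */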
83
84/*
85 * Unregister and free gendisk structure for device.
86 */
87void
88dasd_gendisk_free(struct dasd_device *device)
89{
90 del_gendisk(device->gdp);
91	device->gdp->queue = NULL;
92	put_disk(device->gdp);
93	device->gdp = NULL;
94}
95
96/*
97 * Trigger a partition detection.
98 */
99int
100dasd_scan_partitions(struct dasd_device * device)
101{
102 struct block_device *bdev;
103
104 /* Make the disk known. */
105 set_capacity(device->gdp, device->blocks << device->s2b_shift);
106 bdev = bdget_disk(device->gdp, 0);
107 if (!bdev || blkdev_get(bdev, FMODE_READ, 1) < 0)
108 return -ENODEV;
109 /*
110 * See fs/partition/check.c:register_disk,rescan_partitions
111 * Can't call rescan_partitions directly. Use ioctl.
112 */
113 ioctl_by_bdev(bdev, BLKRRPART, 0);
114 /*
115 * Since the matching blkdev_put call to the blkdev_get in
116 * this function is not called before dasd_destroy_partitions
117 * the offline open_count limit needs to be increased from
118 * 0 to 1. This is done by setting device->bdev (see
119 * dasd_generic_set_offline). As long as the partition
120 * detection is running no offline should be allowed. That
121 * is why the assignment to device->bdev is done AFTER
122 * the BLKRRPART ioctl.
123 */
124 device->bdev = bdev;
125 return 0;
126}
127
128/*
129 * Remove all inodes in the system for a device, delete the
130 * partitions and make device unusable by setting its size to zero.
131 */
132void
133dasd_destroy_partitions(struct dasd_device * device)
134{
135	/* The two structs have 168/176 bytes on 31/64 bit. */
136 struct blkpg_partition bpart;
137 struct blkpg_ioctl_arg barg;
138 struct block_device *bdev;
139
140 /*
141 * Get the bdev pointer from the device structure and clear
142 * device->bdev to lower the offline open_count limit again.
143 */
144 bdev = device->bdev;
145 device->bdev = 0;
146
147 /*
148 * See fs/partition/check.c:delete_partition
149 * Can't call delete_partitions directly. Use ioctl.
150 * The ioctl also does locking and invalidation.
151 */
152 memset(&bpart, 0, sizeof(struct blkpg_partition));
153 memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
154 barg.data = &bpart;
155 barg.op = BLKPG_DEL_PARTITION;
156 for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
157 ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
158
159 invalidate_partition(device->gdp, 0);
160 /* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
161 blkdev_put(bdev);
162 set_capacity(device->gdp, 0);
163}
164
165int
166dasd_gendisk_init(void)
167{
168 int rc;
169
170 /* Register to static dasd major 94 */
171 rc = register_blkdev(DASD_MAJOR, "dasd");
172 if (rc != 0) {
173 MESSAGE(KERN_WARNING,
174 "Couldn't register successfully to "
175 "major no %d", DASD_MAJOR);
176 return rc;
177 }
178 return 0;
179}
180
181void
182dasd_gendisk_exit(void)
183{
184 unregister_blkdev(DASD_MAJOR, "dasd");
185}
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
new file mode 100644
index 000000000000..4586e0ecc526
--- /dev/null
+++ b/drivers/s390/block/dasd_int.h
@@ -0,0 +1,576 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_int.h
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Martin Schwidefsky <schwidefsky@de.ibm.com>
6 * Bugreports.to..: <Linux390@de.ibm.com>
7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
8 *
9 * $Revision: 1.63 $
10 */
11
12#ifndef DASD_INT_H
13#define DASD_INT_H
14
15#ifdef __KERNEL__
16
17/* erp debugging in dasd.c and dasd_3990_erp.c */
18#define ERP_DEBUG
19
20
21/* we keep the old device allocation scheme; in other words, minors are still in 0..255 */
22#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
23#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
24
25/*
26 * States a dasd device can have:
27 * new: the dasd_device structure is allocated.
28 * known: the discipline for the device is identified.
29 * basic: the device can do basic i/o.
30 * accept: the device is analysed (format is known).
31 * ready: partition detection is done and the device can do block io.
32 * online: the device accepts requests from the block device queue.
33 *
34 * Things to do for startup state transitions:
35 * new -> known: find discipline for the device and create devfs entries.
36 * known -> basic: request irq line for the device.
37 * basic -> ready: do the initial analysis, e.g. format detection,
38 * do block device setup and detect partitions.
39 * ready -> online: schedule the device tasklet.
40 * Things to do for shutdown state transitions:
41 * online -> ready: just set the new device state.
42 * ready -> basic: flush requests from the block device layer, clear
43 * partition information and reset format information.
44 * basic -> known: terminate all requests and free irq.
45 * known -> new: remove devfs entries and forget discipline.
46 */
47
48#define DASD_STATE_NEW 0
49#define DASD_STATE_KNOWN 1
50#define DASD_STATE_BASIC 2
51#define DASD_STATE_READY 3
52#define DASD_STATE_ONLINE 4
53
54#include <linux/module.h>
55#include <linux/wait.h>
56#include <linux/blkdev.h>
57#include <linux/devfs_fs_kernel.h>
58#include <linux/genhd.h>
59#include <linux/hdreg.h>
60#include <linux/interrupt.h>
61#include <asm/ccwdev.h>
62#include <linux/workqueue.h>
63#include <asm/debug.h>
64#include <asm/dasd.h>
65#include <asm/idals.h>
66
67/*
68 * SECTION: Type definitions
69 */
70struct dasd_device;
71
72typedef int (*dasd_ioctl_fn_t) (struct block_device *bdev, int no, long args);
73
74struct dasd_ioctl {
75 struct list_head list;
76 struct module *owner;
77 int no;
78 dasd_ioctl_fn_t handler;
79};
80
81typedef enum {
82 dasd_era_fatal = -1, /* no chance to recover */
83 dasd_era_none = 0, /* don't recover, everything alright */
84 dasd_era_msg = 1, /* don't recover, just report... */
85 dasd_era_recover = 2 /* recovery action recommended */
86} dasd_era_t;
87
88/* BIT DEFINITIONS FOR SENSE DATA */
89#define DASD_SENSE_BIT_0 0x80
90#define DASD_SENSE_BIT_1 0x40
91#define DASD_SENSE_BIT_2 0x20
92#define DASD_SENSE_BIT_3 0x10
93
94/*
95 * SECTION: MACROs for klogd and s390 debug feature (dbf)
96 */
97#define DBF_DEV_EVENT(d_level, d_device, d_str, d_data...) \
98do { \
99 debug_sprintf_event(d_device->debug_area, \
100 d_level, \
101 d_str "\n", \
102 d_data); \
103} while(0)
104
105#define DBF_DEV_EXC(d_level, d_device, d_str, d_data...) \
106do { \
107 debug_sprintf_exception(d_device->debug_area, \
108 d_level, \
109 d_str "\n", \
110 d_data); \
111} while(0)
112
113#define DBF_EVENT(d_level, d_str, d_data...)\
114do { \
115 debug_sprintf_event(dasd_debug_area, \
116 d_level,\
117 d_str "\n", \
118 d_data); \
119} while(0)
120
121#define DBF_EXC(d_level, d_str, d_data...)\
122do { \
123 debug_sprintf_exception(dasd_debug_area, \
124 d_level,\
125 d_str "\n", \
126 d_data); \
127} while(0)
128
129/* definition of dbf debug levels */
130#define DBF_EMERG 0 /* system is unusable */
131#define DBF_ALERT 1 /* action must be taken immediately */
132#define DBF_CRIT 2 /* critical conditions */
133#define DBF_ERR 3 /* error conditions */
134#define DBF_WARNING 4 /* warning conditions */
135#define DBF_NOTICE 5 /* normal but significant condition */
136#define DBF_INFO 6 /* informational */
137#define DBF_DEBUG 6 /* debug-level messages */
138
139/* messages to be written via klogd and dbf */
140#define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\
141do { \
142 printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
143 d_device->cdev->dev.bus_id, d_args); \
144 DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \
145} while(0)
146
147#define MESSAGE(d_loglevel,d_string,d_args...)\
148do { \
149 printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
150 DBF_EVENT(DBF_ALERT, d_string, d_args); \
151} while(0)
152
153/* messages to be written via klogd only */
154#define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\
155do { \
156 printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
157 d_device->cdev->dev.bus_id, d_args); \
158} while(0)
159
160#define MESSAGE_LOG(d_loglevel,d_string,d_args...)\
161do { \
162 printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
163} while(0)
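
/*
 * Editor's sketch of intended use (device and message text invented):
 *
 *	DEV_MESSAGE(KERN_INFO, device, "found %d blocks", count);
 *
 * writes "PRINTK_HEADER <bus_id>: found ... blocks" via printk and
 * records the same string as a DBF_ALERT event in the device's debug
 * area; the *_LOG variants above skip the dbf event.
 */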
164
165struct dasd_ccw_req {
166 unsigned int magic; /* Eye catcher */
167 struct list_head list; /* list_head for request queueing. */
168
169 /* Where to execute what... */
170 struct dasd_device *device; /* device the request is for */
171 struct ccw1 *cpaddr; /* address of channel program */
172 char status; /* status of this request */
173 short retries; /* A retry counter */
174 unsigned long flags; /* flags of this request */
175
176 /* ... and how */
177 unsigned long starttime; /* jiffies time of request start */
178 int expires; /* expiration period in jiffies */
179 char lpm; /* logical path mask */
180 void *data; /* pointer to data area */
181
182 /* these are important for recovering erroneous requests */
183 struct irb irb; /* device status in case of an error */
184 struct dasd_ccw_req *refers; /* ERP-chain queueing. */
185 void *function; /* originating ERP action */
186
187 /* these are for statistics only */
188 unsigned long long buildclk; /* TOD-clock of request generation */
189 unsigned long long startclk; /* TOD-clock of request start */
190 unsigned long long stopclk; /* TOD-clock of request interrupt */
191 unsigned long long endclk; /* TOD-clock of request termination */
192
193 /* Callback that is called after reaching final status. */
194 void (*callback)(struct dasd_ccw_req *, void *data);
195 void *callback_data;
196};
197
198/*
199 * dasd_ccw_req -> status can be:
200 */
201#define DASD_CQR_FILLED 0x00 /* request is ready to be processed */
202#define DASD_CQR_QUEUED 0x01 /* request is queued to be processed */
203#define DASD_CQR_IN_IO 0x02 /* request is currently in IO */
204#define DASD_CQR_DONE 0x03 /* request is completed successfully */
205#define DASD_CQR_ERROR 0x04 /* request is completed with error */
206#define DASD_CQR_FAILED 0x05 /* request is finally failed */
207#define DASD_CQR_CLEAR 0x06 /* request is clear pending */
208
209/* per dasd_ccw_req flags */
210#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
211
212/* Signature for error recovery functions. */
213typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
214
215/*
216 * the struct dasd_discipline is
217 * sth like a table of virtual functions, if you think of dasd_eckd
218 * inheriting dasd...
219 * no, currently we are not planning to reimplement the driver in C++
220 */
221struct dasd_discipline {
222 struct module *owner;
223 char ebcname[8]; /* a name used for tagging and printks */
224 char name[8]; /* a name used for tagging and printks */
225 int max_blocks; /* maximum number of blocks to be chained */
226
227 struct list_head list; /* used for list of disciplines */
228
229 /*
230 * Device recognition functions. check_device is used to verify
231 * the sense data and the information returned by read device
232 * characteristics. It returns 0 if the discipline can be used
233 * for the device in question.
234 * do_analysis is used in the step from device state "basic" to
235 * state "accept". It returns 0 if the device can be made ready,
236 * it returns -EMEDIUMTYPE if the device can't be made ready or
237 * -EAGAIN if do_analysis started a ccw that needs to complete
238 * before the analysis may be repeated.
239 */
240 int (*check_device)(struct dasd_device *);
241 int (*do_analysis) (struct dasd_device *);
242
243 /*
244 * Device operation functions. build_cp creates a ccw chain for
245 * a block device request, start_io starts the request and
246 * term_IO cancels it (e.g. in case of a timeout). format_device
247 * returns a ccw chain to be used to format the device.
248 */
249 struct dasd_ccw_req *(*build_cp) (struct dasd_device *,
250 struct request *);
251 int (*start_IO) (struct dasd_ccw_req *);
252 int (*term_IO) (struct dasd_ccw_req *);
253 struct dasd_ccw_req *(*format_device) (struct dasd_device *,
254 struct format_data_t *);
255 int (*free_cp) (struct dasd_ccw_req *, struct request *);
256 /*
257 * Error recovery functions. examine_error() returns a value that
258 * indicates what to do for an error condition. If examine_error()
259 * returns 'dasd_era_recover' erp_action() is called to create a
260 * special error recovery ccw. erp_postaction() is called after
261 * an error recovery ccw has finished its execution. dump_sense
262 * is called for every error condition to print the sense data
263 * to the console.
264 */
265 dasd_era_t(*examine_error) (struct dasd_ccw_req *, struct irb *);
266 dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *);
267 dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
268 void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
269 struct irb *);
270
271 /* i/o control functions. */
272 int (*fill_geometry) (struct dasd_device *, struct hd_geometry *);
273 int (*fill_info) (struct dasd_device *, struct dasd_information2_t *);
274};
275
276extern struct dasd_discipline *dasd_diag_discipline_pointer;
277
278struct dasd_device {
279 /* Block device stuff. */
280 struct gendisk *gdp;
281 request_queue_t *request_queue;
282 spinlock_t request_queue_lock;
283 struct block_device *bdev;
284 unsigned int devindex;
285 unsigned long blocks; /* size of volume in blocks */
286 unsigned int bp_block; /* bytes per block */
287 unsigned int s2b_shift; /* log2 (bp_block/512) */
288 unsigned long flags; /* per device flags */
289
290 /* Device discipline stuff. */
291 struct dasd_discipline *discipline;
292 char *private;
293
294 /* Device state and target state. */
295 int state, target;
296 int stopped; /* device (ccw_device_start) was stopped */
297
298 /* Open and reference count. */
299 atomic_t ref_count;
300 atomic_t open_count;
301
302 /* ccw queue and memory for static ccw/erp buffers. */
303 struct list_head ccw_queue;
304 spinlock_t mem_lock;
305 void *ccw_mem;
306 void *erp_mem;
307 struct list_head ccw_chunks;
308 struct list_head erp_chunks;
309
310 atomic_t tasklet_scheduled;
311 struct tasklet_struct tasklet;
312 struct work_struct kick_work;
313 struct timer_list timer;
314
315 debug_info_t *debug_area;
316
317 struct ccw_device *cdev;
318
319#ifdef CONFIG_DASD_PROFILE
320 struct dasd_profile_info_t profile;
321#endif
322};
323
324/* reasons why device (ccw_device_start) was stopped */
325#define DASD_STOPPED_NOT_ACC 1 /* not accessible */
326#define DASD_STOPPED_QUIESCE 2 /* Quiesced */
327#define DASD_STOPPED_PENDING 4 /* long busy */
328#define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */
329#define DASD_STOPPED_DC_EIO 16 /* disconnected, return -EIO */
330
331/* per device flags */
332#define DASD_FLAG_RO 0 /* device is read-only */
333#define DASD_FLAG_USE_DIAG	1	/* use diag discipline */
334#define DASD_FLAG_DSC_ERROR 2 /* return -EIO when disconnected */
335#define DASD_FLAG_OFFLINE 3 /* device is in offline processing */
336
337void dasd_put_device_wake(struct dasd_device *);
338
339/*
340 * Reference count inliners
341 */
342static inline void
343dasd_get_device(struct dasd_device *device)
344{
345 atomic_inc(&device->ref_count);
346}
347
348static inline void
349dasd_put_device(struct dasd_device *device)
350{
351 if (atomic_dec_return(&device->ref_count) == 0)
352 dasd_put_device_wake(device);
353}
354
355/*
356 * The static memory in ccw_mem and erp_mem is managed by a sorted
357 * list of free memory chunks.
358 */
359struct dasd_mchunk
360{
361 struct list_head list;
362 unsigned long size;
363} __attribute__ ((aligned(8)));
364
365static inline void
366dasd_init_chunklist(struct list_head *chunk_list, void *mem,
367 unsigned long size)
368{
369 struct dasd_mchunk *chunk;
370
371 INIT_LIST_HEAD(chunk_list);
372 chunk = (struct dasd_mchunk *) mem;
373 chunk->size = size - sizeof(struct dasd_mchunk);
374 list_add(&chunk->list, chunk_list);
375}
376
377static inline void *
378dasd_alloc_chunk(struct list_head *chunk_list, unsigned long size)
379{
380 struct dasd_mchunk *chunk, *tmp;
381
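	/* round the request up to the next multiple of 8 bytes */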
382 size = (size + 7L) & -8L;
383 list_for_each_entry(chunk, chunk_list, list) {
384 if (chunk->size < size)
385 continue;
386 if (chunk->size > size + sizeof(struct dasd_mchunk)) {
387 char *endaddr = (char *) (chunk + 1) + chunk->size;
388 tmp = (struct dasd_mchunk *) (endaddr - size) - 1;
389 tmp->size = size;
390 chunk->size -= size + sizeof(struct dasd_mchunk);
391 chunk = tmp;
392 } else
393 list_del(&chunk->list);
394 return (void *) (chunk + 1);
395 }
396 return NULL;
397}
398
399static inline void
400dasd_free_chunk(struct list_head *chunk_list, void *mem)
401{
402 struct dasd_mchunk *chunk, *tmp;
403 struct list_head *p, *left;
404
405 chunk = (struct dasd_mchunk *)
406 ((char *) mem - sizeof(struct dasd_mchunk));
407 /* Find out the left neighbour in chunk_list. */
408 left = chunk_list;
409 list_for_each(p, chunk_list) {
410 if (list_entry(p, struct dasd_mchunk, list) > chunk)
411 break;
412 left = p;
413 }
414 /* Try to merge with right neighbour = next element from left. */
415 if (left->next != chunk_list) {
416 tmp = list_entry(left->next, struct dasd_mchunk, list);
417 if ((char *) (chunk + 1) + chunk->size == (char *) tmp) {
418 list_del(&tmp->list);
419 chunk->size += tmp->size + sizeof(struct dasd_mchunk);
420 }
421 }
422 /* Try to merge with left neighbour. */
423 if (left != chunk_list) {
424 tmp = list_entry(left, struct dasd_mchunk, list);
425 if ((char *) (tmp + 1) + tmp->size == (char *) chunk) {
426 tmp->size += chunk->size + sizeof(struct dasd_mchunk);
427 return;
428 }
429 }
430 __list_add(&chunk->list, left, left->next);
431}
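
/*
 * Editor's sketch -- how the three chunk helpers above fit together
 * (buffer and sizes invented; illustration only, never compiled):
 */
#if 0
static void chunklist_example(void)
{
	static char buf[8192];
	struct list_head chunks;
	void *mem;

	dasd_init_chunklist(&chunks, buf, sizeof(buf));
	/* carve 512 bytes; requests are rounded to 8-byte multiples */
	mem = dasd_alloc_chunk(&chunks, 512);
	if (mem)
		/* returns the chunk and merges it with free neighbours */
		dasd_free_chunk(&chunks, mem);
}
#endif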
432
433/*
434 * Check if bsize is in { 512, 1024, 2048, 4096 }
435 */
436static inline int
437dasd_check_blocksize(int bsize)
438{
439 if (bsize < 512 || bsize > 4096 || (bsize & (bsize - 1)) != 0)
440 return -EMEDIUMTYPE;
441 return 0;
442}
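
/*
 * Editor's note -- examples for the check above: 512, 1024, 2048 and
 * 4096 pass; 256 fails the lower bound, 8192 the upper one, and 1536
 * fails the power-of-two test (1536 & 1535 != 0).
 */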
443
444/* externals in dasd.c */
445#define DASD_PROFILE_ON 1
446#define DASD_PROFILE_OFF 0
447
448extern debug_info_t *dasd_debug_area;
449extern struct dasd_profile_info_t dasd_global_profile;
450extern unsigned int dasd_profile_level;
451extern struct block_device_operations dasd_device_operations;
452
453extern kmem_cache_t *dasd_page_cache;
454
455struct dasd_ccw_req *
456dasd_kmalloc_request(char *, int, int, struct dasd_device *);
457struct dasd_ccw_req *
458dasd_smalloc_request(char *, int, int, struct dasd_device *);
459void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
460void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
461
462static inline int
463dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
464{
465 return set_normalized_cda(ccw, cda);
466}
467
468struct dasd_device *dasd_alloc_device(void);
469void dasd_free_device(struct dasd_device *);
470
471void dasd_enable_device(struct dasd_device *);
472void dasd_set_target_state(struct dasd_device *, int);
473void dasd_kick_device(struct dasd_device *);
474
475void dasd_add_request_head(struct dasd_ccw_req *);
476void dasd_add_request_tail(struct dasd_ccw_req *);
477int dasd_start_IO(struct dasd_ccw_req *);
478int dasd_term_IO(struct dasd_ccw_req *);
479void dasd_schedule_bh(struct dasd_device *);
480int dasd_sleep_on(struct dasd_ccw_req *);
481int dasd_sleep_on_immediatly(struct dasd_ccw_req *);
482int dasd_sleep_on_interruptible(struct dasd_ccw_req *);
483void dasd_set_timer(struct dasd_device *, int);
484void dasd_clear_timer(struct dasd_device *);
485int dasd_cancel_req(struct dasd_ccw_req *);
486int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *);
487void dasd_generic_remove (struct ccw_device *cdev);
488int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
489int dasd_generic_set_offline (struct ccw_device *cdev);
490int dasd_generic_notify(struct ccw_device *, int);
491void dasd_generic_auto_online (struct ccw_driver *);
492
493/* externals in dasd_devmap.c */
494extern int dasd_max_devindex;
495extern int dasd_probeonly;
496extern int dasd_autodetect;
497
498int dasd_devmap_init(void);
499void dasd_devmap_exit(void);
500
501struct dasd_device *dasd_create_device(struct ccw_device *);
502void dasd_delete_device(struct dasd_device *);
503
504int dasd_add_sysfs_files(struct ccw_device *);
505void dasd_remove_sysfs_files(struct ccw_device *);
506
507struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
508struct dasd_device *dasd_device_from_devindex(int);
509
510int dasd_parse(void);
511int dasd_busid_known(char *);
512
513/* externals in dasd_gendisk.c */
514int dasd_gendisk_init(void);
515void dasd_gendisk_exit(void);
516int dasd_gendisk_alloc(struct dasd_device *);
517void dasd_gendisk_free(struct dasd_device *);
518int dasd_scan_partitions(struct dasd_device *);
519void dasd_destroy_partitions(struct dasd_device *);
520
521/* externals in dasd_ioctl.c */
522int dasd_ioctl_init(void);
523void dasd_ioctl_exit(void);
524int dasd_ioctl_no_register(struct module *, int, dasd_ioctl_fn_t);
525int dasd_ioctl_no_unregister(struct module *, int, dasd_ioctl_fn_t);
526int dasd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
527
528/* externals in dasd_proc.c */
529int dasd_proc_init(void);
530void dasd_proc_exit(void);
531
532/* externals in dasd_erp.c */
533struct dasd_ccw_req *dasd_default_erp_action(struct dasd_ccw_req *);
534struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *);
535struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
536 struct dasd_device *);
537void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
538void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
539void dasd_log_ccw(struct dasd_ccw_req *, int, __u32);
540
541/* externals in dasd_3370_erp.c */
542dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *);
543
544/* externals in dasd_3990_erp.c */
545dasd_era_t dasd_3990_erp_examine(struct dasd_ccw_req *, struct irb *);
546struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
547
548/* externals in dasd_9336_erp.c */
549dasd_era_t dasd_9336_erp_examine(struct dasd_ccw_req *, struct irb *);
550
551/* externals in dasd_9343_erp.c */
552dasd_era_t dasd_9343_erp_examine(struct dasd_ccw_req *, struct irb *);
553struct dasd_ccw_req *dasd_9343_erp_action(struct dasd_ccw_req *);
554
555#endif /* __KERNEL__ */
556
557#endif				/* DASD_INT_H */
558
559/*
560 * Overrides for Emacs so that we follow Linus's tabbing style.
561 * Emacs will notice this stuff at the end of the file and automatically
562 * adjust the settings for this buffer only. This must remain at the end
563 * of the file.
564 * ---------------------------------------------------------------------------
565 * Local variables:
566 * c-indent-level: 4
567 * c-brace-imaginary-offset: 0
568 * c-brace-offset: -4
569 * c-argdecl-indent: 4
570 * c-label-offset: -4
571 * c-continued-statement-offset: 4
572 * c-continued-brace-offset: 0
573 * indent-tabs-mode: 1
574 * tab-width: 8
575 * End:
576 */
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
new file mode 100644
index 000000000000..f1892baa3b18
--- /dev/null
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -0,0 +1,554 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_ioctl.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 *
10 * i/o controls for the dasd driver.
11 */
12#include <linux/config.h>
13#include <linux/interrupt.h>
14#include <linux/major.h>
15#include <linux/fs.h>
16#include <linux/blkpg.h>
17
18#include <asm/ccwdev.h>
19#include <asm/uaccess.h>
20
21/* This is ugly... */
22#define PRINTK_HEADER "dasd_ioctl:"
23
24#include "dasd_int.h"
25
26/*
27 * SECTION: ioctl functions.
28 */
29static struct list_head dasd_ioctl_list = LIST_HEAD_INIT(dasd_ioctl_list);
30
31/*
32 * Find the ioctl with number no.
33 */
34static struct dasd_ioctl *
35dasd_find_ioctl(int no)
36{
37 struct dasd_ioctl *ioctl;
38
39 list_for_each_entry (ioctl, &dasd_ioctl_list, list)
40 if (ioctl->no == no)
41 return ioctl;
42 return NULL;
43}
44
45/*
46 * Register ioctl with number no.
47 */
48int
49dasd_ioctl_no_register(struct module *owner, int no, dasd_ioctl_fn_t handler)
50{
51 struct dasd_ioctl *new;
52 if (dasd_find_ioctl(no))
53 return -EBUSY;
54 new = kmalloc(sizeof (struct dasd_ioctl), GFP_KERNEL);
55 if (new == NULL)
56 return -ENOMEM;
57 new->owner = owner;
58 new->no = no;
59 new->handler = handler;
60 list_add(&new->list, &dasd_ioctl_list);
61 return 0;
62}
63
64/*
65 * Deregister ioctl with number no.
66 */
67int
68dasd_ioctl_no_unregister(struct module *owner, int no, dasd_ioctl_fn_t handler)
69{
70 struct dasd_ioctl *old = dasd_find_ioctl(no);
71 if (old == NULL)
72 return -ENOENT;
73 if (old->no != no || old->handler != handler || owner != old->owner)
74 return -EINVAL;
75 list_del(&old->list);
76 kfree(old);
77 return 0;
78}
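
/*
 * Editor's sketch -- how a module would hook a private ioctl through
 * the two functions above (number and handler invented; illustration
 * only, never compiled):
 */
#if 0
static int example_handler(struct block_device *bdev, int no, long args)
{
	return 0;	/* the ioctl's work would go here */
}

/* in the module's init/exit functions: */
rc = dasd_ioctl_no_register(THIS_MODULE, EXAMPLE_IOCTL_NO, example_handler);
...
dasd_ioctl_no_unregister(THIS_MODULE, EXAMPLE_IOCTL_NO, example_handler);
#endif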
79
80int
81dasd_ioctl(struct inode *inp, struct file *filp,
82 unsigned int no, unsigned long data)
83{
84 struct block_device *bdev = inp->i_bdev;
85 struct dasd_device *device = bdev->bd_disk->private_data;
86 struct dasd_ioctl *ioctl;
87 const char *dir;
88 int rc;
89
90 if ((_IOC_DIR(no) != _IOC_NONE) && (data == 0)) {
91 PRINT_DEBUG("empty data ptr");
92 return -EINVAL;
93 }
94 dir = _IOC_DIR (no) == _IOC_NONE ? "0" :
95 _IOC_DIR (no) == _IOC_READ ? "r" :
96 _IOC_DIR (no) == _IOC_WRITE ? "w" :
97 _IOC_DIR (no) == (_IOC_READ | _IOC_WRITE) ? "rw" : "u";
98 DBF_DEV_EVENT(DBF_DEBUG, device,
99 "ioctl 0x%08x %s'0x%x'%d(%d) with data %8lx", no,
100 dir, _IOC_TYPE(no), _IOC_NR(no), _IOC_SIZE(no), data);
101 /* Search for ioctl no in the ioctl list. */
102 list_for_each_entry(ioctl, &dasd_ioctl_list, list) {
103 if (ioctl->no == no) {
104 /* Found a matching ioctl. Call it. */
105 if (!try_module_get(ioctl->owner))
106 continue;
107 rc = ioctl->handler(bdev, no, data);
108 module_put(ioctl->owner);
109 return rc;
110 }
111 }
112 /* No ioctl with number no. */
113 DBF_DEV_EVENT(DBF_INFO, device,
114 "unknown ioctl 0x%08x=%s'0x%x'%d(%d) data %8lx", no,
115 dir, _IOC_TYPE(no), _IOC_NR(no), _IOC_SIZE(no), data);
116 return -EINVAL;
117}
118
119static int
120dasd_ioctl_api_version(struct block_device *bdev, int no, long args)
121{
122 int ver = DASD_API_VERSION;
123 return put_user(ver, (int __user *) args);
124}
125
126/*
127 * Enable device.
128 * used by dasdfmt after BIODASDDISABLE to retrigger blocksize detection
129 */
130static int
131dasd_ioctl_enable(struct block_device *bdev, int no, long args)
132{
133 struct dasd_device *device;
134
135 if (!capable(CAP_SYS_ADMIN))
136 return -EACCES;
137 device = bdev->bd_disk->private_data;
138 if (device == NULL)
139 return -ENODEV;
140 dasd_enable_device(device);
141 /* Formatting the dasd device can change the capacity. */
142 down(&bdev->bd_sem);
143 i_size_write(bdev->bd_inode, (loff_t)get_capacity(device->gdp) << 9);
144 up(&bdev->bd_sem);
145 return 0;
146}
147
148/*
149 * Disable device.
150 * Used by dasdfmt. Disable I/O operations but allow ioctls.
151 */
152static int
153dasd_ioctl_disable(struct block_device *bdev, int no, long args)
154{
155 struct dasd_device *device;
156
157 if (!capable(CAP_SYS_ADMIN))
158 return -EACCES;
159 device = bdev->bd_disk->private_data;
160 if (device == NULL)
161 return -ENODEV;
162 /*
163 * Man this is sick. We don't do a real disable but only downgrade
164 * the device to DASD_STATE_BASIC. The reason is that dasdfmt uses
165 * BIODASDDISABLE to disable accesses to the device via the block
166 * device layer but it still wants to do i/o on the device by
167 * using the BIODASDFMT ioctl. Therefore the correct state for the
168	 * device is DASD_STATE_BASIC, which still allows basic i/o.
169 */
170 dasd_set_target_state(device, DASD_STATE_BASIC);
171 /*
172 * Set i_size to zero, since read, write, etc. check against this
173 * value.
174 */
175 down(&bdev->bd_sem);
176 i_size_write(bdev->bd_inode, 0);
177 up(&bdev->bd_sem);
178 return 0;
179}
180
181/*
182 * Quiesce device.
183 */
184static int
185dasd_ioctl_quiesce(struct block_device *bdev, int no, long args)
186{
187 struct dasd_device *device;
188 unsigned long flags;
189
190 if (!capable (CAP_SYS_ADMIN))
191 return -EACCES;
192
193 device = bdev->bd_disk->private_data;
194 if (device == NULL)
195 return -ENODEV;
196
197 DEV_MESSAGE (KERN_DEBUG, device, "%s",
198 "Quiesce IO on device");
199 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
200 device->stopped |= DASD_STOPPED_QUIESCE;
201 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
202 return 0;
203}
204
205
206/*
207 * Resume device.
208 */
209static int
210dasd_ioctl_resume(struct block_device *bdev, int no, long args)
211{
212 struct dasd_device *device;
213 unsigned long flags;
214
215 if (!capable (CAP_SYS_ADMIN))
216 return -EACCES;
217
218 device = bdev->bd_disk->private_data;
219 if (device == NULL)
220 return -ENODEV;
221
222 DEV_MESSAGE (KERN_DEBUG, device, "%s",
223 "resume IO on device");
224
225 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
226 device->stopped &= ~DASD_STOPPED_QUIESCE;
227 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
228
229 dasd_schedule_bh (device);
230 return 0;
231}
232
233/*
234 * performs formatting of _device_ according to _fdata_
235 * Note: The discipline's format_function is assumed to deliver formatting
236 * commands to format a single unit of the device. In terms of the ECKD
237 * devices this means CCWs are generated to format a single track.
238 */
239static int
240dasd_format(struct dasd_device * device, struct format_data_t * fdata)
241{
242 struct dasd_ccw_req *cqr;
243 int rc;
244
245 if (device->discipline->format_device == NULL)
246 return -EPERM;
247
248 if (device->state != DASD_STATE_BASIC) {
249 DEV_MESSAGE(KERN_WARNING, device, "%s",
250 "dasd_format: device is not disabled! ");
251 return -EBUSY;
252 }
253
254 DBF_DEV_EVENT(DBF_NOTICE, device,
255 "formatting units %d to %d (%d B blocks) flags %d",
256 fdata->start_unit,
257 fdata->stop_unit, fdata->blksize, fdata->intensity);
258
259 /* Since dasdfmt keeps the device open after it was disabled,
260 * there still exists an inode for this device.
261 * We must update i_blkbits, otherwise we might get errors when
262 * enabling the device later.
263 */
264 if (fdata->start_unit == 0) {
265 struct block_device *bdev = bdget_disk(device->gdp, 0);
266 bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize);
267 bdput(bdev);
268 }
269
270 while (fdata->start_unit <= fdata->stop_unit) {
271 cqr = device->discipline->format_device(device, fdata);
272 if (IS_ERR(cqr))
273 return PTR_ERR(cqr);
274 rc = dasd_sleep_on_interruptible(cqr);
275 dasd_sfree_request(cqr, cqr->device);
276 if (rc) {
277 if (rc != -ERESTARTSYS)
278 DEV_MESSAGE(KERN_ERR, device,
279 " Formatting of unit %d failed "
280 "with rc = %d",
281 fdata->start_unit, rc);
282 return rc;
283 }
284 fdata->start_unit++;
285 }
286 return 0;
287}
288
289/*
290 * Format device.
291 */
292static int
293dasd_ioctl_format(struct block_device *bdev, int no, long args)
294{
295 struct dasd_device *device;
296 struct format_data_t fdata;
297
298 if (!capable(CAP_SYS_ADMIN))
299 return -EACCES;
300 if (!args)
301 return -EINVAL;
302 /* fdata == NULL is no longer a valid arg to dasd_format ! */
303 device = bdev->bd_disk->private_data;
304
305 if (device == NULL)
306 return -ENODEV;
307 if (test_bit(DASD_FLAG_RO, &device->flags))
308 return -EROFS;
309 if (copy_from_user(&fdata, (void __user *) args,
310 sizeof (struct format_data_t)))
311 return -EFAULT;
312 if (bdev != bdev->bd_contains) {
313 DEV_MESSAGE(KERN_WARNING, device, "%s",
314 "Cannot low-level format a partition");
315 return -EINVAL;
316 }
317 return dasd_format(device, &fdata);
318}
319
320#ifdef CONFIG_DASD_PROFILE
321/*
322 * Reset device profile information
323 */
324static int
325dasd_ioctl_reset_profile(struct block_device *bdev, int no, long args)
326{
327 struct dasd_device *device;
328
329 if (!capable(CAP_SYS_ADMIN))
330 return -EACCES;
331
332 device = bdev->bd_disk->private_data;
333 if (device == NULL)
334 return -ENODEV;
335
336 memset(&device->profile, 0, sizeof (struct dasd_profile_info_t));
337 return 0;
338}
339
340/*
341 * Return device profile information
342 */
343static int
344dasd_ioctl_read_profile(struct block_device *bdev, int no, long args)
345{
346 struct dasd_device *device;
347
348 device = bdev->bd_disk->private_data;
349 if (device == NULL)
350 return -ENODEV;
351
352 if (copy_to_user((long __user *) args, (long *) &device->profile,
353 sizeof (struct dasd_profile_info_t)))
354 return -EFAULT;
355 return 0;
356}
357#else
358static int
359dasd_ioctl_reset_profile(struct block_device *bdev, int no, long args)
360{
361 return -ENOSYS;
362}
363
364static int
365dasd_ioctl_read_profile(struct block_device *bdev, int no, long args)
366{
367 return -ENOSYS;
368}
369#endif
370
371/*
372 * Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
373 */
374static int
375dasd_ioctl_information(struct block_device *bdev, int no, long args)
376{
377 struct dasd_device *device;
378 struct dasd_information2_t *dasd_info;
379 unsigned long flags;
380 int rc;
381 struct ccw_device *cdev;
382
383 device = bdev->bd_disk->private_data;
384 if (device == NULL)
385 return -ENODEV;
386
387 if (!device->discipline->fill_info)
388 return -EINVAL;
389
390 dasd_info = kmalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
391 if (dasd_info == NULL)
392 return -ENOMEM;
393
394 rc = device->discipline->fill_info(device, dasd_info);
395 if (rc) {
396 kfree(dasd_info);
397 return rc;
398 }
399
400 cdev = device->cdev;
401
402 dasd_info->devno = _ccw_device_get_device_number(device->cdev);
403 dasd_info->schid = _ccw_device_get_subchannel_number(device->cdev);
404 dasd_info->cu_type = cdev->id.cu_type;
405 dasd_info->cu_model = cdev->id.cu_model;
406 dasd_info->dev_type = cdev->id.dev_type;
407 dasd_info->dev_model = cdev->id.dev_model;
408 dasd_info->open_count = atomic_read(&device->open_count);
409 dasd_info->status = device->state;
410
411 /*
412 * check if device is really formatted
413 * LDL / CDL was returned by 'fill_info'
414 */
415 if ((device->state < DASD_STATE_READY) ||
416 (dasd_check_blocksize(device->bp_block)))
417 dasd_info->format = DASD_FORMAT_NONE;
418
419 dasd_info->features |= test_bit(DASD_FLAG_RO, &device->flags) ?
420 DASD_FEATURE_READONLY : DASD_FEATURE_DEFAULT;
421
422 if (device->discipline)
423 memcpy(dasd_info->type, device->discipline->name, 4);
424 else
425 memcpy(dasd_info->type, "none", 4);
426 dasd_info->req_queue_len = 0;
427 dasd_info->chanq_len = 0;
428 if (device->request_queue->request_fn) {
429 struct list_head *l;
430#ifdef DASD_EXTENDED_PROFILING
431 {
433 spin_lock_irqsave(&device->lock, flags);
434 list_for_each(l, &device->request_queue->queue_head)
435 dasd_info->req_queue_len++;
436 spin_unlock_irqrestore(&device->lock, flags);
437 }
438#endif /* DASD_EXTENDED_PROFILING */
439 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
440 list_for_each(l, &device->ccw_queue)
441 dasd_info->chanq_len++;
442 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
443 flags);
444 }
445
446 rc = 0;
447 if (copy_to_user((long __user *) args, (long *) dasd_info,
448 ((no == (unsigned int) BIODASDINFO2) ?
449 sizeof (struct dasd_information2_t) :
450 sizeof (struct dasd_information_t))))
451 rc = -EFAULT;
452 kfree(dasd_info);
453 return rc;
454}
455
456/*
457 * Set read only
458 */
459static int
460dasd_ioctl_set_ro(struct block_device *bdev, int no, long args)
461{
462 struct dasd_device *device;
463 int intval;
464
465 if (!capable(CAP_SYS_ADMIN))
466 return -EACCES;
467 if (bdev != bdev->bd_contains)
468 // ro setting is not allowed for partitions
469 return -EINVAL;
470 if (get_user(intval, (int __user *) args))
471 return -EFAULT;
472 device = bdev->bd_disk->private_data;
473 if (device == NULL)
474 return -ENODEV;
475 set_disk_ro(bdev->bd_disk, intval);
476 if (intval)
477 set_bit(DASD_FLAG_RO, &device->flags);
478 else
479 clear_bit(DASD_FLAG_RO, &device->flags);
480 return 0;
481}
482
483/*
484 * Return disk geometry.
485 */
486static int
487dasd_ioctl_getgeo(struct block_device *bdev, int no, long args)
488{
489 struct hd_geometry geo = { 0, };
490 struct dasd_device *device;
491
492 device = bdev->bd_disk->private_data;
493 if (device == NULL)
494 return -ENODEV;
495
496	if (device->discipline == NULL ||
497	    device->discipline->fill_geometry == NULL)
498		return -EINVAL;
499
501 device->discipline->fill_geometry(device, &geo);
502 geo.start = get_start_sect(bdev) >> device->s2b_shift;
503 if (copy_to_user((struct hd_geometry __user *) args, &geo,
504 sizeof (struct hd_geometry)))
505 return -EFAULT;
506
507 return 0;
508}
509
510/*
511 * List of static ioctls.
512 */
513static struct { int no; dasd_ioctl_fn_t fn; } dasd_ioctls[] =
514{
515 { BIODASDDISABLE, dasd_ioctl_disable },
516 { BIODASDENABLE, dasd_ioctl_enable },
517 { BIODASDQUIESCE, dasd_ioctl_quiesce },
518 { BIODASDRESUME, dasd_ioctl_resume },
519 { BIODASDFMT, dasd_ioctl_format },
520 { BIODASDINFO, dasd_ioctl_information },
521 { BIODASDINFO2, dasd_ioctl_information },
522 { BIODASDPRRD, dasd_ioctl_read_profile },
523 { BIODASDPRRST, dasd_ioctl_reset_profile },
524 { BLKROSET, dasd_ioctl_set_ro },
525 { DASDAPIVER, dasd_ioctl_api_version },
526 { HDIO_GETGEO, dasd_ioctl_getgeo },
527 { -1, NULL }
528};
529
530int
531dasd_ioctl_init(void)
532{
533 int i;
534
535 for (i = 0; dasd_ioctls[i].no != -1; i++)
536 dasd_ioctl_no_register(NULL, dasd_ioctls[i].no,
537 dasd_ioctls[i].fn);
538 return 0;
539
540}
541
542void
543dasd_ioctl_exit(void)
544{
545 int i;
546
547 for (i = 0; dasd_ioctls[i].no != -1; i++)
548 dasd_ioctl_no_unregister(NULL, dasd_ioctls[i].no,
549 dasd_ioctls[i].fn);
550
551}
552
553EXPORT_SYMBOL(dasd_ioctl_no_register);
554EXPORT_SYMBOL(dasd_ioctl_no_unregister);
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
new file mode 100644
index 000000000000..353d41118c62
--- /dev/null
+++ b/drivers/s390/block/dasd_proc.c
@@ -0,0 +1,319 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_proc.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2002
9 *
10 * /proc interface for the dasd driver.
11 *
12 * $Revision: 1.30 $
13 */
14
15#include <linux/config.h>
16#include <linux/ctype.h>
17#include <linux/seq_file.h>
18#include <linux/vmalloc.h>
19
20#include <asm/debug.h>
21#include <asm/uaccess.h>
22
23/* This is ugly... */
24#define PRINTK_HEADER "dasd_proc:"
25
26#include "dasd_int.h"
27
28static struct proc_dir_entry *dasd_proc_root_entry = NULL;
29static struct proc_dir_entry *dasd_devices_entry = NULL;
30static struct proc_dir_entry *dasd_statistics_entry = NULL;
31
32static inline char *
33dasd_get_user_string(const char __user *user_buf, size_t user_len)
34{
35 char *buffer;
36
37 buffer = kmalloc(user_len + 1, GFP_KERNEL);
38 if (buffer == NULL)
39 return ERR_PTR(-ENOMEM);
40 if (copy_from_user(buffer, user_buf, user_len) != 0) {
41 kfree(buffer);
42 return ERR_PTR(-EFAULT);
43 }
44 /* got the string, now strip linefeed. */
45 if (buffer[user_len - 1] == '\n')
46 buffer[user_len - 1] = 0;
47 else
48 buffer[user_len] = 0;
49 return buffer;
50}
51
52static int
53dasd_devices_show(struct seq_file *m, void *v)
54{
55 struct dasd_device *device;
56 char *substr;
57
58 device = dasd_device_from_devindex((unsigned long) v - 1);
59 if (IS_ERR(device))
60 return 0;
61 /* Print device number. */
62 seq_printf(m, "%s", device->cdev->dev.bus_id);
63 /* Print discipline string. */
64 if (device != NULL && device->discipline != NULL)
65 seq_printf(m, "(%s)", device->discipline->name);
66 else
67 seq_printf(m, "(none)");
68 /* Print kdev. */
69 if (device->gdp)
70 seq_printf(m, " at (%3d:%6d)",
71 device->gdp->major, device->gdp->first_minor);
72 else
73 seq_printf(m, " at (???:??????)");
74 /* Print device name. */
75 if (device->gdp)
76 seq_printf(m, " is %-8s", device->gdp->disk_name);
77 else
78 seq_printf(m, " is ????????");
79 /* Print devices features. */
80 substr = test_bit(DASD_FLAG_RO, &device->flags) ? "(ro)" : " ";
81 seq_printf(m, "%4s: ", substr);
82 /* Print device status information. */
83 switch ((device != NULL) ? device->state : -1) {
84 case -1:
85 seq_printf(m, "unknown");
86 break;
87 case DASD_STATE_NEW:
88 seq_printf(m, "new");
89 break;
90 case DASD_STATE_KNOWN:
91 seq_printf(m, "detected");
92 break;
93 case DASD_STATE_BASIC:
94 seq_printf(m, "basic");
95 break;
96 case DASD_STATE_READY:
97 case DASD_STATE_ONLINE:
98 seq_printf(m, "active ");
99 if (dasd_check_blocksize(device->bp_block))
100 seq_printf(m, "n/f ");
101 else
102 seq_printf(m,
103 "at blocksize: %d, %ld blocks, %ld MB",
104 device->bp_block, device->blocks,
105 ((device->bp_block >> 9) *
106 device->blocks) >> 11);
107 break;
108 default:
109 seq_printf(m, "no stat");
110 break;
111 }
112 dasd_put_device(device);
113 if (dasd_probeonly)
114 seq_printf(m, "(probeonly)");
115 seq_printf(m, "\n");
116 return 0;
117}
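
/*
 * Editor's note -- a line produced by the function above might look
 * like this (all values invented):
 *
 *	0.0.0150(ECKD) at ( 94:     0) is dasda       : active at
 *	blocksize: 4096, 600840 blocks, 2347 MB
 */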
118
119static void *dasd_devices_start(struct seq_file *m, loff_t *pos)
120{
121 if (*pos >= dasd_max_devindex)
122 return NULL;
123 return (void *)((unsigned long) *pos + 1);
124}
125
126static void *dasd_devices_next(struct seq_file *m, void *v, loff_t *pos)
127{
128 ++*pos;
129 return dasd_devices_start(m, pos);
130}
131
132static void dasd_devices_stop(struct seq_file *m, void *v)
133{
134}
135
136static struct seq_operations dasd_devices_seq_ops = {
137 .start = dasd_devices_start,
138 .next = dasd_devices_next,
139 .stop = dasd_devices_stop,
140 .show = dasd_devices_show,
141};
142
143static int dasd_devices_open(struct inode *inode, struct file *file)
144{
145 return seq_open(file, &dasd_devices_seq_ops);
146}
147
148static struct file_operations dasd_devices_file_ops = {
149 .open = dasd_devices_open,
150 .read = seq_read,
151 .llseek = seq_lseek,
152 .release = seq_release,
153};
154
155static inline int
156dasd_calc_metrics(char *page, char **start, off_t off,
157 int count, int *eof, int len)
158{
159 len = (len > off) ? len - off : 0;
160 if (len > count)
161 len = count;
162 if (len < count)
163 *eof = 1;
164 *start = page + off;
165 return len;
166}
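
/*
 * Editor's note -- example for the window arithmetic above: with a
 * rendered length of 100, off = 40 and count = 50 the caller gets 50
 * bytes starting at page + 40 and *eof stays clear; with off = 80
 * only 20 bytes remain, so len < count and *eof is set.
 */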
167
168static inline char *
169dasd_statistics_array(char *str, int *array, int shift)
170{
171 int i;
172
173 for (i = 0; i < 32; i++) {
174 str += sprintf(str, "%7d ", array[i] >> shift);
175 if (i == 15)
176 str += sprintf(str, "\n");
177 }
178 str += sprintf(str,"\n");
179 return str;
180}
181
182static int
183dasd_statistics_read(char *page, char **start, off_t off,
184 int count, int *eof, void *data)
185{
186 unsigned long len;
187#ifdef CONFIG_DASD_PROFILE
188 struct dasd_profile_info_t *prof;
189 char *str;
190 int shift;
191
192 /* check for active profiling */
193 if (dasd_profile_level == DASD_PROFILE_OFF) {
194 len = sprintf(page, "Statistics are off - they might be "
195 "switched on using 'echo set on > "
196 "/proc/dasd/statistics'\n");
197 return dasd_calc_metrics(page, start, off, count, eof, len);
198 }
199
200 prof = &dasd_global_profile;
201	/* prevent counter 'overflow' on output */
202 for (shift = 0; (prof->dasd_io_reqs >> shift) > 9999999; shift++);
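	/* e.g. with 15 000 000 requests shift ends up as 1 and every
	 * histogram counter below is printed halved, keeping the
	 * columns within seven digits */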
203
204 str = page;
205 str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs);
206 str += sprintf(str, "with %d sectors(512B each)\n",
207 prof->dasd_io_sects);
208 str += sprintf(str,
209 " __<4 ___8 __16 __32 __64 _128 "
210 " _256 _512 __1k __2k __4k __8k "
211 " _16k _32k _64k 128k\n");
212 str += sprintf(str,
213 " _256 _512 __1M __2M __4M __8M "
214 " _16M _32M _64M 128M 256M 512M "
215 " __1G __2G __4G " " _>4G\n");
216
217 str += sprintf(str, "Histogram of sizes (512B secs)\n");
218 str = dasd_statistics_array(str, prof->dasd_io_secs, shift);
219 str += sprintf(str, "Histogram of I/O times (microseconds)\n");
220 str = dasd_statistics_array(str, prof->dasd_io_times, shift);
221 str += sprintf(str, "Histogram of I/O times per sector\n");
222 str = dasd_statistics_array(str, prof->dasd_io_timps, shift);
223 str += sprintf(str, "Histogram of I/O time till ssch\n");
224 str = dasd_statistics_array(str, prof->dasd_io_time1, shift);
225 str += sprintf(str, "Histogram of I/O time between ssch and irq\n");
226 str = dasd_statistics_array(str, prof->dasd_io_time2, shift);
227 str += sprintf(str, "Histogram of I/O time between ssch "
228 "and irq per sector\n");
229 str = dasd_statistics_array(str, prof->dasd_io_time2ps, shift);
230 str += sprintf(str, "Histogram of I/O time between irq and end\n");
231 str = dasd_statistics_array(str, prof->dasd_io_time3, shift);
232 str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n");
233 str = dasd_statistics_array(str, prof->dasd_io_nr_req, shift);
234 len = str - page;
235#else
236 len = sprintf(page, "Statistics are not activated in this kernel\n");
237#endif
238 return dasd_calc_metrics(page, start, off, count, eof, len);
239}
240
241static int
242dasd_statistics_write(struct file *file, const char __user *user_buf,
243 unsigned long user_len, void *data)
244{
245#ifdef CONFIG_DASD_PROFILE
246 char *buffer, *str;
247
248 if (user_len > 65536)
249 user_len = 65536;
250 buffer = dasd_get_user_string(user_buf, user_len);
251 if (IS_ERR(buffer))
252 return PTR_ERR(buffer);
253	MESSAGE_LOG(KERN_INFO, "/proc/dasd/statistics: '%s'", buffer);
254
255 /* check for valid verbs */
256 for (str = buffer; isspace(*str); str++);
257 if (strncmp(str, "set", 3) == 0 && isspace(str[3])) {
258 /* 'set xxx' was given */
259 for (str = str + 4; isspace(*str); str++);
260 if (strcmp(str, "on") == 0) {
261 /* switch on statistics profiling */
262 dasd_profile_level = DASD_PROFILE_ON;
263 MESSAGE(KERN_INFO, "%s", "Statistics switched on");
264 } else if (strcmp(str, "off") == 0) {
265 /* switch off and reset statistics profiling */
266 memset(&dasd_global_profile,
267 0, sizeof (struct dasd_profile_info_t));
268 dasd_profile_level = DASD_PROFILE_OFF;
269 MESSAGE(KERN_INFO, "%s", "Statistics switched off");
270 } else
271 goto out_error;
272 } else if (strncmp(str, "reset", 5) == 0) {
273 /* reset the statistics */
274 memset(&dasd_global_profile, 0,
275 sizeof (struct dasd_profile_info_t));
276 MESSAGE(KERN_INFO, "%s", "Statistics reset");
277 } else
278 goto out_error;
279 kfree(buffer);
280 return user_len;
281out_error:
282 MESSAGE(KERN_WARNING, "%s",
283 "/proc/dasd/statistics: only 'set on', 'set off' "
284 "and 'reset' are supported verbs");
285 kfree(buffer);
286 return -EINVAL;
287#else
288 MESSAGE(KERN_WARNING, "%s",
289 "/proc/dasd/statistics: is not activated in this kernel");
290 return user_len;
291#endif /* CONFIG_DASD_PROFILE */
292}
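
/*
 * Editor's note -- the verbs accepted above, as shell commands:
 *
 *	echo set on > /proc/dasd/statistics
 *	echo set off > /proc/dasd/statistics
 *	echo reset > /proc/dasd/statistics
 */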
293
294int
295dasd_proc_init(void)
296{
297 dasd_proc_root_entry = proc_mkdir("dasd", &proc_root);
298 dasd_proc_root_entry->owner = THIS_MODULE;
299 dasd_devices_entry = create_proc_entry("devices",
300 S_IFREG | S_IRUGO | S_IWUSR,
301 dasd_proc_root_entry);
302 dasd_devices_entry->proc_fops = &dasd_devices_file_ops;
303 dasd_devices_entry->owner = THIS_MODULE;
304 dasd_statistics_entry = create_proc_entry("statistics",
305 S_IFREG | S_IRUGO | S_IWUSR,
306 dasd_proc_root_entry);
307 dasd_statistics_entry->read_proc = dasd_statistics_read;
308 dasd_statistics_entry->write_proc = dasd_statistics_write;
309 dasd_statistics_entry->owner = THIS_MODULE;
310 return 0;
311}
312
313void
314dasd_proc_exit(void)
315{
316 remove_proc_entry("devices", dasd_proc_root_entry);
317 remove_proc_entry("statistics", dasd_proc_root_entry);
318 remove_proc_entry("dasd", &proc_root);
319}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
new file mode 100644
index 000000000000..a66b17b65296
--- /dev/null
+++ b/drivers/s390/block/dcssblk.c
@@ -0,0 +1,775 @@
1/*
2 * dcssblk.c -- the S/390 block driver for dcss memory
3 *
4 * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
5 */
6
7#include <linux/module.h>
8#include <linux/moduleparam.h>
9#include <linux/ctype.h>
10#include <linux/errno.h>
11#include <linux/init.h>
12#include <linux/slab.h>
13#include <linux/blkdev.h>
14#include <asm/extmem.h>
15#include <asm/io.h>
16#include <linux/completion.h>
17#include <linux/interrupt.h>
18#include <asm/ccwdev.h> // for s390_root_dev_(un)register()
19
20//#define DCSSBLK_DEBUG /* Debug messages on/off */
21#define DCSSBLK_NAME "dcssblk"
22#define DCSSBLK_MINORS_PER_DISK 1
23#define DCSSBLK_PARM_LEN 400
24
25#ifdef DCSSBLK_DEBUG
26#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSSBLK_NAME " debug: " x)
27#else
28#define PRINT_DEBUG(x...) do {} while (0)
29#endif
30#define PRINT_INFO(x...) printk(KERN_INFO DCSSBLK_NAME " info: " x)
31#define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x)
32#define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x)
33
34
35static int dcssblk_open(struct inode *inode, struct file *filp);
36static int dcssblk_release(struct inode *inode, struct file *filp);
37static int dcssblk_make_request(struct request_queue *q, struct bio *bio);
38
39static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
40
41static int dcssblk_major;
42static struct block_device_operations dcssblk_devops = {
43 .owner = THIS_MODULE,
44 .open = dcssblk_open,
45 .release = dcssblk_release,
46};
47
48static ssize_t dcssblk_add_store(struct device * dev, const char * buf,
49 size_t count);
50static ssize_t dcssblk_remove_store(struct device * dev, const char * buf,
51 size_t count);
52static ssize_t dcssblk_save_store(struct device * dev, const char * buf,
53 size_t count);
54static ssize_t dcssblk_save_show(struct device *dev, char *buf);
55static ssize_t dcssblk_shared_store(struct device * dev, const char * buf,
56 size_t count);
57static ssize_t dcssblk_shared_show(struct device *dev, char *buf);
58
59static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
60static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);
61static DEVICE_ATTR(save, S_IWUSR | S_IRUGO, dcssblk_save_show,
62 dcssblk_save_store);
63static DEVICE_ATTR(shared, S_IWUSR | S_IRUGO, dcssblk_shared_show,
64 dcssblk_shared_store);
65
66static struct device *dcssblk_root_dev;
67
68struct dcssblk_dev_info {
69 struct list_head lh;
70 struct device dev;
71 char segment_name[BUS_ID_SIZE];
72 atomic_t use_count;
73 struct gendisk *gd;
74 unsigned long start;
75 unsigned long end;
76 int segment_type;
77 unsigned char save_pending;
78 unsigned char is_shared;
79 struct request_queue *dcssblk_queue;
80};
81
82static struct list_head dcssblk_devices = LIST_HEAD_INIT(dcssblk_devices);
83static struct rw_semaphore dcssblk_devices_sem;
84
85/*
86 * release function for segment device.
87 */
88static void
89dcssblk_release_segment(struct device *dev)
90{
91 PRINT_DEBUG("segment release fn called for %s\n", dev->bus_id);
92 kfree(container_of(dev, struct dcssblk_dev_info, dev));
93 module_put(THIS_MODULE);
94}
95
96/*
 97 * get a minor number. needs to be called with
 98 * down_write(&dcssblk_devices_sem) held, and the
 99 * device needs to be enqueued before the semaphore is
 100 * released.
101 */
102static inline int
103dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
104{
105 int minor, found;
106 struct dcssblk_dev_info *entry;
107
108 if (dev_info == NULL)
109 return -EINVAL;
110 for (minor = 0; minor < (1<<MINORBITS); minor++) {
111 found = 0;
112 // test if minor available
113 list_for_each_entry(entry, &dcssblk_devices, lh)
114 if (minor == entry->gd->first_minor)
115 found++;
116 if (!found) break; // got unused minor
117 }
118 if (found)
119 return -EBUSY;
120 dev_info->gd->first_minor = minor;
121 return 0;
122}
123
124/*
125 * get the struct dcssblk_dev_info from dcssblk_devices
126 * for the given name.
127 * down_read(&dcssblk_devices_sem) must be held.
128 */
129static struct dcssblk_dev_info *
130dcssblk_get_device_by_name(char *name)
131{
132 struct dcssblk_dev_info *entry;
133
134 list_for_each_entry(entry, &dcssblk_devices, lh) {
135 if (!strcmp(name, entry->segment_name)) {
136 return entry;
137 }
138 }
139 return NULL;
140}
141
142/*
143 * print appropriate error message for segment_load()/segment_type()
144 * return code
145 */
146static void
147dcssblk_segment_warn(int rc, char* seg_name)
148{
149 switch (rc) {
150 case -ENOENT:
151 PRINT_WARN("cannot load/query segment %s, does not exist\n",
152 seg_name);
153 break;
154 case -ENOSYS:
155 PRINT_WARN("cannot load/query segment %s, not running on VM\n",
156 seg_name);
157 break;
158 case -EIO:
159 PRINT_WARN("cannot load/query segment %s, hardware error\n",
160 seg_name);
161 break;
162 case -ENOTSUPP:
163 PRINT_WARN("cannot load/query segment %s, is a multi-part "
164 "segment\n", seg_name);
165 break;
166 case -ENOSPC:
167 PRINT_WARN("cannot load/query segment %s, overlaps with "
168 "storage\n", seg_name);
169 break;
170 case -EBUSY:
171 PRINT_WARN("cannot load/query segment %s, overlaps with "
172 "already loaded dcss\n", seg_name);
173 break;
174 case -EPERM:
175 PRINT_WARN("cannot load/query segment %s, already loaded in "
176 "incompatible mode\n", seg_name);
177 break;
178 case -ENOMEM:
179 PRINT_WARN("cannot load/query segment %s, out of memory\n",
180 seg_name);
181 break;
182 case -ERANGE:
183 PRINT_WARN("cannot load/query segment %s, exceeds kernel "
184 "mapping range\n", seg_name);
185 break;
186 default:
187 PRINT_WARN("cannot load/query segment %s, return value %i\n",
188 seg_name, rc);
189 break;
190 }
191}
192
193/*
194 * device attribute for switching shared/nonshared (exclusive)
195 * operation (show + store)
196 */
197static ssize_t
198dcssblk_shared_show(struct device *dev, char *buf)
199{
200 struct dcssblk_dev_info *dev_info;
201
202 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
203 return sprintf(buf, dev_info->is_shared ? "1\n" : "0\n");
204}
205
206static ssize_t
207dcssblk_shared_store(struct device *dev, const char *inbuf, size_t count)
208{
209 struct dcssblk_dev_info *dev_info;
210 int rc;
211
212 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) {
213 PRINT_WARN("Invalid value, must be 0 or 1\n");
214 return -EINVAL;
215 }
216 down_write(&dcssblk_devices_sem);
217 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
218 if (atomic_read(&dev_info->use_count)) {
219 PRINT_ERR("share: segment %s is busy!\n",
220 dev_info->segment_name);
221 rc = -EBUSY;
222 goto out;
223 }
224 if (inbuf[0] == '1') {
225 // reload segment in shared mode
226 rc = segment_modify_shared(dev_info->segment_name,
227 SEGMENT_SHARED);
228 if (rc < 0) {
229 BUG_ON(rc == -EINVAL);
230 if (rc == -EIO || rc == -ENOENT)
231 goto removeseg;
232 } else {
233 dev_info->is_shared = 1;
234 switch (dev_info->segment_type) {
235 case SEG_TYPE_SR:
236 case SEG_TYPE_ER:
237 case SEG_TYPE_SC:
238 set_disk_ro(dev_info->gd,1);
239 }
240 }
241 } else if (inbuf[0] == '0') {
242 // reload segment in exclusive mode
243 if (dev_info->segment_type == SEG_TYPE_SC) {
244 PRINT_ERR("Segment type SC (%s) cannot be loaded in "
245 "non-shared mode\n", dev_info->segment_name);
246 rc = -EINVAL;
247 goto out;
248 }
249 rc = segment_modify_shared(dev_info->segment_name,
250 SEGMENT_EXCLUSIVE);
251 if (rc < 0) {
252 BUG_ON(rc == -EINVAL);
253 if (rc == -EIO || rc == -ENOENT)
254 goto removeseg;
255 } else {
256 dev_info->is_shared = 0;
257 set_disk_ro(dev_info->gd, 0);
258 }
259 } else {
260 PRINT_WARN("Invalid value, must be 0 or 1\n");
261 rc = -EINVAL;
262 goto out;
263 }
264 rc = count;
265 goto out;
266
267removeseg:
268 PRINT_ERR("Could not reload segment %s, removing it now!\n",
269 dev_info->segment_name);
270 list_del(&dev_info->lh);
271
272 del_gendisk(dev_info->gd);
273 blk_put_queue(dev_info->dcssblk_queue);
274 dev_info->gd->queue = NULL;
275 put_disk(dev_info->gd);
276 device_unregister(dev);
277 put_device(dev);
278out:
279 up_write(&dcssblk_devices_sem);
280 return rc;
281}
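/*
 * Usage sketch for the shared attribute (segment name MYDCSS is made up):
 *	echo 1 > /sys/devices/dcssblk/MYDCSS/shared	# reload shared
 *	echo 0 > /sys/devices/dcssblk/MYDCSS/shared	# reload exclusive
 * Both writes fail with -EBUSY while the device is open, and a failed
 * reload (-EIO/-ENOENT) removes the device altogether.
 */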
282
283/*
284 * device attribute for save operation on current copy
285 * of the segment. If the segment is busy, saving will
286 * become pending until it gets released, which can be
287 * undone by storing a non-true value to this entry.
288 * (show + store)
289 */
290static ssize_t
291dcssblk_save_show(struct device *dev, char *buf)
292{
293 struct dcssblk_dev_info *dev_info;
294
295 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
296 return sprintf(buf, dev_info->save_pending ? "1\n" : "0\n");
297}
298
299static ssize_t
300dcssblk_save_store(struct device *dev, const char *inbuf, size_t count)
301{
302 struct dcssblk_dev_info *dev_info;
303
304 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) {
305 PRINT_WARN("Invalid value, must be 0 or 1\n");
306 return -EINVAL;
307 }
308 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
309
310 down_write(&dcssblk_devices_sem);
311 if (inbuf[0] == '1') {
312 if (atomic_read(&dev_info->use_count) == 0) {
313 // device is idle => we save immediately
314 PRINT_INFO("Saving segment %s\n",
315 dev_info->segment_name);
316 segment_save(dev_info->segment_name);
317 } else {
318 // device is busy => we save it when it becomes
319 // idle in dcssblk_release
320 PRINT_INFO("Segment %s is currently busy, it will "
321 "be saved when it becomes idle...\n",
322 dev_info->segment_name);
323 dev_info->save_pending = 1;
324 }
325 } else if (inbuf[0] == '0') {
326 if (dev_info->save_pending) {
327 // device is busy & the user wants to undo his save
328 // request
329 dev_info->save_pending = 0;
330 PRINT_INFO("Pending save for segment %s deactivated\n",
331 dev_info->segment_name);
332 }
333 } else {
334 up_write(&dcssblk_devices_sem);
335 PRINT_WARN("Invalid value, must be 0 or 1\n");
336 return -EINVAL;
337 }
338 up_write(&dcssblk_devices_sem);
339 return count;
340}
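/*
 * Usage sketch for the save attribute (segment name MYDCSS is made up):
 *	echo 1 > /sys/devices/dcssblk/MYDCSS/save	# save now, or mark pending
 *	echo 0 > /sys/devices/dcssblk/MYDCSS/save	# cancel a pending save
 * A pending save is carried out by dcssblk_release() when the last
 * opener closes the device.
 */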
341
342/*
343 * device attribute for adding devices
344 */
345static ssize_t
346dcssblk_add_store(struct device *dev, const char *buf, size_t count)
347{
348 int rc, i;
349 struct dcssblk_dev_info *dev_info;
350 char *local_buf;
351 unsigned long seg_byte_size;
352
353 dev_info = NULL;
354 if (dev != dcssblk_root_dev) {
355 rc = -EINVAL;
356 goto out_nobuf;
357 }
358 local_buf = kmalloc(count + 1, GFP_KERNEL);
359 if (local_buf == NULL) {
360 rc = -ENOMEM;
361 goto out_nobuf;
362 }
363 /*
364 * parse input
365 */
366 for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) {
367 local_buf[i] = toupper(buf[i]);
368 }
369 local_buf[i] = '\0';
370 if ((i == 0) || (i > 8)) {
371 rc = -ENAMETOOLONG;
372 goto out;
373 }
374 /*
375 * already loaded?
376 */
377 down_read(&dcssblk_devices_sem);
378 dev_info = dcssblk_get_device_by_name(local_buf);
379 up_read(&dcssblk_devices_sem);
380 if (dev_info != NULL) {
381 PRINT_WARN("Segment %s already loaded!\n", local_buf);
382 rc = -EEXIST;
383 goto out;
384 }
385 /*
386 * get a struct dcssblk_dev_info
387 */
388 dev_info = kmalloc(sizeof(struct dcssblk_dev_info), GFP_KERNEL);
389 if (dev_info == NULL) {
390 rc = -ENOMEM;
391 goto out;
392 }
393 memset(dev_info, 0, sizeof(struct dcssblk_dev_info));
394
395 strcpy(dev_info->segment_name, local_buf);
396 strlcpy(dev_info->dev.bus_id, local_buf, BUS_ID_SIZE);
397 dev_info->dev.release = dcssblk_release_segment;
398 INIT_LIST_HEAD(&dev_info->lh);
399
400 dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK);
401 if (dev_info->gd == NULL) {
402 rc = -ENOMEM;
403 goto free_dev_info;
404 }
405 dev_info->gd->major = dcssblk_major;
406 dev_info->gd->fops = &dcssblk_devops;
407 dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL);
408 dev_info->gd->queue = dev_info->dcssblk_queue;
409 dev_info->gd->private_data = dev_info;
410 dev_info->gd->driverfs_dev = &dev_info->dev;
411 /*
412 * load the segment
413 */
414 rc = segment_load(local_buf, SEGMENT_SHARED,
415 &dev_info->start, &dev_info->end);
416 if (rc < 0) {
417 dcssblk_segment_warn(rc, dev_info->segment_name);
418 goto dealloc_gendisk;
419 }
420 seg_byte_size = (dev_info->end - dev_info->start + 1);
421 set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
422 PRINT_INFO("Loaded segment %s, size = %lu Byte, "
423 "capacity = %lu (512 Byte) sectors\n", local_buf,
424 seg_byte_size, seg_byte_size >> 9);
425
426 dev_info->segment_type = rc;
427 dev_info->save_pending = 0;
428 dev_info->is_shared = 1;
429 dev_info->dev.parent = dcssblk_root_dev;
430
431 /*
432 * get minor, add to list
433 */
434 down_write(&dcssblk_devices_sem);
435 rc = dcssblk_assign_free_minor(dev_info);
436 if (rc) {
437 up_write(&dcssblk_devices_sem);
438 PRINT_ERR("No free minor number available! "
439 "Unloading segment...\n");
440 goto unload_seg;
441 }
442 sprintf(dev_info->gd->disk_name, "dcssblk%d",
443 dev_info->gd->first_minor);
444 list_add_tail(&dev_info->lh, &dcssblk_devices);
445
446 if (!try_module_get(THIS_MODULE)) {
447 rc = -ENODEV;
448 goto list_del;
449 }
450 /*
451 * register the device
452 */
453 rc = device_register(&dev_info->dev);
454 if (rc) {
455 PRINT_ERR("Segment %s could not be registered RC=%d\n",
456 local_buf, rc);
457 module_put(THIS_MODULE);
458 goto list_del;
459 }
460 get_device(&dev_info->dev);
461 rc = device_create_file(&dev_info->dev, &dev_attr_shared);
462 if (rc)
463 goto unregister_dev;
464 rc = device_create_file(&dev_info->dev, &dev_attr_save);
465 if (rc)
466 goto unregister_dev;
467
468 add_disk(dev_info->gd);
469
470 blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
471 blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
472
473 switch (dev_info->segment_type) {
474 case SEG_TYPE_SR:
475 case SEG_TYPE_ER:
476 case SEG_TYPE_SC:
477 set_disk_ro(dev_info->gd,1);
478 break;
479 default:
480 set_disk_ro(dev_info->gd,0);
481 break;
482 }
483 PRINT_DEBUG("Segment %s loaded successfully\n", local_buf);
484 up_write(&dcssblk_devices_sem);
485 rc = count;
486 goto out;
487
488unregister_dev:
489 PRINT_ERR("device_create_file() failed!\n");
490 list_del(&dev_info->lh);
491 blk_put_queue(dev_info->dcssblk_queue);
492 dev_info->gd->queue = NULL;
493 put_disk(dev_info->gd);
494 device_unregister(&dev_info->dev);
495 segment_unload(dev_info->segment_name);
496 put_device(&dev_info->dev);
497 up_write(&dcssblk_devices_sem);
498 goto out;
499list_del:
500 list_del(&dev_info->lh);
501 up_write(&dcssblk_devices_sem);
502unload_seg:
503 segment_unload(local_buf);
504dealloc_gendisk:
505 blk_put_queue(dev_info->dcssblk_queue);
506 dev_info->gd->queue = NULL;
507 put_disk(dev_info->gd);
508free_dev_info:
509 kfree(dev_info);
510out:
511 kfree(local_buf);
512out_nobuf:
513 return rc;
514}
515
516/*
517 * device attribute for removing devices
518 */
519static ssize_t
520dcssblk_remove_store(struct device *dev, const char *buf, size_t count)
521{
522 struct dcssblk_dev_info *dev_info;
523 int rc, i;
524 char *local_buf;
525
526 if (dev != dcssblk_root_dev) {
527 return -EINVAL;
528 }
529 local_buf = kmalloc(count + 1, GFP_KERNEL);
530 if (local_buf == NULL) {
531 return -ENOMEM;
532 }
533 /*
534 * parse input
535 */
536 for (i = 0; ((*(buf+i)!='\0') && (*(buf+i)!='\n') && i < count); i++) {
537 local_buf[i] = toupper(buf[i]);
538 }
539 local_buf[i] = '\0';
540 if ((i == 0) || (i > 8)) {
541 rc = -ENAMETOOLONG;
542 goto out_buf;
543 }
544
545 down_write(&dcssblk_devices_sem);
546 dev_info = dcssblk_get_device_by_name(local_buf);
547 if (dev_info == NULL) {
548 up_write(&dcssblk_devices_sem);
549 PRINT_WARN("Segment %s is not loaded!\n", local_buf);
550 rc = -ENODEV;
551 goto out_buf;
552 }
553 if (atomic_read(&dev_info->use_count) != 0) {
554 up_write(&dcssblk_devices_sem);
555 PRINT_WARN("Segment %s is in use!\n", local_buf);
556 rc = -EBUSY;
557 goto out_buf;
558 }
559 list_del(&dev_info->lh);
560
561 del_gendisk(dev_info->gd);
562 blk_put_queue(dev_info->dcssblk_queue);
563 dev_info->gd->queue = NULL;
564 put_disk(dev_info->gd);
565 device_unregister(&dev_info->dev);
566 segment_unload(dev_info->segment_name);
567 PRINT_DEBUG("Segment %s unloaded successfully\n",
568 dev_info->segment_name);
569 put_device(&dev_info->dev);
570 up_write(&dcssblk_devices_sem);
571
572 rc = count;
573out_buf:
574 kfree(local_buf);
575 return rc;
576}
577
578static int
579dcssblk_open(struct inode *inode, struct file *filp)
580{
581 struct dcssblk_dev_info *dev_info;
582 int rc;
583
584 dev_info = inode->i_bdev->bd_disk->private_data;
585 if (NULL == dev_info) {
586 rc = -ENODEV;
587 goto out;
588 }
589 atomic_inc(&dev_info->use_count);
590 inode->i_bdev->bd_block_size = 4096;
591 rc = 0;
592out:
593 return rc;
594}
595
596static int
597dcssblk_release(struct inode *inode, struct file *filp)
598{
599 struct dcssblk_dev_info *dev_info;
600 int rc;
601
602 dev_info = inode->i_bdev->bd_disk->private_data;
603 if (NULL == dev_info) {
604 rc = -ENODEV;
605 goto out;
606 }
607 down_write(&dcssblk_devices_sem);
608 if (atomic_dec_and_test(&dev_info->use_count)
609 && (dev_info->save_pending)) {
610 PRINT_INFO("Segment %s became idle and is being saved now\n",
611 dev_info->segment_name);
612 segment_save(dev_info->segment_name);
613 dev_info->save_pending = 0;
614 }
615 up_write(&dcssblk_devices_sem);
616 rc = 0;
617out:
618 return rc;
619}
620
621static int
622dcssblk_make_request(request_queue_t *q, struct bio *bio)
623{
624 struct dcssblk_dev_info *dev_info;
625 struct bio_vec *bvec;
626 unsigned long index;
627 unsigned long page_addr;
628 unsigned long source_addr;
629 unsigned long bytes_done;
630 int i;
631
632 bytes_done = 0;
633 dev_info = bio->bi_bdev->bd_disk->private_data;
634 if (dev_info == NULL)
635 goto fail;
636 if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
637 /* Request is not page-aligned. */
638 goto fail;
639 if (((bio->bi_size >> 9) + bio->bi_sector)
640 > get_capacity(bio->bi_bdev->bd_disk)) {
641 /* Request beyond end of DCSS segment. */
642 goto fail;
643 }
644 index = (bio->bi_sector >> 3);
645 bio_for_each_segment(bvec, bio, i) {
646 page_addr = (unsigned long)
647 page_address(bvec->bv_page) + bvec->bv_offset;
648 source_addr = dev_info->start + (index<<12) + bytes_done;
 649 if (unlikely((page_addr & 4095) != 0 || (bvec->bv_len & 4095) != 0))
650 // More paranoia.
651 goto fail;
652 if (bio_data_dir(bio) == READ) {
653 memcpy((void*)page_addr, (void*)source_addr,
654 bvec->bv_len);
655 } else {
656 memcpy((void*)source_addr, (void*)page_addr,
657 bvec->bv_len);
658 }
659 bytes_done += bvec->bv_len;
660 }
661 bio_endio(bio, bytes_done, 0);
662 return 0;
663fail:
664 bio_io_error(bio, bytes_done);
665 return 0;
666}
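/*
 * Worked example for the index arithmetic above (numbers are
 * illustrative): a bio starting at sector 16 with bi_size 8192 passes
 * the alignment checks, maps to page index 16 >> 3 = 2, i.e. byte
 * offset 2 << 12 = 8192 from dev_info->start, and is served by plain
 * memcpy in 4096 byte pieces.
 */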
667
668static void
669dcssblk_check_params(void)
670{
671 int rc, i, j, k;
672 char buf[9];
673 struct dcssblk_dev_info *dev_info;
674
675 for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0');
676 i++) {
677 for (j = i; (dcssblk_segments[j] != ',') &&
678 (dcssblk_segments[j] != '\0') &&
679 (dcssblk_segments[j] != '(') &&
680 (j - i) < 8; j++)
681 {
682 buf[j-i] = dcssblk_segments[j];
683 }
684 buf[j-i] = '\0';
685 rc = dcssblk_add_store(dcssblk_root_dev, buf, j-i);
686 if ((rc >= 0) && (dcssblk_segments[j] == '(')) {
687 for (k = 0; buf[k] != '\0'; k++)
688 buf[k] = toupper(buf[k]);
689 if (!strncmp(&dcssblk_segments[j], "(local)", 7)) {
690 down_read(&dcssblk_devices_sem);
691 dev_info = dcssblk_get_device_by_name(buf);
692 up_read(&dcssblk_devices_sem);
693 if (dev_info)
694 dcssblk_shared_store(&dev_info->dev,
695 "0\n", 2);
696 }
697 }
698 while ((dcssblk_segments[j] != ',') &&
699 (dcssblk_segments[j] != '\0'))
700 {
701 j++;
702 }
703 if (dcssblk_segments[j] == '\0')
704 break;
705 i = j;
706 }
707}
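/*
 * Example for the parser above (segment names are made up):
 * segments="DCSS1,DCSS2(local)" adds DCSS1 and DCSS2 via
 * dcssblk_add_store() and then writes "0\n" to the shared attribute
 * of DCSS2, i.e. reloads it in exclusive mode.
 */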
708
709/*
710 * The init/exit functions.
711 */
712static void __exit
713dcssblk_exit(void)
714{
715 int rc;
716
717 PRINT_DEBUG("DCSSBLOCK EXIT...\n");
718 s390_root_dev_unregister(dcssblk_root_dev);
719 rc = unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
720 if (rc) {
721 PRINT_ERR("unregister_blkdev() failed!\n");
722 }
723 PRINT_DEBUG("...finished!\n");
724}
725
726static int __init
727dcssblk_init(void)
728{
729 int rc;
730
731 PRINT_DEBUG("DCSSBLOCK INIT...\n");
732 dcssblk_root_dev = s390_root_dev_register("dcssblk");
733 if (IS_ERR(dcssblk_root_dev)) {
734 PRINT_ERR("device_register() failed!\n");
735 return PTR_ERR(dcssblk_root_dev);
736 }
737 rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
738 if (rc) {
739 PRINT_ERR("device_create_file(add) failed!\n");
740 s390_root_dev_unregister(dcssblk_root_dev);
741 return rc;
742 }
743 rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
744 if (rc) {
745 PRINT_ERR("device_create_file(remove) failed!\n");
746 s390_root_dev_unregister(dcssblk_root_dev);
747 return rc;
748 }
749 rc = register_blkdev(0, DCSSBLK_NAME);
750 if (rc < 0) {
751 PRINT_ERR("Can't get dynamic major!\n");
752 s390_root_dev_unregister(dcssblk_root_dev);
753 return rc;
754 }
755 dcssblk_major = rc;
756 init_rwsem(&dcssblk_devices_sem);
757
758 dcssblk_check_params();
759
760 PRINT_DEBUG("...finished!\n");
761 return 0;
762}
763
764module_init(dcssblk_init);
765module_exit(dcssblk_exit);
766
767module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444);
768MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, "
769 "comma-separated list, each name max. 8 chars.\n"
770 "Adding \"(local)\" to segment name equals echoing 0 to "
771 "/sys/devices/dcssblk/<segment name>/shared after loading "
772 "the segment - \n"
773 "e.g. segments=\"mydcss1,mydcss2,mydcss3(local)\"");
774
775MODULE_LICENSE("GPL");
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
new file mode 100644
index 000000000000..d428c909b8a0
--- /dev/null
+++ b/drivers/s390/block/xpram.c
@@ -0,0 +1,539 @@
1/*
2 * Xpram.c -- the S/390 expanded memory RAM-disk
3 *
4 * significant parts of this code are based on
5 * the sbull device driver presented in
6 * A. Rubini: Linux Device Drivers
7 *
8 * Author of XPRAM specific coding: Reinhard Buendgen
9 * buendgen@de.ibm.com
10 * Rewrite for 2.5: Martin Schwidefsky <schwidefsky@de.ibm.com>
11 *
12 * External interfaces:
13 * Interfaces to linux kernel
14 * xpram_setup: read kernel parameters
15 * Device specific file operations
 16 * xpram_ioctl
17 * xpram_open
18 *
19 * "ad-hoc" partitioning:
20 * the expanded memory can be partitioned among several devices
 21 * (with different minors). The partitioning can be set up
 22 * by kernel or module parameters (int devs & int sizes[])
23 *
24 * Potential future improvements:
25 * generic hard disk support to replace ad-hoc partitioning
26 */
27
28#include <linux/module.h>
29#include <linux/moduleparam.h>
30#include <linux/ctype.h> /* isdigit, isxdigit */
31#include <linux/errno.h>
32#include <linux/init.h>
33#include <linux/slab.h>
34#include <linux/blkdev.h>
35#include <linux/blkpg.h>
36#include <linux/hdreg.h> /* HDIO_GETGEO */
37#include <linux/sysdev.h>
38#include <linux/bio.h>
39#include <linux/devfs_fs_kernel.h>
40#include <asm/uaccess.h>
41
42#define XPRAM_NAME "xpram"
43#define XPRAM_DEVS 1 /* one partition */
44#define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */
45
46#define PRINT_DEBUG(x...) printk(KERN_DEBUG XPRAM_NAME " debug:" x)
47#define PRINT_INFO(x...) printk(KERN_INFO XPRAM_NAME " info:" x)
48#define PRINT_WARN(x...) printk(KERN_WARNING XPRAM_NAME " warning:" x)
49#define PRINT_ERR(x...) printk(KERN_ERR XPRAM_NAME " error:" x)
50
51
52static struct sysdev_class xpram_sysclass = {
53 set_kset_name("xpram"),
54};
55
56static struct sys_device xpram_sys_device = {
57 .id = 0,
58 .cls = &xpram_sysclass,
59};
60
61typedef struct {
62 unsigned int size; /* size of xpram segment in pages */
63 unsigned int offset; /* start page of xpram segment */
64} xpram_device_t;
65
66static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
67static unsigned int xpram_sizes[XPRAM_MAX_DEVS];
68static struct gendisk *xpram_disks[XPRAM_MAX_DEVS];
69static unsigned int xpram_pages;
70static int xpram_devs;
71
72/*
73 * Parameter parsing functions.
74 */
75static int devs = XPRAM_DEVS;
76static unsigned int sizes[XPRAM_MAX_DEVS];
77
78module_param(devs, int, 0);
79module_param_array(sizes, int, NULL, 0);
80
81MODULE_PARM_DESC(devs, "number of devices (\"partitions\"), " \
82 "the default is " __MODULE_STRING(XPRAM_DEVS) "\n");
 83MODULE_PARM_DESC(sizes, "list of device (partition) sizes; " \
 84 "the defaults are 0s\n" \
 85 "All devices with size 0 equally partition the "
 86 "remaining space on the expanded storage not "
87 "claimed by explicit sizes\n");
88MODULE_LICENSE("GPL");
89
90#ifndef MODULE
91/*
92 * Parses the kernel parameters given in the kernel parameter line.
93 * The expected format is
94 * <number_of_partitions>[","<partition_size>]*
95 * where
 96 * <number_of_partitions> is a positive integer that initializes xpram_devs
97 * each size is a non-negative integer possibly followed by a
98 * magnitude (k,K,m,M,g,G), the list of sizes initialises
99 * xpram_sizes
100 *
 101 * Arguments
 102 * str: substring of the kernel parameter line that contains xpram's
 103 * kernel parameters.
 104 *
 105 * Result: 1 if the parameter string was handled, 0 on a parse error.
 106 *
 107 * Side effects
 108 * the global variable devs is set to the value of
 109 * <number_of_partitions> and sizes[i] is set to the i-th
 110 * partition size (if provided). A parse error stops the scan;
 111 * the remainder of the string is ignored with a warning.
112 */
113static int __init xpram_setup (char *str)
114{
115 char *cp;
116 int i;
117
118 devs = simple_strtoul(str, &cp, 10);
119 if (cp <= str || devs > XPRAM_MAX_DEVS)
120 return 0;
121 for (i = 0; (i < devs) && (*cp++ == ','); i++) {
122 sizes[i] = simple_strtoul(cp, &cp, 10);
123 if (*cp == 'g' || *cp == 'G') {
124 sizes[i] <<= 20;
125 cp++;
126 } else if (*cp == 'm' || *cp == 'M') {
127 sizes[i] <<= 10;
128 cp++;
129 } else if (*cp == 'k' || *cp == 'K')
130 cp++;
131 while (isspace(*cp)) cp++;
132 }
133 if (*cp == ',' && i >= devs)
134 PRINT_WARN("partition sizes list has too many entries.\n");
135 else if (*cp != 0)
136 PRINT_WARN("ignored '%s' at end of parameter string.\n", cp);
137 return 1;
138}
139
140__setup("xpram_parts=", xpram_setup);
141#endif
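/*
 * Illustrative kernel parameter (sizes are made up): booting with
 * "xpram_parts=3,512k,2m" yields three partitions; the first two get
 * 512 kB and 2048 kB, the third (size 0) is sized automatically from
 * the remaining expanded storage by xpram_setup_sizes().
 */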
142
143/*
144 * Copy expanded memory page (4kB) into main memory
145 * Arguments
146 * page_addr: address of target page
 147 * xpage_index: index of expanded memory page
148 * Return value
149 * 0: if operation succeeds
150 * -EIO: if pgin failed
151 * -ENXIO: if xpram has vanished
152 */
153static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
154{
155 int cc;
156
157 __asm__ __volatile__ (
158 " lhi %0,2\n" /* return unused cc 2 if pgin traps */
159 " .insn rre,0xb22e0000,%1,%2\n" /* pgin %1,%2 */
160 "0: ipm %0\n"
161 " srl %0,28\n"
162 "1:\n"
163#ifndef CONFIG_ARCH_S390X
164 ".section __ex_table,\"a\"\n"
165 " .align 4\n"
166 " .long 0b,1b\n"
167 ".previous"
168#else
169 ".section __ex_table,\"a\"\n"
170 " .align 8\n"
171 " .quad 0b,1b\n"
172 ".previous"
173#endif
174 : "=&d" (cc)
175 : "a" (__pa(page_addr)), "a" (xpage_index)
176 : "cc" );
177 if (cc == 3)
178 return -ENXIO;
179 if (cc == 2) {
180 PRINT_ERR("expanded storage lost!\n");
181 return -ENXIO;
182 }
183 if (cc == 1) {
184 PRINT_ERR("page in failed for page index %u.\n",
185 xpage_index);
186 return -EIO;
187 }
188 return 0;
189}
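/*
 * Note on the inline assembly above: %0 is preset to 2 by "lhi" and the
 * __ex_table entry redirects a faulting pgin from label 0 to label 1,
 * skipping the ipm/srl sequence that extracts the real condition code.
 * A trapped instruction therefore reports the otherwise unused cc 2,
 * which the caller maps to -ENXIO.
 */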
190
191/*
192 * Copy a 4kB page of main memory to an expanded memory page
193 * Arguments
194 * page_addr: address of source page
 195 * xpage_index: index of expanded memory page
196 * Return value
197 * 0: if operation succeeds
198 * -EIO: if pgout failed
199 * -ENXIO: if xpram has vanished
200 */
201static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
202{
203 int cc;
204
205 __asm__ __volatile__ (
206 " lhi %0,2\n" /* return unused cc 2 if pgout traps */
207 " .insn rre,0xb22f0000,%1,%2\n" /* pgout %1,%2 */
208 "0: ipm %0\n"
209 " srl %0,28\n"
210 "1:\n"
211#ifndef CONFIG_ARCH_S390X
212 ".section __ex_table,\"a\"\n"
213 " .align 4\n"
214 " .long 0b,1b\n"
215 ".previous"
216#else
217 ".section __ex_table,\"a\"\n"
218 " .align 8\n"
219 " .quad 0b,1b\n"
220 ".previous"
221#endif
222 : "=&d" (cc)
223 : "a" (__pa(page_addr)), "a" (xpage_index)
224 : "cc" );
225 if (cc == 3)
226 return -ENXIO;
227 if (cc == 2) {
228 PRINT_ERR("expanded storage lost!\n");
229 return -ENXIO;
230 }
231 if (cc == 1) {
232 PRINT_ERR("page out failed for page index %u.\n",
233 xpage_index);
234 return -EIO;
235 }
236 return 0;
237}
238
239/*
240 * Check if xpram is available.
241 */
242static int __init xpram_present(void)
243{
244 unsigned long mem_page;
245 int rc;
246
247 mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
248 if (!mem_page)
249 return -ENOMEM;
250 rc = xpram_page_in(mem_page, 0);
251 free_page(mem_page);
252 return rc ? -ENXIO : 0;
253}
254
255/*
256 * Return index of the last available xpram page.
257 */
258static unsigned long __init xpram_highest_page_index(void)
259{
260 unsigned int page_index, add_bit;
261 unsigned long mem_page;
262
263 mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
264 if (!mem_page)
265 return 0;
266
267 page_index = 0;
268 add_bit = 1ULL << (sizeof(unsigned int)*8 - 1);
269 while (add_bit > 0) {
270 if (xpram_page_in(mem_page, page_index | add_bit) == 0)
271 page_index |= add_bit;
272 add_bit >>= 1;
273 }
274
275 free_page (mem_page);
276
277 return page_index;
278}
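/*
 * The loop above is a binary search over the page index: each probed
 * bit stays set in page_index only if xpram_page_in() succeeds for it,
 * so 32 probes suffice to find the highest accessible page.
 */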
279
280/*
281 * Block device make request function.
282 */
283static int xpram_make_request(request_queue_t *q, struct bio *bio)
284{
285 xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
286 struct bio_vec *bvec;
287 unsigned int index;
288 unsigned long page_addr;
289 unsigned long bytes;
290 int i;
291
292 if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
293 /* Request is not page-aligned. */
294 goto fail;
295 if ((bio->bi_size >> 12) > xdev->size)
 296 /* Request exceeds size of device. */
297 goto fail;
298 if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset)
299 goto fail;
300 index = (bio->bi_sector >> 3) + xdev->offset;
301 bio_for_each_segment(bvec, bio, i) {
302 page_addr = (unsigned long)
303 kmap(bvec->bv_page) + bvec->bv_offset;
304 bytes = bvec->bv_len;
305 if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
306 /* More paranoia. */
307 goto fail;
308 while (bytes > 0) {
309 if (bio_data_dir(bio) == READ) {
310 if (xpram_page_in(page_addr, index) != 0)
311 goto fail;
312 } else {
313 if (xpram_page_out(page_addr, index) != 0)
314 goto fail;
315 }
316 page_addr += 4096;
317 bytes -= 4096;
318 index++;
319 }
320 }
321 set_bit(BIO_UPTODATE, &bio->bi_flags);
322 bytes = bio->bi_size;
323 bio->bi_size = 0;
324 bio->bi_end_io(bio, bytes, 0);
325 return 0;
326fail:
327 bio_io_error(bio, bio->bi_size);
328 return 0;
329}
330
331static int xpram_ioctl (struct inode *inode, struct file *filp,
332 unsigned int cmd, unsigned long arg)
333{
334 struct hd_geometry __user *geo;
335 unsigned long size;
336 if (cmd != HDIO_GETGEO)
337 return -EINVAL;
338 /*
339 * get geometry: we have to fake one... trim the size to a
 340 * multiple of 64 (32k): claim 16 sectors and 4 heads, with
 341 * however many cylinders result. Data starts at sector 4.
342 */
343 geo = (struct hd_geometry __user *) arg;
344 size = (xpram_pages * 8) & ~0x3f;
345 put_user(size >> 6, &geo->cylinders);
346 put_user(4, &geo->heads);
347 put_user(16, &geo->sectors);
348 put_user(4, &geo->start);
349 return 0;
350}
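/*
 * Worked example for the fake geometry (numbers are illustrative):
 * 65536 pages of expanded storage give 65536 * 8 = 524288 sectors;
 * trimmed to a multiple of 64 and divided by 16 sectors * 4 heads,
 * this reports 524288 >> 6 = 8192 cylinders.
 */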
351
352static struct block_device_operations xpram_devops =
353{
354 .owner = THIS_MODULE,
355 .ioctl = xpram_ioctl,
356};
357
358/*
359 * Setup xpram_sizes array.
360 */
361static int __init xpram_setup_sizes(unsigned long pages)
362{
363 unsigned long mem_needed;
364 unsigned long mem_auto;
365 int mem_auto_no;
366 int i;
367
368 /* Check number of devices. */
369 if (devs <= 0 || devs > XPRAM_MAX_DEVS) {
370 PRINT_ERR("invalid number %d of devices\n",devs);
371 return -EINVAL;
372 }
373 xpram_devs = devs;
374
375 /*
376 * Copy sizes array to xpram_sizes and align partition
377 * sizes to page boundary.
378 */
379 mem_needed = 0;
380 mem_auto_no = 0;
381 for (i = 0; i < xpram_devs; i++) {
382 xpram_sizes[i] = (sizes[i] + 3) & -4UL;
383 if (xpram_sizes[i])
384 mem_needed += xpram_sizes[i];
385 else
386 mem_auto_no++;
387 }
388
389 PRINT_INFO(" number of devices (partitions): %d \n", xpram_devs);
390 for (i = 0; i < xpram_devs; i++) {
391 if (xpram_sizes[i])
392 PRINT_INFO(" size of partition %d: %u kB\n",
393 i, xpram_sizes[i]);
394 else
395 PRINT_INFO(" size of partition %d to be set "
396 "automatically\n",i);
397 }
398 PRINT_DEBUG(" memory needed (for sized partitions): %lu kB\n",
399 mem_needed);
400 PRINT_DEBUG(" partitions to be sized automatically: %d\n",
401 mem_auto_no);
402
403 if (mem_needed > pages * 4) {
404 PRINT_ERR("Not enough expanded memory available\n");
405 return -EINVAL;
406 }
407
408 /*
409 * partitioning:
410 * xpram_sizes[i] != 0; partition i has size xpram_sizes[i] kB
411 * else: ; all partitions with zero xpram_sizes[i]
412 * partition equally the remaining space
413 */
414 if (mem_auto_no) {
415 mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4;
416 PRINT_INFO(" automatically determined "
417 "partition size: %lu kB\n", mem_auto);
418 for (i = 0; i < xpram_devs; i++)
419 if (xpram_sizes[i] == 0)
420 xpram_sizes[i] = mem_auto;
421 }
422 return 0;
423}
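/*
 * Worked example (values are illustrative): with devs=3,
 * sizes={1024,0,0} and pages=4096 (16384 kB), mem_needed is 1024 kB
 * and the two automatic partitions each get
 * ((4096 - 1024/4) / 2) * 4 = 7680 kB.
 */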
424
425static struct request_queue *xpram_queue;
426
427static int __init xpram_setup_blkdev(void)
428{
429 unsigned long offset;
430 int i, rc = -ENOMEM;
431
432 for (i = 0; i < xpram_devs; i++) {
433 struct gendisk *disk = alloc_disk(1);
434 if (!disk)
435 goto out;
436 xpram_disks[i] = disk;
437 }
438
439 /*
440 * Register xpram major.
441 */
442 rc = register_blkdev(XPRAM_MAJOR, XPRAM_NAME);
443 if (rc < 0)
444 goto out;
445
446 devfs_mk_dir("slram");
447
448 /*
449 * Assign the other needed values: make request function, sizes and
450 * hardsect size. All the minor devices feature the same value.
451 */
452 xpram_queue = blk_alloc_queue(GFP_KERNEL);
453 if (!xpram_queue) {
454 rc = -ENOMEM;
455 goto out_unreg;
456 }
457 blk_queue_make_request(xpram_queue, xpram_make_request);
458 blk_queue_hardsect_size(xpram_queue, 4096);
459
460 /*
461 * Setup device structures.
462 */
463 offset = 0;
464 for (i = 0; i < xpram_devs; i++) {
465 struct gendisk *disk = xpram_disks[i];
466
467 xpram_devices[i].size = xpram_sizes[i] / 4;
468 xpram_devices[i].offset = offset;
469 offset += xpram_devices[i].size;
470 disk->major = XPRAM_MAJOR;
471 disk->first_minor = i;
472 disk->fops = &xpram_devops;
473 disk->private_data = &xpram_devices[i];
474 disk->queue = xpram_queue;
475 sprintf(disk->disk_name, "slram%d", i);
476 sprintf(disk->devfs_name, "slram/%d", i);
477 set_capacity(disk, xpram_sizes[i] << 1);
478 add_disk(disk);
479 }
480
481 return 0;
482out_unreg:
483 devfs_remove("slram");
484 unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
485out:
486 while (i--)
487 put_disk(xpram_disks[i]);
488 return rc;
489}
490
491/*
492 * Finally, the init/exit functions.
493 */
494static void __exit xpram_exit(void)
495{
496 int i;
497 for (i = 0; i < xpram_devs; i++) {
498 del_gendisk(xpram_disks[i]);
499 put_disk(xpram_disks[i]);
500 }
501 unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
502 devfs_remove("slram");
503 blk_cleanup_queue(xpram_queue);
504 sysdev_unregister(&xpram_sys_device);
505 sysdev_class_unregister(&xpram_sysclass);
506}
507
508static int __init xpram_init(void)
509{
510 int rc;
511
512 /* Find out size of expanded memory. */
513 if (xpram_present() != 0) {
514 PRINT_WARN("No expanded memory available\n");
515 return -ENODEV;
516 }
517 xpram_pages = xpram_highest_page_index();
518 PRINT_INFO(" %u pages expanded memory found (%lu KB).\n",
519 xpram_pages, (unsigned long) xpram_pages*4);
520 rc = xpram_setup_sizes(xpram_pages);
521 if (rc)
522 return rc;
523 rc = sysdev_class_register(&xpram_sysclass);
524 if (rc)
525 return rc;
526
527 rc = sysdev_register(&xpram_sys_device);
528 if (rc) {
529 sysdev_class_unregister(&xpram_sysclass);
530 return rc;
531 }
532 rc = xpram_setup_blkdev();
533 if (rc)
534 sysdev_unregister(&xpram_sys_device);
535 return rc;
536}
537
538module_init(xpram_init);
539module_exit(xpram_exit);
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
new file mode 100644
index 000000000000..14e8cce9f862
--- /dev/null
+++ b/drivers/s390/char/Makefile
@@ -0,0 +1,28 @@
1#
2# S/390 character devices
3#
4
5obj-y += ctrlchar.o keyboard.o defkeymap.o
6
7obj-$(CONFIG_TN3270) += raw3270.o
8obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
9obj-$(CONFIG_TN3270_TTY) += tty3270.o
10obj-$(CONFIG_TN3270_FS) += fs3270.o
11
12obj-$(CONFIG_TN3215) += con3215.o
13
14obj-$(CONFIG_SCLP) += sclp.o sclp_rw.o sclp_quiesce.o
15obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
16obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
17obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
18obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o
19
20obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o
21obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
22
23tape-$(CONFIG_S390_TAPE_BLOCK) += tape_block.o
24tape-$(CONFIG_PROC_FS) += tape_proc.o
25tape-objs := tape_core.o tape_std.o tape_char.o $(tape-y)
26obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o
27obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
28obj-$(CONFIG_MONREADER) += monreader.o
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
new file mode 100644
index 000000000000..022f17bff731
--- /dev/null
+++ b/drivers/s390/char/con3215.c
@@ -0,0 +1,1192 @@
1/*
2 * drivers/s390/char/con3215.c
3 * 3215 line mode terminal driver.
4 *
5 * S390 version
6 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
8 *
9 * Updated:
10 * Aug-2000: Added tab support
11 * Dan Morrison, IBM Corporation (dmorriso@cse.buffalo.edu)
12 */
13
14#include <linux/config.h>
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/kdev_t.h>
18#include <linux/tty.h>
19#include <linux/vt_kern.h>
20#include <linux/init.h>
21#include <linux/console.h>
22#include <linux/interrupt.h>
23
24#include <linux/slab.h>
25#include <linux/bootmem.h>
26
27#include <asm/ccwdev.h>
28#include <asm/cio.h>
29#include <asm/io.h>
30#include <asm/ebcdic.h>
31#include <asm/uaccess.h>
32#include <asm/delay.h>
33#include <asm/cpcmd.h>
34#include <asm/setup.h>
35
36#include "ctrlchar.h"
37
38#define NR_3215 1
39#define NR_3215_REQ (4*NR_3215)
40#define RAW3215_BUFFER_SIZE 65536 /* output buffer size */
41#define RAW3215_INBUF_SIZE 256 /* input buffer size */
42#define RAW3215_MIN_SPACE 128 /* minimum free space for wakeup */
43#define RAW3215_MIN_WRITE 1024 /* min. length for immediate output */
44#define RAW3215_MAX_BYTES 3968 /* max. bytes to write with one ssch */
45#define RAW3215_MAX_NEWLINE 50 /* max. lines to write with one ssch */
46#define RAW3215_NR_CCWS 3
47#define RAW3215_TIMEOUT HZ/10 /* time for delayed output */
48
 49#define RAW3215_FIXED 1 /* 3215 console device is not to be freed */
50#define RAW3215_ACTIVE 2 /* set if the device is in use */
51#define RAW3215_WORKING 4 /* set if a request is being worked on */
52#define RAW3215_THROTTLED 8 /* set if reading is disabled */
53#define RAW3215_STOPPED 16 /* set if writing is disabled */
54#define RAW3215_CLOSING 32 /* set while in close process */
55#define RAW3215_TIMER_RUNS 64 /* set if the output delay timer is on */
56#define RAW3215_FLUSHING 128 /* set to flush buffer (no delay) */
57
58#define TAB_STOP_SIZE 8 /* tab stop size */
59
60/*
61 * Request types for a 3215 device
62 */
63enum raw3215_type {
64 RAW3215_FREE, RAW3215_READ, RAW3215_WRITE
65};
66
67/*
68 * Request structure for a 3215 device
69 */
70struct raw3215_req {
71 enum raw3215_type type; /* type of the request */
72 int start, len; /* start index & len in output buffer */
73 int delayable; /* indication to wait for more data */
74 int residual; /* residual count for read request */
75 struct ccw1 ccws[RAW3215_NR_CCWS]; /* space for the channel program */
76 struct raw3215_info *info; /* pointer to main structure */
77 struct raw3215_req *next; /* pointer to next request */
78} __attribute__ ((aligned(8)));
79
80struct raw3215_info {
81 struct ccw_device *cdev; /* device for tty driver */
82 spinlock_t *lock; /* pointer to irq lock */
83 int flags; /* state flags */
84 char *buffer; /* pointer to output buffer */
85 char *inbuf; /* pointer to input buffer */
86 int head; /* first free byte in output buffer */
87 int count; /* number of bytes in output buffer */
88 int written; /* number of bytes in write requests */
89 struct tty_struct *tty; /* pointer to tty structure if present */
90 struct tasklet_struct tasklet;
91 struct raw3215_req *queued_read; /* pointer to queued read requests */
92 struct raw3215_req *queued_write;/* pointer to queued write requests */
93 wait_queue_head_t empty_wait; /* wait queue for flushing */
94 struct timer_list timer; /* timer for delayed output */
95 char *message; /* pending message from raw3215_irq */
96 int msg_dstat; /* dstat for pending message */
97 int msg_cstat; /* cstat for pending message */
98 int line_pos; /* position on the line (for tabs) */
99 char ubuffer[80]; /* copy_from_user buffer */
100};
101
102/* array of 3215 devices structures */
103static struct raw3215_info *raw3215[NR_3215];
104/* spinlock to protect the raw3215 array */
105static DEFINE_SPINLOCK(raw3215_device_lock);
106/* list of free request structures */
107static struct raw3215_req *raw3215_freelist;
108/* spinlock to protect free list */
109static spinlock_t raw3215_freelist_lock;
110
111static struct tty_driver *tty3215_driver;
112
113/*
114 * Get a request structure from the free list
115 */
116static inline struct raw3215_req *
117raw3215_alloc_req(void) {
118 struct raw3215_req *req;
119 unsigned long flags;
120
121 spin_lock_irqsave(&raw3215_freelist_lock, flags);
122 req = raw3215_freelist;
123 raw3215_freelist = req->next;
124 spin_unlock_irqrestore(&raw3215_freelist_lock, flags);
125 return req;
126}
127
128/*
129 * Put a request structure back to the free list
130 */
131static inline void
132raw3215_free_req(struct raw3215_req *req) {
133 unsigned long flags;
134
135 if (req->type == RAW3215_FREE)
136 return; /* don't free a free request */
137 req->type = RAW3215_FREE;
138 spin_lock_irqsave(&raw3215_freelist_lock, flags);
139 req->next = raw3215_freelist;
140 raw3215_freelist = req;
141 spin_unlock_irqrestore(&raw3215_freelist_lock, flags);
142}
143
144/*
 145 * Set up a read request that reads up to 160 bytes from the 3215 device.
146 * If there is a queued read request it is used, but that shouldn't happen
147 * because a 3215 terminal won't accept a new read before the old one is
148 * completed.
149 */
150static void
151raw3215_mk_read_req(struct raw3215_info *raw)
152{
153 struct raw3215_req *req;
154 struct ccw1 *ccw;
155
156 /* there can only be ONE read request at a time */
157 req = raw->queued_read;
158 if (req == NULL) {
159 /* no queued read request, use new req structure */
160 req = raw3215_alloc_req();
161 req->type = RAW3215_READ;
162 req->info = raw;
163 raw->queued_read = req;
164 }
165
166 ccw = req->ccws;
167 ccw->cmd_code = 0x0A; /* read inquiry */
168 ccw->flags = 0x20; /* ignore incorrect length */
169 ccw->count = 160;
170 ccw->cda = (__u32) __pa(raw->inbuf);
171}
172
173/*
174 * Set up a write request with the information from the main structure.
175 * A ccw chain is created that writes as much as possible from the output
176 * buffer to the 3215 device. If a queued write exists it is replaced by
177 * the new, probably lengthened request.
178 */
179static void
180raw3215_mk_write_req(struct raw3215_info *raw)
181{
182 struct raw3215_req *req;
183 struct ccw1 *ccw;
184 int len, count, ix, lines;
185
186 if (raw->count <= raw->written)
187 return;
188 /* check if there is a queued write request */
189 req = raw->queued_write;
190 if (req == NULL) {
191 /* no queued write request, use new req structure */
192 req = raw3215_alloc_req();
193 req->type = RAW3215_WRITE;
194 req->info = raw;
195 raw->queued_write = req;
196 } else {
197 raw->written -= req->len;
198 }
199
200 ccw = req->ccws;
201 req->start = (raw->head - raw->count + raw->written) &
202 (RAW3215_BUFFER_SIZE - 1);
203 /*
204 * now we have to count newlines. We can at max accept
205 * RAW3215_MAX_NEWLINE newlines in a single ssch due to
206 * a restriction in VM
207 */
208 lines = 0;
209 ix = req->start;
210 while (lines < RAW3215_MAX_NEWLINE && ix != raw->head) {
211 if (raw->buffer[ix] == 0x15)
212 lines++;
213 ix = (ix + 1) & (RAW3215_BUFFER_SIZE - 1);
214 }
215 len = ((ix - 1 - req->start) & (RAW3215_BUFFER_SIZE - 1)) + 1;
216 if (len > RAW3215_MAX_BYTES)
217 len = RAW3215_MAX_BYTES;
218 req->len = len;
219 raw->written += len;
220
221 /* set the indication if we should try to enlarge this request */
222 req->delayable = (ix == raw->head) && (len < RAW3215_MIN_WRITE);
223
224 ix = req->start;
225 while (len > 0) {
226 if (ccw > req->ccws)
227 ccw[-1].flags |= 0x40; /* use command chaining */
228 ccw->cmd_code = 0x01; /* write, auto carrier return */
229 ccw->flags = 0x20; /* ignore incorrect length ind. */
230 ccw->cda =
231 (__u32) __pa(raw->buffer + ix);
232 count = len;
233 if (ix + count > RAW3215_BUFFER_SIZE)
234 count = RAW3215_BUFFER_SIZE - ix;
235 ccw->count = count;
236 len -= count;
237 ix = (ix + count) & (RAW3215_BUFFER_SIZE - 1);
238 ccw++;
239 }
240 /*
241 * Add a NOP to the channel program. 3215 devices are purely
 242 * emulated and it's much better to avoid the channel end
243 * interrupt in this case.
244 */
245 if (ccw > req->ccws)
246 ccw[-1].flags |= 0x40; /* use command chaining */
247 ccw->cmd_code = 0x03; /* NOP */
248 ccw->flags = 0;
249 ccw->cda = 0;
250 ccw->count = 1;
251}
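/*
 * Note on the arithmetic above: RAW3215_BUFFER_SIZE is a power of two,
 * so "& (RAW3215_BUFFER_SIZE - 1)" is the ring-buffer wrap-around.
 * E.g. with head = 100, count = 200 and written = 0, req->start is
 * (100 - 200) & 65535 = 65436: the request begins 200 bytes behind the
 * head, wrapped to the end of the buffer.
 */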
252
253/*
254 * Start a read or a write request
255 */
256static void
257raw3215_start_io(struct raw3215_info *raw)
258{
259 struct raw3215_req *req;
260 int res;
261
262 req = raw->queued_read;
263 if (req != NULL &&
264 !(raw->flags & (RAW3215_WORKING | RAW3215_THROTTLED))) {
265 /* dequeue request */
266 raw->queued_read = NULL;
267 res = ccw_device_start(raw->cdev, req->ccws,
268 (unsigned long) req, 0, 0);
269 if (res != 0) {
270 /* do_IO failed, put request back to queue */
271 raw->queued_read = req;
272 } else {
273 raw->flags |= RAW3215_WORKING;
274 }
275 }
276 req = raw->queued_write;
277 if (req != NULL &&
278 !(raw->flags & (RAW3215_WORKING | RAW3215_STOPPED))) {
279 /* dequeue request */
280 raw->queued_write = NULL;
281 res = ccw_device_start(raw->cdev, req->ccws,
282 (unsigned long) req, 0, 0);
283 if (res != 0) {
284 /* do_IO failed, put request back to queue */
285 raw->queued_write = req;
286 } else {
287 raw->flags |= RAW3215_WORKING;
288 }
289 }
290}
291
292/*
 293 * Function to start a delayed output after RAW3215_TIMEOUT (HZ/10)
294 */
295static void
296raw3215_timeout(unsigned long __data)
297{
298 struct raw3215_info *raw = (struct raw3215_info *) __data;
299 unsigned long flags;
300
301 spin_lock_irqsave(raw->lock, flags);
302 if (raw->flags & RAW3215_TIMER_RUNS) {
303 del_timer(&raw->timer);
304 raw->flags &= ~RAW3215_TIMER_RUNS;
305 raw3215_mk_write_req(raw);
306 raw3215_start_io(raw);
307 }
308 spin_unlock_irqrestore(raw->lock, flags);
309}
310
311/*
312 * Function to conditionally start an IO. A read is started immediately,
313 * a write is only started immediately if the flush flag is on or the
314 * amount of data is bigger than RAW3215_MIN_WRITE. If a write is not
315 * done immediately a timer is started with a delay of RAW3215_TIMEOUT.
316 */
317static inline void
318raw3215_try_io(struct raw3215_info *raw)
319{
320 if (!(raw->flags & RAW3215_ACTIVE))
321 return;
322 if (raw->queued_read != NULL)
323 raw3215_start_io(raw);
324 else if (raw->queued_write != NULL) {
325 if ((raw->queued_write->delayable == 0) ||
326 (raw->flags & RAW3215_FLUSHING)) {
327 /* execute write requests bigger than minimum size */
328 raw3215_start_io(raw);
329 if (raw->flags & RAW3215_TIMER_RUNS) {
330 del_timer(&raw->timer);
331 raw->flags &= ~RAW3215_TIMER_RUNS;
332 }
333 } else if (!(raw->flags & RAW3215_TIMER_RUNS)) {
334 /* delay small writes */
335 init_timer(&raw->timer);
336 raw->timer.expires = RAW3215_TIMEOUT + jiffies;
337 raw->timer.data = (unsigned long) raw;
338 raw->timer.function = raw3215_timeout;
339 add_timer(&raw->timer);
340 raw->flags |= RAW3215_TIMER_RUNS;
341 }
342 }
343}
344
345/*
346 * The bottom half handler routine for 3215 devices. It tries to start
347 * the next IO and wakes up processes waiting on the tty.
348 */
349static void
350raw3215_tasklet(void *data)
351{
352 struct raw3215_info *raw;
353 struct tty_struct *tty;
354 unsigned long flags;
355
356 raw = (struct raw3215_info *) data;
357 spin_lock_irqsave(raw->lock, flags);
358 raw3215_mk_write_req(raw);
359 raw3215_try_io(raw);
360 spin_unlock_irqrestore(raw->lock, flags);
361 /* Check for pending message from raw3215_irq */
362 if (raw->message != NULL) {
363 printk(raw->message, raw->msg_dstat, raw->msg_cstat);
364 raw->message = NULL;
365 }
366 tty = raw->tty;
367 if (tty != NULL &&
368 RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) {
369 tty_wakeup(tty);
370 }
371}
372
373/*
374 * Interrupt routine, called from common io layer
375 */
376static void
377raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
378{
379 struct raw3215_info *raw;
380 struct raw3215_req *req;
381 struct tty_struct *tty;
382 int cstat, dstat;
383 int count, slen;
384
385 raw = cdev->dev.driver_data;
386 req = (struct raw3215_req *) intparm;
387 cstat = irb->scsw.cstat;
388 dstat = irb->scsw.dstat;
389 if (cstat != 0) {
390 raw->message = KERN_WARNING
391 "Got nonzero channel status in raw3215_irq "
392 "(dev sts 0x%2x, sch sts 0x%2x)";
393 raw->msg_dstat = dstat;
394 raw->msg_cstat = cstat;
395 tasklet_schedule(&raw->tasklet);
396 }
397 if (dstat & 0x01) { /* we got a unit exception */
398 dstat &= ~0x01; /* we can ignore it */
399 }
400 switch (dstat) {
401 case 0x80:
402 if (cstat != 0)
403 break;
404 /* Attention interrupt, someone hit the enter key */
405 raw3215_mk_read_req(raw);
406 if (MACHINE_IS_P390)
407 memset(raw->inbuf, 0, RAW3215_INBUF_SIZE);
408 tasklet_schedule(&raw->tasklet);
409 break;
410 case 0x08:
411 case 0x0C:
412 /* Channel end interrupt. */
413 if ((raw = req->info) == NULL)
414 return; /* That shouldn't happen ... */
415 if (req->type == RAW3215_READ) {
416 /* store residual count, then wait for device end */
417 req->residual = irb->scsw.count;
418 }
419 if (dstat == 0x08)
420 break;
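 /* dstat == 0x0C: channel end and device end together, fall through */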
421 case 0x04:
422 /* Device end interrupt. */
423 if ((raw = req->info) == NULL)
424 return; /* That shouldn't happen ... */
425 if (req->type == RAW3215_READ && raw->tty != NULL) {
426 unsigned int cchar;
427
428 tty = raw->tty;
429 count = 160 - req->residual;
430 if (MACHINE_IS_P390) {
431 slen = strnlen(raw->inbuf, RAW3215_INBUF_SIZE);
432 if (count > slen)
433 count = slen;
434 } else
435 if (count >= TTY_FLIPBUF_SIZE - tty->flip.count)
436 count = TTY_FLIPBUF_SIZE - tty->flip.count - 1;
437 EBCASC(raw->inbuf, count);
438 cchar = ctrlchar_handle(raw->inbuf, count, tty);
439 switch (cchar & CTRLCHAR_MASK) {
440 case CTRLCHAR_SYSRQ:
441 break;
442
443 case CTRLCHAR_CTRL:
444 tty->flip.count++;
445 *tty->flip.flag_buf_ptr++ = TTY_NORMAL;
446 *tty->flip.char_buf_ptr++ = cchar;
447 tty_flip_buffer_push(raw->tty);
448 break;
449
450 case CTRLCHAR_NONE:
451 memcpy(tty->flip.char_buf_ptr,
452 raw->inbuf, count);
453 if (count < 2 ||
454 (strncmp(raw->inbuf+count-2, "^n", 2) &&
455 strncmp(raw->inbuf+count-2, "\252n", 2)) ) {
 456 /* no "^n" at the end, add the auto \n */
457 tty->flip.char_buf_ptr[count] = '\n';
458 memset(tty->flip.flag_buf_ptr,
459 TTY_NORMAL, count + 1);
460 count++;
461 } else
462 count-=2;
463 tty->flip.char_buf_ptr += count;
464 tty->flip.flag_buf_ptr += count;
465 tty->flip.count += count;
466 tty_flip_buffer_push(raw->tty);
467 break;
468 }
469 } else if (req->type == RAW3215_WRITE) {
470 raw->count -= req->len;
471 raw->written -= req->len;
472 }
473 raw->flags &= ~RAW3215_WORKING;
474 raw3215_free_req(req);
475 /* check for empty wait */
476 if (waitqueue_active(&raw->empty_wait) &&
477 raw->queued_write == NULL &&
478 raw->queued_read == NULL) {
479 wake_up_interruptible(&raw->empty_wait);
480 }
481 tasklet_schedule(&raw->tasklet);
482 break;
483 default:
484 /* Strange interrupt, I'll do my best to clean up */
485 if (req != NULL && req->type != RAW3215_FREE) {
486 if (req->type == RAW3215_WRITE) {
487 raw->count -= req->len;
488 raw->written -= req->len;
489 }
490 raw->flags &= ~RAW3215_WORKING;
491 raw3215_free_req(req);
492 }
493 raw->message = KERN_WARNING
494 "Spurious interrupt in in raw3215_irq "
495 "(dev sts 0x%2x, sch sts 0x%2x)";
496 raw->msg_dstat = dstat;
497 raw->msg_cstat = cstat;
498 tasklet_schedule(&raw->tasklet);
499 }
500 return;
501}
502
503/*
 504 * Wait until length bytes are available in the output buffer.
 505 * Has to be called with the s390irq lock held. Can be called
 506 * with interrupts disabled.
507 */
508static void
509raw3215_make_room(struct raw3215_info *raw, unsigned int length)
510{
511 while (RAW3215_BUFFER_SIZE - raw->count < length) {
512 /* there might be a request pending */
513 raw->flags |= RAW3215_FLUSHING;
514 raw3215_mk_write_req(raw);
515 raw3215_try_io(raw);
516 raw->flags &= ~RAW3215_FLUSHING;
517#ifdef CONFIG_TN3215_CONSOLE
518 wait_cons_dev();
519#endif
520 /* Enough room freed up ? */
521 if (RAW3215_BUFFER_SIZE - raw->count >= length)
522 break;
523 /* there might be another cpu waiting for the lock */
524 spin_unlock(raw->lock);
525 udelay(100);
526 spin_lock(raw->lock);
527 }
528}
529
530/*
531 * String write routine for 3215 devices
532 */
533static void
534raw3215_write(struct raw3215_info *raw, const char *str, unsigned int length)
535{
536 unsigned long flags;
537 int c, count;
538
539 while (length > 0) {
540 spin_lock_irqsave(raw->lock, flags);
541 count = (length > RAW3215_BUFFER_SIZE) ?
542 RAW3215_BUFFER_SIZE : length;
543 length -= count;
544
545 raw3215_make_room(raw, count);
546
547 /* copy string to output buffer and convert it to EBCDIC */
548 while (1) {
549 c = min_t(int, count,
550 min(RAW3215_BUFFER_SIZE - raw->count,
551 RAW3215_BUFFER_SIZE - raw->head));
552 if (c <= 0)
553 break;
554 memcpy(raw->buffer + raw->head, str, c);
555 ASCEBC(raw->buffer + raw->head, c);
556 raw->head = (raw->head + c) & (RAW3215_BUFFER_SIZE - 1);
557 raw->count += c;
558 raw->line_pos += c;
559 str += c;
560 count -= c;
561 }
562 if (!(raw->flags & RAW3215_WORKING)) {
563 raw3215_mk_write_req(raw);
564 /* start or queue request */
565 raw3215_try_io(raw);
566 }
567 spin_unlock_irqrestore(raw->lock, flags);
568 }
569}
570
571/*
572 * Put character routine for 3215 devices
573 */
574static void
575raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
576{
577 unsigned long flags;
578 unsigned int length, i;
579
580 spin_lock_irqsave(raw->lock, flags);
581 if (ch == '\t') {
582 length = TAB_STOP_SIZE - (raw->line_pos%TAB_STOP_SIZE);
583 raw->line_pos += length;
584 ch = ' ';
585 } else if (ch == '\n') {
586 length = 1;
587 raw->line_pos = 0;
588 } else {
589 length = 1;
590 raw->line_pos++;
591 }
592 raw3215_make_room(raw, length);
593
594 for (i = 0; i < length; i++) {
595 raw->buffer[raw->head] = (char) _ascebc[(int) ch];
596 raw->head = (raw->head + 1) & (RAW3215_BUFFER_SIZE - 1);
597 raw->count++;
598 }
599 if (!(raw->flags & RAW3215_WORKING)) {
600 raw3215_mk_write_req(raw);
601 /* start or queue request */
602 raw3215_try_io(raw);
603 }
604 spin_unlock_irqrestore(raw->lock, flags);
605}
606
607/*
608 * Flush routine, it simply sets the flush flag and tries to start
609 * pending IO.
610 */
611static void
612raw3215_flush_buffer(struct raw3215_info *raw)
613{
614 unsigned long flags;
615
616 spin_lock_irqsave(raw->lock, flags);
617 if (raw->count > 0) {
618 raw->flags |= RAW3215_FLUSHING;
619 raw3215_try_io(raw);
620 raw->flags &= ~RAW3215_FLUSHING;
621 }
622 spin_unlock_irqrestore(raw->lock, flags);
623}
624
625/*
626 * Fire up a 3215 device.
627 */
628static int
629raw3215_startup(struct raw3215_info *raw)
630{
631 unsigned long flags;
632
633 if (raw->flags & RAW3215_ACTIVE)
634 return 0;
635 raw->line_pos = 0;
636 raw->flags |= RAW3215_ACTIVE;
637 spin_lock_irqsave(raw->lock, flags);
638 raw3215_try_io(raw);
639 spin_unlock_irqrestore(raw->lock, flags);
640
641 return 0;
642}
643
644/*
645 * Shutdown a 3215 device.
646 */
647static void
648raw3215_shutdown(struct raw3215_info *raw)
649{
650 DECLARE_WAITQUEUE(wait, current);
651 unsigned long flags;
652
653 if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FIXED))
654 return;
655 /* Wait for outstanding requests, then free irq */
656 spin_lock_irqsave(raw->lock, flags);
657 if ((raw->flags & RAW3215_WORKING) ||
658 raw->queued_write != NULL ||
659 raw->queued_read != NULL) {
660 raw->flags |= RAW3215_CLOSING;
661 add_wait_queue(&raw->empty_wait, &wait);
662 set_current_state(TASK_INTERRUPTIBLE);
663 spin_unlock_irqrestore(raw->lock, flags);
664 schedule();
665 spin_lock_irqsave(raw->lock, flags);
666 remove_wait_queue(&raw->empty_wait, &wait);
667 set_current_state(TASK_RUNNING);
668 raw->flags &= ~(RAW3215_ACTIVE | RAW3215_CLOSING);
669 }
670 spin_unlock_irqrestore(raw->lock, flags);
671}
672
673static int
674raw3215_probe (struct ccw_device *cdev)
675{
676 struct raw3215_info *raw;
677 int line;
678
679 raw = kmalloc(sizeof(struct raw3215_info) +
680 RAW3215_INBUF_SIZE, GFP_KERNEL|GFP_DMA);
681 if (raw == NULL)
682 return -ENOMEM;
683
684 spin_lock(&raw3215_device_lock);
685 for (line = 0; line < NR_3215; line++) {
686 if (!raw3215[line]) {
687 raw3215[line] = raw;
688 break;
689 }
690 }
691 spin_unlock(&raw3215_device_lock);
692 if (line == NR_3215) {
693 kfree(raw);
694 return -ENODEV;
695 }
696
 697 memset(raw, 0, sizeof(struct raw3215_info));
 698 raw->cdev = cdev;
 699 raw->lock = get_ccwdev_lock(cdev);
 700 raw->inbuf = (char *) raw + sizeof(struct raw3215_info);
701 raw->buffer = (char *) kmalloc(RAW3215_BUFFER_SIZE,
702 GFP_KERNEL|GFP_DMA);
703 if (raw->buffer == NULL) {
704 spin_lock(&raw3215_device_lock);
705 raw3215[line] = 0;
706 spin_unlock(&raw3215_device_lock);
707 kfree(raw);
708 return -ENOMEM;
709 }
710 tasklet_init(&raw->tasklet,
711 (void (*)(unsigned long)) raw3215_tasklet,
712 (unsigned long) raw);
713 init_waitqueue_head(&raw->empty_wait);
714
715 cdev->dev.driver_data = raw;
716 cdev->handler = raw3215_irq;
717
718 return 0;
719}
720
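/*
 * Note the single-allocation trick in the probe above: the info
 * structure and its input buffer come from one kmalloc, with inbuf
 * pointing just past the struct, and the memset of the header must
 * happen before any field is filled in. A minimal sketch of the
 * pattern (hypothetical struct, not the driver's):
 */
struct hdr_and_payload {
	int state;
	char *payload;
};

static struct hdr_and_payload *alloc_with_payload(size_t payload_len)
{
	struct hdr_and_payload *p;

	p = kmalloc(sizeof(*p) + payload_len, GFP_KERNEL);
	if (!p)
		return NULL;
	memset(p, 0, sizeof(*p));		/* clear the header first... */
	p->payload = (char *) p + sizeof(*p);	/* ...then aim at the tail */
	return p;
}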
721static void
722raw3215_remove (struct ccw_device *cdev)
723{
724 struct raw3215_info *raw;
725
726 ccw_device_set_offline(cdev);
727 raw = cdev->dev.driver_data;
728 if (raw) {
729 cdev->dev.driver_data = NULL;
730 if (raw->buffer)
731 kfree(raw->buffer);
732 kfree(raw);
733 }
734}
735
736static int
737raw3215_set_online (struct ccw_device *cdev)
738{
739 struct raw3215_info *raw;
740
741 raw = cdev->dev.driver_data;
742 if (!raw)
743 return -ENODEV;
744
745 return raw3215_startup(raw);
746}
747
748static int
749raw3215_set_offline (struct ccw_device *cdev)
750{
751 struct raw3215_info *raw;
752
753 raw = cdev->dev.driver_data;
754 if (!raw)
755 return -ENODEV;
756
757 raw3215_shutdown(raw);
758
759 return 0;
760}
761
762static struct ccw_device_id raw3215_id[] = {
763 { CCW_DEVICE(0x3215, 0) },
764 { /* end of list */ },
765};
766
767static struct ccw_driver raw3215_ccw_driver = {
768 .name = "3215",
769 .owner = THIS_MODULE,
770 .ids = raw3215_id,
771 .probe = &raw3215_probe,
772 .remove = &raw3215_remove,
773 .set_online = &raw3215_set_online,
774 .set_offline = &raw3215_set_offline,
775};
776
777#ifdef CONFIG_TN3215_CONSOLE
778/*
779 * Write a string to the 3215 console
780 */
781static void
782con3215_write(struct console *co, const char *str, unsigned int count)
783{
784 struct raw3215_info *raw;
785 int i;
786
787 if (count <= 0)
788 return;
789 raw = raw3215[0]; /* console 3215 is the first one */
790 while (count > 0) {
791 for (i = 0; i < count; i++)
792 if (str[i] == '\t' || str[i] == '\n')
793 break;
794 raw3215_write(raw, str, i);
795 count -= i;
796 str += i;
797 if (count > 0) {
798 raw3215_putchar(raw, *str);
799 count--;
800 str++;
801 }
802 }
803}
804
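/*
 * The loop above fast-paths plain text: it scans ahead to the next
 * '\t' or '\n', hands that whole run to raw3215_write() in one piece,
 * and only routes the single special character through
 * raw3215_putchar(), which does the tab and line-position
 * bookkeeping. E.g. "ab\tcd" becomes write("ab"), putchar('\t'),
 * write("cd").
 */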
805static struct tty_driver *con3215_device(struct console *c, int *index)
806{
807 *index = c->index;
808 return tty3215_driver;
809}
810
811/*
812 * panic() calls console_unblank before the system enters a
813 * disabled, endless loop.
814 */
815static void
816con3215_unblank(void)
817{
818 struct raw3215_info *raw;
819 unsigned long flags;
820
821 raw = raw3215[0]; /* console 3215 is the first one */
822 spin_lock_irqsave(raw->lock, flags);
823 raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
824 spin_unlock_irqrestore(raw->lock, flags);
825}
826
827static int __init
828con3215_consetup(struct console *co, char *options)
829{
830 return 0;
831}
832
833/*
834 * The console structure for the 3215 console
835 */
836static struct console con3215 = {
837 .name = "ttyS",
838 .write = con3215_write,
839 .device = con3215_device,
840 .unblank = con3215_unblank,
841 .setup = con3215_consetup,
842 .flags = CON_PRINTBUFFER,
843};
844
845/*
846 * 3215 console initialization code called from console_init().
847 * NOTE: This is called before kmalloc is available.
848 */
849static int __init
850con3215_init(void)
851{
852 struct ccw_device *cdev;
853 struct raw3215_info *raw;
854 struct raw3215_req *req;
855 int i;
856
857 /* Check if 3215 is to be the console */
858 if (!CONSOLE_IS_3215)
859 return -ENODEV;
860
861 /* Set the console mode for VM */
862 if (MACHINE_IS_VM) {
863 cpcmd("TERM CONMODE 3215", NULL, 0);
864 cpcmd("TERM AUTOCR OFF", NULL, 0);
865 }
866
867 /* allocate 3215 request structures */
868 raw3215_freelist = NULL;
869 spin_lock_init(&raw3215_freelist_lock);
870 for (i = 0; i < NR_3215_REQ; i++) {
871 req = (struct raw3215_req *) alloc_bootmem_low(sizeof(struct raw3215_req));
872 req->next = raw3215_freelist;
873 raw3215_freelist = req;
874 }
875
876 cdev = ccw_device_probe_console();
877 if (!cdev)
878 return -ENODEV;
879
880 raw3215[0] = raw = (struct raw3215_info *)
881 alloc_bootmem_low(sizeof(struct raw3215_info));
882 memset(raw, 0, sizeof(struct raw3215_info));
883 raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
884 raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
885 raw->cdev = cdev;
886 raw->lock = get_ccwdev_lock(cdev);
887 cdev->dev.driver_data = raw;
888 cdev->handler = raw3215_irq;
889
890 raw->flags |= RAW3215_FIXED;
891 tasklet_init(&raw->tasklet,
892 (void (*)(unsigned long)) raw3215_tasklet,
893 (unsigned long) raw);
894 init_waitqueue_head(&raw->empty_wait);
895
896 /* Request the console irq */
897 if (raw3215_startup(raw) != 0) {
898 free_bootmem((unsigned long) raw->inbuf, RAW3215_INBUF_SIZE);
899 free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE);
900 free_bootmem((unsigned long) raw, sizeof(struct raw3215_info));
901 raw3215[0] = NULL;
902 printk("Couldn't find a 3215 console device\n");
903 return -ENODEV;
904 }
905 register_console(&con3215);
906 return 0;
907}
908console_initcall(con3215_init);
909#endif
910
911/*
912 * tty3215_open
913 *
914 * This routine is called whenever a 3215 tty is opened.
915 */
916static int
917tty3215_open(struct tty_struct *tty, struct file * filp)
918{
919 struct raw3215_info *raw;
920 int retval, line;
921
922 line = tty->index;
923 if ((line < 0) || (line >= NR_3215))
924 return -ENODEV;
925
926 raw = raw3215[line];
927 if (raw == NULL)
928 return -ENODEV;
929
930 tty->driver_data = raw;
931 raw->tty = tty;
932
933 tty->low_latency = 0; /* don't use bottom half for pushing chars */
934 /*
935 * Start up 3215 device
936 */
937 retval = raw3215_startup(raw);
938 if (retval)
939 return retval;
940
941 return 0;
942}
943
944/*
945 * tty3215_close()
946 *
947 * This routine is called when the 3215 tty is closed. We wait
948 * for the remaining request to be completed. Then we clean up.
949 */
950static void
951tty3215_close(struct tty_struct *tty, struct file * filp)
952{
953 struct raw3215_info *raw;
954
955 raw = (struct raw3215_info *) tty->driver_data;
956 if (raw == NULL || tty->count > 1)
957 return;
958 tty->closing = 1;
959 /* Shutdown the terminal */
960 raw3215_shutdown(raw);
961 tty->closing = 0;
962 raw->tty = NULL;
963}
964
965/*
966 * Returns the amount of free space in the output buffer.
967 */
968static int
969tty3215_write_room(struct tty_struct *tty)
970{
971 struct raw3215_info *raw;
972
973 raw = (struct raw3215_info *) tty->driver_data;
974
975 /* Subtract TAB_STOP_SIZE to allow for a tab; 8 is tiny vs. the 64K buffer */
976 if ((RAW3215_BUFFER_SIZE - raw->count - TAB_STOP_SIZE) >= 0)
977 return RAW3215_BUFFER_SIZE - raw->count - TAB_STOP_SIZE;
978 else
979 return 0;
980}
981
982/*
983 * String write routine for 3215 ttys
984 */
985static int
986tty3215_write(struct tty_struct * tty,
987 const unsigned char *buf, int count)
988{
989 struct raw3215_info *raw;
990
991 if (!tty)
992 return 0;
993 raw = (struct raw3215_info *) tty->driver_data;
994 raw3215_write(raw, buf, count);
995 return count;
996}
997
998/*
999 * Put character routine for 3215 ttys
1000 */
1001static void
1002tty3215_put_char(struct tty_struct *tty, unsigned char ch)
1003{
1004 struct raw3215_info *raw;
1005
1006 if (!tty)
1007 return;
1008 raw = (struct raw3215_info *) tty->driver_data;
1009 raw3215_putchar(raw, ch);
1010}
1011
1012static void
1013tty3215_flush_chars(struct tty_struct *tty)
1014{
1015}
1016
1017/*
1018 * Returns the number of characters in the output buffer
1019 */
1020static int
1021tty3215_chars_in_buffer(struct tty_struct *tty)
1022{
1023 struct raw3215_info *raw;
1024
1025 raw = (struct raw3215_info *) tty->driver_data;
1026 return raw->count;
1027}
1028
1029static void
1030tty3215_flush_buffer(struct tty_struct *tty)
1031{
1032 struct raw3215_info *raw;
1033
1034 raw = (struct raw3215_info *) tty->driver_data;
1035 raw3215_flush_buffer(raw);
1036 tty_wakeup(tty);
1037}
1038
1039/*
1040 * Currently we don't have any io controls for 3215 ttys
1041 */
1042static int
1043tty3215_ioctl(struct tty_struct *tty, struct file * file,
1044 unsigned int cmd, unsigned long arg)
1045{
1046 if (tty->flags & (1 << TTY_IO_ERROR))
1047 return -EIO;
1048
1049 switch (cmd) {
1050 default:
1051 return -ENOIOCTLCMD;
1052 }
1053 return 0;
1054}
1055
1056/*
1057 * Disable reading from a 3215 tty
1058 */
1059static void
1060tty3215_throttle(struct tty_struct * tty)
1061{
1062 struct raw3215_info *raw;
1063
1064 raw = (struct raw3215_info *) tty->driver_data;
1065 raw->flags |= RAW3215_THROTTLED;
1066}
1067
1068/*
1069 * Enable reading from a 3215 tty
1070 */
1071static void
1072tty3215_unthrottle(struct tty_struct * tty)
1073{
1074 struct raw3215_info *raw;
1075 unsigned long flags;
1076
1077 raw = (struct raw3215_info *) tty->driver_data;
1078 if (raw->flags & RAW3215_THROTTLED) {
1079 spin_lock_irqsave(raw->lock, flags);
1080 raw->flags &= ~RAW3215_THROTTLED;
1081 raw3215_try_io(raw);
1082 spin_unlock_irqrestore(raw->lock, flags);
1083 }
1084}
1085
1086/*
1087 * Disable writing to a 3215 tty
1088 */
1089static void
1090tty3215_stop(struct tty_struct *tty)
1091{
1092 struct raw3215_info *raw;
1093
1094 raw = (struct raw3215_info *) tty->driver_data;
1095 raw->flags |= RAW3215_STOPPED;
1096}
1097
1098/*
1099 * Enable writing to a 3215 tty
1100 */
1101static void
1102tty3215_start(struct tty_struct *tty)
1103{
1104 struct raw3215_info *raw;
1105 unsigned long flags;
1106
1107 raw = (struct raw3215_info *) tty->driver_data;
1108 if (raw->flags & RAW3215_STOPPED) {
1109 spin_lock_irqsave(raw->lock, flags);
1110 raw->flags &= ~RAW3215_STOPPED;
1111 raw3215_try_io(raw);
1112 spin_unlock_irqrestore(raw->lock, flags);
1113 }
1114}
1115
1116static struct tty_operations tty3215_ops = {
1117 .open = tty3215_open,
1118 .close = tty3215_close,
1119 .write = tty3215_write,
1120 .put_char = tty3215_put_char,
1121 .flush_chars = tty3215_flush_chars,
1122 .write_room = tty3215_write_room,
1123 .chars_in_buffer = tty3215_chars_in_buffer,
1124 .flush_buffer = tty3215_flush_buffer,
1125 .ioctl = tty3215_ioctl,
1126 .throttle = tty3215_throttle,
1127 .unthrottle = tty3215_unthrottle,
1128 .stop = tty3215_stop,
1129 .start = tty3215_start,
1130};
1131
1132/*
1133 * 3215 tty registration code called from tty_init().
1134 * Most kernel services (incl. kmalloc) are available at this point.
1135 */
1136int __init
1137tty3215_init(void)
1138{
1139 struct tty_driver *driver;
1140 int ret;
1141
1142 if (!CONSOLE_IS_3215)
1143 return 0;
1144
1145 driver = alloc_tty_driver(NR_3215);
1146 if (!driver)
1147 return -ENOMEM;
1148
1149 ret = ccw_driver_register(&raw3215_ccw_driver);
1150 if (ret) {
1151 put_tty_driver(driver);
1152 return ret;
1153 }
1154 /*
1155 * Initialize the tty_driver structure
1156 * Entries in tty3215_driver that are NOT initialized:
1157 * proc_entry, set_termios, set_ldisc, write_proc
1158 */
1159
1160 driver->owner = THIS_MODULE;
1161 driver->driver_name = "tty3215";
1162 driver->name = "ttyS";
1163 driver->major = TTY_MAJOR;
1164 driver->minor_start = 64;
1165 driver->type = TTY_DRIVER_TYPE_SYSTEM;
1166 driver->subtype = SYSTEM_TYPE_TTY;
1167 driver->init_termios = tty_std_termios;
1168 driver->init_termios.c_iflag = IGNBRK | IGNPAR; /* ignore break and parity errors */
1169 driver->init_termios.c_oflag = ONLCR | XTABS; /* NL -> CRNL, expand tabs to spaces */
1170 driver->init_termios.c_lflag = ISIG; /* enable INTR/QUIT/SUSP signals */
1171 driver->flags = TTY_DRIVER_REAL_RAW;
1172 tty_set_operations(driver, &tty3215_ops);
1173 ret = tty_register_driver(driver);
1174 if (ret) {
1175 printk("Couldn't register tty3215 driver\n");
1176 put_tty_driver(driver);
1177 return ret;
1178 }
1179 tty3215_driver = driver;
1180 return 0;
1181}
1182
1183static void __exit
1184tty3215_exit(void)
1185{
1186 tty_unregister_driver(tty3215_driver);
1187 put_tty_driver(tty3215_driver);
1188 ccw_driver_unregister(&raw3215_ccw_driver);
1189}
1190
1191module_init(tty3215_init);
1192module_exit(tty3215_exit);
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
new file mode 100644
index 000000000000..d52fb57a6b19
--- /dev/null
+++ b/drivers/s390/char/con3270.c
@@ -0,0 +1,638 @@
1/*
2 * drivers/s390/char/con3270.c
3 * IBM/3270 Driver - console view.
4 *
5 * Author(s):
6 * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
7 * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
8 * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
9 */
10
11#include <linux/config.h>
12#include <linux/bootmem.h>
13#include <linux/console.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/list.h>
17#include <linux/types.h>
18
19#include <asm/ccwdev.h>
20#include <asm/cio.h>
21#include <asm/cpcmd.h>
22#include <asm/ebcdic.h>
23
24#include "raw3270.h"
25#include "ctrlchar.h"
26
27#define CON3270_OUTPUT_BUFFER_SIZE 1024
28#define CON3270_STRING_PAGES 4
29
30static struct raw3270_fn con3270_fn;
31
32/*
33 * Main 3270 console view data structure.
34 */
35struct con3270 {
36 struct raw3270_view view;
37 spinlock_t lock;
38 struct list_head freemem; /* list of free memory for strings. */
39
40 /* Output stuff. */
41 struct list_head lines; /* list of lines. */
42 struct list_head update; /* list of lines to update. */
43 int line_nr; /* line number for next update. */
44 int nr_lines; /* # lines in list. */
45 int nr_up; /* # lines up in history. */
46 unsigned long update_flags; /* Update indication bits. */
47 struct string *cline; /* current output line. */
48 struct string *status; /* last line of display. */
49 struct raw3270_request *write; /* single write request. */
50 struct timer_list timer;
51
52 /* Input stuff. */
53 struct string *input; /* input string for read request. */
54 struct raw3270_request *read; /* single read request. */
55 struct raw3270_request *kreset; /* single keyboard reset request. */
56 struct tasklet_struct readlet; /* tasklet to issue read request. */
57};
58
59static struct con3270 *condev;
60
61/* con3270->update_flags. See con3270_update for details. */
62#define CON_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */
63#define CON_UPDATE_LIST 2 /* Update lines in con3270->update. */
64#define CON_UPDATE_STATUS 4 /* Update status line. */
65#define CON_UPDATE_ALL 7
66
67static void con3270_update(struct con3270 *);
68
69/*
70 * Setup timeout for a device. On timeout trigger an update.
71 */
72void
73con3270_set_timer(struct con3270 *cp, int expires)
74{
75 if (expires == 0) {
76 if (timer_pending(&cp->timer))
77 del_timer(&cp->timer);
78 return;
79 }
80 if (timer_pending(&cp->timer) &&
81 mod_timer(&cp->timer, jiffies + expires))
82 return;
83 cp->timer.function = (void (*)(unsigned long)) con3270_update;
84 cp->timer.data = (unsigned long) cp;
85 cp->timer.expires = jiffies + expires;
86 add_timer(&cp->timer);
87}
88
89/*
90 * The status line is the last line of the screen. It shows the string
91 * "console view" in the lower left corner and "Running" or "History"
92 * in the lower right corner of the screen.
93 */
94static void
95con3270_update_status(struct con3270 *cp)
96{
97 char *str;
98
99 str = (cp->nr_up != 0) ? "History" : "Running";
100 memcpy(cp->status->string + 24, str, 7);
101 codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
102 cp->update_flags |= CON_UPDATE_STATUS;
103}
104
105static void
106con3270_create_status(struct con3270 *cp)
107{
108 static const unsigned char blueprint[] =
109 { TO_SBA, 0, 0, TO_SF,TF_LOG,TO_SA,TAT_COLOR, TAC_GREEN,
110 'c','o','n','s','o','l','e',' ','v','i','e','w',
111 TO_RA,0,0,0,'R','u','n','n','i','n','g',TO_SF,TF_LOG };
112
113 cp->status = alloc_string(&cp->freemem, sizeof(blueprint));
114 /* Copy blueprint to status line */
115 memcpy(cp->status->string, blueprint, sizeof(blueprint));
116 /* Set TO_RA addresses. */
117 raw3270_buffer_address(cp->view.dev, cp->status->string + 1,
118 cp->view.cols * (cp->view.rows - 1));
119 raw3270_buffer_address(cp->view.dev, cp->status->string + 21,
120 cp->view.cols * cp->view.rows - 8);
121 /* Convert strings to ebcdic. */
122 codepage_convert(cp->view.ascebc, cp->status->string + 8, 12);
123 codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
124}
125
126/*
127 * Set output offsets to 3270 datastream fragment of a console string.
128 */
129static void
130con3270_update_string(struct con3270 *cp, struct string *s, int nr)
131{
132 if (s->len >= cp->view.cols - 5)
133 return;
134 raw3270_buffer_address(cp->view.dev, s->string + s->len - 3,
135 cp->view.cols * (nr + 1));
136}
137
138/*
139 * Rebuild update list to print all lines.
140 */
141static void
142con3270_rebuild_update(struct con3270 *cp)
143{
144 struct string *s, *n;
145 int nr;
146
147 /*
148 * Throw away update list and create a new one,
149 * containing all lines that will fit on the screen.
150 */
151 list_for_each_entry_safe(s, n, &cp->update, update)
152 list_del_init(&s->update);
153 nr = cp->view.rows - 2 + cp->nr_up;
154 list_for_each_entry_reverse(s, &cp->lines, list) {
155 if (nr < cp->view.rows - 1)
156 list_add(&s->update, &cp->update);
157 if (--nr < 0)
158 break;
159 }
160 cp->line_nr = 0;
161 cp->update_flags |= CON_UPDATE_LIST;
162}
163
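/*
 * Worked example for the arithmetic above, assuming a 24-row screen
 * (the last row is the status line, so rows - 1 = 23 lines of log):
 * nr starts at 22 + nr_up. Walking the list newest-first, the newest
 * nr_up lines still have nr >= 23 and are skipped; the following 23
 * lines are prepended to the update list until nr drops below 0. So
 * nr_up == 0 shows the newest 23 lines, and each PF7 page-up adds
 * rows - 2 to nr_up to scroll one screenful into the history.
 */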
164/*
165 * Alloc string for size bytes. Free strings from history if necessary.
166 */
167static struct string *
168con3270_alloc_string(struct con3270 *cp, size_t size)
169{
170 struct string *s, *n;
171
172 s = alloc_string(&cp->freemem, size);
173 if (s)
174 return s;
175 list_for_each_entry_safe(s, n, &cp->lines, list) {
176 list_del(&s->list);
177 if (!list_empty(&s->update))
178 list_del(&s->update);
179 cp->nr_lines--;
180 if (free_string(&cp->freemem, s) >= size)
181 break;
182 }
183 s = alloc_string(&cp->freemem, size);
184 BUG_ON(!s);
185 if (cp->nr_up != 0 && cp->nr_up + cp->view.rows > cp->nr_lines) {
186 cp->nr_up = cp->nr_lines - cp->view.rows + 1;
187 con3270_rebuild_update(cp);
188 con3270_update_status(cp);
189 }
190 return s;
191}
192
193/*
194 * Write completion callback.
195 */
196static void
197con3270_write_callback(struct raw3270_request *rq, void *data)
198{
199 raw3270_request_reset(rq);
200 xchg(&((struct con3270 *) rq->view)->write, rq);
201}
202
203/*
204 * Update console display.
205 */
206static void
207con3270_update(struct con3270 *cp)
208{
209 struct raw3270_request *wrq;
210 char wcc, prolog[6];
211 unsigned long flags;
212 unsigned long updated;
213 struct string *s, *n;
214 int rc;
215
216 wrq = xchg(&cp->write, 0);
217 if (!wrq) {
218 con3270_set_timer(cp, 1);
219 return;
220 }
221
222 spin_lock_irqsave(&cp->view.lock, flags);
223 updated = 0;
224 if (cp->update_flags & CON_UPDATE_ERASE) {
225 /* Use erase write alternate to initialize display. */
226 raw3270_request_set_cmd(wrq, TC_EWRITEA);
227 updated |= CON_UPDATE_ERASE;
228 } else
229 raw3270_request_set_cmd(wrq, TC_WRITE);
230
231 wcc = TW_NONE;
232 raw3270_request_add_data(wrq, &wcc, 1);
233
234 /*
235 * Update status line.
236 */
237 if (cp->update_flags & CON_UPDATE_STATUS)
238 if (raw3270_request_add_data(wrq, cp->status->string,
239 cp->status->len) == 0)
240 updated |= CON_UPDATE_STATUS;
241
242 if (cp->update_flags & CON_UPDATE_LIST) {
243 prolog[0] = TO_SBA;
244 prolog[3] = TO_SA;
245 prolog[4] = TAT_COLOR;
246 prolog[5] = TAC_TURQ;
247 raw3270_buffer_address(cp->view.dev, prolog + 1,
248 cp->view.cols * cp->line_nr);
249 raw3270_request_add_data(wrq, prolog, 6);
250 /* Write strings in the update list to the screen. */
251 list_for_each_entry_safe(s, n, &cp->update, update) {
252 if (s != cp->cline)
253 con3270_update_string(cp, s, cp->line_nr);
254 if (raw3270_request_add_data(wrq, s->string,
255 s->len) != 0)
256 break;
257 list_del_init(&s->update);
258 if (s != cp->cline)
259 cp->line_nr++;
260 }
261 if (list_empty(&cp->update))
262 updated |= CON_UPDATE_LIST;
263 }
264 wrq->callback = con3270_write_callback;
265 rc = raw3270_start(&cp->view, wrq);
266 if (rc == 0) {
267 cp->update_flags &= ~updated;
268 if (cp->update_flags)
269 con3270_set_timer(cp, 1);
270 } else {
271 raw3270_request_reset(wrq);
272 xchg(&cp->write, wrq);
273 }
274 spin_unlock_irqrestore(&cp->view.lock, flags);
275}
276
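/*
 * Bookkeeping note for the function above: "updated" collects the
 * flag bits whose data actually made it into this write request, and
 * they are cleared from update_flags only when raw3270_start()
 * succeeds. On failure the request is reset and parked in cp->write
 * again with update_flags untouched, so a later update attempt will
 * resend everything that is still pending.
 */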
277/*
278 * Read tasklet.
279 */
280static void
281con3270_read_tasklet(struct raw3270_request *rrq)
282{
283 static char kreset_data = TW_KR;
284 struct con3270 *cp;
285 unsigned long flags;
286 int nr_up, deactivate;
287
288 cp = (struct con3270 *) rrq->view;
289 spin_lock_irqsave(&cp->view.lock, flags);
290 nr_up = cp->nr_up;
291 deactivate = 0;
292 /* Check aid byte. */
293 switch (cp->input->string[0]) {
294 case 0x7d: /* enter: jump to bottom. */
295 nr_up = 0;
296 break;
297 case 0xf3: /* PF3: deactivate the console view. */
298 deactivate = 1;
299 break;
300 case 0x6d: /* clear: start from scratch. */
301 con3270_rebuild_update(cp);
302 cp->update_flags = CON_UPDATE_ALL;
303 con3270_set_timer(cp, 1);
304 break;
305 case 0xf7: /* PF7: do a page up in the console log. */
306 nr_up += cp->view.rows - 2;
307 if (nr_up + cp->view.rows - 1 > cp->nr_lines) {
308 nr_up = cp->nr_lines - cp->view.rows + 1;
309 if (nr_up < 0)
310 nr_up = 0;
311 }
312 break;
313 case 0xf8: /* PF8: do a page down in the console log. */
314 nr_up -= cp->view.rows - 2;
315 if (nr_up < 0)
316 nr_up = 0;
317 break;
318 }
319 if (nr_up != cp->nr_up) {
320 cp->nr_up = nr_up;
321 con3270_rebuild_update(cp);
322 con3270_update_status(cp);
323 con3270_set_timer(cp, 1);
324 }
325 spin_unlock_irqrestore(&cp->view.lock, flags);
326
327 /* Start keyboard reset command. */
328 raw3270_request_reset(cp->kreset);
329 raw3270_request_set_cmd(cp->kreset, TC_WRITE);
330 raw3270_request_add_data(cp->kreset, &kreset_data, 1);
331 raw3270_start(&cp->view, cp->kreset);
332
333 if (deactivate)
334 raw3270_deactivate_view(&cp->view);
335
336 raw3270_request_reset(rrq);
337 xchg(&cp->read, rrq);
338 raw3270_put_view(&cp->view);
339}
340
341/*
342 * Read request completion callback.
343 */
344static void
345con3270_read_callback(struct raw3270_request *rq, void *data)
346{
347 raw3270_get_view(rq->view);
348 /* Schedule tasklet to pass input to tty. */
349 tasklet_schedule(&((struct con3270 *) rq->view)->readlet);
350}
351
352/*
353 * Issue a read request. Called only from interrupt function.
354 */
355static void
356con3270_issue_read(struct con3270 *cp)
357{
358 struct raw3270_request *rrq;
359 int rc;
360
361 rrq = xchg(&cp->read, 0);
362 if (!rrq)
363 /* Read already scheduled. */
364 return;
365 rrq->callback = con3270_read_callback;
366 rrq->callback_data = cp;
367 raw3270_request_set_cmd(rrq, TC_READMOD);
368 raw3270_request_set_data(rrq, cp->input->string, cp->input->len);
369 /* Issue the read modified request. */
370 rc = raw3270_start_irq(&cp->view, rrq);
371 if (rc)
372 raw3270_request_reset(rrq);
373}
374
375/*
376 * Switch to the console view.
377 */
378static int
379con3270_activate(struct raw3270_view *view)
380{
381 unsigned long flags;
382 struct con3270 *cp;
383
384 cp = (struct con3270 *) view;
385 spin_lock_irqsave(&cp->view.lock, flags);
386 cp->nr_up = 0;
387 con3270_rebuild_update(cp);
388 con3270_update_status(cp);
389 cp->update_flags = CON_UPDATE_ALL;
390 con3270_set_timer(cp, 1);
391 spin_unlock_irqrestore(&cp->view.lock, flags);
392 return 0;
393}
394
395static void
396con3270_deactivate(struct raw3270_view *view)
397{
398 unsigned long flags;
399 struct con3270 *cp;
400
401 cp = (struct con3270 *) view;
402 spin_lock_irqsave(&cp->view.lock, flags);
403 del_timer(&cp->timer);
404 spin_unlock_irqrestore(&cp->view.lock, flags);
405}
406
407static int
408con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
409{
410 /* Handle ATTN. Schedule tasklet to read aid. */
411 if (irb->scsw.dstat & DEV_STAT_ATTENTION)
412 con3270_issue_read(cp);
413
414 if (rq) {
415 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
416 rq->rc = -EIO;
417 else
418 /* Normal end. Copy residual count. */
419 rq->rescnt = irb->scsw.count;
420 }
421 return RAW3270_IO_DONE;
422}
423
424/* Console view to a 3270 device. */
425static struct raw3270_fn con3270_fn = {
426 .activate = con3270_activate,
427 .deactivate = con3270_deactivate,
428 .intv = (void *) con3270_irq
429};
430
431static inline void
432con3270_cline_add(struct con3270 *cp)
433{
434 if (!list_empty(&cp->cline->list))
435 /* Already added. */
436 return;
437 list_add_tail(&cp->cline->list, &cp->lines);
438 cp->nr_lines++;
439 con3270_rebuild_update(cp);
440}
441
442static inline void
443con3270_cline_insert(struct con3270 *cp, unsigned char c)
444{
445 cp->cline->string[cp->cline->len++] =
446 cp->view.ascebc[(c < ' ') ? ' ' : c];
447 if (list_empty(&cp->cline->update)) {
448 list_add_tail(&cp->cline->update, &cp->update);
449 cp->update_flags |= CON_UPDATE_LIST;
450 }
451}
452
453static inline void
454con3270_cline_end(struct con3270 *cp)
455{
456 struct string *s;
457 unsigned int size;
458
459 /* Copy cline. */
460 size = (cp->cline->len < cp->view.cols - 5) ?
461 cp->cline->len + 4 : cp->view.cols;
462 s = con3270_alloc_string(cp, size);
463 memcpy(s->string, cp->cline->string, cp->cline->len);
464 if (s->len < cp->view.cols - 5) {
465 s->string[s->len - 4] = TO_RA;
466 s->string[s->len - 1] = 0;
467 } else {
468 while (--size > cp->cline->len)
469 s->string[size] = cp->view.ascebc[' '];
470 }
471 /* Replace cline with allocated line s and reset cline. */
472 list_add(&s->list, &cp->cline->list);
473 list_del_init(&cp->cline->list);
474 if (!list_empty(&cp->cline->update)) {
475 list_add(&s->update, &cp->cline->update);
476 list_del_init(&cp->cline->update);
477 }
478 cp->cline->len = 0;
479}
480
481/*
482 * Write a string to the 3270 console
483 */
484static void
485con3270_write(struct console *co, const char *str, unsigned int count)
486{
487 struct con3270 *cp;
488 unsigned long flags;
489 unsigned char c;
490
491 cp = condev;
492 if (cp->view.dev)
493 raw3270_activate_view(&cp->view);
494 spin_lock_irqsave(&cp->view.lock, flags);
495 while (count-- > 0) {
496 c = *str++;
497 if (cp->cline->len == 0)
498 con3270_cline_add(cp);
499 if (c != '\n')
500 con3270_cline_insert(cp, c);
501 if (c == '\n' || cp->cline->len >= cp->view.cols)
502 con3270_cline_end(cp);
503 }
504 /* Setup timer to output current console buffer after 1/10 second */
505 if (cp->view.dev && !timer_pending(&cp->timer))
506 con3270_set_timer(cp, HZ/10);
507 spin_unlock_irqrestore(&cp->view.lock,flags);
508}
509
510extern struct tty_driver *tty3270_driver;
511
512static struct tty_driver *
513con3270_device(struct console *c, int *index)
514{
515 *index = c->index;
516 return tty3270_driver;
517}
518
519/*
520 * Wait for end of write request.
521 */
522static void
523con3270_wait_write(struct con3270 *cp)
524{
525 while (!cp->write) {
526 raw3270_wait_cons_dev(cp->view.dev);
527 barrier();
528 }
529}
530
531/*
532 * panic() calls console_unblank before the system enters a
533 * disabled, endless loop.
534 */
535static void
536con3270_unblank(void)
537{
538 struct con3270 *cp;
539 unsigned long flags;
540
541 cp = condev;
542 if (!cp->view.dev)
543 return;
544 spin_lock_irqsave(&cp->view.lock, flags);
545 con3270_wait_write(cp);
546 cp->nr_up = 0;
547 con3270_rebuild_update(cp);
548 con3270_update_status(cp);
549 while (cp->update_flags != 0) {
550 spin_unlock_irqrestore(&cp->view.lock, flags);
551 con3270_update(cp);
552 spin_lock_irqsave(&cp->view.lock, flags);
553 con3270_wait_write(cp);
554 }
555 spin_unlock_irqrestore(&cp->view.lock, flags);
556}
557
558static int __init
559con3270_consetup(struct console *co, char *options)
560{
561 return 0;
562}
563
564/*
565 * The console structure for the 3270 console
566 */
567static struct console con3270 = {
568 .name = "tty3270",
569 .write = con3270_write,
570 .device = con3270_device,
571 .unblank = con3270_unblank,
572 .setup = con3270_consetup,
573 .flags = CON_PRINTBUFFER,
574};
575
576/*
577 * 3270 console initialization code called from console_init().
578 * NOTE: This is called before kmalloc is available.
579 */
580static int __init
581con3270_init(void)
582{
583 struct ccw_device *cdev;
584 struct raw3270 *rp;
585 void *cbuf;
586 int i;
587
588 /* Check if 3270 is to be the console */
589 if (!CONSOLE_IS_3270)
590 return -ENODEV;
591
592 /* Set the console mode for VM */
593 if (MACHINE_IS_VM) {
594 cpcmd("TERM CONMODE 3270", NULL, 0);
595 cpcmd("TERM AUTOCR OFF", NULL, 0);
596 }
597
598 cdev = ccw_device_probe_console();
599 if (!cdev)
600 return -ENODEV;
601 rp = raw3270_setup_console(cdev);
602 if (IS_ERR(rp))
603 return PTR_ERR(rp);
604
605 condev = (struct con3270 *) alloc_bootmem_low(sizeof(struct con3270));
606 memset(condev, 0, sizeof(struct con3270));
607 condev->view.dev = rp;
608
609 condev->read = raw3270_request_alloc_bootmem(0);
610 condev->read->callback = con3270_read_callback;
611 condev->read->callback_data = condev;
612 condev->write =
613 raw3270_request_alloc_bootmem(CON3270_OUTPUT_BUFFER_SIZE);
614 condev->kreset = raw3270_request_alloc_bootmem(1);
615
616 INIT_LIST_HEAD(&condev->lines);
617 INIT_LIST_HEAD(&condev->update);
618 init_timer(&condev->timer);
619 tasklet_init(&condev->readlet,
620 (void (*)(unsigned long)) con3270_read_tasklet,
621 (unsigned long) condev->read);
622
623 raw3270_add_view(&condev->view, &con3270_fn, 0);
624
625 INIT_LIST_HEAD(&condev->freemem);
626 for (i = 0; i < CON3270_STRING_PAGES; i++) {
627 cbuf = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
628 add_string_memory(&condev->freemem, cbuf, PAGE_SIZE);
629 }
630 condev->cline = alloc_string(&condev->freemem, condev->view.cols);
631 condev->cline->len = 0;
632 con3270_create_status(condev);
633 condev->input = alloc_string(&condev->freemem, 80);
634 register_console(&con3270);
635 return 0;
636}
637
638console_initcall(con3270_init);
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
new file mode 100644
index 000000000000..be463242cf0f
--- /dev/null
+++ b/drivers/s390/char/ctrlchar.c
@@ -0,0 +1,75 @@
1/*
2 * drivers/s390/char/ctrlchar.c
3 * Unified handling of special chars.
4 *
5 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
7 *
8 */
9
10#include <linux/config.h>
11#include <linux/stddef.h>
12#include <asm/errno.h>
13#include <linux/sysrq.h>
14#include <linux/ctype.h>
15
16#include "ctrlchar.h"
17
18#ifdef CONFIG_MAGIC_SYSRQ
19static int ctrlchar_sysrq_key;
20
21static void
22ctrlchar_handle_sysrq(void *tty)
23{
24 handle_sysrq(ctrlchar_sysrq_key, NULL, (struct tty_struct *) tty);
25}
26
27static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq, 0);
28#endif
29
30
31/**
32 * Check for special chars at start of input.
33 *
34 * @param buf Console input buffer.
35 * @param len Length of valid data in buffer.
36 * @param tty The tty struct for this console.
37 * @return CTRLCHAR_NONE, if nothing matched,
38 * CTRLCHAR_SYSRQ, if sysrq was encountered
39 * otherwise char to be inserted logically or'ed
40 * with CTRLCHAR_CTRL
41 */
42unsigned int
43ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
44{
45 if ((len < 2) || (len > 3))
46 return CTRLCHAR_NONE;
47
48 /* hat is 0xb1 in codepage 037 (US etc.) and thus */
49 /* converted to 0x5e in ascii ('^') */
50 if ((buf[0] != '^') && (buf[0] != '\252'))
51 return CTRLCHAR_NONE;
52
53#ifdef CONFIG_MAGIC_SYSRQ
54 /* racy */
55 if (len == 3 && buf[1] == '-') {
56 ctrlchar_sysrq_key = buf[2];
57 ctrlchar_work.data = tty;
58 schedule_work(&ctrlchar_work);
59 return CTRLCHAR_SYSRQ;
60 }
61#endif
62
63 if (len != 2)
64 return CTRLCHAR_NONE;
65
66 switch (tolower(buf[1])) {
67 case 'c':
68 return INTR_CHAR(tty) | CTRLCHAR_CTRL;
69 case 'd':
70 return EOF_CHAR(tty) | CTRLCHAR_CTRL;
71 case 'z':
72 return SUSP_CHAR(tty) | CTRLCHAR_CTRL;
73 }
74 return CTRLCHAR_NONE;
75}
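
/*
 * Typical caller pattern (a sketch, not part of this file): a console
 * driver runs freshly received input through ctrlchar_handle() and
 * dispatches on the masked result before queueing data to the tty
 * (assuming the usual <linux/tty_flip.h> helpers).
 */
static void ctrlchar_usage_example(struct tty_struct *tty,
				   const unsigned char *buf, int count)
{
	unsigned int cchar;

	cchar = ctrlchar_handle(buf, count, tty);
	switch (cchar & CTRLCHAR_MASK) {
	case CTRLCHAR_SYSRQ:
		break;	/* consumed; the sysrq work was scheduled */
	case CTRLCHAR_CTRL:
		/* deliver the single control char in the low byte */
		tty_insert_flip_char(tty, cchar & 0xff, TTY_NORMAL);
		tty_flip_buffer_push(tty);
		break;
	default:
		/* CTRLCHAR_NONE: queue the raw buffer as usual */
		break;
	}
}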
diff --git a/drivers/s390/char/ctrlchar.h b/drivers/s390/char/ctrlchar.h
new file mode 100644
index 000000000000..935ffa0ea7c6
--- /dev/null
+++ b/drivers/s390/char/ctrlchar.h
@@ -0,0 +1,20 @@
1/*
2 * drivers/s390/char/ctrlchar.h
3 * Unified handling of special chars.
4 *
5 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
7 *
8 */
9
10#include <linux/tty.h>
11
12extern unsigned int
13ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty);
14
15
16#define CTRLCHAR_NONE (1 << 8)
17#define CTRLCHAR_CTRL (2 << 8)
18#define CTRLCHAR_SYSRQ (3 << 8)
19
20#define CTRLCHAR_MASK (~0xffu)
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c
new file mode 100644
index 000000000000..ca15adb140d1
--- /dev/null
+++ b/drivers/s390/char/defkeymap.c
@@ -0,0 +1,156 @@
1
2/* Do not edit this file! It was automatically generated by */
3/* loadkeys --mktable defkeymap.map > defkeymap.c */
4
5#include <linux/types.h>
6#include <linux/keyboard.h>
7#include <linux/kd.h>
8
9u_short plain_map[NR_KEYS] = {
10 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
11 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
12 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
13 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
14 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
15 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
16 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
17 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
18 0xf020, 0xf000, 0xf0e2, 0xf0e4, 0xf0e0, 0xf0e1, 0xf0e3, 0xf0e5,
19 0xf0e7, 0xf0f1, 0xf0a2, 0xf02e, 0xf03c, 0xf028, 0xf02b, 0xf07c,
20 0xf026, 0xf0e9, 0xf0e2, 0xf0eb, 0xf0e8, 0xf0ed, 0xf0ee, 0xf0ef,
21 0xf0ec, 0xf0df, 0xf021, 0xf024, 0xf02a, 0xf029, 0xf03b, 0xf0ac,
22 0xf02d, 0xf02f, 0xf0c2, 0xf0c4, 0xf0c0, 0xf0c1, 0xf0c3, 0xf0c5,
23 0xf0c7, 0xf0d1, 0xf0a6, 0xf02c, 0xf025, 0xf05f, 0xf03e, 0xf03f,
24 0xf0f8, 0xf0c9, 0xf0ca, 0xf0cb, 0xf0c8, 0xf0cd, 0xf0ce, 0xf0cf,
25 0xf0cc, 0xf060, 0xf03a, 0xf023, 0xf040, 0xf027, 0xf03d, 0xf022,
26};
27
28static u_short shift_map[NR_KEYS] = {
29 0xf0d8, 0xf061, 0xf062, 0xf063, 0xf064, 0xf065, 0xf066, 0xf067,
30 0xf068, 0xf069, 0xf0ab, 0xf0bb, 0xf0f0, 0xf0fd, 0xf0fe, 0xf0b1,
31 0xf0b0, 0xf06a, 0xf06b, 0xf06c, 0xf06d, 0xf06e, 0xf06f, 0xf070,
32 0xf071, 0xf072, 0xf000, 0xf000, 0xf0e6, 0xf0b8, 0xf0c6, 0xf0a4,
33 0xf0b5, 0xf07e, 0xf073, 0xf074, 0xf075, 0xf076, 0xf077, 0xf078,
34 0xf079, 0xf07a, 0xf0a1, 0xf0bf, 0xf0d0, 0xf0dd, 0xf0de, 0xf0ae,
35 0xf402, 0xf0a3, 0xf0a5, 0xf0b7, 0xf0a9, 0xf0a7, 0xf0b6, 0xf0bc,
36 0xf0bd, 0xf0be, 0xf05b, 0xf05d, 0xf000, 0xf0a8, 0xf0b4, 0xf0d7,
37 0xf07b, 0xf041, 0xf042, 0xf043, 0xf044, 0xf045, 0xf046, 0xf047,
38 0xf048, 0xf049, 0xf000, 0xf0f4, 0xf0f6, 0xf0f2, 0xf0f3, 0xf0f5,
39 0xf07d, 0xf04a, 0xf04b, 0xf04c, 0xf04d, 0xf04e, 0xf04f, 0xf050,
40 0xf051, 0xf052, 0xf0b9, 0xf0fb, 0xf0fc, 0xf0f9, 0xf0fa, 0xf0ff,
41 0xf05c, 0xf0f7, 0xf053, 0xf054, 0xf055, 0xf056, 0xf057, 0xf058,
42 0xf059, 0xf05a, 0xf0b2, 0xf0d4, 0xf0d6, 0xf0d2, 0xf0d3, 0xf0d5,
43 0xf030, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, 0xf037,
44 0xf038, 0xf039, 0xf0b3, 0xf0db, 0xf0dc, 0xf0d9, 0xf0da, 0xf000,
45};
46
47static u_short ctrl_map[NR_KEYS] = {
48 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
49 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
50 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
51 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
52 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
53 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
54 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
55 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
56 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
57 0xf200, 0xf200, 0xf11f, 0xf120, 0xf121, 0xf200, 0xf200, 0xf200,
58 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
59 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
60 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
61 0xf200, 0xf200, 0xf200, 0xf01a, 0xf003, 0xf212, 0xf004, 0xf200,
62 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
63 0xf200, 0xf200, 0xf109, 0xf10a, 0xf206, 0xf00a, 0xf200, 0xf200,
64};
65
66static u_short shift_ctrl_map[NR_KEYS] = {
67 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
68 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
69 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
70 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
71 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
72 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
73 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
74 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
75 0xf200, 0xf10c, 0xf10d, 0xf10e, 0xf10f, 0xf110, 0xf111, 0xf112,
76 0xf113, 0xf11e, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
77 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
78 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
79 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
80 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
81 0xf200, 0xf100, 0xf101, 0xf211, 0xf103, 0xf104, 0xf105, 0xf20b,
82 0xf20a, 0xf108, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
83};
84
85ushort *key_maps[MAX_NR_KEYMAPS] = {
86 plain_map, shift_map, 0, 0,
87 ctrl_map, shift_ctrl_map, 0
88};
89
90unsigned int keymap_count = 4;
91
92
93/*
94 * Philosophy: most people do not define more strings, but they who do
95 * often want quite a lot of string space. So, we statically allocate
96 * the default and allocate dynamically in chunks of 512 bytes.
97 */
98
99char func_buf[] = {
100 '\033', '[', '[', 'A', 0,
101 '\033', '[', '[', 'B', 0,
102 '\033', '[', '[', 'C', 0,
103 '\033', '[', '[', 'D', 0,
104 '\033', '[', '[', 'E', 0,
105 '\033', '[', '1', '7', '~', 0,
106 '\033', '[', '1', '8', '~', 0,
107 '\033', '[', '1', '9', '~', 0,
108 '\033', '[', '2', '0', '~', 0,
109 '\033', '[', '2', '1', '~', 0,
110 '\033', '[', '2', '3', '~', 0,
111 '\033', '[', '2', '4', '~', 0,
112 '\033', '[', '2', '5', '~', 0,
113 '\033', '[', '2', '6', '~', 0,
114 '\033', '[', '2', '8', '~', 0,
115 '\033', '[', '2', '9', '~', 0,
116 '\033', '[', '3', '1', '~', 0,
117 '\033', '[', '3', '2', '~', 0,
118 '\033', '[', '3', '3', '~', 0,
119 '\033', '[', '3', '4', '~', 0,
120};
121
122
123char *funcbufptr = func_buf;
124int funcbufsize = sizeof(func_buf);
125int funcbufleft = 0; /* space left */
126
127char *func_table[MAX_NR_FUNC] = {
128 func_buf + 0,
129 func_buf + 5,
130 func_buf + 10,
131 func_buf + 15,
132 func_buf + 20,
133 func_buf + 25,
134 func_buf + 31,
135 func_buf + 37,
136 func_buf + 43,
137 func_buf + 49,
138 func_buf + 55,
139 func_buf + 61,
140 func_buf + 67,
141 func_buf + 73,
142 func_buf + 79,
143 func_buf + 85,
144 func_buf + 91,
145 func_buf + 97,
146 func_buf + 103,
147 func_buf + 109,
148 0,
149};
150
151struct kbdiacr accent_table[MAX_DIACR] = {
152 {'^', 'c', '\003'}, {'^', 'd', '\004'},
153 {'^', 'z', '\032'}, {'^', '\012', '\000'},
154};
155
156unsigned int accent_table_size = 4;
diff --git a/drivers/s390/char/defkeymap.map b/drivers/s390/char/defkeymap.map
new file mode 100644
index 000000000000..353b3f268824
--- /dev/null
+++ b/drivers/s390/char/defkeymap.map
@@ -0,0 +1,191 @@
1# Default keymap for 3270 (ebcdic codepage 037).
2keymaps 0-1,4-5
3
4keycode 0 = nul Oslash
5keycode 1 = nul a
6keycode 2 = nul b
7keycode 3 = nul c
8keycode 4 = nul d
9keycode 5 = nul e
10keycode 6 = nul f
11keycode 7 = nul g
12keycode 8 = nul h
13keycode 9 = nul i
14keycode 10 = nul guillemotleft
15keycode 11 = nul guillemotright
16keycode 12 = nul eth
17keycode 13 = nul yacute
18keycode 14 = nul thorn
19keycode 15 = nul plusminus
20keycode 16 = nul degree
21keycode 17 = nul j
22keycode 18 = nul k
23keycode 19 = nul l
24keycode 20 = nul m
25keycode 21 = nul n
26keycode 22 = nul o
27keycode 23 = nul p
28keycode 24 = nul q
29keycode 25 = nul r
30keycode 26 = nul nul
31keycode 27 = nul nul
32keycode 28 = nul ae
33keycode 29 = nul cedilla
34keycode 30 = nul AE
35keycode 31 = nul currency
36keycode 32 = nul mu
37keycode 33 = nul tilde
38keycode 34 = nul s
39keycode 35 = nul t
40keycode 36 = nul u
41keycode 37 = nul v
42keycode 38 = nul w
43keycode 39 = nul x
44keycode 40 = nul y
45keycode 41 = nul z
46keycode 42 = nul exclamdown
47keycode 43 = nul questiondown
48keycode 44 = nul ETH
49keycode 45 = nul Yacute
50keycode 46 = nul THORN
51keycode 47 = nul registered
52keycode 48 = nul dead_circumflex
53keycode 49 = nul sterling
54keycode 50 = nul yen
55keycode 51 = nul periodcentered
56keycode 52 = nul copyright
57keycode 53 = nul section
58keycode 54 = nul paragraph
59keycode 55 = nul onequarter
60keycode 56 = nul onehalf
61keycode 57 = nul threequarters
62keycode 58 = nul bracketleft
63keycode 59 = nul bracketright
64keycode 60 = nul nul
65keycode 61 = nul diaeresis
66keycode 62 = nul acute
67keycode 63 = nul multiply
68keycode 64 = space braceleft
69keycode 65 = nul A
70keycode 66 = acircumflex B
71keycode 67 = adiaeresis C
72keycode 68 = agrave D
73keycode 69 = aacute E
74keycode 70 = atilde F
75keycode 71 = aring G
76keycode 72 = ccedilla H
77keycode 73 = ntilde I
78keycode 74 = cent nul
79keycode 75 = period ocircumflex
80keycode 76 = less odiaeresis
81keycode 77 = parenleft ograve
82keycode 78 = plus oacute
83keycode 79 = bar otilde
84keycode 80 = ampersand braceright
85keycode 81 = eacute J
86keycode 82 = acircumflex K
87keycode 83 = ediaeresis L
88keycode 84 = egrave M
89keycode 85 = iacute N
90keycode 86 = icircumflex O
91keycode 87 = idiaeresis P
92keycode 88 = igrave Q
93keycode 89 = ssharp R
94keycode 90 = exclam onesuperior
95keycode 91 = dollar ucircumflex
96keycode 92 = asterisk udiaeresis
97keycode 93 = parenright ugrave
98keycode 94 = semicolon uacute
99keycode 95 = notsign ydiaeresis
100keycode 96 = minus backslash
101keycode 97 = slash division
102keycode 98 = Acircumflex S
103keycode 99 = Adiaeresis T
104keycode 100 = Agrave U
105keycode 101 = Aacute V
106keycode 102 = Atilde W
107keycode 103 = Aring X
108keycode 104 = Ccedilla Y
109keycode 105 = Ntilde Z
110keycode 106 = brokenbar twosuperior
111keycode 107 = comma Ocircumflex
112keycode 108 = percent Odiaeresis
113keycode 109 = underscore Ograve
114keycode 110 = greater Oacute
115keycode 111 = question Otilde
116keycode 112 = oslash zero
117keycode 113 = Eacute one
118keycode 114 = Ecircumflex two
119keycode 115 = Ediaeresis three
120keycode 116 = Egrave four
121keycode 117 = Iacute five
122keycode 118 = Icircumflex six
123keycode 119 = Idiaeresis seven
124keycode 120 = Igrave eight
125keycode 121 = grave nine
126keycode 122 = colon threesuperior
127keycode 123 = numbersign Ucircumflex
128keycode 124 = at Udiaeresis
129keycode 125 = apostrophe Ugrave
130keycode 126 = equal Uacute
131keycode 127 = quotedbl nul
132
133# AID keys
134control keycode 74 = F22
135control keycode 75 = F23
136control keycode 76 = F24
137control keycode 107 = Control_z # PA3
138control keycode 108 = Control_c # PA1
139control keycode 109 = KeyboardSignal # Clear
140control keycode 110 = Control_d # PA2
141control keycode 122 = F10
142control keycode 123 = F11 # F11
143control keycode 124 = Last_Console # F12
144control keycode 125 = Linefeed
145shift control keycode 65 = F13
146shift control keycode 66 = F14
147shift control keycode 67 = F15
148shift control keycode 68 = F16
149shift control keycode 69 = F17
150shift control keycode 70 = F18
151shift control keycode 71 = F19
152shift control keycode 72 = F20
153shift control keycode 73 = F21
154shift control keycode 113 = F1
155shift control keycode 114 = F2
156shift control keycode 115 = Incr_Console
157shift control keycode 116 = F4
158shift control keycode 117 = F5
159shift control keycode 118 = F6
160shift control keycode 119 = Scroll_Backward
161shift control keycode 120 = Scroll_Forward
162shift control keycode 121 = F9
163
164string F1 = "\033[[A"
165string F2 = "\033[[B"
166string F3 = "\033[[C"
167string F4 = "\033[[D"
168string F5 = "\033[[E"
169string F6 = "\033[17~"
170string F7 = "\033[18~"
171string F8 = "\033[19~"
172string F9 = "\033[20~"
173string F10 = "\033[21~"
174string F11 = "\033[23~"
175string F12 = "\033[24~"
176string F13 = "\033[25~"
177string F14 = "\033[26~"
178string F15 = "\033[28~"
179string F16 = "\033[29~"
180string F17 = "\033[31~"
181string F18 = "\033[32~"
182string F19 = "\033[33~"
183string F20 = "\033[34~"
184# string F21 ??
185# string F22 ??
186# string F23 ??
187# string F24 ??
188compose '^' 'c' to Control_c
189compose '^' 'd' to Control_d
190compose '^' 'z' to Control_z
191compose '^' '\012' to nul
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
new file mode 100644
index 000000000000..60afcdcf91c2
--- /dev/null
+++ b/drivers/s390/char/fs3270.c
@@ -0,0 +1,373 @@
1/*
2 * drivers/s390/char/fs3270.c
3 * IBM/3270 Driver - fullscreen driver.
4 *
5 * Author(s):
6 * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
7 * Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com>
8 * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
9 */
10
11#include <linux/config.h>
12#include <linux/bootmem.h>
13#include <linux/console.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/list.h>
17#include <linux/types.h>
18
19#include <asm/ccwdev.h>
20#include <asm/cio.h>
21#include <asm/cpcmd.h>
22#include <asm/ebcdic.h>
23#include <asm/idals.h>
24
25#include "raw3270.h"
26#include "ctrlchar.h"
27
28struct raw3270_fn fs3270_fn;
29
30struct fs3270 {
31 struct raw3270_view view;
32 pid_t fs_pid; /* Pid of controlling program. */
33 int read_command; /* ccw command to use for reads. */
34 int write_command; /* ccw command to use for writes. */
35 int attention; /* Got attention. */
36 struct raw3270_request *clear; /* single clear request. */
37 wait_queue_head_t attn_wait; /* Attention wait queue. */
38};
39
40static void
41fs3270_wake_up(struct raw3270_request *rq, void *data)
42{
43 wake_up((wait_queue_head_t *) data);
44}
45
46static int
47fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
48{
49 wait_queue_head_t wq;
50 int rc;
51
52 init_waitqueue_head(&wq);
53 rq->callback = fs3270_wake_up;
54 rq->callback_data = &wq;
55 rc = raw3270_start(view, rq);
56 if (rc)
57 return rc;
58 /* Started successfully. Now wait for completion. */
59 wait_event(wq, raw3270_request_final(rq));
60 return rq->rc;
61}
62
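/*
 * Note on the pattern above: the wait queue lives on the caller's
 * stack. That is safe only because fs3270_do_io() cannot return
 * before the callback has run (wait_event() blocks until
 * raw3270_request_final() is true), so the interrupt path never sees
 * a stale pointer to an expired stack frame.
 */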
63static void
64fs3270_reset_callback(struct raw3270_request *rq, void *data)
65{
66 raw3270_request_reset(rq);
67}
68
69/*
70 * Switch to the fullscreen view.
71 */
72static int
73fs3270_activate(struct raw3270_view *view)
74{
75 struct fs3270 *fp;
76
77 fp = (struct fs3270 *) view;
78 raw3270_request_set_cmd(fp->clear, TC_EWRITEA);
79 fp->clear->callback = fs3270_reset_callback;
80 return raw3270_start(view, fp->clear);
81}
82
83/*
84 * Shutdown fullscreen view.
85 */
86static void
87fs3270_deactivate(struct raw3270_view *view)
88{
89 // FIXME: is this a good idea? The user program using fullscreen 3270
90 // will die just because a console message appeared. On the other
91 // hand the fullscreen device is inoperable now.
92 struct fs3270 *fp;
93
94 fp = (struct fs3270 *) view;
95 if (fp->fs_pid != 0)
96 kill_proc(fp->fs_pid, SIGHUP, 1);
97 fp->fs_pid = 0;
98}
99
100static int
101fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
102{
103 /* Handle ATTN. Set indication and wake waiters for attention. */
104 if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
105 fp->attention = 1;
106 wake_up(&fp->attn_wait);
107 }
108
109 if (rq) {
110 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
111 rq->rc = -EIO;
112 else
113 /* Normal end. Copy residual count. */
114 rq->rescnt = irb->scsw.count;
115 }
116 return RAW3270_IO_DONE;
117}
118
119/*
120 * Process reads from fullscreen 3270.
121 */
122static ssize_t
123fs3270_read(struct file *filp, char *data, size_t count, loff_t *off)
124{
125 struct fs3270 *fp;
126 struct raw3270_request *rq;
127 struct idal_buffer *ib;
128 int rc;
129
130 if (count == 0 || count > 65535)
131 return -EINVAL;
132 fp = filp->private_data;
133 if (!fp)
134 return -ENODEV;
135 ib = idal_buffer_alloc(count, 0);
136 if (!ib)
137 return -ENOMEM;
138 rq = raw3270_request_alloc(0);
139 if (!IS_ERR(rq)) {
140 if (fp->read_command == 0 && fp->write_command != 0)
141 fp->read_command = 6; /* read modified (TC_READMOD) */
142 raw3270_request_set_cmd(rq, fp->read_command ? : 2); /* 2 == read buffer */
143 raw3270_request_set_idal(rq, ib);
144 wait_event(fp->attn_wait, fp->attention);
145 rc = fs3270_do_io(&fp->view, rq);
146 if (rc == 0 && idal_buffer_to_user(ib, data, count))
147 rc = -EFAULT;
148 raw3270_request_free(rq);
149 } else
150 rc = PTR_ERR(rq);
151 idal_buffer_free(ib);
152 return rc;
153}
154
155/*
156 * Process writes to fullscreen 3270.
157 */
158static ssize_t
159fs3270_write(struct file *filp, const char *data, size_t count, loff_t *off)
160{
161 struct fs3270 *fp;
162 struct raw3270_request *rq;
163 struct idal_buffer *ib;
164 int write_command;
165 int rc;
166
167 fp = filp->private_data;
168 if (!fp)
169 return -ENODEV;
170 ib = idal_buffer_alloc(count, 0);
171 if (!ib)
172 return -ENOMEM;
173 rq = raw3270_request_alloc(0);
174 if (!IS_ERR(rq)) {
175 if (idal_buffer_from_user(ib, data, count) == 0) {
176 write_command = fp->write_command ? : 1; /* default: write */
177 if (write_command == 5) /* map erase/write... */
178 write_command = 13; /* ...to erase/write alternate */
179 raw3270_request_set_cmd(rq, write_command);
180 raw3270_request_set_idal(rq, ib);
181 rc = fs3270_do_io(&fp->view, rq);
182 } else
183 rc = -EFAULT;
184 raw3270_request_free(rq);
185 } else
186 rc = PTR_ERR(rq);
187 idal_buffer_free(ib);
188 return rc;
189}
190
191/*
192 * process ioctl commands for the tube driver
193 */
194static int
195fs3270_ioctl(struct inode *inode, struct file *filp,
196 unsigned int cmd, unsigned long arg)
197{
198 struct fs3270 *fp;
199 struct raw3270_iocb iocb;
200 int rc;
201
202 fp = filp->private_data;
203 if (!fp)
204 return -ENODEV;
205 rc = 0;
206 switch (cmd) {
207 case TUBICMD:
208 fp->read_command = arg;
209 break;
210 case TUBOCMD:
211 fp->write_command = arg;
212 break;
213 case TUBGETI:
214 rc = put_user(fp->read_command, (char *) arg);
215 break;
216 case TUBGETO:
217 rc = put_user(fp->write_command,(char *) arg);
218 break;
219 case TUBGETMOD:
220 iocb.model = fp->view.model;
221 iocb.line_cnt = fp->view.rows;
222 iocb.col_cnt = fp->view.cols;
223 iocb.pf_cnt = 24;
224 iocb.re_cnt = 20;
225 iocb.map = 0;
226 if (copy_to_user((char *) arg, &iocb,
227 sizeof(struct raw3270_iocb)))
228 rc = -EFAULT;
229 break;
230 }
231 return rc;
232}
233
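/*
 * User-space usage sketch (hypothetical device node name; the TUB*
 * ioctl numbers come from the driver's public header):
 *
 *	struct raw3270_iocb iocb;
 *	int fd = open("/dev/3270/tub0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, TUBGETMOD, &iocb) == 0)
 *		printf("model %d: %d rows x %d cols\n",
 *		       iocb.model, iocb.line_cnt, iocb.col_cnt);
 */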
234/*
235 * Allocate fs3270 structure.
236 */
237static struct fs3270 *
238fs3270_alloc_view(void)
239{
240 struct fs3270 *fp;
241
242 fp = (struct fs3270 *) kmalloc(sizeof(struct fs3270),GFP_KERNEL);
243 if (!fp)
244 return ERR_PTR(-ENOMEM);
245 memset(fp, 0, sizeof(struct fs3270));
246 fp->clear = raw3270_request_alloc(0);
247 if (IS_ERR(fp->clear)) {
248 kfree(fp);
249 return ERR_PTR(-ENOMEM);
250 }
251 return fp;
252}
253
254/*
255 * Free fs3270 structure.
256 */
257static void
258fs3270_free_view(struct raw3270_view *view)
259{
260 raw3270_request_free(((struct fs3270 *) view)->clear);
261 kfree(view);
262}
263
264/*
265 * Unlink fs3270 data structure from filp.
266 */
267static void
268fs3270_release(struct raw3270_view *view)
269{
270}
271
272/* View to a 3270 device. Can be console, tty or fullscreen. */
273struct raw3270_fn fs3270_fn = {
274 .activate = fs3270_activate,
275 .deactivate = fs3270_deactivate,
276 .intv = (void *) fs3270_irq,
277 .release = fs3270_release,
278 .free = fs3270_free_view
279};
280
281/*
282 * This routine is called whenever a 3270 fullscreen device is opened.
283 */
284static int
285fs3270_open(struct inode *inode, struct file *filp)
286{
287 struct fs3270 *fp;
288 int minor, rc;
289
290 if (imajor(filp->f_dentry->d_inode) != IBM_FS3270_MAJOR)
291 return -ENODEV;
292 minor = iminor(filp->f_dentry->d_inode);
293 /* Check if some other program is already using fullscreen mode. */
294 fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
295 if (!IS_ERR(fp)) {
296 raw3270_put_view(&fp->view);
297 return -EBUSY;
298 }
299 /* Allocate fullscreen view structure. */
300 fp = fs3270_alloc_view();
301 if (IS_ERR(fp))
302 return PTR_ERR(fp);
303
304 init_waitqueue_head(&fp->attn_wait);
305 fp->fs_pid = current->pid;
306 rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
307 if (rc) {
308 fs3270_free_view(&fp->view);
309 return rc;
310 }
311
312 rc = raw3270_activate_view(&fp->view);
313 if (rc) {
314 raw3270_del_view(&fp->view);
315 return rc;
316 }
317 filp->private_data = fp;
318 return 0;
319}
320
321/*
322 * This routine is called when the 3270 fullscreen device is closed.
323 * We wait for the remaining request to complete, then clean up.
324 */
325static int
326fs3270_close(struct inode *inode, struct file *filp)
327{
328 struct fs3270 *fp;
329
330 fp = filp->private_data;
331 filp->private_data = 0;
332 if (fp)
333 raw3270_del_view(&fp->view);
334 return 0;
335}
336
337static struct file_operations fs3270_fops = {
338 .owner = THIS_MODULE,
339 .read = fs3270_read,
340 .write = fs3270_write,
341 .ioctl = fs3270_ioctl,
342 .open = fs3270_open,
343 .release = fs3270_close,
344};
345
346/*
347 * 3270 fullscreen driver initialization.
348 */
349static int __init
350fs3270_init(void)
351{
352 int rc;
353
354 rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops);
355 if (rc) {
356 printk(KERN_ERR "fs3270 can't get major number %d: errno %d\n",
357 IBM_FS3270_MAJOR, rc);
358 return rc;
359 }
360 return 0;
361}
362
363static void __exit
364fs3270_exit(void)
365{
366 unregister_chrdev(IBM_FS3270_MAJOR, "fs3270");
367}
368
369MODULE_LICENSE("GPL");
370MODULE_ALIAS_CHARDEV_MAJOR(IBM_FS3270_MAJOR);
371
372module_init(fs3270_init);
373module_exit(fs3270_exit);
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
new file mode 100644
index 000000000000..fd43d99b45a3
--- /dev/null
+++ b/drivers/s390/char/keyboard.c
@@ -0,0 +1,519 @@
1/*
2 * drivers/s390/char/keyboard.c
3 * ebcdic keycode functions for s390 console drivers
4 *
5 * S390 version
6 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
8 */
9
10#include <linux/config.h>
11#include <linux/module.h>
12#include <linux/sched.h>
13#include <linux/sysrq.h>
14
15#include <linux/kbd_kern.h>
16#include <linux/kbd_diacr.h>
17#include <asm/uaccess.h>
18
19#include "keyboard.h"
20
21/*
22 * Handler Tables.
23 */
24#define K_HANDLERS\
25 k_self, k_fn, k_spec, k_ignore,\
26 k_dead, k_ignore, k_ignore, k_ignore,\
27 k_ignore, k_ignore, k_ignore, k_ignore,\
28 k_ignore, k_ignore, k_ignore, k_ignore
29
30typedef void (k_handler_fn)(struct kbd_data *, unsigned char);
31static k_handler_fn K_HANDLERS;
32static k_handler_fn *k_handler[16] = { K_HANDLERS };
33
34/* maximum values each key_handler can handle */
35static const int kbd_max_vals[] = {
36 255, ARRAY_SIZE(func_table) - 1, NR_FN_HANDLER - 1, 0,
37 NR_DEAD - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0
38};
39static const int KBD_NR_TYPES = ARRAY_SIZE(kbd_max_vals);
40
41static unsigned char ret_diacr[NR_DEAD] = {
42 '`', '\'', '^', '~', '"', ','
43};
44
45/*
46 * Alloc/free of kbd_data structures.
47 */
48struct kbd_data *
49kbd_alloc(void) {
50 struct kbd_data *kbd;
51 int i, len;
52
53 kbd = kmalloc(sizeof(struct kbd_data), GFP_KERNEL);
54 if (!kbd)
55 goto out;
56 memset(kbd, 0, sizeof(struct kbd_data));
57 kbd->key_maps = kmalloc(sizeof(key_maps), GFP_KERNEL);
58 if (!kbd->key_maps)
59 goto out_kbd;
60 memset(kbd->key_maps, 0, sizeof(key_maps));
61 for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
62 if (key_maps[i]) {
63 kbd->key_maps[i] =
64 kmalloc(sizeof(u_short)*NR_KEYS, GFP_KERNEL);
65 if (!kbd->key_maps[i])
66 goto out_maps;
67 memcpy(kbd->key_maps[i], key_maps[i],
68 sizeof(u_short)*NR_KEYS);
69 }
70 }
71 kbd->func_table = kmalloc(sizeof(func_table), GFP_KERNEL);
72 if (!kbd->func_table)
73 goto out_maps;
74 memset(kbd->func_table, 0, sizeof(func_table));
75 for (i = 0; i < ARRAY_SIZE(func_table); i++) {
76 if (func_table[i]) {
77 len = strlen(func_table[i]) + 1;
78 kbd->func_table[i] = kmalloc(len, GFP_KERNEL);
79 if (!kbd->func_table[i])
80 goto out_func;
81 memcpy(kbd->func_table[i], func_table[i], len);
82 }
83 }
84 kbd->fn_handler =
85 kmalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL);
86 if (!kbd->fn_handler)
87 goto out_func;
88 memset(kbd->fn_handler, 0, sizeof(fn_handler_fn *) * NR_FN_HANDLER);
89 kbd->accent_table =
90 kmalloc(sizeof(struct kbdiacr)*MAX_DIACR, GFP_KERNEL);
91 if (!kbd->accent_table)
92 goto out_fn_handler;
93 memcpy(kbd->accent_table, accent_table,
94 sizeof(struct kbdiacr)*MAX_DIACR);
95 kbd->accent_table_size = accent_table_size;
96 return kbd;
97
98out_fn_handler:
99 kfree(kbd->fn_handler);
100out_func:
101 for (i = 0; i < ARRAY_SIZE(func_table); i++)
102 if (kbd->func_table[i])
103 kfree(kbd->func_table[i]);
104 kfree(kbd->func_table);
105out_maps:
106 for (i = 0; i < ARRAY_SIZE(key_maps); i++)
107 if (kbd->key_maps[i])
108 kfree(kbd->key_maps[i]);
109 kfree(kbd->key_maps);
110out_kbd:
111 kfree(kbd);
112out:
113 return 0;
114}
115
116void
117kbd_free(struct kbd_data *kbd)
118{
119 int i;
120
121 kfree(kbd->accent_table);
122 kfree(kbd->fn_handler);
123 for (i = 0; i < ARRAY_SIZE(func_table); i++)
124 if (kbd->func_table[i])
125 kfree(kbd->func_table[i]);
126 kfree(kbd->func_table);
127 for (i = 0; i < ARRAY_SIZE(key_maps); i++)
128 if (kbd->key_maps[i])
129 kfree(kbd->key_maps[i]);
130 kfree(kbd->key_maps);
131 kfree(kbd);
132}
133
134/*
135 * Generate ascii -> ebcdic translation table from kbd_data.
136 */
137void
138kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
139{
140 unsigned short *keymap, keysym;
141 int i, j, k;
142
143 memset(ascebc, 0x40, 256);
144 for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
145 keymap = kbd->key_maps[i];
146 if (!keymap)
147 continue;
148 for (j = 0; j < NR_KEYS; j++) {
149 k = ((i & 1) << 7) + j;
150 keysym = keymap[j];
151 if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
152 KTYP(keysym) == (KT_LETTER | 0xf0))
153 ascebc[KVAL(keysym)] = k;
154 else if (KTYP(keysym) == (KT_DEAD | 0xf0))
155 ascebc[ret_diacr[KVAL(keysym)]] = k;
156 }
157 }
158}
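/*
 * Illustrative use (a sketch, not from the original source): a console
 * driver would build the table once and translate outgoing characters:
 *
 *	unsigned char ascebc[256];
 *	kbd_ascebc(kbd, ascebc);
 *	ebcdic_byte = ascebc[ascii_byte];
 *
 * Characters without a keymap entry stay 0x40 (EBCDIC blank) from the
 * memset above.
 */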
159
160/*
161 * Generate ebcdic -> ascii translation table from kbd_data.
162 */
163void
164kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
165{
166 unsigned short *keymap, keysym;
167 int i, j, k;
168
169 memset(ebcasc, ' ', 256);
170 for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
171 keymap = kbd->key_maps[i];
172 if (!keymap)
173 continue;
174 for (j = 0; j < NR_KEYS; j++) {
175 keysym = keymap[j];
176 k = ((i & 1) << 7) + j;
177 if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
178 KTYP(keysym) == (KT_LETTER | 0xf0))
179 ebcasc[k] = KVAL(keysym);
180 else if (KTYP(keysym) == (KT_DEAD | 0xf0))
181 ebcasc[k] = ret_diacr[KVAL(keysym)];
182 }
183 }
184}
185
186/*
187 * We have a combining character DIACR here, followed by the character CH.
188 * If the combination occurs in the table, return the corresponding value.
189 * Otherwise, if CH is a space or equals DIACR, return DIACR.
190 * Otherwise, conclude that DIACR was not combining after all,
191 * queue it and return CH.
192 */
193static unsigned char
194handle_diacr(struct kbd_data *kbd, unsigned char ch)
195{
196 int i, d;
197
198 d = kbd->diacr;
199 kbd->diacr = 0;
200
201 for (i = 0; i < kbd->accent_table_size; i++) {
202 if (kbd->accent_table[i].diacr == d &&
203 kbd->accent_table[i].base == ch)
204 return kbd->accent_table[i].result;
205 }
206
207 if (ch == ' ' || ch == d)
208 return d;
209
210 kbd_put_queue(kbd->tty, d);
211 return ch;
212}
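/*
 * Worked example (illustrative; the table entry is hypothetical): with
 * kbd->diacr == '^' pending and ch == 'a', a matching accent_table
 * entry {'^', 'a', 0xe2} returns 0xe2 (latin-1 a-circumflex); ch == ' '
 * or ch == '^' returns the bare '^'; any other ch flushes '^' to the
 * tty and returns ch unchanged.
 */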
213
214/*
215 * Handle dead key.
216 */
217static void
218k_dead(struct kbd_data *kbd, unsigned char value)
219{
220 value = ret_diacr[value];
221 kbd->diacr = (kbd->diacr ? handle_diacr(kbd, value) : value);
222}
223
224/*
225 * Normal character handler.
226 */
227static void
228k_self(struct kbd_data *kbd, unsigned char value)
229{
230 if (kbd->diacr)
231 value = handle_diacr(kbd, value);
232 kbd_put_queue(kbd->tty, value);
233}
234
235/*
236 * Special key handlers
237 */
238static void
239k_ignore(struct kbd_data *kbd, unsigned char value)
240{
241}
242
243/*
244 * Function key handler.
245 */
246static void
247k_fn(struct kbd_data *kbd, unsigned char value)
248{
249 if (kbd->func_table[value])
250 kbd_puts_queue(kbd->tty, kbd->func_table[value]);
251}
252
253static void
254k_spec(struct kbd_data *kbd, unsigned char value)
255{
256 if (value >= NR_FN_HANDLER)
257 return;
258 if (kbd->fn_handler[value])
259 kbd->fn_handler[value](kbd);
260}
261
262/*
263 * Put utf8 character to tty flip buffer.
264 * UTF-8 is defined for words of up to 31 bits,
265 * but we need only 16 bits here
266 */
267static void
268to_utf8(struct tty_struct *tty, ushort c)
269{
270 if (c < 0x80)
271 /* 0******* */
272 kbd_put_queue(tty, c);
273 else if (c < 0x800) {
274 /* 110***** 10****** */
275 kbd_put_queue(tty, 0xc0 | (c >> 6));
276 kbd_put_queue(tty, 0x80 | (c & 0x3f));
277 } else {
278 /* 1110**** 10****** 10****** */
279 kbd_put_queue(tty, 0xe0 | (c >> 12));
280 kbd_put_queue(tty, 0x80 | ((c >> 6) & 0x3f));
281 kbd_put_queue(tty, 0x80 | (c & 0x3f));
282 }
283}
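/*
 * Worked example (illustrative): c = 0x20ac (euro sign) takes the
 * three-byte branch: 0xe0 | (c >> 12) = 0xe2, 0x80 | ((c >> 6) & 0x3f)
 * = 0x82 and 0x80 | (c & 0x3f) = 0xac, i.e. the UTF-8 sequence
 * e2 82 ac.
 */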
284
285/*
286 * Process keycode.
287 */
288void
289kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
290{
291 unsigned short keysym;
292 unsigned char type, value;
293
294 if (!kbd || !kbd->tty)
295 return;
296
297 if (keycode >= 384)
298 keysym = kbd->key_maps[5][keycode - 384];
299 else if (keycode >= 256)
300 keysym = kbd->key_maps[4][keycode - 256];
301 else if (keycode >= 128)
302 keysym = kbd->key_maps[1][keycode - 128];
303 else
304 keysym = kbd->key_maps[0][keycode];
305
306 type = KTYP(keysym);
307 if (type >= 0xf0) {
308 type -= 0xf0;
309 if (type == KT_LETTER)
310 type = KT_LATIN;
311 value = KVAL(keysym);
312#ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */
313 if (kbd->sysrq) {
314 if (kbd->sysrq == K(KT_LATIN, '-')) {
315 kbd->sysrq = 0;
316 handle_sysrq(value, 0, kbd->tty);
317 return;
318 }
319 if (value == '-') {
320 kbd->sysrq = K(KT_LATIN, '-');
321 return;
322 }
323 /* Incomplete sysrq sequence. */
324 (*k_handler[KTYP(kbd->sysrq)])(kbd, KVAL(kbd->sysrq));
325 kbd->sysrq = 0;
326 } else if ((type == KT_LATIN && value == '^') ||
327 (type == KT_DEAD && ret_diacr[value] == '^')) {
328 kbd->sysrq = K(type, value);
329 return;
330 }
331#endif
332 (*k_handler[type])(kbd, value);
333 } else
334 to_utf8(kbd->tty, keysym);
335}
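/*
 * Editor's note: the keycode space is split into 128-entry banks,
 * keycodes 0-127 -> key_maps[0] (plain), 128-255 -> key_maps[1]
 * (shift), 256-383 -> key_maps[4] and 384-511 -> key_maps[5]. The
 * keymap source must define these maps, since a NULL bank would be
 * dereferenced here without a check.
 */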
336
337/*
338 * Ioctl stuff.
339 */
340static int
341do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
342 int cmd, int perm)
343{
344 struct kbentry tmp;
345 ushort *key_map, val, ov;
346
347 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
348 return -EFAULT;
349#if NR_KEYS < 256
350 if (tmp.kb_index >= NR_KEYS)
351 return -EINVAL;
352#endif
353#if MAX_NR_KEYMAPS < 256
354 if (tmp.kb_table >= MAX_NR_KEYMAPS)
355 return -EINVAL;
356#endif
357
358 switch (cmd) {
359 case KDGKBENT:
360 key_map = kbd->key_maps[tmp.kb_table];
361 if (key_map) {
362 val = U(key_map[tmp.kb_index]);
363 if (KTYP(val) >= KBD_NR_TYPES)
364 val = K_HOLE;
365 } else
366 val = (tmp.kb_index ? K_HOLE : K_NOSUCHMAP);
367 return put_user(val, &user_kbe->kb_value);
368 case KDSKBENT:
369 if (!perm)
370 return -EPERM;
371 if (!tmp.kb_index && tmp.kb_value == K_NOSUCHMAP) {
372 /* disallocate map */
373 key_map = kbd->key_maps[tmp.kb_table];
374 if (key_map) {
375 kbd->key_maps[tmp.kb_table] = 0;
376 kfree(key_map);
377 }
378 break;
379 }
380
381 if (KTYP(tmp.kb_value) >= KBD_NR_TYPES)
382 return -EINVAL;
383 if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)])
384 return -EINVAL;
385
386 if (!(key_map = kbd->key_maps[tmp.kb_table])) {
387 int j;
388
389 key_map = (ushort *) kmalloc(sizeof(plain_map),
390 GFP_KERNEL);
391 if (!key_map)
392 return -ENOMEM;
393 kbd->key_maps[tmp.kb_table] = key_map;
394 for (j = 0; j < NR_KEYS; j++)
395 key_map[j] = U(K_HOLE);
396 }
397 ov = U(key_map[tmp.kb_index]);
398 if (tmp.kb_value == ov)
399 break; /* nothing to do */
400 /*
401 * Attention Key.
402 */
403 if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) &&
404 !capable(CAP_SYS_ADMIN))
405 return -EPERM;
406 key_map[tmp.kb_index] = U(tmp.kb_value);
407 break;
408 }
409 return 0;
410}
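/*
 * Illustrative userspace sketch (hypothetical file descriptor fd on a
 * tty driven by this code): reading one keymap entry uses the standard
 * console ioctl from <linux/kd.h>,
 *
 *	struct kbentry ke = { .kb_table = 0, .kb_index = 20 };
 *	if (ioctl(fd, KDGKBENT, &ke) == 0)
 *		printf("keysym 0x%04x\n", ke.kb_value);
 *
 * KDSKBENT works the same way, with kb_value filled in by the caller.
 */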
411
412static int
413do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs,
414 int cmd, int perm)
415{
416 unsigned char kb_func;
417 char *p;
418 int len;
419
420 /* Get u_kbs->kb_func. */
421 if (get_user(kb_func, &u_kbs->kb_func))
422 return -EFAULT;
423#if MAX_NR_FUNC < 256
424 if (kb_func >= MAX_NR_FUNC)
425 return -EINVAL;
426#endif
427
428 switch (cmd) {
429 case KDGKBSENT:
430 p = kbd->func_table[kb_func];
431 if (p) {
432 len = strlen(p);
433 if (len >= sizeof(u_kbs->kb_string))
434 len = sizeof(u_kbs->kb_string) - 1;
435 if (copy_to_user(u_kbs->kb_string, p, len))
436 return -EFAULT;
437 } else
438 len = 0;
439 if (put_user('\0', u_kbs->kb_string + len))
440 return -EFAULT;
441 break;
442 case KDSKBSENT:
443 if (!perm)
444 return -EPERM;
445 len = strnlen_user(u_kbs->kb_string,
446 sizeof(u_kbs->kb_string) - 1);
447 p = kmalloc(len, GFP_KERNEL);
448 if (!p)
449 return -ENOMEM;
450 if (copy_from_user(p, u_kbs->kb_string, len)) {
451 kfree(p);
452 return -EFAULT;
453 }
454 p[len] = 0;
455 if (kbd->func_table[kb_func])
456 kfree(kbd->func_table[kb_func]);
457 kbd->func_table[kb_func] = p;
458 break;
459 }
460 return 0;
461}
462
463int
464kbd_ioctl(struct kbd_data *kbd, struct file *file,
465 unsigned int cmd, unsigned long arg)
466{
467 struct kbdiacrs __user *a;
468 void __user *argp;
469 int ct, perm;
470
471 argp = (void __user *)arg;
472
473 /*
474 * To have permissions to do most of the vt ioctls, we either have
475 * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
476 */
477 perm = current->signal->tty == kbd->tty || capable(CAP_SYS_TTY_CONFIG);
478 switch (cmd) {
479 case KDGKBTYPE:
480 return put_user(KB_101, (char __user *)argp);
481 case KDGKBENT:
482 case KDSKBENT:
483 return do_kdsk_ioctl(kbd, argp, cmd, perm);
484 case KDGKBSENT:
485 case KDSKBSENT:
486 return do_kdgkb_ioctl(kbd, argp, cmd, perm);
487 case KDGKBDIACR:
488 a = argp;
489
490 if (put_user(kbd->accent_table_size, &a->kb_cnt))
491 return -EFAULT;
492 ct = kbd->accent_table_size;
493 if (copy_to_user(a->kbdiacr, kbd->accent_table,
494 ct * sizeof(struct kbdiacr)))
495 return -EFAULT;
496 return 0;
497 case KDSKBDIACR:
498 a = argp;
499 if (!perm)
500 return -EPERM;
501 if (get_user(ct, &a->kb_cnt))
502 return -EFAULT;
503 if (ct >= MAX_DIACR)
504 return -EINVAL;
505 kbd->accent_table_size = ct;
506 if (copy_from_user(kbd->accent_table, a->kbdiacr,
507 ct * sizeof(struct kbdiacr)))
508 return -EFAULT;
509 return 0;
510 default:
511 return -ENOIOCTLCMD;
512 }
513}
514
515EXPORT_SYMBOL(kbd_ioctl);
516EXPORT_SYMBOL(kbd_ascebc);
517EXPORT_SYMBOL(kbd_free);
518EXPORT_SYMBOL(kbd_alloc);
519EXPORT_SYMBOL(kbd_keycode);
diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h
new file mode 100644
index 000000000000..3b4da5a9cf79
--- /dev/null
+++ b/drivers/s390/char/keyboard.h
@@ -0,0 +1,57 @@
1/*
2 * drivers/s390/char/keyboard.h
3 * ebcdic keycode functions for s390 console drivers
4 *
5 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 */
8
9#include <linux/tty.h>
10#include <linux/tty_flip.h>
11#include <linux/keyboard.h>
12
13#define NR_FN_HANDLER 20
14
15struct kbd_data;
16
17typedef void (fn_handler_fn)(struct kbd_data *);
18
19/*
20 * FIXME: explain key_maps tricks.
21 */
22
23struct kbd_data {
24 struct tty_struct *tty;
25 unsigned short **key_maps;
26 char **func_table;
27 fn_handler_fn **fn_handler;
28 struct kbdiacr *accent_table;
29 unsigned int accent_table_size;
30 unsigned char diacr;
31 unsigned short sysrq;
32};
33
34struct kbd_data *kbd_alloc(void);
35void kbd_free(struct kbd_data *);
36void kbd_ascebc(struct kbd_data *, unsigned char *);
37
38void kbd_keycode(struct kbd_data *, unsigned int);
39int kbd_ioctl(struct kbd_data *, struct file *, unsigned int, unsigned long);
40
41/*
42 * Helper Functions.
43 */
44extern inline void
45kbd_put_queue(struct tty_struct *tty, int ch)
46{
47 tty_insert_flip_char(tty, ch, 0);
48 tty_schedule_flip(tty);
49}
50
51extern inline void
52kbd_puts_queue(struct tty_struct *tty, char *cp)
53{
54 while (*cp)
55 tty_insert_flip_char(tty, *cp++, 0);
56 tty_schedule_flip(tty);
57}
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
new file mode 100644
index 000000000000..5fd3ad867386
--- /dev/null
+++ b/drivers/s390/char/monreader.c
@@ -0,0 +1,662 @@
1/*
2 * drivers/s390/char/monreader.c
3 *
4 * Character device driver for reading z/VM *MONITOR service records.
5 *
6 * Copyright (C) 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH.
7 *
8 * Author: Gerald Schaefer <geraldsc@de.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/init.h>
14#include <linux/errno.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/miscdevice.h>
18#include <linux/ctype.h>
19#include <linux/spinlock.h>
20#include <linux/interrupt.h>
21#include <asm/uaccess.h>
22#include <asm/ebcdic.h>
23#include <asm/extmem.h>
24#include <linux/poll.h>
25#include "../net/iucv.h"
26
27
28//#define MON_DEBUG /* Debug messages on/off */
29
30#define MON_NAME "monreader"
31
32#define P_INFO(x...) printk(KERN_INFO MON_NAME " info: " x)
33#define P_ERROR(x...) printk(KERN_ERR MON_NAME " error: " x)
34#define P_WARNING(x...) printk(KERN_WARNING MON_NAME " warning: " x)
35
36#ifdef MON_DEBUG
37#define P_DEBUG(x...) printk(KERN_DEBUG MON_NAME " debug: " x)
38#else
39#define P_DEBUG(x...) do {} while (0)
40#endif
41
42#define MON_COLLECT_SAMPLE 0x80
43#define MON_COLLECT_EVENT 0x40
44#define MON_SERVICE "*MONITOR"
45#define MON_IN_USE 0x01
46#define MON_MSGLIM 255
47
48static char mon_dcss_name[9] = "MONDCSS\0";
49
50struct mon_msg {
51 u32 pos;
52 u32 mca_offset;
53 iucv_MessagePending local_eib;
54 char msglim_reached;
55 char replied_msglim;
56};
57
58struct mon_private {
59 u16 pathid;
60 iucv_handle_t iucv_handle;
61 struct mon_msg *msg_array[MON_MSGLIM];
62 unsigned int write_index;
63 unsigned int read_index;
64 atomic_t msglim_count;
65 atomic_t read_ready;
66 atomic_t iucv_connected;
67 atomic_t iucv_severed;
68};
69
70static unsigned long mon_in_use = 0;
71
72static unsigned long mon_dcss_start;
73static unsigned long mon_dcss_end;
74
75static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue);
76static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue);
77
78static u8 iucv_host[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
79
80static u8 user_data_connect[16] = {
81 /* Version code, must be 0x01 for shared mode */
82 0x01,
83 /* what to collect */
84 MON_COLLECT_SAMPLE | MON_COLLECT_EVENT,
85 /* DCSS name in EBCDIC, 8 bytes padded with blanks */
86 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
87 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
88};
89
90static u8 user_data_sever[16] = {
91 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
92 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
93};
94
95
96/******************************************************************************
97 * helper functions *
98 *****************************************************************************/
99/*
100 * Create the 8 bytes EBCDIC DCSS segment name from
101 * an ASCII name, incl. padding
102 */
103static inline void
104dcss_mkname(char *ascii_name, char *ebcdic_name)
105{
106 int i;
107
108 for (i = 0; i < 8; i++) {
109 if (ascii_name[i] == '\0')
110 break;
111 ebcdic_name[i] = toupper(ascii_name[i]);
112 }
113 for (; i < 8; i++)
114 ebcdic_name[i] = ' ';
115 ASCEBC(ebcdic_name, 8);
116}
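/*
 * Worked example (illustrative): mondcss="mycss" becomes "MYCSS   "
 * (uppercased and blank padded to 8 characters) and is then converted
 * in place to EBCDIC, for use in user_data_connect[] above.
 */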
117
118/*
119 * print appropriate error message for segment_load()/segment_type()
120 * return code
121 */
122static void
123mon_segment_warn(int rc, char* seg_name)
124{
125 switch (rc) {
126 case -ENOENT:
127 P_WARNING("cannot load/query segment %s, does not exist\n",
128 seg_name);
129 break;
130 case -ENOSYS:
131 P_WARNING("cannot load/query segment %s, not running on VM\n",
132 seg_name);
133 break;
134 case -EIO:
135 P_WARNING("cannot load/query segment %s, hardware error\n",
136 seg_name);
137 break;
138 case -ENOTSUPP:
139 P_WARNING("cannot load/query segment %s, is a multi-part "
140 "segment\n", seg_name);
141 break;
142 case -ENOSPC:
143 P_WARNING("cannot load/query segment %s, overlaps with "
144 "storage\n", seg_name);
145 break;
146 case -EBUSY:
147 P_WARNING("cannot load/query segment %s, overlaps with "
148 "already loaded dcss\n", seg_name);
149 break;
150 case -EPERM:
151 P_WARNING("cannot load/query segment %s, already loaded in "
152 "incompatible mode\n", seg_name);
153 break;
154 case -ENOMEM:
155 P_WARNING("cannot load/query segment %s, out of memory\n",
156 seg_name);
157 break;
158 case -ERANGE:
159 P_WARNING("cannot load/query segment %s, exceeds kernel "
160 "mapping range\n", seg_name);
161 break;
162 default:
163 P_WARNING("cannot load/query segment %s, return value %i\n",
164 seg_name, rc);
165 break;
166 }
167}
168
169static inline unsigned long
170mon_mca_start(struct mon_msg *monmsg)
171{
172 return monmsg->local_eib.ln1msg1.iprmmsg1_u32;
173}
174
175static inline unsigned long
176mon_mca_end(struct mon_msg *monmsg)
177{
178 return monmsg->local_eib.ln1msg2.ipbfln1f;
179}
180
181static inline u8
182mon_mca_type(struct mon_msg *monmsg, u8 index)
183{
184 return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index);
185}
186
187static inline u32
188mon_mca_size(struct mon_msg *monmsg)
189{
190 return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1;
191}
192
193static inline u32
194mon_rec_start(struct mon_msg *monmsg)
195{
196 return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4));
197}
198
199static inline u32
200mon_rec_end(struct mon_msg *monmsg)
201{
202 return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
203}
204
205static inline int
206mon_check_mca(struct mon_msg *monmsg)
207{
208 if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
209 (mon_rec_start(monmsg) < mon_dcss_start) ||
210 (mon_rec_end(monmsg) > mon_dcss_end) ||
211 (mon_mca_type(monmsg, 0) == 0) ||
212 (mon_mca_size(monmsg) % 12 != 0) ||
213 (mon_mca_end(monmsg) <= mon_mca_start(monmsg)) ||
214 (mon_mca_end(monmsg) > mon_dcss_end) ||
215 (mon_mca_start(monmsg) < mon_dcss_start) ||
216 ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0)))
217 {
218 P_DEBUG("READ, IGNORED INVALID MCA\n\n");
219 return -EINVAL;
220 }
221 return 0;
222}
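/*
 * Editor's note: the layout assumed by the accessors above is one or
 * more 12-byte MCA entries per message: bytes 0-3 hold the type flags,
 * bytes 4-7 the record start address and bytes 8-11 the record end
 * address within the DCSS, which is why mon_mca_size() must be a
 * multiple of 12.
 */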
223
224static inline int
225mon_send_reply(struct mon_msg *monmsg, struct mon_private *monpriv)
226{
227 u8 prmmsg[8];
228 int rc;
229
230 P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = "
231 "0x%08X\n\n",
232 monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid,
233 monmsg->local_eib.iptrgcls);
234 rc = iucv_reply_prmmsg(monmsg->local_eib.ippathid,
235 monmsg->local_eib.ipmsgid,
236 monmsg->local_eib.iptrgcls,
237 0, prmmsg);
238 atomic_dec(&monpriv->msglim_count);
239 if (likely(!monmsg->msglim_reached)) {
240 monmsg->pos = 0;
241 monmsg->mca_offset = 0;
242 monpriv->read_index = (monpriv->read_index + 1) %
243 MON_MSGLIM;
244 atomic_dec(&monpriv->read_ready);
245 } else
246 monmsg->replied_msglim = 1;
247 if (rc) {
248 P_ERROR("read, IUCV reply failed with rc = %i\n\n", rc);
249 return -EIO;
250 }
251 return 0;
252}
253
254static inline struct mon_private *
255mon_alloc_mem(void)
256{
257 int i,j;
258 struct mon_private *monpriv;
259
260 monpriv = kmalloc(sizeof(struct mon_private), GFP_KERNEL);
261 if (!monpriv) {
262 P_ERROR("no memory for monpriv\n");
263 return NULL;
264 }
265 memset(monpriv, 0, sizeof(struct mon_private));
266 for (i = 0; i < MON_MSGLIM; i++) {
267 monpriv->msg_array[i] = kmalloc(sizeof(struct mon_msg),
268 GFP_KERNEL);
269 if (!monpriv->msg_array[i]) {
270 P_ERROR("open, no memory for msg_array\n");
271 for (j = 0; j < i; j++)
272 kfree(monpriv->msg_array[j]);
273 return NULL;
274 }
275 memset(monpriv->msg_array[i], 0, sizeof(struct mon_msg));
276 }
277 return monpriv;
278}
279
280static inline void
281mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv)
282{
283#ifdef MON_DEBUG
284 u8 msg_type[2], mca_type;
285 unsigned long records_len;
286
287 records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1;
288
289 memcpy(msg_type, &monmsg->local_eib.iptrgcls, 2);
290 EBCASC(msg_type, 2);
291 mca_type = mon_mca_type(monmsg, 0);
292 EBCASC(&mca_type, 1);
293
294 P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n",
295 monpriv->read_index, monpriv->write_index);
296 P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n",
297 monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid,
298 monmsg->local_eib.iptrgcls);
299 P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n",
300 msg_type[0], msg_type[1], mca_type ? mca_type : 'X',
301 mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2));
302 P_DEBUG("read, MCA: start = 0x%lX, end = 0x%lX\n",
303 mon_mca_start(monmsg), mon_mca_end(monmsg));
304 P_DEBUG("read, REC: start = 0x%X, end = 0x%X, len = %lu\n\n",
305 mon_rec_start(monmsg), mon_rec_end(monmsg), records_len);
306 if (mon_mca_size(monmsg) > 12)
307 P_DEBUG("READ, MORE THAN ONE MCA\n\n");
308#endif
309}
310
311static inline void
312mon_next_mca(struct mon_msg *monmsg)
313{
314 if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
315 return;
316 P_DEBUG("READ, NEXT MCA\n\n");
317 monmsg->mca_offset += 12;
318 monmsg->pos = 0;
319}
320
321static inline struct mon_msg *
322mon_next_message(struct mon_private *monpriv)
323{
324 struct mon_msg *monmsg;
325
326 if (!atomic_read(&monpriv->read_ready))
327 return NULL;
328 monmsg = monpriv->msg_array[monpriv->read_index];
329 if (unlikely(monmsg->replied_msglim)) {
330 monmsg->replied_msglim = 0;
331 monmsg->msglim_reached = 0;
332 monmsg->pos = 0;
333 monmsg->mca_offset = 0;
334 P_WARNING("read, message limit reached\n");
335 monpriv->read_index = (monpriv->read_index + 1) %
336 MON_MSGLIM;
337 atomic_dec(&monpriv->read_ready);
338 return ERR_PTR(-EOVERFLOW);
339 }
340 return monmsg;
341}
342
343
344/******************************************************************************
345 * IUCV handler *
346 *****************************************************************************/
347static void
348mon_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data)
349{
350 struct mon_private *monpriv = (struct mon_private *) pgm_data;
351
352 P_DEBUG("IUCV connection completed\n");
353 P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = "
354 "0x%02X, Sample = 0x%02X\n",
355 eib->ipuser[0], eib->ipuser[1], eib->ipuser[2]);
356 atomic_set(&monpriv->iucv_connected, 1);
357 wake_up(&mon_conn_wait_queue);
358}
359
360static void
361mon_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data)
362{
363 struct mon_private *monpriv = (struct mon_private *) pgm_data;
364
365 P_ERROR("IUCV connection severed with rc = 0x%X\n",
366 (u8) eib->ipuser[0]);
367 atomic_set(&monpriv->iucv_severed, 1);
368 wake_up(&mon_conn_wait_queue);
369 wake_up_interruptible(&mon_read_wait_queue);
370}
371
372static void
373mon_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data)
374{
375 struct mon_private *monpriv = (struct mon_private *) pgm_data;
376
377 P_DEBUG("IUCV message pending\n");
378 memcpy(&monpriv->msg_array[monpriv->write_index]->local_eib, eib,
379 sizeof(iucv_MessagePending));
380 if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
381 P_WARNING("IUCV message pending, message limit (%i) reached\n",
382 MON_MSGLIM);
383 monpriv->msg_array[monpriv->write_index]->msglim_reached = 1;
384 }
385 monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM;
386 atomic_inc(&monpriv->read_ready);
387 wake_up_interruptible(&mon_read_wait_queue);
388}
389
390static iucv_interrupt_ops_t mon_iucvops = {
391 .ConnectionComplete = mon_iucv_ConnectionComplete,
392 .ConnectionSevered = mon_iucv_ConnectionSevered,
393 .MessagePending = mon_iucv_MessagePending,
394};
395
396/******************************************************************************
397 * file operations *
398 *****************************************************************************/
399static int
400mon_open(struct inode *inode, struct file *filp)
401{
402 int rc, i;
403 struct mon_private *monpriv;
404
405 /*
406 * only one user allowed
407 */
408 if (test_and_set_bit(MON_IN_USE, &mon_in_use))
409 return -EBUSY;
410
411 monpriv = mon_alloc_mem();
412 if (!monpriv)
413 return -ENOMEM;
414
415 /*
416 * Register with IUCV and connect to *MONITOR service
417 */
418 monpriv->iucv_handle = iucv_register_program("my_monreader ",
419 MON_SERVICE,
420 NULL,
421 &mon_iucvops,
422 monpriv);
423 if (!monpriv->iucv_handle) {
424 P_ERROR("failed to register with iucv driver\n");
425 rc = -EIO;
426 goto out_error;
427 }
428 P_INFO("open, registered with IUCV\n");
429
430 rc = iucv_connect(&monpriv->pathid, MON_MSGLIM, user_data_connect,
431 MON_SERVICE, iucv_host, IPRMDATA, NULL, NULL,
432 monpriv->iucv_handle, NULL);
433 if (rc) {
434 P_ERROR("iucv connection to *MONITOR failed with "
435 "IPUSER SEVER code = %i\n", rc);
436 rc = -EIO;
437 goto out_unregister;
438 }
439 /*
440 * Wait for connection confirmation
441 */
442 wait_event(mon_conn_wait_queue,
443 atomic_read(&monpriv->iucv_connected) ||
444 atomic_read(&monpriv->iucv_severed));
445 if (atomic_read(&monpriv->iucv_severed)) {
446 atomic_set(&monpriv->iucv_severed, 0);
447 atomic_set(&monpriv->iucv_connected, 0);
448 rc = -EIO;
449 goto out_unregister;
450 }
451 P_INFO("open, established connection to *MONITOR service\n\n");
452 filp->private_data = monpriv;
453 return nonseekable_open(inode, filp);
454
455out_unregister:
456 iucv_unregister_program(monpriv->iucv_handle);
457out_error:
458 for (i = 0; i < MON_MSGLIM; i++)
459 kfree(monpriv->msg_array[i]);
460 kfree(monpriv);
461 clear_bit(MON_IN_USE, &mon_in_use);
462 return rc;
463}
464
465static int
466mon_close(struct inode *inode, struct file *filp)
467{
468 int rc, i;
469 struct mon_private *monpriv = filp->private_data;
470
471 /*
472 * Close IUCV connection and unregister
473 */
474 rc = iucv_sever(monpriv->pathid, user_data_sever);
475 if (rc)
476 P_ERROR("close, iucv_sever failed with rc = %i\n", rc);
477 else
478 P_INFO("close, terminated connection to *MONITOR service\n");
479
480 rc = iucv_unregister_program(monpriv->iucv_handle);
481 if (rc)
482 P_ERROR("close, iucv_unregister failed with rc = %i\n", rc);
483 else
484 P_INFO("close, unregistered with IUCV\n");
485
486 atomic_set(&monpriv->iucv_severed, 0);
487 atomic_set(&monpriv->iucv_connected, 0);
488 atomic_set(&monpriv->read_ready, 0);
489 atomic_set(&monpriv->msglim_count, 0);
490 monpriv->write_index = 0;
491 monpriv->read_index = 0;
492
493 for (i = 0; i < MON_MSGLIM; i++)
494 kfree(monpriv->msg_array[i]);
495 kfree(monpriv);
496 clear_bit(MON_IN_USE, &mon_in_use);
497 return 0;
498}
499
500static ssize_t
501mon_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
502{
503 struct mon_private *monpriv = filp->private_data;
504 struct mon_msg *monmsg;
505 int ret;
506 u32 mce_start;
507
508 monmsg = mon_next_message(monpriv);
509 if (IS_ERR(monmsg))
510 return PTR_ERR(monmsg);
511
512 if (!monmsg) {
513 if (filp->f_flags & O_NONBLOCK)
514 return -EAGAIN;
515 ret = wait_event_interruptible(mon_read_wait_queue,
516 atomic_read(&monpriv->read_ready) ||
517 atomic_read(&monpriv->iucv_severed));
518 if (ret)
519 return ret;
520 if (unlikely(atomic_read(&monpriv->iucv_severed)))
521 return -EIO;
522 monmsg = monpriv->msg_array[monpriv->read_index];
523 }
524
525 if (!monmsg->pos) {
526 monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset;
527 mon_read_debug(monmsg, monpriv);
528 }
529 if (mon_check_mca(monmsg))
530 goto reply;
531
532 /* read monitor control element (12 bytes) first */
533 mce_start = mon_mca_start(monmsg) + monmsg->mca_offset;
534 if ((monmsg->pos >= mce_start) && (monmsg->pos < mce_start + 12)) {
535 count = min(count, (size_t) mce_start + 12 - monmsg->pos);
536 ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
537 count);
538 if (ret)
539 return -EFAULT;
540 monmsg->pos += count;
541 if (monmsg->pos == mce_start + 12)
542 monmsg->pos = mon_rec_start(monmsg);
543 goto out_copy;
544 }
545
546 /* read records */
547 if (monmsg->pos <= mon_rec_end(monmsg)) {
548 count = min(count, (size_t) mon_rec_end(monmsg) - monmsg->pos
549 + 1);
550 ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
551 count);
552 if (ret)
553 return -EFAULT;
554 monmsg->pos += count;
555 if (monmsg->pos > mon_rec_end(monmsg))
556 mon_next_mca(monmsg);
557 goto out_copy;
558 }
559reply:
560 ret = mon_send_reply(monmsg, monpriv);
561 return ret;
562
563out_copy:
564 *ppos += count;
565 return count;
566}
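/*
 * Illustrative userspace sketch (hypothetical device path and consumer):
 * each message frame delivers a 12-byte monitor control element followed
 * by the monitor records, and read() returns 0 at the end of a frame,
 * so a consumer simply loops:
 *
 *	int fd = open("/dev/monreader", O_RDONLY);
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		process(buf, n);	// hypothetical consumer
 */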
567
568static unsigned int
569mon_poll(struct file *filp, struct poll_table_struct *p)
570{
571 struct mon_private *monpriv = filp->private_data;
572
573 poll_wait(filp, &mon_read_wait_queue, p);
574 if (unlikely(atomic_read(&monpriv->iucv_severed)))
575 return POLLERR;
576 if (atomic_read(&monpriv->read_ready))
577 return POLLIN | POLLRDNORM;
578 return 0;
579}
580
581static struct file_operations mon_fops = {
582 .owner = THIS_MODULE,
583 .open = &mon_open,
584 .release = &mon_close,
585 .read = &mon_read,
586 .poll = &mon_poll,
587};
588
589static struct miscdevice mon_dev = {
590 .name = "monreader",
591 .devfs_name = "monreader",
592 .fops = &mon_fops,
593 .minor = MISC_DYNAMIC_MINOR,
594};
595
596/******************************************************************************
597 * module init/exit *
598 *****************************************************************************/
599static int __init
600mon_init(void)
601{
602 int rc;
603
604 if (!MACHINE_IS_VM) {
605 P_ERROR("not running under z/VM, driver not loaded\n");
606 return -ENODEV;
607 }
608
609 rc = segment_type(mon_dcss_name);
610 if (rc < 0) {
611 mon_segment_warn(rc, mon_dcss_name);
612 return rc;
613 }
614 if (rc != SEG_TYPE_SC) {
615 P_ERROR("segment %s has unsupported type, should be SC\n",
616 mon_dcss_name);
617 return -EINVAL;
618 }
619
620 rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
621 &mon_dcss_start, &mon_dcss_end);
622 if (rc < 0) {
623 mon_segment_warn(rc, mon_dcss_name);
624 return -EINVAL;
625 }
626 dcss_mkname(mon_dcss_name, &user_data_connect[8]);
627
628 rc = misc_register(&mon_dev);
629 if (rc < 0) {
630 P_ERROR("misc_register failed, rc = %i\n", rc);
631 goto out;
632 }
633 P_INFO("Loaded segment %s from %p to %p, size = %lu bytes\n",
634 mon_dcss_name, (void *) mon_dcss_start, (void *) mon_dcss_end,
635 mon_dcss_end - mon_dcss_start + 1);
636 return 0;
637
638out:
639 segment_unload(mon_dcss_name);
640 return rc;
641}
642
643static void __exit
644mon_exit(void)
645{
646 segment_unload(mon_dcss_name);
647 WARN_ON(misc_deregister(&mon_dev) != 0);
648 return;
649}
650
651
652module_init(mon_init);
653module_exit(mon_exit);
654
655module_param_string(mondcss, mon_dcss_name, 9, 0444);
656MODULE_PARM_DESC(mondcss, "Name of DCSS segment to be used for *MONITOR "
657 "service, max. 8 chars. Default is MONDCSS");
658
659MODULE_AUTHOR("Gerald Schaefer <geraldsc@de.ibm.com>");
660MODULE_DESCRIPTION("Character device driver for reading z/VM "
661 "monitor service records.");
662MODULE_LICENSE("GPL");
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
new file mode 100644
index 000000000000..8e16a9716686
--- /dev/null
+++ b/drivers/s390/char/raw3270.c
@@ -0,0 +1,1335 @@
1/*
2 * drivers/s390/char/raw3270.c
3 * IBM/3270 Driver - core functions.
4 *
5 * Author(s):
6 * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
7 * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
8 * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
9 */
10
11#include <linux/config.h>
12#include <linux/bootmem.h>
13#include <linux/module.h>
14#include <linux/err.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/list.h>
18#include <linux/slab.h>
19#include <linux/types.h>
20#include <linux/wait.h>
21
22#include <asm/ccwdev.h>
23#include <asm/cio.h>
24#include <asm/ebcdic.h>
25
26#include "raw3270.h"
27
28/* The main 3270 data structure. */
29struct raw3270 {
30 struct list_head list;
31 struct ccw_device *cdev;
32 int minor;
33
34 short model, rows, cols;
35 unsigned long flags;
36
37 struct list_head req_queue; /* Request queue. */
38 struct list_head view_list; /* List of available views. */
39 struct raw3270_view *view; /* Active view. */
40
41 struct timer_list timer; /* Device timer. */
42
43 unsigned char *ascebc; /* ascii -> ebcdic table */
44};
45
46/* raw3270->flags */
47#define RAW3270_FLAGS_14BITADDR 0 /* 14-bit buffer addresses */
48#define RAW3270_FLAGS_BUSY 1 /* Device busy, leave it alone */
49#define RAW3270_FLAGS_ATTN 2 /* Device sent an ATTN interrupt */
50#define RAW3270_FLAGS_READY 4 /* Device is usable by views */
51#define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */
52
53/* Semaphore to protect global data of raw3270 (devices, views, etc). */
54static DECLARE_MUTEX(raw3270_sem);
55
56/* List of 3270 devices. */
57static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices);
58
59/*
60 * Flag to indicate if the driver has been registered. Some operations
61 * like waiting for the end of i/o need to be done differently as long
62 * as the kernel is still starting up (console support).
63 */
64static int raw3270_registered;
65
66/* Module parameters */
67static int tubxcorrect = 0;
68module_param(tubxcorrect, bool, 0);
69
70/*
71 * Wait queue for device init/delete, view delete.
72 */
73DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
74
75/*
76 * Encode array for 12 bit 3270 addresses.
77 */
78unsigned char raw3270_ebcgraf[64] = {
79 0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
80 0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
81 0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
82 0xd8, 0xd9, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
83 0x60, 0x61, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
84 0xe8, 0xe9, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
85 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
86 0xf8, 0xf9, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f
87};
88
89void
90raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr)
91{
92 if (test_bit(RAW3270_FLAGS_14BITADDR, &rp->flags)) {
93 cp[0] = (addr >> 8) & 0x3f;
94 cp[1] = addr & 0xff;
95 } else {
96 cp[0] = raw3270_ebcgraf[(addr >> 6) & 0x3f];
97 cp[1] = raw3270_ebcgraf[addr & 0x3f];
98 }
99}
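/*
 * Worked example (illustrative): addr = 81 in 12-bit mode encodes as
 * raw3270_ebcgraf[81 >> 6] = 0xc1 and raw3270_ebcgraf[81 & 0x3f] =
 * 0xd1; with RAW3270_FLAGS_14BITADDR set, the same address is simply
 * the pair 0x00 0x51.
 */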
100
101/*
102 * Allocate a new 3270 ccw request
103 */
104struct raw3270_request *
105raw3270_request_alloc(size_t size)
106{
107 struct raw3270_request *rq;
108
109 /* Allocate request structure */
110 rq = kmalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA);
111 if (!rq)
112 return ERR_PTR(-ENOMEM);
113 memset(rq, 0, sizeof(struct raw3270_request));
114
115 /* alloc output buffer. */
116 if (size > 0) {
117 rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
118 if (!rq->buffer) {
119 kfree(rq);
120 return ERR_PTR(-ENOMEM);
121 }
122 }
123 rq->size = size;
124 INIT_LIST_HEAD(&rq->list);
125
126 /*
127 * Setup ccw.
128 */
129 rq->ccw.cda = __pa(rq->buffer);
130 rq->ccw.flags = CCW_FLAG_SLI;
131
132 return rq;
133}
134
135#ifdef CONFIG_TN3270_CONSOLE
136/*
137 * Allocate a new 3270 ccw request from bootmem. Only works very
138 * early in the boot process. Only con3270.c should be using this.
139 */
140struct raw3270_request *
141raw3270_request_alloc_bootmem(size_t size)
142{
143 struct raw3270_request *rq;
144
145 rq = alloc_bootmem_low(sizeof(struct raw3270_request));
146 if (!rq)
147 return ERR_PTR(-ENOMEM);
148 memset(rq, 0, sizeof(struct raw3270_request));
149
150 /* alloc output buffer. */
151 if (size > 0) {
152 rq->buffer = alloc_bootmem_low(size);
153 if (!rq->buffer) {
154 free_bootmem((unsigned long) rq,
155 sizeof(struct raw3270_request));
156 return ERR_PTR(-ENOMEM);
157 }
158 }
159 rq->size = size;
160 INIT_LIST_HEAD(&rq->list);
161
162 /*
163 * Setup ccw.
164 */
165 rq->ccw.cda = __pa(rq->buffer);
166 rq->ccw.flags = CCW_FLAG_SLI;
167
168 return rq;
169}
170#endif
171
172/*
173 * Free 3270 ccw request
174 */
175void
176raw3270_request_free (struct raw3270_request *rq)
177{
178 if (rq->buffer)
179 kfree(rq->buffer);
180 kfree(rq);
181}
182
183/*
184 * Reset request to initial state.
185 */
186void
187raw3270_request_reset(struct raw3270_request *rq)
188{
189 BUG_ON(!list_empty(&rq->list));
190 rq->ccw.cmd_code = 0;
191 rq->ccw.count = 0;
192 rq->ccw.cda = __pa(rq->buffer);
193 rq->ccw.flags = CCW_FLAG_SLI;
194 rq->rescnt = 0;
195 rq->rc = 0;
196}
197
198/*
199 * Set command code to ccw of a request.
200 */
201void
202raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd)
203{
204 rq->ccw.cmd_code = cmd;
205}
206
207/*
208 * Add data fragment to output buffer.
209 */
210int
211raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size)
212{
213 if (size + rq->ccw.count > rq->size)
214 return -E2BIG;
215 memcpy(rq->buffer + rq->ccw.count, data, size);
216 rq->ccw.count += size;
217 return 0;
218}
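/*
 * Illustrative use (a sketch, not from the original source; view, buf
 * and len are hypothetical): a view builds and starts a write request
 * roughly like this:
 *
 *	rq = raw3270_request_alloc(512);
 *	if (!IS_ERR(rq)) {
 *		raw3270_request_set_cmd(rq, TC_WRITE);
 *		raw3270_request_add_data(rq, buf, len);
 *		rc = raw3270_start(view, rq);
 *	}
 *
 * TC_WRITE is assumed to be one of the command codes from raw3270.h.
 */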
219
220/*
221 * Set address/length pair to ccw of a request.
222 */
223void
224raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size)
225{
226 rq->ccw.cda = __pa(data);
227 rq->ccw.count = size;
228}
229
230/*
231 * Set idal buffer to ccw of a request.
232 */
233void
234raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
235{
236 rq->ccw.cda = __pa(ib->data);
237 rq->ccw.count = ib->size;
238 rq->ccw.flags |= CCW_FLAG_IDA;
239}
240
241/*
242 * Stop running ccw.
243 */
244static int
245raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq)
246{
247 int retries;
248 int rc;
249
250 if (raw3270_request_final(rq))
251 return 0;
252 /* Check if interrupt has already been processed */
253 for (retries = 0; retries < 5; retries++) {
254 if (retries < 2)
255 rc = ccw_device_halt(rp->cdev, (long) rq);
256 else
257 rc = ccw_device_clear(rp->cdev, (long) rq);
258 if (rc == 0)
259 break; /* termination successful */
260 }
261 return rc;
262}
263
264static int
265raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
266{
267 unsigned long flags;
268 int rc;
269
270 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
271 rc = raw3270_halt_io_nolock(rp, rq);
272 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
273 return rc;
274}
275
276/*
277 * Add the request to the request queue, try to start it if the
278 * 3270 device is idle. Return without waiting for end of i/o.
279 */
280static int
281__raw3270_start(struct raw3270 *rp, struct raw3270_view *view,
282 struct raw3270_request *rq)
283{
284 rq->view = view;
285 raw3270_get_view(view);
286 if (list_empty(&rp->req_queue) &&
287 !test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
288 /* No other requests are on the queue. Start this one. */
289 rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
290 (unsigned long) rq, 0, 0);
291 if (rq->rc) {
292 raw3270_put_view(view);
293 return rq->rc;
294 }
295 }
296 list_add_tail(&rq->list, &rp->req_queue);
297 return 0;
298}
299
300int
301raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
302{
303 unsigned long flags;
304 struct raw3270 *rp;
305 int rc;
306
307 spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
308 rp = view->dev;
309 if (!rp || rp->view != view)
310 rc = -EACCES;
311 else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
312 rc = -ENODEV;
313 else
314 rc = __raw3270_start(rp, view, rq);
315 spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
316 return rc;
317}
318
319int
320raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq)
321{
322 struct raw3270 *rp;
323
324 rp = view->dev;
325 rq->view = view;
326 raw3270_get_view(view);
327 list_add_tail(&rq->list, &rp->req_queue);
328 return 0;
329}
330
331/*
332 * 3270 interrupt routine, called from the ccw_device layer
333 */
334static void
335raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
336{
337 struct raw3270 *rp;
338 struct raw3270_view *view;
339 struct raw3270_request *rq;
340 int rc;
341
342 rp = (struct raw3270 *) cdev->dev.driver_data;
343 if (!rp)
344 return;
345 rq = (struct raw3270_request *) intparm;
346 view = rq ? rq->view : rp->view;
347
348 if (IS_ERR(irb))
349 rc = RAW3270_IO_RETRY;
350 else if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) {
351 rq->rc = -EIO;
352 rc = RAW3270_IO_DONE;
353 } else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
354 DEV_STAT_UNIT_EXCEP)) {
355 /* Handle CE-DE-UE and subsequent UDE */
356 set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
357 rc = RAW3270_IO_BUSY;
358 } else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
359 /* Wait for UDE if busy flag is set. */
360 if (irb->scsw.dstat & DEV_STAT_DEV_END) {
361 clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
362 /* Got it, now retry. */
363 rc = RAW3270_IO_RETRY;
364 } else
365 rc = RAW3270_IO_BUSY;
366 } else if (view)
367 rc = view->fn->intv(view, rq, irb);
368 else
369 rc = RAW3270_IO_DONE;
370
371 switch (rc) {
372 case RAW3270_IO_DONE:
373 break;
374 case RAW3270_IO_BUSY:
375 /*
376 * Intervention required by the operator. We have to wait
377 * for unsolicited device end.
378 */
379 return;
380 case RAW3270_IO_RETRY:
381 if (!rq)
382 break;
383 rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
384 (unsigned long) rq, 0, 0);
385 if (rq->rc == 0)
386 return; /* Successfully restarted. */
387 break;
388 case RAW3270_IO_STOP:
389 if (!rq)
390 break;
391 raw3270_halt_io_nolock(rp, rq);
392 rq->rc = -EIO;
393 break;
394 default:
395 BUG();
396 }
397 if (rq) {
398 BUG_ON(list_empty(&rq->list));
399 /* The request completed, remove from queue and do callback. */
400 list_del_init(&rq->list);
401 if (rq->callback)
402 rq->callback(rq, rq->callback_data);
403 /* Do put_device for get_device in raw3270_start. */
404 raw3270_put_view(view);
405 }
406 /*
407 * Try to start each request on request queue until one is
408 * started successful.
409 */
410 while (!list_empty(&rp->req_queue)) {
411 rq = list_entry(rp->req_queue.next, struct raw3270_request, list);
412 rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
413 (unsigned long) rq, 0, 0);
414 if (rq->rc == 0)
415 break;
416 /* Start failed. Remove request and do callback. */
417 list_del_init(&rq->list);
418 if (rq->callback)
419 rq->callback(rq, rq->callback_data);
420 /* Do put_device for get_device in raw3270_start. */
421 raw3270_put_view(view);
422 }
423}
424
425/*
426 * Size sensing.
427 */
428
429struct raw3270_ua { /* Query Reply structure for Usable Area */
430 struct { /* Usable Area Query Reply Base */
431 short l; /* Length of this structured field */
432 char sfid; /* 0x81 if Query Reply */
433 char qcode; /* 0x81 if Usable Area */
434 char flags0;
435 char flags1;
436 short w; /* Width of usable area */
437 short h; /* Height of usable area */
438 char units; /* 0x00:in; 0x01:mm */
439 int xr;
440 int yr;
441 char aw;
442 char ah;
443 short buffsz; /* Character buffer size, bytes */
444 char xmin;
445 char ymin;
446 char xmax;
447 char ymax;
448 } __attribute__ ((packed)) uab;
449 struct { /* Alternate Usable Area Self-Defining Parameter */
450 char l; /* Length of this Self-Defining Parm */
451 char sdpid; /* 0x02 if Alternate Usable Area */
452 char res;
453 char auaid; /* 0x01 is Id for the A U A */
454 short wauai; /* Width of AUAi */
455 short hauai; /* Height of AUAi */
456 char auaunits; /* 0x00:in, 0x01:mm */
457 int auaxr;
458 int auayr;
459 char awauai;
460 char ahauai;
461 } __attribute__ ((packed)) aua;
462} __attribute__ ((packed));
463
464static unsigned char raw3270_init_data[256];
465static struct raw3270_request raw3270_init_request;
466static struct diag210 raw3270_init_diag210;
467static DECLARE_MUTEX(raw3270_init_sem);
468
469static int
470raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
471 struct irb *irb)
472{
473 /*
474 * Unit-Check Processing:
475 * Expect Command Reject or Intervention Required.
476 */
477 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
478 /* Request finished abnormally. */
479 if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
480 set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
481 return RAW3270_IO_BUSY;
482 }
483 }
484 if (rq) {
485 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
486 if (irb->ecw[0] & SNS0_CMD_REJECT)
487 rq->rc = -EOPNOTSUPP;
488 else
489 rq->rc = -EIO;
490 } else
491 /* Request finished normally. Copy residual count. */
492 rq->rescnt = irb->scsw.count;
493 }
494 if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
495 set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags);
496 wake_up(&raw3270_wait_queue);
497 }
498 return RAW3270_IO_DONE;
499}
500
501static struct raw3270_fn raw3270_init_fn = {
502 .intv = raw3270_init_irq
503};
504
505static struct raw3270_view raw3270_init_view = {
506 .fn = &raw3270_init_fn
507};
508
509/*
510 * raw3270_wait/raw3270_wait_interruptible/__raw3270_wakeup
511 * Wait for end of request. The request must have been started
512 * with raw3270_start, rc = 0. The device lock may NOT have been
513 * released between calling raw3270_start and raw3270_wait.
514 */
515static void
516raw3270_wake_init(struct raw3270_request *rq, void *data)
517{
518 wake_up((wait_queue_head_t *) data);
519}
520
521/*
522 * Special wait function that can cope with console initialization.
523 */
524static int
525raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
526 struct raw3270_request *rq)
527{
528 unsigned long flags;
529 wait_queue_head_t wq;
530 int rc;
531
532#ifdef CONFIG_TN3270_CONSOLE
533 if (raw3270_registered == 0) {
534 spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
535 rq->callback = 0;
536 rc = __raw3270_start(rp, view, rq);
537 if (rc == 0)
538 while (!raw3270_request_final(rq)) {
539 wait_cons_dev();
540 barrier();
541 }
542 spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
543 return rq->rc;
544 }
545#endif
546 init_waitqueue_head(&wq);
547 rq->callback = raw3270_wake_init;
548 rq->callback_data = &wq;
549 spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
550 rc = __raw3270_start(rp, view, rq);
551 spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
552 if (rc)
553 return rc;
554 /* Now wait for the completion. */
555 rc = wait_event_interruptible(wq, raw3270_request_final(rq));
556 if (rc == -ERESTARTSYS) { /* Interrupted by a signal. */
557 raw3270_halt_io(view->dev, rq);
558 /* Now wait for the halt to complete. */
559 wait_event(wq, raw3270_request_final(rq));
560 return -ERESTARTSYS;
561 }
562 return rq->rc;
563}
564
565static int
566__raw3270_size_device_vm(struct raw3270 *rp)
567{
568 int rc, model;
569
570 raw3270_init_diag210.vrdcdvno =
571 _ccw_device_get_device_number(rp->cdev);
572 raw3270_init_diag210.vrdclen = sizeof(struct diag210);
573 rc = diag210(&raw3270_init_diag210);
574 if (rc)
575 return rc;
576 model = raw3270_init_diag210.vrdccrmd;
577 switch (model) {
578 case 2:
579 rp->model = model;
580 rp->rows = 24;
581 rp->cols = 80;
582 break;
583 case 3:
584 rp->model = model;
585 rp->rows = 32;
586 rp->cols = 80;
587 break;
588 case 4:
589 rp->model = model;
590 rp->rows = 43;
591 rp->cols = 80;
592 break;
593 case 5:
594 rp->model = model;
595 rp->rows = 27;
596 rp->cols = 132;
597 break;
598 default:
599 printk(KERN_WARNING "vrdccrmd is 0x%.8x\n", model);
600 rc = -EOPNOTSUPP;
601 break;
602 }
603 return rc;
604}
605
606static int
607__raw3270_size_device(struct raw3270 *rp)
608{
609 static const unsigned char wbuf[] =
610 { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
611 struct raw3270_ua *uap;
612 unsigned short count;
613 int rc;
614
615 /*
616 * To determine the size of the 3270 device we need to do:
617 * 1) send a 'read partition' data stream to the device
618 * 2) wait for the attn interrupt that precedes the query reply
619 * 3) do a read modified to get the query reply
620 * To make things worse we have to cope with intervention
621 * required (3270 device switched to 'stand-by') and command
622 * rejects (old devices that can't do 'read partition').
623 */
624 memset(&raw3270_init_request, 0, sizeof(raw3270_init_request));
625 memset(raw3270_init_data, 0, sizeof(raw3270_init_data));
626 /* Store 'read partition' data stream to raw3270_init_data */
627 memcpy(raw3270_init_data, wbuf, sizeof(wbuf));
628 INIT_LIST_HEAD(&raw3270_init_request.list);
629 raw3270_init_request.ccw.cmd_code = TC_WRITESF;
630 raw3270_init_request.ccw.flags = CCW_FLAG_SLI;
631 raw3270_init_request.ccw.count = sizeof(wbuf);
632 raw3270_init_request.ccw.cda = (__u32) __pa(raw3270_init_data);
633
634 rc = raw3270_start_init(rp, &raw3270_init_view, &raw3270_init_request);
635 if (rc) {
636 /* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */
637 if (rc == -EOPNOTSUPP && MACHINE_IS_VM)
638 return __raw3270_size_device_vm(rp);
639 return rc;
640 }
641
642 /* Wait for attention interrupt. */
643#ifdef CONFIG_TN3270_CONSOLE
644 if (raw3270_registered == 0) {
645 unsigned long flags;
646
647 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
648 while (!test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags))
649 wait_cons_dev();
650 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
651 } else
652#endif
653 rc = wait_event_interruptible(raw3270_wait_queue,
654 test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags));
655 if (rc)
656 return rc;
657
658 /*
659 * The device accepted the 'read partition' command. Now
660 * set up a read ccw and issue it.
661 */
662 raw3270_init_request.ccw.cmd_code = TC_READMOD;
663 raw3270_init_request.ccw.flags = CCW_FLAG_SLI;
664 raw3270_init_request.ccw.count = sizeof(raw3270_init_data);
665 raw3270_init_request.ccw.cda = (__u32) __pa(raw3270_init_data);
666 rc = raw3270_start_init(rp, &raw3270_init_view, &raw3270_init_request);
667 if (rc)
668 return rc;
669 /* Got a Query Reply */
670 count = sizeof(raw3270_init_data) - raw3270_init_request.rescnt;
671 uap = (struct raw3270_ua *) (raw3270_init_data + 1);
672 /* Paranoia check. */
673 if (raw3270_init_data[0] != 0x88 || uap->uab.qcode != 0x81)
674 return -EOPNOTSUPP;
675 /* Copy rows/columns of default Usable Area */
676 rp->rows = uap->uab.h;
677 rp->cols = uap->uab.w;
678 /* Check for 14 bit addressing */
679 if ((uap->uab.flags0 & 0x0d) == 0x01)
680 set_bit(RAW3270_FLAGS_14BITADDR, &rp->flags);
681 /* Check for Alternate Usable Area */
682 if (uap->uab.l == sizeof(struct raw3270_ua) &&
683 uap->aua.sdpid == 0x02) {
684 rp->rows = uap->aua.hauai;
685 rp->cols = uap->aua.wauai;
686 }
687 return 0;
688}
689
690static int
691raw3270_size_device(struct raw3270 *rp)
692{
693 int rc;
694
695 down(&raw3270_init_sem);
696 rp->view = &raw3270_init_view;
697 raw3270_init_view.dev = rp;
698 rc = __raw3270_size_device(rp);
699 raw3270_init_view.dev = 0;
700 rp->view = 0;
701 up(&raw3270_init_sem);
702 if (rc == 0) { /* Found something. */
703 /* Try to find a model. */
704 rp->model = 0;
705 if (rp->rows == 24 && rp->cols == 80)
706 rp->model = 2;
707 if (rp->rows == 32 && rp->cols == 80)
708 rp->model = 3;
709 if (rp->rows == 43 && rp->cols == 80)
710 rp->model = 4;
711 if (rp->rows == 27 && rp->cols == 132)
712 rp->model = 5;
713 }
714 return rc;
715}
716
717static int
718raw3270_reset_device(struct raw3270 *rp)
719{
720 int rc;
721
722 down(&raw3270_init_sem);
723 memset(&raw3270_init_request, 0, sizeof(raw3270_init_request));
724 memset(raw3270_init_data, 0, sizeof(raw3270_init_data));
725 /* Store reset data stream to raw3270_init_data/raw3270_init_request */
726 raw3270_init_data[0] = TW_KR;
727 INIT_LIST_HEAD(&raw3270_init_request.list);
728 raw3270_init_request.ccw.cmd_code = TC_EWRITEA;
729 raw3270_init_request.ccw.flags = CCW_FLAG_SLI;
730 raw3270_init_request.ccw.count = 1;
731 raw3270_init_request.ccw.cda = (__u32) __pa(raw3270_init_data);
732 rp->view = &raw3270_init_view;
733 raw3270_init_view.dev = rp;
734 rc = raw3270_start_init(rp, &raw3270_init_view, &raw3270_init_request);
735 raw3270_init_view.dev = 0;
736 rp->view = 0;
737 up(&raw3270_init_sem);
738 return rc;
739}
740
741/*
742 * Setup new 3270 device.
743 */
744static int
745raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
746{
747 struct list_head *l;
748 struct raw3270 *tmp;
749 int minor;
750
751 memset(rp, 0, sizeof(struct raw3270));
752 /* Copy ebcdic -> ascii translation table. */
753 memcpy(ascebc, _ascebc, 256);
754 if (tubxcorrect) {
755 /* correct brackets and circumflex */
756 ascebc['['] = 0xad;
757 ascebc[']'] = 0xbd;
758 ascebc['^'] = 0xb0;
759 }
760 rp->ascebc = ascebc;
761
762 /* Set defaults. */
763 rp->rows = 24;
764 rp->cols = 80;
765
766 INIT_LIST_HEAD(&rp->req_queue);
767 INIT_LIST_HEAD(&rp->view_list);
768
769 /*
770 * Add device to list and find the smallest unused minor
771 * number for it.
772 */
773 down(&raw3270_sem);
774 /* Keep the list sorted. */
775 minor = 0;
776 rp->minor = -1;
777 list_for_each(l, &raw3270_devices) {
778 tmp = list_entry(l, struct raw3270, list);
779 if (tmp->minor > minor) {
780 rp->minor = minor;
781 __list_add(&rp->list, l->prev, l);
782 break;
783 }
784 minor++;
785 }
786 if (rp->minor == -1 && minor < RAW3270_MAXDEVS) {
787 rp->minor = minor;
788 list_add_tail(&rp->list, &raw3270_devices);
789 }
790 up(&raw3270_sem);
791 /* No free minor number? Then give up. */
792 if (rp->minor == -1)
793 return -EUSERS;
794 rp->cdev = cdev;
795 cdev->dev.driver_data = rp;
796 cdev->handler = raw3270_irq;
797 return 0;
798}
799
800#ifdef CONFIG_TN3270_CONSOLE
801/*
802 * Setup 3270 device configured as console.
803 */
804struct raw3270 *
805raw3270_setup_console(struct ccw_device *cdev)
806{
807 struct raw3270 *rp;
808 char *ascebc;
809 int rc;
810
811 rp = (struct raw3270 *) alloc_bootmem(sizeof(struct raw3270));
812 ascebc = (char *) alloc_bootmem(256);
813 rc = raw3270_setup_device(cdev, rp, ascebc);
814 if (rc)
815 return ERR_PTR(rc);
816 set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags);
817 rc = raw3270_reset_device(rp);
818 if (rc)
819 return ERR_PTR(rc);
820 rc = raw3270_size_device(rp);
821 if (rc)
822 return ERR_PTR(rc);
823 rc = raw3270_reset_device(rp);
824 if (rc)
825 return ERR_PTR(rc);
826 set_bit(RAW3270_FLAGS_READY, &rp->flags);
827 return rp;
828}
829
830void
831raw3270_wait_cons_dev(struct raw3270 *rp)
832{
833 unsigned long flags;
834
835 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
836 wait_cons_dev();
837 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
838}
839
840#endif
841
842/*
843 * Create a 3270 device structure.
844 */
845static struct raw3270 *
846raw3270_create_device(struct ccw_device *cdev)
847{
848 struct raw3270 *rp;
849 char *ascebc;
850 int rc;
851
852 rp = kmalloc(sizeof(struct raw3270), GFP_KERNEL);
853 if (!rp)
854 return ERR_PTR(-ENOMEM);
855 ascebc = kmalloc(256, GFP_KERNEL);
856 if (!ascebc) {
857 kfree(rp);
858 return ERR_PTR(-ENOMEM);
859 }
860 rc = raw3270_setup_device(cdev, rp, ascebc);
861 if (rc) {
862 kfree(rp->ascebc);
863 kfree(rp);
864 return ERR_PTR(rc);
865 }
866 /* Get reference to ccw_device structure. */
867 get_device(&cdev->dev);
868 return rp;
869}
870
871/*
872 * Activate a view.
873 */
874int
875raw3270_activate_view(struct raw3270_view *view)
876{
877 struct raw3270 *rp;
878 struct raw3270_view *oldview, *nv;
879 unsigned long flags;
880 int rc;
881
882 rp = view->dev;
883 if (!rp)
884 return -ENODEV;
885 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
886 if (rp->view == view)
887 rc = 0;
888 else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
889 rc = -ENODEV;
890 else {
891 oldview = 0;
892 if (rp->view) {
893 oldview = rp->view;
894 oldview->fn->deactivate(oldview);
895 }
896 rp->view = view;
897 rc = view->fn->activate(view);
898 if (rc) {
899 /* Didn't work. Try to reactivate the old view. */
900 rp->view = oldview;
901 if (!oldview || oldview->fn->activate(oldview) != 0) {
902 /* Didn't work as well. Try any other view. */
903 list_for_each_entry(nv, &rp->view_list, list)
904 if (nv != view && nv != oldview) {
905 rp->view = nv;
906 if (nv->fn->activate(nv) == 0)
907 break;
908 rp->view = 0;
909 }
910 }
911 }
912 }
913 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
914 return rc;
915}
916
917/*
918 * Deactivate current view.
919 */
920void
921raw3270_deactivate_view(struct raw3270_view *view)
922{
923 unsigned long flags;
924 struct raw3270 *rp;
925
926 rp = view->dev;
927 if (!rp)
928 return;
929 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
930 if (rp->view == view) {
931 view->fn->deactivate(view);
932 rp->view = 0;
933 /* Move deactivated view to end of list. */
934 list_del_init(&view->list);
935 list_add_tail(&view->list, &rp->view_list);
936 /* Try to activate another view. */
937 if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
938 list_for_each_entry(view, &rp->view_list, list)
939 if (view->fn->activate(view) == 0) {
940 rp->view = view;
941 break;
942 }
943 }
944 }
945 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
946}
947
948/*
949 * Add view to device with minor "minor".
950 */
951int
952raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
953{
954 unsigned long flags;
955 struct raw3270 *rp;
956 int rc;
957
958 down(&raw3270_sem);
959 rc = -ENODEV;
960 list_for_each_entry(rp, &raw3270_devices, list) {
961 if (rp->minor != minor)
962 continue;
963 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
964 if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
965 atomic_set(&view->ref_count, 2);
966 view->dev = rp;
967 view->fn = fn;
968 view->model = rp->model;
969 view->rows = rp->rows;
970 view->cols = rp->cols;
971 view->ascebc = rp->ascebc;
972 spin_lock_init(&view->lock);
973 list_add_tail(&view->list, &rp->view_list);
974 rc = 0;
975 }
976 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
977 break;
978 }
979 up(&raw3270_sem);
980 return rc;
981}
982
983/*
984 * Find specific view of device with minor "minor".
985 */
986struct raw3270_view *
987raw3270_find_view(struct raw3270_fn *fn, int minor)
988{
989 struct raw3270 *rp;
990 struct raw3270_view *view, *tmp;
991 unsigned long flags;
992
993 down(&raw3270_sem);
994 view = ERR_PTR(-ENODEV);
995 list_for_each_entry(rp, &raw3270_devices, list) {
996 if (rp->minor != minor)
997 continue;
998 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
999 if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
1000 view = ERR_PTR(-ENOENT);
1001 list_for_each_entry(tmp, &rp->view_list, list) {
1002 if (tmp->fn == fn) {
1003 raw3270_get_view(tmp);
1004 view = tmp;
1005 break;
1006 }
1007 }
1008 }
1009 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
1010 break;
1011 }
1012 up(&raw3270_sem);
1013 return view;
1014}
1015
1016/*
1017 * Remove view from device and free view structure via call to view->fn->free.
1018 */
1019void
1020raw3270_del_view(struct raw3270_view *view)
1021{
1022 unsigned long flags;
1023 struct raw3270 *rp;
1024 struct raw3270_view *nv;
1025
1026 rp = view->dev;
1027 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
1028 if (rp->view == view) {
1029 view->fn->deactivate(view);
		rp->view = NULL;
1031 }
1032 list_del_init(&view->list);
1033 if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
1034 /* Try to activate another view. */
1035 list_for_each_entry(nv, &rp->view_list, list) {
						if (nv->fn->activate(nv) == 0) {
1037 rp->view = nv;
1038 break;
1039 }
1040 }
1041 }
1042 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
1043 /* Wait for reference counter to drop to zero. */
1044 atomic_dec(&view->ref_count);
1045 wait_event(raw3270_wait_queue, atomic_read(&view->ref_count) == 0);
1046 if (view->fn->free)
1047 view->fn->free(view);
1048}
1049
1050/*
1051 * Remove a 3270 device structure.
1052 */
1053static void
1054raw3270_delete_device(struct raw3270 *rp)
1055{
1056 struct ccw_device *cdev;
1057
1058 /* Remove from device chain. */
1059 down(&raw3270_sem);
1060 list_del_init(&rp->list);
1061 up(&raw3270_sem);
1062
1063 /* Disconnect from ccw_device. */
1064 cdev = rp->cdev;
	rp->cdev = NULL;
	cdev->dev.driver_data = NULL;
	cdev->handler = NULL;
1068
1069 /* Put ccw_device structure. */
1070 put_device(&cdev->dev);
1071
1072 /* Now free raw3270 structure. */
1073 kfree(rp->ascebc);
1074 kfree(rp);
1075}
1076
1077static int
raw3270_probe(struct ccw_device *cdev)
1079{
1080 return 0;
1081}
1082
1083/*
1084 * Additional attributes for a 3270 device
1085 */
1086static ssize_t
1087raw3270_model_show(struct device *dev, char *buf)
1088{
1089 return snprintf(buf, PAGE_SIZE, "%i\n",
1090 ((struct raw3270 *) dev->driver_data)->model);
1091}
static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL);
1093
1094static ssize_t
1095raw3270_rows_show(struct device *dev, char *buf)
1096{
1097 return snprintf(buf, PAGE_SIZE, "%i\n",
1098 ((struct raw3270 *) dev->driver_data)->rows);
1099}
static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL);
1101
1102static ssize_t
1103raw3270_columns_show(struct device *dev, char *buf)
1104{
1105 return snprintf(buf, PAGE_SIZE, "%i\n",
1106 ((struct raw3270 *) dev->driver_data)->cols);
1107}
static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL);
1109
static struct attribute *raw3270_attrs[] = {
1111 &dev_attr_model.attr,
1112 &dev_attr_rows.attr,
1113 &dev_attr_columns.attr,
1114 NULL,
1115};
1116
1117static struct attribute_group raw3270_attr_group = {
1118 .attrs = raw3270_attrs,
1119};
1120
1121static void
1122raw3270_create_attributes(struct raw3270 *rp)
1123{
	/* FIXME: check the return code */
	sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
1126}
1127
1128/*
1129 * Notifier for device addition/removal
1130 */
1131struct raw3270_notifier {
1132 struct list_head list;
1133 void (*notifier)(int, int);
1134};
1135
1136static struct list_head raw3270_notifier = LIST_HEAD_INIT(raw3270_notifier);
1137
1138int raw3270_register_notifier(void (*notifier)(int, int))
1139{
1140 struct raw3270_notifier *np;
1141 struct raw3270 *rp;
1142
1143 np = kmalloc(sizeof(struct raw3270_notifier), GFP_KERNEL);
1144 if (!np)
1145 return -ENOMEM;
1146 np->notifier = notifier;
1147 down(&raw3270_sem);
1148 list_add_tail(&np->list, &raw3270_notifier);
1149 list_for_each_entry(rp, &raw3270_devices, list) {
1150 get_device(&rp->cdev->dev);
1151 notifier(rp->minor, 1);
1152 }
1153 up(&raw3270_sem);
1154 return 0;
1155}
1156
1157void raw3270_unregister_notifier(void (*notifier)(int, int))
1158{
1159 struct raw3270_notifier *np;
1160
1161 down(&raw3270_sem);
1162 list_for_each_entry(np, &raw3270_notifier, list)
1163 if (np->notifier == notifier) {
1164 list_del(&np->list);
1165 kfree(np);
1166 break;
1167 }
1168 up(&raw3270_sem);
1169}
1170
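/*
 * Hypothetical notifier sketch, not part of the driver: the callback
 * receives the device minor number and 1 on addition, 0 on removal.
 * Note that raw3270_register_notifier() replays all existing devices
 * to a newly registered callback.
 */
static void
example_notify(int minor, int onoff)
{
	if (onoff)
		printk(KERN_INFO "3270 device %i appeared\n", minor);
	else
		printk(KERN_INFO "3270 device %i vanished\n", minor);
}
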
1171/*
1172 * Set 3270 device online.
1173 */
1174static int
raw3270_set_online(struct ccw_device *cdev)
1176{
1177 struct raw3270 *rp;
1178 struct raw3270_notifier *np;
1179 int rc;
1180
1181 rp = raw3270_create_device(cdev);
1182 if (IS_ERR(rp))
1183 return PTR_ERR(rp);
	rc = raw3270_reset_device(rp);
	if (rc)
		goto out_delete;
	rc = raw3270_size_device(rp);
	if (rc)
		goto out_delete;
	rc = raw3270_reset_device(rp);
	if (rc)
		goto out_delete;
1193 raw3270_create_attributes(rp);
1194 set_bit(RAW3270_FLAGS_READY, &rp->flags);
1195 down(&raw3270_sem);
1196 list_for_each_entry(np, &raw3270_notifier, list)
1197 np->notifier(rp->minor, 1);
1198 up(&raw3270_sem);
	return 0;

out_delete:
	/* Undo raw3270_create_device() so the error path does not leak rp. */
	raw3270_delete_device(rp);
	return rc;
}
1201
1202/*
1203 * Remove 3270 device structure.
1204 */
1205static void
raw3270_remove(struct ccw_device *cdev)
1207{
1208 unsigned long flags;
1209 struct raw3270 *rp;
1210 struct raw3270_view *v;
1211 struct raw3270_notifier *np;
1212
1213 rp = cdev->dev.driver_data;
1214 clear_bit(RAW3270_FLAGS_READY, &rp->flags);
1215
1216 sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group);
1217
1218 /* Deactivate current view and remove all views. */
1219 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1220 if (rp->view) {
1221 rp->view->fn->deactivate(rp->view);
		rp->view = NULL;
1223 }
1224 while (!list_empty(&rp->view_list)) {
1225 v = list_entry(rp->view_list.next, struct raw3270_view, list);
1226 if (v->fn->release)
1227 v->fn->release(v);
1228 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1229 raw3270_del_view(v);
1230 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1231 }
1232 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1233
1234 down(&raw3270_sem);
1235 list_for_each_entry(np, &raw3270_notifier, list)
1236 np->notifier(rp->minor, 0);
1237 up(&raw3270_sem);
1238
1239 /* Reset 3270 device. */
1240 raw3270_reset_device(rp);
1241 /* And finally remove it. */
1242 raw3270_delete_device(rp);
1243}
1244
1245/*
1246 * Set 3270 device offline.
1247 */
1248static int
raw3270_set_offline(struct ccw_device *cdev)
1250{
1251 struct raw3270 *rp;
1252
1253 rp = cdev->dev.driver_data;
1254 if (test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags))
1255 return -EBUSY;
1256 raw3270_remove(cdev);
1257 return 0;
1258}
1259
1260static struct ccw_device_id raw3270_id[] = {
1261 { CCW_DEVICE(0x3270, 0) },
1262 { CCW_DEVICE(0x3271, 0) },
1263 { CCW_DEVICE(0x3272, 0) },
1264 { CCW_DEVICE(0x3273, 0) },
1265 { CCW_DEVICE(0x3274, 0) },
1266 { CCW_DEVICE(0x3275, 0) },
1267 { CCW_DEVICE(0x3276, 0) },
1268 { CCW_DEVICE(0x3277, 0) },
1269 { CCW_DEVICE(0x3278, 0) },
1270 { CCW_DEVICE(0x3279, 0) },
1271 { CCW_DEVICE(0x3174, 0) },
1272 { /* end of list */ },
1273};
1274
1275static struct ccw_driver raw3270_ccw_driver = {
1276 .name = "3270",
1277 .owner = THIS_MODULE,
1278 .ids = raw3270_id,
1279 .probe = &raw3270_probe,
1280 .remove = &raw3270_remove,
1281 .set_online = &raw3270_set_online,
1282 .set_offline = &raw3270_set_offline,
1283};
1284
1285static int
1286raw3270_init(void)
1287{
1288 struct raw3270 *rp;
1289 int rc;
1290
1291 if (raw3270_registered)
1292 return 0;
1293 raw3270_registered = 1;
1294 rc = ccw_driver_register(&raw3270_ccw_driver);
1295 if (rc == 0) {
1296 /* Create attributes for early (= console) device. */
1297 down(&raw3270_sem);
1298 list_for_each_entry(rp, &raw3270_devices, list) {
1299 get_device(&rp->cdev->dev);
1300 raw3270_create_attributes(rp);
1301 }
1302 up(&raw3270_sem);
1303 }
1304 return rc;
1305}
1306
1307static void
1308raw3270_exit(void)
1309{
1310 ccw_driver_unregister(&raw3270_ccw_driver);
1311}
1312
1313MODULE_LICENSE("GPL");
1314
1315module_init(raw3270_init);
1316module_exit(raw3270_exit);
1317
1318EXPORT_SYMBOL(raw3270_request_alloc);
1319EXPORT_SYMBOL(raw3270_request_free);
1320EXPORT_SYMBOL(raw3270_request_reset);
1321EXPORT_SYMBOL(raw3270_request_set_cmd);
1322EXPORT_SYMBOL(raw3270_request_add_data);
1323EXPORT_SYMBOL(raw3270_request_set_data);
1324EXPORT_SYMBOL(raw3270_request_set_idal);
1325EXPORT_SYMBOL(raw3270_buffer_address);
1326EXPORT_SYMBOL(raw3270_add_view);
1327EXPORT_SYMBOL(raw3270_del_view);
1328EXPORT_SYMBOL(raw3270_find_view);
1329EXPORT_SYMBOL(raw3270_activate_view);
1330EXPORT_SYMBOL(raw3270_deactivate_view);
1331EXPORT_SYMBOL(raw3270_start);
1332EXPORT_SYMBOL(raw3270_start_irq);
1333EXPORT_SYMBOL(raw3270_register_notifier);
1334EXPORT_SYMBOL(raw3270_unregister_notifier);
1335EXPORT_SYMBOL(raw3270_wait_queue);
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
new file mode 100644
index 000000000000..ed5d4eb9f623
--- /dev/null
+++ b/drivers/s390/char/raw3270.h
@@ -0,0 +1,274 @@
1/*
2 * drivers/s390/char/raw3270.h
3 * IBM/3270 Driver
4 *
5 * Author(s):
6 * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
7 * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
8 * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
9 */
10
11#include <asm/idals.h>
12#include <asm/ioctl.h>
13
14/* ioctls for fullscreen 3270 */
15#define TUBICMD _IO('3', 3) /* set ccw command for fs reads. */
16#define TUBOCMD _IO('3', 4) /* set ccw command for fs writes. */
17#define TUBGETI _IO('3', 7) /* get ccw command for fs reads. */
18#define TUBGETO _IO('3', 8) /* get ccw command for fs writes. */
19#define TUBSETMOD _IO('3',12) /* FIXME: what does it do ?*/
20#define TUBGETMOD _IO('3',13) /* FIXME: what does it do ?*/
21
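/*
 * Hypothetical user-space sketch (not part of this header): pick the
 * CCW opcodes used for fullscreen reads and writes on an open 3270
 * device node. The path and opcode values are illustrative only.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *
 *	int example_setup(const char *devnode)
 *	{
 *		int fd = open(devnode, O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		ioctl(fd, TUBICMD, 0x06);	(read modified, TC_READMOD)
 *		ioctl(fd, TUBOCMD, 0x05);	(erase write, TC_EWRITE)
 *		return fd;
 *	}
 */
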
22/* Local Channel Commands */
23#define TC_WRITE 0x01 /* Write */
24#define TC_EWRITE 0x05 /* Erase write */
25#define TC_READMOD 0x06 /* Read modified */
26#define TC_EWRITEA 0x0d /* Erase write alternate */
27#define TC_WRITESF 0x11 /* Write structured field */
28
29/* Buffer Control Orders */
30#define TO_SF 0x1d /* Start field */
31#define TO_SBA 0x11 /* Set buffer address */
32#define TO_IC 0x13 /* Insert cursor */
33#define TO_PT 0x05 /* Program tab */
34#define TO_RA 0x3c /* Repeat to address */
35#define TO_SFE 0x29 /* Start field extended */
36#define TO_EUA 0x12 /* Erase unprotected to address */
37#define TO_MF 0x2c /* Modify field */
38#define TO_SA 0x28 /* Set attribute */
39
40/* Field Attribute Bytes */
41#define TF_INPUT 0x40 /* Visible input */
42#define TF_INPUTN 0x4c /* Invisible input */
43#define TF_INMDT 0xc1 /* Visible, Set-MDT */
44#define TF_LOG 0x60
45
46/* Character Attribute Bytes */
47#define TAT_RESET 0x00
48#define TAT_FIELD 0xc0
49#define TAT_EXTHI 0x41
50#define TAT_COLOR 0x42
51#define TAT_CHARS 0x43
52#define TAT_TRANS 0x46
53
54/* Extended-Highlighting Bytes */
55#define TAX_RESET 0x00
56#define TAX_BLINK 0xf1
57#define TAX_REVER 0xf2
58#define TAX_UNDER 0xf4
59
60/* Reset value */
61#define TAR_RESET 0x00
62
63/* Color values */
64#define TAC_RESET 0x00
65#define TAC_BLUE 0xf1
66#define TAC_RED 0xf2
67#define TAC_PINK 0xf3
68#define TAC_GREEN 0xf4
69#define TAC_TURQ 0xf5
70#define TAC_YELLOW 0xf6
71#define TAC_WHITE 0xf7
72#define TAC_DEFAULT 0x00
73
74/* Write Control Characters */
75#define TW_NONE 0x40 /* No particular action */
76#define TW_KR 0xc2 /* Keyboard restore */
77#define TW_PLUSALARM 0x04 /* Add this bit for alarm */
78
79#define RAW3270_MAXDEVS 256
80
/* For TUBGETMOD and TUBSETMOD. Should be moved to an exported header. */
82struct raw3270_iocb {
83 short model;
84 short line_cnt;
85 short col_cnt;
86 short pf_cnt;
87 short re_cnt;
88 short map;
89};
90
91struct raw3270;
92struct raw3270_view;
93
94/* 3270 CCW request */
95struct raw3270_request {
96 struct list_head list; /* list head for request queueing. */
97 struct raw3270_view *view; /* view of this request */
98 struct ccw1 ccw; /* single ccw. */
99 void *buffer; /* output buffer. */
100 size_t size; /* size of output buffer. */
101 int rescnt; /* residual count from devstat. */
102 int rc; /* return code for this request. */
103
104 /* Callback for delivering final status. */
105 void (*callback)(struct raw3270_request *, void *);
106 void *callback_data;
107};
108
109struct raw3270_request *raw3270_request_alloc(size_t size);
110struct raw3270_request *raw3270_request_alloc_bootmem(size_t size);
111void raw3270_request_free(struct raw3270_request *);
112void raw3270_request_reset(struct raw3270_request *);
113void raw3270_request_set_cmd(struct raw3270_request *, u8 cmd);
114int raw3270_request_add_data(struct raw3270_request *, void *, size_t);
115void raw3270_request_set_data(struct raw3270_request *, void *, size_t);
116void raw3270_request_set_idal(struct raw3270_request *, struct idal_buffer *);
117
118static inline int
119raw3270_request_final(struct raw3270_request *rq)
120{
121 return list_empty(&rq->list);
122}
123
124void raw3270_buffer_address(struct raw3270 *, char *, unsigned short);
125
126/* Return value of *intv (see raw3270_fn below) can be one of the following: */
127#define RAW3270_IO_DONE 0 /* request finished */
128#define RAW3270_IO_BUSY 1 /* request still active */
129#define RAW3270_IO_RETRY 2 /* retry current request */
130#define RAW3270_IO_STOP 3 /* kill current request */
131
132/*
133 * Functions of a 3270 view.
134 */
135struct raw3270_fn {
136 int (*activate)(struct raw3270_view *);
137 void (*deactivate)(struct raw3270_view *);
138 int (*intv)(struct raw3270_view *,
139 struct raw3270_request *, struct irb *);
140 void (*release)(struct raw3270_view *);
141 void (*free)(struct raw3270_view *);
142};
143
144/*
145 * View structure chaining. The raw3270_view structure is meant to
146 * be embedded at the start of the real view data structure, e.g.:
147 * struct example {
148 * struct raw3270_view view;
149 * ...
150 * };
151 */
152struct raw3270_view {
153 struct list_head list;
154 spinlock_t lock;
155 atomic_t ref_count;
156 struct raw3270 *dev;
157 struct raw3270_fn *fn;
158 unsigned int model;
	unsigned int rows, cols;	/* # of rows & columns of the view */
160 unsigned char *ascebc; /* ascii -> ebcdic table */
161};
162
163int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int);
164int raw3270_activate_view(struct raw3270_view *);
165void raw3270_del_view(struct raw3270_view *);
166void raw3270_deactivate_view(struct raw3270_view *);
167struct raw3270_view *raw3270_find_view(struct raw3270_fn *, int);
168int raw3270_start(struct raw3270_view *, struct raw3270_request *);
169int raw3270_start_irq(struct raw3270_view *, struct raw3270_request *);
170
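/*
 * Hypothetical request sketch, not part of the driver: allocate a
 * request, fill it with an erase-write channel program and start it on
 * a view; the request is freed from its completion callback. Assumes
 * raw3270_request_alloc() returns an ERR_PTR() on failure; "example"
 * names are illustrative only.
 */
static void
example_done(struct raw3270_request *rq, void *data)
{
	/* Called once the channel program finished; rq->rc holds the result. */
	raw3270_request_free(rq);
}

static int
example_write(struct raw3270_view *view, void *data, size_t len)
{
	struct raw3270_request *rq;
	int rc;

	rq = raw3270_request_alloc(len);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	raw3270_request_set_cmd(rq, TC_EWRITE);
	rc = raw3270_request_add_data(rq, data, len);
	if (rc == 0) {
		rq->callback = example_done;
		rc = raw3270_start(view, rq);
	}
	if (rc)
		raw3270_request_free(rq);
	return rc;
}
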
171/* Reference count inliner for view structures. */
172static inline void
173raw3270_get_view(struct raw3270_view *view)
174{
175 atomic_inc(&view->ref_count);
176}
177
178extern wait_queue_head_t raw3270_wait_queue;
179
180static inline void
181raw3270_put_view(struct raw3270_view *view)
182{
183 if (atomic_dec_return(&view->ref_count) == 0)
184 wake_up(&raw3270_wait_queue);
185}
186
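/*
 * Hypothetical sketch, not part of the driver: a minimal view that
 * embeds struct raw3270_view first, so the callbacks can cast back to
 * the private structure. Error handling and the intv/release/free
 * callbacks are omitted; "example" names are illustrative only.
 */
struct example_view {
	struct raw3270_view view;
	int dirty;			/* private per-view state */
};

static int
example_activate(struct raw3270_view *view)
{
	struct example_view *ev = (struct example_view *) view;

	ev->dirty = 1;			/* e.g. schedule a full redraw */
	return 0;
}

static void
example_deactivate(struct raw3270_view *view)
{
	((struct example_view *) view)->dirty = 0;
}

static struct raw3270_fn example_fn = {
	.activate = example_activate,
	.deactivate = example_deactivate,
};

/*
 * Attach to the device with minor "minor". raw3270_find_view() would
 * later return this view with its reference count raised, to be
 * dropped again with raw3270_put_view().
 */
static int
example_add(struct example_view *ev, int minor)
{
	return raw3270_add_view(&ev->view, &example_fn, minor);
}
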
187struct raw3270 *raw3270_setup_console(struct ccw_device *cdev);
188void raw3270_wait_cons_dev(struct raw3270 *);
189
190/* Notifier for device addition/removal */
191int raw3270_register_notifier(void (*notifier)(int, int));
192void raw3270_unregister_notifier(void (*notifier)(int, int));
193
194/*
195 * Little memory allocator for string objects.
196 */
197struct string
198{
199 struct list_head list;
200 struct list_head update;
201 unsigned long size;
202 unsigned long len;
203 char string[0];
204} __attribute__ ((aligned(8)));
205
206static inline struct string *
207alloc_string(struct list_head *free_list, unsigned long len)
208{
209 struct string *cs, *tmp;
210 unsigned long size;
211
212 size = (len + 7L) & -8L;
213 list_for_each_entry(cs, free_list, list) {
214 if (cs->size < size)
215 continue;
216 if (cs->size > size + sizeof(struct string)) {
217 char *endaddr = (char *) (cs + 1) + cs->size;
218 tmp = (struct string *) (endaddr - size) - 1;
219 tmp->size = size;
220 cs->size -= size + sizeof(struct string);
221 cs = tmp;
222 } else
223 list_del(&cs->list);
224 cs->len = len;
225 INIT_LIST_HEAD(&cs->list);
226 INIT_LIST_HEAD(&cs->update);
227 return cs;
228 }
	return NULL;
230}
231
232static inline unsigned long
233free_string(struct list_head *free_list, struct string *cs)
234{
235 struct string *tmp;
236 struct list_head *p, *left;
237
238 /* Find out the left neighbour in free memory list. */
239 left = free_list;
240 list_for_each(p, free_list) {
241 if (list_entry(p, struct string, list) > cs)
242 break;
243 left = p;
244 }
245 /* Try to merge with right neighbour = next element from left. */
246 if (left->next != free_list) {
247 tmp = list_entry(left->next, struct string, list);
248 if ((char *) (cs + 1) + cs->size == (char *) tmp) {
249 list_del(&tmp->list);
250 cs->size += tmp->size + sizeof(struct string);
251 }
252 }
253 /* Try to merge with left neighbour. */
254 if (left != free_list) {
255 tmp = list_entry(left, struct string, list);
256 if ((char *) (tmp + 1) + tmp->size == (char *) cs) {
257 tmp->size += cs->size + sizeof(struct string);
258 return tmp->size;
259 }
260 }
261 __list_add(&cs->list, left, left->next);
262 return cs->size;
263}
264
265static inline void
266add_string_memory(struct list_head *free_list, void *mem, unsigned long size)
267{
268 struct string *cs;
269
270 cs = (struct string *) mem;
271 cs->size = size - sizeof(struct string);
272 free_string(free_list, cs);
273}
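
/*
 * Hypothetical usage sketch, not part of the driver: back the
 * allocator with one kmalloc'd chunk, then allocate and release a
 * string object. "example" names are illustrative only.
 */
static LIST_HEAD(example_free_list);

static void
example_string_usage(void)
{
	struct string *s;
	void *mem;

	mem = kmalloc(4096, GFP_KERNEL);
	if (!mem)
		return;
	/* Seed the free list with the whole chunk. */
	add_string_memory(&example_free_list, mem, 4096);
	/* Carve out space for a 16 byte payload. */
	s = alloc_string(&example_free_list, 16);
	if (s) {
		memcpy(s->string, "example payload", 16);
		/* Return the object; adjacent free blocks are merged. */
		free_string(&example_free_list, s);
	}
	/* Detach the (now dangling) free-list entries before freeing. */
	INIT_LIST_HEAD(&example_free_list);
	kfree(mem);
}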
274
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
new file mode 100644
index 000000000000..ceb0e474fde4
--- /dev/null
+++ b/drivers/s390/char/sclp.c
@@ -0,0 +1,915 @@
1/*
2 * drivers/s390/char/sclp.c
3 * core function to access sclp interface
4 *
5 * S390 version
6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/err.h>
13#include <linux/spinlock.h>
14#include <linux/interrupt.h>
15#include <linux/timer.h>
16#include <linux/reboot.h>
17#include <linux/jiffies.h>
18#include <asm/types.h>
19#include <asm/s390_ext.h>
20
21#include "sclp.h"
22
23#define SCLP_HEADER "sclp: "
24
25/* Structure for register_early_external_interrupt. */
26static ext_int_info_t ext_int_info_hwc;
27
28/* Lock to protect internal data consistency. */
29static DEFINE_SPINLOCK(sclp_lock);
30
31/* Mask of events that we can receive from the sclp interface. */
32static sccb_mask_t sclp_receive_mask;
33
34/* Mask of events that we can send to the sclp interface. */
35static sccb_mask_t sclp_send_mask;
36
37/* List of registered event listeners and senders. */
38static struct list_head sclp_reg_list;
39
40/* List of queued requests. */
41static struct list_head sclp_req_queue;
42
/* Data for read and init requests. */
44static struct sclp_req sclp_read_req;
45static struct sclp_req sclp_init_req;
46static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
47static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
48
49/* Timer for request retries. */
50static struct timer_list sclp_request_timer;
51
52/* Internal state: is the driver initialized? */
53static volatile enum sclp_init_state_t {
54 sclp_init_state_uninitialized,
55 sclp_init_state_initializing,
56 sclp_init_state_initialized
57} sclp_init_state = sclp_init_state_uninitialized;
58
59/* Internal state: is a request active at the sclp? */
60static volatile enum sclp_running_state_t {
61 sclp_running_state_idle,
62 sclp_running_state_running
63} sclp_running_state = sclp_running_state_idle;
64
65/* Internal state: is a read request pending? */
66static volatile enum sclp_reading_state_t {
67 sclp_reading_state_idle,
68 sclp_reading_state_reading
69} sclp_reading_state = sclp_reading_state_idle;
70
71/* Internal state: is the driver currently serving requests? */
72static volatile enum sclp_activation_state_t {
73 sclp_activation_state_active,
74 sclp_activation_state_deactivating,
75 sclp_activation_state_inactive,
76 sclp_activation_state_activating
77} sclp_activation_state = sclp_activation_state_active;
78
79/* Internal state: is an init mask request pending? */
80static volatile enum sclp_mask_state_t {
81 sclp_mask_state_idle,
82 sclp_mask_state_initializing
83} sclp_mask_state = sclp_mask_state_idle;
84
85/* Maximum retry counts */
86#define SCLP_INIT_RETRY 3
87#define SCLP_MASK_RETRY 3
88#define SCLP_REQUEST_RETRY 3
89
/* Timeout intervals in seconds. */
91#define SCLP_BUSY_INTERVAL 2
92#define SCLP_RETRY_INTERVAL 5
93
94static void sclp_process_queue(void);
95static int sclp_init_mask(int calculate);
96static int sclp_init(void);
97
98/* Perform service call. Return 0 on success, non-zero otherwise. */
99static int
100service_call(sclp_cmdw_t command, void *sccb)
101{
102 int cc;
103
104 __asm__ __volatile__(
105 " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
106 " ipm %0\n"
107 " srl %0,28"
108 : "=&d" (cc)
109 : "d" (command), "a" (__pa(sccb))
110 : "cc", "memory" );
111 if (cc == 3)
112 return -EIO;
113 if (cc == 2)
114 return -EBUSY;
115 return 0;
116}
117
118/* Request timeout handler. Restart the request queue. If DATA is non-zero,
119 * force restart of running request. */
120static void
121sclp_request_timeout(unsigned long data)
122{
123 unsigned long flags;
124
125 if (data) {
126 spin_lock_irqsave(&sclp_lock, flags);
127 sclp_running_state = sclp_running_state_idle;
128 spin_unlock_irqrestore(&sclp_lock, flags);
129 }
130 sclp_process_queue();
131}
132
133/* Set up request retry timer. Called while sclp_lock is locked. */
134static inline void
135__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
136 unsigned long data)
137{
138 del_timer(&sclp_request_timer);
139 sclp_request_timer.function = function;
140 sclp_request_timer.data = data;
141 sclp_request_timer.expires = jiffies + time;
142 add_timer(&sclp_request_timer);
143}
144
145/* Try to start a request. Return zero if the request was successfully
146 * started or if it will be started at a later time. Return non-zero otherwise.
147 * Called while sclp_lock is locked. */
148static int
149__sclp_start_request(struct sclp_req *req)
150{
151 int rc;
152
153 if (sclp_running_state != sclp_running_state_idle)
154 return 0;
155 del_timer(&sclp_request_timer);
156 if (req->start_count <= SCLP_REQUEST_RETRY) {
157 rc = service_call(req->command, req->sccb);
158 req->start_count++;
159 } else
160 rc = -EIO;
161 if (rc == 0) {
		/* Successfully started request */
163 req->status = SCLP_REQ_RUNNING;
164 sclp_running_state = sclp_running_state_running;
165 __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
166 sclp_request_timeout, 1);
167 return 0;
168 } else if (rc == -EBUSY) {
169 /* Try again later */
170 __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
171 sclp_request_timeout, 0);
172 return 0;
173 }
174 /* Request failed */
175 req->status = SCLP_REQ_FAILED;
176 return rc;
177}
178
179/* Try to start queued requests. */
180static void
181sclp_process_queue(void)
182{
183 struct sclp_req *req;
184 int rc;
185 unsigned long flags;
186
187 spin_lock_irqsave(&sclp_lock, flags);
188 if (sclp_running_state != sclp_running_state_idle) {
189 spin_unlock_irqrestore(&sclp_lock, flags);
190 return;
191 }
192 del_timer(&sclp_request_timer);
193 while (!list_empty(&sclp_req_queue)) {
194 req = list_entry(sclp_req_queue.next, struct sclp_req, list);
195 rc = __sclp_start_request(req);
196 if (rc == 0)
197 break;
198 /* Request failed. */
199 list_del(&req->list);
200 if (req->callback) {
201 spin_unlock_irqrestore(&sclp_lock, flags);
202 req->callback(req, req->callback_data);
203 spin_lock_irqsave(&sclp_lock, flags);
204 }
205 }
206 spin_unlock_irqrestore(&sclp_lock, flags);
207}
208
209/* Queue a new request. Return zero on success, non-zero otherwise. */
210int
211sclp_add_request(struct sclp_req *req)
212{
213 unsigned long flags;
214 int rc;
215
216 spin_lock_irqsave(&sclp_lock, flags);
217 if ((sclp_init_state != sclp_init_state_initialized ||
218 sclp_activation_state != sclp_activation_state_active) &&
219 req != &sclp_init_req) {
220 spin_unlock_irqrestore(&sclp_lock, flags);
221 return -EIO;
222 }
223 req->status = SCLP_REQ_QUEUED;
224 req->start_count = 0;
225 list_add_tail(&req->list, &sclp_req_queue);
226 rc = 0;
227 /* Start if request is first in list */
228 if (req->list.prev == &sclp_req_queue) {
229 rc = __sclp_start_request(req);
230 if (rc)
231 list_del(&req->list);
232 }
233 spin_unlock_irqrestore(&sclp_lock, flags);
234 return rc;
235}
236
237EXPORT_SYMBOL(sclp_add_request);
238
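/*
 * Hypothetical caller sketch, not part of the driver: issue a
 * write-event-data request with a caller-prepared SCCB and wait for
 * completion. Assumes <linux/completion.h>; all "example" names are
 * illustrative only.
 */
static void
example_done(struct sclp_req *req, void *data)
{
	complete((struct completion *) data);
}

static int
example_issue(void *sccb)
{
	struct sclp_req req;
	DECLARE_COMPLETION(done);
	int rc;

	memset(&req, 0, sizeof(req));
	req.command = SCLP_CMDW_WRITEDATA;
	req.status = SCLP_REQ_FILLED;
	req.sccb = sccb;		/* caller-prepared SCCB page */
	req.callback = example_done;
	req.callback_data = &done;
	rc = sclp_add_request(&req);
	if (rc)
		return rc;
	wait_for_completion(&done);
	return req.status == SCLP_REQ_DONE ? 0 : -EIO;
}
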
239/* Dispatch events found in request buffer to registered listeners. Return 0
240 * if all events were dispatched, non-zero otherwise. */
241static int
242sclp_dispatch_evbufs(struct sccb_header *sccb)
243{
244 unsigned long flags;
245 struct evbuf_header *evbuf;
246 struct list_head *l;
247 struct sclp_register *reg;
248 int offset;
249 int rc;
250
251 spin_lock_irqsave(&sclp_lock, flags);
252 rc = 0;
253 for (offset = sizeof(struct sccb_header); offset < sccb->length;
254 offset += evbuf->length) {
255 /* Search for event handler */
256 evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
257 reg = NULL;
258 list_for_each(l, &sclp_reg_list) {
259 reg = list_entry(l, struct sclp_register, list);
260 if (reg->receive_mask & (1 << (32 - evbuf->type)))
261 break;
262 else
263 reg = NULL;
264 }
265 if (reg && reg->receiver_fn) {
266 spin_unlock_irqrestore(&sclp_lock, flags);
267 reg->receiver_fn(evbuf);
268 spin_lock_irqsave(&sclp_lock, flags);
269 } else if (reg == NULL)
270 rc = -ENOSYS;
271 }
272 spin_unlock_irqrestore(&sclp_lock, flags);
273 return rc;
274}
275
276/* Read event data request callback. */
277static void
278sclp_read_cb(struct sclp_req *req, void *data)
279{
280 unsigned long flags;
281 struct sccb_header *sccb;
282
283 sccb = (struct sccb_header *) req->sccb;
284 if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
285 sccb->response_code == 0x220))
286 sclp_dispatch_evbufs(sccb);
287 spin_lock_irqsave(&sclp_lock, flags);
288 sclp_reading_state = sclp_reading_state_idle;
289 spin_unlock_irqrestore(&sclp_lock, flags);
290}
291
292/* Prepare read event data request. Called while sclp_lock is locked. */
293static inline void
294__sclp_make_read_req(void)
295{
296 struct sccb_header *sccb;
297
298 sccb = (struct sccb_header *) sclp_read_sccb;
299 clear_page(sccb);
300 memset(&sclp_read_req, 0, sizeof(struct sclp_req));
301 sclp_read_req.command = SCLP_CMDW_READDATA;
302 sclp_read_req.status = SCLP_REQ_QUEUED;
303 sclp_read_req.start_count = 0;
304 sclp_read_req.callback = sclp_read_cb;
305 sclp_read_req.sccb = sccb;
306 sccb->length = PAGE_SIZE;
307 sccb->function_code = 0;
308 sccb->control_mask[2] = 0x80;
309}
310
311/* Search request list for request with matching sccb. Return request if found,
312 * NULL otherwise. Called while sclp_lock is locked. */
313static inline struct sclp_req *
314__sclp_find_req(u32 sccb)
315{
316 struct list_head *l;
317 struct sclp_req *req;
318
319 list_for_each(l, &sclp_req_queue) {
320 req = list_entry(l, struct sclp_req, list);
321 if (sccb == (u32) (addr_t) req->sccb)
322 return req;
323 }
324 return NULL;
325}
326
327/* Handler for external interruption. Perform request post-processing.
328 * Prepare read event data request if necessary. Start processing of next
329 * request on queue. */
330static void
331sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
332{
333 struct sclp_req *req;
334 u32 finished_sccb;
335 u32 evbuf_pending;
336
337 spin_lock(&sclp_lock);
338 finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
339 evbuf_pending = S390_lowcore.ext_params & 0x3;
340 if (finished_sccb) {
341 req = __sclp_find_req(finished_sccb);
342 if (req) {
343 /* Request post-processing */
344 list_del(&req->list);
345 req->status = SCLP_REQ_DONE;
346 if (req->callback) {
347 spin_unlock(&sclp_lock);
348 req->callback(req, req->callback_data);
349 spin_lock(&sclp_lock);
350 }
351 }
352 sclp_running_state = sclp_running_state_idle;
353 }
354 if (evbuf_pending && sclp_receive_mask != 0 &&
355 sclp_reading_state == sclp_reading_state_idle &&
	    sclp_activation_state == sclp_activation_state_active) {
357 sclp_reading_state = sclp_reading_state_reading;
358 __sclp_make_read_req();
359 /* Add request to head of queue */
360 list_add(&sclp_read_req.list, &sclp_req_queue);
361 }
362 spin_unlock(&sclp_lock);
363 sclp_process_queue();
364}
365
366/* Return current Time-Of-Day clock. */
367static inline u64
368sclp_get_clock(void)
369{
370 u64 result;
371
372 asm volatile ("STCK 0(%1)" : "=m" (result) : "a" (&(result)) : "cc");
373 return result;
374}
375
/* Convert an interval in jiffies to TOD ticks. Bit 51 of the TOD clock
 * is incremented every microsecond, so 2^32 TOD ticks are roughly one
 * second; the shift is a cheap approximation of seconds << 32. */
static inline u64
sclp_tod_from_jiffies(unsigned long interval)
{
	return (u64) (interval / HZ) << 32;
}
382
/* Wait until a currently running request has finished. Note: while this function
384 * is running, no timers are served on the calling CPU. */
385void
386sclp_sync_wait(void)
387{
388 unsigned long psw_mask;
389 unsigned long cr0, cr0_sync;
390 u64 timeout;
391
392 /* We'll be disabling timer interrupts, so we need a custom timeout
393 * mechanism */
394 timeout = 0;
395 if (timer_pending(&sclp_request_timer)) {
396 /* Get timeout TOD value */
397 timeout = sclp_get_clock() +
398 sclp_tod_from_jiffies(sclp_request_timer.expires -
399 jiffies);
400 }
401 /* Prevent bottom half from executing once we force interrupts open */
402 local_bh_disable();
403 /* Enable service-signal interruption, disable timer interrupts */
404 __ctl_store(cr0, 0, 0);
405 cr0_sync = cr0;
406 cr0_sync |= 0x00000200;
407 cr0_sync &= 0xFFFFF3AC;
408 __ctl_load(cr0_sync, 0, 0);
409 asm volatile ("STOSM 0(%1),0x01"
410 : "=m" (psw_mask) : "a" (&psw_mask) : "memory");
411 /* Loop until driver state indicates finished request */
412 while (sclp_running_state != sclp_running_state_idle) {
413 /* Check for expired request timer */
414 if (timer_pending(&sclp_request_timer) &&
415 sclp_get_clock() > timeout &&
416 del_timer(&sclp_request_timer))
417 sclp_request_timer.function(sclp_request_timer.data);
418 barrier();
419 cpu_relax();
420 }
421 /* Restore interrupt settings */
422 asm volatile ("SSM 0(%0)"
423 : : "a" (&psw_mask) : "memory");
424 __ctl_load(cr0, 0, 0);
425 __local_bh_enable();
426}
427
428EXPORT_SYMBOL(sclp_sync_wait);
429
430/* Dispatch changes in send and receive mask to registered listeners. */
431static inline void
432sclp_dispatch_state_change(void)
433{
434 struct list_head *l;
435 struct sclp_register *reg;
436 unsigned long flags;
437 sccb_mask_t receive_mask;
438 sccb_mask_t send_mask;
439
440 do {
441 spin_lock_irqsave(&sclp_lock, flags);
442 reg = NULL;
443 list_for_each(l, &sclp_reg_list) {
444 reg = list_entry(l, struct sclp_register, list);
445 receive_mask = reg->receive_mask & sclp_receive_mask;
446 send_mask = reg->send_mask & sclp_send_mask;
447 if (reg->sclp_receive_mask != receive_mask ||
448 reg->sclp_send_mask != send_mask) {
449 reg->sclp_receive_mask = receive_mask;
450 reg->sclp_send_mask = send_mask;
451 break;
452 } else
453 reg = NULL;
454 }
455 spin_unlock_irqrestore(&sclp_lock, flags);
456 if (reg && reg->state_change_fn)
457 reg->state_change_fn(reg);
458 } while (reg);
459}
460
461struct sclp_statechangebuf {
462 struct evbuf_header header;
463 u8 validity_sclp_active_facility_mask : 1;
464 u8 validity_sclp_receive_mask : 1;
465 u8 validity_sclp_send_mask : 1;
466 u8 validity_read_data_function_mask : 1;
467 u16 _zeros : 12;
468 u16 mask_length;
469 u64 sclp_active_facility_mask;
470 sccb_mask_t sclp_receive_mask;
471 sccb_mask_t sclp_send_mask;
472 u32 read_data_function_mask;
473} __attribute__((packed));
474
475
476/* State change event callback. Inform listeners of changes. */
477static void
478sclp_state_change_cb(struct evbuf_header *evbuf)
479{
480 unsigned long flags;
481 struct sclp_statechangebuf *scbuf;
482
483 scbuf = (struct sclp_statechangebuf *) evbuf;
484 if (scbuf->mask_length != sizeof(sccb_mask_t))
485 return;
486 spin_lock_irqsave(&sclp_lock, flags);
487 if (scbuf->validity_sclp_receive_mask)
488 sclp_receive_mask = scbuf->sclp_receive_mask;
489 if (scbuf->validity_sclp_send_mask)
490 sclp_send_mask = scbuf->sclp_send_mask;
491 spin_unlock_irqrestore(&sclp_lock, flags);
492 sclp_dispatch_state_change();
493}
494
495static struct sclp_register sclp_state_change_event = {
496 .receive_mask = EvTyp_StateChange_Mask,
497 .receiver_fn = sclp_state_change_cb
498};
499
500/* Calculate receive and send mask of currently registered listeners.
501 * Called while sclp_lock is locked. */
502static inline void
503__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
504{
505 struct list_head *l;
506 struct sclp_register *t;
507
508 *receive_mask = 0;
509 *send_mask = 0;
510 list_for_each(l, &sclp_reg_list) {
511 t = list_entry(l, struct sclp_register, list);
512 *receive_mask |= t->receive_mask;
513 *send_mask |= t->send_mask;
514 }
515}
516
517/* Register event listener. Return 0 on success, non-zero otherwise. */
518int
519sclp_register(struct sclp_register *reg)
520{
521 unsigned long flags;
522 sccb_mask_t receive_mask;
523 sccb_mask_t send_mask;
524 int rc;
525
526 rc = sclp_init();
527 if (rc)
528 return rc;
529 spin_lock_irqsave(&sclp_lock, flags);
530 /* Check event mask for collisions */
531 __sclp_get_mask(&receive_mask, &send_mask);
532 if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
533 spin_unlock_irqrestore(&sclp_lock, flags);
534 return -EBUSY;
535 }
536 /* Trigger initial state change callback */
537 reg->sclp_receive_mask = 0;
538 reg->sclp_send_mask = 0;
539 list_add(&reg->list, &sclp_reg_list);
540 spin_unlock_irqrestore(&sclp_lock, flags);
541 rc = sclp_init_mask(1);
542 if (rc) {
543 spin_lock_irqsave(&sclp_lock, flags);
544 list_del(&reg->list);
545 spin_unlock_irqrestore(&sclp_lock, flags);
546 }
547 return rc;
548}
549
550EXPORT_SYMBOL(sclp_register);
551
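/*
 * Hypothetical listener sketch, not part of the driver: receive
 * quiesce events. The handler body is illustrative only.
 */
static void
example_receiver(struct evbuf_header *evbuf)
{
	/* evbuf points into the read SCCB; consume the event here. */
}

static struct sclp_register example_listener = {
	.receive_mask = EvTyp_SigQuiesce_Mask,
	.receiver_fn = example_receiver,
};

/* A module would call sclp_register(&example_listener) at init and
 * sclp_unregister(&example_listener) at exit. */
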
552/* Unregister event listener. */
553void
554sclp_unregister(struct sclp_register *reg)
555{
556 unsigned long flags;
557
558 spin_lock_irqsave(&sclp_lock, flags);
559 list_del(&reg->list);
560 spin_unlock_irqrestore(&sclp_lock, flags);
561 sclp_init_mask(1);
562}
563
564EXPORT_SYMBOL(sclp_unregister);
565
566/* Remove event buffers which are marked processed. Return the number of
567 * remaining event buffers. */
568int
569sclp_remove_processed(struct sccb_header *sccb)
570{
571 struct evbuf_header *evbuf;
572 int unprocessed;
573 u16 remaining;
574
575 evbuf = (struct evbuf_header *) (sccb + 1);
576 unprocessed = 0;
577 remaining = sccb->length - sizeof(struct sccb_header);
578 while (remaining > 0) {
579 remaining -= evbuf->length;
580 if (evbuf->flags & 0x80) {
581 sccb->length -= evbuf->length;
			/* Source and destination may overlap. */
			memmove(evbuf,
				(void *) ((addr_t) evbuf + evbuf->length),
				remaining);
584 } else {
585 unprocessed++;
586 evbuf = (struct evbuf_header *)
587 ((addr_t) evbuf + evbuf->length);
588 }
589 }
590 return unprocessed;
591}
592
593EXPORT_SYMBOL(sclp_remove_processed);
594
595struct init_sccb {
596 struct sccb_header header;
597 u16 _reserved;
598 u16 mask_length;
599 sccb_mask_t receive_mask;
600 sccb_mask_t send_mask;
601 sccb_mask_t sclp_send_mask;
602 sccb_mask_t sclp_receive_mask;
603} __attribute__((packed));
604
605/* Prepare init mask request. Called while sclp_lock is locked. */
606static inline void
607__sclp_make_init_req(u32 receive_mask, u32 send_mask)
608{
609 struct init_sccb *sccb;
610
611 sccb = (struct init_sccb *) sclp_init_sccb;
612 clear_page(sccb);
613 memset(&sclp_init_req, 0, sizeof(struct sclp_req));
614 sclp_init_req.command = SCLP_CMDW_WRITEMASK;
615 sclp_init_req.status = SCLP_REQ_FILLED;
616 sclp_init_req.start_count = 0;
617 sclp_init_req.callback = NULL;
618 sclp_init_req.callback_data = NULL;
619 sclp_init_req.sccb = sccb;
620 sccb->header.length = sizeof(struct init_sccb);
621 sccb->mask_length = sizeof(sccb_mask_t);
622 sccb->receive_mask = receive_mask;
623 sccb->send_mask = send_mask;
624 sccb->sclp_receive_mask = 0;
625 sccb->sclp_send_mask = 0;
626}
627
628/* Start init mask request. If calculate is non-zero, calculate the mask as
629 * requested by registered listeners. Use zero mask otherwise. Return 0 on
630 * success, non-zero otherwise. */
631static int
632sclp_init_mask(int calculate)
633{
634 unsigned long flags;
635 struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
636 sccb_mask_t receive_mask;
637 sccb_mask_t send_mask;
638 int retry;
639 int rc;
640 unsigned long wait;
641
642 spin_lock_irqsave(&sclp_lock, flags);
643 /* Check if interface is in appropriate state */
644 if (sclp_mask_state != sclp_mask_state_idle) {
645 spin_unlock_irqrestore(&sclp_lock, flags);
646 return -EBUSY;
647 }
648 if (sclp_activation_state == sclp_activation_state_inactive) {
649 spin_unlock_irqrestore(&sclp_lock, flags);
650 return -EINVAL;
651 }
652 sclp_mask_state = sclp_mask_state_initializing;
653 /* Determine mask */
654 if (calculate)
655 __sclp_get_mask(&receive_mask, &send_mask);
656 else {
657 receive_mask = 0;
658 send_mask = 0;
659 }
660 rc = -EIO;
661 for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
662 /* Prepare request */
663 __sclp_make_init_req(receive_mask, send_mask);
664 spin_unlock_irqrestore(&sclp_lock, flags);
665 if (sclp_add_request(&sclp_init_req)) {
666 /* Try again later */
667 wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
668 while (time_before(jiffies, wait))
669 sclp_sync_wait();
670 spin_lock_irqsave(&sclp_lock, flags);
671 continue;
672 }
673 while (sclp_init_req.status != SCLP_REQ_DONE &&
674 sclp_init_req.status != SCLP_REQ_FAILED)
675 sclp_sync_wait();
676 spin_lock_irqsave(&sclp_lock, flags);
677 if (sclp_init_req.status == SCLP_REQ_DONE &&
678 sccb->header.response_code == 0x20) {
679 /* Successful request */
680 if (calculate) {
681 sclp_receive_mask = sccb->sclp_receive_mask;
682 sclp_send_mask = sccb->sclp_send_mask;
683 } else {
684 sclp_receive_mask = 0;
685 sclp_send_mask = 0;
686 }
687 spin_unlock_irqrestore(&sclp_lock, flags);
688 sclp_dispatch_state_change();
689 spin_lock_irqsave(&sclp_lock, flags);
690 rc = 0;
691 break;
692 }
693 }
694 sclp_mask_state = sclp_mask_state_idle;
695 spin_unlock_irqrestore(&sclp_lock, flags);
696 return rc;
697}
698
699/* Deactivate SCLP interface. On success, new requests will be rejected,
700 * events will no longer be dispatched. Return 0 on success, non-zero
701 * otherwise. */
702int
703sclp_deactivate(void)
704{
705 unsigned long flags;
706 int rc;
707
708 spin_lock_irqsave(&sclp_lock, flags);
709 /* Deactivate can only be called when active */
710 if (sclp_activation_state != sclp_activation_state_active) {
711 spin_unlock_irqrestore(&sclp_lock, flags);
712 return -EINVAL;
713 }
714 sclp_activation_state = sclp_activation_state_deactivating;
715 spin_unlock_irqrestore(&sclp_lock, flags);
716 rc = sclp_init_mask(0);
717 spin_lock_irqsave(&sclp_lock, flags);
718 if (rc == 0)
719 sclp_activation_state = sclp_activation_state_inactive;
720 else
721 sclp_activation_state = sclp_activation_state_active;
722 spin_unlock_irqrestore(&sclp_lock, flags);
723 return rc;
724}
725
726EXPORT_SYMBOL(sclp_deactivate);
727
728/* Reactivate SCLP interface after sclp_deactivate. On success, new
729 * requests will be accepted, events will be dispatched again. Return 0 on
730 * success, non-zero otherwise. */
731int
732sclp_reactivate(void)
733{
734 unsigned long flags;
735 int rc;
736
737 spin_lock_irqsave(&sclp_lock, flags);
738 /* Reactivate can only be called when inactive */
739 if (sclp_activation_state != sclp_activation_state_inactive) {
740 spin_unlock_irqrestore(&sclp_lock, flags);
741 return -EINVAL;
742 }
743 sclp_activation_state = sclp_activation_state_activating;
744 spin_unlock_irqrestore(&sclp_lock, flags);
745 rc = sclp_init_mask(1);
746 spin_lock_irqsave(&sclp_lock, flags);
747 if (rc == 0)
748 sclp_activation_state = sclp_activation_state_active;
749 else
750 sclp_activation_state = sclp_activation_state_inactive;
751 spin_unlock_irqrestore(&sclp_lock, flags);
752 return rc;
753}
754
755EXPORT_SYMBOL(sclp_reactivate);
756
757/* Handler for external interruption used during initialization. Modify
758 * request state to done. */
759static void
760sclp_check_handler(struct pt_regs *regs, __u16 code)
761{
762 u32 finished_sccb;
763
764 finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
765 /* Is this the interrupt we are waiting for? */
766 if (finished_sccb == 0)
767 return;
768 if (finished_sccb != (u32) (addr_t) sclp_init_sccb) {
769 printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt "
770 "for buffer at 0x%x\n", finished_sccb);
771 return;
772 }
773 spin_lock(&sclp_lock);
774 if (sclp_running_state == sclp_running_state_running) {
775 sclp_init_req.status = SCLP_REQ_DONE;
776 sclp_running_state = sclp_running_state_idle;
777 }
778 spin_unlock(&sclp_lock);
779}
780
781/* Initial init mask request timed out. Modify request state to failed. */
782static void
783sclp_check_timeout(unsigned long data)
784{
785 unsigned long flags;
786
787 spin_lock_irqsave(&sclp_lock, flags);
788 if (sclp_running_state == sclp_running_state_running) {
789 sclp_init_req.status = SCLP_REQ_FAILED;
790 sclp_running_state = sclp_running_state_idle;
791 }
792 spin_unlock_irqrestore(&sclp_lock, flags);
793}
794
795/* Perform a check of the SCLP interface. Return zero if the interface is
796 * available and there are no pending requests from a previous instance.
797 * Return non-zero otherwise. */
798static int
799sclp_check_interface(void)
800{
801 struct init_sccb *sccb;
802 unsigned long flags;
803 int retry;
804 int rc;
805
806 spin_lock_irqsave(&sclp_lock, flags);
807 /* Prepare init mask command */
808 rc = register_early_external_interrupt(0x2401, sclp_check_handler,
809 &ext_int_info_hwc);
810 if (rc) {
811 spin_unlock_irqrestore(&sclp_lock, flags);
812 return rc;
813 }
814 for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
815 __sclp_make_init_req(0, 0);
816 sccb = (struct init_sccb *) sclp_init_req.sccb;
817 rc = service_call(sclp_init_req.command, sccb);
818 if (rc == -EIO)
819 break;
820 sclp_init_req.status = SCLP_REQ_RUNNING;
821 sclp_running_state = sclp_running_state_running;
822 __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
823 sclp_check_timeout, 0);
824 spin_unlock_irqrestore(&sclp_lock, flags);
825 /* Enable service-signal interruption - needs to happen
826 * with IRQs enabled. */
827 ctl_set_bit(0, 9);
828 /* Wait for signal from interrupt or timeout */
829 sclp_sync_wait();
830 /* Disable service-signal interruption - needs to happen
831 * with IRQs enabled. */
		ctl_clear_bit(0, 9);
833 spin_lock_irqsave(&sclp_lock, flags);
834 del_timer(&sclp_request_timer);
835 if (sclp_init_req.status == SCLP_REQ_DONE &&
836 sccb->header.response_code == 0x20) {
837 rc = 0;
838 break;
839 } else
840 rc = -EBUSY;
841 }
842 unregister_early_external_interrupt(0x2401, sclp_check_handler,
843 &ext_int_info_hwc);
844 spin_unlock_irqrestore(&sclp_lock, flags);
845 return rc;
846}
847
848/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with the rebooted system. */
850static int
851sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
852{
853 sclp_deactivate();
854 return NOTIFY_DONE;
855}
856
857static struct notifier_block sclp_reboot_notifier = {
858 .notifier_call = sclp_reboot_event
859};
860
861/* Initialize SCLP driver. Return zero if driver is operational, non-zero
862 * otherwise. */
863static int
864sclp_init(void)
865{
866 unsigned long flags;
867 int rc;
868
869 if (!MACHINE_HAS_SCLP)
870 return -ENODEV;
871 spin_lock_irqsave(&sclp_lock, flags);
872 /* Check for previous or running initialization */
873 if (sclp_init_state != sclp_init_state_uninitialized) {
874 spin_unlock_irqrestore(&sclp_lock, flags);
875 return 0;
876 }
877 sclp_init_state = sclp_init_state_initializing;
878 /* Set up variables */
879 INIT_LIST_HEAD(&sclp_req_queue);
880 INIT_LIST_HEAD(&sclp_reg_list);
881 list_add(&sclp_state_change_event.list, &sclp_reg_list);
882 init_timer(&sclp_request_timer);
883 /* Check interface */
884 spin_unlock_irqrestore(&sclp_lock, flags);
885 rc = sclp_check_interface();
886 spin_lock_irqsave(&sclp_lock, flags);
887 if (rc) {
888 sclp_init_state = sclp_init_state_uninitialized;
889 spin_unlock_irqrestore(&sclp_lock, flags);
890 return rc;
891 }
892 /* Register reboot handler */
893 rc = register_reboot_notifier(&sclp_reboot_notifier);
894 if (rc) {
895 sclp_init_state = sclp_init_state_uninitialized;
896 spin_unlock_irqrestore(&sclp_lock, flags);
897 return rc;
898 }
899 /* Register interrupt handler */
900 rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
901 &ext_int_info_hwc);
902 if (rc) {
903 unregister_reboot_notifier(&sclp_reboot_notifier);
904 sclp_init_state = sclp_init_state_uninitialized;
905 spin_unlock_irqrestore(&sclp_lock, flags);
906 return rc;
907 }
908 sclp_init_state = sclp_init_state_initialized;
909 spin_unlock_irqrestore(&sclp_lock, flags);
910 /* Enable service-signal external interruption - needs to happen with
911 * IRQs enabled. */
912 ctl_set_bit(0, 9);
913 sclp_init_mask(1);
914 return 0;
915}
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
new file mode 100644
index 000000000000..2c71d6ee7b5b
--- /dev/null
+++ b/drivers/s390/char/sclp.h
@@ -0,0 +1,159 @@
1/*
2 * drivers/s390/char/sclp.h
3 *
4 * S390 version
5 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
8 */
9
10#ifndef __SCLP_H__
11#define __SCLP_H__
12
13#include <linux/types.h>
14#include <linux/list.h>
15
16#include <asm/ebcdic.h>
17
/* maximum number of pages used by our own memory management */
19#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
20#define MAX_CONSOLE_PAGES 4
21
22#define EvTyp_OpCmd 0x01
23#define EvTyp_Msg 0x02
24#define EvTyp_StateChange 0x08
25#define EvTyp_PMsgCmd 0x09
26#define EvTyp_CntlProgOpCmd 0x20
27#define EvTyp_CntlProgIdent 0x0B
28#define EvTyp_SigQuiesce 0x1D
29#define EvTyp_VT220Msg 0x1A
30
31#define EvTyp_OpCmd_Mask 0x80000000
32#define EvTyp_Msg_Mask 0x40000000
33#define EvTyp_StateChange_Mask 0x01000000
34#define EvTyp_PMsgCmd_Mask 0x00800000
35#define EvTyp_CtlProgOpCmd_Mask 0x00000001
36#define EvTyp_CtlProgIdent_Mask 0x00200000
37#define EvTyp_SigQuiesce_Mask 0x00000008
38#define EvTyp_VT220Msg_Mask 0x00000040
39
40#define GnrlMsgFlgs_DOM 0x8000
41#define GnrlMsgFlgs_SndAlrm 0x4000
42#define GnrlMsgFlgs_HoldMsg 0x2000
43
44#define LnTpFlgs_CntlText 0x8000
45#define LnTpFlgs_LabelText 0x4000
46#define LnTpFlgs_DataText 0x2000
47#define LnTpFlgs_EndText 0x1000
48#define LnTpFlgs_PromptText 0x0800
49
50typedef unsigned int sclp_cmdw_t;
51
52#define SCLP_CMDW_READDATA 0x00770005
53#define SCLP_CMDW_WRITEDATA 0x00760005
54#define SCLP_CMDW_WRITEMASK 0x00780005
55
56#define GDS_ID_MDSMU 0x1310
57#define GDS_ID_MDSRouteInfo 0x1311
58#define GDS_ID_AgUnWrkCorr 0x1549
59#define GDS_ID_SNACondReport 0x1532
60#define GDS_ID_CPMSU 0x1212
61#define GDS_ID_RoutTargInstr 0x154D
62#define GDS_ID_OpReq 0x8070
63#define GDS_ID_TextCmd 0x1320
64
65#define GDS_KEY_SelfDefTextMsg 0x31
66
67typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */
68
69struct sccb_header {
70 u16 length;
71 u8 function_code;
72 u8 control_mask[3];
73 u16 response_code;
74} __attribute__((packed));
75
76struct gds_subvector {
77 u8 length;
78 u8 key;
79} __attribute__((packed));
80
81struct gds_vector {
82 u16 length;
83 u16 gds_id;
84} __attribute__((packed));
85
86struct evbuf_header {
87 u16 length;
88 u8 type;
89 u8 flags;
90 u16 _reserved;
91} __attribute__((packed));
92
93struct sclp_req {
94 struct list_head list; /* list_head for request queueing. */
95 sclp_cmdw_t command; /* sclp command to execute */
96 void *sccb; /* pointer to the sccb to execute */
97 char status; /* status of this request */
98 int start_count; /* number of SVCs done for this req */
99 /* Callback that is called after reaching final status. */
100 void (*callback)(struct sclp_req *, void *data);
101 void *callback_data;
102};
103
104#define SCLP_REQ_FILLED 0x00 /* request is ready to be processed */
105#define SCLP_REQ_QUEUED 0x01 /* request is queued to be processed */
106#define SCLP_REQ_RUNNING 0x02 /* request is currently running */
107#define SCLP_REQ_DONE 0x03 /* request is completed successfully */
108#define SCLP_REQ_FAILED 0x05 /* request is finally failed */
109
/* Function pointers that a high-level driver registers to have its
 * routines called by the low-level driver. */
112struct sclp_register {
113 struct list_head list;
114 /* event masks this user is registered for */
115 sccb_mask_t receive_mask;
116 sccb_mask_t send_mask;
117 /* actually present events */
118 sccb_mask_t sclp_receive_mask;
119 sccb_mask_t sclp_send_mask;
120 /* called if event type availability changes */
121 void (*state_change_fn)(struct sclp_register *);
	/* called for events matching receive_mask */
123 void (*receiver_fn)(struct evbuf_header *);
124};
125
126/* externals from sclp.c */
127int sclp_add_request(struct sclp_req *req);
128void sclp_sync_wait(void);
129int sclp_register(struct sclp_register *reg);
130void sclp_unregister(struct sclp_register *reg);
131int sclp_remove_processed(struct sccb_header *sccb);
132int sclp_deactivate(void);
133int sclp_reactivate(void);
134
135/* useful inlines */
136
137/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */
138/* translate single character from ASCII to EBCDIC */
139static inline unsigned char
140sclp_ascebc(unsigned char ch)
141{
142 return (MACHINE_IS_VM) ? _ascebc[ch] : _ascebc_500[ch];
143}
144
145/* translate string from EBCDIC to ASCII */
146static inline void
147sclp_ebcasc_str(unsigned char *str, int nr)
148{
149 (MACHINE_IS_VM) ? EBCASC(str, nr) : EBCASC_500(str, nr);
150}
151
152/* translate string from ASCII to EBCDIC */
153static inline void
154sclp_ascebc_str(unsigned char *str, int nr)
155{
156 (MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr);
157}
158
159#endif /* __SCLP_H__ */
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
new file mode 100644
index 000000000000..10ef22f13541
--- /dev/null
+++ b/drivers/s390/char/sclp_con.c
@@ -0,0 +1,252 @@
1/*
2 * drivers/s390/char/sclp_con.c
3 * SCLP line mode console driver
4 *
5 * S390 version
6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */
10
11#include <linux/config.h>
12#include <linux/kmod.h>
13#include <linux/console.h>
14#include <linux/init.h>
15#include <linux/timer.h>
16#include <linux/jiffies.h>
17#include <linux/bootmem.h>
18#include <linux/err.h>
19
20#include "sclp.h"
21#include "sclp_rw.h"
22#include "sclp_tty.h"
23
24#define SCLP_CON_PRINT_HEADER "sclp console driver: "
25
26#define sclp_console_major 4 /* TTYAUX_MAJOR */
27#define sclp_console_minor 64
28#define sclp_console_name "ttyS"
29
30/* Lock to guard over changes to global variables */
31static spinlock_t sclp_con_lock;
32/* List of free pages that can be used for console output buffering */
33static struct list_head sclp_con_pages;
34/* List of full struct sclp_buffer structures ready for output */
35static struct list_head sclp_con_outqueue;
/* Number of buffers on the output queue, including the one that */
/* is currently being emitted (at most one at a time). */
38static int sclp_con_buffer_count;
39/* Pointer to current console buffer */
40static struct sclp_buffer *sclp_conbuf;
41/* Timer for delayed output of console messages */
42static struct timer_list sclp_con_timer;
43
44/* Output format for console messages */
45static unsigned short sclp_con_columns;
46static unsigned short sclp_con_width_htab;
47
48static void
49sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
50{
51 unsigned long flags;
52 void *page;
53
54 do {
55 page = sclp_unmake_buffer(buffer);
56 spin_lock_irqsave(&sclp_con_lock, flags);
57 /* Remove buffer from outqueue */
58 list_del(&buffer->list);
59 sclp_con_buffer_count--;
60 list_add_tail((struct list_head *) page, &sclp_con_pages);
61 /* Check if there is a pending buffer on the out queue. */
62 buffer = NULL;
63 if (!list_empty(&sclp_con_outqueue))
64 buffer = list_entry(sclp_con_outqueue.next,
65 struct sclp_buffer, list);
66 spin_unlock_irqrestore(&sclp_con_lock, flags);
67 } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback));
68}
69
70static inline void
71sclp_conbuf_emit(void)
72{
73 struct sclp_buffer* buffer;
74 unsigned long flags;
75 int count;
76 int rc;
77
78 spin_lock_irqsave(&sclp_con_lock, flags);
79 buffer = sclp_conbuf;
80 sclp_conbuf = NULL;
81 if (buffer == NULL) {
82 spin_unlock_irqrestore(&sclp_con_lock, flags);
83 return;
84 }
85 list_add_tail(&buffer->list, &sclp_con_outqueue);
86 count = sclp_con_buffer_count++;
87 spin_unlock_irqrestore(&sclp_con_lock, flags);
88 if (count)
89 return;
90 rc = sclp_emit_buffer(buffer, sclp_conbuf_callback);
91 if (rc)
92 sclp_conbuf_callback(buffer, rc);
93}
94
95/*
 * When called from the timer, flush the temporary write buffer
 * without waiting for a final new line.
98 */
99static void
100sclp_console_timeout(unsigned long data)
101{
102 sclp_conbuf_emit();
103}
104
105/*
 * Write the given message to the S390 system console.
107 */
108static void
109sclp_console_write(struct console *console, const char *message,
110 unsigned int count)
111{
112 unsigned long flags;
113 void *page;
114 int written;
115
116 if (count == 0)
117 return;
118 spin_lock_irqsave(&sclp_con_lock, flags);
119 /*
120 * process escape characters, write message into buffer,
121 * send buffer to SCLP
122 */
123 do {
124 /* make sure we have a console output buffer */
125 if (sclp_conbuf == NULL) {
126 while (list_empty(&sclp_con_pages)) {
127 spin_unlock_irqrestore(&sclp_con_lock, flags);
128 sclp_sync_wait();
129 spin_lock_irqsave(&sclp_con_lock, flags);
130 }
131 page = sclp_con_pages.next;
132 list_del((struct list_head *) page);
133 sclp_conbuf = sclp_make_buffer(page, sclp_con_columns,
134 sclp_con_width_htab);
135 }
136 /* try to write the string to the current output buffer */
137 written = sclp_write(sclp_conbuf, (const unsigned char *)
138 message, count);
139 if (written == count)
140 break;
141 /*
142 * Not all characters could be written to the current
143 * output buffer. Emit the buffer, create a new buffer
144 * and then output the rest of the string.
145 */
146 spin_unlock_irqrestore(&sclp_con_lock, flags);
147 sclp_conbuf_emit();
148 spin_lock_irqsave(&sclp_con_lock, flags);
149 message += written;
150 count -= written;
151 } while (count > 0);
	/* Set up a timer to flush the current console buffer after 1/10 second */
153 if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 &&
154 !timer_pending(&sclp_con_timer)) {
155 init_timer(&sclp_con_timer);
156 sclp_con_timer.function = sclp_console_timeout;
157 sclp_con_timer.data = 0UL;
158 sclp_con_timer.expires = jiffies + HZ/10;
159 add_timer(&sclp_con_timer);
160 }
161 spin_unlock_irqrestore(&sclp_con_lock, flags);
162}
163
164static struct tty_driver *
165sclp_console_device(struct console *c, int *index)
166{
167 *index = c->index;
168 return sclp_tty_driver;
169}
170
171/*
172 * This routine is called from panic when the kernel
173 * is going to give up. We have to make sure that all buffers
174 * will be flushed to the SCLP.
175 */
176static void
177sclp_console_unblank(void)
178{
179 unsigned long flags;
180
181 sclp_conbuf_emit();
182 spin_lock_irqsave(&sclp_con_lock, flags);
183 if (timer_pending(&sclp_con_timer))
184 del_timer(&sclp_con_timer);
185 while (sclp_con_buffer_count > 0) {
186 spin_unlock_irqrestore(&sclp_con_lock, flags);
187 sclp_sync_wait();
188 spin_lock_irqsave(&sclp_con_lock, flags);
189 }
190 spin_unlock_irqrestore(&sclp_con_lock, flags);
191}
192
193/*
 * Used to register the SCLP console with the kernel and to
 * give printk the necessary information.
196 */
197static struct console sclp_console =
198{
199 .name = sclp_console_name,
200 .write = sclp_console_write,
201 .device = sclp_console_device,
202 .unblank = sclp_console_unblank,
203 .flags = CON_PRINTBUFFER,
204 .index = 0 /* ttyS0 */
205};
206
207/*
208 * called by console_init() in drivers/char/tty_io.c at boot-time.
209 */
210static int __init
211sclp_console_init(void)
212{
213 void *page;
214 int i;
215 int rc;
216
217 if (!CONSOLE_IS_SCLP)
218 return 0;
219 rc = sclp_rw_init();
220 if (rc)
221 return rc;
222 /* Allocate pages for output buffering */
223 INIT_LIST_HEAD(&sclp_con_pages);
224 for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
225 page = alloc_bootmem_low_pages(PAGE_SIZE);
226 if (page == NULL)
227 return -ENOMEM;
228 list_add_tail((struct list_head *) page, &sclp_con_pages);
229 }
230 INIT_LIST_HEAD(&sclp_con_outqueue);
231 spin_lock_init(&sclp_con_lock);
232 sclp_con_buffer_count = 0;
233 sclp_conbuf = NULL;
234 init_timer(&sclp_con_timer);
235
236 /* Set output format */
237 if (MACHINE_IS_VM)
238 /*
239 * save 4 characters for the CPU number
240 * written at start of each line by VM/CP
241 */
242 sclp_con_columns = 76;
243 else
244 sclp_con_columns = 80;
245 sclp_con_width_htab = 8;
246
247 /* enable printk-access to this driver */
248 register_console(&sclp_console);
249 return 0;
250}
251
252console_initcall(sclp_console_init);
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
new file mode 100644
index 000000000000..5a6cef2dfa13
--- /dev/null
+++ b/drivers/s390/char/sclp_cpi.c
@@ -0,0 +1,254 @@
1/*
2 * Author: Martin Peschke <mpeschke@de.ibm.com>
3 * Copyright (C) 2001 IBM Entwicklung GmbH, IBM Corporation
4 *
5 * SCLP Control-Program Identification.
6 */
7
8#include <linux/config.h>
9#include <linux/version.h>
10#include <linux/kmod.h>
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/init.h>
14#include <linux/timer.h>
15#include <linux/string.h>
16#include <linux/err.h>
17#include <linux/slab.h>
18#include <asm/ebcdic.h>
19#include <asm/semaphore.h>
20
21#include "sclp.h"
22#include "sclp_rw.h"
23
24#define CPI_LENGTH_SYSTEM_TYPE 8
25#define CPI_LENGTH_SYSTEM_NAME 8
26#define CPI_LENGTH_SYSPLEX_NAME 8
27
28struct cpi_evbuf {
29 struct evbuf_header header;
30 u8 id_format;
31 u8 reserved0;
32 u8 system_type[CPI_LENGTH_SYSTEM_TYPE];
33 u64 reserved1;
34 u8 system_name[CPI_LENGTH_SYSTEM_NAME];
35 u64 reserved2;
36 u64 system_level;
37 u64 reserved3;
38 u8 sysplex_name[CPI_LENGTH_SYSPLEX_NAME];
39 u8 reserved4[16];
40} __attribute__((packed));
41
42struct cpi_sccb {
43 struct sccb_header header;
44 struct cpi_evbuf cpi_evbuf;
45} __attribute__((packed));
46
47/* Registration structure for the Control-Program Identification event type */
48static struct sclp_register sclp_cpi_event =
49{
50 .send_mask = EvTyp_CtlProgIdent_Mask
51};
52
53MODULE_AUTHOR(
54 "Martin Peschke, IBM Deutschland Entwicklung GmbH "
55 "<mpeschke@de.ibm.com>");
56
57MODULE_DESCRIPTION(
58 "identify this operating system instance to the S/390 "
59 "or zSeries hardware");
60
61static char *system_name = NULL;
62module_param(system_name, charp, 0);
63MODULE_PARM_DESC(system_name, "e.g. hostname - max. 8 characters");
64
65static char *sysplex_name = NULL;
66#ifdef ALLOW_SYSPLEX_NAME
67module_param(sysplex_name, charp, 0);
68MODULE_PARM_DESC(sysplex_name, "if applicable - max. 8 characters");
69#endif
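/*
 * Hypothetical usage sketch for the parameters above (module and host
 * names are examples): system_type defaults to "LINUX" below, so only
 * system_name needs to be given, e.g.
 *
 *	insmod sclp_cpi.ko system_name=LNXHOST
 */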
70
71/* use default value for this field (as well as for system level) */
72static char *system_type = "LINUX";
73
74static int
75cpi_check_parms(void)
76{
77 /* reject if no system type specified */
78 if (!system_type) {
79 printk("cpi: bug: no system type specified\n");
80 return -EINVAL;
81 }
82
83 /* reject if system type larger than 8 characters */
84	if (strlen(system_type) > CPI_LENGTH_SYSTEM_TYPE) {
85 printk("cpi: bug: system type has length of %li characters - "
86 "only %i characters supported\n",
87 strlen(system_type), CPI_LENGTH_SYSTEM_TYPE);
88 return -EINVAL;
89 }
90
91 /* reject if no system name specified */
92 if (!system_name) {
93 printk("cpi: no system name specified\n");
94 return -EINVAL;
95 }
96
97 /* reject if system name larger than 8 characters */
98 if (strlen(system_name) > CPI_LENGTH_SYSTEM_NAME) {
99 printk("cpi: system name has length of %li characters - "
100 "only %i characters supported\n",
101 strlen(system_name), CPI_LENGTH_SYSTEM_NAME);
102 return -EINVAL;
103 }
104
105 /* reject if specified sysplex name larger than 8 characters */
106 if (sysplex_name && strlen(sysplex_name) > CPI_LENGTH_SYSPLEX_NAME) {
107 printk("cpi: sysplex name has length of %li characters"
108 " - only %i characters supported\n",
109 strlen(sysplex_name), CPI_LENGTH_SYSPLEX_NAME);
110 return -EINVAL;
111 }
112 return 0;
113}
114
115static void
116cpi_callback(struct sclp_req *req, void *data)
117{
118 struct semaphore *sem;
119
120 sem = (struct semaphore *) data;
121 up(sem);
122}
123
124static struct sclp_req *
125cpi_prepare_req(void)
126{
127 struct sclp_req *req;
128 struct cpi_sccb *sccb;
129 struct cpi_evbuf *evb;
130
131 req = (struct sclp_req *) kmalloc(sizeof(struct sclp_req), GFP_KERNEL);
132 if (req == NULL)
133 return ERR_PTR(-ENOMEM);
134 sccb = (struct cpi_sccb *) __get_free_page(GFP_KERNEL | GFP_DMA);
135 if (sccb == NULL) {
136 kfree(req);
137 return ERR_PTR(-ENOMEM);
138 }
139 memset(sccb, 0, sizeof(struct cpi_sccb));
140
141 /* setup SCCB for Control-Program Identification */
142 sccb->header.length = sizeof(struct cpi_sccb);
143 sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
144 sccb->cpi_evbuf.header.type = 0x0B;
145 evb = &sccb->cpi_evbuf;
146
147 /* set system type */
148 memset(evb->system_type, ' ', CPI_LENGTH_SYSTEM_TYPE);
149 memcpy(evb->system_type, system_type, strlen(system_type));
150 sclp_ascebc_str(evb->system_type, CPI_LENGTH_SYSTEM_TYPE);
151 EBC_TOUPPER(evb->system_type, CPI_LENGTH_SYSTEM_TYPE);
152
153 /* set system name */
154 memset(evb->system_name, ' ', CPI_LENGTH_SYSTEM_NAME);
155 memcpy(evb->system_name, system_name, strlen(system_name));
156 sclp_ascebc_str(evb->system_name, CPI_LENGTH_SYSTEM_NAME);
157 EBC_TOUPPER(evb->system_name, CPI_LENGTH_SYSTEM_NAME);
158
159	/* set system level */
160 evb->system_level = LINUX_VERSION_CODE;
161
162 /* set sysplex name */
163 if (sysplex_name) {
164 memset(evb->sysplex_name, ' ', CPI_LENGTH_SYSPLEX_NAME);
165 memcpy(evb->sysplex_name, sysplex_name, strlen(sysplex_name));
166 sclp_ascebc_str(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME);
167 EBC_TOUPPER(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME);
168 }
169
170 /* prepare request data structure presented to SCLP driver */
171 req->command = SCLP_CMDW_WRITEDATA;
172 req->sccb = sccb;
173 req->status = SCLP_REQ_FILLED;
174 req->callback = cpi_callback;
175 return req;
176}
177
178static void
179cpi_free_req(struct sclp_req *req)
180{
181 free_page((unsigned long) req->sccb);
182 kfree(req);
183}
184
185static int __init
186cpi_module_init(void)
187{
188 struct semaphore sem;
189 struct sclp_req *req;
190 int rc;
191
192 rc = cpi_check_parms();
193 if (rc)
194 return rc;
195
196 rc = sclp_register(&sclp_cpi_event);
197 if (rc) {
198 /* could not register sclp event. Die. */
199 printk(KERN_WARNING "cpi: could not register to hardware "
200 "console.\n");
201 return -EINVAL;
202 }
203 if (!(sclp_cpi_event.sclp_send_mask & EvTyp_CtlProgIdent_Mask)) {
204 printk(KERN_WARNING "cpi: no control program identification "
205 "support\n");
206 sclp_unregister(&sclp_cpi_event);
207 return -ENOTSUPP;
208 }
209
210 req = cpi_prepare_req();
211 if (IS_ERR(req)) {
212 printk(KERN_WARNING "cpi: couldn't allocate request\n");
213 sclp_unregister(&sclp_cpi_event);
214 return PTR_ERR(req);
215 }
216
217 /* Prepare semaphore */
218 sema_init(&sem, 0);
219 req->callback_data = &sem;
220 /* Add request to sclp queue */
221 rc = sclp_add_request(req);
222 if (rc) {
223 printk(KERN_WARNING "cpi: could not start request\n");
224 cpi_free_req(req);
225 sclp_unregister(&sclp_cpi_event);
226 return rc;
227 }
228 /* make "insmod" sleep until callback arrives */
229 down(&sem);
230
231 rc = ((struct cpi_sccb *) req->sccb)->header.response_code;
232 if (rc != 0x0020) {
233 printk(KERN_WARNING "cpi: failed with response code 0x%x\n",
234 rc);
235 rc = -ECOMM;
236 } else
237 rc = 0;
238
239 cpi_free_req(req);
240 sclp_unregister(&sclp_cpi_event);
241
242 return rc;
243}
244
245
246static void __exit cpi_module_exit(void)
247{
248}
249
250
251/* declare driver module init/cleanup functions */
252module_init(cpi_module_init);
253module_exit(cpi_module_exit);
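/*
 * Minimal sketch (all ex_ names are hypothetical) of the synchronous
 * request pattern used by cpi_module_init() above - the caller sleeps on
 * a semaphore that the request callback ups once the SCLP has processed
 * the SCCB.
 */
#if 0
static void ex_done(struct sclp_req *req, void *data)
{
	up((struct semaphore *) data);		/* wake the submitter */
}

static int ex_submit_and_wait(struct sclp_req *req)
{
	struct semaphore sem;
	int rc;

	sema_init(&sem, 0);
	req->callback = ex_done;
	req->callback_data = &sem;
	rc = sclp_add_request(req);		/* hand off to SCLP core */
	if (rc)
		return rc;
	down(&sem);				/* sleep until callback */
	return 0;
}
#endif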
254
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
new file mode 100644
index 000000000000..83f75774df60
--- /dev/null
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -0,0 +1,99 @@
1/*
2 * drivers/s390/char/sclp_quiesce.c
3 * signal quiesce handler
4 *
5 * (C) Copyright IBM Corp. 1999,2004
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
8 */
9
10#include <linux/config.h>
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/cpumask.h>
14#include <linux/smp.h>
15#include <linux/init.h>
16#include <asm/atomic.h>
17#include <asm/ptrace.h>
18#include <asm/sigp.h>
19
20#include "sclp.h"
21
22
23#ifdef CONFIG_SMP
24/* Signal completion of shutdown process. All CPUs except the first to enter
25 * this function: go to stopped state. First CPU: wait until all other
26 * CPUs are in stopped or check stop state. Afterwards, load special PSW
27 * to indicate completion. */
28static void
29do_load_quiesce_psw(void * __unused)
30{
31 static atomic_t cpuid = ATOMIC_INIT(-1);
32 psw_t quiesce_psw;
33 int cpu;
34
35 if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
36 signal_processor(smp_processor_id(), sigp_stop);
37 /* Wait for all other cpus to enter stopped state */
38 for_each_online_cpu(cpu) {
39 if (cpu == smp_processor_id())
40 continue;
41 while(!smp_cpu_not_running(cpu))
42 cpu_relax();
43 }
44 /* Quiesce the last cpu with the special psw */
45 quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
46 quiesce_psw.addr = 0xfff;
47 __load_psw(quiesce_psw);
48}
49
50/* Shutdown handler. Perform shutdown function on all CPUs. */
51static void
52do_machine_quiesce(void)
53{
54 on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
55}
56#else
57/* Shutdown handler. Signal completion of shutdown by loading special PSW. */
58static void
59do_machine_quiesce(void)
60{
61 psw_t quiesce_psw;
62
63 quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
64 quiesce_psw.addr = 0xfff;
65 __load_psw(quiesce_psw);
66}
67#endif
68
69extern void ctrl_alt_del(void);
70
71/* Handler for quiesce event. Start shutdown procedure. */
72static void
73sclp_quiesce_handler(struct evbuf_header *evbuf)
74{
75 _machine_restart = (void *) do_machine_quiesce;
76 _machine_halt = do_machine_quiesce;
77 _machine_power_off = do_machine_quiesce;
78 ctrl_alt_del();
79}
80
81static struct sclp_register sclp_quiesce_event = {
82 .receive_mask = EvTyp_SigQuiesce_Mask,
83 .receiver_fn = sclp_quiesce_handler
84};
85
86/* Initialize quiesce driver. */
87static int __init
88sclp_quiesce_init(void)
89{
90 int rc;
91
92 rc = sclp_register(&sclp_quiesce_event);
93 if (rc)
94 printk(KERN_WARNING "sclp: could not register quiesce handler "
95 "(rc=%d)\n", rc);
96 return rc;
97}
98
99module_init(sclp_quiesce_init);
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
new file mode 100644
index 000000000000..ac10dfb20a62
--- /dev/null
+++ b/drivers/s390/char/sclp_rw.c
@@ -0,0 +1,471 @@
1/*
2 * drivers/s390/char/sclp_rw.c
3 * driver: reading from and writing to system console on S/390 via SCLP
4 *
5 * S390 version
6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */
10
11#include <linux/config.h>
12#include <linux/kmod.h>
13#include <linux/types.h>
14#include <linux/err.h>
15#include <linux/string.h>
16#include <linux/spinlock.h>
17#include <linux/ctype.h>
18#include <asm/uaccess.h>
19
20#include "sclp.h"
21#include "sclp_rw.h"
22
23#define SCLP_RW_PRINT_HEADER "sclp low level driver: "
24
25/*
26 * The room for the SCCB (only for writing) is not equal to a page size
27 * (which the SCLP documentation specifies as the maximum size)
28 * because of the additional struct sclp_buffer placed at the end of the page.
29 */
30#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
31
32/* Event type structure for write message and write priority message */
33static struct sclp_register sclp_rw_event = {
34 .send_mask = EvTyp_Msg_Mask | EvTyp_PMsgCmd_Mask
35};
36
37/*
38 * Setup a sclp write buffer. Gets a page as input (4K) and returns
39 * a pointer to a struct sclp_buffer structure that is located at the
40 * end of the input page. This reduces the buffer space by a few
41 * bytes but simplifies things.
42 */
43struct sclp_buffer *
44sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
45{
46 struct sclp_buffer *buffer;
47 struct write_sccb *sccb;
48
49 sccb = (struct write_sccb *) page;
50 /*
51 * We keep the struct sclp_buffer structure at the end
52 * of the sccb page.
53 */
54 buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1;
55 buffer->sccb = sccb;
56 buffer->retry_count = 0;
57 buffer->mto_number = 0;
58 buffer->mto_char_sum = 0;
59 buffer->current_line = NULL;
60 buffer->current_length = 0;
61 buffer->columns = columns;
62 buffer->htab = htab;
63
64 /* initialize sccb */
65 memset(sccb, 0, sizeof(struct write_sccb));
66 sccb->header.length = sizeof(struct write_sccb);
67 sccb->msg_buf.header.length = sizeof(struct msg_buf);
68 sccb->msg_buf.header.type = EvTyp_Msg;
69 sccb->msg_buf.mdb.header.length = sizeof(struct mdb);
70 sccb->msg_buf.mdb.header.type = 1;
71 sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */
72 sccb->msg_buf.mdb.header.revision_code = 1;
73 sccb->msg_buf.mdb.go.length = sizeof(struct go);
74 sccb->msg_buf.mdb.go.type = 1;
75
76 return buffer;
77}
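/*
 * Layout sketch (illustrative only): one 4kB page carries both the SCCB,
 * growing upward from the page start, and the struct sclp_buffer
 * bookkeeping, fixed at the page end; the usable SCCB room is therefore
 * MAX_SCCB_ROOM = PAGE_SIZE - sizeof(struct sclp_buffer).
 *
 *	page                                          page + PAGE_SIZE
 *	|<------------ MAX_SCCB_ROOM ----------->|<-- sclp_buffer -->|
 *	| write_sccb header | mto | ... |  free  |    bookkeeping    |
 */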
78
79/*
80 * Return a pointer to the original page that has been used to create
81 * the buffer.
82 */
83void *
84sclp_unmake_buffer(struct sclp_buffer *buffer)
85{
86 return buffer->sccb;
87}
88
89/*
90 * Initialize a new Message Text Object (MTO) at the end of the provided buffer
91 * with enough room for max_len characters. Return 0 on success.
92 */
93static int
94sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
95{
96 struct write_sccb *sccb;
97 struct mto *mto;
98 int mto_size;
99
100 /* max size of new Message Text Object including message text */
101 mto_size = sizeof(struct mto) + max_len;
102
103 /* check if current buffer sccb can contain the mto */
104 sccb = buffer->sccb;
105 if ((MAX_SCCB_ROOM - sccb->header.length) < mto_size)
106 return -ENOMEM;
107
108 /* find address of new message text object */
109 mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
110
111 /*
112 * fill the new Message-Text Object,
113 * starting behind the former last byte of the SCCB
114 */
115 memset(mto, 0, sizeof(struct mto));
116 mto->length = sizeof(struct mto);
117 mto->type = 4; /* message text object */
118 mto->line_type_flags = LnTpFlgs_EndText; /* end text */
119
120 /* set pointer to first byte after struct mto. */
121 buffer->current_line = (char *) (mto + 1);
122 buffer->current_length = 0;
123
124 return 0;
125}
126
127/*
128 * Finalize MTO initialized by sclp_initialize_mto(), updating the sizes of
129 * MTO, enclosing MDB, event buffer and SCCB.
130 */
131static void
132sclp_finalize_mto(struct sclp_buffer *buffer)
133{
134 struct write_sccb *sccb;
135 struct mto *mto;
136 int str_len, mto_size;
137
138 str_len = buffer->current_length;
139 buffer->current_line = NULL;
140 buffer->current_length = 0;
141
142 /* real size of new Message Text Object including message text */
143 mto_size = sizeof(struct mto) + str_len;
144
145 /* find address of new message text object */
146 sccb = buffer->sccb;
147 mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
148
149 /* set size of message text object */
150 mto->length = mto_size;
151
152 /*
153 * update values of sizes
154 * (SCCB, Event(Message) Buffer, Message Data Block)
155 */
156 sccb->header.length += mto_size;
157 sccb->msg_buf.header.length += mto_size;
158 sccb->msg_buf.mdb.header.length += mto_size;
159
160 /*
161 * count number of buffered messages (= number of Message Text
162 * Objects) and number of buffered characters
163 * for the SCCB currently used for buffering and at all
164 */
165 buffer->mto_number++;
166 buffer->mto_char_sum += str_len;
167}
168
169/*
170 * Process a message including escape characters and return the number
171 * of characters written to the output sccb
172 * ("processed" means it is not guaranteed that the characters have already
173 * been sent to the SCLP, only that this will be done at the latest the
174 * next time the SCLP is not busy)
175 */
176int
177sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
178{
179 int spaces, i_msg;
180 int rc;
181
182 /*
183	 * Parse msg for escape sequences (\t, \v ...) and put the formatted
184	 * msg into an mto (created by sclp_initialize_mto).
185	 *
186	 * We have to do this work ourselves because there is no support for
187	 * these characters on the native machine and only partial support
188	 * under VM (why does VM interpret \n but the native machine doesn't?).
189	 *
190	 * Depending on the i/o-control setting, the message is either written
191	 * immediately or we wait for a final new line that may come with the
192	 * next message. In addition, we avoid a buffer overrun by writing out
193	 * the buffer's content.
194	 *
195	 * RESTRICTIONS:
196	 *
197	 * \r and \b work within one line because we are not able to modify
198	 * previous output that has already been accepted by the SCLP.
199	 *
200	 * \t combined with a following \r is not correctly represented because
201	 * \t is expanded to some spaces while \r does not know about a
202	 * previous \t and decreases the current position by one column.
203	 * This keeps the implementation slim and quick.
204 */
205 for (i_msg = 0; i_msg < count; i_msg++) {
206 switch (msg[i_msg]) {
207 case '\n': /* new line, line feed (ASCII) */
208 /* check if new mto needs to be created */
209 if (buffer->current_line == NULL) {
210 rc = sclp_initialize_mto(buffer, 0);
211 if (rc)
212 return i_msg;
213 }
214 sclp_finalize_mto(buffer);
215 break;
216		case '\a':	/* bell; a single alarm even for repeated \a */
217 /* set SCLP sound alarm bit in General Object */
218 buffer->sccb->msg_buf.mdb.go.general_msg_flags |=
219 GnrlMsgFlgs_SndAlrm;
220 break;
221 case '\t': /* horizontal tabulator */
222 /* check if new mto needs to be created */
223 if (buffer->current_line == NULL) {
224 rc = sclp_initialize_mto(buffer,
225 buffer->columns);
226 if (rc)
227 return i_msg;
228 }
229 /* "go to (next htab-boundary + 1, same line)" */
230 do {
231 if (buffer->current_length >= buffer->columns)
232 break;
233 /* ok, add a blank */
234 *buffer->current_line++ = 0x40;
235 buffer->current_length++;
236 } while (buffer->current_length % buffer->htab);
237 break;
238 case '\f': /* form feed */
239 case '\v': /* vertical tabulator */
240 /* "go to (actual column, actual line + 1)" */
241 /* = new line, leading spaces */
242 if (buffer->current_line != NULL) {
243 spaces = buffer->current_length;
244 sclp_finalize_mto(buffer);
245 rc = sclp_initialize_mto(buffer,
246 buffer->columns);
247 if (rc)
248 return i_msg;
249 memset(buffer->current_line, 0x40, spaces);
250 buffer->current_line += spaces;
251 buffer->current_length = spaces;
252 } else {
253				/* on an empty line this is the same as \n */
254 rc = sclp_initialize_mto(buffer,
255 buffer->columns);
256 if (rc)
257 return i_msg;
258 sclp_finalize_mto(buffer);
259 }
260 break;
261 case '\b': /* backspace */
262 /* "go to (actual column - 1, actual line)" */
263 /* decrement counter indicating position, */
264 /* do not remove last character */
265 if (buffer->current_line != NULL &&
266 buffer->current_length > 0) {
267 buffer->current_length--;
268 buffer->current_line--;
269 }
270 break;
271 case 0x00: /* end of string */
272 /* transfer current line to SCCB */
273 if (buffer->current_line != NULL)
274 sclp_finalize_mto(buffer);
275 /* skip the rest of the message including the 0 byte */
276 i_msg = count - 1;
277 break;
278 default: /* no escape character */
279 /* do not output unprintable characters */
280 if (!isprint(msg[i_msg]))
281 break;
282 /* check if new mto needs to be created */
283 if (buffer->current_line == NULL) {
284 rc = sclp_initialize_mto(buffer,
285 buffer->columns);
286 if (rc)
287 return i_msg;
288 }
289 *buffer->current_line++ = sclp_ascebc(msg[i_msg]);
290 buffer->current_length++;
291 break;
292 }
293 /* check if current mto is full */
294 if (buffer->current_line != NULL &&
295 buffer->current_length >= buffer->columns)
296 sclp_finalize_mto(buffer);
297 }
298
299 /* return number of processed characters */
300 return i_msg;
301}
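/*
 * Illustrative sketch (not part of the driver) of the producer flow the
 * console and tty layers build around sclp_write(): wrap a free page,
 * write as much as fits, emit the buffer. ex_page and ex_done are
 * hypothetical; a real caller loops while written < len, as both callers
 * in this patch do.
 */
#if 0
static void ex_print(void *ex_page, const unsigned char *s, int len,
		     void (*ex_done)(struct sclp_buffer *, int))
{
	struct sclp_buffer *buf;
	int written;

	buf = sclp_make_buffer(ex_page, 80, 8);	/* 80 columns, htab 8 */
	written = sclp_write(buf, s, len);	/* chars accepted */
	if (sclp_emit_buffer(buf, ex_done))	/* non-zero: not queued */
		ex_done(buf, -EIO);
}
#endif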
302
303/*
304 * Return the number of free bytes in the sccb
305 */
306int
307sclp_buffer_space(struct sclp_buffer *buffer)
308{
309 int count;
310
311 count = MAX_SCCB_ROOM - buffer->sccb->header.length;
312 if (buffer->current_line != NULL)
313 count -= sizeof(struct mto) + buffer->current_length;
314 return count;
315}
316
317/*
318 * Return number of characters in buffer
319 */
320int
321sclp_chars_in_buffer(struct sclp_buffer *buffer)
322{
323 int count;
324
325 count = buffer->mto_char_sum;
326 if (buffer->current_line != NULL)
327 count += buffer->current_length;
328 return count;
329}
330
331/*
332 * sets or provides some values that influence the drivers behaviour
333 */
334void
335sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns)
336{
337 buffer->columns = columns;
338 if (buffer->current_line != NULL &&
339 buffer->current_length > buffer->columns)
340 sclp_finalize_mto(buffer);
341}
342
343void
344sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab)
345{
346 buffer->htab = htab;
347}
348
349/*
350 * called by sclp_console_init and/or sclp_tty_init
351 */
352int
353sclp_rw_init(void)
354{
355 static int init_done = 0;
356 int rc;
357
358 if (init_done)
359 return 0;
360
361 rc = sclp_register(&sclp_rw_event);
362 if (rc == 0)
363 init_done = 1;
364 return rc;
365}
366
367#define SCLP_BUFFER_MAX_RETRY 1
368
369/*
370 * Second half of the Write Event Data function; has to be done after
371 * the interrupt that indicates completion of the Service Call.
372 */
373static void
374sclp_writedata_callback(struct sclp_req *request, void *data)
375{
376 int rc;
377 struct sclp_buffer *buffer;
378 struct write_sccb *sccb;
379
380 buffer = (struct sclp_buffer *) data;
381 sccb = buffer->sccb;
382
383 if (request->status == SCLP_REQ_FAILED) {
384 if (buffer->callback != NULL)
385 buffer->callback(buffer, -EIO);
386 return;
387 }
388 /* check SCLP response code and choose suitable action */
389 switch (sccb->header.response_code) {
390 case 0x0020 :
391 /* Normal completion, buffer processed, message(s) sent */
392 rc = 0;
393 break;
394
395 case 0x0340: /* Contained SCLP equipment check */
396 if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
397 rc = -EIO;
398 break;
399 }
400 /* remove processed buffers and requeue rest */
401 if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
402 /* not all buffers were processed */
403 sccb->header.response_code = 0x0000;
404 buffer->request.status = SCLP_REQ_FILLED;
405 rc = sclp_add_request(request);
406 if (rc == 0)
407 return;
408 } else
409 rc = 0;
410 break;
411
412 case 0x0040: /* SCLP equipment check */
413 case 0x05f0: /* Target resource in improper state */
414 if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
415 rc = -EIO;
416 break;
417 }
418 /* retry request */
419 sccb->header.response_code = 0x0000;
420 buffer->request.status = SCLP_REQ_FILLED;
421 rc = sclp_add_request(request);
422 if (rc == 0)
423 return;
424 break;
425 default:
426 if (sccb->header.response_code == 0x71f0)
427 rc = -ENOMEM;
428 else
429 rc = -EINVAL;
430 break;
431 }
432 if (buffer->callback != NULL)
433 buffer->callback(buffer, rc);
434}
435
436/*
437 * Setup the request structure in the struct sclp_buffer to do SCLP Write
438 * Event Data and pass the request to the core SCLP loop. Return zero on
439 * success, non-zero otherwise.
440 */
441int
442sclp_emit_buffer(struct sclp_buffer *buffer,
443 void (*callback)(struct sclp_buffer *, int))
444{
445 struct write_sccb *sccb;
446
447 /* add current line if there is one */
448 if (buffer->current_line != NULL)
449 sclp_finalize_mto(buffer);
450
451 /* Are there messages in the output buffer ? */
452 if (buffer->mto_number == 0)
453 return -EIO;
454
455 sccb = buffer->sccb;
456 if (sclp_rw_event.sclp_send_mask & EvTyp_Msg_Mask)
457 /* Use normal write message */
458 sccb->msg_buf.header.type = EvTyp_Msg;
459 else if (sclp_rw_event.sclp_send_mask & EvTyp_PMsgCmd_Mask)
460 /* Use write priority message */
461 sccb->msg_buf.header.type = EvTyp_PMsgCmd;
462 else
463 return -ENOSYS;
464 buffer->request.command = SCLP_CMDW_WRITEDATA;
465 buffer->request.status = SCLP_REQ_FILLED;
466 buffer->request.callback = sclp_writedata_callback;
467 buffer->request.callback_data = buffer;
468 buffer->request.sccb = sccb;
469 buffer->callback = callback;
470 return sclp_add_request(&buffer->request);
471}
diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h
new file mode 100644
index 000000000000..6aa7a6948bc9
--- /dev/null
+++ b/drivers/s390/char/sclp_rw.h
@@ -0,0 +1,96 @@
1/*
2 * drivers/s390/char/sclp_rw.h
3 * interface to the SCLP-read/write driver
4 *
5 * S390 version
6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */
10
11#ifndef __SCLP_RW_H__
12#define __SCLP_RW_H__
13
14#include <linux/list.h>
15
16struct mto {
17 u16 length;
18 u16 type;
19 u16 line_type_flags;
20 u8 alarm_control;
21 u8 _reserved[3];
22} __attribute__((packed));
23
24struct go {
25 u16 length;
26 u16 type;
27 u32 domid;
28 u8 hhmmss_time[8];
29 u8 th_time[3];
30 u8 reserved_0;
31 u8 dddyyyy_date[7];
32 u8 _reserved_1;
33 u16 general_msg_flags;
34 u8 _reserved_2[10];
35 u8 originating_system_name[8];
36 u8 job_guest_name[8];
37} __attribute__((packed));
38
39struct mdb_header {
40 u16 length;
41 u16 type;
42 u32 tag;
43 u32 revision_code;
44} __attribute__((packed));
45
46struct mdb {
47 struct mdb_header header;
48 struct go go;
49} __attribute__((packed));
50
51struct msg_buf {
52 struct evbuf_header header;
53 struct mdb mdb;
54} __attribute__((packed));
55
56struct write_sccb {
57 struct sccb_header header;
58 struct msg_buf msg_buf;
59} __attribute__((packed));
60
61/* The number of empty mto buffers that can be contained in a single sccb. */
62#define NR_EMPTY_MTO_PER_SCCB ((PAGE_SIZE - sizeof(struct sclp_buffer) - \
63 sizeof(struct write_sccb)) / sizeof(struct mto))
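/*
 * Worked example (struct sizes are assumptions for illustration): with
 * PAGE_SIZE = 4096 and, say, sizeof(struct sclp_buffer) = 64,
 * sizeof(struct write_sccb) = 80 and sizeof(struct mto) = 10, this gives
 * (4096 - 64 - 80) / 10 = 395 empty mtos per sccb.
 */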
64
65/*
66 * Data structure holding information about a list of SCCBs (only for
67 * writing); it is located at the end of an SCCB's page.
68 */
69struct sclp_buffer {
70 struct list_head list; /* list_head for sccb_info chain */
71 struct sclp_req request;
72 struct write_sccb *sccb;
73 char *current_line;
74 int current_length;
75 int retry_count;
76 /* output format settings */
77 unsigned short columns;
78 unsigned short htab;
79 /* statistics about this buffer */
80 unsigned int mto_char_sum; /* # chars in sccb */
81 unsigned int mto_number; /* # mtos in sccb */
82 /* Callback that is called after reaching final status. */
83 void (*callback)(struct sclp_buffer *, int);
84};
85
86int sclp_rw_init(void);
87struct sclp_buffer *sclp_make_buffer(void *, unsigned short, unsigned short);
88void *sclp_unmake_buffer(struct sclp_buffer *);
89int sclp_buffer_space(struct sclp_buffer *);
90int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int);
91int sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int));
92void sclp_set_columns(struct sclp_buffer *, unsigned short);
93void sclp_set_htab(struct sclp_buffer *, unsigned short);
94int sclp_chars_in_buffer(struct sclp_buffer *);
95
96#endif /* __SCLP_RW_H__ */
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
new file mode 100644
index 000000000000..a20d7c89341d
--- /dev/null
+++ b/drivers/s390/char/sclp_tty.c
@@ -0,0 +1,813 @@
1/*
2 * drivers/s390/char/sclp_tty.c
3 * SCLP line mode terminal driver.
4 *
5 * S390 version
6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */
10
11#include <linux/config.h>
12#include <linux/module.h>
13#include <linux/kmod.h>
14#include <linux/tty.h>
15#include <linux/tty_driver.h>
16#include <linux/sched.h>
17#include <linux/wait.h>
18#include <linux/slab.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <asm/uaccess.h>
23
24#include "ctrlchar.h"
25#include "sclp.h"
26#include "sclp_rw.h"
27#include "sclp_tty.h"
28
29#define SCLP_TTY_PRINT_HEADER "sclp tty driver: "
30
31/*
32 * size of a buffer that collects single characters coming in
33 * via sclp_tty_put_char()
34 */
35#define SCLP_TTY_BUF_SIZE 512
36
37/*
38 * There is exactly one SCLP terminal, so we can keep things simple
39 * and allocate all variables statically.
40 */
41
42/* Lock to guard over changes to global variables. */
43static spinlock_t sclp_tty_lock;
44/* List of free pages that can be used for console output buffering. */
45static struct list_head sclp_tty_pages;
46/* List of full struct sclp_buffer structures ready for output. */
47static struct list_head sclp_tty_outqueue;
48/* Counter how many buffers are emitted. */
49static int sclp_tty_buffer_count;
50/* Pointer to current console buffer. */
51static struct sclp_buffer *sclp_ttybuf;
52/* Timer for delayed output of console messages. */
53static struct timer_list sclp_tty_timer;
54/* Waitqueue to wait for buffers to get empty. */
55static wait_queue_head_t sclp_tty_waitq;
56
57static struct tty_struct *sclp_tty;
58static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE];
59static unsigned short int sclp_tty_chars_count;
60
61struct tty_driver *sclp_tty_driver;
62
63extern struct termios tty_std_termios;
64
65static struct sclp_ioctls sclp_ioctls;
66static struct sclp_ioctls sclp_ioctls_init =
67{
68 8, /* 1 hor. tab. = 8 spaces */
69 0, /* no echo of input by this driver */
70 80, /* 80 characters/line */
71 1, /* write after 1/10 s without final new line */
72 MAX_KMEM_PAGES, /* quick fix: avoid __alloc_pages */
73 MAX_KMEM_PAGES, /* take 32/64 pages from kernel memory, */
74 0, /* do not convert to lower case */
75	0x6c		/* to separate upper and lower case */
76 /* ('%' in EBCDIC) */
77};
78
79/* This routine is called whenever we try to open a SCLP terminal. */
80static int
81sclp_tty_open(struct tty_struct *tty, struct file *filp)
82{
83 sclp_tty = tty;
84 tty->driver_data = NULL;
85 tty->low_latency = 0;
86 return 0;
87}
88
89/* This routine is called when the SCLP terminal is closed. */
90static void
91sclp_tty_close(struct tty_struct *tty, struct file *filp)
92{
93 if (tty->count > 1)
94 return;
95 sclp_tty = NULL;
96}
97
98/* execute commands to control the i/o behaviour of the SCLP tty at runtime */
99static int
100sclp_tty_ioctl(struct tty_struct *tty, struct file * file,
101 unsigned int cmd, unsigned long arg)
102{
103 unsigned long flags;
104 unsigned int obuf;
105 int check;
106 int rc;
107
108 if (tty->flags & (1 << TTY_IO_ERROR))
109 return -EIO;
110 rc = 0;
111 check = 0;
112 switch (cmd) {
113 case TIOCSCLPSHTAB:
114 /* set width of horizontal tab */
115 if (get_user(sclp_ioctls.htab, (unsigned short __user *) arg))
116 rc = -EFAULT;
117 else
118 check = 1;
119 break;
120 case TIOCSCLPGHTAB:
121 /* get width of horizontal tab */
122 if (put_user(sclp_ioctls.htab, (unsigned short __user *) arg))
123 rc = -EFAULT;
124 break;
125 case TIOCSCLPSECHO:
126 /* enable/disable echo of input */
127 if (get_user(sclp_ioctls.echo, (unsigned char __user *) arg))
128 rc = -EFAULT;
129 break;
130 case TIOCSCLPGECHO:
131 /* Is echo of input enabled ? */
132 if (put_user(sclp_ioctls.echo, (unsigned char __user *) arg))
133 rc = -EFAULT;
134 break;
135 case TIOCSCLPSCOLS:
136 /* set number of columns for output */
137 if (get_user(sclp_ioctls.columns, (unsigned short __user *) arg))
138 rc = -EFAULT;
139 else
140 check = 1;
141 break;
142 case TIOCSCLPGCOLS:
143 /* get number of columns for output */
144 if (put_user(sclp_ioctls.columns, (unsigned short __user *) arg))
145 rc = -EFAULT;
146 break;
147 case TIOCSCLPSNL:
148 /* enable/disable writing without final new line character */
149 if (get_user(sclp_ioctls.final_nl, (signed char __user *) arg))
150 rc = -EFAULT;
151 break;
152 case TIOCSCLPGNL:
153 /* Is writing without final new line character enabled ? */
154 if (put_user(sclp_ioctls.final_nl, (signed char __user *) arg))
155 rc = -EFAULT;
156 break;
157 case TIOCSCLPSOBUF:
158 /*
159		 * set the maximum buffer size for output; it will be rounded
160		 * up to the next 4kB boundary and stored as a number of SCCBs
161		 * (4kB buffers); limitation: 256 x 4kB
162 */
163 if (get_user(obuf, (unsigned int __user *) arg) == 0) {
164 if (obuf & 0xFFF)
165 sclp_ioctls.max_sccb = (obuf >> 12) + 1;
166 else
167 sclp_ioctls.max_sccb = (obuf >> 12);
168 } else
169 rc = -EFAULT;
170 break;
171 case TIOCSCLPGOBUF:
172		/* get the maximum buffer size for output */
173 obuf = sclp_ioctls.max_sccb << 12;
174 if (put_user(obuf, (unsigned int __user *) arg))
175 rc = -EFAULT;
176 break;
177 case TIOCSCLPGKBUF:
178 /* get the number of buffers got from kernel at startup */
179 if (put_user(sclp_ioctls.kmem_sccb, (unsigned short __user *) arg))
180 rc = -EFAULT;
181 break;
182 case TIOCSCLPSCASE:
183 /* enable/disable conversion from upper to lower case */
184 if (get_user(sclp_ioctls.tolower, (unsigned char __user *) arg))
185 rc = -EFAULT;
186 break;
187 case TIOCSCLPGCASE:
188 /* Is conversion from upper to lower case of input enabled? */
189 if (put_user(sclp_ioctls.tolower, (unsigned char __user *) arg))
190 rc = -EFAULT;
191 break;
192 case TIOCSCLPSDELIM:
193 /*
194 * set special character used for separating upper and
195 * lower case, 0x00 disables this feature
196 */
197 if (get_user(sclp_ioctls.delim, (unsigned char __user *) arg))
198 rc = -EFAULT;
199 break;
200 case TIOCSCLPGDELIM:
201 /*
202 * get special character used for separating upper and
203 * lower case, 0x00 disables this feature
204 */
205 if (put_user(sclp_ioctls.delim, (unsigned char __user *) arg))
206 rc = -EFAULT;
207 break;
208 case TIOCSCLPSINIT:
209 /* set initial (default) sclp ioctls */
210 sclp_ioctls = sclp_ioctls_init;
211 check = 1;
212 break;
213 default:
214 rc = -ENOIOCTLCMD;
215 break;
216 }
217 if (check) {
218 spin_lock_irqsave(&sclp_tty_lock, flags);
219 if (sclp_ttybuf != NULL) {
220 sclp_set_htab(sclp_ttybuf, sclp_ioctls.htab);
221 sclp_set_columns(sclp_ttybuf, sclp_ioctls.columns);
222 }
223 spin_unlock_irqrestore(&sclp_tty_lock, flags);
224 }
225 return rc;
226}
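/*
 * Worked example for TIOCSCLPSOBUF above: a requested size of 5000 bytes
 * has low bits set (5000 & 0xFFF != 0), so max_sccb becomes
 * (5000 >> 12) + 1 = 2, i.e. two 4kB SCCBs (8192 bytes); a request of
 * exactly 8192 bytes yields 8192 >> 12 = 2 SCCBs without rounding.
 */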
227
228/*
229 * This routine returns the number of characters the tty driver
230 * will accept for queuing to be written. This number is subject
231 * to change as output buffers get emptied, or if output flow
232 * control is exercised. It is not an exact number because not every
233 * character needs the same space in the sccb. The worst case is
234 * a string of newlines; every newline creates a new mto which
235 * needs sizeof(struct mto) bytes.
236 */
237static int
238sclp_tty_write_room (struct tty_struct *tty)
239{
240 unsigned long flags;
241 struct list_head *l;
242 int count;
243
244 spin_lock_irqsave(&sclp_tty_lock, flags);
245 count = 0;
246 if (sclp_ttybuf != NULL)
247 count = sclp_buffer_space(sclp_ttybuf) / sizeof(struct mto);
248 list_for_each(l, &sclp_tty_pages)
249 count += NR_EMPTY_MTO_PER_SCCB;
250 spin_unlock_irqrestore(&sclp_tty_lock, flags);
251 return count;
252}
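/*
 * Worked example for the calculation above (numbers are illustrative):
 * if the current buffer reports, say, 3000 free bytes via
 * sclp_buffer_space() and two empty pages sit on sclp_tty_pages, the
 * routine returns 3000 / sizeof(struct mto) + 2 * NR_EMPTY_MTO_PER_SCCB
 * worst-case mtos, i.e. guaranteed-queueable characters.
 */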
253
254static void
255sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc)
256{
257 unsigned long flags;
258 void *page;
259
260 do {
261 page = sclp_unmake_buffer(buffer);
262 spin_lock_irqsave(&sclp_tty_lock, flags);
263 /* Remove buffer from outqueue */
264 list_del(&buffer->list);
265 sclp_tty_buffer_count--;
266 list_add_tail((struct list_head *) page, &sclp_tty_pages);
267 /* Check if there is a pending buffer on the out queue. */
268 buffer = NULL;
269 if (!list_empty(&sclp_tty_outqueue))
270 buffer = list_entry(sclp_tty_outqueue.next,
271 struct sclp_buffer, list);
272 spin_unlock_irqrestore(&sclp_tty_lock, flags);
273 } while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback));
274 wake_up(&sclp_tty_waitq);
275 /* check if the tty needs a wake up call */
276 if (sclp_tty != NULL) {
277 tty_wakeup(sclp_tty);
278 }
279}
280
281static inline void
282__sclp_ttybuf_emit(struct sclp_buffer *buffer)
283{
284 unsigned long flags;
285 int count;
286 int rc;
287
288 spin_lock_irqsave(&sclp_tty_lock, flags);
289 list_add_tail(&buffer->list, &sclp_tty_outqueue);
290 count = sclp_tty_buffer_count++;
291 spin_unlock_irqrestore(&sclp_tty_lock, flags);
292 if (count)
293 return;
294 rc = sclp_emit_buffer(buffer, sclp_ttybuf_callback);
295 if (rc)
296 sclp_ttybuf_callback(buffer, rc);
297}
298
299/*
300 * When this routine is called from the timer, we flush the
301 * temporary write buffer.
302 */
303static void
304sclp_tty_timeout(unsigned long data)
305{
306 unsigned long flags;
307 struct sclp_buffer *buf;
308
309 spin_lock_irqsave(&sclp_tty_lock, flags);
310 buf = sclp_ttybuf;
311 sclp_ttybuf = NULL;
312 spin_unlock_irqrestore(&sclp_tty_lock, flags);
313
314 if (buf != NULL) {
315 __sclp_ttybuf_emit(buf);
316 }
317}
318
319/*
320 * Write a string to the sclp tty.
321 */
322static void
323sclp_tty_write_string(const unsigned char *str, int count)
324{
325 unsigned long flags;
326 void *page;
327 int written;
328 struct sclp_buffer *buf;
329
330 if (count <= 0)
331 return;
332 spin_lock_irqsave(&sclp_tty_lock, flags);
333 do {
334 /* Create a sclp output buffer if none exists yet */
335 if (sclp_ttybuf == NULL) {
336 while (list_empty(&sclp_tty_pages)) {
337 spin_unlock_irqrestore(&sclp_tty_lock, flags);
338 if (in_interrupt())
339 sclp_sync_wait();
340 else
341 wait_event(sclp_tty_waitq,
342 !list_empty(&sclp_tty_pages));
343 spin_lock_irqsave(&sclp_tty_lock, flags);
344 }
345 page = sclp_tty_pages.next;
346 list_del((struct list_head *) page);
347 sclp_ttybuf = sclp_make_buffer(page,
348 sclp_ioctls.columns,
349 sclp_ioctls.htab);
350 }
351 /* try to write the string to the current output buffer */
352 written = sclp_write(sclp_ttybuf, str, count);
353 if (written == count)
354 break;
355 /*
356 * Not all characters could be written to the current
357 * output buffer. Emit the buffer, create a new buffer
358 * and then output the rest of the string.
359 */
360 buf = sclp_ttybuf;
361 sclp_ttybuf = NULL;
362 spin_unlock_irqrestore(&sclp_tty_lock, flags);
363 __sclp_ttybuf_emit(buf);
364 spin_lock_irqsave(&sclp_tty_lock, flags);
365 str += written;
366 count -= written;
367 } while (count > 0);
368 /* Setup timer to output current console buffer after 1/10 second */
369 if (sclp_ioctls.final_nl) {
370 if (sclp_ttybuf != NULL &&
371 sclp_chars_in_buffer(sclp_ttybuf) != 0 &&
372 !timer_pending(&sclp_tty_timer)) {
373 init_timer(&sclp_tty_timer);
374 sclp_tty_timer.function = sclp_tty_timeout;
375 sclp_tty_timer.data = 0UL;
376 sclp_tty_timer.expires = jiffies + HZ/10;
377 add_timer(&sclp_tty_timer);
378 }
379 } else {
380 if (sclp_ttybuf != NULL &&
381 sclp_chars_in_buffer(sclp_ttybuf) != 0) {
382 buf = sclp_ttybuf;
383 sclp_ttybuf = NULL;
384 spin_unlock_irqrestore(&sclp_tty_lock, flags);
385 __sclp_ttybuf_emit(buf);
386 spin_lock_irqsave(&sclp_tty_lock, flags);
387 }
388 }
389 spin_unlock_irqrestore(&sclp_tty_lock, flags);
390}
391
392/*
393 * This routine is called by the kernel to write a series of characters to the
394 * tty device. The characters may come from user space or kernel space. This
395 * routine will return the number of characters actually accepted for writing.
396 */
397static int
398sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
399{
400 if (sclp_tty_chars_count > 0) {
401 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
402 sclp_tty_chars_count = 0;
403 }
404 sclp_tty_write_string(buf, count);
405 return count;
406}
407
408/*
409 * This routine is called by the kernel to write a single character to the tty
410 * device. If the kernel uses this routine, it must call the flush_chars()
411 * routine (if defined) when it is done stuffing characters into the driver.
412 *
413 * Characters provided to sclp_tty_put_char() are buffered by the SCLP driver.
414 * If the given character is a '\n' the contents of the SCLP write buffer
415 * - including previous characters from sclp_tty_put_char() and strings from
416 * sclp_write() without final '\n' - will be written.
417 */
418static void
419sclp_tty_put_char(struct tty_struct *tty, unsigned char ch)
420{
421 sclp_tty_chars[sclp_tty_chars_count++] = ch;
422 if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) {
423 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
424 sclp_tty_chars_count = 0;
425 }
426}
427
428/*
429 * This routine is called by the kernel after it has written a series of
430 * characters to the tty device using put_char().
431 */
432static void
433sclp_tty_flush_chars(struct tty_struct *tty)
434{
435 if (sclp_tty_chars_count > 0) {
436 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
437 sclp_tty_chars_count = 0;
438 }
439}
440
441/*
442 * This routine returns the number of characters in the write buffer of the
443 * SCLP driver. The provided number includes all characters that are stored
444 * in the SCCB (will be written next time the SCLP is not busy) as well as
445 * characters in the write buffer (will not be written as long as there is a
446 * final line feed missing).
447 */
448static int
449sclp_tty_chars_in_buffer(struct tty_struct *tty)
450{
451 unsigned long flags;
452 struct list_head *l;
453 struct sclp_buffer *t;
454 int count;
455
456 spin_lock_irqsave(&sclp_tty_lock, flags);
457 count = 0;
458 if (sclp_ttybuf != NULL)
459 count = sclp_chars_in_buffer(sclp_ttybuf);
460 list_for_each(l, &sclp_tty_outqueue) {
461 t = list_entry(l, struct sclp_buffer, list);
462 count += sclp_chars_in_buffer(t);
463 }
464 spin_unlock_irqrestore(&sclp_tty_lock, flags);
465 return count;
466}
467
468/*
469 * Flush the characters buffered by the low-level driver to the SCLP.
470 */
471static void
472sclp_tty_flush_buffer(struct tty_struct *tty)
473{
474 if (sclp_tty_chars_count > 0) {
475 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
476 sclp_tty_chars_count = 0;
477 }
478}
479
480/*
481 * push input to tty
482 */
483static void
484sclp_tty_input(unsigned char* buf, unsigned int count)
485{
486 unsigned int cchar;
487
488 /*
489 * If this tty driver is currently closed
490 * then throw the received input away.
491 */
492 if (sclp_tty == NULL)
493 return;
494 cchar = ctrlchar_handle(buf, count, sclp_tty);
495 switch (cchar & CTRLCHAR_MASK) {
496 case CTRLCHAR_SYSRQ:
497 break;
498 case CTRLCHAR_CTRL:
499 sclp_tty->flip.count++;
500 *sclp_tty->flip.flag_buf_ptr++ = TTY_NORMAL;
501 *sclp_tty->flip.char_buf_ptr++ = cchar;
502 tty_flip_buffer_push(sclp_tty);
503 break;
504 case CTRLCHAR_NONE:
505 /* send (normal) input to line discipline */
506 memcpy(sclp_tty->flip.char_buf_ptr, buf, count);
507 if (count < 2 ||
508 (strncmp ((const char *) buf + count - 2, "^n", 2) &&
509 strncmp ((const char *) buf + count - 2, "\0252n", 2))) {
510 sclp_tty->flip.char_buf_ptr[count] = '\n';
511 count++;
512 } else
513 count -= 2;
514 memset(sclp_tty->flip.flag_buf_ptr, TTY_NORMAL, count);
515 sclp_tty->flip.char_buf_ptr += count;
516 sclp_tty->flip.flag_buf_ptr += count;
517 sclp_tty->flip.count += count;
518 tty_flip_buffer_push(sclp_tty);
519 break;
520 }
521}
522
523/*
524 * Take an EBCDIC string in upper/lower case; stretches delimited by a
525 * special character have their case switched. The original string is
526 * modified in place and the length of the resulting string is
527 * returned.
528 */
529static int
530sclp_switch_cases(unsigned char *buf, int count,
531 unsigned char delim, int tolower)
532{
533 unsigned char *ip, *op;
534 int toggle;
535
536 /* initially changing case is off */
537 toggle = 0;
538 ip = op = buf;
539 while (count-- > 0) {
540 /* compare with special character */
541 if (*ip == delim) {
542 /* followed by another special character? */
543 if (count && ip[1] == delim) {
544 /*
545 * ... then put a single copy of the special
546 * character to the output string
547 */
548 *op++ = *ip++;
549 count--;
550 } else
551 /*
552			 * ... special character followed by a normal
553			 * character toggles the case-change behaviour
554 */
555 toggle = ~toggle;
556 /* skip special character */
557 ip++;
558 } else
559 /* not the special character */
560 if (toggle)
561 /* but case switching is on */
562 if (tolower)
563 /* switch to uppercase */
564 *op++ = _ebc_toupper[(int) *ip++];
565 else
566 /* switch to lowercase */
567 *op++ = _ebc_tolower[(int) *ip++];
568 else
569 /* no case switching, copy the character */
570 *op++ = *ip++;
571 }
572 /* return length of reformatted string. */
573 return op - buf;
574}
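/*
 * Worked example (shown in ASCII for readability; the routine operates
 * on EBCDIC): with delim = '%' and tolower set, "abc%def%ghi" becomes
 * "abcDEFghi" - the first '%' switches case conversion on, the second
 * switches it off - while "50%%" becomes "50%", since a doubled
 * delimiter emits a single literal delimiter.
 */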
575
576static void
577sclp_get_input(unsigned char *start, unsigned char *end)
578{
579 int count;
580
581 count = end - start;
582 /*
583 * if set in ioctl convert EBCDIC to lower case
584 * (modify original input in SCCB)
585 */
586 if (sclp_ioctls.tolower)
587 EBC_TOLOWER(start, count);
588
589 /*
590 * if set in ioctl find out characters in lower or upper case
591 * (depends on current case) separated by a special character,
592 * works on EBCDIC
593 */
594 if (sclp_ioctls.delim)
595 count = sclp_switch_cases(start, count,
596 sclp_ioctls.delim,
597 sclp_ioctls.tolower);
598
599 /* convert EBCDIC to ASCII (modify original input in SCCB) */
600 sclp_ebcasc_str(start, count);
601
602 /* if set in ioctl write operators input to console */
603 if (sclp_ioctls.echo)
604 sclp_tty_write(sclp_tty, start, count);
605
606 /* transfer input to high level driver */
607 sclp_tty_input(start, count);
608}
609
610static inline struct gds_vector *
611find_gds_vector(struct gds_vector *start, struct gds_vector *end, u16 id)
612{
613 struct gds_vector *vec;
614
615 for (vec = start; vec < end; vec = (void *) vec + vec->length)
616 if (vec->gds_id == id)
617 return vec;
618 return NULL;
619}
620
621static inline struct gds_subvector *
622find_gds_subvector(struct gds_subvector *start,
623 struct gds_subvector *end, u8 key)
624{
625 struct gds_subvector *subvec;
626
627 for (subvec = start; subvec < end;
628 subvec = (void *) subvec + subvec->length)
629 if (subvec->key == key)
630 return subvec;
631 return NULL;
632}
633
634static inline void
635sclp_eval_selfdeftextmsg(struct gds_subvector *start,
636 struct gds_subvector *end)
637{
638 struct gds_subvector *subvec;
639
640 subvec = start;
641 while (subvec < end) {
642 subvec = find_gds_subvector(subvec, end, 0x30);
643 if (!subvec)
644 break;
645 sclp_get_input((unsigned char *)(subvec + 1),
646 (unsigned char *) subvec + subvec->length);
647 subvec = (void *) subvec + subvec->length;
648 }
649}
650
651static inline void
652sclp_eval_textcmd(struct gds_subvector *start,
653 struct gds_subvector *end)
654{
655 struct gds_subvector *subvec;
656
657 subvec = start;
658 while (subvec < end) {
659 subvec = find_gds_subvector(subvec, end,
660 GDS_KEY_SelfDefTextMsg);
661 if (!subvec)
662 break;
663 sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1),
664 (void *)subvec + subvec->length);
665 subvec = (void *) subvec + subvec->length;
666 }
667}
668
669static inline void
670sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end)
671{
672 struct gds_vector *vec;
673
674 vec = start;
675 while (vec < end) {
676 vec = find_gds_vector(vec, end, GDS_ID_TextCmd);
677 if (!vec)
678 break;
679 sclp_eval_textcmd((struct gds_subvector *)(vec + 1),
680 (void *) vec + vec->length);
681 vec = (void *) vec + vec->length;
682 }
683}
684
685
686static inline void
687sclp_eval_mdsmu(struct gds_vector *start, void *end)
688{
689 struct gds_vector *vec;
690
691 vec = find_gds_vector(start, end, GDS_ID_CPMSU);
692 if (vec)
693 sclp_eval_cpmsu(vec + 1, (void *) vec + vec->length);
694}
695
696static void
697sclp_tty_receiver(struct evbuf_header *evbuf)
698{
699 struct gds_vector *start, *end, *vec;
700
701 start = (struct gds_vector *)(evbuf + 1);
702 end = (void *) evbuf + evbuf->length;
703 vec = find_gds_vector(start, end, GDS_ID_MDSMU);
704 if (vec)
705 sclp_eval_mdsmu(vec + 1, (void *) vec + vec->length);
706}
707
708static void
709sclp_tty_state_change(struct sclp_register *reg)
710{
711}
712
713static struct sclp_register sclp_input_event =
714{
715 .receive_mask = EvTyp_OpCmd_Mask | EvTyp_PMsgCmd_Mask,
716 .state_change_fn = sclp_tty_state_change,
717 .receiver_fn = sclp_tty_receiver
718};
719
720static struct tty_operations sclp_ops = {
721 .open = sclp_tty_open,
722 .close = sclp_tty_close,
723 .write = sclp_tty_write,
724 .put_char = sclp_tty_put_char,
725 .flush_chars = sclp_tty_flush_chars,
726 .write_room = sclp_tty_write_room,
727 .chars_in_buffer = sclp_tty_chars_in_buffer,
728 .flush_buffer = sclp_tty_flush_buffer,
729 .ioctl = sclp_tty_ioctl,
730};
731
732int __init
733sclp_tty_init(void)
734{
735 struct tty_driver *driver;
736 void *page;
737 int i;
738 int rc;
739
740 if (!CONSOLE_IS_SCLP)
741 return 0;
742 driver = alloc_tty_driver(1);
743 if (!driver)
744 return -ENOMEM;
745
746 rc = sclp_rw_init();
747 if (rc) {
748 printk(KERN_ERR SCLP_TTY_PRINT_HEADER
749 "could not register tty - "
750 "sclp_rw_init returned %d\n", rc);
751 put_tty_driver(driver);
752 return rc;
753 }
754 /* Allocate pages for output buffering */
755 INIT_LIST_HEAD(&sclp_tty_pages);
756 for (i = 0; i < MAX_KMEM_PAGES; i++) {
757 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
758 if (page == NULL) {
759 put_tty_driver(driver);
760 return -ENOMEM;
761 }
762 list_add_tail((struct list_head *) page, &sclp_tty_pages);
763 }
764 INIT_LIST_HEAD(&sclp_tty_outqueue);
765 spin_lock_init(&sclp_tty_lock);
766 init_waitqueue_head(&sclp_tty_waitq);
767 init_timer(&sclp_tty_timer);
768 sclp_ttybuf = NULL;
769 sclp_tty_buffer_count = 0;
770 if (MACHINE_IS_VM) {
771 /*
772 * save 4 characters for the CPU number
773 * written at start of each line by VM/CP
774 */
775 sclp_ioctls_init.columns = 76;
776		/* convert input lines to lowercase */
777 sclp_ioctls_init.tolower = 1;
778 }
779 sclp_ioctls = sclp_ioctls_init;
780 sclp_tty_chars_count = 0;
781 sclp_tty = NULL;
782
783 rc = sclp_register(&sclp_input_event);
784 if (rc) {
785 put_tty_driver(driver);
786 return rc;
787 }
788
789 driver->owner = THIS_MODULE;
790 driver->driver_name = "sclp_line";
791 driver->name = "sclp_line";
792 driver->major = TTY_MAJOR;
793 driver->minor_start = 64;
794 driver->type = TTY_DRIVER_TYPE_SYSTEM;
795 driver->subtype = SYSTEM_TYPE_TTY;
796 driver->init_termios = tty_std_termios;
797 driver->init_termios.c_iflag = IGNBRK | IGNPAR;
798 driver->init_termios.c_oflag = ONLCR | XTABS;
799 driver->init_termios.c_lflag = ISIG | ECHO;
800 driver->flags = TTY_DRIVER_REAL_RAW;
801 tty_set_operations(driver, &sclp_ops);
802 rc = tty_register_driver(driver);
803 if (rc) {
804 printk(KERN_ERR SCLP_TTY_PRINT_HEADER
805 "could not register tty - "
806 "tty_register_driver returned %d\n", rc);
807 put_tty_driver(driver);
808 return rc;
809 }
810 sclp_tty_driver = driver;
811 return 0;
812}
813module_init(sclp_tty_init);
diff --git a/drivers/s390/char/sclp_tty.h b/drivers/s390/char/sclp_tty.h
new file mode 100644
index 000000000000..0ce2c1fc5340
--- /dev/null
+++ b/drivers/s390/char/sclp_tty.h
@@ -0,0 +1,71 @@
1/*
2 * drivers/s390/char/sclp_tty.h
3 * interface to the SCLP-read/write driver
4 *
5 * S390 version
6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */
10
11#ifndef __SCLP_TTY_H__
12#define __SCLP_TTY_H__
13
14#include <linux/ioctl.h>
15#include <linux/termios.h>
16#include <linux/tty_driver.h>
17
18/* This is the type of data structures storing sclp ioctl setting. */
19struct sclp_ioctls {
20 unsigned short htab;
21 unsigned char echo;
22 unsigned short columns;
23 unsigned char final_nl;
24 unsigned short max_sccb;
25 unsigned short kmem_sccb; /* can't be modified at run time */
26 unsigned char tolower;
27 unsigned char delim;
28};
29
30/* must be unique, FIXME: must be added in Documentation/ioctl_number.txt */
31#define SCLP_IOCTL_LETTER 'B'
32
33/* set width of horizontal tabulator */
34#define TIOCSCLPSHTAB _IOW(SCLP_IOCTL_LETTER, 0, unsigned short)
35/* enable/disable echo of input (independent from line discipline) */
36#define TIOCSCLPSECHO _IOW(SCLP_IOCTL_LETTER, 1, unsigned char)
37/* set number of columns for output */
38#define TIOCSCLPSCOLS _IOW(SCLP_IOCTL_LETTER, 2, unsigned short)
39/* enable/disable writing without final new line character */
40#define TIOCSCLPSNL _IOW(SCLP_IOCTL_LETTER, 4, signed char)
41/* set the maximum buffer size for output, rounded up to next 4kB boundary */
42#define TIOCSCLPSOBUF _IOW(SCLP_IOCTL_LETTER, 5, unsigned short)
43/* set initial (default) sclp ioctls */
44#define TIOCSCLPSINIT _IO(SCLP_IOCTL_LETTER, 6)
45/* enable/disable conversion from upper to lower case of input */
46#define TIOCSCLPSCASE _IOW(SCLP_IOCTL_LETTER, 7, unsigned char)
47/* set special character used for separating upper and lower case, */
48/* 0x00 disables this feature */
49#define TIOCSCLPSDELIM _IOW(SCLP_IOCTL_LETTER, 9, unsigned char)
50
51/* get width of horizontal tabulator */
52#define TIOCSCLPGHTAB _IOR(SCLP_IOCTL_LETTER, 10, unsigned short)
53/* Is echo of input enabled ? (independent from line discipline) */
54#define TIOCSCLPGECHO _IOR(SCLP_IOCTL_LETTER, 11, unsigned char)
55/* get number of columns for output */
56#define TIOCSCLPGCOLS _IOR(SCLP_IOCTL_LETTER, 12, unsigned short)
57/* Is writing without final new line character enabled ? */
58#define TIOCSCLPGNL _IOR(SCLP_IOCTL_LETTER, 14, signed char)
59/* get the maximum buffer size for output */
60#define TIOCSCLPGOBUF _IOR(SCLP_IOCTL_LETTER, 15, unsigned short)
61/* Is conversion from upper to lower case of input enabled ? */
62#define TIOCSCLPGCASE _IOR(SCLP_IOCTL_LETTER, 17, unsigned char)
63/* get special character used for separating upper and lower case, */
64/* 0x00 disables this feature */
65#define TIOCSCLPGDELIM _IOR(SCLP_IOCTL_LETTER, 19, unsigned char)
66/* get the number of buffers/pages got from kernel at startup */
67#define TIOCSCLPGKBUF _IOR(SCLP_IOCTL_LETTER, 20, unsigned short)
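/*
 * Hypothetical user-space sketch for the ioctls above; fd is an open
 * descriptor on the SCLP line-mode tty:
 *
 *	unsigned short cols = 76;
 *	if (ioctl(fd, TIOCSCLPSCOLS, &cols) == -1)
 *		perror("TIOCSCLPSCOLS");
 *	if (ioctl(fd, TIOCSCLPGCOLS, &cols) == 0)
 *		printf("output width: %hu columns\n", cols);
 */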
68
69extern struct tty_driver *sclp_tty_driver;
70
71#endif /* __SCLP_TTY_H__ */
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
new file mode 100644
index 000000000000..06bd85824d7b
--- /dev/null
+++ b/drivers/s390/char/sclp_vt220.c
@@ -0,0 +1,785 @@
1/*
2 * drivers/s390/char/sclp_vt220.c
3 * SCLP VT220 terminal driver.
4 *
5 * S390 version
6 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
8 */
9
10#include <linux/config.h>
11#include <linux/module.h>
12#include <linux/spinlock.h>
13#include <linux/list.h>
14#include <linux/wait.h>
15#include <linux/timer.h>
16#include <linux/kernel.h>
17#include <linux/tty.h>
18#include <linux/tty_driver.h>
19#include <linux/sched.h>
20#include <linux/errno.h>
21#include <linux/mm.h>
22#include <linux/major.h>
23#include <linux/console.h>
24#include <linux/kdev_t.h>
25#include <linux/bootmem.h>
26#include <linux/interrupt.h>
27#include <linux/init.h>
28#include <asm/uaccess.h>
29#include "sclp.h"
30
31#define SCLP_VT220_PRINT_HEADER "sclp vt220 tty driver: "
32#define SCLP_VT220_MAJOR TTY_MAJOR
33#define SCLP_VT220_MINOR 65
34#define SCLP_VT220_DRIVER_NAME "sclp_vt220"
35#define SCLP_VT220_DEVICE_NAME "ttysclp"
36#define SCLP_VT220_CONSOLE_NAME "ttyS"
37#define SCLP_VT220_CONSOLE_INDEX 1 /* console=ttyS1 */
38#define SCLP_VT220_BUF_SIZE 80
39
40/* Representation of a single write request */
41struct sclp_vt220_request {
42 struct list_head list;
43 struct sclp_req sclp_req;
44 int retry_count;
45};
46
47/* VT220 SCCB */
48struct sclp_vt220_sccb {
49 struct sccb_header header;
50 struct evbuf_header evbuf;
51};
52
53#define SCLP_VT220_MAX_CHARS_PER_BUFFER (PAGE_SIZE - \
54 sizeof(struct sclp_vt220_request) - \
55 sizeof(struct sclp_vt220_sccb))
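/*
 * Worked example (struct sizes are assumptions for illustration): with
 * PAGE_SIZE = 4096 and, say, sizeof(struct sclp_vt220_request) = 40 and
 * sizeof(struct sclp_vt220_sccb) = 16, a single page carries up to
 * 4096 - 40 - 16 = 4040 characters; VT220 data goes out as a plain byte
 * stream, so there is no per-line mto overhead here.
 */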
56
57/* Structures and data needed to register tty driver */
58static struct tty_driver *sclp_vt220_driver;
59
60/* The tty_struct that the kernel associated with us */
61static struct tty_struct *sclp_vt220_tty;
62
63/* Lock to protect internal data from concurrent access */
64static spinlock_t sclp_vt220_lock;
65
66/* List of empty pages to be used as write request buffers */
67static struct list_head sclp_vt220_empty;
68
69/* List of pending requests */
70static struct list_head sclp_vt220_outqueue;
71
72/* Number of requests in outqueue */
73static int sclp_vt220_outqueue_count;
74
75/* Wait queue used to delay write requests while we've run out of buffers */
76static wait_queue_head_t sclp_vt220_waitq;
77
78/* Timer used for delaying write requests to merge subsequent messages into
79 * a single buffer */
80static struct timer_list sclp_vt220_timer;
81
82/* Pointer to current request buffer which has been partially filled but not
83 * yet sent */
84static struct sclp_vt220_request *sclp_vt220_current_request;
85
86/* Number of characters in current request buffer */
87static int sclp_vt220_buffered_chars;
88
89/* Flag indicating whether this driver has already been initialized */
90static int sclp_vt220_initialized = 0;
91
92/* Flag indicating that sclp_vt220_current_request should already
93 * have been queued but wasn't because the SCLP was processing
94 * another buffer */
95static int sclp_vt220_flush_later;
96
97static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
98static int __sclp_vt220_emit(struct sclp_vt220_request *request);
99static void sclp_vt220_emit_current(void);
100
101/* Registration structure for our interest in SCLP event buffers */
102static struct sclp_register sclp_vt220_register = {
103 .send_mask = EvTyp_VT220Msg_Mask,
104 .receive_mask = EvTyp_VT220Msg_Mask,
105 .state_change_fn = NULL,
106 .receiver_fn = sclp_vt220_receiver_fn
107};
108
109
110/*
111 * Put the provided request buffer back into the empty queue and emit
112 * pending buffers if necessary.
113 */
114static void
115sclp_vt220_process_queue(struct sclp_vt220_request *request)
116{
117 unsigned long flags;
118 void *page;
119
120 do {
121 /* Put buffer back to list of empty buffers */
122 page = request->sclp_req.sccb;
123 spin_lock_irqsave(&sclp_vt220_lock, flags);
124 /* Move request from outqueue to empty queue */
125 list_del(&request->list);
126 sclp_vt220_outqueue_count--;
127 list_add_tail((struct list_head *) page, &sclp_vt220_empty);
128 /* Check if there is a pending buffer on the out queue. */
129 request = NULL;
130 if (!list_empty(&sclp_vt220_outqueue))
131 request = list_entry(sclp_vt220_outqueue.next,
132 struct sclp_vt220_request, list);
133 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
134 } while (request && __sclp_vt220_emit(request));
135 if (request == NULL && sclp_vt220_flush_later)
136 sclp_vt220_emit_current();
137 wake_up(&sclp_vt220_waitq);
138 /* Check if the tty needs a wake up call */
139 if (sclp_vt220_tty != NULL) {
140 tty_wakeup(sclp_vt220_tty);
141 }
142}
143
144#define SCLP_BUFFER_MAX_RETRY 1
145
146/*
147 * Callback through which the result of a write request is reported by the
148 * SCLP.
149 */
150static void
151sclp_vt220_callback(struct sclp_req *request, void *data)
152{
153 struct sclp_vt220_request *vt220_request;
154 struct sclp_vt220_sccb *sccb;
155
156 vt220_request = (struct sclp_vt220_request *) data;
157 if (request->status == SCLP_REQ_FAILED) {
158 sclp_vt220_process_queue(vt220_request);
159 return;
160 }
161 sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;
162
163 /* Check SCLP response code and choose suitable action */
164 switch (sccb->header.response_code) {
165 case 0x0020 :
166 break;
167
168 case 0x05f0: /* Target resource in improper state */
169 break;
170
171 case 0x0340: /* Contained SCLP equipment check */
172 if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
173 break;
174 /* Remove processed buffers and requeue rest */
175 if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
176 /* Not all buffers were processed */
177 sccb->header.response_code = 0x0000;
178 vt220_request->sclp_req.status = SCLP_REQ_FILLED;
179 if (sclp_add_request(request) == 0)
180 return;
181 }
182 break;
183
184 case 0x0040: /* SCLP equipment check */
185 if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
186 break;
187 sccb->header.response_code = 0x0000;
188 vt220_request->sclp_req.status = SCLP_REQ_FILLED;
189 if (sclp_add_request(request) == 0)
190 return;
191 break;
192
193 default:
194 break;
195 }
196 sclp_vt220_process_queue(vt220_request);
197}
198
199/*
200 * Emit vt220 request buffer to SCLP. Return zero on success, non-zero
201 * otherwise.
202 */
203static int
204__sclp_vt220_emit(struct sclp_vt220_request *request)
205{
206 if (!(sclp_vt220_register.sclp_send_mask & EvTyp_VT220Msg_Mask)) {
207 request->sclp_req.status = SCLP_REQ_FAILED;
208 return -EIO;
209 }
210 request->sclp_req.command = SCLP_CMDW_WRITEDATA;
211 request->sclp_req.status = SCLP_REQ_FILLED;
212 request->sclp_req.callback = sclp_vt220_callback;
213 request->sclp_req.callback_data = (void *) request;
214
215 return sclp_add_request(&request->sclp_req);
216}
217
218/*
219 * Queue and emit given request.
220 */
221static void
222sclp_vt220_emit(struct sclp_vt220_request *request)
223{
224 unsigned long flags;
225 int count;
226
227 spin_lock_irqsave(&sclp_vt220_lock, flags);
228 list_add_tail(&request->list, &sclp_vt220_outqueue);
229 count = sclp_vt220_outqueue_count++;
230 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
231 /* Emit only the first buffer immediately - callback takes care of
232 * the rest */
233 if (count == 0 && __sclp_vt220_emit(request))
234 sclp_vt220_process_queue(request);
235}
236
237/*
238 * Queue and emit the current request if it contains data.
239 */
240static void
241sclp_vt220_emit_current(void)
242{
243 unsigned long flags;
244 struct sclp_vt220_request *request;
245 struct sclp_vt220_sccb *sccb;
246
247 spin_lock_irqsave(&sclp_vt220_lock, flags);
248 request = NULL;
249 if (sclp_vt220_current_request != NULL) {
250 sccb = (struct sclp_vt220_sccb *)
251 sclp_vt220_current_request->sclp_req.sccb;
252 /* Only emit buffers with content */
253 if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
254 request = sclp_vt220_current_request;
255 sclp_vt220_current_request = NULL;
256 if (timer_pending(&sclp_vt220_timer))
257 del_timer(&sclp_vt220_timer);
258 }
259 sclp_vt220_flush_later = 0;
260 }
261 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
262 if (request != NULL)
263 sclp_vt220_emit(request);
264}
265
266#define SCLP_NORMAL_WRITE 0x00
267
268/*
269 * Helper function to initialize a page with the sclp request structure.
270 */
271static struct sclp_vt220_request *
272sclp_vt220_initialize_page(void *page)
273{
274 struct sclp_vt220_request *request;
275 struct sclp_vt220_sccb *sccb;
276
277 /* Place request structure at end of page */
278 request = ((struct sclp_vt220_request *)
279 ((addr_t) page + PAGE_SIZE)) - 1;
280 request->retry_count = 0;
281 request->sclp_req.sccb = page;
282 /* SCCB goes at start of page */
283 sccb = (struct sclp_vt220_sccb *) page;
284 memset((void *) sccb, 0, sizeof(struct sclp_vt220_sccb));
285 sccb->header.length = sizeof(struct sclp_vt220_sccb);
286 sccb->header.function_code = SCLP_NORMAL_WRITE;
287 sccb->header.response_code = 0x0000;
288 sccb->evbuf.type = EvTyp_VT220Msg;
289 sccb->evbuf.length = sizeof(struct evbuf_header);
290
291 return request;
292}
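/*
 * Resulting page layout (sketch, not to scale):
 *
 *	page                                          page + PAGE_SIZE
 *	|                                                           |
 *	+------+-------+--------------+------------+----------------+
 *	| sccb | evbuf | message text | free space | sclp_vt220_req |
 *	+------+-------+--------------+------------+----------------+
 *
 * The SCCB grows upward from the page start as characters are added,
 * while the request structure occupies the fixed tail of the page;
 * this is why SCLP_VT220_MAX_CHARS_PER_BUFFER subtracts the size of
 * both structures from PAGE_SIZE.
 */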
293
294static inline unsigned int
295sclp_vt220_space_left(struct sclp_vt220_request *request)
296{
297 struct sclp_vt220_sccb *sccb;
298 sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
299 return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
300 sccb->header.length;
301}
302
303static inline unsigned int
304sclp_vt220_chars_stored(struct sclp_vt220_request *request)
305{
306 struct sclp_vt220_sccb *sccb;
307 sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
308 return sccb->evbuf.length - sizeof(struct evbuf_header);
309}
310
311/*
312 * Add msg to buffer associated with request. Return the number of characters
313 * added.
314 */
315static int
316sclp_vt220_add_msg(struct sclp_vt220_request *request,
317 const unsigned char *msg, int count, int convertlf)
318{
319 struct sclp_vt220_sccb *sccb;
320 void *buffer;
321 unsigned char c;
322 int from;
323 int to;
324
325 if (count > sclp_vt220_space_left(request))
326 count = sclp_vt220_space_left(request);
327 if (count <= 0)
328 return 0;
329
330 sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
331 buffer = (void *) ((addr_t) sccb + sccb->header.length);
332
333 if (convertlf) {
334 /* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/
335 for (from=0, to=0;
336 (from < count) && (to < sclp_vt220_space_left(request));
337 from++) {
338 /* Retrieve character */
339 c = msg[from];
340 /* Perform conversion */
341 if (c == 0x0a) {
342 if (to + 1 < sclp_vt220_space_left(request)) {
343 ((unsigned char *) buffer)[to++] = c;
344 ((unsigned char *) buffer)[to++] = 0x0d;
345 } else
346 break;
347
348 } else
349 ((unsigned char *) buffer)[to++] = c;
350 }
351 sccb->header.length += to;
352 sccb->evbuf.length += to;
353 return from;
354 } else {
355 memcpy(buffer, (const void *) msg, count);
356 sccb->header.length += count;
357 sccb->evbuf.length += count;
358 return count;
359 }
360}
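/*
 * Worked example (illustrative): with convertlf set, the five byte
 * message "ab\ncd" is stored as the six bytes 'a' 'b' 0x0a 0x0d 'c' 'd'
 * and the function returns 5, the number of message bytes consumed.
 * If the 0x0d would not fit, the preceding 0x0a is not stored either,
 * so a linefeed is never emitted without its carriage return.
 */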
361
362/*
363 * Emit buffer after having waited long enough for more data to arrive.
364 */
365static void
366sclp_vt220_timeout(unsigned long data)
367{
368 sclp_vt220_emit_current();
369}
370
371#define BUFFER_MAX_DELAY HZ/2
372
373/*
374 * Internal implementation of the write function. Write COUNT bytes of data
375 * from memory at BUF to the SCLP interface. If the data does not fit into
376 * the current write buffer, emit the current one and allocate a new one.
377 * If there are no more empty buffers available, wait until one gets
378 * emptied. If DO_SCHEDULE is non-zero, the buffer will be scheduled for
379 * emitting after a timeout - otherwise the user has to explicitly call the
380 * flush function.
381 * A non-zero CONVERTLF parameter indicates that 0x0a characters in the message
382 * buffer should be converted to 0x0a 0x0d. After completion, return the number
383 * of bytes written.
384 */
385static int
386__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
387 int convertlf)
388{
389 unsigned long flags;
390 void *page;
391 int written;
392 int overall_written;
393
394 if (count <= 0)
395 return 0;
396 overall_written = 0;
397 spin_lock_irqsave(&sclp_vt220_lock, flags);
398 do {
399 /* Create a sclp output buffer if none exists yet */
400 if (sclp_vt220_current_request == NULL) {
401 while (list_empty(&sclp_vt220_empty)) {
402 spin_unlock_irqrestore(&sclp_vt220_lock,
403 flags);
404 if (in_interrupt())
405 sclp_sync_wait();
406 else
407 wait_event(sclp_vt220_waitq,
408 !list_empty(&sclp_vt220_empty));
409 spin_lock_irqsave(&sclp_vt220_lock, flags);
410 }
411 page = (void *) sclp_vt220_empty.next;
412 list_del((struct list_head *) page);
413 sclp_vt220_current_request =
414 sclp_vt220_initialize_page(page);
415 }
416 /* Try to write the string to the current request buffer */
417 written = sclp_vt220_add_msg(sclp_vt220_current_request,
418 buf, count, convertlf);
419 overall_written += written;
420 if (written == count)
421 break;
422 /*
423 * Not all characters could be written to the current
424 * output buffer. Emit the buffer, create a new buffer
425 * and then output the rest of the string.
426 */
427 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
428 sclp_vt220_emit_current();
429 spin_lock_irqsave(&sclp_vt220_lock, flags);
430 buf += written;
431 count -= written;
432 } while (count > 0);
433 /* Setup timer to output current console buffer after some time */
434 if (sclp_vt220_current_request != NULL &&
435 !timer_pending(&sclp_vt220_timer) && do_schedule) {
436 sclp_vt220_timer.function = sclp_vt220_timeout;
437 sclp_vt220_timer.data = 0UL;
438 sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
439 add_timer(&sclp_vt220_timer);
440 }
441 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
442 return overall_written;
443}
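/*
 * Illustrative call patterns (mirroring the tty and console paths
 * below): the tty write schedules delayed emission, put_char buffers
 * until an explicit flush, and the console write also requests
 * linefeed conversion:
 *
 *	__sclp_vt220_write(buf, count, 1, 0);	// tty write
 *	__sclp_vt220_write(&ch, 1, 0, 0);	// tty put_char
 *	__sclp_vt220_write(buf, count, 1, 1);	// console write
 */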
444
445/*
446 * This routine is called by the kernel to write a series of
447 * characters to the tty device. The characters may come from
448 * user space or kernel space. This routine will return the
449 * number of characters actually accepted for writing.
450 */
451static int
452sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
453{
454 return __sclp_vt220_write(buf, count, 1, 0);
455}
456
457#define SCLP_VT220_SESSION_ENDED 0x01
458#define SCLP_VT220_SESSION_STARTED 0x80
459#define SCLP_VT220_SESSION_DATA 0x00
460
461/*
462 * Called by the SCLP to report incoming event buffers.
463 */
464static void
465sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
466{
467 char *buffer;
468 unsigned int count;
469
470 /* Ignore input if device is not open */
471 if (sclp_vt220_tty == NULL)
472 return;
473
474 buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
475 count = evbuf->length - sizeof(struct evbuf_header);
476
477 switch (*buffer) {
478 case SCLP_VT220_SESSION_ENDED:
479 case SCLP_VT220_SESSION_STARTED:
480 break;
481 case SCLP_VT220_SESSION_DATA:
482 /* Send input to line discipline */
483 buffer++;
484 count--;
485 /* Prevent buffer overrun by discarding input. Note that
486 * because buffer_push works asynchronously, we cannot wait
487 * for the buffer to be emptied. */
488 if (count + sclp_vt220_tty->flip.count > TTY_FLIPBUF_SIZE)
489 count = TTY_FLIPBUF_SIZE - sclp_vt220_tty->flip.count;
490 memcpy(sclp_vt220_tty->flip.char_buf_ptr, buffer, count);
491 memset(sclp_vt220_tty->flip.flag_buf_ptr, TTY_NORMAL, count);
492 sclp_vt220_tty->flip.char_buf_ptr += count;
493 sclp_vt220_tty->flip.flag_buf_ptr += count;
494 sclp_vt220_tty->flip.count += count;
495 tty_flip_buffer_push(sclp_vt220_tty);
496 break;
497 }
498}
499
500/*
501 * This routine is called when a particular tty device is opened.
502 */
503static int
504sclp_vt220_open(struct tty_struct *tty, struct file *filp)
505{
506 if (tty->count == 1) {
507 sclp_vt220_tty = tty;
508 tty->driver_data = kmalloc(SCLP_VT220_BUF_SIZE, GFP_KERNEL);
509 if (tty->driver_data == NULL)
510 return -ENOMEM;
511 tty->low_latency = 0;
512 }
513 return 0;
514}
515
516/*
517 * This routine is called when a particular tty device is closed.
518 */
519static void
520sclp_vt220_close(struct tty_struct *tty, struct file *filp)
521{
522 if (tty->count == 1) {
523 sclp_vt220_tty = NULL;
524 kfree(tty->driver_data);
525 tty->driver_data = NULL;
526 }
527}
528
529/*
530 * This routine is called by the kernel to write a single
531 * character to the tty device. If the kernel uses this routine,
532 * it must call the flush_chars() routine (if defined) when it is
533 * done stuffing characters into the driver.
534 *
535 * NOTE: include/linux/tty_driver.h specifies that a character should be
536 * ignored if there is no room in the queue. This driver implements a different
537 * semantic in that it will block when there is no more room left.
538 */
539static void
540sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
541{
542 __sclp_vt220_write(&ch, 1, 0, 0);
543}
544
545/*
546 * This routine is called by the kernel after it has written a
547 * series of characters to the tty device using put_char().
548 */
549static void
550sclp_vt220_flush_chars(struct tty_struct *tty)
551{
552 if (sclp_vt220_outqueue_count == 0)
553 sclp_vt220_emit_current();
554 else
555 sclp_vt220_flush_later = 1;
556}
557
558/*
559 * This routine returns the number of characters the tty driver
560 * will accept for queuing to be written. This number is subject
561 * to change as output buffers get emptied, or if output flow
562 * control is asserted.
563 */
564static int
565sclp_vt220_write_room(struct tty_struct *tty)
566{
567 unsigned long flags;
568 struct list_head *l;
569 int count;
570
571 spin_lock_irqsave(&sclp_vt220_lock, flags);
572 count = 0;
573 if (sclp_vt220_current_request != NULL)
574 count = sclp_vt220_space_left(sclp_vt220_current_request);
575 list_for_each(l, &sclp_vt220_empty)
576 count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
577 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
578 return count;
579}
580
581/*
582 * Return number of buffered chars.
583 */
584static int
585sclp_vt220_chars_in_buffer(struct tty_struct *tty)
586{
587 unsigned long flags;
588 struct list_head *l;
589 struct sclp_vt220_request *r;
590 int count;
591
592 spin_lock_irqsave(&sclp_vt220_lock, flags);
593 count = 0;
594 if (sclp_vt220_current_request != NULL)
595 count = sclp_vt220_chars_stored(sclp_vt220_current_request);
596 list_for_each(l, &sclp_vt220_outqueue) {
597 r = list_entry(l, struct sclp_vt220_request, list);
598 count += sclp_vt220_chars_stored(r);
599 }
600 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
601 return count;
602}
603
604static void
605__sclp_vt220_flush_buffer(void)
606{
607 unsigned long flags;
608
609 sclp_vt220_emit_current();
610 spin_lock_irqsave(&sclp_vt220_lock, flags);
611 if (timer_pending(&sclp_vt220_timer))
612 del_timer(&sclp_vt220_timer);
613 while (sclp_vt220_outqueue_count > 0) {
614 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
615 sclp_sync_wait();
616 spin_lock_irqsave(&sclp_vt220_lock, flags);
617 }
618 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
619}
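/*
 * Note (sketch): sclp_sync_wait, declared in "sclp.h", is assumed to
 * busy-wait while processing pending SCLP interrupts, which is what
 * lets the loop above drain the outqueue even in atomic context
 * (e.g. on the panic path via sclp_vt220_con_unblank below).
 */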
620
621/*
622 * Pass the current buffer on to the hardware. Do not wait for it to be
623 * sent.
624 */
625static void
626sclp_vt220_flush_buffer(struct tty_struct *tty)
627{
628 sclp_vt220_emit_current();
629}
630
631/*
632 * Initialize all relevant components and register driver with system.
633 */
634static int
635__sclp_vt220_init(int early)
636{
637 void *page;
638 int i;
639
640 if (sclp_vt220_initialized)
641 return 0;
642 sclp_vt220_initialized = 1;
643 spin_lock_init(&sclp_vt220_lock);
644 INIT_LIST_HEAD(&sclp_vt220_empty);
645 INIT_LIST_HEAD(&sclp_vt220_outqueue);
646 init_waitqueue_head(&sclp_vt220_waitq);
647 init_timer(&sclp_vt220_timer);
648 sclp_vt220_current_request = NULL;
649 sclp_vt220_buffered_chars = 0;
650 sclp_vt220_outqueue_count = 0;
651 sclp_vt220_tty = NULL;
652 sclp_vt220_flush_later = 0;
653
654 /* Allocate pages for output buffering */
655 for (i = 0; i < (early ? MAX_CONSOLE_PAGES : MAX_KMEM_PAGES); i++) {
656 if (early)
657 page = alloc_bootmem_low_pages(PAGE_SIZE);
658 else
659 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
660 if (!page)
661 return -ENOMEM;
662 list_add_tail((struct list_head *) page, &sclp_vt220_empty);
663 }
664 return 0;
665}
666
667static struct tty_operations sclp_vt220_ops = {
668 .open = sclp_vt220_open,
669 .close = sclp_vt220_close,
670 .write = sclp_vt220_write,
671 .put_char = sclp_vt220_put_char,
672 .flush_chars = sclp_vt220_flush_chars,
673 .write_room = sclp_vt220_write_room,
674 .chars_in_buffer = sclp_vt220_chars_in_buffer,
675 .flush_buffer = sclp_vt220_flush_buffer
676};
677
678/*
679 * Register driver with SCLP and Linux and initialize internal tty structures.
680 */
681int __init
682sclp_vt220_tty_init(void)
683{
684 struct tty_driver *driver;
685 int rc;
686
687 /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
688 * symmetry between VM and LPAR systems regarding ttyS1. */
689 driver = alloc_tty_driver(1);
690 if (!driver)
691 return -ENOMEM;
692 rc = __sclp_vt220_init(0);
693 if (rc) {
694 put_tty_driver(driver);
695 return rc;
696 }
697 rc = sclp_register(&sclp_vt220_register);
698 if (rc) {
699 printk(KERN_ERR SCLP_VT220_PRINT_HEADER
700 "could not register tty - "
701 "sclp_register returned %d\n", rc);
702 put_tty_driver(driver);
703 return rc;
704 }
705
706 driver->owner = THIS_MODULE;
707 driver->driver_name = SCLP_VT220_DRIVER_NAME;
708 driver->name = SCLP_VT220_DEVICE_NAME;
709 driver->major = SCLP_VT220_MAJOR;
710 driver->minor_start = SCLP_VT220_MINOR;
711 driver->type = TTY_DRIVER_TYPE_SYSTEM;
712 driver->subtype = SYSTEM_TYPE_TTY;
713 driver->init_termios = tty_std_termios;
714 driver->flags = TTY_DRIVER_REAL_RAW;
715 tty_set_operations(driver, &sclp_vt220_ops);
716
717 rc = tty_register_driver(driver);
718 if (rc) {
719 printk(KERN_ERR SCLP_VT220_PRINT_HEADER
720 "could not register tty - "
721 "tty_register_driver returned %d\n", rc);
722 put_tty_driver(driver);
723 return rc;
724 }
725 sclp_vt220_driver = driver;
726 return 0;
727}
728
729module_init(sclp_vt220_tty_init);
730
731#ifdef CONFIG_SCLP_VT220_CONSOLE
732
733static void
734sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
735{
736 __sclp_vt220_write((const unsigned char *) buf, count, 1, 1);
737}
738
739static struct tty_driver *
740sclp_vt220_con_device(struct console *c, int *index)
741{
742 *index = 0;
743 return sclp_vt220_driver;
744}
745
746/*
747 * This routine is called from panic when the kernel is going to give up.
748 * We have to make sure that all buffers will be flushed to the SCLP.
749 * Note that this function may be called from within an interrupt context.
750 */
751static void
752sclp_vt220_con_unblank(void)
753{
754 __sclp_vt220_flush_buffer();
755}
756
757/* Structure needed to register with printk */
758static struct console sclp_vt220_console =
759{
760 .name = SCLP_VT220_CONSOLE_NAME,
761 .write = sclp_vt220_con_write,
762 .device = sclp_vt220_con_device,
763 .unblank = sclp_vt220_con_unblank,
764 .flags = CON_PRINTBUFFER,
765 .index = SCLP_VT220_CONSOLE_INDEX
766};
767
768static int __init
769sclp_vt220_con_init(void)
770{
771 int rc;
772
773 if (!CONSOLE_IS_SCLP)
774 return 0;
775 rc = __sclp_vt220_init(1);
776 if (rc)
777 return rc;
778 /* Attach linux console */
779 register_console(&sclp_vt220_console);
780 return 0;
781}
782
783console_initcall(sclp_vt220_con_init);
784#endif /* CONFIG_SCLP_VT220_CONSOLE */
785
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
new file mode 100644
index 000000000000..d04e6c2c3cc1
--- /dev/null
+++ b/drivers/s390/char/tape.h
@@ -0,0 +1,384 @@
1/*
2 * drivers/s390/char/tape.h
3 * tape device driver for 3480/3490E/3590 tapes.
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
9 * Martin Schwidefsky <schwidefsky@de.ibm.com>
10 */
11
12#ifndef _TAPE_H
13#define _TAPE_H
14
15#include <asm/ccwdev.h>
16#include <asm/debug.h>
17#include <asm/idals.h>
18#include <linux/config.h>
19#include <linux/blkdev.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/mtio.h>
23#include <linux/interrupt.h>
24#include <linux/workqueue.h>
25
26struct gendisk;
27
28/*
29 * Define DBF_LIKE_HELL for lots of messages in the debug feature.
30 */
31#define DBF_LIKE_HELL
32#ifdef DBF_LIKE_HELL
33#define DBF_LH(level, str, ...) \
34do { \
35 debug_sprintf_event(TAPE_DBF_AREA, level, str, ## __VA_ARGS__); \
36} while (0)
37#else
38#define DBF_LH(level, str, ...) do {} while(0)
39#endif
40
41/*
42 * macros s390 debug feature (dbf)
43 */
44#define DBF_EVENT(d_level, d_str...) \
45do { \
46 debug_sprintf_event(TAPE_DBF_AREA, d_level, d_str); \
47} while (0)
48
49#define DBF_EXCEPTION(d_level, d_str...) \
50do { \
51 debug_sprintf_exception(TAPE_DBF_AREA, d_level, d_str); \
52} while (0)
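/*
 * Typical use of these macros in the disciplines (sketch); the first
 * argument is the s390 debug feature level:
 *
 *	DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
 *	DBF_EXCEPTION(6, "MSEN fail\n");
 */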
53
54#define TAPE_VERSION_MAJOR 2
55#define TAPE_VERSION_MINOR 0
56#define TAPE_MAGIC "tape"
57
58#define TAPE_MINORS_PER_DEV 2 /* two minors per device */
59#define TAPEBLOCK_HSEC_SIZE 2048
60#define TAPEBLOCK_HSEC_S2B 2
61#define TAPEBLOCK_RETRIES 5
62
63enum tape_medium_state {
64 MS_UNKNOWN,
65 MS_LOADED,
66 MS_UNLOADED,
67 MS_SIZE
68};
69
70enum tape_state {
71 TS_UNUSED=0,
72 TS_IN_USE,
73 TS_BLKUSE,
74 TS_INIT,
75 TS_NOT_OPER,
76 TS_SIZE
77};
78
79enum tape_op {
80 TO_BLOCK, /* Block read */
81 TO_BSB, /* Backward space block */
82 TO_BSF, /* Backward space filemark */
83 TO_DSE, /* Data security erase */
84 TO_FSB, /* Forward space block */
85 TO_FSF, /* Forward space filemark */
86 TO_LBL, /* Locate block label */
87 TO_NOP, /* No operation */
88 TO_RBA, /* Read backward */
89 TO_RBI, /* Read block information */
90 TO_RFO, /* Read forward */
91 TO_REW, /* Rewind tape */
92 TO_RUN, /* Rewind and unload tape */
93 TO_WRI, /* Write block */
94 TO_WTM, /* Write tape mark */
95 TO_MSEN, /* Medium sense */
96 TO_LOAD, /* Load tape */
97 TO_READ_CONFIG, /* Read configuration data */
98 TO_READ_ATTMSG, /* Read attention message */
99 TO_DIS, /* Tape display */
100 TO_ASSIGN, /* Assign tape to channel path */
101 TO_UNASSIGN, /* Unassign tape from channel path */
102 TO_SIZE /* #entries in tape_op_t */
103};
104
105/* Forward declaration */
106struct tape_device;
107
108/* tape_request->status can be: */
109enum tape_request_status {
110 TAPE_REQUEST_INIT, /* request is ready to be processed */
111 TAPE_REQUEST_QUEUED, /* request is queued to be processed */
112 TAPE_REQUEST_IN_IO, /* request is currently in IO */
113 TAPE_REQUEST_DONE, /* request is completed. */
114};
115
116/* Tape CCW request */
117struct tape_request {
118 struct list_head list; /* list head for request queueing. */
119 struct tape_device *device; /* tape device of this request */
120 struct ccw1 *cpaddr; /* address of the channel program. */
121 void *cpdata; /* pointer to ccw data. */
122 enum tape_request_status status;/* status of this request */
123 int options; /* options for execution. */
124 int retries; /* retry counter for error recovery. */
125 int rescnt; /* residual count from devstat. */
126
127 /* Callback for delivering final status. */
128 void (*callback)(struct tape_request *, void *);
129 void *callback_data;
130
131 enum tape_op op;
132 int rc;
133};
134
135/* Function type for magnetic tape commands */
136typedef int (*tape_mtop_fn)(struct tape_device *, int);
137
138/* Size of the array containing the mtops for a discipline */
139#define TAPE_NR_MTOPS (MTMKPART+1)
140
141/* Tape Discipline */
142struct tape_discipline {
143 struct module *owner;
144 int (*setup_device)(struct tape_device *);
145 void (*cleanup_device)(struct tape_device *);
146 int (*irq)(struct tape_device *, struct tape_request *, struct irb *);
147 struct tape_request *(*read_block)(struct tape_device *, size_t);
148 struct tape_request *(*write_block)(struct tape_device *, size_t);
149 void (*process_eov)(struct tape_device*);
150#ifdef CONFIG_S390_TAPE_BLOCK
151 /* Block device stuff. */
152 struct tape_request *(*bread)(struct tape_device *, struct request *);
153 void (*check_locate)(struct tape_device *, struct tape_request *);
154 void (*free_bread)(struct tape_request *);
155#endif
156 /* ioctl function for additional ioctls. */
157 int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long);
158 /* Array of tape commands with TAPE_NR_MTOPS entries */
159 tape_mtop_fn *mtop_array;
160};
161
162/*
163 * The discipline irq function either returns an error code (<0) which
164 * means that the request has failed with an error or one of the following:
165 */
166#define TAPE_IO_SUCCESS 0 /* request successful */
167#define TAPE_IO_PENDING 1 /* request still running */
168#define TAPE_IO_RETRY 2 /* retry to current request */
169#define TAPE_IO_STOP 3 /* stop the running request */
170
171/* Char Frontend Data */
172struct tape_char_data {
173 struct idal_buffer *idal_buf; /* idal buffer for user char data */
174 int block_size; /* of size block_size. */
175};
176
177#ifdef CONFIG_S390_TAPE_BLOCK
178/* Block Frontend Data */
179struct tape_blk_data
180{
181 /* Block device request queue. */
182 request_queue_t * request_queue;
183 spinlock_t request_queue_lock;
184
185 /* Task to move entries from block request to CCS request queue. */
186 struct work_struct requeue_task;
187 atomic_t requeue_scheduled;
188
189 /* Current position on the tape. */
190 long block_position;
191 int medium_changed;
192 struct gendisk * disk;
193};
194#endif
195
196/* Tape Info */
197struct tape_device {
198 /* entry in tape_device_list */
199 struct list_head node;
200
201 int cdev_id;
202 struct ccw_device * cdev;
203 struct tape_class_device * nt;
204 struct tape_class_device * rt;
205
206 /* Device discipline information. */
207 struct tape_discipline * discipline;
208 void * discdata;
209
210 /* Generic status flags */
211 long tape_generic_status;
212
213 /* Device state information. */
214 wait_queue_head_t state_change_wq;
215 enum tape_state tape_state;
216 enum tape_medium_state medium_state;
217 unsigned char * modeset_byte;
218
219 /* Reference count. */
220 atomic_t ref_count;
221
222 /* Request queue. */
223 struct list_head req_queue;
224
225 /* Each tape device has (currently) two minor numbers. */
226 int first_minor;
227
228 /* Number of tapemarks required for correct termination. */
229 int required_tapemarks;
230
231 /* Block ID of the BOF */
232 unsigned int bof;
233
234 /* Character device frontend data */
235 struct tape_char_data char_data;
236#ifdef CONFIG_S390_TAPE_BLOCK
237 /* Block dev frontend data */
238 struct tape_blk_data blk_data;
239#endif
240};
241
242/* Externals from tape_core.c */
243extern struct tape_request *tape_alloc_request(int cplength, int datasize);
244extern void tape_free_request(struct tape_request *);
245extern int tape_do_io(struct tape_device *, struct tape_request *);
246extern int tape_do_io_async(struct tape_device *, struct tape_request *);
247extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *);
248void tape_hotplug_event(struct tape_device *, int major, int action);
249
250static inline int
251tape_do_io_free(struct tape_device *device, struct tape_request *request)
252{
253 int rc;
254
255 rc = tape_do_io(device, request);
256 tape_free_request(request);
257 return rc;
258}
259
260extern int tape_oper_handler(int irq, int status);
261extern void tape_noper_handler(int irq, int status);
262extern int tape_open(struct tape_device *);
263extern int tape_release(struct tape_device *);
264extern int tape_mtop(struct tape_device *, int, int);
265extern void tape_state_set(struct tape_device *, enum tape_state);
266
267extern int tape_generic_online(struct tape_device *, struct tape_discipline *);
268extern int tape_generic_offline(struct tape_device *device);
269
270/* Externals from tape_devmap.c */
271extern int tape_generic_probe(struct ccw_device *);
272extern void tape_generic_remove(struct ccw_device *);
273
274extern struct tape_device *tape_get_device(int devindex);
275extern struct tape_device *tape_get_device_reference(struct tape_device *);
276extern struct tape_device *tape_put_device(struct tape_device *);
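/*
 * Reference counting sketch (as used by the disciplines for deferred
 * work): take a reference before handing the device to an async
 * context and drop it, via the NULL returned by tape_put_device, when
 * done:
 *
 *	p->device = tape_get_device_reference(device);
 *	schedule_work(&p->work);
 *	...
 *	p->device = tape_put_device(p->device);
 */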
277
278/* Externals from tape_char.c */
279extern int tapechar_init(void);
280extern void tapechar_exit(void);
281extern int tapechar_setup_device(struct tape_device *);
282extern void tapechar_cleanup_device(struct tape_device *);
283
284/* Externals from tape_block.c */
285#ifdef CONFIG_S390_TAPE_BLOCK
286extern int tapeblock_init (void);
287extern void tapeblock_exit(void);
288extern int tapeblock_setup_device(struct tape_device *);
289extern void tapeblock_cleanup_device(struct tape_device *);
290#else
291static inline int tapeblock_init (void) {return 0;}
292static inline void tapeblock_exit (void) {;}
293static inline int tapeblock_setup_device(struct tape_device *t) {return 0;}
294static inline void tapeblock_cleanup_device (struct tape_device *t) {;}
295#endif
296
297/* tape initialisation functions */
298#ifdef CONFIG_PROC_FS
299extern void tape_proc_init (void);
300extern void tape_proc_cleanup (void);
301#else
302static inline void tape_proc_init (void) {;}
303static inline void tape_proc_cleanup (void) {;}
304#endif
305
306/* a function for dumping device sense info */
307extern void tape_dump_sense(struct tape_device *, struct tape_request *,
308 struct irb *);
309extern void tape_dump_sense_dbf(struct tape_device *, struct tape_request *,
310 struct irb *);
311
312/* functions for handling the status of a device */
313extern void tape_med_state_set(struct tape_device *, enum tape_medium_state);
314
315/* The debug area */
316extern debug_info_t *TAPE_DBF_AREA;
317
318/* functions for building ccws */
319static inline struct ccw1 *
320tape_ccw_cc(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
321{
322 ccw->cmd_code = cmd_code;
323 ccw->flags = CCW_FLAG_CC;
324 ccw->count = memsize;
325 ccw->cda = (__u32)(addr_t) cda;
326 return ccw + 1;
327}
328
329static inline struct ccw1 *
330tape_ccw_end(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
331{
332 ccw->cmd_code = cmd_code;
333 ccw->flags = 0;
334 ccw->count = memsize;
335 ccw->cda = (__u32)(addr_t) cda;
336 return ccw + 1;
337}
338
339static inline struct ccw1 *
340tape_ccw_cmd(struct ccw1 *ccw, __u8 cmd_code)
341{
342 ccw->cmd_code = cmd_code;
343 ccw->flags = 0;
344 ccw->count = 0;
345 ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
346 return ccw + 1;
347}
348
349static inline struct ccw1 *
350tape_ccw_repeat(struct ccw1 *ccw, __u8 cmd_code, int count)
351{
352 while (count-- > 0) {
353 ccw->cmd_code = cmd_code;
354 ccw->flags = CCW_FLAG_CC;
355 ccw->count = 0;
356 ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
357 ccw++;
358 }
359 return ccw;
360}
361
362static inline struct ccw1 *
363tape_ccw_cc_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
364{
365 ccw->cmd_code = cmd_code;
366 ccw->flags = CCW_FLAG_CC;
367 idal_buffer_set_cda(idal, ccw);
368	return ccw + 1;
369}
370
371static inline struct ccw1 *
372tape_ccw_end_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
373{
374 ccw->cmd_code = cmd_code;
375 ccw->flags = 0;
376 idal_buffer_set_cda(idal, ccw);
377	return ccw + 1;
378}
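/*
 * Sketch of how these helpers compose a channel program: each call
 * fills one ccw1 and returns the slot for the next, with CCW_FLAG_CC
 * chaining everything up to the final command (MODE_SET_DB and NOP
 * are command codes assumed from tape_std.h):
 *
 *	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
 *			  device->modeset_byte);
 *	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
 */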
379
380/* Global vars */
381extern const char *tape_state_verbose[];
382extern const char *tape_op_verbose[];
383
384#endif /* _TAPE_H */
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
new file mode 100644
index 000000000000..480ec87976fb
--- /dev/null
+++ b/drivers/s390/char/tape_34xx.c
@@ -0,0 +1,1385 @@
1/*
2 * drivers/s390/char/tape_34xx.c
3 * tape device discipline for 3480/3490 tapes.
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
9 * Martin Schwidefsky <schwidefsky@de.ibm.com>
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/bio.h>
16#include <linux/workqueue.h>
17
18#define TAPE_DBF_AREA tape_34xx_dbf
19
20#include "tape.h"
21#include "tape_std.h"
22
23#define PRINTK_HEADER "TAPE_34XX: "
24
25/*
26 * Pointer to debug area.
27 */
28debug_info_t *TAPE_DBF_AREA = NULL;
29EXPORT_SYMBOL(TAPE_DBF_AREA);
30
31enum tape_34xx_type {
32 tape_3480,
33 tape_3490,
34};
35
36#define TAPE34XX_FMT_3480 0
37#define TAPE34XX_FMT_3480_2_XF 1
38#define TAPE34XX_FMT_3480_XF 2
39
40struct tape_34xx_block_id {
41 unsigned int wrap : 1;
42 unsigned int segment : 7;
43 unsigned int format : 2;
44 unsigned int block : 22;
45};
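/*
 * On s390 (big-endian, bit fields allocated from the most significant
 * bit) the structure above maps onto a 32-bit channel block ID like
 * this (sketch):
 *
 *	bit   0     : wrap
 *	bits  1 -  7: segment number
 *	bits  8 -  9: format identifier (TAPE34XX_FMT_*)
 *	bits 10 - 31: logical block number
 */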
46
47/*
48 * A list of block IDs is used to speed up seeking to blocks.
49 */
50struct tape_34xx_sbid {
51 struct list_head list;
52 struct tape_34xx_block_id bid;
53};
54
55static void tape_34xx_delete_sbid_from(struct tape_device *, int);
56
57/*
58 * Medium sense for 34xx tapes. There is no 'real' medium sense call.
59 * So we just do a normal sense.
60 */
61static int
62tape_34xx_medium_sense(struct tape_device *device)
63{
64 struct tape_request *request;
65 unsigned char *sense;
66 int rc;
67
68 request = tape_alloc_request(1, 32);
69 if (IS_ERR(request)) {
70 DBF_EXCEPTION(6, "MSEN fail\n");
71 return PTR_ERR(request);
72 }
73
74 request->op = TO_MSEN;
75 tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
76
77 rc = tape_do_io_interruptible(device, request);
78 if (request->rc == 0) {
79 sense = request->cpdata;
80
81 /*
82 * This isn't quite correct. But since INTERVENTION_REQUIRED
83 * means that the drive is 'neither ready nor on-line' it is
84 * only slightly inaccurate to say there is no tape loaded if
85 * the drive isn't online...
86 */
87 if (sense[0] & SENSE_INTERVENTION_REQUIRED)
88 tape_med_state_set(device, MS_UNLOADED);
89 else
90 tape_med_state_set(device, MS_LOADED);
91
92 if (sense[1] & SENSE_WRITE_PROTECT)
93 device->tape_generic_status |= GMT_WR_PROT(~0);
94 else
95 device->tape_generic_status &= ~GMT_WR_PROT(~0);
96 } else {
97 DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
98 request->rc);
99 }
100 tape_free_request(request);
101
102 return rc;
103}
104
105/*
106 * These functions are currently used only to schedule a medium_sense for
107 * later execution. This is because we get an interrupt whenever a medium
108 * is inserted but cannot call tape_do_io* from an interrupt context.
109 * Maybe that's useful for other actions we want to start from the
110 * interrupt handler.
111 */
112static void
113tape_34xx_work_handler(void *data)
114{
115 struct {
116 struct tape_device *device;
117 enum tape_op op;
118 struct work_struct work;
119 } *p = data;
120
121 switch(p->op) {
122 case TO_MSEN:
123 tape_34xx_medium_sense(p->device);
124 break;
125 default:
126 DBF_EVENT(3, "T34XX: internal error: unknown work\n");
127 }
128
129 p->device = tape_put_device(p->device);
130 kfree(p);
131}
132
133static int
134tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
135{
136 struct {
137 struct tape_device *device;
138 enum tape_op op;
139 struct work_struct work;
140 } *p;
141
142 if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
143 return -ENOMEM;
144
145 memset(p, 0, sizeof(*p));
146 INIT_WORK(&p->work, tape_34xx_work_handler, p);
147
148 p->device = tape_get_device_reference(device);
149 p->op = op;
150
151 schedule_work(&p->work);
152 return 0;
153}
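/*
 * Illustrative use (see the unsolicited interrupt handler below): a
 * medium sense is deferred to process context because tape_do_io*
 * cannot be called from the interrupt handler:
 *
 *	tape_34xx_schedule_work(device, TO_MSEN);
 */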
154
155/*
156 * Done Handler is called when dev stat = DEVICE-END (successful operation)
157 */
158static inline int
159tape_34xx_done(struct tape_request *request)
160{
161 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
162
163 switch (request->op) {
164 case TO_DSE:
165 case TO_RUN:
166 case TO_WRI:
167 case TO_WTM:
168 case TO_ASSIGN:
169 case TO_UNASSIGN:
170 tape_34xx_delete_sbid_from(request->device, 0);
171 break;
172 default:
173 ;
174 }
175 return TAPE_IO_SUCCESS;
176}
177
178static inline int
179tape_34xx_erp_failed(struct tape_request *request, int rc)
180{
181 DBF_EVENT(3, "Error recovery failed for %s (RC=%d)\n",
182 tape_op_verbose[request->op], rc);
183 return rc;
184}
185
186static inline int
187tape_34xx_erp_succeeded(struct tape_request *request)
188{
189 DBF_EVENT(3, "Error Recovery successful for %s\n",
190 tape_op_verbose[request->op]);
191 return tape_34xx_done(request);
192}
193
194static inline int
195tape_34xx_erp_retry(struct tape_request *request)
196{
197 DBF_EVENT(3, "xerp retr %s\n", tape_op_verbose[request->op]);
198 return TAPE_IO_RETRY;
199}
200
201/*
202 * This function is called when no request is outstanding and we get an
203 * interrupt.
204 */
205static int
206tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb)
207{
208 if (irb->scsw.dstat == 0x85 /* READY */) {
209 /* A medium was inserted in the drive. */
210 DBF_EVENT(6, "xuud med\n");
211 tape_34xx_delete_sbid_from(device, 0);
212 tape_34xx_schedule_work(device, TO_MSEN);
213 } else {
214 DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
215 PRINT_WARN("Unsolicited IRQ (Device End) caught.\n");
216 tape_dump_sense(device, NULL, irb);
217 }
218 return TAPE_IO_SUCCESS;
219}
220
221/*
222 * Read Opposite Error Recovery Function:
223 * Used, when Read Forward does not work
224 */
225static int
226tape_34xx_erp_read_opposite(struct tape_device *device,
227 struct tape_request *request)
228{
229 if (request->op == TO_RFO) {
230 /*
231 * We did read forward, but the data could not be read
232 * *correctly*. We transform the request to a read backward
233 * and try again.
234 */
235 tape_std_read_backward(device, request);
236 return tape_34xx_erp_retry(request);
237 }
238 if (request->op != TO_RBA)
239 PRINT_ERR("read_opposite called with state:%s\n",
240 tape_op_verbose[request->op]);
241 /*
242 * We tried to read forward and backward, but had no
243 * success -> failed.
244 */
245 return tape_34xx_erp_failed(request, -EIO);
246}
247
248static int
249tape_34xx_erp_bug(struct tape_device *device, struct tape_request *request,
250 struct irb *irb, int no)
251{
252 if (request->op != TO_ASSIGN) {
253 PRINT_WARN("An unexpected condition #%d was caught in "
254 "tape error recovery.\n", no);
255 PRINT_WARN("Please report this incident.\n");
256 if (request)
257 PRINT_WARN("Operation of tape:%s\n",
258 tape_op_verbose[request->op]);
259 tape_dump_sense(device, request, irb);
260 }
261 return tape_34xx_erp_failed(request, -EIO);
262}
263
264/*
265 * Handle data overrun between cu and drive. The channel speed might
266 * be too slow.
267 */
268static int
269tape_34xx_erp_overrun(struct tape_device *device, struct tape_request *request,
270 struct irb *irb)
271{
272 if (irb->ecw[3] == 0x40) {
273 PRINT_WARN ("Data overrun error between control-unit "
274 "and drive. Use a faster channel connection, "
275 "if possible! \n");
276 return tape_34xx_erp_failed(request, -EIO);
277 }
278 return tape_34xx_erp_bug(device, request, irb, -1);
279}
280
281/*
282 * Handle record sequence error.
283 */
284static int
285tape_34xx_erp_sequence(struct tape_device *device,
286 struct tape_request *request, struct irb *irb)
287{
288 if (irb->ecw[3] == 0x41) {
289 /*
290 * cu detected incorrect block-id sequence on tape.
291 */
292 PRINT_WARN("Illegal block-id sequence found!\n");
293 return tape_34xx_erp_failed(request, -EIO);
294 }
295 /*
296 * Record sequence error bit is set, but erpa does not
297 * show record sequence error.
298 */
299 return tape_34xx_erp_bug(device, request, irb, -2);
300}
301
302/*
303 * This function analyses the tape's sense-data in case of a unit-check.
304 * If possible, it tries to recover from the error. Else the user is
305 * informed about the problem.
306 */
307static int
308tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
309 struct irb *irb)
310{
311 int inhibit_cu_recovery;
312 __u8* sense;
313
314 inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0;
315 sense = irb->ecw;
316
317#ifdef CONFIG_S390_TAPE_BLOCK
318 if (request->op == TO_BLOCK) {
319 /*
320 * Recovery for block device requests. Set the block_position
321 * to something invalid and retry.
322 */
323 device->blk_data.block_position = -1;
324 if (request->retries-- <= 0)
325 return tape_34xx_erp_failed(request, -EIO);
326 else
327 return tape_34xx_erp_retry(request);
328 }
329#endif
330
331 if (
332 sense[0] & SENSE_COMMAND_REJECT &&
333 sense[1] & SENSE_WRITE_PROTECT
334 ) {
335 if (
336 request->op == TO_DSE ||
337 request->op == TO_WRI ||
338 request->op == TO_WTM
339 ) {
340 /* medium is write protected */
341 return tape_34xx_erp_failed(request, -EACCES);
342 } else {
343 return tape_34xx_erp_bug(device, request, irb, -3);
344 }
345 }
346
347 /*
348 * Special cases for various tape-states when reaching
349 * end of recorded area
350 *
351 * FIXME: Maybe a special case of the special case:
352 * sense[0] == SENSE_EQUIPMENT_CHECK &&
353 * sense[1] == SENSE_DRIVE_ONLINE &&
354 * sense[3] == 0x47 (Volume Fenced)
355 *
356 * This was caused by continued FSF or FSR after an
357 * 'End Of Data'.
358 */
359 if ((
360 sense[0] == SENSE_DATA_CHECK ||
361 sense[0] == SENSE_EQUIPMENT_CHECK ||
362 sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK
363 ) && (
364 sense[1] == SENSE_DRIVE_ONLINE ||
365 sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE
366 )) {
367 switch (request->op) {
368 /*
369 * sense[0] == SENSE_DATA_CHECK &&
370 * sense[1] == SENSE_DRIVE_ONLINE
371 * sense[3] == 0x36 (End Of Data)
372 *
373 * Further seeks might return a 'Volume Fenced'.
374 */
375 case TO_FSF:
376 case TO_FSB:
377 /* Trying to seek beyond end of recorded area */
378 return tape_34xx_erp_failed(request, -ENOSPC);
379 case TO_BSB:
380 return tape_34xx_erp_retry(request);
381
382 /*
383 * sense[0] == SENSE_DATA_CHECK &&
384 * sense[1] == SENSE_DRIVE_ONLINE &&
385 * sense[3] == 0x36 (End Of Data)
386 */
387 case TO_LBL:
388 /* Block could not be located. */
389 tape_34xx_delete_sbid_from(device, 0);
390 return tape_34xx_erp_failed(request, -EIO);
391
392 case TO_RFO:
393 /* Read beyond end of recorded area -> 0 bytes read */
394 return tape_34xx_erp_failed(request, 0);
395
396 /*
397 * sense[0] == SENSE_EQUIPMENT_CHECK &&
398 * sense[1] == SENSE_DRIVE_ONLINE &&
399 * sense[3] == 0x38 (Physical End Of Volume)
400 */
401 case TO_WRI:
402 /* Writing at physical end of volume */
403 return tape_34xx_erp_failed(request, -ENOSPC);
404 default:
405 PRINT_ERR("Invalid op in %s:%i\n",
406 __FUNCTION__, __LINE__);
407 return tape_34xx_erp_failed(request, 0);
408 }
409 }
410
411 /* Sensing special bits */
412 if (sense[0] & SENSE_BUS_OUT_CHECK)
413 return tape_34xx_erp_retry(request);
414
415 if (sense[0] & SENSE_DATA_CHECK) {
416 /*
417 * hardware failure, damaged tape or improper
418 * operating conditions
419 */
420 switch (sense[3]) {
421 case 0x23:
422 /* a read data check occurred */
423 if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
424 inhibit_cu_recovery)
425 // data check is not permanent, may be
426 // recovered. We always use async-mode with
427 // cu-recovery, so this should *never* happen.
428 return tape_34xx_erp_bug(device, request,
429 irb, -4);
430
431 /* data check is permanent, CU recovery has failed */
432 PRINT_WARN("Permanent read error\n");
433 return tape_34xx_erp_failed(request, -EIO);
434 case 0x25:
435 // a write data check occurred
436 if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
437 inhibit_cu_recovery)
438 // data check is not permanent, may be
439 // recovered. We always use async-mode with
440 // cu-recovery, so this should *never* happen.
441 return tape_34xx_erp_bug(device, request,
442 irb, -5);
443
444 // data check is permanent, cu-recovery has failed
445 PRINT_WARN("Permanent write error\n");
446 return tape_34xx_erp_failed(request, -EIO);
447 case 0x26:
448 /* Data Check (read opposite) occurred. */
449 return tape_34xx_erp_read_opposite(device, request);
450 case 0x28:
451 /* ID-Mark at tape start couldn't be written */
452 PRINT_WARN("ID-Mark could not be written.\n");
453 return tape_34xx_erp_failed(request, -EIO);
454 case 0x31:
455 /* Tape void. Tried to read beyond end of device. */
456 PRINT_WARN("Read beyond end of recorded area.\n");
457 return tape_34xx_erp_failed(request, -ENOSPC);
458 case 0x41:
459 /* Record sequence error. */
460 PRINT_WARN("Invalid block-id sequence found.\n");
461 return tape_34xx_erp_failed(request, -EIO);
462 default:
463 /* all data checks for 3480 should result in one of
464 * the above erpa-codes. For 3490, other data-check
465 * conditions do exist. */
466 if (device->cdev->id.driver_info == tape_3480)
467 return tape_34xx_erp_bug(device, request,
468 irb, -6);
469 }
470 }
471
472 if (sense[0] & SENSE_OVERRUN)
473 return tape_34xx_erp_overrun(device, request, irb);
474
475 if (sense[1] & SENSE_RECORD_SEQUENCE_ERR)
476 return tape_34xx_erp_sequence(device, request, irb);
477
478 /* Sensing erpa codes */
479 switch (sense[3]) {
480 case 0x00:
481 /* Unit check with erpa code 0. Report and ignore. */
482 PRINT_WARN("Non-error sense was found. "
483 "Unit-check will be ignored.\n");
484 return TAPE_IO_SUCCESS;
485 case 0x21:
486 /*
487 * Data streaming not operational. CU will switch to
488 * interlock mode. Reissue the command.
489 */
490 PRINT_WARN("Data streaming not operational. "
491 "Switching to interlock-mode.\n");
492 return tape_34xx_erp_retry(request);
493 case 0x22:
494 /*
495 * Path equipment check. Might be drive adapter error, buffer
496 * error on the lower interface, internal path not usable,
497 * or error during cartridge load.
498 */
499 PRINT_WARN("A path equipment check occurred. One of the "
500 "following conditions occurred:\n");
501 PRINT_WARN("drive adapter error, buffer error on the lower "
502 "interface, internal path not usable, error "
503 "during cartridge load.\n");
504 return tape_34xx_erp_failed(request, -EIO);
505 case 0x24:
506 /*
507 * Load display check. A load display command was issued,
508 * but the drive is displaying a drive check message. Can
509 * be treated as "device end".
510 */
511 return tape_34xx_erp_succeeded(request);
512 case 0x27:
513 /*
514 * Command reject. May indicate illegal channel program or
515 * buffer over/underrun. Since all channel programs are
516 * issued by this driver and ought to be correct, we assume
517 * an over/underrun situation and retry the channel program.
518 */
519 return tape_34xx_erp_retry(request);
520 case 0x29:
521 /*
522 * Function incompatible. Either the tape is idrc compressed
523 * but the hardware isn't capable of doing idrc, or a perform
524 * subsystem func is issued and the CU is not on-line.
525 */
526 PRINT_WARN ("Function incompatible. Try to switch off idrc\n");
527 return tape_34xx_erp_failed(request, -EIO);
528 case 0x2a:
529 /*
530 * Unsolicited environmental data. An internal counter
531 * overflowed; we can ignore this and reissue the cmd.
532 */
533 return tape_34xx_erp_retry(request);
534 case 0x2b:
535 /*
536 * Environmental data present. Indicates either unload
537 * completed ok or read buffered log command completed ok.
538 */
539 if (request->op == TO_RUN) {
540 /* Rewind unload completed ok. */
541 tape_med_state_set(device, MS_UNLOADED);
542 return tape_34xx_erp_succeeded(request);
543 }
544 /* tape_34xx doesn't use read buffered log commands. */
545 return tape_34xx_erp_bug(device, request, irb, sense[3]);
546 case 0x2c:
547 /*
548 * Permanent equipment check. CU has tried recovery, but
549 * did not succeed.
550 */
551 return tape_34xx_erp_failed(request, -EIO);
552 case 0x2d:
553 /* Data security erase failure. */
554 if (request->op == TO_DSE)
555 return tape_34xx_erp_failed(request, -EIO);
556 /* Data security erase failure, but no such command issued. */
557 return tape_34xx_erp_bug(device, request, irb, sense[3]);
558 case 0x2e:
559 /*
560 * Not capable. This indicates either that the drive fails
561 * reading the format id mark or that the format specified
562 * is not supported by the drive.
563 */
564 PRINT_WARN("Drive not capable processing the tape format!\n");
565 return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
566 case 0x30:
567 /* The medium is write protected. */
568 PRINT_WARN("Medium is write protected!\n");
569 return tape_34xx_erp_failed(request, -EACCES);
570 case 0x32:
571 // Tension loss. We cannot recover this, it's an I/O error.
572 PRINT_WARN("The drive lost tape tension.\n");
573 return tape_34xx_erp_failed(request, -EIO);
574 case 0x33:
575 /*
576 * Load Failure. The cartridge was not inserted correctly or
577 * the tape is not threaded correctly.
578 */
579 PRINT_WARN("Cartridge load failure. Reload the cartridge "
580 "and try again.\n");
581 tape_34xx_delete_sbid_from(device, 0);
582 return tape_34xx_erp_failed(request, -EIO);
583 case 0x34:
584 /*
585 * Unload failure. The drive cannot maintain tape tension
586 * and control tape movement during an unload operation.
587 */
588 PRINT_WARN("Failure during cartridge unload. "
589 "Please try manually.\n");
590 if (request->op == TO_RUN)
591 return tape_34xx_erp_failed(request, -EIO);
592 return tape_34xx_erp_bug(device, request, irb, sense[3]);
593 case 0x35:
594 /*
595 * Drive equipment check. One of the following:
596 * - cu cannot recover from a drive detected error
597 * - a check code message is shown on drive display
598 * - the cartridge loader does not respond correctly
599 * - a failure occurs during an index, load, or unload cycle
600 */
601 PRINT_WARN("Equipment check! Please check the drive and "
602 "the cartridge loader.\n");
603 return tape_34xx_erp_failed(request, -EIO);
604 case 0x36:
605 if (device->cdev->id.driver_info == tape_3490)
606 /* End of data. */
607 return tape_34xx_erp_failed(request, -EIO);
608 /* This erpa is reserved for 3480 */
609 return tape_34xx_erp_bug(device, request, irb, sense[3]);
610 case 0x37:
611 /*
612 * Tape length error. The tape is shorter than reported in
613 * the beginning-of-tape data.
614 */
615 PRINT_WARN("Tape length error.\n");
616 return tape_34xx_erp_failed(request, -EIO);
617 case 0x38:
618 /*
619 * Physical end of tape. A read/write operation reached
620 * the physical end of tape.
621 */
622 if (request->op==TO_WRI ||
623 request->op==TO_DSE ||
624 request->op==TO_WTM)
625 return tape_34xx_erp_failed(request, -ENOSPC);
626 return tape_34xx_erp_failed(request, -EIO);
627 case 0x39:
628 /* Backward at Beginning of tape. */
629 return tape_34xx_erp_failed(request, -EIO);
630 case 0x3a:
631 /* Drive switched to not ready. */
632 PRINT_WARN("Drive not ready. Turn the ready/not ready switch "
633 "to ready position and try again.\n");
634 return tape_34xx_erp_failed(request, -EIO);
635 case 0x3b:
636 /* Manual rewind or unload. This causes an I/O error. */
637 PRINT_WARN("Medium was rewound or unloaded manually.\n");
638 tape_34xx_delete_sbid_from(device, 0);
639 return tape_34xx_erp_failed(request, -EIO);
640 case 0x42:
641 /*
642 * Degraded mode. A condition that can cause degraded
643 * performance is detected.
644 */
645 PRINT_WARN("Subsystem is running in degraded mode.\n");
646 return tape_34xx_erp_retry(request);
647 case 0x43:
648 /* Drive not ready. */
649 tape_34xx_delete_sbid_from(device, 0);
650 tape_med_state_set(device, MS_UNLOADED);
651	/* Some commands are successful even in this case */
652 if (sense[1] & SENSE_DRIVE_ONLINE) {
653 switch(request->op) {
654 case TO_ASSIGN:
655 case TO_UNASSIGN:
656 case TO_DIS:
657 case TO_NOP:
658 return tape_34xx_done(request);
659 break;
660 default:
661 break;
662 }
663 }
664 PRINT_WARN("The drive is not ready.\n");
665 return tape_34xx_erp_failed(request, -ENOMEDIUM);
666 case 0x44:
667 /* Locate Block unsuccessful. */
668 if (request->op != TO_BLOCK && request->op != TO_LBL)
669 /* No locate block was issued. */
670 return tape_34xx_erp_bug(device, request,
671 irb, sense[3]);
672 return tape_34xx_erp_failed(request, -EIO);
673 case 0x45:
674 /* The drive is assigned to a different channel path. */
675 PRINT_WARN("The drive is assigned elsewhere.\n");
676 return tape_34xx_erp_failed(request, -EIO);
677 case 0x46:
678 /*
679 * Drive not on-line. Drive may be switched offline,
680 * the power supply may be switched off or
681 * the drive address may not be set correctly.
682 */
683 PRINT_WARN("The drive is not on-line.");
684 return tape_34xx_erp_failed(request, -EIO);
685 case 0x47:
686 /* Volume fenced. CU reports volume integrity is lost. */
687 PRINT_WARN("Volume fenced. The volume integrity is lost.\n");
688 tape_34xx_delete_sbid_from(device, 0);
689 return tape_34xx_erp_failed(request, -EIO);
690 case 0x48:
691 /* Log sense data and retry request. */
692 return tape_34xx_erp_retry(request);
693 case 0x49:
694 /* Bus out check. A parity check error on the bus was found. */
695 PRINT_WARN("Bus out check. A data transfer over the bus "
696 "has been corrupted.\n");
697 return tape_34xx_erp_failed(request, -EIO);
698 case 0x4a:
699 /* Control unit erp failed. */
700 PRINT_WARN("The control unit I/O error recovery failed.\n");
701 return tape_34xx_erp_failed(request, -EIO);
702 case 0x4b:
703 /*
704 * CU and drive incompatible. The drive requests micro-program
705 * patches, which are not available on the CU.
706 */
707 PRINT_WARN("The drive needs microprogram patches from the "
708 "control unit, which are not available.\n");
709 return tape_34xx_erp_failed(request, -EIO);
710 case 0x4c:
711 /*
712 * Recovered Check-One failure. The CU detected a hardware error,
713 * but was able to recover.
714 */
715 return tape_34xx_erp_retry(request);
716 case 0x4d:
717 if (device->cdev->id.driver_info == tape_3490)
718 /*
719 * Resetting event received. Since the driver does
720 * not support resetting event recovery (which has to
721 * be handled by the I/O Layer), retry our command.
722 */
723 return tape_34xx_erp_retry(request);
724 /* This erpa is reserved for 3480. */
725 return tape_34xx_erp_bug(device, request, irb, sense[3]);
726 case 0x4e:
727 if (device->cdev->id.driver_info == tape_3490) {
728 /*
729 * Maximum block size exceeded. This indicates that
730 * the block to be written is larger than allowed for
731 * buffered mode.
732 */
733 PRINT_WARN("Maximum block size for buffered "
734 "mode exceeded.\n");
735 return tape_34xx_erp_failed(request, -ENOBUFS);
736 }
737 /* This erpa is reserved for 3480. */
738 return tape_34xx_erp_bug(device, request, irb, sense[3]);
739 case 0x50:
740 /*
741 * Read buffered log (Overflow). CU is running in extended
742 * buffered log mode, and a counter overflows. This should
743 * never happen, since we're never running in extended
744 * buffered log mode.
745 */
746 return tape_34xx_erp_retry(request);
747 case 0x51:
748 /*
749 * Read buffered log (EOV). EOF processing occurs while the
750 * CU is in extended buffered log mode. This should never
751 * happen, since we're never running in extended buffered
752 * log mode.
753 */
754 return tape_34xx_erp_retry(request);
755 case 0x52:
756 /* End of Volume complete. Rewind unload completed ok. */
757 if (request->op == TO_RUN) {
758 tape_med_state_set(device, MS_UNLOADED);
759 tape_34xx_delete_sbid_from(device, 0);
760 return tape_34xx_erp_succeeded(request);
761 }
762 return tape_34xx_erp_bug(device, request, irb, sense[3]);
763 case 0x53:
764 /* Global command intercept. */
765 return tape_34xx_erp_retry(request);
766 case 0x54:
767 /* Channel interface recovery (temporary). */
768 return tape_34xx_erp_retry(request);
769 case 0x55:
770 /* Channel interface recovery (permanent). */
771 PRINT_WARN("A permanent channel interface error occurred.\n");
772 return tape_34xx_erp_failed(request, -EIO);
773 case 0x56:
774 /* Channel protocol error. */
775 PRINT_WARN("A channel protocol error occurred.\n");
776 return tape_34xx_erp_failed(request, -EIO);
777 case 0x57:
778 if (device->cdev->id.driver_info == tape_3480) {
779 /* Attention intercept. */
780 PRINT_WARN("An attention intercept occurred, "
781 "which will be recovered.\n");
782 return tape_34xx_erp_retry(request);
783 } else {
784 /* Global status intercept. */
785			PRINT_WARN("A global status intercept was received, "
786 "which will be recovered.\n");
787 return tape_34xx_erp_retry(request);
788 }
789 case 0x5a:
790 /*
791 * Tape length incompatible. The tape inserted is too long,
792 * which could cause damage to the tape or the drive.
793 */
794 PRINT_WARN("Tape Length Incompatible\n");
795 PRINT_WARN("Tape length exceeds IBM enhanced capacity "
796			   "cartridge length or a medium\n");
797 PRINT_WARN("with EC-CST identification mark has been mounted "
798 "in a device that writes\n");
799 PRINT_WARN("3480 or 3480 XF format.\n");
800 return tape_34xx_erp_failed(request, -EIO);
801 case 0x5b:
802 /* Format 3480 XF incompatible */
803 if (sense[1] & SENSE_BEGINNING_OF_TAPE)
804 /* The tape will get overwritten. */
805 return tape_34xx_erp_retry(request);
806 PRINT_WARN("Format 3480 XF Incompatible\n");
807 PRINT_WARN("Medium has been created in 3480 format. "
808			   "To change the format, writes\n");
809 PRINT_WARN("must be issued at BOT.\n");
810 return tape_34xx_erp_failed(request, -EIO);
811 case 0x5c:
812 /* Format 3480-2 XF incompatible */
813 PRINT_WARN("Format 3480-2 XF Incompatible\n");
814 PRINT_WARN("Device can only read 3480 or 3480 XF format.\n");
815 return tape_34xx_erp_failed(request, -EIO);
816 case 0x5d:
817 /* Tape length violation. */
818 PRINT_WARN("Tape Length Violation\n");
819 PRINT_WARN("The mounted tape exceeds IBM Enhanced Capacity "
820			   "Cartridge System Tape length.\n");
821		PRINT_WARN("This may cause damage to the drive or tape when "
822			   "processing up to the end of volume (EOV).\n");
823 return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
824 case 0x5e:
825 /* Compaction algorithm incompatible. */
826 PRINT_WARN("Compaction Algorithm Incompatible\n");
827 PRINT_WARN("The volume is recorded using an incompatible "
828 "compaction algorithm,\n");
829 PRINT_WARN("which is not supported by the device.\n");
830 return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
831
832 /* The following erpas should have been covered earlier. */
833 case 0x23: /* Read data check. */
834 case 0x25: /* Write data check. */
835 case 0x26: /* Data check (read opposite). */
836 case 0x28: /* Write id mark check. */
837 case 0x31: /* Tape void. */
838 case 0x40: /* Overrun error. */
839 case 0x41: /* Record sequence error. */
840 /* All other erpas are reserved for future use. */
841 default:
842 return tape_34xx_erp_bug(device, request, irb, sense[3]);
843 }
844}
845
846/*
847 * 3480/3490 interrupt handler
848 */
849static int
850tape_34xx_irq(struct tape_device *device, struct tape_request *request,
851 struct irb *irb)
852{
853 if (request == NULL)
854 return tape_34xx_unsolicited_irq(device, irb);
855
856 if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) &&
857 (irb->scsw.dstat & DEV_STAT_DEV_END) &&
858 (request->op == TO_WRI)) {
859 /* Write at end of volume */
860 PRINT_INFO("End of volume\n"); /* XXX */
861 return tape_34xx_erp_failed(request, -ENOSPC);
862 }
863
864 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
865 return tape_34xx_unit_check(device, request, irb);
866
867 if (irb->scsw.dstat & DEV_STAT_DEV_END) {
868 /*
869 * A unit exception occurs on skipping over a tapemark block.
870 */
871 if (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) {
872 if (request->op == TO_BSB || request->op == TO_FSB)
873 request->rescnt++;
874 else
875 DBF_EVENT(5, "Unit Exception!\n");
876 }
877 return tape_34xx_done(request);
878 }
879
880 DBF_EVENT(6, "xunknownirq\n");
881 PRINT_ERR("Unexpected interrupt.\n");
882	PRINT_ERR("Current op is: %s\n", tape_op_verbose[request->op]);
883 tape_dump_sense(device, request, irb);
884 return TAPE_IO_STOP;
885}
886
887/*
888 * ioctl_overload
889 */
890static int
891tape_34xx_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
892{
893 if (cmd == TAPE390_DISPLAY) {
894 struct display_struct disp;
895
896 if (copy_from_user(&disp, (char __user *) arg, sizeof(disp)) != 0)
897 return -EFAULT;
898
899 return tape_std_display(device, &disp);
900 } else
901 return -EINVAL;
902}
903
904static inline void
905tape_34xx_append_new_sbid(struct tape_34xx_block_id bid, struct list_head *l)
906{
907 struct tape_34xx_sbid * new_sbid;
908
909 new_sbid = kmalloc(sizeof(*new_sbid), GFP_ATOMIC);
910 if (!new_sbid)
911 return;
912
913 new_sbid->bid = bid;
914 list_add(&new_sbid->list, l);
915}
916
917/*
918 * Build up the search block ID list. The block ID consists of a logical
919 * block number and a hardware specific part. The hardware specific part
920 * helps the tape drive to speed up searching for a specific block.
921 */
922static void
923tape_34xx_add_sbid(struct tape_device *device, struct tape_34xx_block_id bid)
924{
925 struct list_head * sbid_list;
926 struct tape_34xx_sbid * sbid;
927 struct list_head * l;
928
929 /*
930 * immediately return if there is no list at all or the block to add
931 * is located in segment 1 of wrap 0 because this position is used
932 * if no hardware position data is supplied.
933 */
934 sbid_list = (struct list_head *) device->discdata;
935 if (!sbid_list || (bid.segment < 2 && bid.wrap == 0))
936 return;
937
938 /*
939 * Search the position where to insert the new entry. Hardware
940 * acceleration uses only the segment and wrap number. So we
941 * need only one entry for a specific wrap/segment combination.
942 * If there is a block with a lower number but the same hard-
943 * ware position data we just update the block number in the
944 * existing entry.
945 */
946 list_for_each(l, sbid_list) {
947 sbid = list_entry(l, struct tape_34xx_sbid, list);
948
949 if (
950 (sbid->bid.segment == bid.segment) &&
951 (sbid->bid.wrap == bid.wrap)
952 ) {
953 if (bid.block < sbid->bid.block)
954 sbid->bid = bid;
955 else return;
956 break;
957 }
958
959 /* Sort in according to logical block number. */
960 if (bid.block < sbid->bid.block) {
961 tape_34xx_append_new_sbid(bid, l->prev);
962 break;
963 }
964 }
965 /* List empty or new block bigger than last entry. */
966 if (l == sbid_list)
967 tape_34xx_append_new_sbid(bid, l->prev);
968
969 DBF_LH(4, "Current list is:\n");
970 list_for_each(l, sbid_list) {
971 sbid = list_entry(l, struct tape_34xx_sbid, list);
972 DBF_LH(4, "%d:%03d@%05d\n",
973 sbid->bid.wrap,
974 sbid->bid.segment,
975 sbid->bid.block
976 );
977 }
978}
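/*
 * A worked example (values invented for illustration): adding the ids
 * 0:002@00020, then 0:003@00030, then 0:002@00015 leaves the list as
 *
 *	0:002@00015 -> 0:003@00030
 *
 * The third add only lowers the block number of the existing entry for
 * wrap 0 / segment 2. Blocks in segment 0 or 1 of wrap 0 are never
 * added, since that position is the default when no hardware position
 * data is known.
 */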
979
980/*
981 * Delete all entries from the search block ID list that belong to tape blocks
982 * equal or higher than the given number.
983 */
984static void
985tape_34xx_delete_sbid_from(struct tape_device *device, int from)
986{
987 struct list_head * sbid_list;
988 struct tape_34xx_sbid * sbid;
989 struct list_head * l;
990 struct list_head * n;
991
992 sbid_list = (struct list_head *) device->discdata;
993 if (!sbid_list)
994 return;
995
996 list_for_each_safe(l, n, sbid_list) {
997 sbid = list_entry(l, struct tape_34xx_sbid, list);
998 if (sbid->bid.block >= from) {
999 DBF_LH(4, "Delete sbid %d:%03d@%05d\n",
1000 sbid->bid.wrap,
1001 sbid->bid.segment,
1002 sbid->bid.block
1003 );
1004 list_del(l);
1005 kfree(sbid);
1006 }
1007 }
1008}
1009
1010/*
1011 * Merge hardware position data into a block id.
1012 */
1013static void
1014tape_34xx_merge_sbid(
1015 struct tape_device * device,
1016 struct tape_34xx_block_id * bid
1017) {
1018 struct tape_34xx_sbid * sbid;
1019 struct tape_34xx_sbid * sbid_to_use;
1020 struct list_head * sbid_list;
1021 struct list_head * l;
1022
1023 sbid_list = (struct list_head *) device->discdata;
1024 bid->wrap = 0;
1025 bid->segment = 1;
1026
1027 if (!sbid_list || list_empty(sbid_list))
1028 return;
1029
1030 sbid_to_use = NULL;
1031 list_for_each(l, sbid_list) {
1032 sbid = list_entry(l, struct tape_34xx_sbid, list);
1033
1034 if (sbid->bid.block >= bid->block)
1035 break;
1036 sbid_to_use = sbid;
1037 }
1038 if (sbid_to_use) {
1039 bid->wrap = sbid_to_use->bid.wrap;
1040 bid->segment = sbid_to_use->bid.segment;
1041 DBF_LH(4, "Use %d:%03d@%05d for %05d\n",
1042 sbid_to_use->bid.wrap,
1043 sbid_to_use->bid.segment,
1044 sbid_to_use->bid.block,
1045 bid->block
1046 );
1047 }
1048}
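/*
 * For reference, a sketch of the block id layout that the code above
 * relies on (the authoritative definition lives in tape_34xx.h):
 *
 *	struct tape_34xx_block_id {
 *		unsigned int	wrap	: 1;	// wrap on the tape
 *		unsigned int	segment	: 7;	// segment within the wrap
 *		unsigned int	format	: 2;	// TAPE34XX_FMT_*
 *		unsigned int	block	: 22;	// logical block number
 *	} __attribute__ ((packed));
 *
 * The 22-bit block field matches the 0x3fffff limit checked in
 * tape_34xx_mtseek() below.
 */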
1049
1050static int
1051tape_34xx_setup_device(struct tape_device * device)
1052{
1053 int rc;
1054 struct list_head * discdata;
1055
1056 DBF_EVENT(6, "34xx device setup\n");
1057 if ((rc = tape_std_assign(device)) == 0) {
1058 if ((rc = tape_34xx_medium_sense(device)) != 0) {
1059 DBF_LH(3, "34xx medium sense returned %d\n", rc);
1060 }
1061 }
1062 discdata = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1063 if (discdata) {
1064 INIT_LIST_HEAD(discdata);
1065 device->discdata = discdata;
1066 }
1067
1068 return rc;
1069}
1070
1071static void
1072tape_34xx_cleanup_device(struct tape_device *device)
1073{
1074 tape_std_unassign(device);
1075
1076 if (device->discdata) {
1077 tape_34xx_delete_sbid_from(device, 0);
1078 kfree(device->discdata);
1079 device->discdata = NULL;
1080 }
1081}
1082
1083
1084/*
1085 * MTTELL: Tell block. Return the number of the block relative to the current file.
1086 */
1087static int
1088tape_34xx_mttell(struct tape_device *device, int mt_count)
1089{
1090 struct {
1091 struct tape_34xx_block_id cbid;
1092 struct tape_34xx_block_id dbid;
1093 } __attribute__ ((packed)) block_id;
1094 int rc;
1095
1096 rc = tape_std_read_block_id(device, (__u64 *) &block_id);
1097 if (rc)
1098 return rc;
1099
1100 tape_34xx_add_sbid(device, block_id.cbid);
1101 return block_id.cbid.block;
1102}
1103
1104/*
1105 * MTSEEK: seek to the specified block.
1106 */
1107static int
1108tape_34xx_mtseek(struct tape_device *device, int mt_count)
1109{
1110 struct tape_request *request;
1111 struct tape_34xx_block_id * bid;
1112
1113 if (mt_count > 0x3fffff) {
1114 DBF_EXCEPTION(6, "xsee parm\n");
1115 return -EINVAL;
1116 }
1117 request = tape_alloc_request(3, 4);
1118 if (IS_ERR(request))
1119 return PTR_ERR(request);
1120
1121 /* setup ccws */
1122 request->op = TO_LBL;
1123 bid = (struct tape_34xx_block_id *) request->cpdata;
1124 bid->format = (*device->modeset_byte & 0x08) ?
1125 TAPE34XX_FMT_3480_XF : TAPE34XX_FMT_3480;
1126 bid->block = mt_count;
1127 tape_34xx_merge_sbid(device, bid);
1128
1129 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
1130 tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
1131 tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
1132
1133 /* execute it */
1134 return tape_do_io_free(device, request);
1135}
1136
1137#ifdef CONFIG_S390_TAPE_BLOCK
1138/*
1139 * Tape block read for 34xx.
1140 */
1141static struct tape_request *
1142tape_34xx_bread(struct tape_device *device, struct request *req)
1143{
1144 struct tape_request *request;
1145 struct ccw1 *ccw;
1146 int count = 0, i;
1147 unsigned off;
1148 char *dst;
1149 struct bio_vec *bv;
1150 struct bio *bio;
1151 struct tape_34xx_block_id * start_block;
1152
1153 DBF_EVENT(6, "xBREDid:");
1154
1155 /* Count the number of blocks for the request. */
1156 rq_for_each_bio(bio, req) {
1157 bio_for_each_segment(bv, bio, i) {
1158 count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
1159 }
1160 }
1161
1162 /* Allocate the ccw request. */
1163 request = tape_alloc_request(3+count+1, 8);
1164 if (IS_ERR(request))
1165 return request;
1166
1167 /* Setup ccws. */
1168 request->op = TO_BLOCK;
1169 start_block = (struct tape_34xx_block_id *) request->cpdata;
1170 start_block->block = req->sector >> TAPEBLOCK_HSEC_S2B;
1171 DBF_EVENT(6, "start_block = %i\n", start_block->block);
1172
1173 ccw = request->cpaddr;
1174 ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);
1175
1176 /*
1177 * We always setup a nop after the mode set ccw. This slot is
1178	 * used in tape_34xx_check_locate to insert a locate ccw if the
1179 * current tape position doesn't match the start block to be read.
1180 * The second nop will be filled with a read block id which is in
1181 * turn used by tape_34xx_free_bread to populate the segment bid
1182 * table.
1183 */
1184 ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
1185 ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
1186
1187 rq_for_each_bio(bio, req) {
1188 bio_for_each_segment(bv, bio, i) {
1189 dst = kmap(bv->bv_page) + bv->bv_offset;
1190 for (off = 0; off < bv->bv_len;
1191 off += TAPEBLOCK_HSEC_SIZE) {
1192 ccw->flags = CCW_FLAG_CC;
1193 ccw->cmd_code = READ_FORWARD;
1194 ccw->count = TAPEBLOCK_HSEC_SIZE;
1195 set_normalized_cda(ccw, (void*) __pa(dst));
1196 ccw++;
1197 dst += TAPEBLOCK_HSEC_SIZE;
1198 }
1199 }
1200 }
1201
1202 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
1203 DBF_EVENT(6, "xBREDccwg\n");
1204 return request;
1205}
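/*
 * The channel program built above has the following shape, n being the
 * number of hardware sectors in the request:
 *
 *	MODE_SET_DB -> NOP -> NOP -> n * READ_FORWARD -> NOP
 *
 * tape_34xx_check_locate() may later replace the two NOP slots by a
 * LOCATE and a READ_BLOCK_ID ccw if the tape has to be repositioned
 * first.
 */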
1206
1207static void
1208tape_34xx_free_bread (struct tape_request *request)
1209{
1210 struct ccw1* ccw;
1211
1212 ccw = request->cpaddr;
1213 if ((ccw + 2)->cmd_code == READ_BLOCK_ID) {
1214 struct {
1215 struct tape_34xx_block_id cbid;
1216 struct tape_34xx_block_id dbid;
1217 } __attribute__ ((packed)) *rbi_data;
1218
1219 rbi_data = request->cpdata;
1220
1221 if (request->device)
1222 tape_34xx_add_sbid(request->device, rbi_data->cbid);
1223 }
1224
1225 /* Last ccw is a nop and doesn't need clear_normalized_cda */
1226 for (; ccw->flags & CCW_FLAG_CC; ccw++)
1227 if (ccw->cmd_code == READ_FORWARD)
1228 clear_normalized_cda(ccw);
1229 tape_free_request(request);
1230}
1231
1232/*
1233 * check_locate is called just before the tape request is passed to
1234 * the common io layer for execution. It has to check the current
1235 * tape position and insert a locate ccw if it doesn't match the
1236 * start block for the request.
1237 */
1238static void
1239tape_34xx_check_locate(struct tape_device *device, struct tape_request *request)
1240{
1241 struct tape_34xx_block_id * start_block;
1242
1243 start_block = (struct tape_34xx_block_id *) request->cpdata;
1244 if (start_block->block == device->blk_data.block_position)
1245 return;
1246
1247 DBF_LH(4, "Block seek(%06d+%06d)\n", start_block->block, device->bof);
1248 start_block->wrap = 0;
1249 start_block->segment = 1;
1250 start_block->format = (*device->modeset_byte & 0x08) ?
1251 TAPE34XX_FMT_3480_XF :
1252 TAPE34XX_FMT_3480;
1253 start_block->block = start_block->block + device->bof;
1254 tape_34xx_merge_sbid(device, start_block);
1255 tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
1256 tape_ccw_cc(request->cpaddr + 2, READ_BLOCK_ID, 8, request->cpdata);
1257}
1258#endif
1259
1260/*
1261 * List of 3480/3490 magnetic tape commands.
1262 */
1263static tape_mtop_fn tape_34xx_mtop[TAPE_NR_MTOPS] = {
1264 [MTRESET] = tape_std_mtreset,
1265 [MTFSF] = tape_std_mtfsf,
1266 [MTBSF] = tape_std_mtbsf,
1267 [MTFSR] = tape_std_mtfsr,
1268 [MTBSR] = tape_std_mtbsr,
1269 [MTWEOF] = tape_std_mtweof,
1270 [MTREW] = tape_std_mtrew,
1271 [MTOFFL] = tape_std_mtoffl,
1272 [MTNOP] = tape_std_mtnop,
1273 [MTRETEN] = tape_std_mtreten,
1274 [MTBSFM] = tape_std_mtbsfm,
1275 [MTFSFM] = tape_std_mtfsfm,
1276 [MTEOM] = tape_std_mteom,
1277 [MTERASE] = tape_std_mterase,
1278 [MTRAS1] = NULL,
1279 [MTRAS2] = NULL,
1280 [MTRAS3] = NULL,
1281 [MTSETBLK] = tape_std_mtsetblk,
1282 [MTSETDENSITY] = NULL,
1283 [MTSEEK] = tape_34xx_mtseek,
1284 [MTTELL] = tape_34xx_mttell,
1285 [MTSETDRVBUFFER] = NULL,
1286 [MTFSS] = NULL,
1287 [MTBSS] = NULL,
1288 [MTWSM] = NULL,
1289 [MTLOCK] = NULL,
1290 [MTUNLOCK] = NULL,
1291 [MTLOAD] = tape_std_mtload,
1292 [MTUNLOAD] = tape_std_mtunload,
1293 [MTCOMPRESSION] = tape_std_mtcompression,
1294 [MTSETPART] = NULL,
1295 [MTMKPART] = NULL
1296};
1297
1298/*
1299 * Tape discipline structure for 3480 and 3490.
1300 */
1301static struct tape_discipline tape_discipline_34xx = {
1302 .owner = THIS_MODULE,
1303 .setup_device = tape_34xx_setup_device,
1304 .cleanup_device = tape_34xx_cleanup_device,
1305 .process_eov = tape_std_process_eov,
1306 .irq = tape_34xx_irq,
1307 .read_block = tape_std_read_block,
1308 .write_block = tape_std_write_block,
1309#ifdef CONFIG_S390_TAPE_BLOCK
1310 .bread = tape_34xx_bread,
1311 .free_bread = tape_34xx_free_bread,
1312 .check_locate = tape_34xx_check_locate,
1313#endif
1314 .ioctl_fn = tape_34xx_ioctl,
1315 .mtop_array = tape_34xx_mtop
1316};
1317
1318static struct ccw_device_id tape_34xx_ids[] = {
1319	{ CCW_DEVICE_DEVTYPE(0x3480, 0, 0x3480, 0), .driver_info = tape_3480},
1320	{ CCW_DEVICE_DEVTYPE(0x3490, 0, 0x3490, 0), .driver_info = tape_3490},
1321 { /* end of list */ }
1322};
1323
1324static int
1325tape_34xx_online(struct ccw_device *cdev)
1326{
1327 return tape_generic_online(
1328 cdev->dev.driver_data,
1329 &tape_discipline_34xx
1330 );
1331}
1332
1333static int
1334tape_34xx_offline(struct ccw_device *cdev)
1335{
1336 return tape_generic_offline(cdev->dev.driver_data);
1337}
1338
1339static struct ccw_driver tape_34xx_driver = {
1340 .name = "tape_34xx",
1341 .owner = THIS_MODULE,
1342 .ids = tape_34xx_ids,
1343 .probe = tape_generic_probe,
1344 .remove = tape_generic_remove,
1345 .set_online = tape_34xx_online,
1346 .set_offline = tape_34xx_offline,
1347};
1348
1349static int
1350tape_34xx_init (void)
1351{
1352 int rc;
1353
1354	TAPE_DBF_AREA = debug_register("tape_34xx", 1, 2, 4 * sizeof(long));
1355 debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
1356#ifdef DBF_LIKE_HELL
1357 debug_set_level(TAPE_DBF_AREA, 6);
1358#endif
1359
1360 DBF_EVENT(3, "34xx init: $Revision: 1.21 $\n");
1361 /* Register driver for 3480/3490 tapes. */
1362 rc = ccw_driver_register(&tape_34xx_driver);
1363 if (rc)
1364 DBF_EVENT(3, "34xx init failed\n");
1365 else
1366 DBF_EVENT(3, "34xx registered\n");
1367 return rc;
1368}
1369
1370static void
1371tape_34xx_exit(void)
1372{
1373 ccw_driver_unregister(&tape_34xx_driver);
1374
1375 debug_unregister(TAPE_DBF_AREA);
1376}
1377
1378MODULE_DEVICE_TABLE(ccw, tape_34xx_ids);
1379MODULE_AUTHOR("(C) 2001-2002 IBM Deutschland Entwicklung GmbH");
1380MODULE_DESCRIPTION("Linux on zSeries channel attached 3480/3490 tape "
1381 "device driver ($Revision: 1.21 $)");
1382MODULE_LICENSE("GPL");
1383
1384module_init(tape_34xx_init);
1385module_exit(tape_34xx_exit);
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
new file mode 100644
index 000000000000..1efc9f21229e
--- /dev/null
+++ b/drivers/s390/char/tape_block.c
@@ -0,0 +1,492 @@
1/*
2 * drivers/s390/char/tape_block.c
3 * block device frontend for tape device driver
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001,2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
9 * Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Stefan Bader <shbader@de.ibm.com>
11 */
12
13#include <linux/fs.h>
14#include <linux/config.h>
15#include <linux/module.h>
16#include <linux/blkdev.h>
17#include <linux/interrupt.h>
18#include <linux/buffer_head.h>
19
20#include <asm/debug.h>
21
22#define TAPE_DBF_AREA tape_core_dbf
23
24#include "tape.h"
25
26#define PRINTK_HEADER "TAPE_BLOCK: "
27
28#define TAPEBLOCK_MAX_SEC 100
29#define TAPEBLOCK_MIN_REQUEUE 3
30
31/*
32 * 2003/11/25 Stefan Bader <shbader@de.ibm.com>
33 *
34 * In 2.5/2.6 the block device request function is very likely to be called
35 * with disabled interrupts (e.g. generic_unplug_device). So the driver can't
36 * just call any function that tries to allocate CCW requests from that con-
37 * text since it might sleep. There are two choices to work around this:
38 * a) do not allocate with kmalloc but use its own memory pool
39 * b) take requests from the queue outside that context, knowing that
40 * allocation might sleep
41 */
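/*
 * This driver implements choice b): tapeblock_request_fn() merely
 * schedules the requeue_task via tapeblock_trigger_requeue(), and
 * tapeblock_requeue() then moves the requests to the CCW queue from
 * process context, where the allocation may sleep.
 */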
42
43/*
44 * file operation structure for tape block frontend
45 */
46static int tapeblock_open(struct inode *, struct file *);
47static int tapeblock_release(struct inode *, struct file *);
48static int tapeblock_ioctl(struct inode *, struct file *, unsigned int,
49 unsigned long);
50static int tapeblock_medium_changed(struct gendisk *);
51static int tapeblock_revalidate_disk(struct gendisk *);
52
53static struct block_device_operations tapeblock_fops = {
54 .owner = THIS_MODULE,
55 .open = tapeblock_open,
56 .release = tapeblock_release,
57 .ioctl = tapeblock_ioctl,
58 .media_changed = tapeblock_medium_changed,
59 .revalidate_disk = tapeblock_revalidate_disk,
60};
61
62static int tapeblock_major = 0;
63
64static void
65tapeblock_trigger_requeue(struct tape_device *device)
66{
67 /* Protect against rescheduling. */
68 if (atomic_compare_and_swap(0, 1, &device->blk_data.requeue_scheduled))
69 return;
70 schedule_work(&device->blk_data.requeue_task);
71}
72
73/*
74 * Post finished request.
75 */
76static inline void
77tapeblock_end_request(struct request *req, int uptodate)
78{
79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
80 BUG();
81 end_that_request_last(req);
82}
83
84static void
85__tapeblock_end_request(struct tape_request *ccw_req, void *data)
86{
87 struct tape_device *device;
88 struct request *req;
89
90 DBF_LH(6, "__tapeblock_end_request()\n");
91
92 device = ccw_req->device;
93 req = (struct request *) data;
94 tapeblock_end_request(req, ccw_req->rc == 0);
95 if (ccw_req->rc == 0)
96 /* Update position. */
97 device->blk_data.block_position =
98 (req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B;
99 else
100 /* We lost the position information due to an error. */
101 device->blk_data.block_position = -1;
102 device->discipline->free_bread(ccw_req);
103 if (!list_empty(&device->req_queue) ||
104 elv_next_request(device->blk_data.request_queue))
105 tapeblock_trigger_requeue(device);
106}
107
108/*
109 * Feed a single request from the block layer to the tape device ccw queue.
110 */
111static inline int
112tapeblock_start_request(struct tape_device *device, struct request *req)
113{
114 struct tape_request * ccw_req;
115 int rc;
116
117 DBF_LH(6, "tapeblock_start_request(%p, %p)\n", device, req);
118
119 ccw_req = device->discipline->bread(device, req);
120 if (IS_ERR(ccw_req)) {
121 DBF_EVENT(1, "TBLOCK: bread failed\n");
122 tapeblock_end_request(req, 0);
123 return PTR_ERR(ccw_req);
124 }
125 ccw_req->callback = __tapeblock_end_request;
126 ccw_req->callback_data = (void *) req;
127 ccw_req->retries = TAPEBLOCK_RETRIES;
128
129 rc = tape_do_io_async(device, ccw_req);
130 if (rc) {
131 /*
132 * Start/enqueueing failed. No retries in
133 * this case.
134 */
135 tapeblock_end_request(req, 0);
136 device->discipline->free_bread(ccw_req);
137 }
138
139 return rc;
140}
141
142/*
143 * Move requests from the block device request queue to the tape device ccw
144 * queue.
145 */
146static void
147tapeblock_requeue(void *data) {
148 struct tape_device * device;
149 request_queue_t * queue;
150 int nr_queued;
151 struct request * req;
152 struct list_head * l;
153 int rc;
154
155 device = (struct tape_device *) data;
156 if (!device)
157 return;
158
159 spin_lock_irq(get_ccwdev_lock(device->cdev));
160 queue = device->blk_data.request_queue;
161
162 /* Count number of requests on ccw queue. */
163 nr_queued = 0;
164 list_for_each(l, &device->req_queue)
165 nr_queued++;
166 spin_unlock(get_ccwdev_lock(device->cdev));
167
168 spin_lock(&device->blk_data.request_queue_lock);
169 while (
170 !blk_queue_plugged(queue) &&
171 elv_next_request(queue) &&
172 nr_queued < TAPEBLOCK_MIN_REQUEUE
173 ) {
174 req = elv_next_request(queue);
175 if (rq_data_dir(req) == WRITE) {
176 DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
177 blkdev_dequeue_request(req);
178 tapeblock_end_request(req, 0);
179 continue;
180 }
181 spin_unlock_irq(&device->blk_data.request_queue_lock);
182 rc = tapeblock_start_request(device, req);
183 spin_lock_irq(&device->blk_data.request_queue_lock);
184 blkdev_dequeue_request(req);
185 nr_queued++;
186 }
187 spin_unlock_irq(&device->blk_data.request_queue_lock);
188 atomic_set(&device->blk_data.requeue_scheduled, 0);
189}
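/*
 * Note on the locking above: interrupts stay disabled from the
 * spin_lock_irq() on the ccw device lock up to the final
 * spin_unlock_irq() on the request queue lock. The plain
 * spin_lock()/spin_unlock() calls in between just hand the
 * irq-disabled section over from one lock to the other.
 */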
190
191/*
192 * Tape request queue function. Called from ll_rw_blk.c
193 */
194static void
195tapeblock_request_fn(request_queue_t *queue)
196{
197 struct tape_device *device;
198
199 device = (struct tape_device *) queue->queuedata;
200 DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
201 if (device == NULL)
202 BUG();
203
204 tapeblock_trigger_requeue(device);
205}
206
207/*
208 * This function is called for every new tapedevice
209 */
210int
211tapeblock_setup_device(struct tape_device * device)
212{
213 struct tape_blk_data * blkdat;
214 struct gendisk * disk;
215 int rc;
216
217 blkdat = &device->blk_data;
218 spin_lock_init(&blkdat->request_queue_lock);
219 atomic_set(&blkdat->requeue_scheduled, 0);
220
221 blkdat->request_queue = blk_init_queue(
222 tapeblock_request_fn,
223 &blkdat->request_queue_lock
224 );
225 if (!blkdat->request_queue)
226 return -ENOMEM;
227
228 elevator_exit(blkdat->request_queue->elevator);
229 rc = elevator_init(blkdat->request_queue, "noop");
230 if (rc)
231 goto cleanup_queue;
232
233 blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
234 blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
235 blk_queue_max_phys_segments(blkdat->request_queue, -1L);
236 blk_queue_max_hw_segments(blkdat->request_queue, -1L);
237 blk_queue_max_segment_size(blkdat->request_queue, -1L);
238 blk_queue_segment_boundary(blkdat->request_queue, -1L);
239
240 disk = alloc_disk(1);
241 if (!disk) {
242 rc = -ENOMEM;
243 goto cleanup_queue;
244 }
245
246 disk->major = tapeblock_major;
247 disk->first_minor = device->first_minor;
248 disk->fops = &tapeblock_fops;
249 disk->private_data = tape_get_device_reference(device);
250 disk->queue = blkdat->request_queue;
251 set_capacity(disk, 0);
252 sprintf(disk->disk_name, "btibm%d",
253 device->first_minor / TAPE_MINORS_PER_DEV);
254
255 blkdat->disk = disk;
256 blkdat->medium_changed = 1;
257 blkdat->request_queue->queuedata = tape_get_device_reference(device);
258
259 add_disk(disk);
260
261 INIT_WORK(&blkdat->requeue_task, tapeblock_requeue,
262 tape_get_device_reference(device));
263
264 return 0;
265
266cleanup_queue:
267 blk_cleanup_queue(blkdat->request_queue);
268 blkdat->request_queue = NULL;
269
270 return rc;
271}
272
273void
274tapeblock_cleanup_device(struct tape_device *device)
275{
276 flush_scheduled_work();
277 device->blk_data.requeue_task.data = tape_put_device(device);
278
279 if (!device->blk_data.disk) {
280 PRINT_ERR("(%s): No gendisk to clean up!\n",
281 device->cdev->dev.bus_id);
282 goto cleanup_queue;
283 }
284
285 del_gendisk(device->blk_data.disk);
286 device->blk_data.disk->private_data =
287 tape_put_device(device->blk_data.disk->private_data);
288 put_disk(device->blk_data.disk);
289
290 device->blk_data.disk = NULL;
291cleanup_queue:
292 device->blk_data.request_queue->queuedata = tape_put_device(device);
293
294 blk_cleanup_queue(device->blk_data.request_queue);
295 device->blk_data.request_queue = NULL;
296}
297
298/*
299 * Detect number of blocks of the tape.
300 * FIXME: can we extend this to detect the block size as well?
301 */
302static int
303tapeblock_revalidate_disk(struct gendisk *disk)
304{
305 struct tape_device * device;
306 unsigned int nr_of_blks;
307 int rc;
308
309 device = (struct tape_device *) disk->private_data;
310 if (!device)
311 BUG();
312
313 if (!device->blk_data.medium_changed)
314 return 0;
315
316 PRINT_INFO("Detecting media size...\n");
317 rc = tape_mtop(device, MTFSFM, 1);
318 if (rc)
319 return rc;
320
321 rc = tape_mtop(device, MTTELL, 1);
322 if (rc < 0)
323 return rc;
324
325 DBF_LH(3, "Image file ends at %d\n", rc);
326 nr_of_blks = rc;
327
328 /* This will fail for the first file. Catch the error by checking the
329 * position. */
330 tape_mtop(device, MTBSF, 1);
331
332 rc = tape_mtop(device, MTTELL, 1);
333 if (rc < 0)
334 return rc;
335
336 if (rc > nr_of_blks)
337 return -EINVAL;
338
339 DBF_LH(3, "Image file starts at %d\n", rc);
340 device->bof = rc;
341 nr_of_blks -= rc;
342
343 PRINT_INFO("Found %i blocks on media\n", nr_of_blks);
344 set_capacity(device->blk_data.disk,
345 nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));
346
347 device->blk_data.block_position = 0;
348 device->blk_data.medium_changed = 0;
349 return 0;
350}
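/*
 * Worked example for the capacity calculation above, assuming the
 * hardware segment size TAPEBLOCK_HSEC_SIZE is 2048 (see tape.h): an
 * image of 100 tape blocks is presented to the block layer as
 * 100 * (2048 / 512) = 400 standard 512-byte sectors.
 */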
351
352static int
353tapeblock_medium_changed(struct gendisk *disk)
354{
355 struct tape_device *device;
356
357 device = (struct tape_device *) disk->private_data;
358 DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n",
359 device, device->blk_data.medium_changed);
360
361 return device->blk_data.medium_changed;
362}
363
364/*
365 * Block frontend tape device open function.
366 */
367static int
368tapeblock_open(struct inode *inode, struct file *filp)
369{
370 struct gendisk * disk;
371 struct tape_device * device;
372 int rc;
373
374 disk = inode->i_bdev->bd_disk;
375 device = tape_get_device_reference(disk->private_data);
376
377 if (device->required_tapemarks) {
378 DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
379 PRINT_ERR("TBLOCK: Refusing to open tape with missing"
380 " end of file marks.\n");
381 rc = -EPERM;
382 goto put_device;
383 }
384
385 rc = tape_open(device);
386 if (rc)
387 goto put_device;
388
389 rc = tapeblock_revalidate_disk(disk);
390 if (rc)
391 goto release;
392
393 /*
394	 * Note: The reference to <device> is held until the release function
395 * is called.
396 */
397 tape_state_set(device, TS_BLKUSE);
398 return 0;
399
400release:
401 tape_release(device);
402 put_device:
403 tape_put_device(device);
404 return rc;
405}
406
407/*
408 * Block frontend tape device release function.
409 *
410 * Note: One reference to the tape device was made by the open function. So
411 * we just get the pointer here and release the reference.
412 */
413static int
414tapeblock_release(struct inode *inode, struct file *filp)
415{
416 struct gendisk *disk = inode->i_bdev->bd_disk;
417 struct tape_device *device = disk->private_data;
418
419 tape_state_set(device, TS_IN_USE);
420 tape_release(device);
421 tape_put_device(device);
422
423 return 0;
424}
425
426/*
427 * Support of some generic block device IOCTLs.
428 */
429static int
430tapeblock_ioctl(
431 struct inode * inode,
432 struct file * file,
433 unsigned int command,
434 unsigned long arg
435) {
436 int rc;
437 int minor;
438 struct gendisk *disk = inode->i_bdev->bd_disk;
439 struct tape_device *device = disk->private_data;
440
441	rc = 0;
442	if (!disk)
443		BUG();
444	if (!device)
445		BUG();
446	minor = iminor(inode);
449
450 DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
451 DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor);
452
453 switch (command) {
454 /* Refuse some IOCTL calls without complaining (mount). */
455 case 0x5310: /* CDROMMULTISESSION */
456 rc = -EINVAL;
457 break;
458 default:
459 PRINT_WARN("invalid ioctl 0x%x\n", command);
460 rc = -EINVAL;
461 }
462
463 return rc;
464}
465
466/*
467 * Initialize block device frontend.
468 */
469int
470tapeblock_init(void)
471{
472 int rc;
473
474 /* Register the tape major number to the kernel */
475 rc = register_blkdev(tapeblock_major, "tBLK");
476 if (rc < 0)
477 return rc;
478
479 if (tapeblock_major == 0)
480 tapeblock_major = rc;
481 PRINT_INFO("tape gets major %d for block device\n", tapeblock_major);
482 return 0;
483}
484
485/*
486 * Deregister major for block device frontend
487 */
488void
489tapeblock_exit(void)
490{
491 unregister_blkdev(tapeblock_major, "tBLK");
492}
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
new file mode 100644
index 000000000000..86262a13f7c6
--- /dev/null
+++ b/drivers/s390/char/tape_char.c
@@ -0,0 +1,492 @@
1/*
2 * drivers/s390/char/tape_char.c
3 * character device frontend for tape device driver
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
10 * Martin Schwidefsky <schwidefsky@de.ibm.com>
11 */
12
13#include <linux/config.h>
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/proc_fs.h>
17#include <linux/mtio.h>
18
19#include <asm/uaccess.h>
20
21#define TAPE_DBF_AREA tape_core_dbf
22
23#include "tape.h"
24#include "tape_std.h"
25#include "tape_class.h"
26
27#define PRINTK_HEADER "TAPE_CHAR: "
28
29#define TAPECHAR_MAJOR 0 /* get dynamic major */
30
31/*
32 * file operation structure for tape character frontend
33 */
34static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *);
35static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *);
36static int tapechar_open(struct inode *,struct file *);
37static int tapechar_release(struct inode *,struct file *);
38static int tapechar_ioctl(struct inode *, struct file *, unsigned int,
39 unsigned long);
40
41static struct file_operations tape_fops =
42{
43 .owner = THIS_MODULE,
44 .read = tapechar_read,
45 .write = tapechar_write,
46 .ioctl = tapechar_ioctl,
47 .open = tapechar_open,
48 .release = tapechar_release,
49};
50
51static int tapechar_major = TAPECHAR_MAJOR;
52
53/*
54 * This function is called for every new tapedevice
55 */
56int
57tapechar_setup_device(struct tape_device * device)
58{
59 char device_name[20];
60
61 sprintf(device_name, "ntibm%i", device->first_minor / 2);
62 device->nt = register_tape_dev(
63 &device->cdev->dev,
64 MKDEV(tapechar_major, device->first_minor),
65 &tape_fops,
66 device_name,
67 "non-rewinding"
68 );
69 device_name[0] = 'r';
70 device->rt = register_tape_dev(
71 &device->cdev->dev,
72 MKDEV(tapechar_major, device->first_minor + 1),
73 &tape_fops,
74 device_name,
75 "rewinding"
76 );
77
78 return 0;
79}
80
81void
82tapechar_cleanup_device(struct tape_device *device)
83{
84 unregister_tape_dev(device->rt);
85 device->rt = NULL;
86 unregister_tape_dev(device->nt);
87 device->nt = NULL;
88}
89
90/*
91 * Terminate write command (we write two TMs and skip backward over last)
92 * This ensures that the tape is always correctly terminated.
93 * When the user afterwards writes a new file, that write overwrites the
94 * second TM and therefore one TM will remain to separate the
95 * two files on the tape...
96 */
97static inline void
98tapechar_terminate_write(struct tape_device *device)
99{
100 if (tape_mtop(device, MTWEOF, 1) == 0 &&
101 tape_mtop(device, MTWEOF, 1) == 0)
102 tape_mtop(device, MTBSR, 1);
103}
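/*
 * Resulting layout on tape (TM = tapemark):
 *
 *	| file data | TM | TM |
 *	                 ^
 *	                 tape position after the final MTBSR
 *
 * The next write starts right behind the first TM and overwrites the
 * second one, so exactly one TM separates the two files.
 */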
104
105static inline int
106tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
107{
108 struct idal_buffer *new;
109
110 if (device->char_data.idal_buf != NULL &&
111 device->char_data.idal_buf->size == block_size)
112 return 0;
113
114 if (block_size > MAX_BLOCKSIZE) {
115 DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n",
116 block_size, MAX_BLOCKSIZE);
117		PRINT_ERR("Invalid blocksize (%zd > %d)\n",
118 block_size, MAX_BLOCKSIZE);
119 return -EINVAL;
120 }
121
122 /* The current idal buffer is not correct. Allocate a new one. */
123 new = idal_buffer_alloc(block_size, 0);
124 if (new == NULL)
125 return -ENOMEM;
126
127 if (device->char_data.idal_buf != NULL)
128 idal_buffer_free(device->char_data.idal_buf);
129
130 device->char_data.idal_buf = new;
131
132 return 0;
133}
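/*
 * Background: s390 channel programs address buffers that cross page
 * boundaries through indirect data address lists (IDALs), so the data
 * does not have to be physically contiguous. The idal_buffer helpers
 * used here and in the read/write paths (idal_buffer_alloc,
 * idal_buffer_to_user, ...) come from asm/idals.h and hide that
 * handling from the driver.
 */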
134
135/*
136 * Tape device read function
137 */
138ssize_t
139tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
140{
141 struct tape_device *device;
142 struct tape_request *request;
143 size_t block_size;
144 int rc;
145
146 DBF_EVENT(6, "TCHAR:read\n");
147 device = (struct tape_device *) filp->private_data;
148
149 /*
150 * If the tape isn't terminated yet, do it now. And since we then
151 * are at the end of the tape there wouldn't be anything to read
152	 * anyway. So we return immediately.
153 */
154	if (device->required_tapemarks) {
155 return tape_std_terminate_write(device);
156 }
157
158 /* Find out block size to use */
159 if (device->char_data.block_size != 0) {
160 if (count < device->char_data.block_size) {
161 DBF_EVENT(3, "TCHAR:read smaller than block "
162 "size was requested\n");
163 return -EINVAL;
164 }
165 block_size = device->char_data.block_size;
166 } else {
167 block_size = count;
168 }
169
170 rc = tapechar_check_idalbuffer(device, block_size);
171 if (rc)
172 return rc;
173
174#ifdef CONFIG_S390_TAPE_BLOCK
175 /* Changes position. */
176 device->blk_data.medium_changed = 1;
177#endif
178
179 DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size);
180 /* Let the discipline build the ccw chain. */
181 request = device->discipline->read_block(device, block_size);
182 if (IS_ERR(request))
183 return PTR_ERR(request);
184 /* Execute it. */
185 rc = tape_do_io(device, request);
186 if (rc == 0) {
187 rc = block_size - request->rescnt;
188 DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc);
189 filp->f_pos += rc;
190 /* Copy data from idal buffer to user space. */
191 if (idal_buffer_to_user(device->char_data.idal_buf,
192 data, rc) != 0)
193 rc = -EFAULT;
194 }
195 tape_free_request(request);
196 return rc;
197}
198
199/*
200 * Tape device write function
201 */
202ssize_t
203tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos)
204{
205 struct tape_device *device;
206 struct tape_request *request;
207 size_t block_size;
208 size_t written;
209 int nblocks;
210 int i, rc;
211
212 DBF_EVENT(6, "TCHAR:write\n");
213 device = (struct tape_device *) filp->private_data;
214 /* Find out block size and number of blocks */
215 if (device->char_data.block_size != 0) {
216 if (count < device->char_data.block_size) {
217 DBF_EVENT(3, "TCHAR:write smaller than block "
218 "size was requested\n");
219 return -EINVAL;
220 }
221 block_size = device->char_data.block_size;
222 nblocks = count / block_size;
223 } else {
224 block_size = count;
225 nblocks = 1;
226 }
227
228 rc = tapechar_check_idalbuffer(device, block_size);
229 if (rc)
230 return rc;
231
232#ifdef CONFIG_S390_TAPE_BLOCK
233 /* Changes position. */
234 device->blk_data.medium_changed = 1;
235#endif
236
237	DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size);
238 DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks);
239 /* Let the discipline build the ccw chain. */
240 request = device->discipline->write_block(device, block_size);
241 if (IS_ERR(request))
242 return PTR_ERR(request);
243 rc = 0;
244 written = 0;
245 for (i = 0; i < nblocks; i++) {
246 /* Copy data from user space to idal buffer. */
247 if (idal_buffer_from_user(device->char_data.idal_buf,
248 data, block_size)) {
249 rc = -EFAULT;
250 break;
251 }
252 rc = tape_do_io(device, request);
253 if (rc)
254 break;
255 DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
256 block_size - request->rescnt);
257 filp->f_pos += block_size - request->rescnt;
258 written += block_size - request->rescnt;
259 if (request->rescnt != 0)
260 break;
261 data += block_size;
262 }
263 tape_free_request(request);
264 if (rc == -ENOSPC) {
265 /*
266 * Ok, the device has no more space. It has NOT written
267 * the block.
268 */
269 if (device->discipline->process_eov)
270 device->discipline->process_eov(device);
271 if (written > 0)
272 rc = 0;
273
274 }
275
276 /*
277 * After doing a write we always need two tapemarks to correctly
278 * terminate the tape (one to terminate the file, the second to
279	 * flag the end of recorded data).
280 * Since process_eov positions the tape in front of the written
281 * tapemark it doesn't hurt to write two marks again.
282 */
283 if (!rc)
284 device->required_tapemarks = 2;
285
286 return rc ? rc : written;
287}
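/*
 * Example for the fixed block size path above: with a block size of
 * 4096 a write with count = 10000 yields nblocks = 2, so at most
 * 8192 bytes are transferred and 8192 is returned; the caller has to
 * resubmit the remainder, as with any short write.
 */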
288
289/*
290 * Character frontend tape device open function.
291 */
292int
293tapechar_open (struct inode *inode, struct file *filp)
294{
295 struct tape_device *device;
296 int minor, rc;
297
298 DBF_EVENT(6, "TCHAR:open: %i:%i\n",
299 imajor(filp->f_dentry->d_inode),
300 iminor(filp->f_dentry->d_inode));
301
302 if (imajor(filp->f_dentry->d_inode) != tapechar_major)
303 return -ENODEV;
304
305 minor = iminor(filp->f_dentry->d_inode);
306 device = tape_get_device(minor / TAPE_MINORS_PER_DEV);
307 if (IS_ERR(device)) {
308 DBF_EVENT(3, "TCHAR:open: tape_get_device() failed\n");
309 return PTR_ERR(device);
310 }
311
312
313 rc = tape_open(device);
314 if (rc == 0) {
315 filp->private_data = device;
316 return nonseekable_open(inode, filp);
317 }
318 tape_put_device(device);
319
320 return rc;
321}
322
323/*
324 * Character frontend tape device release function.
325 */
326
327int
328tapechar_release(struct inode *inode, struct file *filp)
329{
330 struct tape_device *device;
331
332 DBF_EVENT(6, "TCHAR:release: %x\n", iminor(inode));
333 device = (struct tape_device *) filp->private_data;
334
335 /*
336 * If this is the rewinding tape minor then rewind. In that case we
337 * write all required tapemarks. Otherwise only one to terminate the
338 * file.
339 */
340 if ((iminor(inode) & 1) != 0) {
341 if (device->required_tapemarks)
342 tape_std_terminate_write(device);
343 tape_mtop(device, MTREW, 1);
344 } else {
345 if (device->required_tapemarks > 1) {
346 if (tape_mtop(device, MTWEOF, 1) == 0)
347 device->required_tapemarks--;
348 }
349 }
350
351 if (device->char_data.idal_buf != NULL) {
352 idal_buffer_free(device->char_data.idal_buf);
353 device->char_data.idal_buf = NULL;
354 }
355 tape_release(device);
356 filp->private_data = tape_put_device(device);
357
358 return 0;
359}
360
361/*
362 * Tape device io controls.
363 */
364static int
365tapechar_ioctl(struct inode *inp, struct file *filp,
366 unsigned int no, unsigned long data)
367{
368 struct tape_device *device;
369 int rc;
370
371 DBF_EVENT(6, "TCHAR:ioct\n");
372
373 device = (struct tape_device *) filp->private_data;
374
375 if (no == MTIOCTOP) {
376 struct mtop op;
377
378 if (copy_from_user(&op, (char __user *) data, sizeof(op)) != 0)
379 return -EFAULT;
380 if (op.mt_count < 0)
381 return -EINVAL;
382
383 /*
384 * Operations that change tape position should write final
385 * tapemarks.
386 */
387 switch (op.mt_op) {
388 case MTFSF:
389 case MTBSF:
390 case MTFSR:
391 case MTBSR:
392 case MTREW:
393 case MTOFFL:
394 case MTEOM:
395 case MTRETEN:
396 case MTBSFM:
397 case MTFSFM:
398 case MTSEEK:
399#ifdef CONFIG_S390_TAPE_BLOCK
400 device->blk_data.medium_changed = 1;
401#endif
402 if (device->required_tapemarks)
403 tape_std_terminate_write(device);
404 default:
405 ;
406 }
407 rc = tape_mtop(device, op.mt_op, op.mt_count);
408
409 if (op.mt_op == MTWEOF && rc == 0) {
410 if (op.mt_count > device->required_tapemarks)
411 device->required_tapemarks = 0;
412 else
413 device->required_tapemarks -= op.mt_count;
414 }
415 return rc;
416 }
417 if (no == MTIOCPOS) {
418 /* MTIOCPOS: query the tape position. */
419 struct mtpos pos;
420
421 rc = tape_mtop(device, MTTELL, 1);
422 if (rc < 0)
423 return rc;
424 pos.mt_blkno = rc;
425 if (copy_to_user((char __user *) data, &pos, sizeof(pos)) != 0)
426 return -EFAULT;
427 return 0;
428 }
429 if (no == MTIOCGET) {
430 /* MTIOCGET: query the tape drive status. */
431 struct mtget get;
432
433 memset(&get, 0, sizeof(get));
434 get.mt_type = MT_ISUNKNOWN;
435 get.mt_resid = 0 /* device->devstat.rescnt */;
436 get.mt_dsreg = device->tape_state;
437 /* FIXME: mt_gstat, mt_erreg, mt_fileno */
438 get.mt_gstat = 0;
439 get.mt_erreg = 0;
440 get.mt_fileno = 0;
441 get.mt_gstat = device->tape_generic_status;
442
443 if (device->medium_state == MS_LOADED) {
444 rc = tape_mtop(device, MTTELL, 1);
445
446 if (rc < 0)
447 return rc;
448
449 if (rc == 0)
450 get.mt_gstat |= GMT_BOT(~0);
451
452 get.mt_blkno = rc;
453 }
454
455 if (copy_to_user((char __user *) data, &get, sizeof(get)) != 0)
456 return -EFAULT;
457
458 return 0;
459 }
460 /* Try the discipline ioctl function. */
461 if (device->discipline->ioctl_fn == NULL)
462 return -EINVAL;
463 return device->discipline->ioctl_fn(device, no, data);
464}
465
466/*
467 * Initialize character device frontend.
468 */
469int
470tapechar_init (void)
471{
472 dev_t dev;
473
474 if (alloc_chrdev_region(&dev, 0, 256, "tape") != 0)
475 return -1;
476
477 tapechar_major = MAJOR(dev);
478 PRINT_INFO("tape gets major %d for character devices\n", MAJOR(dev));
479
480 return 0;
481}
482
483/*
484 * cleanup
485 */
486void
487tapechar_exit(void)
488{
489 PRINT_INFO("tape releases major %d for character devices\n",
490 tapechar_major);
491 unregister_chrdev_region(MKDEV(tapechar_major, 0), 256);
492}
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
new file mode 100644
index 000000000000..0f8ffd4167ca
--- /dev/null
+++ b/drivers/s390/char/tape_class.c
@@ -0,0 +1,126 @@
1/*
2 * (C) Copyright IBM Corp. 2004
3 * tape_class.c ($Revision: 1.8 $)
4 *
5 * Tape class device support
6 *
7 * Author: Stefan Bader <shbader@de.ibm.com>
8 * Based on simple class device code by Greg K-H
9 */
10#include "tape_class.h"
11
12MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
13MODULE_DESCRIPTION(
14 "(C) Copyright IBM Corp. 2004 All Rights Reserved.\n"
15 "tape_class.c ($Revision: 1.8 $)"
16);
17MODULE_LICENSE("GPL");
18
19struct class_simple *tape_class;
20
21/*
22 * Register a tape device and return a pointer to the cdev structure.
23 *
24 * device
25 * The pointer to the struct device of the physical (base) device.
26 * dev
27 *	The intended major/minor number. The major number may be 0 to
28 *	get a dynamic major number.
29 * fops
30 *	The pointer to the driver's file operations for the tape device.
31 * device_name
32 *	The pointer to the name of the character device.
33 * mode_name
34 *	The name of the tape mode (used for the sysfs link).
35 */
36struct tape_class_device *register_tape_dev(
37 struct device * device,
38 dev_t dev,
39 struct file_operations *fops,
40 char * device_name,
41 char * mode_name)
42{
43 struct tape_class_device * tcd;
44 int rc;
45 char * s;
46
47 tcd = kmalloc(sizeof(struct tape_class_device), GFP_KERNEL);
48 if (!tcd)
49 return ERR_PTR(-ENOMEM);
50
51 memset(tcd, 0, sizeof(struct tape_class_device));
52 strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
53 for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
54 *s = '!';
55 strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
56 for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
57 *s = '!';
58
59 tcd->char_device = cdev_alloc();
60 if (!tcd->char_device) {
61 rc = -ENOMEM;
62 goto fail_with_tcd;
63 }
64
65 tcd->char_device->owner = fops->owner;
66 tcd->char_device->ops = fops;
67 tcd->char_device->dev = dev;
68
69 rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1);
70 if (rc)
71 goto fail_with_cdev;
72
73 tcd->class_device = class_simple_device_add(
74 tape_class,
75 tcd->char_device->dev,
76 device,
77 "%s", tcd->device_name
78 );
79 sysfs_create_link(
80 &device->kobj,
81 &tcd->class_device->kobj,
82 tcd->mode_name
83 );
84
85 return tcd;
86
87fail_with_cdev:
88 cdev_del(tcd->char_device);
89
90fail_with_tcd:
91 kfree(tcd);
92
93 return ERR_PTR(rc);
94}
95EXPORT_SYMBOL(register_tape_dev);
96
97void unregister_tape_dev(struct tape_class_device *tcd)
98{
99 if (tcd != NULL && !IS_ERR(tcd)) {
100 sysfs_remove_link(
101 &tcd->class_device->dev->kobj,
102 tcd->mode_name
103 );
104 class_simple_device_remove(tcd->char_device->dev);
105 cdev_del(tcd->char_device);
106 kfree(tcd);
107 }
108}
109EXPORT_SYMBOL(unregister_tape_dev);
110
111
112static int __init tape_init(void)
113{
114 tape_class = class_simple_create(THIS_MODULE, "tape390");
115
116 return 0;
117}
118
119static void __exit tape_exit(void)
120{
121 class_simple_destroy(tape_class);
122 tape_class = NULL;
123}
124
125postcore_initcall(tape_init);
126module_exit(tape_exit);
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
new file mode 100644
index 000000000000..33133ad00ba2
--- /dev/null
+++ b/drivers/s390/char/tape_class.h
@@ -0,0 +1,61 @@
1/*
2 * (C) Copyright IBM Corp. 2004 All Rights Reserved.
3 * tape_class.h ($Revision: 1.4 $)
4 *
5 * Tape class device support
6 *
7 * Author: Stefan Bader <shbader@de.ibm.com>
8 * Based on simple class device code by Greg K-H
9 */
10#ifndef __TAPE_CLASS_H__
11#define __TAPE_CLASS_H__
12
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/fs.h>
16#include <linux/major.h>
17#include <linux/kobject.h>
18#include <linux/kobj_map.h>
19#include <linux/cdev.h>
20
21#include <linux/device.h>
22#include <linux/kdev_t.h>
23
24#define TAPECLASS_NAME_LEN 32
25
26struct tape_class_device {
27 struct cdev * char_device;
28 struct class_device * class_device;
29 char device_name[TAPECLASS_NAME_LEN];
30 char mode_name[TAPECLASS_NAME_LEN];
31};
32
33/*
34 * Register a tape device and return a pointer to the tape class device
35 * created by the call.
36 *
37 * device
38 * The pointer to the struct device of the physical (base) device.
39 * dev
40 * The intended major/minor number. The major number may be 0 to
41 * get a dynamic major number.
42 * fops
43 * The pointer to the drivers file operations for the tape device.
44 * device_name
45 * Pointer to the logical device name (will also be used as kobject name
46 * of the cdev). This can also be called the name of the tape class
47 * device.
48 * mode_name
49 * Points to the name of the tape mode. This creates a link with that
50 * name from the physical device to the logical device (class).
51 */
52struct tape_class_device *register_tape_dev(
53 struct device * device,
54 dev_t dev,
55 struct file_operations *fops,
56 char * device_name,
57	char *			mode_name
58);
59void unregister_tape_dev(struct tape_class_device *tcd);
60
61#endif /* __TAPE_CLASS_H__ */
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
new file mode 100644
index 000000000000..e51046ab8adc
--- /dev/null
+++ b/drivers/s390/char/tape_core.c
@@ -0,0 +1,1242 @@
1/*
2 * drivers/s390/char/tape_core.c
3 * basic function of the tape device driver
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
10 * Martin Schwidefsky <schwidefsky@de.ibm.com>
11 */
12
13#include <linux/config.h>
14#include <linux/module.h>
15#include <linux/init.h> // for kernel parameters
16#include <linux/kmod.h> // for requesting modules
17#include <linux/spinlock.h> // for locks
18#include <linux/vmalloc.h>
19#include <linux/list.h>
20
21#include <asm/types.h> // for variable types
22
23#define TAPE_DBF_AREA tape_core_dbf
24
25#include "tape.h"
26#include "tape_std.h"
27
28#define PRINTK_HEADER "TAPE_CORE: "
29
30static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
31static void __tape_remove_request(struct tape_device *, struct tape_request *);
32
33/*
34 * One list to contain all tape devices of all disciplines, so
35 * we can assign the devices to minor numbers of the same major
36 * The list is protected by the rwlock
37 */
38static struct list_head tape_device_list = LIST_HEAD_INIT(tape_device_list);
39static DEFINE_RWLOCK(tape_device_lock);
40
41/*
42 * Pointer to debug area.
43 */
44debug_info_t *TAPE_DBF_AREA = NULL;
45EXPORT_SYMBOL(TAPE_DBF_AREA);
46
47/*
48 * Printable strings for tape enumerations.
49 */
50const char *tape_state_verbose[TS_SIZE] =
51{
52 [TS_UNUSED] = "UNUSED",
53 [TS_IN_USE] = "IN_USE",
54 [TS_BLKUSE] = "BLKUSE",
55 [TS_INIT] = "INIT ",
56 [TS_NOT_OPER] = "NOT_OP"
57};
58
59const char *tape_op_verbose[TO_SIZE] =
60{
61 [TO_BLOCK] = "BLK", [TO_BSB] = "BSB",
62 [TO_BSF] = "BSF", [TO_DSE] = "DSE",
63 [TO_FSB] = "FSB", [TO_FSF] = "FSF",
64 [TO_LBL] = "LBL", [TO_NOP] = "NOP",
65 [TO_RBA] = "RBA", [TO_RBI] = "RBI",
66 [TO_RFO] = "RFO", [TO_REW] = "REW",
67 [TO_RUN] = "RUN", [TO_WRI] = "WRI",
68 [TO_WTM] = "WTM", [TO_MSEN] = "MSN",
69 [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF",
70 [TO_READ_ATTMSG] = "RAT",
71 [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS",
72 [TO_UNASSIGN] = "UAS"
73};
74
75static inline int
76busid_to_int(char *bus_id)
77{
78 int dec;
79 int d;
80 char * s;
81
82 for(s = bus_id, d = 0; *s != '\0' && *s != '.'; s++)
83 d = (d * 10) + (*s - '0');
84 dec = d;
85 for(s++, d = 0; *s != '\0' && *s != '.'; s++)
86 d = (d * 10) + (*s - '0');
87 dec = (dec << 8) + d;
88
89 for(s++; *s != '\0'; s++) {
90 if (*s >= '0' && *s <= '9') {
91 d = *s - '0';
92 } else if (*s >= 'a' && *s <= 'f') {
93 d = *s - 'a' + 10;
94 } else {
95 d = *s - 'A' + 10;
96 }
97 dec = (dec << 4) + d;
98 }
99
100 return dec;
101}
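/*
 * Example: busid_to_int("0.1.c1a0") returns 0x0001c1a0: css id 0 and
 * subchannel set id 1 packed into the upper bytes, the hexadecimal
 * device number 0xc1a0 in the lower 16 bits. The function assumes a
 * well-formed bus id and does no validation.
 */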
102
103/*
104 * Some channel attached tape specific attributes.
105 *
106 * FIXME: In the future the first_minor and blocksize attribute should be
107 * replaced by a link to the cdev tree.
108 */
109static ssize_t
110tape_medium_state_show(struct device *dev, char *buf)
111{
112 struct tape_device *tdev;
113
114 tdev = (struct tape_device *) dev->driver_data;
115 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
116}
117
118static
119DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);
120
121static ssize_t
122tape_first_minor_show(struct device *dev, char *buf)
123{
124 struct tape_device *tdev;
125
126 tdev = (struct tape_device *) dev->driver_data;
127 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
128}
129
130static
131DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);
132
133static ssize_t
134tape_state_show(struct device *dev, char *buf)
135{
136 struct tape_device *tdev;
137
138 tdev = (struct tape_device *) dev->driver_data;
139 return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
140 "OFFLINE" : tape_state_verbose[tdev->tape_state]);
141}
142
143static
144DEVICE_ATTR(state, 0444, tape_state_show, NULL);
145
146static ssize_t
147tape_operation_show(struct device *dev, char *buf)
148{
149 struct tape_device *tdev;
150 ssize_t rc;
151
152 tdev = (struct tape_device *) dev->driver_data;
153 if (tdev->first_minor < 0)
154 return scnprintf(buf, PAGE_SIZE, "N/A\n");
155
156 spin_lock_irq(get_ccwdev_lock(tdev->cdev));
157 if (list_empty(&tdev->req_queue))
158 rc = scnprintf(buf, PAGE_SIZE, "---\n");
159 else {
160 struct tape_request *req;
161
162 req = list_entry(tdev->req_queue.next, struct tape_request,
163 list);
164 rc = scnprintf(buf,PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
165 }
166 spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
167 return rc;
168}
169
170static
171DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);
172
173static ssize_t
174tape_blocksize_show(struct device *dev, char *buf)
175{
176 struct tape_device *tdev;
177
178 tdev = (struct tape_device *) dev->driver_data;
179
180 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
181}
182
183static
184DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);
185
186static struct attribute *tape_attrs[] = {
187 &dev_attr_medium_state.attr,
188 &dev_attr_first_minor.attr,
189 &dev_attr_state.attr,
190 &dev_attr_operation.attr,
191 &dev_attr_blocksize.attr,
192 NULL
193};
194
195static struct attribute_group tape_attr_group = {
196 .attrs = tape_attrs,
197};
198
199/*
200 * Tape state functions
201 */
202void
203tape_state_set(struct tape_device *device, enum tape_state newstate)
204{
205 const char *str;
206
207 if (device->tape_state == TS_NOT_OPER) {
208 DBF_EVENT(3, "ts_set err: not oper\n");
209 return;
210 }
211 DBF_EVENT(4, "ts. dev: %x\n", device->first_minor);
212	if (device->tape_state < TS_SIZE && device->tape_state >= 0)
213		str = tape_state_verbose[device->tape_state];
214	else
215		str = "UNKNOWN TS";
216	DBF_EVENT(4, "old ts: %s\n", str);
217	if (newstate < TS_SIZE && newstate >= 0)
218		str = tape_state_verbose[newstate];
219	else
220		str = "UNKNOWN TS";
221	DBF_EVENT(4, "new ts: %s\n", str);
228 device->tape_state = newstate;
229 wake_up(&device->state_change_wq);
230}
231
232void
233tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
234{
235 if (device->medium_state == newstate)
236 return;
237 switch(newstate){
238 case MS_UNLOADED:
239 device->tape_generic_status |= GMT_DR_OPEN(~0);
240 PRINT_INFO("(%s): Tape is unloaded\n",
241 device->cdev->dev.bus_id);
242 break;
243 case MS_LOADED:
244 device->tape_generic_status &= ~GMT_DR_OPEN(~0);
245 PRINT_INFO("(%s): Tape has been mounted\n",
246 device->cdev->dev.bus_id);
247 break;
248 default:
249 // print nothing
250 break;
251 }
252 device->medium_state = newstate;
253 wake_up(&device->state_change_wq);
254}
255
256/*
257 * Stop running ccw. Has to be called with the device lock held.
258 */
259static inline int
260__tape_halt_io(struct tape_device *device, struct tape_request *request)
261{
262 int retries;
263 int rc;
264
265 /* Check if interrupt has already been processed */
266 if (request->callback == NULL)
267 return 0;
268
269 rc = 0;
270 for (retries = 0; retries < 5; retries++) {
271 rc = ccw_device_clear(device->cdev, (long) request);
272
273 if (rc == 0) { /* Termination successful */
274 request->rc = -EIO;
275 request->status = TAPE_REQUEST_DONE;
276 return 0;
277 }
278
279 if (rc == -ENODEV)
280 DBF_EXCEPTION(2, "device gone, retry\n");
281 else if (rc == -EIO)
282 DBF_EXCEPTION(2, "I/O error, retry\n");
283 else if (rc == -EBUSY)
284 DBF_EXCEPTION(2, "device busy, retry late\n");
285 else
286 BUG();
287 }
288
289 return rc;
290}
291
292/*
293 * Add device into the sorted list, giving it the first
294 * available minor number.
295 */
296static int
297tape_assign_minor(struct tape_device *device)
298{
299 struct tape_device *tmp;
300 int minor;
301
302 minor = 0;
303 write_lock(&tape_device_lock);
304 list_for_each_entry(tmp, &tape_device_list, node) {
305 if (minor < tmp->first_minor)
306 break;
307 minor += TAPE_MINORS_PER_DEV;
308 }
309 if (minor >= 256) {
310 write_unlock(&tape_device_lock);
311 return -ENODEV;
312 }
313 device->first_minor = minor;
314 list_add_tail(&device->node, &tmp->node);
315 write_unlock(&tape_device_lock);
316 return 0;
317}
318
319/* remove device from the list */
320static void
321tape_remove_minor(struct tape_device *device)
322{
323 write_lock(&tape_device_lock);
324 list_del_init(&device->node);
325 device->first_minor = -1;
326 write_unlock(&tape_device_lock);
327}
328
329/*
330 * Set a device online.
331 *
332 * This function is called by the common I/O layer to move a device from the
333 * detected-but-offline state into the online state.
334 * If we return an error (rc < 0) the device remains offline. This can
335 * happen, for example, if the device is assigned to another host.
336 */
337int
338tape_generic_online(struct tape_device *device,
339 struct tape_discipline *discipline)
340{
341 int rc;
342
343 DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);
344
345 if (device->tape_state != TS_INIT) {
346 DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
347 return -EINVAL;
348 }
349
350 /* Let the discipline have a go at the device. */
351 device->discipline = discipline;
352 if (!try_module_get(discipline->owner)) {
353 PRINT_ERR("Cannot get module. Module gone.\n");
354 return -EINVAL;
355 }
356
357 rc = discipline->setup_device(device);
358 if (rc)
359 goto out;
360 rc = tape_assign_minor(device);
361 if (rc)
362 goto out_discipline;
363
364 rc = tapechar_setup_device(device);
365 if (rc)
366 goto out_minor;
367 rc = tapeblock_setup_device(device);
368 if (rc)
369 goto out_char;
370
371 tape_state_set(device, TS_UNUSED);
372
373 DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);
374
375 return 0;
376
377out_char:
378 tapechar_cleanup_device(device);
379out_discipline:
380 device->discipline->cleanup_device(device);
381 device->discipline = NULL;
382out_minor:
383 tape_remove_minor(device);
384out:
385 module_put(discipline->owner);
386 return rc;
387}
388
389static inline void
390tape_cleanup_device(struct tape_device *device)
391{
392 tapeblock_cleanup_device(device);
393 tapechar_cleanup_device(device);
394 device->discipline->cleanup_device(device);
395 module_put(device->discipline->owner);
396 tape_remove_minor(device);
397 tape_med_state_set(device, MS_UNKNOWN);
398}
399
400/*
401 * Set device offline.
402 *
403 * Called by the common I/O layer if the drive should be set offline on user
404 * request. We may prevent this by returning an error.
405 * Manual offline is only allowed while the drive is not in use.
406 */
407int
408tape_generic_offline(struct tape_device *device)
409{
410 if (!device) {
411 PRINT_ERR("tape_generic_offline: no such device\n");
412 return -ENODEV;
413 }
414
415 DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
416 device->cdev_id, device);
417
418 spin_lock_irq(get_ccwdev_lock(device->cdev));
419 switch (device->tape_state) {
420 case TS_INIT:
421 case TS_NOT_OPER:
422 spin_unlock_irq(get_ccwdev_lock(device->cdev));
423 break;
424 case TS_UNUSED:
425 tape_state_set(device, TS_INIT);
426 spin_unlock_irq(get_ccwdev_lock(device->cdev));
427 tape_cleanup_device(device);
428 break;
429 default:
430 DBF_EVENT(3, "(%08x): Set offline failed "
431 "- drive in use.\n",
432 device->cdev_id);
433 PRINT_WARN("(%s): Set offline failed "
434 "- drive in use.\n",
435 device->cdev->dev.bus_id);
436 spin_unlock_irq(get_ccwdev_lock(device->cdev));
437 return -EBUSY;
438 }
439
440 DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
441 return 0;
442}
443
444/*
445 * Allocate memory for a new device structure.
446 */
447static struct tape_device *
448tape_alloc_device(void)
449{
450 struct tape_device *device;
451
452 device = (struct tape_device *)
453 kmalloc(sizeof(struct tape_device), GFP_KERNEL);
454 if (device == NULL) {
455 DBF_EXCEPTION(2, "ti:no mem\n");
456 PRINT_INFO ("can't allocate memory for "
457 "tape info structure\n");
458 return ERR_PTR(-ENOMEM);
459 }
460 memset(device, 0, sizeof(struct tape_device));
461 device->modeset_byte = (char *) kmalloc(1, GFP_KERNEL | GFP_DMA);
462 if (device->modeset_byte == NULL) {
463 DBF_EXCEPTION(2, "ti:no mem\n");
464 PRINT_INFO("can't allocate memory for modeset byte\n");
465 kfree(device);
466 return ERR_PTR(-ENOMEM);
467 }
468 INIT_LIST_HEAD(&device->req_queue);
469 INIT_LIST_HEAD(&device->node);
470 init_waitqueue_head(&device->state_change_wq);
471 device->tape_state = TS_INIT;
472 device->medium_state = MS_UNKNOWN;
473 *device->modeset_byte = 0;
474 device->first_minor = -1;
475 atomic_set(&device->ref_count, 1);
476
477 return device;
478}
479
480/*
481 * Get a reference to an existing device structure. This will automatically
482 * increment the reference count.
483 */
484struct tape_device *
485tape_get_device_reference(struct tape_device *device)
486{
487 DBF_EVENT(4, "tape_get_device_reference(%p) = %i\n", device,
488 atomic_inc_return(&device->ref_count));
489
490 return device;
491}
492
493/*
494 * Decrease the reference counter of a device structure. If the
495 * reference counter reaches zero free the device structure.
496 * The function returns a NULL pointer to be used by the caller
497 * for clearing reference pointers.
498 */
499struct tape_device *
500tape_put_device(struct tape_device *device)
501{
502 int remain;
503
504 remain = atomic_dec_return(&device->ref_count);
505 if (remain > 0) {
506 DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, remain);
507 } else {
508 if (remain < 0) {
509 DBF_EVENT(4, "put device without reference\n");
510 PRINT_ERR("put device without reference\n");
511 } else {
512 DBF_EVENT(4, "tape_free_device(%p)\n", device);
513 kfree(device->modeset_byte);
514 kfree(device);
515 }
516 }
517
518 return NULL;
519}
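The two functions above implement a manual get/put scheme: every pointer held to a tape_device should be backed by a reference, and tape_put_device() deliberately returns NULL so the caller can clear its pointer in the same statement. A sketch of the intended pairing (tape_example_use is hypothetical, not part of the driver):

	/* Hypothetical caller showing the get/put convention. */
	static void
	tape_example_use(struct tape_device *device)
	{
		struct tape_device *ref;

		/* Take our own reference before using the pointer. */
		ref = tape_get_device_reference(device);
		/* ... the structure is guaranteed to stay alive here ... */
		/* Drop the reference and clear the pointer in one step. */
		ref = tape_put_device(ref);
	}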
520
521/*
522 * Find tape device by a device index.
523 */
524struct tape_device *
525tape_get_device(int devindex)
526{
527 struct tape_device *device, *tmp;
528
529 device = ERR_PTR(-ENODEV);
530 read_lock(&tape_device_lock);
531 list_for_each_entry(tmp, &tape_device_list, node) {
532 if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
533 device = tape_get_device_reference(tmp);
534 break;
535 }
536 }
537 read_unlock(&tape_device_lock);
538 return device;
539}
540
541/*
542 * Driverfs tape probe function.
543 */
544int
545tape_generic_probe(struct ccw_device *cdev)
546{
547 struct tape_device *device;
548
549 device = tape_alloc_device();
550 if (IS_ERR(device))
551 return -ENODEV;
552 PRINT_INFO("tape device %s found\n", cdev->dev.bus_id);
553 cdev->dev.driver_data = device;
554 device->cdev = cdev;
555 device->cdev_id = busid_to_int(cdev->dev.bus_id);
556 cdev->handler = __tape_do_irq;
557
558 ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
559 sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
560
561 return 0;
562}
563
564static inline void
565__tape_discard_requests(struct tape_device *device)
566{
567 struct tape_request * request;
568 struct list_head * l, *n;
569
570 list_for_each_safe(l, n, &device->req_queue) {
571 request = list_entry(l, struct tape_request, list);
572 if (request->status == TAPE_REQUEST_IN_IO)
573 request->status = TAPE_REQUEST_DONE;
574 list_del(&request->list);
575
576 /* Decrease ref_count for removed request. */
577 request->device = tape_put_device(device);
578 request->rc = -EIO;
579 if (request->callback != NULL)
580 request->callback(request, request->callback_data);
581 }
582}
583
584/*
585 * Driverfs tape remove function.
586 *
587 * This function is called whenever the common I/O layer detects that the
588 * device is gone. This can happen at any time and we cannot refuse.
589 */
590void
591tape_generic_remove(struct ccw_device *cdev)
592{
593 struct tape_device * device;
594
595 device = cdev->dev.driver_data;
596 if (!device) {
597 PRINT_ERR("No device pointer in tape_generic_remove!\n");
598 return;
599 }
600 DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);
601
602 spin_lock_irq(get_ccwdev_lock(device->cdev));
603 switch (device->tape_state) {
604 case TS_INIT:
605 tape_state_set(device, TS_NOT_OPER);
606 case TS_NOT_OPER:
607 /*
608 * Nothing to do.
609 */
610 spin_unlock_irq(get_ccwdev_lock(device->cdev));
611 break;
612 case TS_UNUSED:
613 /*
614 * Need only to release the device.
615 */
616 tape_state_set(device, TS_NOT_OPER);
617 spin_unlock_irq(get_ccwdev_lock(device->cdev));
618 tape_cleanup_device(device);
619 break;
620 default:
621 /*
622 * There may be requests on the queue. We will not get
623 * an interrupt for a request that was running. So we
624 * just post them all as I/O errors.
625 */
626 DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
627 device->cdev_id);
628 PRINT_WARN("(%s): Drive in use vanished - "
629 "expect trouble!\n",
630 device->cdev->dev.bus_id);
631 PRINT_WARN("State was %i\n", device->tape_state);
632 tape_state_set(device, TS_NOT_OPER);
633 __tape_discard_requests(device);
634 spin_unlock_irq(get_ccwdev_lock(device->cdev));
635 tape_cleanup_device(device);
636 }
637
638 if (cdev->dev.driver_data != NULL) {
639 sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
640 cdev->dev.driver_data = tape_put_device(cdev->dev.driver_data);
641 }
642}
643
644/*
645 * Allocate a new tape ccw request
646 */
647struct tape_request *
648tape_alloc_request(int cplength, int datasize)
649{
650 struct tape_request *request;
651
652 if (datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
653 BUG();
654
655 DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);
656
657 request = (struct tape_request *) kmalloc(sizeof(struct tape_request),
658 GFP_KERNEL);
659 if (request == NULL) {
660 DBF_EXCEPTION(1, "cqra nomem\n");
661 return ERR_PTR(-ENOMEM);
662 }
663 memset(request, 0, sizeof(struct tape_request));
664 /* allocate channel program */
665 if (cplength > 0) {
666 request->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
667 GFP_ATOMIC | GFP_DMA);
668 if (request->cpaddr == NULL) {
669 DBF_EXCEPTION(1, "cqra nomem\n");
670 kfree(request);
671 return ERR_PTR(-ENOMEM);
672 }
673 memset(request->cpaddr, 0, cplength*sizeof(struct ccw1));
674 }
675 /* alloc small kernel buffer */
676 if (datasize > 0) {
677 request->cpdata = kmalloc(datasize, GFP_KERNEL | GFP_DMA);
678 if (request->cpdata == NULL) {
679 DBF_EXCEPTION(1, "cqra nomem\n");
680 if (request->cpaddr != NULL)
681 kfree(request->cpaddr);
682 kfree(request);
683 return ERR_PTR(-ENOMEM);
684 }
685 memset(request->cpdata, 0, datasize);
686 }
687 DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
688 request->cpdata);
689
690 return request;
691}
692
693/*
694 * Free tape ccw request
695 */
696void
697tape_free_request (struct tape_request * request)
698{
699 DBF_LH(6, "Free request %p\n", request);
700
701 if (request->device != NULL) {
702 request->device = tape_put_device(request->device);
703 }
704 if (request->cpdata != NULL)
705 kfree(request->cpdata);
706 if (request->cpaddr != NULL)
707 kfree(request->cpaddr);
708 kfree(request);
709}
710
711static inline void
712__tape_do_io_list(struct tape_device *device)
713{
714 struct list_head *l, *n;
715 struct tape_request *request;
716 int rc;
717
718 DBF_LH(6, "__tape_do_io_list(%p)\n", device);
719 /*
720 * Try to start each request on the request queue until one is
721 * started successfully.
722 */
723 list_for_each_safe(l, n, &device->req_queue) {
724 request = list_entry(l, struct tape_request, list);
725#ifdef CONFIG_S390_TAPE_BLOCK
726 if (request->op == TO_BLOCK)
727 device->discipline->check_locate(device, request);
728#endif
729 rc = ccw_device_start(device->cdev, request->cpaddr,
730 (unsigned long) request, 0x00,
731 request->options);
732 if (rc == 0) {
733 request->status = TAPE_REQUEST_IN_IO;
734 break;
735 }
736 /* Start failed. Remove request and indicate failure. */
737 DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc);
738
739 /* Set ending status and do callback. */
740 request->rc = rc;
741 request->status = TAPE_REQUEST_DONE;
742 __tape_remove_request(device, request);
743 }
744}
745
746static void
747__tape_remove_request(struct tape_device *device, struct tape_request *request)
748{
749 /* Remove from request queue. */
750 list_del(&request->list);
751
752 /* Do callback. */
753 if (request->callback != NULL)
754 request->callback(request, request->callback_data);
755
756 /* Start next request. */
757 if (!list_empty(&device->req_queue))
758 __tape_do_io_list(device);
759}
760
761/*
762 * Write sense data to console/dbf
763 */
764void
765tape_dump_sense(struct tape_device* device, struct tape_request *request,
766 struct irb *irb)
767{
768 unsigned int *sptr;
769
770 PRINT_INFO("-------------------------------------------------\n");
771 PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n",
772 irb->scsw.dstat, irb->scsw.cstat, irb->scsw.cpa);
773 PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id);
774 if (request != NULL)
775 PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]);
776
777 sptr = (unsigned int *) irb->ecw;
778 PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
779 sptr[0], sptr[1], sptr[2], sptr[3]);
780 PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
781 sptr[4], sptr[5], sptr[6], sptr[7]);
782 PRINT_INFO("--------------------------------------------------\n");
783}
784
785/*
786 * Write sense data to dbf
787 */
788void
789tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
790 struct irb *irb)
791{
792 unsigned int *sptr;
793 const char* op;
794
795 if (request != NULL)
796 op = tape_op_verbose[request->op];
797 else
798 op = "---";
799 DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n",
800 irb->scsw.dstat, irb->scsw.cstat);
801 DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
802 sptr = (unsigned int *) irb->ecw;
803 DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
804 DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
805 DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
806 DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
807}
808
809/*
810 * I/O helper function. Adds the request to the request queue
811 * and starts it if the tape is idle. Has to be called with
812 * the device lock held.
813 */
814static inline int
815__tape_do_io(struct tape_device *device, struct tape_request *request)
816{
817 int rc;
818
819 switch (request->op) {
820 case TO_MSEN:
821 case TO_ASSIGN:
822 case TO_UNASSIGN:
823 case TO_READ_ATTMSG:
824 if (device->tape_state == TS_INIT)
825 break;
826 if (device->tape_state == TS_UNUSED)
827 break;
828 default:
829 if (device->tape_state == TS_BLKUSE)
830 break;
831 if (device->tape_state != TS_IN_USE)
832 return -ENODEV;
833 }
834
835 /* Increase use count of device for the added request. */
836 request->device = tape_get_device_reference(device);
837
838 if (list_empty(&device->req_queue)) {
839 /* No other requests are on the queue. Start this one. */
840#ifdef CONFIG_S390_TAPE_BLOCK
841 if (request->op == TO_BLOCK)
842 device->discipline->check_locate(device, request);
843#endif
844 rc = ccw_device_start(device->cdev, request->cpaddr,
845 (unsigned long) request, 0x00,
846 request->options);
847 if (rc) {
848 DBF_EVENT(1, "tape: DOIO failed with rc = %i\n", rc);
849 return rc;
850 }
851 DBF_LH(5, "Request %p added for execution.\n", request);
852 list_add(&request->list, &device->req_queue);
853 request->status = TAPE_REQUEST_IN_IO;
854 } else {
855 DBF_LH(5, "Request %p add to queue.\n", request);
856 list_add_tail(&request->list, &device->req_queue);
857 request->status = TAPE_REQUEST_QUEUED;
858 }
859 return 0;
860}
861
862/*
863 * Add the request to the request queue, try to start it if the
864 * tape is idle. Return without waiting for end of i/o.
865 */
866int
867tape_do_io_async(struct tape_device *device, struct tape_request *request)
868{
869 int rc;
870
871 DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);
872
873 spin_lock_irq(get_ccwdev_lock(device->cdev));
874 /* Add request to request queue and try to start it. */
875 rc = __tape_do_io(device, request);
876 spin_unlock_irq(get_ccwdev_lock(device->cdev));
877 return rc;
878}
879
880/*
881 * tape_do_io/__tape_wake_up
882 * Add the request to the request queue, try to start it if the
883 * tape is idle and wait uninterruptibly for its completion.
884 */
885static void
886__tape_wake_up(struct tape_request *request, void *data)
887{
888 request->callback = NULL;
889 wake_up((wait_queue_head_t *) data);
890}
891
892int
893tape_do_io(struct tape_device *device, struct tape_request *request)
894{
895 wait_queue_head_t wq;
896 int rc;
897
898 init_waitqueue_head(&wq);
899 spin_lock_irq(get_ccwdev_lock(device->cdev));
900 /* Setup callback */
901 request->callback = __tape_wake_up;
902 request->callback_data = &wq;
903 /* Add request to request queue and try to start it. */
904 rc = __tape_do_io(device, request);
905 spin_unlock_irq(get_ccwdev_lock(device->cdev));
906 if (rc)
907 return rc;
908 /* Request added to the queue. Wait for its completion. */
909 wait_event(wq, (request->callback == NULL));
910 /* Get rc from request */
911 return request->rc;
912}
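Putting tape_alloc_request(), tape_do_io() and tape_free_request() together, a synchronous caller typically looks like the sketch below. It mirrors the pattern the tape_std_* routines use later in this patch (the helper names MODE_SET_DB, NOP and TO_NOP come from tape.h/tape_std.h); the function itself is hypothetical:

	/* Hypothetical synchronous caller: build a two-CCW channel program
	 * (mode set + no-op), run it to completion, free the request. */
	static int
	tape_example_nop(struct tape_device *device)
	{
		struct tape_request *request;
		int rc;

		request = tape_alloc_request(2, 0);
		if (IS_ERR(request))
			return PTR_ERR(request);
		request->op = TO_NOP;
		tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
		tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
		rc = tape_do_io(device, request);
		tape_free_request(request);
		return rc;
	}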
913
914/*
915 * tape_do_io_interruptible/__tape_wake_up_interruptible
916 * Add the request to the request queue, try to start it if the
917 * tape is idle and wait interruptibly for its completion.
918 */
919static void
920__tape_wake_up_interruptible(struct tape_request *request, void *data)
921{
922 request->callback = NULL;
923 wake_up_interruptible((wait_queue_head_t *) data);
924}
925
926int
927tape_do_io_interruptible(struct tape_device *device,
928 struct tape_request *request)
929{
930 wait_queue_head_t wq;
931 int rc;
932
933 init_waitqueue_head(&wq);
934 spin_lock_irq(get_ccwdev_lock(device->cdev));
935 /* Setup callback */
936 request->callback = __tape_wake_up_interruptible;
937 request->callback_data = &wq;
938 rc = __tape_do_io(device, request);
939 spin_unlock_irq(get_ccwdev_lock(device->cdev));
940 if (rc)
941 return rc;
942 /* Request added to the queue. Wait for its completion. */
943 rc = wait_event_interruptible(wq, (request->callback == NULL));
944 if (rc != -ERESTARTSYS)
945 /* Request finished normally. */
946 return request->rc;
947 /* Interrupted by a signal. We have to stop the current request. */
948 spin_lock_irq(get_ccwdev_lock(device->cdev));
949 rc = __tape_halt_io(device, request);
950 if (rc == 0) {
951 DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
952 rc = -ERESTARTSYS;
953 }
954 spin_unlock_irq(get_ccwdev_lock(device->cdev));
955 return rc;
956}
957
958/*
959 * Handle requests that return an i/o error in the irb.
960 */
961static inline void
962tape_handle_killed_request(
963 struct tape_device *device,
964 struct tape_request *request)
965{
966 if (request != NULL) {
967 /* Set ending status. FIXME: Should the request be retried? */
968 request->rc = -EIO;
969 request->status = TAPE_REQUEST_DONE;
970 __tape_remove_request(device, request);
971 } else {
972 __tape_do_io_list(device);
973 }
974}
975
976/*
977 * Tape interrupt routine, called from the ccw_device layer
978 */
979static void
980__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
981{
982 struct tape_device *device;
983 struct tape_request *request;
984 int final;
985 int rc;
986
987 device = (struct tape_device *) cdev->dev.driver_data;
988 if (device == NULL) {
989 PRINT_ERR("could not get device structure for %s "
990 "in interrupt\n", cdev->dev.bus_id);
991 return;
992 }
993 request = (struct tape_request *) intparm;
994
995 DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);
996
997 /* On special conditions irb is an error pointer */
998 if (IS_ERR(irb)) {
999 switch (PTR_ERR(irb)) {
1000 case -ETIMEDOUT:
1001 PRINT_WARN("(%s): Request timed out\n",
1002 cdev->dev.bus_id);
1003 case -EIO:
1004 tape_handle_killed_request(device, request);
1005 break;
1006 default:
1007 PRINT_ERR("(%s): Unexpected i/o error %li\n",
1008 cdev->dev.bus_id,
1009 PTR_ERR(irb));
1010 }
1011 return;
1012 }
1013
1014 /* May be an unsolicited irq */
1015 if (request != NULL)
1016 request->rescnt = irb->scsw.count;
1017
1018 if (irb->scsw.dstat != 0x0c) {
1019 /* Set the 'ONLINE' flag depending on sense byte 1 */
1020 if (*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
1021 device->tape_generic_status |= GMT_ONLINE(~0);
1022 else
1023 device->tape_generic_status &= ~GMT_ONLINE(~0);
1024
1025 /*
1026 * Any request that does not come back with channel end
1027 * and device end is unusual. Log the sense data.
1028 */
1029 DBF_EVENT(3,"-- Tape Interrupthandler --\n");
1030 tape_dump_sense_dbf(device, request, irb);
1031 } else {
1032 /* Upon normal completion the device _is_ online */
1033 device->tape_generic_status |= GMT_ONLINE(~0);
1034 }
1035 if (device->tape_state == TS_NOT_OPER) {
1036 DBF_EVENT(6, "tape:device is not operational\n");
1037 return;
1038 }
1039
1040 /*
1041 * Requests that were canceled still come back with an interrupt.
1042 * To detect these requests the state is set to TAPE_REQUEST_DONE.
1043 */
1044 if (request != NULL && request->status == TAPE_REQUEST_DONE) {
1045 __tape_remove_request(device, request);
1046 return;
1047 }
1048
1049 rc = device->discipline->irq(device, request, irb);
1050 /*
1051 * rc < 0 : request finished unsuccessfully.
1052 * rc == TAPE_IO_SUCCESS: request finished successfully.
1053 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
1054 * rc == TAPE_IO_RETRY: request finished but needs another go.
1055 * rc == TAPE_IO_STOP: request needs to get terminated.
1056 */
1057 final = 0;
1058 switch (rc) {
1059 case TAPE_IO_SUCCESS:
1060 /* Upon normal completion the device _is_ online */
1061 device->tape_generic_status |= GMT_ONLINE(~0);
1062 final = 1;
1063 break;
1064 case TAPE_IO_PENDING:
1065 break;
1066 case TAPE_IO_RETRY:
1067#ifdef CONFIG_S390_TAPE_BLOCK
1068 if (request->op == TO_BLOCK)
1069 device->discipline->check_locate(device, request);
1070#endif
1071 rc = ccw_device_start(cdev, request->cpaddr,
1072 (unsigned long) request, 0x00,
1073 request->options);
1074 if (rc) {
1075 DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc);
1076 final = 1;
1077 }
1078 break;
1079 case TAPE_IO_STOP:
1080 __tape_halt_io(device, request);
1081 break;
1082 default:
1083 if (rc > 0) {
1084 DBF_EVENT(6, "xunknownrc\n");
1085 PRINT_ERR("Invalid return code from discipline "
1086 "interrupt function.\n");
1087 rc = -EIO;
1088 }
1089 final = 1;
1090 break;
1091 }
1092 if (final) {
1093 /* May be an unsolicited irq */
1094 if (request != NULL) {
1095 /* Set ending status. */
1096 request->rc = rc;
1097 request->status = TAPE_REQUEST_DONE;
1098 __tape_remove_request(device, request);
1099 } else {
1100 __tape_do_io_list(device);
1101 }
1102 }
1103}
1104
1105/*
1106 * Tape device open function used by tape_char & tape_block frontends.
1107 */
1108int
1109tape_open(struct tape_device *device)
1110{
1111 int rc;
1112
1113 spin_lock(get_ccwdev_lock(device->cdev));
1114 if (device->tape_state == TS_NOT_OPER) {
1115 DBF_EVENT(6, "TAPE:nodev\n");
1116 rc = -ENODEV;
1117 } else if (device->tape_state == TS_IN_USE) {
1118 DBF_EVENT(6, "TAPE:dbusy\n");
1119 rc = -EBUSY;
1120 } else if (device->tape_state == TS_BLKUSE) {
1121 DBF_EVENT(6, "TAPE:dbusy\n");
1122 rc = -EBUSY;
1123 } else if (device->discipline != NULL &&
1124 !try_module_get(device->discipline->owner)) {
1125 DBF_EVENT(6, "TAPE:nodisc\n");
1126 rc = -ENODEV;
1127 } else {
1128 tape_state_set(device, TS_IN_USE);
1129 rc = 0;
1130 }
1131 spin_unlock(get_ccwdev_lock(device->cdev));
1132 return rc;
1133}
1134
1135/*
1136 * Tape device release function used by tape_char & tape_block frontends.
1137 */
1138int
1139tape_release(struct tape_device *device)
1140{
1141 spin_lock(get_ccwdev_lock(device->cdev));
1142 if (device->tape_state == TS_IN_USE)
1143 tape_state_set(device, TS_UNUSED);
1144 module_put(device->discipline->owner);
1145 spin_unlock(get_ccwdev_lock(device->cdev));
1146 return 0;
1147}
1148
1149/*
1150 * Execute a magnetic tape command a number of times.
1151 */
1152int
1153tape_mtop(struct tape_device *device, int mt_op, int mt_count)
1154{
1155 tape_mtop_fn fn;
1156 int rc;
1157
1158 DBF_EVENT(6, "TAPE:mtio\n");
1159 DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
1160 DBF_EVENT(6, "TAPE:arg: %x\n", mt_count);
1161
1162 if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
1163 return -EINVAL;
1164 fn = device->discipline->mtop_array[mt_op];
1165 if (fn == NULL)
1166 return -EINVAL;
1167
1168 /* We assume that the backends can handle counts up to 500. */
1169 if (mt_op == MTBSR || mt_op == MTFSR || mt_op == MTFSF ||
1170 mt_op == MTBSF || mt_op == MTFSFM || mt_op == MTBSFM) {
1171 rc = 0;
1172 for (; mt_count > 500; mt_count -= 500)
1173 if ((rc = fn(device, 500)) != 0)
1174 break;
1175 if (rc == 0)
1176 rc = fn(device, mt_count);
1177 } else
1178 rc = fn(device, mt_count);
1179 return rc;
1181}
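From user space these operations arrive through the MTIOCTOP ioctl of the character frontend. A minimal sketch — the device node name is an example — with a count that the chunking loop above splits into 500 + 500 + 200:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/mtio.h>

	int main(void)
	{
		struct mtop op = { .mt_op = MTFSF, .mt_count = 1200 };
		int fd = open("/dev/ntibm0", O_RDONLY);	/* node name illustrative */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* tape_mtop() issues this as 500 + 500 + 200 file marks. */
		if (ioctl(fd, MTIOCTOP, &op) < 0)
			perror("MTIOCTOP");
		close(fd);
		return 0;
	}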
1182
1183/*
1184 * Tape init function.
1185 */
1186static int
1187tape_init (void)
1188{
1189 TAPE_DBF_AREA = debug_register("tape", 1, 2, 4 * sizeof(long));
1190 debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
1191#ifdef DBF_LIKE_HELL
1192 debug_set_level(TAPE_DBF_AREA, 6);
1193#endif
1194 DBF_EVENT(3, "tape init: ($Revision: 1.51 $)\n");
1195 tape_proc_init();
1196 tapechar_init();
1197 tapeblock_init();
1198 return 0;
1199}
1200
1201/*
1202 * Tape exit function.
1203 */
1204static void
1205tape_exit(void)
1206{
1207 DBF_EVENT(6, "tape exit\n");
1208
1209 /* Get rid of the frontends */
1210 tapechar_exit();
1211 tapeblock_exit();
1212 tape_proc_cleanup();
1213 debug_unregister(TAPE_DBF_AREA);
1214}
1215
1216MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
1217 "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
1218MODULE_DESCRIPTION("Linux on zSeries channel attached "
1219 "tape device driver ($Revision: 1.51 $)");
1220MODULE_LICENSE("GPL");
1221
1222module_init(tape_init);
1223module_exit(tape_exit);
1224
1225EXPORT_SYMBOL(tape_generic_remove);
1226EXPORT_SYMBOL(tape_generic_probe);
1227EXPORT_SYMBOL(tape_generic_online);
1228EXPORT_SYMBOL(tape_generic_offline);
1229EXPORT_SYMBOL(tape_put_device);
1230EXPORT_SYMBOL(tape_get_device_reference);
1231EXPORT_SYMBOL(tape_state_verbose);
1232EXPORT_SYMBOL(tape_op_verbose);
1233EXPORT_SYMBOL(tape_state_set);
1234EXPORT_SYMBOL(tape_med_state_set);
1235EXPORT_SYMBOL(tape_alloc_request);
1236EXPORT_SYMBOL(tape_free_request);
1237EXPORT_SYMBOL(tape_dump_sense);
1238EXPORT_SYMBOL(tape_dump_sense_dbf);
1239EXPORT_SYMBOL(tape_do_io);
1240EXPORT_SYMBOL(tape_do_io_async);
1241EXPORT_SYMBOL(tape_do_io_interruptible);
1242EXPORT_SYMBOL(tape_mtop);
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
new file mode 100644
index 000000000000..801d17cca34e
--- /dev/null
+++ b/drivers/s390/char/tape_proc.c
@@ -0,0 +1,145 @@
1/*
2 * drivers/s390/char/tape_proc.c
3 * tape device driver for S/390 and zSeries tapes.
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001 IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
10 *
11 * PROCFS Functions
12 */
13
14#include <linux/config.h>
15#include <linux/module.h>
16#include <linux/vmalloc.h>
17#include <linux/seq_file.h>
18
19#define TAPE_DBF_AREA tape_core_dbf
20
21#include "tape.h"
22
23#define PRINTK_HEADER "TAPE_PROC: "
24
25static const char *tape_med_st_verbose[MS_SIZE] =
26{
27 [MS_UNKNOWN] = "UNKNOWN ",
28 [MS_LOADED] = "LOADED ",
29 [MS_UNLOADED] = "UNLOADED"
30};
31
32/* our proc tapedevices entry */
33static struct proc_dir_entry *tape_proc_devices;
34
35/*
36 * Show function for /proc/tapedevices
37 */
38static int tape_proc_show(struct seq_file *m, void *v)
39{
40 struct tape_device *device;
41 struct tape_request *request;
42 const char *str;
43 unsigned long n;
44
45 n = (unsigned long) v - 1;
46 if (!n) {
47 seq_printf(m, "TapeNo\tBusID CuType/Model\t"
48 "DevType/Model\tBlkSize\tState\tOp\tMedState\n");
49 }
50 device = tape_get_device(n);
51 if (IS_ERR(device))
52 return 0;
53 spin_lock_irq(get_ccwdev_lock(device->cdev));
54 seq_printf(m, "%d\t", (int) n);
55 seq_printf(m, "%-10.10s ", device->cdev->dev.bus_id);
56 seq_printf(m, "%04X/", device->cdev->id.cu_type);
57 seq_printf(m, "%02X\t", device->cdev->id.cu_model);
58 seq_printf(m, "%04X/", device->cdev->id.dev_type);
59 seq_printf(m, "%02X\t\t", device->cdev->id.dev_model);
60 if (device->char_data.block_size == 0)
61 seq_printf(m, "auto\t");
62 else
63 seq_printf(m, "%i\t", device->char_data.block_size);
64 if (device->tape_state >= 0 &&
65 device->tape_state < TS_SIZE)
66 str = tape_state_verbose[device->tape_state];
67 else
68 str = "UNKNOWN";
69 seq_printf(m, "%s\t", str);
70 if (!list_empty(&device->req_queue)) {
71 request = list_entry(device->req_queue.next,
72 struct tape_request, list);
73 str = tape_op_verbose[request->op];
74 } else
75 str = "---";
76 seq_printf(m, "%s\t", str);
77 seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]);
78 spin_unlock_irq(get_ccwdev_lock(device->cdev));
79 tape_put_device(device);
80 return 0;
81}
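Given the format strings above, a populated /proc/tapedevices would render one header line plus one line per device, roughly like this (all values invented for illustration):

	TapeNo	BusID      CuType/Model	DevType/Model	BlkSize	State	Op	MedState
	0	0.0.0181   3490/10	3490/40		auto	UNUSED	---	LOADED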
82
83static void *tape_proc_start(struct seq_file *m, loff_t *pos)
84{
85 if (*pos >= 256 / TAPE_MINORS_PER_DEV)
86 return NULL;
87 return (void *)((unsigned long) *pos + 1);
88}
89
90static void *tape_proc_next(struct seq_file *m, void *v, loff_t *pos)
91{
92 ++*pos;
93 return tape_proc_start(m, pos);
94}
95
96static void tape_proc_stop(struct seq_file *m, void *v)
97{
98}
99
100static struct seq_operations tape_proc_seq = {
101 .start = tape_proc_start,
102 .next = tape_proc_next,
103 .stop = tape_proc_stop,
104 .show = tape_proc_show,
105};
106
107static int tape_proc_open(struct inode *inode, struct file *file)
108{
109 return seq_open(file, &tape_proc_seq);
110}
111
112static struct file_operations tape_proc_ops =
113{
114 .open = tape_proc_open,
115 .read = seq_read,
116 .llseek = seq_lseek,
117 .release = seq_release,
118};
119
120/*
121 * Initialize procfs stuff on startup
122 */
123void
124tape_proc_init(void)
125{
126 tape_proc_devices =
127 create_proc_entry ("tapedevices", S_IFREG | S_IRUGO | S_IWUSR,
128 &proc_root);
129 if (tape_proc_devices == NULL) {
130 PRINT_WARN("tape: Cannot register procfs entry tapedevices\n");
131 return;
132 }
133 tape_proc_devices->proc_fops = &tape_proc_ops;
134 tape_proc_devices->owner = THIS_MODULE;
135}
136
137/*
138 * Cleanup all stuff registered to the procfs
139 */
140void
141tape_proc_cleanup(void)
142{
143 if (tape_proc_devices != NULL)
144 remove_proc_entry ("tapedevices", &proc_root);
145}
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
new file mode 100644
index 000000000000..2f9fe30989a7
--- /dev/null
+++ b/drivers/s390/char/tape_std.c
@@ -0,0 +1,765 @@
1/*
2 * drivers/s390/char/tape_std.c
3 * standard tape device functions for ibm tapes.
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
10 * Martin Schwidefsky <schwidefsky@de.ibm.com>
11 * Stefan Bader <shbader@de.ibm.com>
12 */
13
14#include <linux/config.h>
15#include <linux/stddef.h>
16#include <linux/kernel.h>
17#include <linux/bio.h>
18#include <linux/timer.h>
19
20#include <asm/types.h>
21#include <asm/idals.h>
22#include <asm/ebcdic.h>
23#include <asm/tape390.h>
24
25#define TAPE_DBF_AREA tape_core_dbf
26
27#include "tape.h"
28#include "tape_std.h"
29
30#define PRINTK_HEADER "TAPE_STD: "
31
32/*
33 * tape_std_assign
34 */
35static void
36tape_std_assign_timeout(unsigned long data)
37{
38 struct tape_request * request;
39 struct tape_device * device;
40
41 request = (struct tape_request *) data;
42 if ((device = request->device) == NULL)
43 BUG();
44
45 spin_lock_irq(get_ccwdev_lock(device->cdev));
46 if (request->callback != NULL) {
47 DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
48 device->cdev_id);
49 PRINT_ERR("%s: Assignment timeout. Device busy.\n",
50 device->cdev->dev.bus_id);
51 ccw_device_clear(device->cdev, (long) request);
52 }
53 spin_unlock_irq(get_ccwdev_lock(device->cdev));
54}
55
56int
57tape_std_assign(struct tape_device *device)
58{
59 int rc;
60 struct timer_list timeout;
61 struct tape_request *request;
62
63 request = tape_alloc_request(2, 11);
64 if (IS_ERR(request))
65 return PTR_ERR(request);
66
67 request->op = TO_ASSIGN;
68 tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata);
69 tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
70
71 /*
72 * The assign command sometimes blocks if the device is assigned
73 * to another host (actually this shouldn't happen but it does).
74 * So we set up a timeout for this call.
75 */
76 init_timer(&timeout);
77 timeout.function = tape_std_assign_timeout;
78 timeout.data = (unsigned long) request;
79 timeout.expires = jiffies + 2 * HZ;
80 add_timer(&timeout);
81
82 rc = tape_do_io_interruptible(device, request);
83
84 del_timer(&timeout);
85
86 if (rc != 0) {
87 PRINT_WARN("%s: assign failed - device might be busy\n",
88 device->cdev->dev.bus_id);
89 DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
90 device->cdev_id);
91 } else {
92 DBF_EVENT(3, "%08x: Tape assigned\n", device->cdev_id);
93 }
94 tape_free_request(request);
95 return rc;
96}
97
98/*
99 * tape_std_unassign
100 */
101int
102tape_std_unassign (struct tape_device *device)
103{
104 int rc;
105 struct tape_request *request;
106
107 if (device->tape_state == TS_NOT_OPER) {
108 DBF_EVENT(3, "(%08x): Can't unassign device\n",
109 device->cdev_id);
110 PRINT_WARN("(%s): Can't unassign device - device gone\n",
111 device->cdev->dev.bus_id);
112 return -EIO;
113 }
114
115 request = tape_alloc_request(2, 11);
116 if (IS_ERR(request))
117 return PTR_ERR(request);
118
119 request->op = TO_UNASSIGN;
120 tape_ccw_cc(request->cpaddr, UNASSIGN, 11, request->cpdata);
121 tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
122
123 if ((rc = tape_do_io(device, request)) != 0) {
124 DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id);
125 PRINT_WARN("%s: Unassign failed\n", device->cdev->dev.bus_id);
126 } else {
127 DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id);
128 }
129 tape_free_request(request);
130 return rc;
131}
132
133/*
134 * TAPE390_DISPLAY: Show a string on the tape display.
135 */
136int
137tape_std_display(struct tape_device *device, struct display_struct *disp)
138{
139 struct tape_request *request;
140 int rc;
141
142 request = tape_alloc_request(2, 17);
143 if (IS_ERR(request)) {
144 DBF_EVENT(3, "TAPE: load display failed\n");
145 return PTR_ERR(request);
146 }
147 request->op = TO_DIS;
148
149 *(unsigned char *) request->cpdata = disp->cntrl;
150 DBF_EVENT(5, "TAPE: display cntrl=%04x\n", disp->cntrl);
151 memcpy(((unsigned char *) request->cpdata) + 1, disp->message1, 8);
152 memcpy(((unsigned char *) request->cpdata) + 9, disp->message2, 8);
153 ASCEBC(((unsigned char*) request->cpdata) + 1, 16);
154
155 tape_ccw_cc(request->cpaddr, LOAD_DISPLAY, 17, request->cpdata);
156 tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
157
158 rc = tape_do_io_interruptible(device, request);
159 tape_free_request(request);
160 return rc;
161}
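From user space the same display can be driven through the TAPE390_DISPLAY ioctl. A hedged sketch, assuming the display_struct layout implied by the code above (one control byte followed by two 8-character messages, as declared in asm/tape390.h); the device node and the control value 0x20 are illustrative assumptions:

	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <asm/tape390.h>

	int main(void)
	{
		struct display_struct disp;
		int fd = open("/dev/ntibm0", O_RDONLY);	/* node name illustrative */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		memset(&disp, ' ', sizeof(disp));
		disp.cntrl = 0x20;	/* assumed: show message1 steadily */
		memcpy(disp.message1, "BACKUP  ", 8);
		memcpy(disp.message2, "RUNNING ", 8);
		if (ioctl(fd, TAPE390_DISPLAY, &disp) < 0)
			perror("TAPE390_DISPLAY");
		close(fd);
		return 0;
	}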
162
163/*
164 * Read block id.
165 */
166int
167tape_std_read_block_id(struct tape_device *device, __u64 *id)
168{
169 struct tape_request *request;
170 int rc;
171
172 request = tape_alloc_request(3, 8);
173 if (IS_ERR(request))
174 return PTR_ERR(request);
175 request->op = TO_RBI;
176 /* setup ccws */
177 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
178 tape_ccw_cc(request->cpaddr + 1, READ_BLOCK_ID, 8, request->cpdata);
179 tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
180 /* execute it */
181 rc = tape_do_io(device, request);
182 if (rc == 0)
183 /* Get result from read buffer. */
184 *id = *(__u64 *) request->cpdata;
185 tape_free_request(request);
186 return rc;
187}
188
189int
190tape_std_terminate_write(struct tape_device *device)
191{
192 int rc;
193
194 if (device->required_tapemarks == 0)
195 return 0;
196
197 DBF_LH(5, "tape%d: terminate write %dxEOF\n", device->first_minor,
198 device->required_tapemarks);
199
200 rc = tape_mtop(device, MTWEOF, device->required_tapemarks);
201 if (rc)
202 return rc;
203
204 device->required_tapemarks = 0;
205 return tape_mtop(device, MTBSR, 1);
206}
207
208/*
209 * MTLOAD: Loads the tape.
210 * The default implementation just waits until the tape medium state changes
211 * to MS_LOADED.
212 */
213int
214tape_std_mtload(struct tape_device *device, int count)
215{
216 return wait_event_interruptible(device->state_change_wq,
217 (device->medium_state == MS_LOADED));
218}
219
220/*
221 * MTSETBLK: Set block size.
222 */
223int
224tape_std_mtsetblk(struct tape_device *device, int count)
225{
226 struct idal_buffer *new;
227
228 DBF_LH(6, "tape_std_mtsetblk(%d)\n", count);
229 if (count <= 0) {
230 /*
231 * Just set block_size to 0. tapechar_read/tapechar_write
232 * will realloc the idal buffer if a bigger one than the
233 * current is needed.
234 */
235 device->char_data.block_size = 0;
236 return 0;
237 }
238 if (device->char_data.idal_buf != NULL &&
239 device->char_data.idal_buf->size == count)
240 /* We already have an idal buffer of that size. */
241 return 0;
242
243 if (count > MAX_BLOCKSIZE) {
244 DBF_EVENT(3, "Invalid block size (%d > %d) given.\n",
245 count, MAX_BLOCKSIZE);
246 PRINT_ERR("Invalid block size (%d > %d) given.\n",
247 count, MAX_BLOCKSIZE);
248 return -EINVAL;
249 }
250
251 /* Allocate a new idal buffer. */
252 new = idal_buffer_alloc(count, 0);
253 if (new == NULL)
254 return -ENOMEM;
255 if (device->char_data.idal_buf != NULL)
256 idal_buffer_free(device->char_data.idal_buf);
257 device->char_data.idal_buf = new;
258 device->char_data.block_size = count;
259
260 DBF_LH(6, "new blocksize is %d\n", device->char_data.block_size);
261
262 return 0;
263}
264
265/*
266 * MTRESET: Set block size to 0.
267 */
268int
269tape_std_mtreset(struct tape_device *device, int count)
270{
271 DBF_EVENT(6, "TCHAR:devreset:\n");
272 device->char_data.block_size = 0;
273 return 0;
274}
275
276/*
277 * MTFSF: Forward space over 'count' file marks. The tape is positioned
278 * at the EOT (End of Tape) side of the file mark.
279 */
280int
281tape_std_mtfsf(struct tape_device *device, int mt_count)
282{
283 struct tape_request *request;
284 struct ccw1 *ccw;
285
286 request = tape_alloc_request(mt_count + 2, 0);
287 if (IS_ERR(request))
288 return PTR_ERR(request);
289 request->op = TO_FSF;
290 /* setup ccws */
291 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
292 device->modeset_byte);
293 ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
294 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
295
296 /* execute it */
297 return tape_do_io_free(device, request);
298}
299
300/*
301 * MTFSR: Forward space over 'count' tape blocks (blocksize is set
302 * via MTSETBLK).
303 */
304int
305tape_std_mtfsr(struct tape_device *device, int mt_count)
306{
307 struct tape_request *request;
308 struct ccw1 *ccw;
309 int rc;
310
311 request = tape_alloc_request(mt_count + 2, 0);
312 if (IS_ERR(request))
313 return PTR_ERR(request);
314 request->op = TO_FSB;
315 /* setup ccws */
316 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
317 device->modeset_byte);
318 ccw = tape_ccw_repeat(ccw, FORSPACEBLOCK, mt_count);
319 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
320
321 /* execute it */
322 rc = tape_do_io(device, request);
323 if (rc == 0 && request->rescnt > 0) {
324 DBF_LH(3, "FSR over tapemark\n");
325 rc = 1;
326 }
327 tape_free_request(request);
328
329 return rc;
330}
331
332/*
333 * MTBSR: Backward space over 'count' tape blocks
334 * (blocksize is set via MTSETBLK).
335 */
336int
337tape_std_mtbsr(struct tape_device *device, int mt_count)
338{
339 struct tape_request *request;
340 struct ccw1 *ccw;
341 int rc;
342
343 request = tape_alloc_request(mt_count + 2, 0);
344 if (IS_ERR(request))
345 return PTR_ERR(request);
346 request->op = TO_BSB;
347 /* setup ccws */
348 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
349 device->modeset_byte);
350 ccw = tape_ccw_repeat(ccw, BACKSPACEBLOCK, mt_count);
351 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
352
353 /* execute it */
354 rc = tape_do_io(device, request);
355 if (rc == 0 && request->rescnt > 0) {
356 DBF_LH(3, "BSR over tapemark\n");
357 rc = 1;
358 }
359 tape_free_request(request);
360
361 return rc;
362}
363
364/*
365 * MTWEOF: Write 'count' file marks at the current position.
366 */
367int
368tape_std_mtweof(struct tape_device *device, int mt_count)
369{
370 struct tape_request *request;
371 struct ccw1 *ccw;
372
373 request = tape_alloc_request(mt_count + 2, 0);
374 if (IS_ERR(request))
375 return PTR_ERR(request);
376 request->op = TO_WTM;
377 /* setup ccws */
378 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
379 device->modeset_byte);
380 ccw = tape_ccw_repeat(ccw, WRITETAPEMARK, mt_count);
381 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
382
383 /* execute it */
384 return tape_do_io_free(device, request);
385}
386
387/*
388 * MTBSFM: Backward space over 'count' file marks.
389 * The tape is positioned at the BOT (Begin Of Tape) side of the
390 * last skipped file mark.
391 */
392int
393tape_std_mtbsfm(struct tape_device *device, int mt_count)
394{
395 struct tape_request *request;
396 struct ccw1 *ccw;
397
398 request = tape_alloc_request(mt_count + 2, 0);
399 if (IS_ERR(request))
400 return PTR_ERR(request);
401 request->op = TO_BSF;
402 /* setup ccws */
403 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
404 device->modeset_byte);
405 ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
406 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
407
408 /* execute it */
409 return tape_do_io_free(device, request);
410}
411
412/*
413 * MTBSF: Backward space over 'count' file marks. The tape is positioned at
414 * the EOT (End of Tape) side of the last skipped file mark.
415 */
416int
417tape_std_mtbsf(struct tape_device *device, int mt_count)
418{
419 struct tape_request *request;
420 struct ccw1 *ccw;
421 int rc;
422
423 request = tape_alloc_request(mt_count + 2, 0);
424 if (IS_ERR(request))
425 return PTR_ERR(request);
426 request->op = TO_BSF;
427 /* setup ccws */
428 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
429 device->modeset_byte);
430 ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
431 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
432 /* execute it */
433 rc = tape_do_io_free(device, request);
434 if (rc == 0) {
435 rc = tape_mtop(device, MTFSR, 1);
436 if (rc > 0)
437 rc = 0;
438 }
439 return rc;
440}
441
442/*
443 * MTFSFM: Forward space over 'count' file marks.
444 * The tape is positioned at the BOT (Begin Of Tape) side
445 * of the last skipped file mark.
446 */
447int
448tape_std_mtfsfm(struct tape_device *device, int mt_count)
449{
450 struct tape_request *request;
451 struct ccw1 *ccw;
452 int rc;
453
454 request = tape_alloc_request(mt_count + 2, 0);
455 if (IS_ERR(request))
456 return PTR_ERR(request);
457 request->op = TO_FSF;
458 /* setup ccws */
459 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
460 device->modeset_byte);
461 ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
462 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
463 /* execute it */
464 rc = tape_do_io_free(device, request);
465 if (rc == 0) {
466 rc = tape_mtop(device, MTBSR, 1);
467 if (rc > 0)
468 rc = 0;
469 }
470
471 return rc;
472}
473
474/*
475 * MTREW: Rewind the tape.
476 */
477int
478tape_std_mtrew(struct tape_device *device, int mt_count)
479{
480 struct tape_request *request;
481
482 request = tape_alloc_request(3, 0);
483 if (IS_ERR(request))
484 return PTR_ERR(request);
485 request->op = TO_REW;
486 /* setup ccws */
487 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
488 device->modeset_byte);
489 tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
490 tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
491
492 /* execute it */
493 return tape_do_io_free(device, request);
494}
495
496/*
497 * MTOFFL: Rewind the tape and put the drive off-line.
498 * Implement 'rewind unload'
499 */
500int
501tape_std_mtoffl(struct tape_device *device, int mt_count)
502{
503 struct tape_request *request;
504
505 request = tape_alloc_request(3, 0);
506 if (IS_ERR(request))
507 return PTR_ERR(request);
508 request->op = TO_RUN;
509 /* setup ccws */
510 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
511 tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL);
512 tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
513
514 /* execute it */
515 return tape_do_io_free(device, request);
516}
517
518/*
519 * MTNOP: 'No operation'.
520 */
521int
522tape_std_mtnop(struct tape_device *device, int mt_count)
523{
524 struct tape_request *request;
525
526 request = tape_alloc_request(2, 0);
527 if (IS_ERR(request))
528 return PTR_ERR(request);
529 request->op = TO_NOP;
530 /* setup ccws */
531 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
532 tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
533 /* execute it */
534 return tape_do_io_free(device, request);
535}
536
537/*
538 * MTEOM: positions at the end of the portion of the tape already used
539 * for recording data. MTEOM positions after the last file mark, ready for
540 * appending another file.
541 */
542int
543tape_std_mteom(struct tape_device *device, int mt_count)
544{
545 int rc;
546
547 /*
548 * Seek from the beginning of tape (rewind).
549 */
550 if ((rc = tape_mtop(device, MTREW, 1)) < 0)
551 return rc;
552
553 /*
554 * The logical end of volume is given by two sequential tapemarks.
555 * Look for this by skipping to the next file (over one tapemark)
556 * and then test for another one (fsr returns 1 if a tapemark was
557 * encountered).
558 */
559 do {
560 if ((rc = tape_mtop(device, MTFSF, 1)) < 0)
561 return rc;
562 if ((rc = tape_mtop(device, MTFSR, 1)) < 0)
563 return rc;
564 } while (rc == 0);
565
566 return tape_mtop(device, MTBSR, 1);
567}
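Traced on a tape holding two files, the loop above runs as follows: MTFSF skips the first tapemark into file two; MTFSR spaces over a data block and returns 0, so the loop repeats; the second MTFSF skips the tapemark ending file two, and MTFSR now crosses the second mark of the trailing double tapemark, returning 1 and ending the loop. The final MTBSR backs over that mark, leaving the tape positioned to append a new file.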
568
569/*
570 * MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind.
571 */
572int
573tape_std_mtreten(struct tape_device *device, int mt_count)
574{
575 struct tape_request *request;
576 int rc;
577
578 request = tape_alloc_request(4, 0);
579 if (IS_ERR(request))
580 return PTR_ERR(request);
581 request->op = TO_FSF;
582 /* setup ccws */
583 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
584 tape_ccw_cc(request->cpaddr + 1, FORSPACEFILE, 0, NULL);
585 tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL);
586 tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr);
587 /* execute it, MTRETEN rc gets ignored */
588 rc = tape_do_io_interruptible(device, request);
589 tape_free_request(request);
590 return tape_mtop(device, MTREW, 1);
591}
592
593/*
594 * MTERASE: erases the tape.
595 */
596int
597tape_std_mterase(struct tape_device *device, int mt_count)
598{
599 struct tape_request *request;
600
601 request = tape_alloc_request(6, 0);
602 if (IS_ERR(request))
603 return PTR_ERR(request);
604 request->op = TO_DSE;
605 /* setup ccws */
606 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
607 tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
608 tape_ccw_cc(request->cpaddr + 2, ERASE_GAP, 0, NULL);
609 tape_ccw_cc(request->cpaddr + 3, DATA_SEC_ERASE, 0, NULL);
610 tape_ccw_cc(request->cpaddr + 4, REWIND, 0, NULL);
611 tape_ccw_end(request->cpaddr + 5, NOP, 0, NULL);
612
613 /* execute it */
614 return tape_do_io_free(device, request);
615}
616
617/*
618 * MTUNLOAD: Rewind the tape and unload it.
619 */
620int
621tape_std_mtunload(struct tape_device *device, int mt_count)
622{
623 return tape_mtop(device, MTOFFL, mt_count);
624}
625
626/*
627 * MTCOMPRESSION: used to enable compression.
628 * Sets the IDRC on/off.
629 */
630int
631tape_std_mtcompression(struct tape_device *device, int mt_count)
632{
633 struct tape_request *request;
634
635 if (mt_count < 0 || mt_count > 1) {
636 DBF_EXCEPTION(6, "xcom parm\n");
637 if (*device->modeset_byte & 0x08)
638 PRINT_INFO("(%s) Compression is currently on\n",
639 device->cdev->dev.bus_id);
640 else
641 PRINT_INFO("(%s) Compression is currently off\n",
642 device->cdev->dev.bus_id);
643 PRINT_INFO("Use 1 to switch compression on, 0 to "
644 "switch it off\n");
645 return -EINVAL;
646 }
647 request = tape_alloc_request(2, 0);
648 if (IS_ERR(request))
649 return PTR_ERR(request);
650 request->op = TO_NOP;
651 /* setup ccws */
652 *device->modeset_byte = (mt_count == 0) ? 0x00 : 0x08;
653 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
654 tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
655 /* execute it */
656 return tape_do_io_free(device, request);
657}
658
659/*
660 * Read Block
661 */
662struct tape_request *
663tape_std_read_block(struct tape_device *device, size_t count)
664{
665 struct tape_request *request;
666
667 /*
668 * We have to alloc 4 ccws in order to be able to transform request
669 * into a read backward request in error case.
670 */
671 request = tape_alloc_request(4, 0);
672 if (IS_ERR(request)) {
673 DBF_EXCEPTION(6, "xrbl fail");
674 return request;
675 }
676 request->op = TO_RFO;
677 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
678 tape_ccw_end_idal(request->cpaddr + 1, READ_FORWARD,
679 device->char_data.idal_buf);
680 DBF_EVENT(6, "xrbl ccwg\n");
681 return request;
682}
683
684/*
685 * Read Block backward transformation function.
686 */
687void
688tape_std_read_backward(struct tape_device *device, struct tape_request *request)
689{
690 /*
691 * We have allocated 4 ccws in tape_std_read, so we can now
692 * transform the request to a read backward, followed by a
693 * forward space block.
694 */
695 request->op = TO_RBA;
696 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
697 tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD,
698 device->char_data.idal_buf);
699 tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
700 tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
701 DBF_EVENT(6, "xrop ccwg");}
702
703/*
704 * Write Block
705 */
706struct tape_request *
707tape_std_write_block(struct tape_device *device, size_t count)
708{
709 struct tape_request *request;
710
711 request = tape_alloc_request(2, 0);
712 if (IS_ERR(request)) {
713 DBF_EXCEPTION(6, "xwbl fail\n");
714 return request;
715 }
716 request->op = TO_WRI;
717 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
718 tape_ccw_end_idal(request->cpaddr + 1, WRITE_CMD,
719 device->char_data.idal_buf);
720 DBF_EVENT(6, "xwbl ccwg\n");
721 return request;
722}
723
724/*
725 * This routine is called by the frontend after an ENOSPC on write
726 */
727void
728tape_std_process_eov(struct tape_device *device)
729{
730 /*
731 * End of volume: We have to backspace the last written record, then
732 * we TRY to write a tapemark and then backspace over the written TM
733 */
734 if (tape_mtop(device, MTBSR, 1) == 0 &&
735 tape_mtop(device, MTWEOF, 1) == 0) {
736 tape_mtop(device, MTBSR, 1);
737 }
738}
739
740EXPORT_SYMBOL(tape_std_assign);
741EXPORT_SYMBOL(tape_std_unassign);
742EXPORT_SYMBOL(tape_std_display);
743EXPORT_SYMBOL(tape_std_read_block_id);
744EXPORT_SYMBOL(tape_std_mtload);
745EXPORT_SYMBOL(tape_std_mtsetblk);
746EXPORT_SYMBOL(tape_std_mtreset);
747EXPORT_SYMBOL(tape_std_mtfsf);
748EXPORT_SYMBOL(tape_std_mtfsr);
749EXPORT_SYMBOL(tape_std_mtbsr);
750EXPORT_SYMBOL(tape_std_mtweof);
751EXPORT_SYMBOL(tape_std_mtbsfm);
752EXPORT_SYMBOL(tape_std_mtbsf);
753EXPORT_SYMBOL(tape_std_mtfsfm);
754EXPORT_SYMBOL(tape_std_mtrew);
755EXPORT_SYMBOL(tape_std_mtoffl);
756EXPORT_SYMBOL(tape_std_mtnop);
757EXPORT_SYMBOL(tape_std_mteom);
758EXPORT_SYMBOL(tape_std_mtreten);
759EXPORT_SYMBOL(tape_std_mterase);
760EXPORT_SYMBOL(tape_std_mtunload);
761EXPORT_SYMBOL(tape_std_mtcompression);
762EXPORT_SYMBOL(tape_std_read_block);
763EXPORT_SYMBOL(tape_std_read_backward);
764EXPORT_SYMBOL(tape_std_write_block);
765EXPORT_SYMBOL(tape_std_process_eov);
diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h
new file mode 100644
index 000000000000..3ab6aafb7343
--- /dev/null
+++ b/drivers/s390/char/tape_std.h
@@ -0,0 +1,152 @@
1/*
2 * drivers/s390/char/tape_std.h
3 * standard tape device functions for ibm tapes.
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
9 * Martin Schwidefsky <schwidefsky@de.ibm.com>
10 */
11
12#ifndef _TAPE_STD_H
13#define _TAPE_STD_H
14
15#include <asm/tape390.h>
16
17/*
18 * Biggest block size to handle. Currently 64K because we only build
19 * channel programs without data chaining.
20 */
21#define MAX_BLOCKSIZE 65535
22
23/*
24 * The CCW commands for the Tape type of command.
25 */
26#define INVALID_00 0x00 /* Invalid cmd */
27#define BACKSPACEBLOCK 0x27 /* Back Space block */
28#define BACKSPACEFILE 0x2f /* Back Space file */
29#define DATA_SEC_ERASE 0x97 /* Data security erase */
30#define ERASE_GAP 0x17 /* Erase Gap */
31#define FORSPACEBLOCK 0x37 /* Forward space block */
32#define FORSPACEFILE 0x3F /* Forward Space file */
33#define FORCE_STREAM_CNT 0xEB /* Forced streaming count # */
34#define NOP 0x03 /* No operation */
35#define READ_FORWARD 0x02 /* Read forward */
36#define REWIND 0x07 /* Rewind */
37#define REWIND_UNLOAD 0x0F /* Rewind and Unload */
38#define SENSE 0x04 /* Sense */
39#define NEW_MODE_SET 0xEB /* Guess it is Mode set */
40#define WRITE_CMD 0x01 /* Write */
41#define WRITETAPEMARK 0x1F /* Write Tape Mark */
42
43#define ASSIGN 0xB7 /* 3420 REJECT,3480 OK */
44#define CONTROL_ACCESS 0xE3 /* Set high speed */
45#define DIAG_MODE_SET 0x0B /* 3420 NOP, 3480 REJECT */
46#define LOAD_DISPLAY 0x9F /* 3420 REJECT,3480 OK */
47#define LOCATE 0x4F /* 3420 REJ, 3480 NOP */
48#define LOOP_WRITE_TO_READ 0x8B /* 3480 REJECT */
49#define MODE_SET_DB 0xDB /* 3420 REJECT,3480 OK */
50#define MODE_SET_C3 0xC3 /* for 3420 */
51#define MODE_SET_CB 0xCB /* for 3420 */
52#define MODE_SET_D3 0xD3 /* for 3420 */
53#define READ_BACKWARD 0x0C /* */
54#define READ_BLOCK_ID 0x22 /* 3420 REJECT,3480 OK */
55#define READ_BUFFER 0x12 /* 3420 REJECT,3480 OK */
56#define READ_BUFF_LOG 0x24 /* 3420 REJECT,3480 OK */
57#define RELEASE 0xD4 /* 3420 NOP, 3480 REJECT */
58#define REQ_TRK_IN_ERROR 0x1B /* 3420 NOP, 3480 REJECT */
59#define RESERVE 0xF4 /* 3420 NOP, 3480 REJECT */
60#define SENSE_GROUP_ID 0x34 /* 3420 REJECT,3480 OK */
61#define SENSE_ID 0xE4 /* 3420 REJECT,3480 OK */
62#define READ_DEV_CHAR 0x64 /* Read device characteristics */
63#define SET_DIAGNOSE 0x4B /* 3420 NOP, 3480 REJECT */
64#define SET_GROUP_ID 0xAF /* 3420 REJECT,3480 OK */
65#define SET_TAPE_WRITE_IMMED 0xC3 /* for 3480 */
66#define SUSPEND 0x5B /* 3420 REJ, 3480 NOP */
67#define SYNC 0x43 /* Synchronize (flush buffer) */
68#define UNASSIGN 0xC7 /* 3420 REJECT,3480 OK */
69#define PERF_SUBSYS_FUNC 0x77 /* 3490 CMD */
70#define READ_CONFIG_DATA 0xFA /* 3490 CMD */
71#define READ_MESSAGE_ID 0x4E /* 3490 CMD */
72#define READ_SUBSYS_DATA 0x3E /* 3490 CMD */
73#define SET_INTERFACE_ID 0x73 /* 3490 CMD */
74
75#define SENSE_COMMAND_REJECT 0x80
76#define SENSE_INTERVENTION_REQUIRED 0x40
77#define SENSE_BUS_OUT_CHECK 0x20
78#define SENSE_EQUIPMENT_CHECK 0x10
79#define SENSE_DATA_CHECK 0x08
80#define SENSE_OVERRUN 0x04
81#define SENSE_DEFERRED_UNIT_CHECK 0x02
82#define SENSE_ASSIGNED_ELSEWHERE 0x01
83
84#define SENSE_LOCATE_FAILURE 0x80
85#define SENSE_DRIVE_ONLINE 0x40
86#define SENSE_RESERVED 0x20
87#define SENSE_RECORD_SEQUENCE_ERR 0x10
88#define SENSE_BEGINNING_OF_TAPE 0x08
89#define SENSE_WRITE_MODE 0x04
90#define SENSE_WRITE_PROTECT 0x02
91#define SENSE_NOT_CAPABLE 0x01
92
93#define SENSE_CHANNEL_ADAPTER_CODE 0xE0
94#define SENSE_CHANNEL_ADAPTER_LOC 0x10
95#define SENSE_REPORTING_CU 0x08
96#define SENSE_AUTOMATIC_LOADER 0x04
97#define SENSE_TAPE_SYNC_MODE 0x02
98#define SENSE_TAPE_POSITIONING 0x01
99
100/* discipline functions */
101struct tape_request *tape_std_read_block(struct tape_device *, size_t);
102void tape_std_read_backward(struct tape_device *device,
103 struct tape_request *request);
104struct tape_request *tape_std_write_block(struct tape_device *, size_t);
105struct tape_request *tape_std_bread(struct tape_device *, struct request *);
106void tape_std_free_bread(struct tape_request *);
107void tape_std_check_locate(struct tape_device *, struct tape_request *);
108struct tape_request *tape_std_bwrite(struct request *,
109 struct tape_device *, int);
110
111/* Some non-mtop commands. */
112int tape_std_assign(struct tape_device *);
113int tape_std_unassign(struct tape_device *);
114int tape_std_read_block_id(struct tape_device *device, __u64 *id);
115int tape_std_display(struct tape_device *, struct display_struct *disp);
116int tape_std_terminate_write(struct tape_device *);
117
118/* Standard magnetic tape commands. */
119int tape_std_mtbsf(struct tape_device *, int);
120int tape_std_mtbsfm(struct tape_device *, int);
121int tape_std_mtbsr(struct tape_device *, int);
122int tape_std_mtcompression(struct tape_device *, int);
123int tape_std_mteom(struct tape_device *, int);
124int tape_std_mterase(struct tape_device *, int);
125int tape_std_mtfsf(struct tape_device *, int);
126int tape_std_mtfsfm(struct tape_device *, int);
127int tape_std_mtfsr(struct tape_device *, int);
128int tape_std_mtload(struct tape_device *, int);
129int tape_std_mtnop(struct tape_device *, int);
130int tape_std_mtoffl(struct tape_device *, int);
131int tape_std_mtreset(struct tape_device *, int);
132int tape_std_mtreten(struct tape_device *, int);
133int tape_std_mtrew(struct tape_device *, int);
134int tape_std_mtsetblk(struct tape_device *, int);
135int tape_std_mtunload(struct tape_device *, int);
136int tape_std_mtweof(struct tape_device *, int);
137
138/* Event handlers */
139void tape_std_default_handler(struct tape_device *);
140void tape_std_unexpect_uchk_handler(struct tape_device *);
141void tape_std_irq(struct tape_device *);
142void tape_std_process_eov(struct tape_device *);
143
144// the error recovery stuff:
145void tape_std_error_recovery(struct tape_device *);
146void tape_std_error_recovery_has_failed(struct tape_device *,int error_id);
147void tape_std_error_recovery_succeded(struct tape_device *);
148void tape_std_error_recovery_do_retry(struct tape_device *);
149void tape_std_error_recovery_read_opposite(struct tape_device *);
150void tape_std_error_recovery_HWBUG(struct tape_device *, int condno);
151
152#endif // _TAPE_STD_H
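
A hedged sketch of how a discipline routine might drive one of the command codes above. The function name is hypothetical, and tape_alloc_request(), tape_ccw_end() and tape_do_io_free() are assumed helper names from the tape core (not shown in this diff); only REWIND comes from this header:

static int
tape_std_mtrew_sketch(struct tape_device *device, int mt_count)
{
	struct tape_request *request;

	/* One CCW, no data area: REWIND transfers no data. */
	request = tape_alloc_request(1, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	/* Terminal CCW with the REWIND command code from this header. */
	tape_ccw_end(request->cpaddr, REWIND, 0, NULL);
	/* Start the channel program; free the request on completion. */
	return tape_do_io_free(device, request);
}
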
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
new file mode 100644
index 000000000000..7db5ebce7f0f
--- /dev/null
+++ b/drivers/s390/char/tty3270.c
@@ -0,0 +1,1836 @@
1/*
2 * drivers/s390/char/tty3270.c
3 * IBM/3270 Driver - tty functions.
4 *
5 * Author(s):
6 * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
7 * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
8 * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
9 */
10
11#include <linux/config.h>
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/kdev_t.h>
15#include <linux/tty.h>
16#include <linux/vt_kern.h>
17#include <linux/init.h>
18#include <linux/console.h>
19#include <linux/interrupt.h>
20
21#include <linux/slab.h>
22#include <linux/bootmem.h>
23
24#include <asm/ccwdev.h>
25#include <asm/cio.h>
26#include <asm/ebcdic.h>
27#include <asm/uaccess.h>
28
29
30#include "raw3270.h"
31#include "keyboard.h"
32
33#define TTY3270_CHAR_BUF_SIZE 256
34#define TTY3270_OUTPUT_BUFFER_SIZE 1024
35#define TTY3270_STRING_PAGES 5
36
37struct tty_driver *tty3270_driver;
38static int tty3270_max_index;
39
40struct raw3270_fn tty3270_fn;
41
42struct tty3270_cell {
43 unsigned char character;
44 unsigned char highlight;
45 unsigned char f_color;
46};
47
48struct tty3270_line {
49 struct tty3270_cell *cells;
50 int len;
51};
52
53#define ESCAPE_NPAR 8
54
55/*
56 * The main tty view data structure.
57 * FIXME:
58 * 1) describe line orientation & lines list concept against screen
59 * 2) describe conversion of screen to lines
60 * 3) describe line format.
61 */
62struct tty3270 {
63 struct raw3270_view view;
64 struct tty_struct *tty; /* Pointer to tty structure */
65 void **freemem_pages; /* Array of pages used for freemem. */
66 struct list_head freemem; /* List of free memory for strings. */
67
68 /* Output stuff. */
69 struct list_head lines; /* List of lines. */
70 struct list_head update; /* List of lines to update. */
71 unsigned char wcc; /* Write control character. */
72 int nr_lines; /* # lines in list. */
73 int nr_up; /* # lines up in history. */
74 unsigned long update_flags; /* Update indication bits. */
75 struct string *status; /* Lower right of display. */
76 struct raw3270_request *write; /* Single write request. */
77 struct timer_list timer; /* Output delay timer. */
78
79 /* Current tty screen. */
80 unsigned int cx, cy; /* Current output position. */
81 unsigned int highlight; /* Blink/reverse/underscore */
82 unsigned int f_color; /* Foreground color */
83 struct tty3270_line *screen;
84
85 /* Input stuff. */
86 struct string *prompt; /* Output string for input area. */
87 struct string *input; /* Input string for read request. */
88 struct raw3270_request *read; /* Single read request. */
89 struct raw3270_request *kreset; /* Single keyboard reset request. */
90 unsigned char inattr; /* Visible/invisible input. */
91 int throttle, attn; /* tty throttle/unthrottle. */
92 struct tasklet_struct readlet; /* Tasklet to issue read request. */
93 struct kbd_data *kbd; /* key_maps stuff. */
94
95 /* Escape sequence parsing. */
96 int esc_state, esc_ques, esc_npar;
97 int esc_par[ESCAPE_NPAR];
98 unsigned int saved_cx, saved_cy;
99 unsigned int saved_highlight, saved_f_color;
100
101 /* Command recalling. */
102 struct list_head rcl_lines; /* List of recallable lines. */
103 struct list_head *rcl_walk; /* Point in rcl_lines list. */
104 int rcl_nr, rcl_max; /* Number/max number of rcl_lines. */
105
106 /* Character array for put_char/flush_chars. */
107 unsigned int char_count;
108 char char_buf[TTY3270_CHAR_BUF_SIZE];
109};
110
111/* tty3270->update_flags. See tty3270_update for details. */
112#define TTY_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */
113#define TTY_UPDATE_LIST 2 /* Update lines in tty3270->update. */
114#define TTY_UPDATE_INPUT 4 /* Update input line. */
115#define TTY_UPDATE_STATUS 8 /* Update status line. */
116#define TTY_UPDATE_ALL 15
117
118static void tty3270_update(struct tty3270 *);
119
120/*
121 * Set up the update timeout for a view. An expires value of 0 cancels a pending timer.
122 */
123void
124tty3270_set_timer(struct tty3270 *tp, int expires)
125{
126 if (expires == 0) {
127 if (timer_pending(&tp->timer) && del_timer(&tp->timer))
128 raw3270_put_view(&tp->view);
129 return;
130 }
131 if (timer_pending(&tp->timer) &&
132 mod_timer(&tp->timer, jiffies + expires))
133 return;
134 raw3270_get_view(&tp->view);
135 tp->timer.function = (void (*)(unsigned long)) tty3270_update;
136 tp->timer.data = (unsigned long) tp;
137 tp->timer.expires = jiffies + expires;
138 add_timer(&tp->timer);
139}
140
141/*
142 * The input line occupies the last two lines of the screen.
143 */
144static void
145tty3270_update_prompt(struct tty3270 *tp, char *input, int count)
146{
147 struct string *line;
148 unsigned int off;
149
150 line = tp->prompt;
151 if (count != 0)
152 line->string[5] = TF_INMDT;
153 else
154 line->string[5] = tp->inattr;
155 if (count > tp->view.cols * 2 - 11)
156 count = tp->view.cols * 2 - 11;
157 memcpy(line->string + 6, input, count);
158 line->string[6 + count] = TO_IC;
159 /* Clear to end of input line. */
160 if (count < tp->view.cols * 2 - 11) {
161 line->string[7 + count] = TO_RA;
162 line->string[10 + count] = 0;
163 off = tp->view.cols * tp->view.rows - 9;
164 raw3270_buffer_address(tp->view.dev, line->string+count+8, off);
165 line->len = 11 + count;
166 } else
167 line->len = 7 + count;
168 tp->update_flags |= TTY_UPDATE_INPUT;
169}
170
171static void
172tty3270_create_prompt(struct tty3270 *tp)
173{
174 static const unsigned char blueprint[] =
175 { TO_SBA, 0, 0, 0x6e, TO_SF, TF_INPUT,
176 /* empty input string */
177 TO_IC, TO_RA, 0, 0, 0 };
178 struct string *line;
179 unsigned int offset;
180
181 line = alloc_string(&tp->freemem,
182 sizeof(blueprint) + tp->view.cols * 2 - 9);
183 tp->prompt = line;
184 tp->inattr = TF_INPUT;
185	/* Copy blueprint to prompt line */
186 memcpy(line->string, blueprint, sizeof(blueprint));
187 line->len = sizeof(blueprint);
188 /* Set output offsets. */
189 offset = tp->view.cols * (tp->view.rows - 2);
190 raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
191 offset = tp->view.cols * tp->view.rows - 9;
192 raw3270_buffer_address(tp->view.dev, line->string + 8, offset);
193
194 /* Allocate input string for reading. */
195 tp->input = alloc_string(&tp->freemem, tp->view.cols * 2 - 9 + 6);
196}
197
198/*
199 * The status line is the last line of the screen. It shows the string
200 * "Running"/"Holding" in the lower right corner of the screen.
201 */
202static void
203tty3270_update_status(struct tty3270 * tp)
204{
205 char *str;
206
207 str = (tp->nr_up != 0) ? "History" : "Running";
208 memcpy(tp->status->string + 8, str, 7);
209 codepage_convert(tp->view.ascebc, tp->status->string + 8, 7);
210 tp->update_flags |= TTY_UPDATE_STATUS;
211}
212
213static void
214tty3270_create_status(struct tty3270 * tp)
215{
216 static const unsigned char blueprint[] =
217 { TO_SBA, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR, TAC_GREEN,
218 0, 0, 0, 0, 0, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR,
219 TAC_RESET };
220 struct string *line;
221 unsigned int offset;
222
223 line = alloc_string(&tp->freemem,sizeof(blueprint));
224 tp->status = line;
225 /* Copy blueprint to status line */
226 memcpy(line->string, blueprint, sizeof(blueprint));
227 /* Set address to start of status string (= last 9 characters). */
228 offset = tp->view.cols * tp->view.rows - 9;
229 raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
230}
231
232/*
233 * Set output offsets to 3270 datastream fragment of a tty string.
234 * (TO_SBA offset at the start and TO_RA offset at the end of the string)
235 */
236static void
237tty3270_update_string(struct tty3270 *tp, struct string *line, int nr)
238{
239 unsigned char *cp;
240
241 raw3270_buffer_address(tp->view.dev, line->string + 1,
242 tp->view.cols * nr);
243 cp = line->string + line->len - 4;
244 if (*cp == TO_RA)
245 raw3270_buffer_address(tp->view.dev, cp + 1,
246 tp->view.cols * (nr + 1));
247}
248
249/*
250 * Rebuild update list to print all lines.
251 */
252static void
253tty3270_rebuild_update(struct tty3270 *tp)
254{
255 struct string *s, *n;
256 int line, nr_up;
257
258 /*
259 * Throw away update list and create a new one,
260 * containing all lines that will fit on the screen.
261 */
262 list_for_each_entry_safe(s, n, &tp->update, update)
263 list_del_init(&s->update);
264 line = tp->view.rows - 3;
265 nr_up = tp->nr_up;
266 list_for_each_entry_reverse(s, &tp->lines, list) {
267 if (nr_up > 0) {
268 nr_up--;
269 continue;
270 }
271 tty3270_update_string(tp, s, line);
272 list_add(&s->update, &tp->update);
273 if (--line < 0)
274 break;
275 }
276 tp->update_flags |= TTY_UPDATE_LIST;
277}
278
279/*
280 * Alloc string for size bytes. If there is not enough room in
281 * freemem, free strings until there is room.
282 */
283static struct string *
284tty3270_alloc_string(struct tty3270 *tp, size_t size)
285{
286 struct string *s, *n;
287
288 s = alloc_string(&tp->freemem, size);
289 if (s)
290 return s;
291 list_for_each_entry_safe(s, n, &tp->lines, list) {
292 BUG_ON(tp->nr_lines <= tp->view.rows - 2);
293 list_del(&s->list);
294 if (!list_empty(&s->update))
295 list_del(&s->update);
296 tp->nr_lines--;
297 if (free_string(&tp->freemem, s) >= size)
298 break;
299 }
300 s = alloc_string(&tp->freemem, size);
301 BUG_ON(!s);
302 if (tp->nr_up != 0 &&
303 tp->nr_up + tp->view.rows - 2 >= tp->nr_lines) {
304 tp->nr_up = tp->nr_lines - tp->view.rows + 2;
305 tty3270_rebuild_update(tp);
306 tty3270_update_status(tp);
307 }
308 return s;
309}
310
311/*
312 * Add an empty line to the list.
313 */
314static void
315tty3270_blank_line(struct tty3270 *tp)
316{
317 static const unsigned char blueprint[] =
318 { TO_SBA, 0, 0, TO_SA, TAT_EXTHI, TAX_RESET,
319 TO_SA, TAT_COLOR, TAC_RESET, TO_RA, 0, 0, 0 };
320 struct string *s;
321
322 s = tty3270_alloc_string(tp, sizeof(blueprint));
323 memcpy(s->string, blueprint, sizeof(blueprint));
324 s->len = sizeof(blueprint);
325 list_add_tail(&s->list, &tp->lines);
326 tp->nr_lines++;
327 if (tp->nr_up != 0)
328 tp->nr_up++;
329}
330
331/*
332 * Write request completion callback.
333 */
334static void
335tty3270_write_callback(struct raw3270_request *rq, void *data)
336{
337 struct tty3270 *tp;
338
339 tp = (struct tty3270 *) rq->view;
340 if (rq->rc != 0) {
341		/* Write wasn't successful. Refresh all. */
342 tty3270_rebuild_update(tp);
343 tp->update_flags = TTY_UPDATE_ALL;
344 tty3270_set_timer(tp, 1);
345 }
346 raw3270_request_reset(rq);
347 xchg(&tp->write, rq);
348}
349
350/*
351 * Update 3270 display.
352 */
353static void
354tty3270_update(struct tty3270 *tp)
355{
356 static char invalid_sba[2] = { 0xff, 0xff };
357 struct raw3270_request *wrq;
358 unsigned long updated;
359 struct string *s, *n;
360 char *sba, *str;
361 int rc, len;
362
363 wrq = xchg(&tp->write, 0);
364 if (!wrq) {
365 tty3270_set_timer(tp, 1);
366 return;
367 }
368
369 spin_lock(&tp->view.lock);
370 updated = 0;
371 if (tp->update_flags & TTY_UPDATE_ERASE) {
372 /* Use erase write alternate to erase display. */
373 raw3270_request_set_cmd(wrq, TC_EWRITEA);
374 updated |= TTY_UPDATE_ERASE;
375 } else
376 raw3270_request_set_cmd(wrq, TC_WRITE);
377
378 raw3270_request_add_data(wrq, &tp->wcc, 1);
379 tp->wcc = TW_NONE;
380
381 /*
382 * Update status line.
383 */
384 if (tp->update_flags & TTY_UPDATE_STATUS)
385 if (raw3270_request_add_data(wrq, tp->status->string,
386 tp->status->len) == 0)
387 updated |= TTY_UPDATE_STATUS;
388
389 /*
390 * Write input line.
391 */
392 if (tp->update_flags & TTY_UPDATE_INPUT)
393 if (raw3270_request_add_data(wrq, tp->prompt->string,
394 tp->prompt->len) == 0)
395 updated |= TTY_UPDATE_INPUT;
396
397 sba = invalid_sba;
398
399 if (tp->update_flags & TTY_UPDATE_LIST) {
400 /* Write strings in the update list to the screen. */
401 list_for_each_entry_safe(s, n, &tp->update, update) {
402 str = s->string;
403 len = s->len;
404 /*
405 * Skip TO_SBA at the start of the string if the
406 * last output position matches the start address
407 * of this line.
408 */
409 if (s->string[1] == sba[0] && s->string[2] == sba[1])
410 str += 3, len -= 3;
411 if (raw3270_request_add_data(wrq, str, len) != 0)
412 break;
413 list_del_init(&s->update);
414 sba = s->string + s->len - 3;
415 }
416 if (list_empty(&tp->update))
417 updated |= TTY_UPDATE_LIST;
418 }
419 wrq->callback = tty3270_write_callback;
420 rc = raw3270_start(&tp->view, wrq);
421 if (rc == 0) {
422 tp->update_flags &= ~updated;
423 if (tp->update_flags)
424 tty3270_set_timer(tp, 1);
425 } else {
426 raw3270_request_reset(wrq);
427 xchg(&tp->write, wrq);
428 }
429 spin_unlock(&tp->view.lock);
430 raw3270_put_view(&tp->view);
431}
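
/*
 * Sketch (not part of the driver): the producer pattern used by the
 * helpers above -- mark what is dirty under the view lock, then arm
 * the short timer so several changes coalesce into one write.
 */
static void
tty3270_mark_dirty_sketch(struct tty3270 *tp)
{
	spin_lock_bh(&tp->view.lock);
	tp->update_flags |= TTY_UPDATE_STATUS;	/* record what changed */
	tty3270_set_timer(tp, 1);	/* schedule the coalesced write */
	spin_unlock_bh(&tp->view.lock);
}
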
432
433/*
434 * Command recalling.
435 */
436static void
437tty3270_rcl_add(struct tty3270 *tp, char *input, int len)
438{
439 struct string *s;
440
441 tp->rcl_walk = 0;
442 if (len <= 0)
443 return;
444 if (tp->rcl_nr >= tp->rcl_max) {
445 s = list_entry(tp->rcl_lines.next, struct string, list);
446 list_del(&s->list);
447 free_string(&tp->freemem, s);
448 tp->rcl_nr--;
449 }
450 s = tty3270_alloc_string(tp, len);
451 memcpy(s->string, input, len);
452 list_add_tail(&s->list, &tp->rcl_lines);
453 tp->rcl_nr++;
454}
455
456static void
457tty3270_rcl_backward(struct kbd_data *kbd)
458{
459 struct tty3270 *tp;
460 struct string *s;
461
462 tp = kbd->tty->driver_data;
463 spin_lock_bh(&tp->view.lock);
464 if (tp->inattr == TF_INPUT) {
465 if (tp->rcl_walk && tp->rcl_walk->prev != &tp->rcl_lines)
466 tp->rcl_walk = tp->rcl_walk->prev;
467 else if (!list_empty(&tp->rcl_lines))
468 tp->rcl_walk = tp->rcl_lines.prev;
469 s = tp->rcl_walk ?
470 list_entry(tp->rcl_walk, struct string, list) : 0;
471 if (tp->rcl_walk) {
472 s = list_entry(tp->rcl_walk, struct string, list);
473 tty3270_update_prompt(tp, s->string, s->len);
474 } else
475 tty3270_update_prompt(tp, 0, 0);
476 tty3270_set_timer(tp, 1);
477 }
478 spin_unlock_bh(&tp->view.lock);
479}
480
481/*
482 * Deactivate tty view.
483 */
484static void
485tty3270_exit_tty(struct kbd_data *kbd)
486{
487 struct tty3270 *tp;
488
489 tp = kbd->tty->driver_data;
490 raw3270_deactivate_view(&tp->view);
491}
492
493/*
494 * Scroll forward in history.
495 */
496static void
497tty3270_scroll_forward(struct kbd_data *kbd)
498{
499 struct tty3270 *tp;
500 int nr_up;
501
502 tp = kbd->tty->driver_data;
503 spin_lock_bh(&tp->view.lock);
504 nr_up = tp->nr_up - tp->view.rows + 2;
505 if (nr_up < 0)
506 nr_up = 0;
507 if (nr_up != tp->nr_up) {
508 tp->nr_up = nr_up;
509 tty3270_rebuild_update(tp);
510 tty3270_update_status(tp);
511 tty3270_set_timer(tp, 1);
512 }
513 spin_unlock_bh(&tp->view.lock);
514}
515
516/*
517 * Scroll backward in history.
518 */
519static void
520tty3270_scroll_backward(struct kbd_data *kbd)
521{
522 struct tty3270 *tp;
523 int nr_up;
524
525 tp = kbd->tty->driver_data;
526 spin_lock_bh(&tp->view.lock);
527 nr_up = tp->nr_up + tp->view.rows - 2;
528 if (nr_up + tp->view.rows - 2 > tp->nr_lines)
529 nr_up = tp->nr_lines - tp->view.rows + 2;
530 if (nr_up != tp->nr_up) {
531 tp->nr_up = nr_up;
532 tty3270_rebuild_update(tp);
533 tty3270_update_status(tp);
534 tty3270_set_timer(tp, 1);
535 }
536 spin_unlock_bh(&tp->view.lock);
537}
538
539/*
540 * Pass input line to tty.
541 */
542static void
543tty3270_read_tasklet(struct raw3270_request *rrq)
544{
545 static char kreset_data = TW_KR;
546 struct tty3270 *tp;
547 char *input;
548 int len;
549
550 tp = (struct tty3270 *) rrq->view;
551 spin_lock_bh(&tp->view.lock);
552 /*
553 * Two AID keys are special: For 0x7d (enter) the input line
554 * has to be emitted to the tty and for 0x6d the screen
555 * needs to be redrawn.
556 */
557 input = 0;
558 len = 0;
559 if (tp->input->string[0] == 0x7d) {
560 /* Enter: write input to tty. */
561 input = tp->input->string + 6;
562 len = tp->input->len - 6 - rrq->rescnt;
563 if (tp->inattr != TF_INPUTN)
564 tty3270_rcl_add(tp, input, len);
565 if (tp->nr_up > 0) {
566 tp->nr_up = 0;
567 tty3270_rebuild_update(tp);
568 tty3270_update_status(tp);
569 }
570 /* Clear input area. */
571 tty3270_update_prompt(tp, 0, 0);
572 tty3270_set_timer(tp, 1);
573 } else if (tp->input->string[0] == 0x6d) {
574 /* Display has been cleared. Redraw. */
575 tty3270_rebuild_update(tp);
576 tp->update_flags = TTY_UPDATE_ALL;
577 tty3270_set_timer(tp, 1);
578 }
579 spin_unlock_bh(&tp->view.lock);
580
581 /* Start keyboard reset command. */
582 raw3270_request_reset(tp->kreset);
583 raw3270_request_set_cmd(tp->kreset, TC_WRITE);
584 raw3270_request_add_data(tp->kreset, &kreset_data, 1);
585 raw3270_start(&tp->view, tp->kreset);
586
587 /* Emit input string. */
588 if (tp->tty) {
589 while (len-- > 0)
590 kbd_keycode(tp->kbd, *input++);
591 /* Emit keycode for AID byte. */
592 kbd_keycode(tp->kbd, 256 + tp->input->string[0]);
593 }
594
595 raw3270_request_reset(rrq);
596 xchg(&tp->read, rrq);
597 raw3270_put_view(&tp->view);
598}
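
/*
 * Sketch (hypothetical helper): the length computation used above.
 * The typed text starts at offset 6 of the Read Modified buffer --
 * an AID byte, 2-byte cursor address and 3-byte SBA sequence precede
 * it (assumed layout) -- and rescnt is the channel's residual count.
 */
static int
tty3270_input_len_sketch(struct string *input, int rescnt)
{
	return input->len - 6 - rescnt;
}
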
599
600/*
601 * Read request completion callback.
602 */
603static void
604tty3270_read_callback(struct raw3270_request *rq, void *data)
605{
606 raw3270_get_view(rq->view);
607 /* Schedule tasklet to pass input to tty. */
608 tasklet_schedule(&((struct tty3270 *) rq->view)->readlet);
609}
610
611/*
612 * Issue a read request. If lock == 0 the caller must already hold the device lock.
613 */
614static void
615tty3270_issue_read(struct tty3270 *tp, int lock)
616{
617 struct raw3270_request *rrq;
618 int rc;
619
620 rrq = xchg(&tp->read, 0);
621 if (!rrq)
622 /* Read already scheduled. */
623 return;
624 rrq->callback = tty3270_read_callback;
625 rrq->callback_data = tp;
626 raw3270_request_set_cmd(rrq, TC_READMOD);
627 raw3270_request_set_data(rrq, tp->input->string, tp->input->len);
628 /* Issue the read modified request. */
629 if (lock) {
630 rc = raw3270_start(&tp->view, rrq);
631 } else
632 rc = raw3270_start_irq(&tp->view, rrq);
633 if (rc) {
634 raw3270_request_reset(rrq);
635 xchg(&tp->read, rrq);
636 }
637}
638
639/*
640 * Switch to the tty view.
641 */
642static int
643tty3270_activate(struct raw3270_view *view)
644{
645 struct tty3270 *tp;
646 unsigned long flags;
647
648 tp = (struct tty3270 *) view;
649 spin_lock_irqsave(&tp->view.lock, flags);
650 tp->nr_up = 0;
651 tty3270_rebuild_update(tp);
652 tty3270_update_status(tp);
653 tp->update_flags = TTY_UPDATE_ALL;
654 tty3270_set_timer(tp, 1);
655 spin_unlock_irqrestore(&tp->view.lock, flags);
656 start_tty(tp->tty);
657 return 0;
658}
659
660static void
661tty3270_deactivate(struct raw3270_view *view)
662{
663 struct tty3270 *tp;
664
665 tp = (struct tty3270 *) view;
666 if (tp && tp->tty)
667 stop_tty(tp->tty);
668}
669
670static int
671tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
672{
673 /* Handle ATTN. Schedule tasklet to read aid. */
674 if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
675 if (!tp->throttle)
676 tty3270_issue_read(tp, 0);
677 else
678 tp->attn = 1;
679 }
680
681 if (rq) {
682 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
683 rq->rc = -EIO;
684 else
685 /* Normal end. Copy residual count. */
686 rq->rescnt = irb->scsw.count;
687 }
688 return RAW3270_IO_DONE;
689}
690
691/*
692 * Allocate tty3270 structure.
693 */
694static struct tty3270 *
695tty3270_alloc_view(void)
696{
697 struct tty3270 *tp;
698 int pages;
699
700 tp = kmalloc(sizeof(struct tty3270),GFP_KERNEL);
701 if (!tp)
702 goto out_err;
703 memset(tp, 0, sizeof(struct tty3270));
704 tp->freemem_pages =
705 kmalloc(sizeof(void *) * TTY3270_STRING_PAGES, GFP_KERNEL);
706 if (!tp->freemem_pages)
707 goto out_tp;
708 INIT_LIST_HEAD(&tp->freemem);
709 init_timer(&tp->timer);
710 for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) {
711 tp->freemem_pages[pages] = (void *)
712 __get_free_pages(GFP_KERNEL|GFP_DMA, 0);
713 if (!tp->freemem_pages[pages])
714 goto out_pages;
715 add_string_memory(&tp->freemem,
716 tp->freemem_pages[pages], PAGE_SIZE);
717 }
718 tp->write = raw3270_request_alloc(TTY3270_OUTPUT_BUFFER_SIZE);
719 if (!tp->write)
720 goto out_pages;
721 tp->read = raw3270_request_alloc(0);
722 if (!tp->read)
723 goto out_write;
724 tp->kreset = raw3270_request_alloc(1);
725 if (!tp->kreset)
726 goto out_read;
727 tp->kbd = kbd_alloc();
728 if (!tp->kbd)
729 goto out_reset;
730 return tp;
731
732out_reset:
733 raw3270_request_free(tp->kreset);
734out_read:
735 raw3270_request_free(tp->read);
736out_write:
737 raw3270_request_free(tp->write);
738out_pages:
739 while (pages--)
740 free_pages((unsigned long) tp->freemem_pages[pages], 0);
741 kfree(tp->freemem_pages);
742out_tp:
743 kfree(tp);
744out_err:
745 return ERR_PTR(-ENOMEM);
746}
747
748/*
749 * Free tty3270 structure.
750 */
751static void
752tty3270_free_view(struct tty3270 *tp)
753{
754 int pages;
755
756 kbd_free(tp->kbd);
757 raw3270_request_free(tp->kreset);
758 raw3270_request_free(tp->read);
759 raw3270_request_free(tp->write);
760 for (pages = 0; pages < TTY3270_STRING_PAGES; pages++)
761 free_pages((unsigned long) tp->freemem_pages[pages], 0);
762 kfree(tp->freemem_pages);
763 kfree(tp);
764}
765
766/*
767 * Allocate tty3270 screen.
768 */
769static int
770tty3270_alloc_screen(struct tty3270 *tp)
771{
772 unsigned long size;
773 int lines;
774
775 size = sizeof(struct tty3270_line) * (tp->view.rows - 2);
776 tp->screen = kmalloc(size, GFP_KERNEL);
777 if (!tp->screen)
778 goto out_err;
779 memset(tp->screen, 0, size);
780 for (lines = 0; lines < tp->view.rows - 2; lines++) {
781 size = sizeof(struct tty3270_cell) * tp->view.cols;
782 tp->screen[lines].cells = kmalloc(size, GFP_KERNEL);
783 if (!tp->screen[lines].cells)
784 goto out_screen;
785 memset(tp->screen[lines].cells, 0, size);
786 }
787 return 0;
788out_screen:
789 while (lines--)
790 kfree(tp->screen[lines].cells);
791 kfree(tp->screen);
792out_err:
793 return -ENOMEM;
794}
795
796/*
797 * Free tty3270 screen.
798 */
799static void
800tty3270_free_screen(struct tty3270 *tp)
801{
802 int lines;
803
804 for (lines = 0; lines < tp->view.rows - 2; lines++)
805 kfree(tp->screen[lines].cells);
806 kfree(tp->screen);
807}
808
809/*
810 * Unlink tty3270 data structure from tty.
811 */
812static void
813tty3270_release(struct raw3270_view *view)
814{
815 struct tty3270 *tp;
816 struct tty_struct *tty;
817
818 tp = (struct tty3270 *) view;
819 tty = tp->tty;
820 if (tty) {
821 tty->driver_data = 0;
822 tp->tty = tp->kbd->tty = 0;
823 tty_hangup(tty);
824 raw3270_put_view(&tp->view);
825 }
826}
827
828/*
829 * Free tty3270 data structure
830 */
831static void
832tty3270_free(struct raw3270_view *view)
833{
834 tty3270_free_screen((struct tty3270 *) view);
835 tty3270_free_view((struct tty3270 *) view);
836}
837
838/*
839 * Delayed freeing of tty3270 views.
840 */
841static void
842tty3270_del_views(void)
843{
844 struct tty3270 *tp;
845 int i;
846
847 for (i = 0; i < tty3270_max_index; i++) {
848 tp = (struct tty3270 *) raw3270_find_view(&tty3270_fn, i);
849 if (!IS_ERR(tp))
850 raw3270_del_view(&tp->view);
851 }
852}
853
854struct raw3270_fn tty3270_fn = {
855 .activate = tty3270_activate,
856 .deactivate = tty3270_deactivate,
857 .intv = (void *) tty3270_irq,
858 .release = tty3270_release,
859 .free = tty3270_free
860};
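
/*
 * Sketch (inferred from the handlers above, not an API contract
 * spelled out in this diff): the order in which the raw3270 core is
 * expected to drive these callbacks over a view's lifetime.
 */
static void
tty3270_lifetime_sketch(struct raw3270_view *view,
			struct raw3270_request *rq, struct irb *irb)
{
	view->fn->activate(view);	/* view takes over the screen */
	view->fn->intv(view, rq, irb);	/* called for each interrupt */
	view->fn->deactivate(view);	/* another view takes over */
	view->fn->release(view);	/* device is going away */
	view->fn->free(view);		/* last reference dropped */
}
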
861
862/*
863 * This routine is called whenever a 3270 tty is opened.
864 */
865static int
866tty3270_open(struct tty_struct *tty, struct file * filp)
867{
868 struct tty3270 *tp;
869 int i, rc;
870
871 if (tty->count > 1)
872 return 0;
873 /* Check if the tty3270 is already there. */
874 tp = (struct tty3270 *) raw3270_find_view(&tty3270_fn, tty->index);
875 if (!IS_ERR(tp)) {
876 tty->driver_data = tp;
877 tty->winsize.ws_row = tp->view.rows - 2;
878 tty->winsize.ws_col = tp->view.cols;
879 tty->low_latency = 0;
880 tp->tty = tty;
881 tp->kbd->tty = tty;
882 tp->inattr = TF_INPUT;
883 return 0;
884 }
885 if (tty3270_max_index < tty->index + 1)
886 tty3270_max_index = tty->index + 1;
887
888 /* Quick exit if there is no device for tty->index. */
889 if (PTR_ERR(tp) == -ENODEV)
890 return -ENODEV;
891
892 /* Allocate tty3270 structure on first open. */
893 tp = tty3270_alloc_view();
894 if (IS_ERR(tp))
895 return PTR_ERR(tp);
896
897 INIT_LIST_HEAD(&tp->lines);
898 INIT_LIST_HEAD(&tp->update);
899 INIT_LIST_HEAD(&tp->rcl_lines);
900 tp->rcl_max = 20;
901 init_timer(&tp->timer);
902 tasklet_init(&tp->readlet,
903 (void (*)(unsigned long)) tty3270_read_tasklet,
904 (unsigned long) tp->read);
905
906 rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index);
907 if (rc) {
908 tty3270_free_view(tp);
909 return rc;
910 }
911
912 rc = tty3270_alloc_screen(tp);
913 if (rc) {
914 raw3270_del_view(&tp->view);
915 raw3270_put_view(&tp->view);
916 return rc;
917 }
918
919 tp->tty = tty;
920 tty->low_latency = 0;
921 tty->driver_data = tp;
922 tty->winsize.ws_row = tp->view.rows - 2;
923 tty->winsize.ws_col = tp->view.cols;
924
925 tty3270_create_prompt(tp);
926 tty3270_create_status(tp);
927 tty3270_update_status(tp);
928
929 /* Create blank line for every line in the tty output area. */
930 for (i = 0; i < tp->view.rows - 2; i++)
931 tty3270_blank_line(tp);
932
933 tp->kbd->tty = tty;
934 tp->kbd->fn_handler[KVAL(K_INCRCONSOLE)] = tty3270_exit_tty;
935 tp->kbd->fn_handler[KVAL(K_SCROLLBACK)] = tty3270_scroll_backward;
936 tp->kbd->fn_handler[KVAL(K_SCROLLFORW)] = tty3270_scroll_forward;
937 tp->kbd->fn_handler[KVAL(K_CONS)] = tty3270_rcl_backward;
938 kbd_ascebc(tp->kbd, tp->view.ascebc);
939
940 raw3270_activate_view(&tp->view);
941 return 0;
942}
943
944/*
945 * This routine is called when the 3270 tty is closed. We wait
946 * for the remaining request to be completed. Then we clean up.
947 */
948static void
949tty3270_close(struct tty_struct *tty, struct file * filp)
950{
951 struct tty3270 *tp;
952
953 if (tty->count > 1)
954 return;
955 tp = (struct tty3270 *) tty->driver_data;
956 if (tp) {
957 tty->driver_data = 0;
958 tp->tty = tp->kbd->tty = 0;
959 raw3270_put_view(&tp->view);
960 }
961}
962
963/*
964 * We always have room.
965 */
966static int
967tty3270_write_room(struct tty_struct *tty)
968{
969 return INT_MAX;
970}
971
972/*
973 * Insert character into the screen at the current position with the
974 * current color and highlight. This function does NOT do cursor movement.
975 */
976static void
977tty3270_put_character(struct tty3270 *tp, char ch)
978{
979 struct tty3270_line *line;
980 struct tty3270_cell *cell;
981
982 line = tp->screen + tp->cy;
983 if (line->len <= tp->cx) {
984 while (line->len < tp->cx) {
985 cell = line->cells + line->len;
986 cell->character = tp->view.ascebc[' '];
987 cell->highlight = tp->highlight;
988 cell->f_color = tp->f_color;
989 line->len++;
990 }
991 line->len++;
992 }
993 cell = line->cells + tp->cx;
994 cell->character = tp->view.ascebc[(unsigned int) ch];
995 cell->highlight = tp->highlight;
996 cell->f_color = tp->f_color;
997}
998
999/*
1000 * Convert a tty3270_line to a 3270 data fragment usable for output.
1001 */
1002static void
1003tty3270_convert_line(struct tty3270 *tp, int line_nr)
1004{
1005 struct tty3270_line *line;
1006 struct tty3270_cell *cell;
1007 struct string *s, *n;
1008 unsigned char highlight;
1009 unsigned char f_color;
1010 char *cp;
1011 int flen, i;
1012
1013 /* Determine how long the fragment will be. */
1014 flen = 3; /* Prefix (TO_SBA). */
1015 line = tp->screen + line_nr;
1016 flen += line->len;
1017 highlight = TAX_RESET;
1018 f_color = TAC_RESET;
1019 for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
1020 if (cell->highlight != highlight) {
1021 flen += 3; /* TO_SA to switch highlight. */
1022 highlight = cell->highlight;
1023 }
1024 if (cell->f_color != f_color) {
1025 flen += 3; /* TO_SA to switch color. */
1026 f_color = cell->f_color;
1027 }
1028 }
1029 if (highlight != TAX_RESET)
1030		flen += 3;	/* TO_SA to reset highlight. */
1031 if (f_color != TAC_RESET)
1032 flen += 3; /* TO_SA to reset color. */
1033 if (line->len < tp->view.cols)
1034 flen += 4; /* Postfix (TO_RA). */
1035
1036 /* Find the line in the list. */
1037 i = tp->view.rows - 2 - line_nr;
1038 list_for_each_entry_reverse(s, &tp->lines, list)
1039 if (--i <= 0)
1040 break;
1041 /*
1042 * Check if the line needs to get reallocated.
1043 */
1044 if (s->len != flen) {
1045 /* Reallocate string. */
1046 n = tty3270_alloc_string(tp, flen);
1047 list_add(&n->list, &s->list);
1048 list_del_init(&s->list);
1049 if (!list_empty(&s->update))
1050 list_del_init(&s->update);
1051 free_string(&tp->freemem, s);
1052 s = n;
1053 }
1054
1055 /* Write 3270 data fragment. */
1056 cp = s->string;
1057 *cp++ = TO_SBA;
1058 *cp++ = 0;
1059 *cp++ = 0;
1060
1061 highlight = TAX_RESET;
1062 f_color = TAC_RESET;
1063 for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
1064 if (cell->highlight != highlight) {
1065 *cp++ = TO_SA;
1066 *cp++ = TAT_EXTHI;
1067 *cp++ = cell->highlight;
1068 highlight = cell->highlight;
1069 }
1070 if (cell->f_color != f_color) {
1071 *cp++ = TO_SA;
1072 *cp++ = TAT_COLOR;
1073 *cp++ = cell->f_color;
1074 f_color = cell->f_color;
1075 }
1076 *cp++ = cell->character;
1077 }
1078 if (highlight != TAX_RESET) {
1079 *cp++ = TO_SA;
1080 *cp++ = TAT_EXTHI;
1081 *cp++ = TAX_RESET;
1082 }
1083 if (f_color != TAC_RESET) {
1084 *cp++ = TO_SA;
1085 *cp++ = TAT_COLOR;
1086 *cp++ = TAC_RESET;
1087 }
1088 if (line->len < tp->view.cols) {
1089 *cp++ = TO_RA;
1090 *cp++ = 0;
1091 *cp++ = 0;
1092 *cp++ = 0;
1093 }
1094
1095 if (tp->nr_up + line_nr < tp->view.rows - 2) {
1096 /* Line is currently visible on screen. */
1097 tty3270_update_string(tp, s, line_nr);
1098 /* Add line to update list. */
1099 if (list_empty(&s->update)) {
1100 list_add_tail(&s->update, &tp->update);
1101 tp->update_flags |= TTY_UPDATE_LIST;
1102 }
1103 }
1104}
1105
1106/*
1107 * Do carriage return.
1108 */
1109static void
1110tty3270_cr(struct tty3270 *tp)
1111{
1112 tp->cx = 0;
1113}
1114
1115/*
1116 * Do line feed.
1117 */
1118static void
1119tty3270_lf(struct tty3270 *tp)
1120{
1121 struct tty3270_line temp;
1122 int i;
1123
1124 tty3270_convert_line(tp, tp->cy);
1125 if (tp->cy < tp->view.rows - 3) {
1126 tp->cy++;
1127 return;
1128 }
1129 /* Last line just filled up. Add new, blank line. */
1130 tty3270_blank_line(tp);
1131 temp = tp->screen[0];
1132 temp.len = 0;
1133 for (i = 0; i < tp->view.rows - 3; i++)
1134 tp->screen[i] = tp->screen[i+1];
1135 tp->screen[tp->view.rows - 3] = temp;
1136 tty3270_rebuild_update(tp);
1137}
1138
1139static void
1140tty3270_ri(struct tty3270 *tp)
1141{
1142 if (tp->cy > 0) {
1143 tty3270_convert_line(tp, tp->cy);
1144 tp->cy--;
1145 }
1146}
1147
1148/*
1149 * Insert characters at current position.
1150 */
1151static void
1152tty3270_insert_characters(struct tty3270 *tp, int n)
1153{
1154 struct tty3270_line *line;
1155 int k;
1156
1157 line = tp->screen + tp->cy;
1158 while (line->len < tp->cx) {
1159 line->cells[line->len].character = tp->view.ascebc[' '];
1160 line->cells[line->len].highlight = TAX_RESET;
1161 line->cells[line->len].f_color = TAC_RESET;
1162 line->len++;
1163 }
1164 if (n > tp->view.cols - tp->cx)
1165 n = tp->view.cols - tp->cx;
1166 k = min_t(int, line->len - tp->cx, tp->view.cols - tp->cx - n);
1167 while (k--)
1168 line->cells[tp->cx + n + k] = line->cells[tp->cx + k];
1169 line->len += n;
1170 if (line->len > tp->view.cols)
1171 line->len = tp->view.cols;
1172 while (n-- > 0) {
1173 line->cells[tp->cx + n].character = tp->view.ascebc[' '];
1174 line->cells[tp->cx + n].highlight = tp->highlight;
1175 line->cells[tp->cx + n].f_color = tp->f_color;
1176 }
1177}
1178
1179/*
1180 * Delete characters at current position.
1181 */
1182static void
1183tty3270_delete_characters(struct tty3270 *tp, int n)
1184{
1185 struct tty3270_line *line;
1186 int i;
1187
1188 line = tp->screen + tp->cy;
1189 if (line->len <= tp->cx)
1190 return;
1191 if (line->len - tp->cx <= n) {
1192 line->len = tp->cx;
1193 return;
1194 }
1195 for (i = tp->cx; i + n < line->len; i++)
1196 line->cells[i] = line->cells[i + n];
1197 line->len -= n;
1198}
1199
1200/*
1201 * Erase characters at current position.
1202 */
1203static void
1204tty3270_erase_characters(struct tty3270 *tp, int n)
1205{
1206 struct tty3270_line *line;
1207 struct tty3270_cell *cell;
1208
1209 line = tp->screen + tp->cy;
1210 while (line->len > tp->cx && n-- > 0) {
1211 cell = line->cells + tp->cx++;
1212 cell->character = ' ';
1213 cell->highlight = TAX_RESET;
1214 cell->f_color = TAC_RESET;
1215 }
1216 tp->cx += n;
1217 tp->cx = min_t(int, tp->cx, tp->view.cols - 1);
1218}
1219
1220/*
1221 * Erase line, 3 different cases:
1222 * Esc [ 0 K Erase from current position to end of line inclusive
1223 * Esc [ 1 K Erase from beginning of line to current position inclusive
1224 * Esc [ 2 K Erase entire line (without moving cursor)
1225 */
1226static void
1227tty3270_erase_line(struct tty3270 *tp, int mode)
1228{
1229 struct tty3270_line *line;
1230 struct tty3270_cell *cell;
1231 int i;
1232
1233 line = tp->screen + tp->cy;
1234 if (mode == 0)
1235 line->len = tp->cx;
1236 else if (mode == 1) {
1237 for (i = 0; i < tp->cx; i++) {
1238 cell = line->cells + i;
1239 cell->character = ' ';
1240 cell->highlight = TAX_RESET;
1241 cell->f_color = TAC_RESET;
1242 }
1243 if (line->len <= tp->cx)
1244 line->len = tp->cx + 1;
1245 } else if (mode == 2)
1246 line->len = 0;
1247 tty3270_convert_line(tp, tp->cy);
1248}
1249
1250/*
1251 * Erase display, 3 different cases:
1252 * Esc [ 0 J Erase from current position to bottom of screen inclusive
1253 * Esc [ 1 J Erase from top of screen to current position inclusive
1254 * Esc [ 2 J Erase entire screen (without moving the cursor)
1255 */
1256static void
1257tty3270_erase_display(struct tty3270 *tp, int mode)
1258{
1259 int i;
1260
1261 if (mode == 0) {
1262 tty3270_erase_line(tp, 0);
1263 for (i = tp->cy + 1; i < tp->view.rows - 2; i++) {
1264 tp->screen[i].len = 0;
1265 tty3270_convert_line(tp, i);
1266 }
1267 } else if (mode == 1) {
1268 for (i = 0; i < tp->cy; i++) {
1269 tp->screen[i].len = 0;
1270 tty3270_convert_line(tp, i);
1271 }
1272 tty3270_erase_line(tp, 1);
1273 } else if (mode == 2) {
1274 for (i = 0; i < tp->view.rows - 2; i++) {
1275 tp->screen[i].len = 0;
1276 tty3270_convert_line(tp, i);
1277 }
1278 }
1279 tty3270_rebuild_update(tp);
1280}
1281
1282/*
1283 * Set attributes found in an escape sequence.
1284 * Esc [ <attr> ; <attr> ; ... m
1285 */
1286static void
1287tty3270_set_attributes(struct tty3270 *tp)
1288{
1289 static unsigned char f_colors[] = {
1290 TAC_DEFAULT, TAC_RED, TAC_GREEN, TAC_YELLOW, TAC_BLUE,
1291 TAC_PINK, TAC_TURQ, TAC_WHITE, 0, TAC_DEFAULT
1292 };
1293 int i, attr;
1294
1295 for (i = 0; i <= tp->esc_npar; i++) {
1296 attr = tp->esc_par[i];
1297 switch (attr) {
1298 case 0: /* Reset */
1299 tp->highlight = TAX_RESET;
1300 tp->f_color = TAC_RESET;
1301 break;
1302 /* Highlight. */
1303 case 4: /* Start underlining. */
1304 tp->highlight = TAX_UNDER;
1305 break;
1306 case 5: /* Start blink. */
1307 tp->highlight = TAX_BLINK;
1308 break;
1309 case 7: /* Start reverse. */
1310 tp->highlight = TAX_REVER;
1311 break;
1312 case 24: /* End underlining */
1313 if (tp->highlight == TAX_UNDER)
1314 tp->highlight = TAX_RESET;
1315 break;
1316 case 25: /* End blink. */
1317 if (tp->highlight == TAX_BLINK)
1318 tp->highlight = TAX_RESET;
1319 break;
1320 case 27: /* End reverse. */
1321 if (tp->highlight == TAX_REVER)
1322 tp->highlight = TAX_RESET;
1323 break;
1324 /* Foreground color. */
1325 case 30: /* Black */
1326 case 31: /* Red */
1327 case 32: /* Green */
1328 case 33: /* Yellow */
1329 case 34: /* Blue */
1330 case 35: /* Magenta */
1331 case 36: /* Cyan */
1332 case 37: /* White */
1333		case 39: /* Default */
1334 tp->f_color = f_colors[attr - 30];
1335 break;
1336 }
1337 }
1338}
1339
1340static inline int
1341tty3270_getpar(struct tty3270 *tp, int ix)
1342{
1343 return (tp->esc_par[ix] > 0) ? tp->esc_par[ix] : 1;
1344}
1345
1346static void
1347tty3270_goto_xy(struct tty3270 *tp, int cx, int cy)
1348{
1349 tp->cx = min_t(int, tp->view.cols - 1, max_t(int, 0, cx));
1350 cy = min_t(int, tp->view.rows - 3, max_t(int, 0, cy));
1351 if (cy != tp->cy) {
1352 tty3270_convert_line(tp, tp->cy);
1353 tp->cy = cy;
1354 }
1355}
1356
1357/*
1358 * Process escape sequences. Known sequences:
1359 * Esc 7 Save Cursor Position
1360 * Esc 8 Restore Cursor Position
1361 * Esc [ Pn ; Pn ; .. m Set attributes
1362 * Esc [ Pn ; Pn H Cursor Position
1363 * Esc [ Pn ; Pn f Cursor Position
1364 * Esc [ Pn A Cursor Up
1365 * Esc [ Pn B Cursor Down
1366 * Esc [ Pn C Cursor Forward
1367 * Esc [ Pn D Cursor Backward
1368 * Esc [ Pn G Cursor Horizontal Absolute
1369 * Esc [ Pn X Erase Characters
1370 * Esc [ Ps J Erase in Display
1371 * Esc [ Ps K Erase in Line
1372 * // FIXME: add all the new ones.
1373 *
1374 * Pn is a numeric parameter, a string of zero or more decimal digits.
1375 * Ps is a selective parameter.
1376 */
1377static void
1378tty3270_escape_sequence(struct tty3270 *tp, char ch)
1379{
1380 enum { ESnormal, ESesc, ESsquare, ESgetpars };
1381
1382 if (tp->esc_state == ESnormal) {
1383 if (ch == 0x1b)
1384 /* Starting new escape sequence. */
1385 tp->esc_state = ESesc;
1386 return;
1387 }
1388 if (tp->esc_state == ESesc) {
1389 tp->esc_state = ESnormal;
1390 switch (ch) {
1391 case '[':
1392 tp->esc_state = ESsquare;
1393 break;
1394 case 'E':
1395 tty3270_cr(tp);
1396 tty3270_lf(tp);
1397 break;
1398 case 'M':
1399 tty3270_ri(tp);
1400 break;
1401 case 'D':
1402 tty3270_lf(tp);
1403 break;
1404 case 'Z': /* Respond ID. */
1405 kbd_puts_queue(tp->tty, "\033[?6c");
1406 break;
1407 case '7': /* Save cursor position. */
1408 tp->saved_cx = tp->cx;
1409 tp->saved_cy = tp->cy;
1410 tp->saved_highlight = tp->highlight;
1411 tp->saved_f_color = tp->f_color;
1412 break;
1413 case '8': /* Restore cursor position. */
1414 tty3270_convert_line(tp, tp->cy);
1415 tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
1416 tp->highlight = tp->saved_highlight;
1417 tp->f_color = tp->saved_f_color;
1418 break;
1419 case 'c': /* Reset terminal. */
1420 tp->cx = tp->saved_cx = 0;
1421 tp->cy = tp->saved_cy = 0;
1422 tp->highlight = tp->saved_highlight = TAX_RESET;
1423 tp->f_color = tp->saved_f_color = TAC_RESET;
1424 tty3270_erase_display(tp, 2);
1425 break;
1426 }
1427 return;
1428 }
1429 if (tp->esc_state == ESsquare) {
1430 tp->esc_state = ESgetpars;
1431 memset(tp->esc_par, 0, sizeof(tp->esc_par));
1432 tp->esc_npar = 0;
1433 tp->esc_ques = (ch == '?');
1434 if (tp->esc_ques)
1435 return;
1436 }
1437 if (tp->esc_state == ESgetpars) {
1438 if (ch == ';' && tp->esc_npar < ESCAPE_NPAR - 1) {
1439 tp->esc_npar++;
1440 return;
1441 }
1442 if (ch >= '0' && ch <= '9') {
1443 tp->esc_par[tp->esc_npar] *= 10;
1444 tp->esc_par[tp->esc_npar] += ch - '0';
1445 return;
1446 }
1447 }
1448 tp->esc_state = ESnormal;
1449 if (ch == 'n' && !tp->esc_ques) {
1450 if (tp->esc_par[0] == 5) /* Status report. */
1451 kbd_puts_queue(tp->tty, "\033[0n");
1452 else if (tp->esc_par[0] == 6) { /* Cursor report. */
1453 char buf[40];
1454 sprintf(buf, "\033[%d;%dR", tp->cy + 1, tp->cx + 1);
1455 kbd_puts_queue(tp->tty, buf);
1456 }
1457 return;
1458 }
1459 if (tp->esc_ques)
1460 return;
1461 switch (ch) {
1462 case 'm':
1463 tty3270_set_attributes(tp);
1464 break;
1465 case 'H': /* Set cursor position. */
1466 case 'f':
1467 tty3270_goto_xy(tp, tty3270_getpar(tp, 1) - 1,
1468 tty3270_getpar(tp, 0) - 1);
1469 break;
1470 case 'd': /* Set y position. */
1471 tty3270_goto_xy(tp, tp->cx, tty3270_getpar(tp, 0) - 1);
1472 break;
1473 case 'A': /* Cursor up. */
1474 case 'F':
1475 tty3270_goto_xy(tp, tp->cx, tp->cy - tty3270_getpar(tp, 0));
1476 break;
1477 case 'B': /* Cursor down. */
1478 case 'e':
1479 case 'E':
1480 tty3270_goto_xy(tp, tp->cx, tp->cy + tty3270_getpar(tp, 0));
1481 break;
1482 case 'C': /* Cursor forward. */
1483 case 'a':
1484 tty3270_goto_xy(tp, tp->cx + tty3270_getpar(tp, 0), tp->cy);
1485 break;
1486 case 'D': /* Cursor backward. */
1487 tty3270_goto_xy(tp, tp->cx - tty3270_getpar(tp, 0), tp->cy);
1488 break;
1489 case 'G': /* Set x position. */
1490 case '`':
1491 tty3270_goto_xy(tp, tty3270_getpar(tp, 0), tp->cy);
1492 break;
1493 case 'X': /* Erase Characters. */
1494 tty3270_erase_characters(tp, tty3270_getpar(tp, 0));
1495 break;
1496 case 'J': /* Erase display. */
1497 tty3270_erase_display(tp, tp->esc_par[0]);
1498 break;
1499 case 'K': /* Erase line. */
1500 tty3270_erase_line(tp, tp->esc_par[0]);
1501 break;
1502 case 'P': /* Delete characters. */
1503 tty3270_delete_characters(tp, tty3270_getpar(tp, 0));
1504 break;
1505 case '@': /* Insert characters. */
1506 tty3270_insert_characters(tp, tty3270_getpar(tp, 0));
1507 break;
1508 case 's': /* Save cursor position. */
1509 tp->saved_cx = tp->cx;
1510 tp->saved_cy = tp->cy;
1511 tp->saved_highlight = tp->highlight;
1512 tp->saved_f_color = tp->f_color;
1513 break;
1514 case 'u': /* Restore cursor position. */
1515 tty3270_convert_line(tp, tp->cy);
1516 tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
1517 tp->highlight = tp->saved_highlight;
1518 tp->f_color = tp->saved_f_color;
1519 break;
1520 }
1521}
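
/*
 * Userspace sketch: exercising the sequence subset parsed above on a
 * tty provided by this driver. Every sequence used here appears in
 * the parser's switch statements.
 */
#include <stdio.h>

int main(void)
{
	printf("\033[2J");		/* erase the entire display */
	printf("\033[3;5H");		/* cursor to row 3, column 5 */
	printf("\033[4munder\033[24m ");	/* underline on, then off */
	printf("\033[32mgreen\033[0m\n");	/* green, then full reset */
	return 0;
}
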
1522
1523/*
1524 * String write routine for 3270 ttys
1525 */
1526static void
1527tty3270_do_write(struct tty3270 *tp, const unsigned char *buf, int count)
1528{
1529 int i_msg, i;
1530
1531 spin_lock_bh(&tp->view.lock);
1532 for (i_msg = 0; !tp->tty->stopped && i_msg < count; i_msg++) {
1533 if (tp->esc_state != 0) {
1534 /* Continue escape sequence. */
1535 tty3270_escape_sequence(tp, buf[i_msg]);
1536 continue;
1537 }
1538
1539 switch (buf[i_msg]) {
1540 case 0x07: /* '\a' -- Alarm */
1541 tp->wcc |= TW_PLUSALARM;
1542 break;
1543 case 0x08: /* Backspace. */
1544 if (tp->cx > 0) {
1545 tp->cx--;
1546 tty3270_put_character(tp, ' ');
1547 }
1548 break;
1549 case 0x09: /* '\t' -- Tabulate */
1550 for (i = tp->cx % 8; i < 8; i++) {
1551 if (tp->cx >= tp->view.cols) {
1552 tty3270_cr(tp);
1553 tty3270_lf(tp);
1554 break;
1555 }
1556 tty3270_put_character(tp, ' ');
1557 tp->cx++;
1558 }
1559 break;
1560 case 0x0a: /* '\n' -- New Line */
1561 tty3270_cr(tp);
1562 tty3270_lf(tp);
1563 break;
1564 case 0x0c: /* '\f' -- Form Feed */
1565 tty3270_erase_display(tp, 2);
1566 tp->cx = tp->cy = 0;
1567 break;
1568 case 0x0d: /* '\r' -- Carriage Return */
1569 tp->cx = 0;
1570 break;
1571 case 0x0f: /* SuSE "exit alternate mode" */
1572 break;
1573 case 0x1b: /* Start escape sequence. */
1574 tty3270_escape_sequence(tp, buf[i_msg]);
1575 break;
1576 default: /* Insert normal character. */
1577 if (tp->cx >= tp->view.cols) {
1578 tty3270_cr(tp);
1579 tty3270_lf(tp);
1580 }
1581 tty3270_put_character(tp, buf[i_msg]);
1582 tp->cx++;
1583 break;
1584 }
1585 }
1586 /* Convert current line to 3270 data fragment. */
1587 tty3270_convert_line(tp, tp->cy);
1588
1589	/* Set up timer to update the display after 1/10 second. */
1590 if (!timer_pending(&tp->timer))
1591 tty3270_set_timer(tp, HZ/10);
1592
1593 spin_unlock_bh(&tp->view.lock);
1594}
1595
1596/*
1597 * Write routine of the tty operations; flushes buffered characters first.
1598 */
1599static int
1600tty3270_write(struct tty_struct * tty,
1601 const unsigned char *buf, int count)
1602{
1603 struct tty3270 *tp;
1604
1605 tp = tty->driver_data;
1606 if (!tp)
1607 return 0;
1608 if (tp->char_count > 0) {
1609 tty3270_do_write(tp, tp->char_buf, tp->char_count);
1610 tp->char_count = 0;
1611 }
1612 tty3270_do_write(tp, buf, count);
1613 return count;
1614}
1615
1616/*
1617 * Put single characters to the ttys character buffer
1618 */
1619static void
1620tty3270_put_char(struct tty_struct *tty, unsigned char ch)
1621{
1622 struct tty3270 *tp;
1623
1624 tp = tty->driver_data;
1625 if (!tp)
1626 return;
1627 if (tp->char_count < TTY3270_CHAR_BUF_SIZE)
1628 tp->char_buf[tp->char_count++] = ch;
1629}
1630
1631/*
1632 * Flush all characters from the tty's character buffer put there
1633 * by tty3270_put_char.
1634 */
1635static void
1636tty3270_flush_chars(struct tty_struct *tty)
1637{
1638 struct tty3270 *tp;
1639
1640 tp = tty->driver_data;
1641 if (!tp)
1642 return;
1643 if (tp->char_count > 0) {
1644 tty3270_do_write(tp, tp->char_buf, tp->char_count);
1645 tp->char_count = 0;
1646 }
1647}
1648
1649/*
1650 * Returns the number of characters in the output buffer. This is
1651 * used in tty_wait_until_sent to wait until all characters have
1652 * appeared on the screen.
1653 */
1654static int
1655tty3270_chars_in_buffer(struct tty_struct *tty)
1656{
1657 return 0;
1658}
1659
1660static void
1661tty3270_flush_buffer(struct tty_struct *tty)
1662{
1663}
1664
1665/*
1666 * Check for visible/invisible input switches
1667 */
1668static void
1669tty3270_set_termios(struct tty_struct *tty, struct termios *old)
1670{
1671 struct tty3270 *tp;
1672 int new;
1673
1674 tp = tty->driver_data;
1675 if (!tp)
1676 return;
1677 spin_lock_bh(&tp->view.lock);
1678 if (L_ICANON(tty)) {
1679 new = L_ECHO(tty) ? TF_INPUT: TF_INPUTN;
1680 if (new != tp->inattr) {
1681 tp->inattr = new;
1682 tty3270_update_prompt(tp, 0, 0);
1683 tty3270_set_timer(tp, 1);
1684 }
1685 }
1686 spin_unlock_bh(&tp->view.lock);
1687}
1688
1689/*
1690 * Disable reading from a 3270 tty
1691 */
1692static void
1693tty3270_throttle(struct tty_struct * tty)
1694{
1695 struct tty3270 *tp;
1696
1697 tp = tty->driver_data;
1698 if (!tp)
1699 return;
1700 tp->throttle = 1;
1701}
1702
1703/*
1704 * Enable reading from a 3270 tty
1705 */
1706static void
1707tty3270_unthrottle(struct tty_struct * tty)
1708{
1709 struct tty3270 *tp;
1710
1711 tp = tty->driver_data;
1712 if (!tp)
1713 return;
1714 tp->throttle = 0;
1715 if (tp->attn)
1716 tty3270_issue_read(tp, 1);
1717}
1718
1719/*
1720 * Hang up the tty device.
1721 */
1722static void
1723tty3270_hangup(struct tty_struct *tty)
1724{
1725 // FIXME: implement
1726}
1727
1728static void
1729tty3270_wait_until_sent(struct tty_struct *tty, int timeout)
1730{
1731}
1732
1733static int
1734tty3270_ioctl(struct tty_struct *tty, struct file *file,
1735 unsigned int cmd, unsigned long arg)
1736{
1737 struct tty3270 *tp;
1738
1739 tp = tty->driver_data;
1740 if (!tp)
1741 return -ENODEV;
1742 if (tty->flags & (1 << TTY_IO_ERROR))
1743 return -EIO;
1744 return kbd_ioctl(tp->kbd, file, cmd, arg);
1745}
1746
1747static struct tty_operations tty3270_ops = {
1748 .open = tty3270_open,
1749 .close = tty3270_close,
1750 .write = tty3270_write,
1751 .put_char = tty3270_put_char,
1752 .flush_chars = tty3270_flush_chars,
1753 .write_room = tty3270_write_room,
1754 .chars_in_buffer = tty3270_chars_in_buffer,
1755 .flush_buffer = tty3270_flush_buffer,
1756 .throttle = tty3270_throttle,
1757 .unthrottle = tty3270_unthrottle,
1758 .hangup = tty3270_hangup,
1759 .wait_until_sent = tty3270_wait_until_sent,
1760 .ioctl = tty3270_ioctl,
1761 .set_termios = tty3270_set_termios
1762};
1763
1764void
1765tty3270_notifier(int index, int active)
1766{
1767 if (active)
1768 tty_register_device(tty3270_driver, index, 0);
1769 else
1770 tty_unregister_device(tty3270_driver, index);
1771}
1772
1773/*
1774 * 3270 tty registration code called from tty_init().
1775 * Most kernel services (incl. kmalloc) are available at this point.
1776 */
1777int __init
1778tty3270_init(void)
1779{
1780 struct tty_driver *driver;
1781 int ret;
1782
1783 driver = alloc_tty_driver(256);
1784 if (!driver)
1785 return -ENOMEM;
1786
1787 /*
1788 * Initialize the tty_driver structure
1789 * Entries in tty3270_driver that are NOT initialized:
1790 * proc_entry, set_ldisc, write_proc
1791 */
1792 driver->owner = THIS_MODULE;
1793 driver->devfs_name = "ttyTUB/";
1794 driver->driver_name = "ttyTUB";
1795 driver->name = "ttyTUB";
1796 driver->major = IBM_TTY3270_MAJOR;
1797 driver->type = TTY_DRIVER_TYPE_SYSTEM;
1798 driver->subtype = SYSTEM_TYPE_TTY;
1799 driver->init_termios = tty_std_termios;
1800 driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_NO_DEVFS;
1801 tty_set_operations(driver, &tty3270_ops);
1802 ret = tty_register_driver(driver);
1803 if (ret) {
1804 printk(KERN_ERR "tty3270 registration failed with %d\n", ret);
1805 put_tty_driver(driver);
1806 return ret;
1807 }
1808 tty3270_driver = driver;
1809 ret = raw3270_register_notifier(tty3270_notifier);
1810 if (ret) {
1811 printk(KERN_ERR "tty3270 notifier registration failed "
1812 "with %d\n", ret);
1813 put_tty_driver(driver);
1814 return ret;
1815
1816 }
1817 return 0;
1818}
1819
1820static void __exit
1821tty3270_exit(void)
1822{
1823 struct tty_driver *driver;
1824
1825 raw3270_unregister_notifier(tty3270_notifier);
1826 driver = tty3270_driver;
1827 tty3270_driver = 0;
1828 tty_unregister_driver(driver);
1829 tty3270_del_views();
1830}
1831
1832MODULE_LICENSE("GPL");
1833MODULE_ALIAS_CHARDEV_MAJOR(IBM_TTY3270_MAJOR);
1834
1835module_init(tty3270_init);
1836module_exit(tty3270_exit);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
new file mode 100644
index 000000000000..edf50d2bd10b
--- /dev/null
+++ b/drivers/s390/char/vmlogrdr.c
@@ -0,0 +1,920 @@
1/*
2 * drivers/s390/char/vmlogrdr.c
3 * character device driver for reading z/VM system service records
4 *
5 *
6 * Copyright (C) 2004 IBM Corporation
7 * character device driver for reading z/VM system service records,
8 * Version 1.0
9 * Author(s): Xenia Tkatschow <xenia@us.ibm.com>
10 * Stefan Weinhuber <wein@de.ibm.com>
11 *
12 */
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/errno.h>
16#include <linux/types.h>
17#include <linux/interrupt.h>
18#include <linux/spinlock.h>
19#include <asm/atomic.h>
20#include <asm/uaccess.h>
21#include <asm/cpcmd.h>
22#include <asm/debug.h>
23#include <asm/ebcdic.h>
24#include "../net/iucv.h"
25#include <linux/kmod.h>
26#include <linux/cdev.h>
27#include <linux/device.h>
28#include <linux/string.h>
29
30
31
32MODULE_AUTHOR
33 ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
34 " Stefan Weinhuber (wein@de.ibm.com)");
35MODULE_DESCRIPTION ("Character device driver for reading z/VM "
36 "system service records.");
37MODULE_LICENSE("GPL");
38
39
40/*
41 * The size of the buffer for iucv data transfer is one page,
42 * but in addition to the data we read from iucv we also
43 * place an integer and some characters into that buffer,
44 * so the maximum size for record data is a little less than
45 * one page.
46 */
47#define NET_BUFFER_SIZE (PAGE_SIZE - sizeof(int) - sizeof(FENCE))
48
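
/*
 * Hedged sketch, assuming the buffer layout the comment above
 * describes: [ int record_length ][ record data ][ FENCE "EOR" ].
 * A record of reclen data bytes then spans:
 */
static size_t
vmlogrdr_record_span_sketch(int reclen)
{
	return sizeof(int) + reclen + sizeof(FENCE);
}
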
49/*
50 * The elements that are concurrently accessed by bottom halves are
51 * connection_established, iucv_path_severed, local_interrupt_buffer
52 * and receive_ready. The first three can be protected by
53 * priv_lock. receive_ready is atomic, so it can be incremented and
54 * decremented without holding a lock.
55 * The variable dev_in_use needs to be protected by the lock, since
56 * it's a flag used by open to make sure that the device is opened only
57 * by one user at the same time.
58 */
59struct vmlogrdr_priv_t {
60 char system_service[8];
61 char internal_name[8];
62 char recording_name[8];
63 u16 pathid;
64 int connection_established;
65 int iucv_path_severed;
66 iucv_MessagePending local_interrupt_buffer;
67 atomic_t receive_ready;
68 iucv_handle_t iucv_handle;
69 int minor_num;
70 char * buffer;
71 char * current_position;
72 int remaining;
73 ulong residual_length;
74 int buffer_free;
75 int dev_in_use; /* 1: already opened, 0: not opened*/
76 spinlock_t priv_lock;
77 struct device *device;
78 struct class_device *class_device;
79 int autorecording;
80 int autopurge;
81};
82
83
84/*
85 * File operation structure for vmlogrdr devices
86 */
87static int vmlogrdr_open(struct inode *, struct file *);
88static int vmlogrdr_release(struct inode *, struct file *);
89static ssize_t vmlogrdr_read (struct file *filp, char *data, size_t count,
90 loff_t * ppos);
91
92static struct file_operations vmlogrdr_fops = {
93 .owner = THIS_MODULE,
94 .open = vmlogrdr_open,
95 .release = vmlogrdr_release,
96 .read = vmlogrdr_read,
97};
98
99
100static u8 iucvMagic[16] = {
101 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
102 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
103};
104
105
106static u8 mask[] = {
107 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
108 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
109 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
110 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
111};
112
113
114static u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
115
116
117static void
118vmlogrdr_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data);
119static void
120vmlogrdr_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data);
121static void
122vmlogrdr_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data);
123
124
125static iucv_interrupt_ops_t vmlogrdr_iucvops = {
126 .ConnectionComplete = vmlogrdr_iucv_ConnectionComplete,
127 .ConnectionSevered = vmlogrdr_iucv_ConnectionSevered,
128 .MessagePending = vmlogrdr_iucv_MessagePending,
129};
130
131
132DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
133DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
134
135/*
136 * pointer to system service private structure
137 * minor number 0 --> logrec
138 * minor number 1 --> account
139 * minor number 2 --> symptom
140 */
141
142static struct vmlogrdr_priv_t sys_ser[] = {
143 { .system_service = "*LOGREC ",
144 .internal_name = "logrec",
145 .recording_name = "EREP",
146 .minor_num = 0,
147 .buffer_free = 1,
148 .priv_lock = SPIN_LOCK_UNLOCKED,
149 .autorecording = 1,
150 .autopurge = 1,
151 },
152 { .system_service = "*ACCOUNT",
153 .internal_name = "account",
154 .recording_name = "ACCOUNT",
155 .minor_num = 1,
156 .buffer_free = 1,
157 .priv_lock = SPIN_LOCK_UNLOCKED,
158 .autorecording = 1,
159 .autopurge = 1,
160 },
161 { .system_service = "*SYMPTOM",
162 .internal_name = "symptom",
163 .recording_name = "SYMPTOM",
164 .minor_num = 2,
165 .buffer_free = 1,
166 .priv_lock = SPIN_LOCK_UNLOCKED,
167 .autorecording = 1,
168 .autopurge = 1,
169 }
170};
171
172#define MAXMINOR (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))
173
174static char FENCE[] = "EOR";
175static int vmlogrdr_major = 0;
176static struct cdev *vmlogrdr_cdev = NULL;
177static int recording_class_AB;
178
179
180static void
181vmlogrdr_iucv_ConnectionComplete (iucv_ConnectionComplete * eib,
182 void * pgm_data)
183{
184 struct vmlogrdr_priv_t * logptr = pgm_data;
185 spin_lock(&logptr->priv_lock);
186 logptr->connection_established = 1;
187 spin_unlock(&logptr->priv_lock);
188 wake_up(&conn_wait_queue);
189 return;
190}
191
192
193static void
194vmlogrdr_iucv_ConnectionSevered (iucv_ConnectionSevered * eib, void * pgm_data)
195{
196 u8 reason = (u8) eib->ipuser[8];
197 struct vmlogrdr_priv_t * logptr = pgm_data;
198
199 printk (KERN_ERR "vmlogrdr: connection severed with"
200 " reason %i\n", reason);
201
202 spin_lock(&logptr->priv_lock);
203 logptr->connection_established = 0;
204 logptr->iucv_path_severed = 1;
205 spin_unlock(&logptr->priv_lock);
206
207 wake_up(&conn_wait_queue);
208 /* just in case we're sleeping waiting for a record */
209 wake_up_interruptible(&read_wait_queue);
210}
211
212
213static void
214vmlogrdr_iucv_MessagePending (iucv_MessagePending * eib, void * pgm_data)
215{
216 struct vmlogrdr_priv_t * logptr = pgm_data;
217
218 /*
219 * This function is the bottom half so it should be quick.
220 * Copy the external interrupt data into our local eib and increment
221 * the usage count
222 */
223 spin_lock(&logptr->priv_lock);
224 memcpy(&(logptr->local_interrupt_buffer), eib, sizeof(*eib));
225 atomic_inc(&logptr->receive_ready);
226 spin_unlock(&logptr->priv_lock);
227 wake_up_interruptible(&read_wait_queue);
228}
229
230
231static int
232vmlogrdr_get_recording_class_AB(void) {
233 char cp_command[]="QUERY COMMAND RECORDING ";
234 char cp_response[80];
235 char *tail;
236 int len,i;
237
238 printk (KERN_DEBUG "vmlogrdr: query command: %s\n", cp_command);
239 cpcmd(cp_command, cp_response, sizeof(cp_response));
240 printk (KERN_DEBUG "vmlogrdr: response: %s", cp_response);
241 len = strnlen(cp_response,sizeof(cp_response));
242 // now the parsing
243 tail=strnchr(cp_response,len,'=');
244 if (!tail)
245 return 0;
246 tail++;
247 if (!strncmp("ANY",tail,3))
248 return 1;
249 if (!strncmp("NONE",tail,4))
250 return 0;
251 /*
252 * expect a comma-separated list of classes here; if one of them
253 * is A or B, return 1, otherwise 0
254 */
255 for (i=tail-cp_response; i<len; i++)
256 if ( cp_response[i]=='A' || cp_response[i]=='B' )
257 return 1;
258 return 0;
259}
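/*
 * Illustration (the response texts are hypothetical, not quoted from
 * z/VM documentation): the parser above only inspects what follows the
 * first '=' in the CP response:
 *
 *   "... = ANY"    -> 1  (any privilege class may record)
 *   "... = NONE"   -> 0
 *   "... = C,D"    -> 0  (no A or B in the class list)
 *   "... = A,C"    -> 1  ('A' found after the '=')
 */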
260
261
262static int
263vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, int action, int purge) {
264
265 char cp_command[80];
266 char cp_response[160];
267 char *onoff, *qid_string;
268
269 memset(cp_command, 0x00, sizeof(cp_command));
270 memset(cp_response, 0x00, sizeof(cp_response));
271
272 onoff = ((action == 1) ? "ON" : "OFF");
273 qid_string = ((recording_class_AB == 1) ? " QID * " : "");
274
275 /*
276 * The recording command needs to be called with option QID
277 * for guests that have privilege classes A or B.
278 * Purging has to be done as a separate step, because recording
279 * can't be switched on as long as records are on the queue.
280 * Doing both at the same time doesn't work.
281 */
282
283 if (purge) {
284 snprintf(cp_command, sizeof(cp_command),
285 "RECORDING %s PURGE %s",
286 logptr->recording_name,
287 qid_string);
288
289 printk (KERN_DEBUG "vmlogrdr: recording command: %s\n",
290 cp_command);
291 cpcmd(cp_command, cp_response, sizeof(cp_response));
292 printk (KERN_DEBUG "vmlogrdr: recording response: %s",
293 cp_response);
294 }
295
296 memset(cp_command, 0x00, sizeof(cp_command));
297 memset(cp_response, 0x00, sizeof(cp_response));
298 snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
299 logptr->recording_name,
300 onoff,
301 qid_string);
302
303 printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command);
304 cpcmd(cp_command, cp_response, sizeof(cp_response));
305 printk (KERN_DEBUG "vmlogrdr: recording response: %s",
306 cp_response);
307 /* The recording command will usually answer with 'Command complete'
308 * on success, but if the specific service was never connected
309 * before, there might be an additional informational message
310 * 'HCPCRC8072I Recording entry not found' before the
311 * 'Command complete'. So I use strstr rather than strncmp.
312 */
313 if (strstr(cp_response,"Command complete"))
314 return 0;
315 else
316 return -EIO;
317
318}
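/*
 * Example: for the *LOGREC service (recording_name "EREP") on a guest
 * with recording_class_AB set, the above builds roughly these CP
 * commands (modulo the spaces carried by qid_string):
 *
 *   RECORDING EREP PURGE QID *     (only when purge was requested)
 *   RECORDING EREP ON QID *        (or OFF, depending on action)
 */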
319
320
321static int
322vmlogrdr_open (struct inode *inode, struct file *filp)
323{
324 int dev_num = 0;
325 struct vmlogrdr_priv_t * logptr = NULL;
326 int connect_rc = 0;
327 int ret;
328
329 dev_num = iminor(inode);
330	if (dev_num >= MAXMINOR) /* valid minors are 0 .. MAXMINOR-1 */
331 return -ENODEV;
332
333 logptr = &sys_ser[dev_num];
334 if (logptr == NULL)
335 return -ENODEV;
336
337 /*
338	 * only allow the device to be opened for blocking reads
339 */
340 if (filp->f_flags & O_NONBLOCK)
341 return -ENOSYS;
342
343	/* Be sure this device hasn't already been opened */
344 spin_lock_bh(&logptr->priv_lock);
345 if (logptr->dev_in_use) {
346 spin_unlock_bh(&logptr->priv_lock);
347 return -EBUSY;
348 } else {
349 logptr->dev_in_use = 1;
350 spin_unlock_bh(&logptr->priv_lock);
351 }
352
353 atomic_set(&logptr->receive_ready, 0);
354 logptr->buffer_free = 1;
355
356 /* set the file options */
357 filp->private_data = logptr;
358 filp->f_op = &vmlogrdr_fops;
359
360 /* start recording for this service*/
361 ret=0;
362 if (logptr->autorecording)
363 ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
364 if (ret)
365 printk (KERN_WARNING "vmlogrdr: failed to start "
366 "recording automatically\n");
367
368 /* Register with iucv driver */
369 logptr->iucv_handle = iucv_register_program(iucvMagic,
370 logptr->system_service, mask, &vmlogrdr_iucvops,
371 logptr);
372
373 if (logptr->iucv_handle == NULL) {
374		printk (KERN_ERR "vmlogrdr: failed to register with"
375			" iucv driver\n");
376 goto not_registered;
377 }
378
379 /* create connection to the system service */
380 spin_lock_bh(&logptr->priv_lock);
381 logptr->connection_established = 0;
382 logptr->iucv_path_severed = 0;
383 spin_unlock_bh(&logptr->priv_lock);
384
385 connect_rc = iucv_connect (&(logptr->pathid), 10, iucvMagic,
386 logptr->system_service, iucv_host, 0,
387 NULL, NULL,
388 logptr->iucv_handle, NULL);
389 if (connect_rc) {
390 printk (KERN_ERR "vmlogrdr: iucv connection to %s "
391 "failed with rc %i \n", logptr->system_service,
392 connect_rc);
393 goto not_connected;
394 }
395
396 /* We've issued the connect and now we must wait for a
397	 * ConnectionComplete or ConnectionSevered interrupt
398 * before we can continue to process.
399 */
400 wait_event(conn_wait_queue, (logptr->connection_established)
401 || (logptr->iucv_path_severed));
402 if (logptr->iucv_path_severed) {
403 goto not_connected;
404 }
405
406 return nonseekable_open(inode, filp);
407
408not_connected:
409 iucv_unregister_program(logptr->iucv_handle);
410 logptr->iucv_handle = NULL;
411not_registered:
412 if (logptr->autorecording)
413 vmlogrdr_recording(logptr,0,logptr->autopurge);
414 logptr->dev_in_use = 0;
415 return -EIO;
416
417
418}
419
420
421static int
422vmlogrdr_release (struct inode *inode, struct file *filp)
423{
424 int ret;
425
426 struct vmlogrdr_priv_t * logptr = filp->private_data;
427
428 iucv_unregister_program(logptr->iucv_handle);
429 logptr->iucv_handle = NULL;
430
431 if (logptr->autorecording) {
432 ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
433 if (ret)
434 printk (KERN_WARNING "vmlogrdr: failed to stop "
435 "recording automatically\n");
436 }
437 logptr->dev_in_use = 0;
438
439 return 0;
440}
441
442
443static int
444vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) {
445 int rc, *temp;
446 /* we need to keep track of two data sizes here:
447 * The number of bytes we need to receive from iucv and
448 * the total number of bytes we actually write into the buffer.
449 */
450 int user_data_count, iucv_data_count;
451 char * buffer;
452
453 if (atomic_read(&priv->receive_ready)) {
454 spin_lock_bh(&priv->priv_lock);
455 if (priv->residual_length){
456 /* receive second half of a record */
457 iucv_data_count = priv->residual_length;
458 user_data_count = 0;
459 buffer = priv->buffer;
460 } else {
461 /* receive a new record:
462 * We need to return the total length of the record
463 * + size of FENCE in the first 4 bytes of the buffer.
464 */
465 iucv_data_count =
466 priv->local_interrupt_buffer.ln1msg2.ipbfln1f;
467 user_data_count = sizeof(int);
468 temp = (int*)priv->buffer;
469 *temp= iucv_data_count + sizeof(FENCE);
470 buffer = priv->buffer + sizeof(int);
471 }
472 /*
473	 * If the record is bigger than our buffer, we receive only
474 * a part of it. We can get the rest later.
475 */
476 if (iucv_data_count > NET_BUFFER_SIZE)
477 iucv_data_count = NET_BUFFER_SIZE;
478 rc = iucv_receive(priv->pathid,
479 priv->local_interrupt_buffer.ipmsgid,
480 priv->local_interrupt_buffer.iptrgcls,
481 buffer,
482 iucv_data_count,
483 NULL,
484 NULL,
485 &priv->residual_length);
486 spin_unlock_bh(&priv->priv_lock);
487		/* An rc of 5 indicates that the record was bigger than
488		 * the buffer, which is OK for us. A 9 indicates that the
489		 * record was purged before we could receive it.
490 */
491 if (rc == 5)
492 rc = 0;
493 if (rc == 9)
494 atomic_set(&priv->receive_ready, 0);
495 } else {
496 rc = 1;
497 }
498 if (!rc) {
499 priv->buffer_free = 0;
500 user_data_count += iucv_data_count;
501 priv->current_position = priv->buffer;
502 if (priv->residual_length == 0){
503 /* the whole record has been captured,
504 * now add the fence */
505 atomic_dec(&priv->receive_ready);
506 buffer = priv->buffer + user_data_count;
507 memcpy(buffer, FENCE, sizeof(FENCE));
508 user_data_count += sizeof(FENCE);
509 }
510 priv->remaining = user_data_count;
511 }
512
513 return rc;
514}
515
516
517static ssize_t
518vmlogrdr_read (struct file *filp, char *data, size_t count, loff_t * ppos)
519{
520 int rc;
521 struct vmlogrdr_priv_t * priv = filp->private_data;
522
523 while (priv->buffer_free) {
524 rc = vmlogrdr_receive_data(priv);
525 if (rc) {
526 rc = wait_event_interruptible(read_wait_queue,
527 atomic_read(&priv->receive_ready));
528 if (rc)
529 return rc;
530 }
531 }
532 /* copy only up to end of record */
533 if (count > priv->remaining)
534 count = priv->remaining;
535
536 if (copy_to_user(data, priv->current_position, count))
537 return -EFAULT;
538
539 *ppos += count;
540 priv->current_position += count;
541 priv->remaining -= count;
542
543 /* if all data has been transferred, set buffer free */
544 if (priv->remaining == 0)
545 priv->buffer_free = 1;
546
547 return count;
548}
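/*
 * A minimal user-space reader, as a sketch: each record arrives as a
 * 4-byte native-endian length (record data plus fence), followed by
 * the data and the "EOR\0" fence; read() never crosses a record
 * boundary. The device path is illustrative and must match a node
 * created for this driver's major/minor:
 *
 *	int fd = open("/dev/vmlogrdr_logrec", O_RDONLY); // blocking only
 *	int len;
 *	read(fd, &len, sizeof(len));	// length of what follows
 *	char *buf = malloc(len);
 *	for (int done = 0; done < len; )	// may take several reads
 *		done += read(fd, buf + done, len - done);
 *	// buf now ends with the "EOR" fence
 */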
549
550static ssize_t
551vmlogrdr_autopurge_store(struct device * dev, const char * buf, size_t count) {
552 struct vmlogrdr_priv_t *priv = dev->driver_data;
553 ssize_t ret = count;
554
555 switch (buf[0]) {
556 case '0':
557 priv->autopurge=0;
558 break;
559 case '1':
560 priv->autopurge=1;
561 break;
562 default:
563 ret = -EINVAL;
564 }
565 return ret;
566}
567
568
569static ssize_t
570vmlogrdr_autopurge_show(struct device *dev, char *buf) {
571 struct vmlogrdr_priv_t *priv = dev->driver_data;
572 return sprintf(buf, "%u\n", priv->autopurge);
573}
574
575
576static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
577 vmlogrdr_autopurge_store);
578
579
580static ssize_t
581vmlogrdr_purge_store(struct device * dev, const char * buf, size_t count) {
582
583 char cp_command[80];
584 char cp_response[80];
585 struct vmlogrdr_priv_t *priv = dev->driver_data;
586
587 if (buf[0] != '1')
588 return -EINVAL;
589
590 memset(cp_command, 0x00, sizeof(cp_command));
591 memset(cp_response, 0x00, sizeof(cp_response));
592
593 /*
594 * The recording command needs to be called with option QID
595	 * for guests that have privilege classes A or B.
596 * Other guests will not recognize the command and we have to
597 * issue the same command without the QID parameter.
598 */
599
600 if (recording_class_AB)
601 snprintf(cp_command, sizeof(cp_command),
602 "RECORDING %s PURGE QID * ",
603 priv->recording_name);
604 else
605 snprintf(cp_command, sizeof(cp_command),
606 "RECORDING %s PURGE ",
607 priv->recording_name);
608
609 printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command);
610 cpcmd(cp_command, cp_response, sizeof(cp_response));
611 printk (KERN_DEBUG "vmlogrdr: recording response: %s",
612 cp_response);
613
614 return count;
615}
616
617
618static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
619
620
621static ssize_t
622vmlogrdr_autorecording_store(struct device *dev, const char *buf,
623 size_t count) {
624 struct vmlogrdr_priv_t *priv = dev->driver_data;
625 ssize_t ret = count;
626
627 switch (buf[0]) {
628 case '0':
629 priv->autorecording=0;
630 break;
631 case '1':
632 priv->autorecording=1;
633 break;
634 default:
635 ret = -EINVAL;
636 }
637 return ret;
638}
639
640
641static ssize_t
642vmlogrdr_autorecording_show(struct device *dev, char *buf) {
643 struct vmlogrdr_priv_t *priv = dev->driver_data;
644 return sprintf(buf, "%u\n", priv->autorecording);
645}
646
647
648static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
649 vmlogrdr_autorecording_store);
650
651
652static ssize_t
653vmlogrdr_recording_store(struct device * dev, const char * buf, size_t count) {
654
655 struct vmlogrdr_priv_t *priv = dev->driver_data;
656 ssize_t ret;
657
658 switch (buf[0]) {
659 case '0':
660 ret = vmlogrdr_recording(priv,0,0);
661 break;
662 case '1':
663 ret = vmlogrdr_recording(priv,1,0);
664 break;
665 default:
666 ret = -EINVAL;
667 }
668 if (ret)
669 return ret;
670 else
671 return count;
672
673}
674
675
676static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
677
678
679static ssize_t
680vmlogrdr_recording_status_show(struct device_driver *driver, char *buf) {
681
682 char cp_command[] = "QUERY RECORDING ";
683 int len;
684
685 cpcmd(cp_command, buf, 4096);
686 len = strlen(buf);
687 return len;
688}
689
690
691static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
692 NULL);
693
694static struct attribute *vmlogrdr_attrs[] = {
695 &dev_attr_autopurge.attr,
696 &dev_attr_purge.attr,
697 &dev_attr_autorecording.attr,
698 &dev_attr_recording.attr,
699 NULL,
700};
701
702static struct attribute_group vmlogrdr_attr_group = {
703 .attrs = vmlogrdr_attrs,
704};
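/*
 * These attributes appear in the device's sysfs directory on the iucv
 * bus; paths below are for the logrec device and assume sysfs is
 * mounted at /sys:
 *
 *	echo 1 > /sys/bus/iucv/devices/logrec/autopurge   # purge on open/close
 *	echo 1 > /sys/bus/iucv/devices/logrec/purge       # purge the queue now
 *	echo 0 > /sys/bus/iucv/devices/logrec/recording   # stop recording
 */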
705
706static struct class_simple *vmlogrdr_class;
707static struct device_driver vmlogrdr_driver = {
708 .name = "vmlogrdr",
709 .bus = &iucv_bus,
710};
711
712
713static int
714vmlogrdr_register_driver(void) {
715 int ret;
716
717 ret = driver_register(&vmlogrdr_driver);
718 if (ret) {
719 printk(KERN_ERR "vmlogrdr: failed to register driver.\n");
720 return ret;
721 }
722
723 ret = driver_create_file(&vmlogrdr_driver,
724 &driver_attr_recording_status);
725 if (ret) {
726 printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n");
727 goto unregdriver;
728 }
729
730 vmlogrdr_class = class_simple_create(THIS_MODULE, "vmlogrdr");
731 if (IS_ERR(vmlogrdr_class)) {
732 printk(KERN_ERR "vmlogrdr: failed to create class.\n");
733 ret=PTR_ERR(vmlogrdr_class);
734 vmlogrdr_class=NULL;
735 goto unregattr;
736 }
737 return 0;
738
739unregattr:
740 driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
741unregdriver:
742 driver_unregister(&vmlogrdr_driver);
743 return ret;
744}
745
746
747static void
748vmlogrdr_unregister_driver(void) {
749 class_simple_destroy(vmlogrdr_class);
750 vmlogrdr_class = NULL;
751 driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
752 driver_unregister(&vmlogrdr_driver);
753 return;
754}
755
756
757static int
758vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) {
759 struct device *dev;
760 int ret;
761
762 dev = kmalloc(sizeof(struct device), GFP_KERNEL);
763 if (dev) {
764 memset(dev, 0, sizeof(struct device));
765 snprintf(dev->bus_id, BUS_ID_SIZE, "%s",
766 priv->internal_name);
767 dev->bus = &iucv_bus;
768 dev->parent = iucv_root;
769 dev->driver = &vmlogrdr_driver;
770 /*
771 * The release function could be called after the
772		 * module has been unloaded. Its _only_ task is to
773		 * free the struct. Therefore, we specify kfree()
774		 * directly here. (Probably a little bit obfuscated,
775		 * but legitimate ...).
776 */
777 dev->release = (void (*)(struct device *))kfree;
778 } else
779 return -ENOMEM;
780 ret = device_register(dev);
781 if (ret)
782 return ret;
783
784 ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
785 if (ret) {
786 device_unregister(dev);
787 return ret;
788 }
789 priv->class_device = class_simple_device_add(
790 vmlogrdr_class,
791 MKDEV(vmlogrdr_major, priv->minor_num),
792 dev,
793 "%s", dev->bus_id );
794 if (IS_ERR(priv->class_device)) {
795 ret = PTR_ERR(priv->class_device);
796 priv->class_device=NULL;
797 sysfs_remove_group(&dev->kobj, &vmlogrdr_attr_group);
798 device_unregister(dev);
799 return ret;
800 }
801 dev->driver_data = priv;
802 priv->device = dev;
803 return 0;
804}
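/*
 * After registration the minor numbers can be looked up through the
 * class directory, so user space can create the device nodes (a
 * sketch; the major:minor pair is hypothetical and the exact sysfs
 * layout depends on the kernel version):
 *
 *	$ cat /sys/class/vmlogrdr/logrec/dev
 *	254:0
 *	$ mknod /dev/vmlogrdr_logrec c 254 0
 */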
805
806
807static int
808vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv ) {
809 class_simple_device_remove(MKDEV(vmlogrdr_major, priv->minor_num));
810 if (priv->device != NULL) {
811 sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
812 device_unregister(priv->device);
813 priv->device=NULL;
814 }
815 return 0;
816}
817
818
819static int
820vmlogrdr_register_cdev(dev_t dev) {
821 int rc = 0;
822 vmlogrdr_cdev = cdev_alloc();
823 if (!vmlogrdr_cdev) {
824 return -ENOMEM;
825 }
826 vmlogrdr_cdev->owner = THIS_MODULE;
827 vmlogrdr_cdev->ops = &vmlogrdr_fops;
828 vmlogrdr_cdev->dev = dev;
829 rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
830 if (!rc)
831 return 0;
832
833 // cleanup: cdev is not fully registered, no cdev_del here!
834 kobject_put(&vmlogrdr_cdev->kobj);
835 vmlogrdr_cdev=NULL;
836 return rc;
837}
838
839
840static void
841vmlogrdr_cleanup(void) {
842 int i;
843 if (vmlogrdr_cdev) {
844 cdev_del(vmlogrdr_cdev);
845 vmlogrdr_cdev=NULL;
846 }
847 for (i=0; i < MAXMINOR; ++i ) {
848 vmlogrdr_unregister_device(&sys_ser[i]);
849 free_page((unsigned long)sys_ser[i].buffer);
850 }
851 vmlogrdr_unregister_driver();
852 if (vmlogrdr_major) {
853 unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
854 vmlogrdr_major=0;
855 }
856}
857
858
859static int
860vmlogrdr_init(void)
861{
862 int rc;
863 int i;
864 dev_t dev;
865
866 if (! MACHINE_IS_VM) {
867 printk (KERN_ERR "vmlogrdr: not running under VM, "
868 "driver not loaded.\n");
869 return -ENODEV;
870 }
871
872 recording_class_AB = vmlogrdr_get_recording_class_AB();
873
874 rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
875 if (rc)
876 return rc;
877 vmlogrdr_major = MAJOR(dev);
878
879 rc=vmlogrdr_register_driver();
880 if (rc)
881 goto cleanup;
882
883 for (i=0; i < MAXMINOR; ++i ) {
884 sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
885 if (!sys_ser[i].buffer) {
886			rc = -ENOMEM;	/* return a negative errno */
887 break;
888 }
889 sys_ser[i].current_position = sys_ser[i].buffer;
890 rc=vmlogrdr_register_device(&sys_ser[i]);
891 if (rc)
892 break;
893 }
894 if (rc)
895 goto cleanup;
896
897 rc = vmlogrdr_register_cdev(dev);
898 if (rc)
899 goto cleanup;
900 printk (KERN_INFO "vmlogrdr: driver loaded\n");
901 return 0;
902
903cleanup:
904 vmlogrdr_cleanup();
905 printk (KERN_ERR "vmlogrdr: driver not loaded.\n");
906 return rc;
907}
908
909
910static void
911vmlogrdr_exit(void)
912{
913 vmlogrdr_cleanup();
914 printk (KERN_INFO "vmlogrdr: driver unloaded\n");
915 return;
916}
917
918
919module_init(vmlogrdr_init);
920module_exit(vmlogrdr_exit);
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
new file mode 100644
index 000000000000..22cf4fec8da9
--- /dev/null
+++ b/drivers/s390/char/vmwatchdog.c
@@ -0,0 +1,292 @@
1/*
2 * Watchdog implementation based on z/VM Watchdog Timer API
3 *
4 * The user space watchdog daemon can use this driver as
5 * /dev/vmwatchdog to have z/VM execute the specified CP
6 * command when the timeout expires. The default command is
7 * "IPL", which which cause an immediate reboot.
8 */
9#include <linux/init.h>
10#include <linux/fs.h>
11#include <linux/kernel.h>
12#include <linux/miscdevice.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/watchdog.h>
16
17#include <asm/ebcdic.h>
18#include <asm/io.h>
19#include <asm/uaccess.h>
20
21#define MAX_CMDLEN 240
22#define MIN_INTERVAL 15
23static char vmwdt_cmd[MAX_CMDLEN] = "IPL";
24static int vmwdt_conceal;
25
26#ifdef CONFIG_WATCHDOG_NOWAYOUT
27static int vmwdt_nowayout = 1;
28#else
29static int vmwdt_nowayout = 0;
30#endif
31
32MODULE_LICENSE("GPL");
33MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
34MODULE_DESCRIPTION("z/VM Watchdog Timer");
35module_param_string(cmd, vmwdt_cmd, MAX_CMDLEN, 0644);
36MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers");
37module_param_named(conceal, vmwdt_conceal, bool, 0644);
38MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog"
39 " is active");
40module_param_named(nowayout, vmwdt_nowayout, bool, 0);
41MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
42 " (default=CONFIG_WATCHDOG_NOWAYOUT)");
43MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
44
45static unsigned int vmwdt_interval = 60;
46static unsigned long vmwdt_is_open;
47static int vmwdt_expect_close;
48
49enum vmwdt_func {
50 /* function codes */
51 wdt_init = 0,
52 wdt_change = 1,
53 wdt_cancel = 2,
54 /* flags */
55 wdt_conceal = 0x80000000,
56};
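/*
 * Sketch of how these codes combine with __diag288() below (wdt_change
 * is part of the interface but unused here; this driver simply
 * re-issues wdt_init to re-arm the timer):
 *
 *	__diag288(wdt_init | wdt_conceal, 60, cmd, len); // arm, CONCEAL on
 *	__diag288(wdt_init, 60, cmd, len);               // arm / re-arm
 *	__diag288(wdt_cancel, 0, "", 0);                 // stop the watchdog
 */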
57
58static int __diag288(enum vmwdt_func func, unsigned int timeout,
59 char *cmd, size_t len)
60{
61 register unsigned long __func asm("2");
62 register unsigned long __timeout asm("3");
63 register unsigned long __cmdp asm("4");
64 register unsigned long __cmdl asm("5");
65 int err;
66
67 __func = func;
68 __timeout = timeout;
69 __cmdp = virt_to_phys(cmd);
70 __cmdl = len;
71 err = 0;
72 asm volatile (
73#ifdef __s390x__
74 "diag %2,%4,0x288\n"
75 "1: \n"
76 ".section .fixup,\"ax\"\n"
77 "2: lghi %0,%1\n"
78 " jg 1b\n"
79 ".previous\n"
80 ".section __ex_table,\"a\"\n"
81 " .align 8\n"
82 " .quad 1b,2b\n"
83 ".previous\n"
84#else
85 "diag %2,%4,0x288\n"
86 "1: \n"
87 ".section .fixup,\"ax\"\n"
88 "2: lhi %0,%1\n"
89 " bras 1,3f\n"
90 " .long 1b\n"
91 "3: l 1,0(1)\n"
92 " br 1\n"
93 ".previous\n"
94 ".section __ex_table,\"a\"\n"
95 " .align 4\n"
96 " .long 1b,2b\n"
97 ".previous\n"
98#endif
99 : "+&d"(err)
100 : "i"(-EINVAL), "d"(__func), "d"(__timeout),
101 "d"(__cmdp), "d"(__cmdl)
102 : "1", "cc");
103 return err;
104}
105
106static int vmwdt_keepalive(void)
107{
108 /* we allocate new memory every time to avoid having
109 * to track the state. static allocation is not an
110 * option since that might not be contiguous in real
111 * storage in case of a modular build */
112	char *ebc_cmd;	/* allocated fresh on each call, see above */
113 size_t len;
114 int ret;
115 unsigned int func;
116
117 ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
118 if (!ebc_cmd)
119 return -ENOMEM;
120
121 len = strlcpy(ebc_cmd, vmwdt_cmd, MAX_CMDLEN);
122 ASCEBC(ebc_cmd, MAX_CMDLEN);
123 EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
124
125 func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init;
126 ret = __diag288(func, vmwdt_interval, ebc_cmd, len);
127 kfree(ebc_cmd);
128
129 if (ret) {
130 printk(KERN_WARNING "%s: problem setting interval %d, "
131 "cmd %s\n", __FUNCTION__, vmwdt_interval,
132 vmwdt_cmd);
133 }
134 return ret;
135}
136
137static int vmwdt_disable(void)
138{
139 int ret = __diag288(wdt_cancel, 0, "", 0);
140 if (ret) {
141 printk(KERN_WARNING "%s: problem disabling watchdog\n",
142 __FUNCTION__);
143 }
144 return ret;
145}
146
147static int __init vmwdt_probe(void)
148{
149 /* there is no real way to see if the watchdog is supported,
150 * so we try initializing it with a NOP command ("BEGIN")
151 * that won't cause any harm even if the following disable
152 * fails for some reason */
153 static char __initdata ebc_begin[] = {
154		194, 197, 199, 201, 213	/* EBCDIC "BEGIN" */
155 };
156 if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0) {
157 printk(KERN_INFO "z/VM watchdog not available\n");
158 return -EINVAL;
159 }
160 return vmwdt_disable();
161}
162
163static int vmwdt_open(struct inode *i, struct file *f)
164{
165 int ret;
166 if (test_and_set_bit(0, &vmwdt_is_open))
167 return -EBUSY;
168 ret = vmwdt_keepalive();
169 if (ret)
170 clear_bit(0, &vmwdt_is_open);
171 return ret ? ret : nonseekable_open(i, f);
172}
173
174static int vmwdt_close(struct inode *i, struct file *f)
175{
176 if (vmwdt_expect_close == 42)
177 vmwdt_disable();
178 vmwdt_expect_close = 0;
179 clear_bit(0, &vmwdt_is_open);
180 return 0;
181}
182
183static struct watchdog_info vmwdt_info = {
184 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
185 .firmware_version = 0,
186 .identity = "z/VM Watchdog Timer",
187};
188
189static int vmwdt_ioctl(struct inode *i, struct file *f,
190 unsigned int cmd, unsigned long arg)
191{
192 switch (cmd) {
193 case WDIOC_GETSUPPORT:
194 if (copy_to_user((void __user *)arg, &vmwdt_info,
195 sizeof(vmwdt_info)))
196 return -EFAULT;
197 return 0;
198 case WDIOC_GETSTATUS:
199 case WDIOC_GETBOOTSTATUS:
200 return put_user(0, (int *)arg);
201 case WDIOC_GETTEMP:
202 return -EINVAL;
203 case WDIOC_SETOPTIONS:
204 {
205 int options, ret;
206 if (get_user(options, (int __user *)arg))
207 return -EFAULT;
208 ret = -EINVAL;
209 if (options & WDIOS_DISABLECARD) {
210 ret = vmwdt_disable();
211 if (ret)
212 return ret;
213 }
214 if (options & WDIOS_ENABLECARD) {
215 ret = vmwdt_keepalive();
216 }
217 return ret;
218 }
219 case WDIOC_GETTIMEOUT:
220 return put_user(vmwdt_interval, (int __user *)arg);
221 case WDIOC_SETTIMEOUT:
222 {
223 int interval;
224 if (get_user(interval, (int __user *)arg))
225 return -EFAULT;
226 if (interval < MIN_INTERVAL)
227 return -EINVAL;
228 vmwdt_interval = interval;
229 }
230 return vmwdt_keepalive();
231 case WDIOC_KEEPALIVE:
232 return vmwdt_keepalive();
233 }
234
235 return -EINVAL;
236}
237
238static ssize_t vmwdt_write(struct file *f, const char __user *buf,
239 size_t count, loff_t *ppos)
240{
241 if(count) {
242 if (!vmwdt_nowayout) {
243 size_t i;
244
245 /* note: just in case someone wrote the magic character
246 * five months ago... */
247 vmwdt_expect_close = 0;
248
249 for (i = 0; i != count; i++) {
250 char c;
251 if (get_user(c, buf+i))
252 return -EFAULT;
253 if (c == 'V')
254 vmwdt_expect_close = 42;
255 }
256 }
257 /* someone wrote to us, we should restart timer */
258 vmwdt_keepalive();
259 }
260 return count;
261}
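/*
 * Typical user-space interaction, following the standard Linux
 * watchdog API (a sketch):
 *
 *	int fd = open("/dev/vmwatchdog", O_WRONLY);
 *	int timeout = 60;
 *	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	// also pings the timer
 *	ioctl(fd, WDIOC_KEEPALIVE, 0);		// ping while healthy
 *	write(fd, "V", 1);			// magic close character
 *	close(fd);				// disables unless nowayout
 */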
262
263static struct file_operations vmwdt_fops = {
264 .open = &vmwdt_open,
265 .release = &vmwdt_close,
266 .ioctl = &vmwdt_ioctl,
267 .write = &vmwdt_write,
268 .owner = THIS_MODULE,
269};
270
271static struct miscdevice vmwdt_dev = {
272 .minor = WATCHDOG_MINOR,
273 .name = "watchdog",
274 .fops = &vmwdt_fops,
275};
276
277static int __init vmwdt_init(void)
278{
279 int ret;
280
281 ret = vmwdt_probe();
282 if (ret)
283 return ret;
284 return misc_register(&vmwdt_dev);
285}
286module_init(vmwdt_init);
287
288static void __exit vmwdt_exit(void)
289{
290 WARN_ON(misc_deregister(&vmwdt_dev) != 0);
291}
292module_exit(vmwdt_exit);
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
new file mode 100644
index 000000000000..c490c2a1c2fc
--- /dev/null
+++ b/drivers/s390/cio/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the S/390 common i/o drivers
3#
4
5obj-y += airq.o blacklist.o chsc.o cio.o css.o
6ccw_device-objs += device.o device_fsm.o device_ops.o
7ccw_device-objs += device_id.o device_pgid.o device_status.o
8obj-y += ccw_device.o cmf.o
9obj-$(CONFIG_CCWGROUP) += ccwgroup.o
10obj-$(CONFIG_QDIO) += qdio.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
new file mode 100644
index 000000000000..3720e77b465f
--- /dev/null
+++ b/drivers/s390/cio/airq.c
@@ -0,0 +1,87 @@
1/*
2 * drivers/s390/cio/airq.c
3 * S/390 common I/O routines -- support for adapter interruptions
4 *
5 * $Revision: 1.12 $
6 *
7 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
8 * IBM Corporation
9 * Author(s): Ingo Adlung (adlung@de.ibm.com)
10 * Cornelia Huck (cohuck@de.ibm.com)
11 * Arnd Bergmann (arndb@de.ibm.com)
12 */
13
14#include <linux/init.h>
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/rcupdate.h>
18
19#include "cio_debug.h"
20#include "airq.h"
21
22static adapter_int_handler_t adapter_handler;
23
24/*
25 * register for adapter interrupts
26 *
27 * With HiperSockets the zSeries architecture provides
28 * means of adapter interrupts, pseudo I/O interrupts that are
29 * not tied to an I/O subchannel but to an adapter. However,
30 * it only discloses how to recognize them, not how to
31 * enable or disable them. Perhaps we should consider them
32 * being shared interrupts, and thus build a linked list
33 * of adapter handlers ... to be evaluated ...
34 */
35int
36s390_register_adapter_interrupt (adapter_int_handler_t handler)
37{
38 int ret;
39 char dbf_txt[15];
40
41 CIO_TRACE_EVENT (4, "rgaint");
42
43 if (handler == NULL)
44 ret = -EINVAL;
45 else
46 ret = (cmpxchg(&adapter_handler, NULL, handler) ? -EBUSY : 0);
47 if (!ret)
48 synchronize_kernel();
49
50 sprintf (dbf_txt, "ret:%d", ret);
51 CIO_TRACE_EVENT (4, dbf_txt);
52
53 return ret;
54}
55
56int
57s390_unregister_adapter_interrupt (adapter_int_handler_t handler)
58{
59 int ret;
60 char dbf_txt[15];
61
62 CIO_TRACE_EVENT (4, "urgaint");
63
64 if (handler == NULL)
65 ret = -EINVAL;
66 else {
67 adapter_handler = NULL;
68 synchronize_kernel();
69 ret = 0;
70 }
71 sprintf (dbf_txt, "ret:%d", ret);
72 CIO_TRACE_EVENT (4, dbf_txt);
73
74 return ret;
75}
76
77void
78do_adapter_IO (void)
79{
80 CIO_TRACE_EVENT (6, "doaio");
81
82 if (adapter_handler)
83 (*adapter_handler) ();
84}
85
86EXPORT_SYMBOL (s390_register_adapter_interrupt);
87EXPORT_SYMBOL (s390_unregister_adapter_interrupt);
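/*
 * Usage sketch for an adapter interrupt consumer; the handler is
 * invoked via do_adapter_IO on the interrupt path, so it should not
 * block (handler name is illustrative):
 *
 *	static int my_adapter_handler(void)
 *	{
 *		// scan the adapter's summary indicators here
 *		return 0;
 *	}
 *
 *	ret = s390_register_adapter_interrupt(my_adapter_handler);
 *	...
 *	s390_unregister_adapter_interrupt(my_adapter_handler);
 */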
diff --git a/drivers/s390/cio/airq.h b/drivers/s390/cio/airq.h
new file mode 100644
index 000000000000..7d6be3fdcd66
--- /dev/null
+++ b/drivers/s390/cio/airq.h
@@ -0,0 +1,10 @@
1#ifndef S390_AINTERRUPT_H
2#define S390_AINTERRUPT_H
3
4typedef int (*adapter_int_handler_t)(void);
5
6extern int s390_register_adapter_interrupt(adapter_int_handler_t handler);
7extern int s390_unregister_adapter_interrupt(adapter_int_handler_t handler);
8extern void do_adapter_IO (void);
9
10#endif
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
new file mode 100644
index 000000000000..4a06c7d0e5e4
--- /dev/null
+++ b/drivers/s390/cio/blacklist.c
@@ -0,0 +1,351 @@
1/*
2 * drivers/s390/cio/blacklist.c
3 * S/390 common I/O routines -- blacklisting of specific devices
4 * $Revision: 1.33 $
5 *
6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation
8 * Author(s): Ingo Adlung (adlung@de.ibm.com)
9 * Cornelia Huck (cohuck@de.ibm.com)
10 * Arnd Bergmann (arndb@de.ibm.com)
11 */
12
13#include <linux/config.h>
14#include <linux/init.h>
15#include <linux/vmalloc.h>
16#include <linux/slab.h>
17#include <linux/proc_fs.h>
18#include <linux/ctype.h>
19#include <linux/device.h>
20
21#include <asm/cio.h>
22#include <asm/uaccess.h>
23
24#include "blacklist.h"
25#include "cio.h"
26#include "cio_debug.h"
27#include "css.h"
28
29/*
30 * "Blacklisting" of certain devices:
31 * Device numbers given in the commandline as cio_ignore=... won't be known
32 * to Linux.
33 *
34 * These can be single devices or ranges of devices
35 */
36
37/* 65536 bits to indicate if a devno is blacklisted or not */
38#define __BL_DEV_WORDS ((__MAX_SUBCHANNELS + (8*sizeof(long) - 1)) / \
39			 (8*sizeof(long)))
40static unsigned long bl_dev[__BL_DEV_WORDS];
41typedef enum {add, free} range_action;
42
43/*
44 * Function: blacklist_range
45 * (Un-)blacklist the devices from-to
46 */
47static inline void
48blacklist_range (range_action action, unsigned int from, unsigned int to)
49{
50 if (!to)
51 to = from;
52
53 if (from > to || to > __MAX_SUBCHANNELS) {
54 printk (KERN_WARNING "Invalid blacklist range "
55 "0x%04x to 0x%04x, skipping\n", from, to);
56 return;
57 }
58 for (; from <= to; from++) {
59 if (action == add)
60 set_bit (from, bl_dev);
61 else
62 clear_bit (from, bl_dev);
63 }
64}
65
66/*
67 * Function: blacklist_busid
68 * Get devno/busid from given string.
69 * Shamelessly grabbed from dasd_devmap.c.
70 */
71static inline int
72blacklist_busid(char **str, int *id0, int *id1, int *devno)
73{
74 int val, old_style;
75 char *sav;
76
77 sav = *str;
78
79 /* check for leading '0x' */
80 old_style = 0;
81 if ((*str)[0] == '0' && (*str)[1] == 'x') {
82 *str += 2;
83 old_style = 1;
84 }
85 if (!isxdigit((*str)[0])) /* We require at least one hex digit */
86 goto confused;
87 val = simple_strtoul(*str, str, 16);
88 if (old_style || (*str)[0] != '.') {
89 *id0 = *id1 = 0;
90 if (val < 0 || val > 0xffff)
91 goto confused;
92 *devno = val;
93 if ((*str)[0] != ',' && (*str)[0] != '-' &&
94 (*str)[0] != '\n' && (*str)[0] != '\0')
95 goto confused;
96 return 0;
97 }
98 /* New style x.y.z busid */
99 if (val < 0 || val > 0xff)
100 goto confused;
101 *id0 = val;
102 (*str)++;
103 if (!isxdigit((*str)[0])) /* We require at least one hex digit */
104 goto confused;
105 val = simple_strtoul(*str, str, 16);
106 if (val < 0 || val > 0xff || (*str)++[0] != '.')
107 goto confused;
108 *id1 = val;
109 if (!isxdigit((*str)[0])) /* We require at least one hex digit */
110 goto confused;
111 val = simple_strtoul(*str, str, 16);
112 if (val < 0 || val > 0xffff)
113 goto confused;
114 *devno = val;
115 if ((*str)[0] != ',' && (*str)[0] != '-' &&
116 (*str)[0] != '\n' && (*str)[0] != '\0')
117 goto confused;
118 return 0;
119confused:
120 strsep(str, ",\n");
121 printk(KERN_WARNING "Invalid cio_ignore parameter '%s'\n", sav);
122 return 1;
123}
124
125static inline int
126blacklist_parse_parameters (char *str, range_action action)
127{
128 unsigned int from, to, from_id0, to_id0, from_id1, to_id1;
129
130 while (*str != 0 && *str != '\n') {
131 range_action ra = action;
132 while(*str == ',')
133 str++;
134 if (*str == '!') {
135 ra = !action;
136 ++str;
137 }
138
139 /*
140 * Since we have to parse the proc commands and the
141 * kernel arguments we have to check four cases
142 */
143 if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 ||
144 strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) {
145 from = 0;
146 to = __MAX_SUBCHANNELS;
147 str += 3;
148 } else {
149 int rc;
150
151 rc = blacklist_busid(&str, &from_id0,
152 &from_id1, &from);
153 if (rc)
154 continue;
155 to = from;
156 to_id0 = from_id0;
157 to_id1 = from_id1;
158 if (*str == '-') {
159 str++;
160 rc = blacklist_busid(&str, &to_id0,
161 &to_id1, &to);
162 if (rc)
163 continue;
164 }
165 if (*str == '-') {
166 printk(KERN_WARNING "invalid cio_ignore "
167 "parameter '%s'\n",
168 strsep(&str, ",\n"));
169 continue;
170 }
171 if ((from_id0 != to_id0) || (from_id1 != to_id1)) {
172 printk(KERN_WARNING "invalid cio_ignore range "
173 "%x.%x.%04x-%x.%x.%04x\n",
174 from_id0, from_id1, from,
175 to_id0, to_id1, to);
176 continue;
177 }
178 }
179 /* FIXME: ignoring id0 and id1 here. */
180 pr_debug("blacklist_setup: adding range "
181 "from 0.0.%04x to 0.0.%04x\n", from, to);
182 blacklist_range (ra, from, to);
183 }
184 return 1;
185}
186
187/* Parsing the commandline for blacklist parameters, e.g. to blacklist
188 * bus ids 0.0.1234, 0.0.1235 and 0.0.1236, you could use any of:
189 * - cio_ignore=1234-1236
190 * - cio_ignore=0x1234-0x1235,1236
191 * - cio_ignore=0x1234,1235-1236
192 * - cio_ignore=1236 cio_ignore=1234-0x1236
193 * - cio_ignore=1234 cio_ignore=1236 cio_ignore=0x1235
194 * - cio_ignore=0.0.1234-0.0.1236
195 * - cio_ignore=0.0.1234,0x1235,1236
196 * - ...
197 */
198static int __init
199blacklist_setup (char *str)
200{
201 CIO_MSG_EVENT(6, "Reading blacklist parameters\n");
202 return blacklist_parse_parameters (str, add);
203}
204
205__setup ("cio_ignore=", blacklist_setup);
206
207/* Checking if devices are blacklisted */
208
209/*
210 * Function: is_blacklisted
211 * Returns 1 if the given devicenumber can be found in the blacklist,
212 * otherwise 0.
213 * Used by validate_subchannel()
214 */
215int
216is_blacklisted (int devno)
217{
218 return test_bit (devno, bl_dev);
219}
220
221#ifdef CONFIG_PROC_FS
222/*
223 * Function: s390_redo_validation
224 * Look for no longer blacklisted devices
225 * FIXME: there must be a better way to do this */
226static inline void
227s390_redo_validation (void)
228{
229 unsigned int irq;
230
231 CIO_TRACE_EVENT (0, "redoval");
232 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
233 int ret;
234 struct subchannel *sch;
235
236 sch = get_subchannel_by_schid(irq);
237 if (sch) {
238 /* Already known. */
239 put_device(&sch->dev);
240 continue;
241 }
242 ret = css_probe_device(irq);
243 if (ret == -ENXIO)
244 break; /* We're through. */
245 if (ret == -ENOMEM)
246 /*
247 * Stop validation for now. Bad, but no need for a
248 * panic.
249 */
250 break;
251 }
252}
253
254/*
255 * Function: blacklist_parse_proc_parameters
256 * parse the stuff which is piped to /proc/cio_ignore
257 */
258static inline void
259blacklist_parse_proc_parameters (char *buf)
260{
261 if (strncmp (buf, "free ", 5) == 0) {
262 blacklist_parse_parameters (buf + 5, free);
263 } else if (strncmp (buf, "add ", 4) == 0) {
264 /*
265 * We don't need to check for known devices since
266 * css_probe_device will handle this correctly.
267 */
268 blacklist_parse_parameters (buf + 4, add);
269 } else {
270		printk (KERN_WARNING "cio_ignore: Parse error;\n"
271 KERN_WARNING "try using 'free all|<devno-range>,"
272 "<devno-range>,...'\n"
273 KERN_WARNING "or 'add <devno-range>,"
274 "<devno-range>,...'\n");
275 return;
276 }
277
278 s390_redo_validation ();
279}
280
281/* FIXME: These should be real bus ids and not home-grown ones! */
282static int cio_ignore_read (char *page, char **start, off_t off,
283 int count, int *eof, void *data)
284{
285 const unsigned int entry_size = 18; /* "0.0.ABCD-0.0.EFGH\n" */
286 long devno;
287 int len;
288
289 len = 0;
290 for (devno = off; /* abuse the page variable
291 * as counter, see fs/proc/generic.c */
292 devno <= __MAX_SUBCHANNELS && len + entry_size < count; devno++) {
293 if (!test_bit(devno, bl_dev))
294 continue;
295 len += sprintf(page + len, "0.0.%04lx", devno);
296 if (test_bit(devno + 1, bl_dev)) { /* print range */
297 while (++devno < __MAX_SUBCHANNELS)
298 if (!test_bit(devno, bl_dev))
299 break;
300 len += sprintf(page + len, "-0.0.%04lx", --devno);
301 }
302 len += sprintf(page + len, "\n");
303 }
304
305 if (devno <= __MAX_SUBCHANNELS)
306 *eof = 1;
307 *start = (char *) (devno - off); /* number of checked entries */
308 return len;
309}
310
311static int cio_ignore_write(struct file *file, const char __user *user_buf,
312 unsigned long user_len, void *data)
313{
314 char *buf;
315
316 if (user_len > 65536)
317 user_len = 65536;
318 buf = vmalloc (user_len + 1); /* maybe better use the stack? */
319 if (buf == NULL)
320 return -ENOMEM;
321 if (strncpy_from_user (buf, user_buf, user_len) < 0) {
322 vfree (buf);
323 return -EFAULT;
324 }
325 buf[user_len] = '\0';
326
327 blacklist_parse_proc_parameters (buf);
328
329 vfree (buf);
330 return user_len;
331}
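/*
 * Runtime usage sketch (matching the "add"/"free" keywords handled by
 * blacklist_parse_proc_parameters above):
 *
 *	$ cat /proc/cio_ignore			# list blacklisted ranges
 *	$ echo add 0.0.1234-0.0.1236 > /proc/cio_ignore
 *	$ echo free all > /proc/cio_ignore	# make everything visible
 */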
332
333static int
334cio_ignore_proc_init (void)
335{
336 struct proc_dir_entry *entry;
337
338 entry = create_proc_entry ("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR,
339 &proc_root);
340 if (!entry)
341 return 0;
342
343 entry->read_proc = cio_ignore_read;
344 entry->write_proc = cio_ignore_write;
345
346 return 1;
347}
348
349__initcall (cio_ignore_proc_init);
350
351#endif /* CONFIG_PROC_FS */
diff --git a/drivers/s390/cio/blacklist.h b/drivers/s390/cio/blacklist.h
new file mode 100644
index 000000000000..fb42cafbe57c
--- /dev/null
+++ b/drivers/s390/cio/blacklist.h
@@ -0,0 +1,6 @@
1#ifndef S390_BLACKLIST_H
2#define S390_BLACKLIST_H
3
4extern int is_blacklisted (int devno);
5
6#endif
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
new file mode 100644
index 000000000000..21a75ee28b80
--- /dev/null
+++ b/drivers/s390/cio/ccwgroup.c
@@ -0,0 +1,482 @@
1/*
2 * drivers/s390/cio/ccwgroup.c
3 * bus driver for ccwgroup
4 * $Revision: 1.29 $
5 *
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation
8 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
9 * Cornelia Huck (cohuck@de.ibm.com)
10 */
11#include <linux/module.h>
12#include <linux/errno.h>
13#include <linux/slab.h>
14#include <linux/list.h>
15#include <linux/device.h>
16#include <linux/init.h>
17#include <linux/ctype.h>
18#include <linux/dcache.h>
19
20#include <asm/semaphore.h>
21#include <asm/ccwdev.h>
22#include <asm/ccwgroup.h>
23
24/* In Linux 2.4, we had a channel device layer called "chandev"
25 * that did all sorts of obscure stuff for networking devices.
26 * This is another driver that serves as a replacement for just
27 * one of its functions, namely the translation of single subchannels
28 * to devices that use multiple subchannels.
29 */
30
31/* a device matches a driver if all its slave devices match the same
32 * entry of the driver */
33static int
34ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
35{
36 struct ccwgroup_device *gdev;
37 struct ccwgroup_driver *gdrv;
38
39 gdev = container_of(dev, struct ccwgroup_device, dev);
40 gdrv = container_of(drv, struct ccwgroup_driver, driver);
41
42 if (gdev->creator_id == gdrv->driver_id)
43 return 1;
44
45 return 0;
46}
47static int
48ccwgroup_hotplug (struct device *dev, char **envp, int num_envp, char *buffer,
49 int buffer_size)
50{
51 /* TODO */
52 return 0;
53}
54
55static struct bus_type ccwgroup_bus_type = {
56 .name = "ccwgroup",
57 .match = ccwgroup_bus_match,
58 .hotplug = ccwgroup_hotplug,
59};
60
61static inline void
62__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
63{
64 int i;
65 char str[8];
66
67 for (i = 0; i < gdev->count; i++) {
68 sprintf(str, "cdev%d", i);
69 sysfs_remove_link(&gdev->dev.kobj, str);
70 sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
71 }
72
73}
74
75/*
76 * Provide an 'ungroup' attribute so the user can remove group devices no
77 * longer needed or accidentally created. Saves memory :)
78 */
79static ssize_t
80ccwgroup_ungroup_store(struct device *dev, const char *buf, size_t count)
81{
82 struct ccwgroup_device *gdev;
83
84 gdev = to_ccwgroupdev(dev);
85
86 if (gdev->state != CCWGROUP_OFFLINE)
87 return -EINVAL;
88
89 __ccwgroup_remove_symlinks(gdev);
90 device_unregister(dev);
91
92 return count;
93}
94
95static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
96
97static void
98ccwgroup_release (struct device *dev)
99{
100 struct ccwgroup_device *gdev;
101 int i;
102
103 gdev = to_ccwgroupdev(dev);
104
105 for (i = 0; i < gdev->count; i++) {
106 gdev->cdev[i]->dev.driver_data = NULL;
107 put_device(&gdev->cdev[i]->dev);
108 }
109 kfree(gdev);
110}
111
112static inline int
113__ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
114{
115 char str[8];
116 int i, rc;
117
118 for (i = 0; i < gdev->count; i++) {
119 rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, &gdev->dev.kobj,
120 "group_device");
121 if (rc) {
122 for (--i; i >= 0; i--)
123 sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
124 "group_device");
125 return rc;
126 }
127 }
128 for (i = 0; i < gdev->count; i++) {
129 sprintf(str, "cdev%d", i);
130 rc = sysfs_create_link(&gdev->dev.kobj, &gdev->cdev[i]->dev.kobj,
131 str);
132 if (rc) {
133 for (--i; i >= 0; i--) {
134 sprintf(str, "cdev%d", i);
135 sysfs_remove_link(&gdev->dev.kobj, str);
136 }
137 for (i = 0; i < gdev->count; i++)
138 sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
139 "group_device");
140 return rc;
141 }
142 }
143 return 0;
144}
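/*
 * Resulting link topology, as a sketch: the group device gets one
 * "cdev<n>" symlink per slave ccw device, and each slave gets a
 * "group_device" symlink pointing back at the group (bus ids are
 * illustrative):
 *
 *	<group 0.0.f500>/cdev0             -> <ccw device 0.0.f500>
 *	<group 0.0.f500>/cdev1             -> <ccw device 0.0.f501>
 *	<ccw device 0.0.f50x>/group_device -> <group 0.0.f500>
 */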
145
146/*
147 * try to add a new ccwgroup device for one driver
148 * argc and argv[] are a list of bus_id's of devices
149 * belonging to the driver.
150 */
151int
152ccwgroup_create(struct device *root,
153 unsigned int creator_id,
154 struct ccw_driver *cdrv,
155 int argc, char *argv[])
156{
157 struct ccwgroup_device *gdev;
158 int i;
159 int rc;
160 int del_drvdata;
161
162 if (argc > 256) /* disallow dumb users */
163 return -EINVAL;
164
165 gdev = kmalloc(sizeof(*gdev) + argc*sizeof(gdev->cdev[0]), GFP_KERNEL);
166 if (!gdev)
167 return -ENOMEM;
168
169 memset(gdev, 0, sizeof(*gdev) + argc*sizeof(gdev->cdev[0]));
170 atomic_set(&gdev->onoff, 0);
171
172 del_drvdata = 0;
173 for (i = 0; i < argc; i++) {
174 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]);
175
176 /* all devices have to be of the same type in
177 * order to be grouped */
178 if (!gdev->cdev[i]
179 || gdev->cdev[i]->id.driver_info !=
180 gdev->cdev[0]->id.driver_info) {
181 rc = -EINVAL;
182 goto free_dev;
183 }
184 /* Don't allow a device to belong to more than one group. */
185 if (gdev->cdev[i]->dev.driver_data) {
186 rc = -EINVAL;
187 goto free_dev;
188 }
189 }
190 for (i = 0; i < argc; i++)
191 gdev->cdev[i]->dev.driver_data = gdev;
192 del_drvdata = 1;
193
194 gdev->creator_id = creator_id;
195 gdev->count = argc;
196	gdev->dev = (struct device) {
197 .bus = &ccwgroup_bus_type,
198 .parent = root,
199 .release = ccwgroup_release,
200 };
201
202 snprintf (gdev->dev.bus_id, BUS_ID_SIZE, "%s",
203 gdev->cdev[0]->dev.bus_id);
204
205 rc = device_register(&gdev->dev);
206
207 if (rc)
208 goto free_dev;
209 get_device(&gdev->dev);
210 rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
211
212 if (rc) {
213 device_unregister(&gdev->dev);
214 goto error;
215 }
216
217 rc = __ccwgroup_create_symlinks(gdev);
218 if (!rc) {
219 put_device(&gdev->dev);
220 return 0;
221 }
222 device_remove_file(&gdev->dev, &dev_attr_ungroup);
223 device_unregister(&gdev->dev);
224error:
225 for (i = 0; i < argc; i++)
226 if (gdev->cdev[i]) {
227 put_device(&gdev->cdev[i]->dev);
228 gdev->cdev[i]->dev.driver_data = NULL;
229 }
230 put_device(&gdev->dev);
231 return rc;
232free_dev:
233 for (i = 0; i < argc; i++)
234 if (gdev->cdev[i]) {
235 put_device(&gdev->cdev[i]->dev);
236 if (del_drvdata)
237 gdev->cdev[i]->dev.driver_data = NULL;
238 }
239 kfree(gdev);
240 return rc;
241}
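/*
 * Caller sketch: a discipline driver groups the subchannels of one
 * adapter by bus id (ids, driver and creator id are illustrative):
 *
 *	char *ids[] = { "0.0.f500", "0.0.f501", "0.0.f502" };
 *	ret = ccwgroup_create(parent_dev, MY_DRIVER_ID, &my_ccw_driver,
 *			      3, ids);
 */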
242
243static int __init
244init_ccwgroup (void)
245{
246 return bus_register (&ccwgroup_bus_type);
247}
248
249static void __exit
250cleanup_ccwgroup (void)
251{
252 bus_unregister (&ccwgroup_bus_type);
253}
254
255module_init(init_ccwgroup);
256module_exit(cleanup_ccwgroup);
257
258/************************** driver stuff ******************************/
259
260static int
261ccwgroup_set_online(struct ccwgroup_device *gdev)
262{
263 struct ccwgroup_driver *gdrv;
264 int ret;
265
266 if (atomic_compare_and_swap(0, 1, &gdev->onoff))
267 return -EAGAIN;
268 if (gdev->state == CCWGROUP_ONLINE) {
269 ret = 0;
270 goto out;
271 }
272 if (!gdev->dev.driver) {
273 ret = -EINVAL;
274 goto out;
275 }
276 gdrv = to_ccwgroupdrv (gdev->dev.driver);
277 if ((ret = gdrv->set_online(gdev)))
278 goto out;
279
280 gdev->state = CCWGROUP_ONLINE;
281 out:
282 atomic_set(&gdev->onoff, 0);
283 return ret;
284}
285
286static int
287ccwgroup_set_offline(struct ccwgroup_device *gdev)
288{
289 struct ccwgroup_driver *gdrv;
290 int ret;
291
292 if (atomic_compare_and_swap(0, 1, &gdev->onoff))
293 return -EAGAIN;
294 if (gdev->state == CCWGROUP_OFFLINE) {
295 ret = 0;
296 goto out;
297 }
298 if (!gdev->dev.driver) {
299 ret = -EINVAL;
300 goto out;
301 }
302 gdrv = to_ccwgroupdrv (gdev->dev.driver);
303 if ((ret = gdrv->set_offline(gdev)))
304 goto out;
305
306 gdev->state = CCWGROUP_OFFLINE;
307 out:
308 atomic_set(&gdev->onoff, 0);
309 return ret;
310}
311
312static ssize_t
313ccwgroup_online_store (struct device *dev, const char *buf, size_t count)
314{
315 struct ccwgroup_device *gdev;
316 struct ccwgroup_driver *gdrv;
317 unsigned int value;
318 int ret;
319
320 gdev = to_ccwgroupdev(dev);
321 if (!dev->driver)
322 return count;
323
324 gdrv = to_ccwgroupdrv (gdev->dev.driver);
325 if (!try_module_get(gdrv->owner))
326 return -EINVAL;
327
328 value = simple_strtoul(buf, 0, 0);
329 ret = count;
330 if (value == 1)
331 ccwgroup_set_online(gdev);
332 else if (value == 0)
333 ccwgroup_set_offline(gdev);
334 else
335 ret = -EINVAL;
336 module_put(gdrv->owner);
337 return ret;
338}
339
340static ssize_t
341ccwgroup_online_show (struct device *dev, char *buf)
342{
343 int online;
344
345 online = (to_ccwgroupdev(dev)->state == CCWGROUP_ONLINE);
346
347 return sprintf(buf, online ? "1\n" : "0\n");
348}
349
350static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
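/*
 * User space drives the whole group online or offline through this
 * attribute (bus id illustrative):
 *
 *	echo 1 > /sys/bus/ccwgroup/devices/0.0.f500/online
 *	echo 0 > /sys/bus/ccwgroup/devices/0.0.f500/online
 */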
351
352static int
353ccwgroup_probe (struct device *dev)
354{
355 struct ccwgroup_device *gdev;
356 struct ccwgroup_driver *gdrv;
357
358 int ret;
359
360 gdev = to_ccwgroupdev(dev);
361 gdrv = to_ccwgroupdrv(dev->driver);
362
363 if ((ret = device_create_file(dev, &dev_attr_online)))
364 return ret;
365
366 pr_debug("%s: device %s\n", __func__, gdev->dev.bus_id);
367 ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
368 if (ret)
369 device_remove_file(dev, &dev_attr_online);
370
371 return ret;
372}
373
374static int
375ccwgroup_remove (struct device *dev)
376{
377 struct ccwgroup_device *gdev;
378 struct ccwgroup_driver *gdrv;
379
380 gdev = to_ccwgroupdev(dev);
381 gdrv = to_ccwgroupdrv(dev->driver);
382
383 pr_debug("%s: device %s\n", __func__, gdev->dev.bus_id);
384
385 device_remove_file(dev, &dev_attr_online);
386
387 if (gdrv && gdrv->remove)
388 gdrv->remove(gdev);
389 return 0;
390}
391
392int
393ccwgroup_driver_register (struct ccwgroup_driver *cdriver)
394{
395 /* register our new driver with the core */
396 cdriver->driver = (struct device_driver) {
397 .bus = &ccwgroup_bus_type,
398 .name = cdriver->name,
399 .probe = ccwgroup_probe,
400 .remove = ccwgroup_remove,
401 };
402
403 return driver_register(&cdriver->driver);
404}
405
406static inline struct device *
407__get_next_ccwgroup_device(struct device_driver *drv)
408{
409 struct device *dev, *d;
410
411 down_read(&drv->bus->subsys.rwsem);
412 dev = NULL;
413 list_for_each_entry(d, &drv->devices, driver_list) {
414 dev = get_device(d);
415 if (dev)
416 break;
417 }
418 up_read(&drv->bus->subsys.rwsem);
419 return dev;
420}
421
422void
423ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
424{
425 struct device *dev;
426
427 /* We don't want ccwgroup devices to live longer than their driver. */
428 get_driver(&cdriver->driver);
429 while ((dev = __get_next_ccwgroup_device(&cdriver->driver))) {
430 __ccwgroup_remove_symlinks(to_ccwgroupdev(dev));
431 device_unregister(dev);
432 put_device(dev);
433 };
434 put_driver(&cdriver->driver);
435 driver_unregister(&cdriver->driver);
436}
437
438int
439ccwgroup_probe_ccwdev(struct ccw_device *cdev)
440{
441 return 0;
442}
443
444static inline struct ccwgroup_device *
445__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
446{
447 struct ccwgroup_device *gdev;
448
449 if (cdev->dev.driver_data) {
450 gdev = (struct ccwgroup_device *)cdev->dev.driver_data;
451 if (get_device(&gdev->dev)) {
452 if (!list_empty(&gdev->dev.node))
453 return gdev;
454 put_device(&gdev->dev);
455 }
456 return NULL;
457 }
458 return NULL;
459}
460
461void
462ccwgroup_remove_ccwdev(struct ccw_device *cdev)
463{
464 struct ccwgroup_device *gdev;
465
466 /* Ignore offlining errors, device is gone anyway. */
467 ccw_device_set_offline(cdev);
468 /* If one of its devices is gone, the whole group is done for. */
469 gdev = __ccwgroup_get_gdev_by_cdev(cdev);
470 if (gdev) {
471 __ccwgroup_remove_symlinks(gdev);
472 device_unregister(&gdev->dev);
473 put_device(&gdev->dev);
474 }
475}
476
477MODULE_LICENSE("GPL");
478EXPORT_SYMBOL(ccwgroup_driver_register);
479EXPORT_SYMBOL(ccwgroup_driver_unregister);
480EXPORT_SYMBOL(ccwgroup_create);
481EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
482EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
new file mode 100644
index 000000000000..b35fe12e6bfc
--- /dev/null
+++ b/drivers/s390/cio/chsc.c
@@ -0,0 +1,1114 @@
1/*
2 * drivers/s390/cio/chsc.c
3 * S/390 common I/O routines -- channel subsystem call
4 * $Revision: 1.119 $
5 *
6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation
8 * Author(s): Ingo Adlung (adlung@de.ibm.com)
9 * Cornelia Huck (cohuck@de.ibm.com)
10 * Arnd Bergmann (arndb@de.ibm.com)
11 */
12
13#include <linux/module.h>
14#include <linux/config.h>
15#include <linux/slab.h>
16#include <linux/init.h>
17#include <linux/device.h>
18
19#include <asm/cio.h>
20
21#include "css.h"
22#include "cio.h"
23#include "cio_debug.h"
24#include "ioasm.h"
25#include "chsc.h"
26
27static struct channel_path *chps[NR_CHPIDS];
28
29static void *sei_page;
30
31static int new_channel_path(int chpid);
32
33static inline void
34set_chp_logically_online(int chp, int onoff)
35{
36 chps[chp]->state = onoff;
37}
38
39static int
40get_chp_status(int chp)
41{
42 return (chps[chp] ? chps[chp]->state : -ENODEV);
43}
44
45void
46chsc_validate_chpids(struct subchannel *sch)
47{
48 int mask, chp;
49
50 for (chp = 0; chp <= 7; chp++) {
51 mask = 0x80 >> chp;
52 if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
53 /* disable using this path */
54 sch->opm &= ~mask;
55 }
56}
57
58void
59chpid_is_actually_online(int chp)
60{
61 int state;
62
63 state = get_chp_status(chp);
64 if (state < 0) {
65 need_rescan = 1;
66 queue_work(slow_path_wq, &slow_path_work);
67 } else
68 WARN_ON(!state);
69}
70
71/* FIXME: this is _always_ called for every subchannel. shouldn't we
72 * process more than one at a time? */
73static int
74chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
75{
76 int ccode, j;
77
78 struct {
79 struct chsc_header request;
80 u16 reserved1;
81 u16 f_sch; /* first subchannel */
82 u16 reserved2;
83 u16 l_sch; /* last subchannel */
84 u32 reserved3;
85 struct chsc_header response;
86 u32 reserved4;
87 u8 sch_valid : 1;
88 u8 dev_valid : 1;
89 u8 st : 3; /* subchannel type */
90 u8 zeroes : 3;
91 u8 unit_addr; /* unit address */
92 u16 devno; /* device number */
93 u8 path_mask;
94 u8 fla_valid_mask;
95 u16 sch; /* subchannel */
96 u8 chpid[8]; /* chpids 0-7 */
97 u16 fla[8]; /* full link addresses 0-7 */
98 } *ssd_area;
99
100 ssd_area = page;
101
102 ssd_area->request = (struct chsc_header) {
103 .length = 0x0010,
104 .code = 0x0004,
105 };
106
107 ssd_area->f_sch = sch->irq;
108 ssd_area->l_sch = sch->irq;
109
110 ccode = chsc(ssd_area);
111 if (ccode > 0) {
112 pr_debug("chsc returned with ccode = %d\n", ccode);
113 return (ccode == 3) ? -ENODEV : -EBUSY;
114 }
115
116 switch (ssd_area->response.code) {
117 case 0x0001: /* everything ok */
118 break;
119 case 0x0002:
120 CIO_CRW_EVENT(2, "Invalid command!\n");
121 return -EINVAL;
122 case 0x0003:
123 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
124 return -EINVAL;
125 case 0x0004:
126 CIO_CRW_EVENT(2, "Model does not provide ssd\n");
127 return -EOPNOTSUPP;
128 default:
129 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
130 ssd_area->response.code);
131 return -EIO;
132 }
133
134 /*
135 * ssd_area->st stores the type of the detected
136 * subchannel, with the following definitions:
137 *
138 * 0: I/O subchannel: All fields have meaning
139 * 1: CHSC subchannel: Only sch_val, st and sch
140 * have meaning
141 * 2: Message subchannel: All fields except unit_addr
142 * have meaning
143 * 3: ADM subchannel: Only sch_val, st and sch
144 * have meaning
145 *
146 * Other types are currently undefined.
147 */
148 if (ssd_area->st > 3) { /* uhm, that looks strange... */
149 CIO_CRW_EVENT(0, "Strange subchannel type %d"
150 " for sch %04x\n", ssd_area->st, sch->irq);
151 /*
152 * There may have been a new subchannel type defined in the
153 * time since this code was written; since we don't know which
154 * fields have meaning and what to do with it we just jump out
155 */
156 return 0;
157 } else {
158 const char *type[4] = {"I/O", "chsc", "message", "ADM"};
159 CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n",
160 sch->irq, type[ssd_area->st]);
161
162 sch->ssd_info.valid = 1;
163 sch->ssd_info.type = ssd_area->st;
164 }
165
166 if (ssd_area->st == 0 || ssd_area->st == 2) {
167 for (j = 0; j < 8; j++) {
168 if (!((0x80 >> j) & ssd_area->path_mask &
169 ssd_area->fla_valid_mask))
170 continue;
171 sch->ssd_info.chpid[j] = ssd_area->chpid[j];
172 sch->ssd_info.fla[j] = ssd_area->fla[j];
173 }
174 }
175 return 0;
176}
177
178int
179css_get_ssd_info(struct subchannel *sch)
180{
181 int ret;
182 void *page;
183
184 page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
185 if (!page)
186 return -ENOMEM;
187 spin_lock_irq(&sch->lock);
188 ret = chsc_get_sch_desc_irq(sch, page);
189 if (ret) {
190 static int cio_chsc_err_msg;
191
192 if (!cio_chsc_err_msg) {
193 printk(KERN_ERR
194 "chsc_get_sch_descriptions:"
195 " Error %d while doing chsc; "
196 "processing some machine checks may "
197 "not work\n", ret);
198 cio_chsc_err_msg = 1;
199 }
200 }
201 spin_unlock_irq(&sch->lock);
202 free_page((unsigned long)page);
203 if (!ret) {
204 int j, chpid;
205 /* Allocate channel path structures, if needed. */
206 for (j = 0; j < 8; j++) {
207 chpid = sch->ssd_info.chpid[j];
208 if (chpid && (get_chp_status(chpid) < 0))
209 new_channel_path(chpid);
210 }
211 }
212 return ret;
213}
214
215static int
216s390_subchannel_remove_chpid(struct device *dev, void *data)
217{
218 int j;
219 int mask;
220 struct subchannel *sch;
221 __u8 *chpid;
222 struct schib schib;
223
224 sch = to_subchannel(dev);
225 chpid = data;
226 for (j = 0; j < 8; j++)
227 if (sch->schib.pmcw.chpid[j] == *chpid)
228 break;
229 if (j >= 8)
230 return 0;
231
232 mask = 0x80 >> j;
233 spin_lock(&sch->lock);
234
235 stsch(sch->irq, &schib);
236 if (!schib.pmcw.dnv)
237 goto out_unreg;
238 memcpy(&sch->schib, &schib, sizeof(struct schib));
239 /* Check for single path devices. */
240 if (sch->schib.pmcw.pim == 0x80)
241 goto out_unreg;
242 if (sch->vpm == mask)
243 goto out_unreg;
244
245 if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND |
246 SCSW_ACTL_HALT_PEND |
247 SCSW_ACTL_START_PEND |
248 SCSW_ACTL_RESUME_PEND)) &&
249 (sch->schib.pmcw.lpum == mask)) {
250 int cc = cio_cancel(sch);
251
252 if (cc == -ENODEV)
253 goto out_unreg;
254
255 if (cc == -EINVAL) {
256 cc = cio_clear(sch);
257 if (cc == -ENODEV)
258 goto out_unreg;
259 /* Call handler. */
260 if (sch->driver && sch->driver->termination)
261 sch->driver->termination(&sch->dev);
262 goto out_unlock;
263 }
264 } else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
265 (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
266 (sch->schib.pmcw.lpum == mask)) {
267 int cc;
268
269 cc = cio_clear(sch);
270 if (cc == -ENODEV)
271 goto out_unreg;
272 /* Call handler. */
273 if (sch->driver && sch->driver->termination)
274 sch->driver->termination(&sch->dev);
275 goto out_unlock;
276 }
277
278 /* trigger path verification. */
279 if (sch->driver && sch->driver->verify)
280 sch->driver->verify(&sch->dev);
281out_unlock:
282 spin_unlock(&sch->lock);
283 return 0;
284out_unreg:
285 spin_unlock(&sch->lock);
286 sch->lpm = 0;
287 if (css_enqueue_subchannel_slow(sch->irq)) {
288 css_clear_subchannel_slow_list();
289 need_rescan = 1;
290 }
291 return 0;
292}
293
294static inline void
295s390_set_chpid_offline( __u8 chpid)
296{
297 char dbf_txt[15];
298
299 sprintf(dbf_txt, "chpr%x", chpid);
300 CIO_TRACE_EVENT(2, dbf_txt);
301
302 if (get_chp_status(chpid) <= 0)
303 return;
304
305 bus_for_each_dev(&css_bus_type, NULL, &chpid,
306 s390_subchannel_remove_chpid);
307
308 if (need_rescan || css_slow_subchannels_exist())
309 queue_work(slow_path_wq, &slow_path_work);
310}
311
312static int
313s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
314 struct subchannel *sch)
315{
316 int found;
317 int chp;
318 int ccode;
319
320 found = 0;
321 for (chp = 0; chp <= 7; chp++)
322 /*
323 * check if chpid is in information updated by ssd
324 */
325 if (sch->ssd_info.valid &&
326 sch->ssd_info.chpid[chp] == chpid &&
327 (sch->ssd_info.fla[chp] & fla_mask) == fla) {
328 found = 1;
329 break;
330 }
331
332 if (found == 0)
333 return 0;
334
335 /*
336 * Do a stsch to update our subchannel structure with the
337 * new path information and eventually check for logically
338 * offline chpids.
339 */
340 ccode = stsch(sch->irq, &sch->schib);
341 if (ccode > 0)
342 return 0;
343
344 return 0x80 >> chp;
345}
346
347static int
348s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
349{
350 struct subchannel *sch;
351 int irq, rc;
352 char dbf_txt[15];
353
354 sprintf(dbf_txt, "accpr%x", chpid);
355 CIO_TRACE_EVENT( 2, dbf_txt);
356 if (fla != 0) {
357 sprintf(dbf_txt, "fla%x", fla);
358 CIO_TRACE_EVENT( 2, dbf_txt);
359 }
360
361 /*
362 * I/O resources may have become accessible.
363 * Scan through all subchannels that may be concerned and
364 * do a validation on those.
365	 * The more information we have, the less scanning
366	 * we will have to do.
367 */
368
369 if (!get_chp_status(chpid))
370 return 0; /* no need to do the rest */
371
372 rc = 0;
373 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
374 int chp_mask, old_lpm;
375
376 sch = get_subchannel_by_schid(irq);
377 if (!sch) {
378 struct schib schib;
379 int ret;
380 /*
381 * We don't know the device yet, but since a path
382 * may be available now to the device we'll have
383 * to do recognition again.
384 * Since we don't have any idea about which chpid
385 * that beast may be on we'll have to do a stsch
386 * on all devices, grr...
387 */
388 if (stsch(irq, &schib)) {
389 /* We're through */
390 if (need_rescan)
391 rc = -EAGAIN;
392 break;
393 }
394 if (need_rescan) {
395 rc = -EAGAIN;
396 continue;
397 }
398 /* Put it on the slow path. */
399 ret = css_enqueue_subchannel_slow(irq);
400 if (ret) {
401 css_clear_subchannel_slow_list();
402 need_rescan = 1;
403 }
404 rc = -EAGAIN;
405 continue;
406 }
407
408 spin_lock_irq(&sch->lock);
409
410 chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch);
411
412 if (chp_mask == 0) {
413
414 spin_unlock_irq(&sch->lock);
415
416 if (fla_mask != 0)
417 break;
418 else
419 continue;
420 }
421 old_lpm = sch->lpm;
422 sch->lpm = ((sch->schib.pmcw.pim &
423 sch->schib.pmcw.pam &
424 sch->schib.pmcw.pom)
425 | chp_mask) & sch->opm;
426 if (!old_lpm && sch->lpm)
427 device_trigger_reprobe(sch);
428 else if (sch->driver && sch->driver->verify)
429 sch->driver->verify(&sch->dev);
430
431 spin_unlock_irq(&sch->lock);
432 put_device(&sch->dev);
433 if (fla_mask != 0)
434 break;
435 }
436 return rc;
437}
438
439static int
440__get_chpid_from_lir(void *data)
441{
442 struct lir {
443 u8 iq;
444 u8 ic;
445 u16 sci;
446 /* incident-node descriptor */
447 u32 indesc[28];
448 /* attached-node descriptor */
449 u32 andesc[28];
450 /* incident-specific information */
451 u32 isinfo[28];
452 } *lir;
453
454 lir = (struct lir*) data;
455 if (!(lir->iq&0x80))
456 /* NULL link incident record */
457 return -EINVAL;
458 if (!(lir->indesc[0]&0xc0000000))
459 /* node descriptor not valid */
460 return -EINVAL;
461 if (!(lir->indesc[0]&0x10000000))
462 /* don't handle device-type nodes - FIXME */
463 return -EINVAL;
464 /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
465
466 return (u16) (lir->indesc[0]&0x000000ff);
467}
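
/*
 * Worked example with a made-up first descriptor word: for
 * lir->indesc[0] == 0xd00000a5 both the validity bits (0xc0000000)
 * and the node-type bit (0x10000000) are set, so the record is
 * accepted and chpid 0xa5 is returned from byte 3.
 */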
468
469int
470chsc_process_crw(void)
471{
472 int chpid, ret;
473 struct {
474 struct chsc_header request;
475 u32 reserved1;
476 u32 reserved2;
477 u32 reserved3;
478 struct chsc_header response;
479 u32 reserved4;
480 u8 flags;
481 u8 vf; /* validity flags */
482 u8 rs; /* reporting source */
483 u8 cc; /* content code */
484 u16 fla; /* full link address */
485 u16 rsid; /* reporting source id */
486 u32 reserved5;
487 u32 reserved6;
488 u32 ccdf[96]; /* content-code dependent field */
489 /* ccdf has to be big enough for a link-incident record */
490 } *sei_area;
491
492 if (!sei_page)
493 return 0;
494 /*
495 * build the chsc request block for store event information
496 * and do the call
497 * This function is only called by the machine check handler thread,
498 * so we don't need locking for the sei_page.
499 */
500 sei_area = sei_page;
501
502 CIO_TRACE_EVENT( 2, "prcss");
503 ret = 0;
504 do {
505 int ccode, status;
506 memset(sei_area, 0, sizeof(*sei_area));
507
508 sei_area->request = (struct chsc_header) {
509 .length = 0x0010,
510 .code = 0x000e,
511 };
512
513 ccode = chsc(sei_area);
514 if (ccode > 0)
515 return 0;
516
517 switch (sei_area->response.code) {
518 /* for debug purposes, check for problems */
519 case 0x0001:
520 CIO_CRW_EVENT(4, "chsc_process_crw: event information "
521 "successfully stored\n");
522 break; /* everything ok */
523 case 0x0002:
524 CIO_CRW_EVENT(2,
525 "chsc_process_crw: invalid command!\n");
526 return 0;
527 case 0x0003:
528 CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
529 "request block!\n");
530 return 0;
531 case 0x0005:
532 CIO_CRW_EVENT(2, "chsc_process_crw: no event "
533 "information stored\n");
534 return 0;
535 default:
536 CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
537 sei_area->response.code);
538 return 0;
539 }
540
541 /* Check if we might have lost some information. */
542 if (sei_area->flags & 0x40)
543 CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
544 "has been lost due to overflow!\n");
545
546 if (sei_area->rs != 4) {
547 CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
548 "(%04X) isn't a chpid!\n",
549 sei_area->rsid);
550 continue;
551 }
552
553 /* which kind of information was stored? */
554 switch (sei_area->cc) {
555 case 1: /* link incident*/
556 CIO_CRW_EVENT(4, "chsc_process_crw: "
557 "channel subsystem reports link incident,"
558 " reporting source is chpid %x\n",
559 sei_area->rsid);
560 chpid = __get_chpid_from_lir(sei_area->ccdf);
561 if (chpid < 0)
562 CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
563 __FUNCTION__);
564 else
565 s390_set_chpid_offline(chpid);
566 break;
567
568		case 2: /* I/O resource accessibility */
569 CIO_CRW_EVENT(4, "chsc_process_crw: "
570 "channel subsystem reports some I/O "
571 "devices may have become accessible\n");
572			pr_debug("Data received after sei:\n");
573 pr_debug("Validity flags: %x\n", sei_area->vf);
574
575 /* allocate a new channel path structure, if needed */
576 status = get_chp_status(sei_area->rsid);
577 if (status < 0)
578 new_channel_path(sei_area->rsid);
579 else if (!status)
580 return 0;
581 if ((sei_area->vf & 0x80) == 0) {
582 pr_debug("chpid: %x\n", sei_area->rsid);
583 ret = s390_process_res_acc(sei_area->rsid,
584 0, 0);
585 } else if ((sei_area->vf & 0xc0) == 0x80) {
586 pr_debug("chpid: %x link addr: %x\n",
587 sei_area->rsid, sei_area->fla);
588 ret = s390_process_res_acc(sei_area->rsid,
589 sei_area->fla,
590 0xff00);
591 } else if ((sei_area->vf & 0xc0) == 0xc0) {
592 pr_debug("chpid: %x full link addr: %x\n",
593 sei_area->rsid, sei_area->fla);
594 ret = s390_process_res_acc(sei_area->rsid,
595 sei_area->fla,
596 0xffff);
597 }
598 pr_debug("\n");
599
600 break;
601
602 default: /* other stuff */
603 CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
604 sei_area->cc);
605 break;
606 }
607 } while (sei_area->flags & 0x80);
608 return ret;
609}
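
/*
 * Summary of the validity flag decoding above (vf is only looked at
 * for content code 2):
 *
 *	(vf & 0x80) == 0x00	only the chpid (rsid) is valid
 *	(vf & 0xc0) == 0x80	link address valid, fla_mask 0xff00
 *	(vf & 0xc0) == 0xc0	full link address valid, fla_mask 0xffff
 */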
610
611static int
612chp_add(int chpid)
613{
614 struct subchannel *sch;
615 int irq, ret, rc;
616 char dbf_txt[15];
617
618 if (!get_chp_status(chpid))
619 return 0; /* no need to do the rest */
620
621 sprintf(dbf_txt, "cadd%x", chpid);
622 CIO_TRACE_EVENT(2, dbf_txt);
623
624 rc = 0;
625 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
626 int i;
627
628 sch = get_subchannel_by_schid(irq);
629 if (!sch) {
630 struct schib schib;
631
632 if (stsch(irq, &schib)) {
633 /* We're through */
634 if (need_rescan)
635 rc = -EAGAIN;
636 break;
637 }
638 if (need_rescan) {
639 rc = -EAGAIN;
640 continue;
641 }
642 /* Put it on the slow path. */
643 ret = css_enqueue_subchannel_slow(irq);
644 if (ret) {
645 css_clear_subchannel_slow_list();
646 need_rescan = 1;
647 }
648 rc = -EAGAIN;
649 continue;
650 }
651
652 spin_lock(&sch->lock);
653 for (i=0; i<8; i++)
654 if (sch->schib.pmcw.chpid[i] == chpid) {
655 if (stsch(sch->irq, &sch->schib) != 0) {
656 /* Endgame. */
657 spin_unlock(&sch->lock);
658 return rc;
659 }
660 break;
661 }
662 if (i==8) {
663 spin_unlock(&sch->lock);
664 return rc;
665 }
666 sch->lpm = ((sch->schib.pmcw.pim &
667 sch->schib.pmcw.pam &
668 sch->schib.pmcw.pom)
669 | 0x80 >> i) & sch->opm;
670
671 if (sch->driver && sch->driver->verify)
672 sch->driver->verify(&sch->dev);
673
674 spin_unlock(&sch->lock);
675 put_device(&sch->dev);
676 }
677 return rc;
678}
679
680/*
681 * Handling of crw machine checks with channel path source.
682 */
683int
684chp_process_crw(int chpid, int on)
685{
686 if (on == 0) {
687		/* Path has gone. We use the link incident routine. */
688 s390_set_chpid_offline(chpid);
689 return 0; /* De-register is async anyway. */
690 }
691 /*
692 * Path has come. Allocate a new channel path structure,
693 * if needed.
694 */
695 if (get_chp_status(chpid) < 0)
696 new_channel_path(chpid);
697	/* Avoid the extra overhead in s390_process_res_acc. */
698 return chp_add(chpid);
699}
700
701static inline int
702__check_for_io_and_kill(struct subchannel *sch, int index)
703{
704 int cc;
705
706 if (!device_is_online(sch))
707 /* cio could be doing I/O. */
708 return 0;
709 cc = stsch(sch->irq, &sch->schib);
710 if (cc)
711 return 0;
712 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
713 device_set_waiting(sch);
714 return 1;
715 }
716 return 0;
717}
718
719static inline void
720__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
721{
722 int chp, old_lpm;
723 unsigned long flags;
724
725 if (!sch->ssd_info.valid)
726 return;
727
728 spin_lock_irqsave(&sch->lock, flags);
729 old_lpm = sch->lpm;
730 for (chp = 0; chp < 8; chp++) {
731 if (sch->ssd_info.chpid[chp] != chpid)
732 continue;
733
734 if (on) {
735 sch->opm |= (0x80 >> chp);
736 sch->lpm |= (0x80 >> chp);
737 if (!old_lpm)
738 device_trigger_reprobe(sch);
739 else if (sch->driver && sch->driver->verify)
740 sch->driver->verify(&sch->dev);
741 } else {
742 sch->opm &= ~(0x80 >> chp);
743 sch->lpm &= ~(0x80 >> chp);
744 /*
745 * Give running I/O a grace period in which it
746 * can successfully terminate, even using the
747 * just varied off path. Then kill it.
748 */
749 if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
750 if (css_enqueue_subchannel_slow(sch->irq)) {
751 css_clear_subchannel_slow_list();
752 need_rescan = 1;
753 }
754 } else if (sch->driver && sch->driver->verify)
755 sch->driver->verify(&sch->dev);
756 }
757 break;
758 }
759 spin_unlock_irqrestore(&sch->lock, flags);
760}
761
762static int
763s390_subchannel_vary_chpid_off(struct device *dev, void *data)
764{
765 struct subchannel *sch;
766 __u8 *chpid;
767
768 sch = to_subchannel(dev);
769 chpid = data;
770
771 __s390_subchannel_vary_chpid(sch, *chpid, 0);
772 return 0;
773}
774
775static int
776s390_subchannel_vary_chpid_on(struct device *dev, void *data)
777{
778 struct subchannel *sch;
779 __u8 *chpid;
780
781 sch = to_subchannel(dev);
782 chpid = data;
783
784 __s390_subchannel_vary_chpid(sch, *chpid, 1);
785 return 0;
786}
787
788/*
789 * Function: s390_vary_chpid
790 * Varies the specified chpid online or offline
791 */
792static int
793s390_vary_chpid( __u8 chpid, int on)
794{
795 char dbf_text[15];
796 int status, irq, ret;
797 struct subchannel *sch;
798
799 sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
800 CIO_TRACE_EVENT( 2, dbf_text);
801
802 status = get_chp_status(chpid);
803 if (status < 0) {
804 printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
805 return -EINVAL;
806 }
807
808 if (!on && !status) {
809 printk(KERN_ERR "chpid %x is already offline\n", chpid);
810 return -EINVAL;
811 }
812
813 set_chp_logically_online(chpid, on);
814
815 /*
816 * Redo PathVerification on the devices the chpid connects to
817 */
818
819 bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
820 s390_subchannel_vary_chpid_on :
821 s390_subchannel_vary_chpid_off);
822 if (!on)
823 goto out;
824 /* Scan for new devices on varied on path. */
825 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
826 struct schib schib;
827
828 if (need_rescan)
829 break;
830 sch = get_subchannel_by_schid(irq);
831 if (sch) {
832 put_device(&sch->dev);
833 continue;
834 }
835 if (stsch(irq, &schib))
836 /* We're through */
837 break;
838 /* Put it on the slow path. */
839 ret = css_enqueue_subchannel_slow(irq);
840 if (ret) {
841 css_clear_subchannel_slow_list();
842 need_rescan = 1;
843 }
844 }
845out:
846 if (need_rescan || css_slow_subchannels_exist())
847 queue_work(slow_path_wq, &slow_path_work);
848 return 0;
849}
850
851/*
852 * Files for the channel path entries.
853 */
854static ssize_t
855chp_status_show(struct device *dev, char *buf)
856{
857 struct channel_path *chp = container_of(dev, struct channel_path, dev);
858
859 if (!chp)
860 return 0;
861 return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
862 sprintf(buf, "offline\n"));
863}
864
865static ssize_t
866chp_status_write(struct device *dev, const char *buf, size_t count)
867{
868 struct channel_path *cp = container_of(dev, struct channel_path, dev);
869 char cmd[10];
870 int num_args;
871 int error;
872
873 num_args = sscanf(buf, "%5s", cmd);
874 if (!num_args)
875 return count;
876
877 if (!strnicmp(cmd, "on", 2))
878 error = s390_vary_chpid(cp->id, 1);
879 else if (!strnicmp(cmd, "off", 3))
880 error = s390_vary_chpid(cp->id, 0);
881 else
882 error = -EINVAL;
883
884 return error < 0 ? error : count;
885
886}
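
/*
 * The status attribute below makes channel paths controllable from
 * user space. Assuming the css bus device shows up as css0, varying
 * chpid 0x4a would look like:
 *
 *	echo on  > /sys/devices/css0/chp0.4a/status
 *	echo off > /sys/devices/css0/chp0.4a/status
 */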
887
888static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
889
890static ssize_t
891chp_type_show(struct device *dev, char *buf)
892{
893 struct channel_path *chp = container_of(dev, struct channel_path, dev);
894
895 if (!chp)
896 return 0;
897 return sprintf(buf, "%x\n", chp->desc.desc);
898}
899
900static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
901
902static struct attribute * chp_attrs[] = {
903 &dev_attr_status.attr,
904 &dev_attr_type.attr,
905 NULL,
906};
907
908static struct attribute_group chp_attr_group = {
909 .attrs = chp_attrs,
910};
911
912static void
913chp_release(struct device *dev)
914{
915 struct channel_path *cp;
916
917 cp = container_of(dev, struct channel_path, dev);
918 kfree(cp);
919}
920
921static int
922chsc_determine_channel_path_description(int chpid,
923 struct channel_path_desc *desc)
924{
925 int ccode, ret;
926
927 struct {
928 struct chsc_header request;
929 u32 : 24;
930 u32 first_chpid : 8;
931 u32 : 24;
932 u32 last_chpid : 8;
933 u32 zeroes1;
934 struct chsc_header response;
935 u32 zeroes2;
936 struct channel_path_desc desc;
937 } *scpd_area;
938
939 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
940 if (!scpd_area)
941 return -ENOMEM;
942
943 scpd_area->request = (struct chsc_header) {
944 .length = 0x0010,
945 .code = 0x0002,
946 };
947
948 scpd_area->first_chpid = chpid;
949 scpd_area->last_chpid = chpid;
950
951 ccode = chsc(scpd_area);
952 if (ccode > 0) {
953 ret = (ccode == 3) ? -ENODEV : -EBUSY;
954 goto out;
955 }
956
957 switch (scpd_area->response.code) {
958 case 0x0001: /* Success. */
959 memcpy(desc, &scpd_area->desc,
960 sizeof(struct channel_path_desc));
961 ret = 0;
962 break;
963 case 0x0003: /* Invalid block. */
964 case 0x0007: /* Invalid format. */
965 case 0x0008: /* Other invalid block. */
966 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
967 ret = -EINVAL;
968 break;
969 case 0x0004: /* Command not provided in model. */
970 CIO_CRW_EVENT(2, "Model does not provide scpd\n");
971 ret = -EOPNOTSUPP;
972 break;
973 default:
974 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
975 scpd_area->response.code);
976 ret = -EIO;
977 }
978out:
979 free_page((unsigned long)scpd_area);
980 return ret;
981}
982
983/*
984 * Entries for chpids on the system bus.
985 * This replaces /proc/chpids.
986 */
987static int
988new_channel_path(int chpid)
989{
990 struct channel_path *chp;
991 int ret;
992
993 chp = kmalloc(sizeof(struct channel_path), GFP_KERNEL);
994 if (!chp)
995 return -ENOMEM;
996 memset(chp, 0, sizeof(struct channel_path));
997
998 /* fill in status, etc. */
999 chp->id = chpid;
1000 chp->state = 1;
1001 chp->dev = (struct device) {
1002 .parent = &css_bus_device,
1003 .release = chp_release,
1004 };
1005 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
1006
1007 /* Obtain channel path description and fill it in. */
1008 ret = chsc_determine_channel_path_description(chpid, &chp->desc);
1009 if (ret)
1010 goto out_free;
1011
1012 /* make it known to the system */
1013 ret = device_register(&chp->dev);
1014 if (ret) {
1015 printk(KERN_WARNING "%s: could not register %02x\n",
1016 __func__, chpid);
1017 goto out_free;
1018 }
1019 ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
1020 if (ret) {
1021 device_unregister(&chp->dev);
1022 goto out_free;
1023 } else
1024 chps[chpid] = chp;
1025 return ret;
1026out_free:
1027 kfree(chp);
1028 return ret;
1029}
1030
1031void *
1032chsc_get_chp_desc(struct subchannel *sch, int chp_no)
1033{
1034 struct channel_path *chp;
1035 struct channel_path_desc *desc;
1036
1037 chp = chps[sch->schib.pmcw.chpid[chp_no]];
1038 if (!chp)
1039 return NULL;
1040 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
1041 if (!desc)
1042 return NULL;
1043 memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
1044 return desc;
1045}
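
/*
 * The descriptor returned above is a kmalloc'ed copy, so the caller
 * owns it and has to free it, along the lines of:
 *
 *	struct channel_path_desc *desc;
 *
 *	desc = chsc_get_chp_desc(sch, chp_no);
 *	if (desc) {
 *		... use desc->desc, desc->chpid ...
 *		kfree(desc);
 *	}
 */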
1046
1047
1048static int __init
1049chsc_alloc_sei_area(void)
1050{
1051 sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1052 if (!sei_page)
1053 printk(KERN_WARNING"Can't allocate page for processing of " \
1054 "chsc machine checks!\n");
1055 return (sei_page ? 0 : -ENOMEM);
1056}
1057
1058subsys_initcall(chsc_alloc_sei_area);
1059
1060struct css_general_char css_general_characteristics;
1061struct css_chsc_char css_chsc_characteristics;
1062
1063int __init
1064chsc_determine_css_characteristics(void)
1065{
1066 int result;
1067 struct {
1068 struct chsc_header request;
1069 u32 reserved1;
1070 u32 reserved2;
1071 u32 reserved3;
1072 struct chsc_header response;
1073 u32 reserved4;
1074 u32 general_char[510];
1075 u32 chsc_char[518];
1076 } *scsc_area;
1077
1078 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1079 if (!scsc_area) {
1080		printk(KERN_WARNING"cio: Was not able to determine available " \
1081 "CHSCs due to no memory.\n");
1082 return -ENOMEM;
1083 }
1084
1085 scsc_area->request = (struct chsc_header) {
1086 .length = 0x0010,
1087 .code = 0x0010,
1088 };
1089
1090 result = chsc(scsc_area);
1091 if (result) {
1092 printk(KERN_WARNING"cio: Was not able to determine " \
1093 "available CHSCs, cc=%i.\n", result);
1094 result = -EIO;
1095 goto exit;
1096 }
1097
1098 if (scsc_area->response.code != 1) {
1099 printk(KERN_WARNING"cio: Was not able to determine " \
1100 "available CHSCs.\n");
1101 result = -EIO;
1102 goto exit;
1103 }
1104 memcpy(&css_general_characteristics, scsc_area->general_char,
1105 sizeof(css_general_characteristics));
1106 memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
1107 sizeof(css_chsc_characteristics));
1108exit:
1109 free_page ((unsigned long) scsc_area);
1110 return result;
1111}
1112
1113EXPORT_SYMBOL_GPL(css_general_characteristics);
1114EXPORT_SYMBOL_GPL(css_chsc_characteristics);
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
new file mode 100644
index 000000000000..be20da49d147
--- /dev/null
+++ b/drivers/s390/cio/chsc.h
@@ -0,0 +1,66 @@
1#ifndef S390_CHSC_H
2#define S390_CHSC_H
3
4#define NR_CHPIDS 256
5
6#define CHSC_SEI_ACC_CHPID 1
7#define CHSC_SEI_ACC_LINKADDR 2
8#define CHSC_SEI_ACC_FULLLINKADDR 3
9
10struct chsc_header {
11 u16 length;
12 u16 code;
13};
14
15struct channel_path_desc {
16 u8 flags;
17 u8 lsn;
18 u8 desc;
19 u8 chpid;
20 u8 swla;
21 u8 zeroes;
22 u8 chla;
23 u8 chpp;
24};
25
26struct channel_path {
27 int id;
28 int state;
29 struct channel_path_desc desc;
30 struct device dev;
31};
32
33extern void s390_process_css( void );
34extern void chsc_validate_chpids(struct subchannel *);
35extern void chpid_is_actually_online(int);
36
37struct css_general_char {
38 u64 : 41;
39 u32 aif : 1; /* bit 41 */
40 u32 : 3;
41 u32 mcss : 1; /* bit 45 */
42 u32 : 2;
43 u32 ext_mb : 1; /* bit 48 */
44 u32 : 7;
45 u32 aif_tdd : 1; /* bit 56 */
46 u32 : 10;
47 u32 aif_osa : 1; /* bit 67 */
48 u32 : 28;
49}__attribute__((packed));
50
51struct css_chsc_char {
52 u64 res;
53 u64 : 43;
54 u32 scssc : 1; /* bit 107 */
55 u32 scsscf : 1; /* bit 108 */
56 u32 : 19;
57}__attribute__((packed));
58
59extern struct css_general_char css_general_characteristics;
60extern struct css_chsc_char css_chsc_characteristics;
61
62extern int chsc_determine_css_characteristics(void);
63extern int css_characteristics_avail;
64
65extern void *chsc_get_chp_desc(struct subchannel*, int);
66#endif
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
new file mode 100644
index 000000000000..99ce5a567982
--- /dev/null
+++ b/drivers/s390/cio/cio.c
@@ -0,0 +1,860 @@
1/*
2 * drivers/s390/cio/cio.c
3 * S/390 common I/O routines -- low level i/o calls
4 * $Revision: 1.131 $
5 *
6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation
8 * Author(s): Ingo Adlung (adlung@de.ibm.com)
9 * Cornelia Huck (cohuck@de.ibm.com)
10 * Arnd Bergmann (arndb@de.ibm.com)
11 * Martin Schwidefsky (schwidefsky@de.ibm.com)
12 */
13
14#include <linux/module.h>
15#include <linux/config.h>
16#include <linux/init.h>
17#include <linux/slab.h>
18#include <linux/device.h>
19#include <linux/kernel_stat.h>
20#include <linux/interrupt.h>
21
22#include <asm/cio.h>
23#include <asm/delay.h>
24#include <asm/irq.h>
25
26#include "airq.h"
27#include "cio.h"
28#include "css.h"
29#include "chsc.h"
30#include "ioasm.h"
31#include "blacklist.h"
32#include "cio_debug.h"
33
34debug_info_t *cio_debug_msg_id;
35debug_info_t *cio_debug_trace_id;
36debug_info_t *cio_debug_crw_id;
37
38int cio_show_msg;
39
40static int __init
41cio_setup (char *parm)
42{
43 if (!strcmp (parm, "yes"))
44 cio_show_msg = 1;
45 else if (!strcmp (parm, "no"))
46 cio_show_msg = 0;
47 else
48 printk (KERN_ERR "cio_setup : invalid cio_msg parameter '%s'",
49 parm);
50 return 1;
51}
52
53__setup ("cio_msg=", cio_setup);
54
55/*
56 * Function: cio_debug_init
57 * Initializes three debug logs (under /proc/s390dbf) for common I/O:
58 * - cio_msg logs the messages which are printk'ed when CONFIG_DEBUG_IO is on
59 * - cio_trace logs the calling of different functions
60 * - cio_crw logs the messages which are printk'ed when CONFIG_DEBUG_CRW is on
61 * debug levels depend on CONFIG_DEBUG_IO and CONFIG_DEBUG_CRW, respectively
62 */
63static int __init
64cio_debug_init (void)
65{
66 cio_debug_msg_id = debug_register ("cio_msg", 4, 4, 16*sizeof (long));
67 if (!cio_debug_msg_id)
68 goto out_unregister;
69 debug_register_view (cio_debug_msg_id, &debug_sprintf_view);
70 debug_set_level (cio_debug_msg_id, 2);
71 cio_debug_trace_id = debug_register ("cio_trace", 4, 4, 8);
72 if (!cio_debug_trace_id)
73 goto out_unregister;
74 debug_register_view (cio_debug_trace_id, &debug_hex_ascii_view);
75 debug_set_level (cio_debug_trace_id, 2);
76 cio_debug_crw_id = debug_register ("cio_crw", 2, 4, 16*sizeof (long));
77 if (!cio_debug_crw_id)
78 goto out_unregister;
79 debug_register_view (cio_debug_crw_id, &debug_sprintf_view);
80 debug_set_level (cio_debug_crw_id, 2);
81 pr_debug("debugging initialized\n");
82 return 0;
83
84out_unregister:
85 if (cio_debug_msg_id)
86 debug_unregister (cio_debug_msg_id);
87 if (cio_debug_trace_id)
88 debug_unregister (cio_debug_trace_id);
89 if (cio_debug_crw_id)
90 debug_unregister (cio_debug_crw_id);
91 pr_debug("could not initialize debugging\n");
92 return -1;
93}
94
95arch_initcall (cio_debug_init);
96
97int
98cio_set_options (struct subchannel *sch, int flags)
99{
100 sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
101 sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
102 sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
103 return 0;
104}
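
/*
 * Typical use is to or the DOIO_* flags together before starting I/O,
 * e.g.
 *
 *	cio_set_options(sch, DOIO_ALLOW_SUSPEND | DOIO_SUPPRESS_INTER);
 *
 * which allows channel program suspension and suppresses intermediate
 * interrupts while suspended (see the orb setup in cio_start_key()).
 */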
105
106/* FIXME: who wants to use this? */
107int
108cio_get_options (struct subchannel *sch)
109{
110 int flags;
111
112 flags = 0;
113 if (sch->options.suspend)
114 flags |= DOIO_ALLOW_SUSPEND;
115 if (sch->options.prefetch)
116 flags |= DOIO_DENY_PREFETCH;
117 if (sch->options.inter)
118 flags |= DOIO_SUPPRESS_INTER;
119 return flags;
120}
121
122/*
123 * Use tpi to get a pending interrupt, call the interrupt handler and
124 * return a pointer to the subchannel structure.
125 */
126static inline int
127cio_tpi(void)
128{
129 struct tpi_info *tpi_info;
130 struct subchannel *sch;
131 struct irb *irb;
132
133 tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
134 if (tpi (NULL) != 1)
135 return 0;
136 irb = (struct irb *) __LC_IRB;
137 /* Store interrupt response block to lowcore. */
138 if (tsch (tpi_info->irq, irb) != 0)
139 /* Not status pending or not operational. */
140 return 1;
141 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
142 if (!sch)
143 return 1;
144 local_bh_disable();
145 irq_enter ();
146 spin_lock(&sch->lock);
147 memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
148 if (sch->driver && sch->driver->irq)
149 sch->driver->irq(&sch->dev);
150 spin_unlock(&sch->lock);
151 irq_exit ();
152 __local_bh_enable();
153 return 1;
154}
155
156static inline int
157cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
158{
159 char dbf_text[15];
160
161 if (lpm != 0)
162 sch->lpm &= ~lpm;
163 else
164 sch->lpm = 0;
165
166 stsch (sch->irq, &sch->schib);
167
168 CIO_MSG_EVENT(0, "cio_start: 'not oper' status for "
169 "subchannel %04x!\n", sch->irq);
170 sprintf(dbf_text, "no%s", sch->dev.bus_id);
171 CIO_TRACE_EVENT(0, dbf_text);
172 CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
173
174 return (sch->lpm ? -EACCES : -ENODEV);
175}
176
177int
178cio_start_key (struct subchannel *sch, /* subchannel structure */
179 struct ccw1 * cpa, /* logical channel prog addr */
180 __u8 lpm, /* logical path mask */
181 __u8 key) /* storage key */
182{
183 char dbf_txt[15];
184 int ccode;
185
186 CIO_TRACE_EVENT (4, "stIO");
187 CIO_TRACE_EVENT (4, sch->dev.bus_id);
188
189 /* sch is always under 2G. */
190 sch->orb.intparm = (__u32)(unsigned long)sch;
191 sch->orb.fmt = 1;
192
193 sch->orb.pfch = sch->options.prefetch == 0;
194 sch->orb.spnd = sch->options.suspend;
195 sch->orb.ssic = sch->options.suspend && sch->options.inter;
196 sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm;
197#ifdef CONFIG_ARCH_S390X
198 /*
199 * for 64 bit we always support 64 bit IDAWs with 4k page size only
200 */
201 sch->orb.c64 = 1;
202 sch->orb.i2k = 0;
203#endif
204 sch->orb.key = key >> 4;
205 /* issue "Start Subchannel" */
206 sch->orb.cpa = (__u32) __pa (cpa);
207 ccode = ssch (sch->irq, &sch->orb);
208
209 /* process condition code */
210 sprintf (dbf_txt, "ccode:%d", ccode);
211 CIO_TRACE_EVENT (4, dbf_txt);
212
213 switch (ccode) {
214 case 0:
215 /*
216 * initialize device status information
217 */
218 sch->schib.scsw.actl |= SCSW_ACTL_START_PEND;
219 return 0;
220 case 1: /* status pending */
221 case 2: /* busy */
222 return -EBUSY;
223 default: /* device/path not operational */
224 return cio_start_handle_notoper(sch, lpm);
225 }
226}
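
/*
 * Note that the access key is taken from the high nibble of the key
 * argument (key >> 4 above), so e.g. a key argument of 0x50 selects
 * storage access key 5; cio_start() below simply passes
 * default_storage_key.
 */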
227
228int
229cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
230{
231 return cio_start_key(sch, cpa, lpm, default_storage_key);
232}
233
234/*
235 * resume suspended I/O operation
236 */
237int
238cio_resume (struct subchannel *sch)
239{
240 char dbf_txt[15];
241 int ccode;
242
243 CIO_TRACE_EVENT (4, "resIO");
244 CIO_TRACE_EVENT (4, sch->dev.bus_id);
245
246 ccode = rsch (sch->irq);
247
248 sprintf (dbf_txt, "ccode:%d", ccode);
249 CIO_TRACE_EVENT (4, dbf_txt);
250
251 switch (ccode) {
252 case 0:
253 sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND;
254 return 0;
255 case 1:
256 return -EBUSY;
257 case 2:
258 return -EINVAL;
259 default:
260 /*
261 * useless to wait for request completion
262		 * as the device is no longer operational!
263 */
264 return -ENODEV;
265 }
266}
267
268/*
269 * halt I/O operation
270 */
271int
272cio_halt(struct subchannel *sch)
273{
274 char dbf_txt[15];
275 int ccode;
276
277 if (!sch)
278 return -ENODEV;
279
280 CIO_TRACE_EVENT (2, "haltIO");
281 CIO_TRACE_EVENT (2, sch->dev.bus_id);
282
283 /*
284 * Issue "Halt subchannel" and process condition code
285 */
286 ccode = hsch (sch->irq);
287
288 sprintf (dbf_txt, "ccode:%d", ccode);
289 CIO_TRACE_EVENT (2, dbf_txt);
290
291 switch (ccode) {
292 case 0:
293 sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND;
294 return 0;
295 case 1: /* status pending */
296 case 2: /* busy */
297 return -EBUSY;
298 default: /* device not operational */
299 return -ENODEV;
300 }
301}
302
303/*
304 * Clear I/O operation
305 */
306int
307cio_clear(struct subchannel *sch)
308{
309 char dbf_txt[15];
310 int ccode;
311
312 if (!sch)
313 return -ENODEV;
314
315 CIO_TRACE_EVENT (2, "clearIO");
316 CIO_TRACE_EVENT (2, sch->dev.bus_id);
317
318 /*
319 * Issue "Clear subchannel" and process condition code
320 */
321 ccode = csch (sch->irq);
322
323 sprintf (dbf_txt, "ccode:%d", ccode);
324 CIO_TRACE_EVENT (2, dbf_txt);
325
326 switch (ccode) {
327 case 0:
328 sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND;
329 return 0;
330 default: /* device not operational */
331 return -ENODEV;
332 }
333}
334
335/*
336 * Function: cio_cancel
337 * Issues a "Cancel Subchannel" on the specified subchannel
338 * Note: We don't need any fancy intparms and flags here
339 * since xsch is executed synchronously.
340 * Only for common I/O internal use for now.
341 */
342int
343cio_cancel (struct subchannel *sch)
344{
345 char dbf_txt[15];
346 int ccode;
347
348 if (!sch)
349 return -ENODEV;
350
351 CIO_TRACE_EVENT (2, "cancelIO");
352 CIO_TRACE_EVENT (2, sch->dev.bus_id);
353
354 ccode = xsch (sch->irq);
355
356 sprintf (dbf_txt, "ccode:%d", ccode);
357 CIO_TRACE_EVENT (2, dbf_txt);
358
359 switch (ccode) {
360 case 0: /* success */
361 /* Update information in scsw. */
362 stsch (sch->irq, &sch->schib);
363 return 0;
364 case 1: /* status pending */
365 return -EBUSY;
366 case 2: /* not applicable */
367 return -EINVAL;
368 default: /* not oper */
369 return -ENODEV;
370 }
371}
372
373/*
374 * Function: cio_modify
375 * Issues a "Modify Subchannel" on the specified subchannel
376 */
377int
378cio_modify (struct subchannel *sch)
379{
380 int ccode, retry, ret;
381
382 ret = 0;
383 for (retry = 0; retry < 5; retry++) {
384 ccode = msch_err (sch->irq, &sch->schib);
385 if (ccode < 0) /* -EIO if msch gets a program check. */
386 return ccode;
387 switch (ccode) {
388		case 0:	/* successful */
389 return 0;
390 case 1: /* status pending */
391 return -EBUSY;
392 case 2: /* busy */
393 udelay (100); /* allow for recovery */
394 ret = -EBUSY;
395 break;
396 case 3: /* not operational */
397 return -ENODEV;
398 }
399 }
400 return ret;
401}
402
403/*
404 * Enable subchannel.
405 */
406int
407cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
408{
409 char dbf_txt[15];
410 int ccode;
411 int retry;
412 int ret;
413
414 CIO_TRACE_EVENT (2, "ensch");
415 CIO_TRACE_EVENT (2, sch->dev.bus_id);
416
417 ccode = stsch (sch->irq, &sch->schib);
418 if (ccode)
419 return -ENODEV;
420
421 for (retry = 5, ret = 0; retry > 0; retry--) {
422 sch->schib.pmcw.ena = 1;
423 sch->schib.pmcw.isc = isc;
424 sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
425 ret = cio_modify(sch);
426 if (ret == -ENODEV)
427 break;
428 if (ret == -EIO)
429 /*
430 * Got a program check in cio_modify. Try without
431 * the concurrent sense bit the next time.
432 */
433 sch->schib.pmcw.csense = 0;
434 if (ret == 0) {
435 stsch (sch->irq, &sch->schib);
436 if (sch->schib.pmcw.ena)
437 break;
438 }
439 if (ret == -EBUSY) {
440 struct irb irb;
441 if (tsch(sch->irq, &irb) != 0)
442 break;
443 }
444 }
445 sprintf (dbf_txt, "ret:%d", ret);
446 CIO_TRACE_EVENT (2, dbf_txt);
447 return ret;
448}
449
450/*
451 * Disable subchannel.
452 */
453int
454cio_disable_subchannel (struct subchannel *sch)
455{
456 char dbf_txt[15];
457 int ccode;
458 int retry;
459 int ret;
460
461 CIO_TRACE_EVENT (2, "dissch");
462 CIO_TRACE_EVENT (2, sch->dev.bus_id);
463
464 ccode = stsch (sch->irq, &sch->schib);
465 if (ccode == 3) /* Not operational. */
466 return -ENODEV;
467
468 if (sch->schib.scsw.actl != 0)
469 /*
470 * the disable function must not be called while there are
471	 * requests pending for completion!
472 */
473 return -EBUSY;
474
475 for (retry = 5, ret = 0; retry > 0; retry--) {
476 sch->schib.pmcw.ena = 0;
477 ret = cio_modify(sch);
478 if (ret == -ENODEV)
479 break;
480 if (ret == -EBUSY)
481 /*
482 * The subchannel is busy or status pending.
483			 * We'll disable it when the next interrupt is delivered
484 * via the state machine.
485 */
486 break;
487 if (ret == 0) {
488 stsch (sch->irq, &sch->schib);
489 if (!sch->schib.pmcw.ena)
490 break;
491 }
492 }
493 sprintf (dbf_txt, "ret:%d", ret);
494 CIO_TRACE_EVENT (2, dbf_txt);
495 return ret;
496}
497
498/*
499 * cio_validate_subchannel()
500 *
501 * Find out subchannel type and initialize struct subchannel.
502 * Return codes:
503 * SUBCHANNEL_TYPE_IO for a normal io subchannel
504 * SUBCHANNEL_TYPE_CHSC for a chsc subchannel
505 * SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel
506 *   SUBCHANNEL_TYPE_ADM for an ADM(?) subchannel
507 * -ENXIO for non-defined subchannels
508 * -ENODEV for subchannels with invalid device number or blacklisted devices
509 */
510int
511cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
512{
513 char dbf_txt[15];
514 int ccode;
515
516 sprintf (dbf_txt, "valsch%x", irq);
517 CIO_TRACE_EVENT (4, dbf_txt);
518
519 /* Nuke all fields. */
520 memset(sch, 0, sizeof(struct subchannel));
521
522 spin_lock_init(&sch->lock);
523
524 /* Set a name for the subchannel */
525 snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", irq);
526
527 /*
528	 * The first subchannel that is not operational (ccode==3)
529 * indicates that there aren't any more devices available.
530 */
531 sch->irq = irq;
532 ccode = stsch (irq, &sch->schib);
533 if (ccode)
534 return -ENXIO;
535
536 /* Copy subchannel type from path management control word. */
537 sch->st = sch->schib.pmcw.st;
538
539 /*
540	 * ... just being curious, we check for non-I/O subchannels
541 */
542 if (sch->st != 0) {
543 CIO_DEBUG(KERN_INFO, 0,
544 "Subchannel %04X reports "
545 "non-I/O subchannel type %04X\n",
546 sch->irq, sch->st);
547 /* We stop here for non-io subchannels. */
548 return sch->st;
549 }
550
551 /* Initialization for io subchannels. */
552 if (!sch->schib.pmcw.dnv)
553 /* io subchannel but device number is invalid. */
554 return -ENODEV;
555
556 /* Devno is valid. */
557 if (is_blacklisted (sch->schib.pmcw.dev)) {
558 /*
559 * This device must not be known to Linux. So we simply
560 * say that there is no device and return ENODEV.
561 */
562 CIO_MSG_EVENT(0, "Blacklisted device detected "
563 "at devno %04X\n", sch->schib.pmcw.dev);
564 return -ENODEV;
565 }
566 sch->opm = 0xff;
567 chsc_validate_chpids(sch);
568 sch->lpm = sch->schib.pmcw.pim &
569 sch->schib.pmcw.pam &
570 sch->schib.pmcw.pom &
571 sch->opm;
572
573 CIO_DEBUG(KERN_INFO, 0,
574 "Detected device %04X on subchannel %04X"
575 " - PIM = %02X, PAM = %02X, POM = %02X\n",
576 sch->schib.pmcw.dev, sch->irq, sch->schib.pmcw.pim,
577 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
578
579 /*
580 * We now have to initially ...
581 * ... set "interruption subclass"
582 * ... enable "concurrent sense"
583 * ... enable "multipath mode" if more than one
584 * CHPID is available. This is done regardless
585	 *  of whether multiple paths are available for us.
586 */
587	sch->schib.pmcw.isc = 3;	/* could be something else */
588 sch->schib.pmcw.csense = 1; /* concurrent sense */
589 sch->schib.pmcw.ena = 0;
590 if ((sch->lpm & (sch->lpm - 1)) != 0)
591 sch->schib.pmcw.mp = 1; /* multipath mode */
592 return 0;
593}
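
/*
 * The multipath check above uses the usual "more than one bit set"
 * idiom: lpm & (lpm - 1) is non-zero iff at least two paths are in
 * the mask. Example: 0xc0 & 0xbf == 0x80 (two paths, mp gets set),
 * while 0x80 & 0x7f == 0 (single path, mp stays off).
 */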
594
595/*
596 * do_IRQ() handles all normal I/O device IRQ's (the special
597 * SMP cross-CPU interrupts have their own specific
598 * handlers).
599 *
600 */
601void
602do_IRQ (struct pt_regs *regs)
603{
604 struct tpi_info *tpi_info;
605 struct subchannel *sch;
606 struct irb *irb;
607
608 irq_enter ();
609 asm volatile ("mc 0,0");
610 if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
611		/*
612 * Make sure that the i/o interrupt did not "overtake"
613 * the last HZ timer interrupt.
614 */
615 account_ticks(regs);
616 /*
617 * Get interrupt information from lowcore
618 */
619 tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
620 irb = (struct irb *) __LC_IRB;
621 do {
622 kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
623 /*
624		 * Non-I/O subchannel thin interrupts are processed differently
625 */
626 if (tpi_info->adapter_IO == 1 &&
627 tpi_info->int_type == IO_INTERRUPT_TYPE) {
628 do_adapter_IO();
629 continue;
630 }
631 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
632 if (sch)
633 spin_lock(&sch->lock);
634 /* Store interrupt response block to lowcore. */
635 if (tsch (tpi_info->irq, irb) == 0 && sch) {
636 /* Keep subchannel information word up to date. */
637 memcpy (&sch->schib.scsw, &irb->scsw,
638 sizeof (irb->scsw));
639 /* Call interrupt handler if there is one. */
640 if (sch->driver && sch->driver->irq)
641 sch->driver->irq(&sch->dev);
642 }
643 if (sch)
644 spin_unlock(&sch->lock);
645 /*
646 * Are more interrupts pending?
647 * If so, the tpi instruction will update the lowcore
648 * to hold the info for the next interrupt.
649 * We don't do this for VM because a tpi drops the cpu
650 * out of the sie which costs more cycles than it saves.
651 */
652 } while (!MACHINE_IS_VM && tpi (NULL) != 0);
653 irq_exit ();
654}
655
656#ifdef CONFIG_CCW_CONSOLE
657static struct subchannel console_subchannel;
658static int console_subchannel_in_use;
659
660/*
661 * busy wait for the next interrupt on the console
662 */
663void
664wait_cons_dev (void)
665{
666 unsigned long cr6 __attribute__ ((aligned (8)));
667 unsigned long save_cr6 __attribute__ ((aligned (8)));
668
669 /*
670 * before entering the spinlock we may already have
671 * processed the interrupt on a different CPU...
672 */
673 if (!console_subchannel_in_use)
674 return;
675
676 /* disable all but isc 7 (console device) */
677 __ctl_store (save_cr6, 6, 6);
678 cr6 = 0x01000000;
679 __ctl_load (cr6, 6, 6);
680
681 do {
682 spin_unlock(&console_subchannel.lock);
683 if (!cio_tpi())
684 cpu_relax();
685 spin_lock(&console_subchannel.lock);
686 } while (console_subchannel.schib.scsw.actl != 0);
687 /*
688 * restore previous isc value
689 */
690 __ctl_load (save_cr6, 6, 6);
691}
692
693static int
694cio_console_irq(void)
695{
696 int irq;
697
698 if (console_irq != -1) {
699 /* VM provided us with the irq number of the console. */
700 if (stsch(console_irq, &console_subchannel.schib) != 0 ||
701 !console_subchannel.schib.pmcw.dnv)
702 return -1;
703 console_devno = console_subchannel.schib.pmcw.dev;
704 } else if (console_devno != -1) {
705 /* At least the console device number is known. */
706 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
707 if (stsch(irq, &console_subchannel.schib) != 0)
708 break;
709 if (console_subchannel.schib.pmcw.dnv &&
710 console_subchannel.schib.pmcw.dev ==
711 console_devno) {
712 console_irq = irq;
713 break;
714 }
715 }
716 if (console_irq == -1)
717 return -1;
718 } else {
719 /* unlike in 2.4, we cannot autoprobe here, since
720 * the channel subsystem is not fully initialized.
721 * With some luck, the HWC console can take over */
722 printk(KERN_WARNING "No ccw console found!\n");
723 return -1;
724 }
725 return console_irq;
726}
727
728struct subchannel *
729cio_probe_console(void)
730{
731 int irq, ret;
732
733 if (xchg(&console_subchannel_in_use, 1) != 0)
734 return ERR_PTR(-EBUSY);
735 irq = cio_console_irq();
736 if (irq == -1) {
737 console_subchannel_in_use = 0;
738 return ERR_PTR(-ENODEV);
739 }
740 memset(&console_subchannel, 0, sizeof(struct subchannel));
741 ret = cio_validate_subchannel(&console_subchannel, irq);
742 if (ret) {
743 console_subchannel_in_use = 0;
744 return ERR_PTR(-ENODEV);
745 }
746
747 /*
748 * enable console I/O-interrupt subclass 7
749 */
750 ctl_set_bit(6, 24);
751 console_subchannel.schib.pmcw.isc = 7;
752 console_subchannel.schib.pmcw.intparm =
753 (__u32)(unsigned long)&console_subchannel;
754 ret = cio_modify(&console_subchannel);
755 if (ret) {
756 console_subchannel_in_use = 0;
757 return ERR_PTR(ret);
758 }
759 return &console_subchannel;
760}
761
762void
763cio_release_console(void)
764{
765 console_subchannel.schib.pmcw.intparm = 0;
766 cio_modify(&console_subchannel);
767 ctl_clear_bit(6, 24);
768 console_subchannel_in_use = 0;
769}
770
771/* Bah... hack to catch console special sausages. */
772int
773cio_is_console(int irq)
774{
775 if (!console_subchannel_in_use)
776 return 0;
777 return (irq == console_subchannel.irq);
778}
779
780struct subchannel *
781cio_get_console_subchannel(void)
782{
783 if (!console_subchannel_in_use)
784		return NULL;
785 return &console_subchannel;
786}
787
788#endif
789static inline int
790__disable_subchannel_easy(unsigned int schid, struct schib *schib)
791{
792 int retry, cc;
793
794 cc = 0;
795 for (retry=0;retry<3;retry++) {
796 schib->pmcw.ena = 0;
797 cc = msch(schid, schib);
798 if (cc)
799 return (cc==3?-ENODEV:-EBUSY);
800 stsch(schid, schib);
801 if (!schib->pmcw.ena)
802 return 0;
803 }
804 return -EBUSY; /* uhm... */
805}
806
807static inline int
808__clear_subchannel_easy(unsigned int schid)
809{
810 int retry;
811
812 if (csch(schid))
813 return -ENODEV;
814 for (retry=0;retry<20;retry++) {
815 struct tpi_info ti;
816
817 if (tpi(&ti)) {
818 tsch(schid, (struct irb *)__LC_IRB);
819 return 0;
820 }
821 udelay(100);
822 }
823 return -EBUSY;
824}
825
826extern void do_reipl(unsigned long devno);
827
828/* Clear all subchannels. */
829void
830clear_all_subchannels(void)
831{
832 unsigned int schid;
833
834 local_irq_disable();
835 for (schid=0;schid<=highest_subchannel;schid++) {
836 struct schib schib;
837 if (stsch(schid, &schib))
838 break; /* break out of the loop */
839 if (!schib.pmcw.ena)
840 continue;
841 switch(__disable_subchannel_easy(schid, &schib)) {
842 case 0:
843 case -ENODEV:
844 break;
845 default: /* -EBUSY */
846 if (__clear_subchannel_easy(schid))
847 break; /* give up... jump out of switch */
848 stsch(schid, &schib);
849 __disable_subchannel_easy(schid, &schib);
850 }
851 }
852}
853
854/* Make sure all subchannels are quiet before we re-ipl an lpar. */
855void
856reipl(unsigned long devno)
857{
858 clear_all_subchannels();
859 do_reipl(devno);
860}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
new file mode 100644
index 000000000000..c50a9da420a9
--- /dev/null
+++ b/drivers/s390/cio/cio.h
@@ -0,0 +1,143 @@
1#ifndef S390_CIO_H
2#define S390_CIO_H
3
4/*
5 * where we put the ssd info
6 */
7struct ssd_info {
8 __u8 valid:1;
9 __u8 type:7; /* subchannel type */
10 __u8 chpid[8]; /* chpids */
11 __u16 fla[8]; /* full link addresses */
12} __attribute__ ((packed));
13
14/*
15 * path management control word
16 */
17struct pmcw {
18 __u32 intparm; /* interruption parameter */
19 __u32 qf : 1; /* qdio facility */
20 __u32 res0 : 1; /* reserved zeros */
21	__u32 isc	 : 3;	/* interruption subclass */
22 __u32 res5 : 3; /* reserved zeros */
23 __u32 ena : 1; /* enabled */
24 __u32 lm : 2; /* limit mode */
25 __u32 mme : 2; /* measurement-mode enable */
26 __u32 mp : 1; /* multipath mode */
27 __u32 tf : 1; /* timing facility */
28 __u32 dnv : 1; /* device number valid */
29 __u32 dev : 16; /* device number */
30 __u8 lpm; /* logical path mask */
31 __u8 pnom; /* path not operational mask */
32 __u8 lpum; /* last path used mask */
33 __u8 pim; /* path installed mask */
34 __u16 mbi; /* measurement-block index */
35 __u8 pom; /* path operational mask */
36 __u8 pam; /* path available mask */
37 __u8 chpid[8]; /* CHPID 0-7 (if available) */
38 __u32 unused1 : 8; /* reserved zeros */
39 __u32 st : 3; /* subchannel type */
40 __u32 unused2 : 18; /* reserved zeros */
41 __u32 mbfc : 1; /* measurement block format control */
42 __u32 xmwme : 1; /* extended measurement word mode enable */
43 __u32 csense : 1; /* concurrent sense; can be enabled ...*/
44 /* ... per MSCH, however, if facility */
45 /* ... is not installed, this results */
46 /* ... in an operand exception. */
47} __attribute__ ((packed));
48
49/*
50 * subchannel information block
51 */
52struct schib {
53 struct pmcw pmcw; /* path management control word */
54 struct scsw scsw; /* subchannel status word */
55 __u64 mba; /* measurement block address */
56 __u8 mda[4]; /* model dependent area */
57} __attribute__ ((packed,aligned(4)));
58
59/*
60 * operation request block
61 */
62struct orb {
63 __u32 intparm; /* interruption parameter */
64 __u32 key : 4; /* flags, like key, suspend control, etc. */
65 __u32 spnd : 1; /* suspend control */
66 __u32 res1 : 1; /* reserved */
67 __u32 mod : 1; /* modification control */
68 __u32 sync : 1; /* synchronize control */
69 __u32 fmt : 1; /* format control */
70 __u32 pfch : 1; /* prefetch control */
71 __u32 isic : 1; /* initial-status-interruption control */
72 __u32 alcc : 1; /* address-limit-checking control */
73 __u32 ssic : 1; /* suppress-suspended-interr. control */
74 __u32 res2 : 1; /* reserved */
75 __u32 c64 : 1; /* IDAW/QDIO 64 bit control */
76 __u32 i2k : 1; /* IDAW 2/4kB block size control */
77 __u32 lpm : 8; /* logical path mask */
78 __u32 ils : 1; /* incorrect length */
79 __u32 zero : 6; /* reserved zeros */
80 __u32 orbx : 1; /* ORB extension control */
81 __u32 cpa; /* channel program address */
82} __attribute__ ((packed,aligned(4)));
83
84/* subchannel data structure used by I/O subroutines */
85struct subchannel {
86 unsigned int irq; /* aka. subchannel number */
87 spinlock_t lock; /* subchannel lock */
88
89 enum {
90 SUBCHANNEL_TYPE_IO = 0,
91 SUBCHANNEL_TYPE_CHSC = 1,
92 SUBCHANNEL_TYPE_MESSAGE = 2,
93 SUBCHANNEL_TYPE_ADM = 3,
94 } st; /* subchannel type */
95
96 struct {
97 unsigned int suspend:1; /* allow suspend */
98 unsigned int prefetch:1;/* deny prefetch */
99 unsigned int inter:1; /* suppress intermediate interrupts */
100 } __attribute__ ((packed)) options;
101
102 __u8 vpm; /* verified path mask */
103 __u8 lpm; /* logical path mask */
104 __u8 opm; /* operational path mask */
105 struct schib schib; /* subchannel information block */
106 struct orb orb; /* operation request block */
107 struct ccw1 sense_ccw; /* static ccw for sense command */
108 struct ssd_info ssd_info; /* subchannel description */
109 struct device dev; /* entry in device tree */
110 struct css_driver *driver;
111} __attribute__ ((aligned(8)));
112
113#define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */
114
115#define to_subchannel(n) container_of(n, struct subchannel, dev)
116
117extern int cio_validate_subchannel (struct subchannel *, unsigned int);
118extern int cio_enable_subchannel (struct subchannel *, unsigned int);
119extern int cio_disable_subchannel (struct subchannel *);
120extern int cio_cancel (struct subchannel *);
121extern int cio_clear (struct subchannel *);
122extern int cio_resume (struct subchannel *);
123extern int cio_halt (struct subchannel *);
124extern int cio_start (struct subchannel *, struct ccw1 *, __u8);
125extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8);
126extern int cio_cancel (struct subchannel *);
127extern int cio_set_options (struct subchannel *, int);
128extern int cio_get_options (struct subchannel *);
129extern int cio_modify (struct subchannel *);
130/* Use with care. */
131#ifdef CONFIG_CCW_CONSOLE
132extern struct subchannel *cio_probe_console(void);
133extern void cio_release_console(void);
134extern int cio_is_console(int irq);
135extern struct subchannel *cio_get_console_subchannel(void);
136#else
137#define cio_is_console(irq) 0
138#define cio_get_console_subchannel() NULL
139#endif
140
141extern int cio_show_msg;
142
143#endif
diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h
new file mode 100644
index 000000000000..6af8b27d366b
--- /dev/null
+++ b/drivers/s390/cio/cio_debug.h
@@ -0,0 +1,32 @@
1#ifndef CIO_DEBUG_H
2#define CIO_DEBUG_H
3
4#include <asm/debug.h>
5
6#define CIO_TRACE_EVENT(imp, txt) do { \
7 debug_text_event(cio_debug_trace_id, imp, txt); \
8 } while (0)
9
10#define CIO_MSG_EVENT(imp, args...) do { \
11 debug_sprintf_event(cio_debug_msg_id, imp , ##args); \
12 } while (0)
13
14#define CIO_CRW_EVENT(imp, args...) do { \
15 debug_sprintf_event(cio_debug_crw_id, imp , ##args); \
16 } while (0)
17
18#define CIO_HEX_EVENT(imp, args...) do { \
19 debug_event(cio_debug_trace_id, imp, ##args); \
20 } while (0)
21
22#define CIO_DEBUG(printk_level,event_level,msg...) ({ \
23 if (cio_show_msg) printk(printk_level msg); \
24 CIO_MSG_EVENT (event_level, msg); \
25})
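
/*
 * CIO_DEBUG printk's the message (if enabled via the cio_msg= boot
 * parameter) and always logs it to the cio_msg debug feature, e.g.
 *
 *	CIO_DEBUG(KERN_INFO, 0, "Subchannel %04X reports "
 *		  "non-I/O subchannel type %04X\n", sch->irq, sch->st);
 */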
26
27/* for use of debug feature */
28extern debug_info_t *cio_debug_msg_id;
29extern debug_info_t *cio_debug_trace_id;
30extern debug_info_t *cio_debug_crw_id;
31
32#endif
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
new file mode 100644
index 000000000000..49def26ba383
--- /dev/null
+++ b/drivers/s390/cio/cmf.c
@@ -0,0 +1,1042 @@
1/*
2 * linux/drivers/s390/cio/cmf.c ($Revision: 1.16 $)
3 *
4 * Linux on zSeries Channel Measurement Facility support
5 *
6 * Copyright 2000,2003 IBM Corporation
7 *
8 * Author: Arnd Bergmann <arndb@de.ibm.com>
9 *
10 * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <linux/bootmem.h>
28#include <linux/device.h>
29#include <linux/init.h>
30#include <linux/list.h>
31#include <linux/module.h>
32#include <linux/moduleparam.h>
33
34#include <asm/ccwdev.h>
35#include <asm/cio.h>
36#include <asm/cmb.h>
37
38#include "cio.h"
39#include "css.h"
40#include "device.h"
41#include "ioasm.h"
42#include "chsc.h"
43
44/* parameter to enable cmf during boot, possible uses are:
45 * "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
46 * used on any subchannel
47 * "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
48 *               <num> subchannels, where <num> is an integer
49 * between 1 and 65535, default is 1024
50 */
51#define ARGSTRING "s390cmf"
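
/* so booting with "s390cmf=4096", for example, reserves measurement
 * space for 4096 subchannels, while a plain "s390cmf" uses the
 * defaults described above. */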
52
53/* indices for READCMB */
54enum cmb_index {
55	/* basic and extended format: */
56 cmb_ssch_rsch_count,
57 cmb_sample_count,
58 cmb_device_connect_time,
59 cmb_function_pending_time,
60 cmb_device_disconnect_time,
61 cmb_control_unit_queuing_time,
62 cmb_device_active_only_time,
63 /* extended format only: */
64 cmb_device_busy_time,
65 cmb_initial_command_response_time,
66};
67
68/**
69 * enum cmb_format - types of supported measurement block formats
70 *
71 * @CMF_BASIC: traditional channel measurement blocks supported
72 * by all machines that we run on
73 * @CMF_EXTENDED: improved format that was introduced with the z990
74 * machine
75 * @CMF_AUTODETECT: default: use extended format when running on a z990
76 * or later machine, otherwise fall back to basic format
77 **/
78enum cmb_format {
79 CMF_BASIC,
80 CMF_EXTENDED,
81 CMF_AUTODETECT = -1,
82};
83/**
84 * format - actual format for all measurement blocks
85 *
86 * The format module parameter can be set to a value of 0 (zero)
87 * or 1, indicating basic or extended format as described for
88 * enum cmb_format.
89 */
90static int format = CMF_AUTODETECT;
91module_param(format, bool, 0444);
92
93/**
94 * struct cmb_operations - functions to use depending on cmb_format
95 *
96 * all these functions operate on a struct ccw_device. There is only
97 * one instance of struct cmb_operations because all ccw_device
98 * objects are guaranteed to be of the same type.
99 *
100 * @alloc: allocate memory for a channel measurement block,
101 * either with the help of a special pool or with kmalloc
102 * @free: free memory allocated with @alloc
103 * @set: enable or disable measurement
104 * @readall: read a measurement block in a common format
105 * @reset: clear the data in the associated measurement block and
106 * reset its time stamp
107 */
108struct cmb_operations {
109 int (*alloc) (struct ccw_device*);
110 void(*free) (struct ccw_device*);
111 int (*set) (struct ccw_device*, u32);
112 u64 (*read) (struct ccw_device*, int);
113 int (*readall)(struct ccw_device*, struct cmbdata *);
114 void (*reset) (struct ccw_device*);
115
116 struct attribute_group *attr_group;
117};
118static struct cmb_operations *cmbops;
119
120/* our user interface is designed in terms of nanoseconds,
121 * while the hardware measures total times in its own
122 * unit. */
123static inline u64 time_to_nsec(u32 value)
124{
125 return ((u64)value) * 128000ull;
126}
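
/*
 * Example: the hardware unit is 128 usec, so a raw counter value of
 * 10 becomes 10 * 128000 ns == 1280000 ns == 1.28 ms.
 */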
127
128/*
129 * Users are usually interested in average times,
130 * not accumulated time.
131 * This also helps us with atomicity problems
132 * when reading single values.
133 */
134static inline u64 time_to_avg_nsec(u32 value, u32 count)
135{
136 u64 ret;
137
138 /* no samples yet, avoid division by 0 */
139 if (count == 0)
140 return 0;
141
142 /* value comes in units of 128 µsec */
143 ret = time_to_nsec(value);
144 do_div(ret, count);
145
146 return ret;
147}
148
149/* activate or deactivate the channel monitor. When area is NULL,
150 * the monitor is deactivated. The channel monitor needs to
151 * be active in order to measure subchannels, which also need
152 * to be enabled. */
153static inline void
154cmf_activate(void *area, unsigned int onoff)
155{
156 register void * __gpr2 asm("2");
157 register long __gpr1 asm("1");
158
159 __gpr2 = area;
160 __gpr1 = onoff ? 2 : 0;
161 /* activate channel measurement */
162 asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
163}
164
165static int
166set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
167{
168 int ret;
169 int retry;
170 struct subchannel *sch;
171 struct schib *schib;
172
173 sch = to_subchannel(cdev->dev.parent);
174 schib = &sch->schib;
175 /* msch can silently fail, so do it again if necessary */
176 for (retry = 0; retry < 3; retry++) {
177 /* prepare schib */
178 stsch(sch->irq, schib);
179 schib->pmcw.mme = mme;
180 schib->pmcw.mbfc = mbfc;
181 /* address can be either a block address or a block index */
182 if (mbfc)
183 schib->mba = address;
184 else
185 schib->pmcw.mbi = address;
186
187 /* try to submit it */
188 switch(ret = msch_err(sch->irq, schib)) {
189 case 0:
190 break;
191 case 1:
192 case 2: /* in I/O or status pending */
193 ret = -EBUSY;
194 break;
195 case 3: /* subchannel is no longer valid */
196 ret = -ENODEV;
197 break;
198 default: /* msch caught an exception */
199 ret = -EINVAL;
200 break;
201 }
202 stsch(sch->irq, schib); /* restore the schib */
203
204 if (ret)
205 break;
206
207 /* check if it worked */
208 if (schib->pmcw.mme == mme &&
209 schib->pmcw.mbfc == mbfc &&
210 (mbfc ? (schib->mba == address)
211 : (schib->pmcw.mbi == address)))
212 return 0;
213
214 ret = -EINVAL;
215 }
216
217 return ret;
218}
219
220struct set_schib_struct {
221 u32 mme;
222 int mbfc;
223 unsigned long address;
224 wait_queue_head_t wait;
225 int ret;
226};
227
228static int set_schib_wait(struct ccw_device *cdev, u32 mme,
229 int mbfc, unsigned long address)
230{
231 struct set_schib_struct s = {
232 .mme = mme,
233 .mbfc = mbfc,
234 .address = address,
235 .wait = __WAIT_QUEUE_HEAD_INITIALIZER(s.wait),
236 };
237
238 spin_lock_irq(cdev->ccwlock);
239 s.ret = set_schib(cdev, mme, mbfc, address);
240 if (s.ret != -EBUSY) {
241 goto out_nowait;
242 }
243
244 if (cdev->private->state != DEV_STATE_ONLINE) {
245 s.ret = -EBUSY;
246 /* if the device is not online, don't even try again */
247 goto out_nowait;
248 }
249 cdev->private->state = DEV_STATE_CMFCHANGE;
250 cdev->private->cmb_wait = &s;
251 s.ret = 1;
252
253 spin_unlock_irq(cdev->ccwlock);
254 if (wait_event_interruptible(s.wait, s.ret != 1)) {
255 spin_lock_irq(cdev->ccwlock);
256 if (s.ret == 1) {
257 s.ret = -ERESTARTSYS;
258 cdev->private->cmb_wait = NULL;
259 if (cdev->private->state == DEV_STATE_CMFCHANGE)
260 cdev->private->state = DEV_STATE_ONLINE;
261 }
262 spin_unlock_irq(cdev->ccwlock);
263 }
264 return s.ret;
265
266out_nowait:
267 spin_unlock_irq(cdev->ccwlock);
268 return s.ret;
269}
270
271void retry_set_schib(struct ccw_device *cdev)
272{
273 struct set_schib_struct *s;
274
275 s = cdev->private->cmb_wait;
276 cdev->private->cmb_wait = NULL;
277 if (!s) {
278 WARN_ON(1);
279 return;
280 }
281 s->ret = set_schib(cdev, s->mme, s->mbfc, s->address);
282 wake_up(&s->wait);
283}
284
285/**
286 * struct cmb_area - container for global cmb data
287 *
288 * @mem: pointer to CMBs (only in basic measurement mode)
289 * @list: contains a linked list of all subchannels
290 * @lock: protect concurrent access to @mem and @list
291 */
292struct cmb_area {
293 struct cmb *mem;
294 struct list_head list;
295 int num_channels;
296 spinlock_t lock;
297};
298
299static struct cmb_area cmb_area = {
300 .lock = SPIN_LOCK_UNLOCKED,
301 .list = LIST_HEAD_INIT(cmb_area.list),
302 .num_channels = 1024,
303};
304
305
306/* ****** old style CMB handling ********/
307
308/** int maxchannels
309 *
310 * Basic channel measurement blocks are allocated in one contiguous
311 * block of memory, which can not be moved as long as any channel
312 * is active. Therefore, a maximum number of subchannels needs to
313 * be defined somewhere. This is a module parameter, defaulting to
314 * a reasonable value of 1024, or 32 kb of memory.
315 * Current kernels don't allow kmalloc with more than 128 kb, so the
316 * maximum is 4096.
317 */
318
319module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
320
321/**
322 * struct cmb - basic channel measurement block
323 *
324 * cmb as used by the hardware. The fields are described in z/Architecture
325 * Principles of Operation, chapter 17.
326 * The area needs to be a contiguous array and may not be reallocated or freed.
327 * Only one cmb area can be present in the system.
328 */
329struct cmb {
330 u16 ssch_rsch_count;
331 u16 sample_count;
332 u32 device_connect_time;
333 u32 function_pending_time;
334 u32 device_disconnect_time;
335 u32 control_unit_queuing_time;
336 u32 device_active_only_time;
337 u32 reserved[2];
338};
339
340/* insert a single device into the cmb_area list
341 * called with cmb_area.lock held from alloc_cmb
342 */
343static inline int
344alloc_cmb_single (struct ccw_device *cdev)
345{
346 struct cmb *cmb;
347 struct ccw_device_private *node;
348 int ret;
349
350 spin_lock_irq(cdev->ccwlock);
351 if (!list_empty(&cdev->private->cmb_list)) {
352 ret = -EBUSY;
353 goto out;
354 }
355
356 /* find first unused cmb in cmb_area.mem.
357 * this is a little tricky: cmb_area.list
358 * remains sorted by ->cmb pointers */
359 cmb = cmb_area.mem;
360 list_for_each_entry(node, &cmb_area.list, cmb_list) {
361 if ((struct cmb*)node->cmb > cmb)
362 break;
363 cmb++;
364 }
365 if (cmb - cmb_area.mem >= cmb_area.num_channels) {
366 ret = -ENOMEM;
367 goto out;
368 }
369
370 /* insert new cmb */
371 list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
372 cdev->private->cmb = cmb;
373 ret = 0;
374out:
375 spin_unlock_irq(cdev->ccwlock);
376 return ret;
377}
378
379static int
380alloc_cmb (struct ccw_device *cdev)
381{
382 int ret;
383 struct cmb *mem;
384 ssize_t size;
385
386 spin_lock(&cmb_area.lock);
387
388 if (!cmb_area.mem) {
389 /* there is no user yet, so we need a new area */
390 size = sizeof(struct cmb) * cmb_area.num_channels;
391 WARN_ON(!list_empty(&cmb_area.list));
392
393 spin_unlock(&cmb_area.lock);
394 mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
395 get_order(size));
396 spin_lock(&cmb_area.lock);
397
398 if (cmb_area.mem) {
399 /* ok, another thread was faster */
400 free_pages((unsigned long)mem, get_order(size));
401 } else if (!mem) {
402 /* no luck */
403 ret = -ENOMEM;
404 goto out;
405 } else {
406 /* everything ok */
407 memset(mem, 0, size);
408 cmb_area.mem = mem;
409 cmf_activate(cmb_area.mem, 1);
410 }
411 }
412
413 /* do the actual allocation */
414 ret = alloc_cmb_single(cdev);
415out:
416 spin_unlock(&cmb_area.lock);
417
418 return ret;
419}
420
421static void
422free_cmb(struct ccw_device *cdev)
423{
424 struct ccw_device_private *priv;
425
426 priv = cdev->private;
427
428 spin_lock(&cmb_area.lock);
429 spin_lock_irq(cdev->ccwlock);
430
431 if (list_empty(&priv->cmb_list)) {
432 /* already freed */
433 goto out;
434 }
435
436 priv->cmb = NULL;
437 list_del_init(&priv->cmb_list);
438
439 if (list_empty(&cmb_area.list)) {
440 ssize_t size;
441 size = sizeof(struct cmb) * cmb_area.num_channels;
442 cmf_activate(NULL, 0);
443 free_pages((unsigned long)cmb_area.mem, get_order(size));
444 cmb_area.mem = NULL;
445 }
446out:
447 spin_unlock_irq(cdev->ccwlock);
448 spin_unlock(&cmb_area.lock);
449}
450
451static int
452set_cmb(struct ccw_device *cdev, u32 mme)
453{
454 u16 offset;
455
456 if (!cdev->private->cmb)
457 return -EINVAL;
458
459 offset = mme ? (struct cmb *)cdev->private->cmb - cmb_area.mem : 0;
460
461 return set_schib_wait(cdev, mme, 0, offset);
462}
463
464static u64
465read_cmb (struct ccw_device *cdev, int index)
466{
467 /* yes, we have to put it on the stack
468 * because the cmb must only be accessed
469 * atomically, e.g. with mvc */
470 struct cmb cmb;
471 unsigned long flags;
472 u32 val;
473
474 spin_lock_irqsave(cdev->ccwlock, flags);
475 if (!cdev->private->cmb) {
476 spin_unlock_irqrestore(cdev->ccwlock, flags);
477 return 0;
478 }
479
480 cmb = *(struct cmb*)cdev->private->cmb;
481 spin_unlock_irqrestore(cdev->ccwlock, flags);
482
483 switch (index) {
484 case cmb_ssch_rsch_count:
485 return cmb.ssch_rsch_count;
486 case cmb_sample_count:
487 return cmb.sample_count;
488 case cmb_device_connect_time:
489 val = cmb.device_connect_time;
490 break;
491 case cmb_function_pending_time:
492 val = cmb.function_pending_time;
493 break;
494 case cmb_device_disconnect_time:
495 val = cmb.device_disconnect_time;
496 break;
497 case cmb_control_unit_queuing_time:
498 val = cmb.control_unit_queuing_time;
499 break;
500 case cmb_device_active_only_time:
501 val = cmb.device_active_only_time;
502 break;
503 default:
504 return 0;
505 }
506 return time_to_avg_nsec(val, cmb.sample_count);
507}
508
509static int
510readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
511{
512 /* yes, we have to put it on the stack
513 * because the cmb must only be accessed
514 * atomically, e.g. with mvc */
515 struct cmb cmb;
516 unsigned long flags;
517 u64 time;
518
519 spin_lock_irqsave(cdev->ccwlock, flags);
520 if (!cdev->private->cmb) {
521 spin_unlock_irqrestore(cdev->ccwlock, flags);
522 return -ENODEV;
523 }
524
525 cmb = *(struct cmb*)cdev->private->cmb;
526 time = get_clock() - cdev->private->cmb_start_time;
527 spin_unlock_irqrestore(cdev->ccwlock, flags);
528
529 memset(data, 0, sizeof(struct cmbdata));
530
531 /* we only know values before device_busy_time */
532 data->size = offsetof(struct cmbdata, device_busy_time);
533
534 /* convert to nanoseconds */
535 data->elapsed_time = (time * 1000) >> 12;
536
537 /* copy data to new structure */
538 data->ssch_rsch_count = cmb.ssch_rsch_count;
539 data->sample_count = cmb.sample_count;
540
541 /* time fields are converted to nanoseconds while copying */
542 data->device_connect_time = time_to_nsec(cmb.device_connect_time);
543 data->function_pending_time = time_to_nsec(cmb.function_pending_time);
544 data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
545 data->control_unit_queuing_time
546 = time_to_nsec(cmb.control_unit_queuing_time);
547 data->device_active_only_time
548 = time_to_nsec(cmb.device_active_only_time);
549
550 return 0;
551}
552
553static void
554reset_cmb(struct ccw_device *cdev)
555{
556 struct cmb *cmb;
557 spin_lock_irq(cdev->ccwlock);
558 cmb = cdev->private->cmb;
559 if (cmb)
560 memset (cmb, 0, sizeof (*cmb));
561 cdev->private->cmb_start_time = get_clock();
562 spin_unlock_irq(cdev->ccwlock);
563}
564
565static struct attribute_group cmf_attr_group;
566
567static struct cmb_operations cmbops_basic = {
568 .alloc = alloc_cmb,
569 .free = free_cmb,
570 .set = set_cmb,
571 .read = read_cmb,
572 .readall = readall_cmb,
573 .reset = reset_cmb,
574 .attr_group = &cmf_attr_group,
575};
576
577/* ******** extended cmb handling ********/
578
579/**
580 * struct cmbe - extended channel measurement block
581 *
582 * cmb as used by the hardware, may be in any 64 bit physical location,
583 * the fields are described in z/Architecture Principles of Operation,
584 * third edition, chapter 17.
585 */
586struct cmbe {
587 u32 ssch_rsch_count;
588 u32 sample_count;
589 u32 device_connect_time;
590 u32 function_pending_time;
591 u32 device_disconnect_time;
592 u32 control_unit_queuing_time;
593 u32 device_active_only_time;
594 u32 device_busy_time;
595 u32 initial_command_response_time;
596 u32 reserved[7];
597};
598
599/* kmalloc only guarantees 8 byte alignment, but we need cmbe
600 * pointers to be naturally aligned. Make sure to allocate
601 * enough space for two cmbes */
602static inline struct cmbe* cmbe_align(struct cmbe *c)
603{
604 unsigned long addr;
605 addr = ((unsigned long)c + sizeof (struct cmbe) - sizeof(long)) &
606 ~(sizeof (struct cmbe) - sizeof(long));
607 return (struct cmbe*)addr;
608}
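/* Illustration of the rounding, under the assumption that
 * sizeof(struct cmbe) == 64 and sizeof(long) == 8: kmalloc returns an
 * 8-byte aligned address, so adding 56 and masking with ~56 rounds it
 * up to the next 64-byte boundary, the natural alignment of a cmbe. */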
609
610static int
611alloc_cmbe (struct ccw_device *cdev)
612{
613 struct cmbe *cmbe;
614 cmbe = kmalloc (sizeof (*cmbe) * 2, GFP_KERNEL);
615 if (!cmbe)
616 return -ENOMEM;
617
618 spin_lock_irq(cdev->ccwlock);
619 if (cdev->private->cmb) {
620 kfree(cmbe);
621 spin_unlock_irq(cdev->ccwlock);
622 return -EBUSY;
623 }
624
625 cdev->private->cmb = cmbe;
626 spin_unlock_irq(cdev->ccwlock);
627
628 /* activate global measurement if this is the first channel */
629 spin_lock(&cmb_area.lock);
630 if (list_empty(&cmb_area.list))
631 cmf_activate(NULL, 1);
632 list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
633 spin_unlock(&cmb_area.lock);
634
635 return 0;
636}
637
638static void
639free_cmbe (struct ccw_device *cdev)
640{
641 spin_lock_irq(cdev->ccwlock);
642 if (cdev->private->cmb)
643 kfree(cdev->private->cmb);
644 cdev->private->cmb = NULL;
645 spin_unlock_irq(cdev->ccwlock);
646
647 /* deactivate global measurement if this is the last channel */
648 spin_lock(&cmb_area.lock);
649 list_del_init(&cdev->private->cmb_list);
650 if (list_empty(&cmb_area.list))
651 cmf_activate(NULL, 0);
652 spin_unlock(&cmb_area.lock);
653}
654
655static int
656set_cmbe(struct ccw_device *cdev, u32 mme)
657{
658 unsigned long mba;
659
660 if (!cdev->private->cmb)
661 return -EINVAL;
662 mba = mme ? (unsigned long) cmbe_align(cdev->private->cmb) : 0;
663
664 return set_schib_wait(cdev, mme, 1, mba);
665}
666
667
668static u64
669read_cmbe (struct ccw_device *cdev, int index)
670{
671 /* yes, we have to put it on the stack
672 * because the cmb must only be accessed
673 * atomically, e.g. with mvc */
674 struct cmbe cmb;
675 unsigned long flags;
676 u32 val;
677
678 spin_lock_irqsave(cdev->ccwlock, flags);
679 if (!cdev->private->cmb) {
680 spin_unlock_irqrestore(cdev->ccwlock, flags);
681 return 0;
682 }
683
684 cmb = *cmbe_align(cdev->private->cmb);
685 spin_unlock_irqrestore(cdev->ccwlock, flags);
686
687 switch (index) {
688 case cmb_ssch_rsch_count:
689 return cmb.ssch_rsch_count;
690 case cmb_sample_count:
691 return cmb.sample_count;
692 case cmb_device_connect_time:
693 val = cmb.device_connect_time;
694 break;
695 case cmb_function_pending_time:
696 val = cmb.function_pending_time;
697 break;
698 case cmb_device_disconnect_time:
699 val = cmb.device_disconnect_time;
700 break;
701 case cmb_control_unit_queuing_time:
702 val = cmb.control_unit_queuing_time;
703 break;
704 case cmb_device_active_only_time:
705 val = cmb.device_active_only_time;
706 break;
707 case cmb_device_busy_time:
708 val = cmb.device_busy_time;
709 break;
710 case cmb_initial_command_response_time:
711 val = cmb.initial_command_response_time;
712 break;
713 default:
714 return 0;
715 }
716 return time_to_avg_nsec(val, cmb.sample_count);
717}
718
719static int
720readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
721{
722 /* yes, we have to put it on the stack
723 * because the cmb must only be accessed
724 * atomically, e.g. with mvc */
725 struct cmbe cmb;
726 unsigned long flags;
727 u64 time;
728
729 spin_lock_irqsave(cdev->ccwlock, flags);
730 if (!cdev->private->cmb) {
731 spin_unlock_irqrestore(cdev->ccwlock, flags);
732 return -ENODEV;
733 }
734
735 cmb = *cmbe_align(cdev->private->cmb);
736 time = get_clock() - cdev->private->cmb_start_time;
737 spin_unlock_irqrestore(cdev->ccwlock, flags);
738
739 memset (data, 0, sizeof(struct cmbdata));
740
741 /* we only know values before device_busy_time */
742 data->size = offsetof(struct cmbdata, device_busy_time);
743
744 /* convert to nanoseconds */
745 data->elapsed_time = (time * 1000) >> 12;
746
747 /* copy data to new structure */
748 data->ssch_rsch_count = cmb.ssch_rsch_count;
749 data->sample_count = cmb.sample_count;
750
751 /* time fields are converted to nanoseconds while copying */
752 data->device_connect_time = time_to_nsec(cmb.device_connect_time);
753 data->function_pending_time = time_to_nsec(cmb.function_pending_time);
754 data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
755 data->control_unit_queuing_time
756 = time_to_nsec(cmb.control_unit_queuing_time);
757 data->device_active_only_time
758 = time_to_nsec(cmb.device_active_only_time);
759 data->device_busy_time = time_to_nsec(cmb.device_busy_time);
760 data->initial_command_response_time
761 = time_to_nsec(cmb.initial_command_response_time);
762
763 return 0;
764}
765
766static void
767reset_cmbe(struct ccw_device *cdev)
768{
769 struct cmbe *cmb;
770 spin_lock_irq(cdev->ccwlock);
771 cmb = cmbe_align(cdev->private->cmb);
772 if (cmb)
773 memset (cmb, 0, sizeof (*cmb));
774 cdev->private->cmb_start_time = get_clock();
775 spin_unlock_irq(cdev->ccwlock);
776}
777
778static struct attribute_group cmf_attr_group_ext;
779
780static struct cmb_operations cmbops_extended = {
781 .alloc = alloc_cmbe,
782 .free = free_cmbe,
783 .set = set_cmbe,
784 .read = read_cmbe,
785 .readall = readall_cmbe,
786 .reset = reset_cmbe,
787 .attr_group = &cmf_attr_group_ext,
788};
789
790
791static ssize_t
792cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
793{
794 return sprintf(buf, "%lld\n",
795 (unsigned long long) cmf_read(to_ccwdev(dev), idx));
796}
797
798static ssize_t
799cmb_show_avg_sample_interval(struct device *dev, char *buf)
800{
801 struct ccw_device *cdev;
802 long interval;
803 unsigned long count;
804
805 cdev = to_ccwdev(dev);
806 interval = get_clock() - cdev->private->cmb_start_time;
807 count = cmf_read(cdev, cmb_sample_count);
808 if (count)
809 interval /= count;
810 else
811 interval = -1;
812 return sprintf(buf, "%ld\n", interval);
813}
814
815static ssize_t
816cmb_show_avg_utilization(struct device *dev, char *buf)
817{
818 struct cmbdata data;
819 u64 utilization;
820 unsigned long t, u;
821 int ret;
822
823 ret = cmf_readall(to_ccwdev(dev), &data);
824 if (ret)
825 return ret;
826
827 utilization = data.device_connect_time +
828 data.function_pending_time +
829 data.device_disconnect_time;
830
831 /* shift to avoid long long division */
832 while (-1ul < (data.elapsed_time | utilization)) {
833 utilization >>= 8;
834 data.elapsed_time >>= 8;
835 }
836
837 /* calculate value in 0.1 percent units */
838 t = (unsigned long) data.elapsed_time / 1000;
839 u = (unsigned long) utilization / t;
840
841 return sprintf(buf, "%02ld.%01ld%%\n", u/ 10, u - (u/ 10) * 10);
842}
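/* Worked example (illustrative only): a device that spent 300 ms in
 * connect + pending + disconnect time during 1 s of elapsed time gives
 *	t = 1000000000 / 1000 = 1000000,
 *	u = 300000000 / 1000000 = 300 tenths of a percent,
 * so the attribute reads "30.0%". */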
843
844#define cmf_attr(name) \
845static ssize_t show_ ## name (struct device * dev, char * buf) \
846{ return cmb_show_attr((dev), buf, cmb_ ## name); } \
847static DEVICE_ATTR(name, 0444, show_ ## name, NULL);
848
849#define cmf_attr_avg(name) \
850static ssize_t show_avg_ ## name (struct device * dev, char * buf) \
851{ return cmb_show_attr((dev), buf, cmb_ ## name); } \
852static DEVICE_ATTR(avg_ ## name, 0444, show_avg_ ## name, NULL);
853
854cmf_attr(ssch_rsch_count);
855cmf_attr(sample_count);
856cmf_attr_avg(device_connect_time);
857cmf_attr_avg(function_pending_time);
858cmf_attr_avg(device_disconnect_time);
859cmf_attr_avg(control_unit_queuing_time);
860cmf_attr_avg(device_active_only_time);
861cmf_attr_avg(device_busy_time);
862cmf_attr_avg(initial_command_response_time);
863
864static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval, NULL);
865static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
866
867static struct attribute *cmf_attributes[] = {
868 &dev_attr_avg_sample_interval.attr,
869 &dev_attr_avg_utilization.attr,
870 &dev_attr_ssch_rsch_count.attr,
871 &dev_attr_sample_count.attr,
872 &dev_attr_avg_device_connect_time.attr,
873 &dev_attr_avg_function_pending_time.attr,
874 &dev_attr_avg_device_disconnect_time.attr,
875 &dev_attr_avg_control_unit_queuing_time.attr,
876 &dev_attr_avg_device_active_only_time.attr,
877 NULL,
878};
879
880static struct attribute_group cmf_attr_group = {
881 .name = "cmf",
882 .attrs = cmf_attributes,
883};
884
885static struct attribute *cmf_attributes_ext[] = {
886 &dev_attr_avg_sample_interval.attr,
887 &dev_attr_avg_utilization.attr,
888 &dev_attr_ssch_rsch_count.attr,
889 &dev_attr_sample_count.attr,
890 &dev_attr_avg_device_connect_time.attr,
891 &dev_attr_avg_function_pending_time.attr,
892 &dev_attr_avg_device_disconnect_time.attr,
893 &dev_attr_avg_control_unit_queuing_time.attr,
894 &dev_attr_avg_device_active_only_time.attr,
895 &dev_attr_avg_device_busy_time.attr,
896 &dev_attr_avg_initial_command_response_time.attr,
897 NULL,
898};
899
900static struct attribute_group cmf_attr_group_ext = {
901 .name = "cmf",
902 .attrs = cmf_attributes_ext,
903};
904
905static ssize_t cmb_enable_show(struct device *dev, char *buf)
906{
907 return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 1 : 0);
908}
909
910static ssize_t cmb_enable_store(struct device *dev, const char *buf, size_t c)
911{
912 struct ccw_device *cdev;
913 int ret;
914
915 cdev = to_ccwdev(dev);
916
917 switch (buf[0]) {
918 case '0':
919 ret = disable_cmf(cdev);
920 if (ret)
921 printk(KERN_INFO "disable_cmf failed (%d)\n", ret);
922 break;
923 case '1':
924 ret = enable_cmf(cdev);
925 if (ret && ret != -EBUSY)
926 printk(KERN_INFO "enable_cmf failed (%d)\n", ret);
927 break;
928 }
929
930 return c;
931}
932
933DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);
934
935/* enable_cmf/disable_cmf: module interface for cmf (de)activation */
936int
937enable_cmf(struct ccw_device *cdev)
938{
939 int ret;
940
941 ret = cmbops->alloc(cdev);
942 if (ret)
943 return ret;
944 cmbops->reset(cdev);
945 ret = cmbops->set(cdev, 2);
946 if (ret) {
947 cmbops->free(cdev);
948 return ret;
949 }
950 ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
951 if (!ret)
952 return 0;
953 cmbops->set(cdev, 0); /* FIXME: this can fail */
954 cmbops->free(cdev);
955 return ret;
956}
957
958int
959disable_cmf(struct ccw_device *cdev)
960{
961 int ret;
962
963 ret = cmbops->set(cdev, 0);
964 if (ret)
965 return ret;
966 cmbops->free(cdev);
967 sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
968 return ret;
969}
970
971u64
972cmf_read(struct ccw_device *cdev, int index)
973{
974 return cmbops->read(cdev, index);
975}
976
977int
978cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
979{
980 return cmbops->readall(cdev, data);
981}
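/*
 * Usage sketch (illustrative only, not part of this driver): a ccw
 * device driver could use the exported interface like this, where
 * "my_cdev" stands for a hypothetical, already-online ccw_device:
 *
 *	struct cmbdata data;
 *	int ret;
 *
 *	ret = enable_cmf(my_cdev);
 *	if (ret)
 *		return ret;
 *	ret = cmf_readall(my_cdev, &data);
 *	if (!ret)
 *		printk(KERN_INFO "accumulated connect time: %llu ns\n",
 *		       (unsigned long long) data.device_connect_time);
 *	disable_cmf(my_cdev);
 */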
982
983static int __init
984init_cmf(void)
985{
986 char *format_string;
987 char *detect_string = "parameter";
988
989 /* We cannot really autoprobe this. If the user did not give a parameter,
990 * see if we are running on z990 or up, otherwise fall back to basic mode. */
991
992 if (format == CMF_AUTODETECT) {
993 if (!css_characteristics_avail ||
994 !css_general_characteristics.ext_mb) {
995 format = CMF_BASIC;
996 } else {
997 format = CMF_EXTENDED;
998 }
999 detect_string = "autodetected";
1000 } else {
1001 detect_string = "parameter";
1002 }
1003
1004 switch (format) {
1005 case CMF_BASIC:
1006 format_string = "basic";
1007 cmbops = &cmbops_basic;
1008 if (cmb_area.num_channels > 4096 || cmb_area.num_channels < 1) {
1009 printk(KERN_ERR "Basic channel measurement facility"
1010 " can only use 1 to 4096 devices\n"
1011 KERN_ERR "when the cmf driver is built"
1012 " as a loadable module\n");
1013 return 1;
1014 }
1015 break;
1016 case CMF_EXTENDED:
1017 format_string = "extended";
1018 cmbops = &cmbops_extended;
1019 break;
1020 default:
1021 printk(KERN_ERR "Invalid format %d for channel "
1022 "measurement facility\n", format);
1023 return 1;
1024 }
1025
1026 printk(KERN_INFO "Channel measurement facility using %s format (%s)\n",
1027 format_string, detect_string);
1028 return 0;
1029}
1030
1031module_init(init_cmf);
1032
1033
1034MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
1035MODULE_LICENSE("GPL");
1036MODULE_DESCRIPTION("channel measurement facility base driver\n"
1037 "Copyright 2003 IBM Corporation\n");
1038
1039EXPORT_SYMBOL_GPL(enable_cmf);
1040EXPORT_SYMBOL_GPL(disable_cmf);
1041EXPORT_SYMBOL_GPL(cmf_read);
1042EXPORT_SYMBOL_GPL(cmf_readall);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
new file mode 100644
index 000000000000..87bd70eeabed
--- /dev/null
+++ b/drivers/s390/cio/css.c
@@ -0,0 +1,575 @@
1/*
2 * drivers/s390/cio/css.c
3 * driver for channel subsystem
4 * $Revision: 1.85 $
5 *
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation
8 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
9 * Cornelia Huck (cohuck@de.ibm.com)
10 */
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/device.h>
14#include <linux/slab.h>
15#include <linux/errno.h>
16#include <linux/list.h>
17
18#include "css.h"
19#include "cio.h"
20#include "cio_debug.h"
21#include "ioasm.h"
22#include "chsc.h"
23
24unsigned int highest_subchannel;
25int need_rescan = 0;
26int css_init_done = 0;
27
28struct pgid global_pgid;
29int css_characteristics_avail = 0;
30
31struct device css_bus_device = {
32 .bus_id = "css0",
33};
34
35static struct subchannel *
36css_alloc_subchannel(int irq)
37{
38 struct subchannel *sch;
39 int ret;
40
41 sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
42 if (sch == NULL)
43 return ERR_PTR(-ENOMEM);
44 ret = cio_validate_subchannel (sch, irq);
45 if (ret < 0) {
46 kfree(sch);
47 return ERR_PTR(ret);
48 }
49 if (irq > highest_subchannel)
50 highest_subchannel = irq;
51
52 if (sch->st != SUBCHANNEL_TYPE_IO) {
53 /* For now we ignore all non-io subchannels. */
54 kfree(sch);
55 return ERR_PTR(-EINVAL);
56 }
57
58 /*
59 * Set intparm to subchannel address.
60 * This is fine even on 64bit since the subchannel is always located
61 * under 2G.
62 */
63 sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
64 ret = cio_modify(sch);
65 if (ret) {
66 kfree(sch);
67 return ERR_PTR(ret);
68 }
69 return sch;
70}
71
72static void
73css_free_subchannel(struct subchannel *sch)
74{
75 if (sch) {
76 /* Reset intparm to zeroes. */
77 sch->schib.pmcw.intparm = 0;
78 cio_modify(sch);
79 kfree(sch);
80 }
81
82}
83
84static void
85css_subchannel_release(struct device *dev)
86{
87 struct subchannel *sch;
88
89 sch = to_subchannel(dev);
90 if (!cio_is_console(sch->irq))
91 kfree(sch);
92}
93
94extern int css_get_ssd_info(struct subchannel *sch);
95
96static int
97css_register_subchannel(struct subchannel *sch)
98{
99 int ret;
100
101 /* Initialize the subchannel structure */
102 sch->dev.parent = &css_bus_device;
103 sch->dev.bus = &css_bus_type;
104 sch->dev.release = &css_subchannel_release;
105
106 /* make it known to the system */
107 ret = device_register(&sch->dev);
108 if (ret)
109 printk (KERN_WARNING "%s: could not register %s\n",
110 __func__, sch->dev.bus_id);
111 else
112 css_get_ssd_info(sch);
113 return ret;
114}
115
116int
117css_probe_device(int irq)
118{
119 int ret;
120 struct subchannel *sch;
121
122 sch = css_alloc_subchannel(irq);
123 if (IS_ERR(sch))
124 return PTR_ERR(sch);
125 ret = css_register_subchannel(sch);
126 if (ret)
127 css_free_subchannel(sch);
128 return ret;
129}
130
131struct subchannel *
132get_subchannel_by_schid(int irq)
133{
134 struct subchannel *sch;
135 struct list_head *entry;
136 struct device *dev;
137
138 if (!get_bus(&css_bus_type))
139 return NULL;
140 down_read(&css_bus_type.subsys.rwsem);
141 sch = NULL;
142 list_for_each(entry, &css_bus_type.devices.list) {
143 dev = get_device(container_of(entry,
144 struct device, bus_list));
145 if (!dev)
146 continue;
147 sch = to_subchannel(dev);
148 if (sch->irq == irq)
149 break;
150 put_device(dev);
151 sch = NULL;
152 }
153 up_read(&css_bus_type.subsys.rwsem);
154 put_bus(&css_bus_type);
155
156 return sch;
157}
158
159static inline int
160css_get_subchannel_status(struct subchannel *sch, int schid)
161{
162 struct schib schib;
163 int cc;
164
165 cc = stsch(schid, &schib);
166 if (cc)
167 return CIO_GONE;
168 if (!schib.pmcw.dnv)
169 return CIO_GONE;
170 if (sch && sch->schib.pmcw.dnv &&
171 (schib.pmcw.dev != sch->schib.pmcw.dev))
172 return CIO_REVALIDATE;
173 if (sch && !sch->lpm)
174 return CIO_NO_PATH;
175 return CIO_OPER;
176}
177
178static int
179css_evaluate_subchannel(int irq, int slow)
180{
181 int event, ret, disc;
182 struct subchannel *sch;
183 unsigned long flags;
184
185 sch = get_subchannel_by_schid(irq);
186 disc = sch ? device_is_disconnected(sch) : 0;
187 if (disc && slow) {
188 if (sch)
189 put_device(&sch->dev);
190 return 0; /* Already processed. */
191 }
192 /*
193 * We've got a machine check, so running I/O won't get an interrupt.
194 * Kill any pending timers.
195 */
196 if (sch)
197 device_kill_pending_timer(sch);
198 if (!disc && !slow) {
199 if (sch)
200 put_device(&sch->dev);
201 return -EAGAIN; /* Will be done on the slow path. */
202 }
203 event = css_get_subchannel_status(sch, irq);
204 CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n",
205 irq, event, sch?(disc?"disconnected":"normal"):"unknown",
206 slow?"slow":"fast");
207 switch (event) {
208 case CIO_NO_PATH:
209 case CIO_GONE:
210 if (!sch) {
211 /* Never used this subchannel. Ignore. */
212 ret = 0;
213 break;
214 }
215 if (disc && (event == CIO_NO_PATH)) {
216 /*
217 * Uargh, hack again. Because we don't get a machine
218 * check on configure on, our path bookkeeping can
219 * be out of date here (it's fine while we only do
220 * logical varying or get chsc machine checks). We
221 * need to force reprobing or we might miss devices
222 * coming operational again. It won't do harm in real
223 * no path situations.
224 */
225 spin_lock_irqsave(&sch->lock, flags);
226 device_trigger_reprobe(sch);
227 spin_unlock_irqrestore(&sch->lock, flags);
228 ret = 0;
229 break;
230 }
231 if (sch->driver && sch->driver->notify &&
232 sch->driver->notify(&sch->dev, event)) {
233 cio_disable_subchannel(sch);
234 device_set_disconnected(sch);
235 ret = 0;
236 break;
237 }
238 /*
239 * Unregister subchannel.
240 * The device will be killed automatically.
241 */
242 cio_disable_subchannel(sch);
243 device_unregister(&sch->dev);
244 /* Reset intparm to zeroes. */
245 sch->schib.pmcw.intparm = 0;
246 cio_modify(sch);
247 put_device(&sch->dev);
248 ret = 0;
249 break;
250 case CIO_REVALIDATE:
251 /*
252 * Revalidation machine check. Sick.
253 * We don't notify the driver since we have to throw the device
254 * away in any case.
255 */
256 if (!disc) {
257 device_unregister(&sch->dev);
258 /* Reset intparm to zeroes. */
259 sch->schib.pmcw.intparm = 0;
260 cio_modify(sch);
261 put_device(&sch->dev);
262 ret = css_probe_device(irq);
263 } else {
264 /*
265 * We can't immediately deregister the disconnected
266 * device since it might block.
267 */
268 spin_lock_irqsave(&sch->lock, flags);
269 device_trigger_reprobe(sch);
270 spin_unlock_irqrestore(&sch->lock, flags);
271 ret = 0;
272 }
273 break;
274 case CIO_OPER:
275 if (disc) {
276 spin_lock_irqsave(&sch->lock, flags);
277 /* Get device operational again. */
278 device_trigger_reprobe(sch);
279 spin_unlock_irqrestore(&sch->lock, flags);
280 }
281 ret = sch ? 0 : css_probe_device(irq);
282 break;
283 default:
284 BUG();
285 ret = 0;
286 }
287 return ret;
288}
289
290static void
291css_rescan_devices(void)
292{
293 int irq, ret;
294
295 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
296 ret = css_evaluate_subchannel(irq, 1);
297 /* No more memory. It doesn't make sense to continue. No
298 * panic because this can happen in midflight and just
299 * because we can't use a new device is no reason to crash
300 * the system. */
301 if (ret == -ENOMEM)
302 break;
303 /* -ENXIO indicates that there are no more subchannels. */
304 if (ret == -ENXIO)
305 break;
306 }
307}
308
309struct slow_subchannel {
310 struct list_head slow_list;
311 unsigned long schid;
312};
313
314static LIST_HEAD(slow_subchannels_head);
315static DEFINE_SPINLOCK(slow_subchannel_lock);
316
317static void
318css_trigger_slow_path(void)
319{
320 CIO_TRACE_EVENT(4, "slowpath");
321
322 if (need_rescan) {
323 need_rescan = 0;
324 css_rescan_devices();
325 return;
326 }
327
328 spin_lock_irq(&slow_subchannel_lock);
329 while (!list_empty(&slow_subchannels_head)) {
330 struct slow_subchannel *slow_sch =
331 list_entry(slow_subchannels_head.next,
332 struct slow_subchannel, slow_list);
333
334 list_del_init(slow_subchannels_head.next);
335 spin_unlock_irq(&slow_subchannel_lock);
336 css_evaluate_subchannel(slow_sch->schid, 1);
337 spin_lock_irq(&slow_subchannel_lock);
338 kfree(slow_sch);
339 }
340 spin_unlock_irq(&slow_subchannel_lock);
341}
342
343typedef void (*workfunc)(void *);
344DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL);
345struct workqueue_struct *slow_path_wq;
346
347/*
348 * Rescan for new devices. FIXME: This is slow.
349 * This function is called when we have lost CRWs due to overflows and we have
350 * to do subchannel housekeeping.
351 */
352void
353css_reiterate_subchannels(void)
354{
355 css_clear_subchannel_slow_list();
356 need_rescan = 1;
357}
358
359/*
360 * Called from the machine check handler for subchannel report words.
361 */
362int
363css_process_crw(int irq)
364{
365 int ret;
366
367 CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq);
368
369 if (need_rescan)
370 /* We need to iterate all subchannels anyway. */
371 return -EAGAIN;
372 /*
373 * Since we are always presented with IPI in the CRW, we have to
374 * use stsch() to find out if the subchannel in question has come
375 * or gone.
376 */
377 ret = css_evaluate_subchannel(irq, 0);
378 if (ret == -EAGAIN) {
379 if (css_enqueue_subchannel_slow(irq)) {
380 css_clear_subchannel_slow_list();
381 need_rescan = 1;
382 }
383 }
384 return ret;
385}
386
387static void __init
388css_generate_pgid(void)
389{
390 /* Let's build our path group ID here. */
391 if (css_characteristics_avail && css_general_characteristics.mcss)
392 global_pgid.cpu_addr = 0x8000;
393 else {
394#ifdef CONFIG_SMP
395 global_pgid.cpu_addr = hard_smp_processor_id();
396#else
397 global_pgid.cpu_addr = 0;
398#endif
399 }
400 global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
401 global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
402 global_pgid.tod_high = (__u32) (get_clock() >> 32);
403}
404
405/*
406 * Now that the driver core is running, we can setup our channel subsystem.
407 * The struct subchannel's are created during probing (except for the
408 * static console subchannel).
409 */
410static int __init
411init_channel_subsystem (void)
412{
413 int ret, irq;
414
415 if (chsc_determine_css_characteristics() == 0)
416 css_characteristics_avail = 1;
417
418 css_generate_pgid();
419
420 if ((ret = bus_register(&css_bus_type)))
421 goto out;
422 if ((ret = device_register (&css_bus_device)))
423 goto out_bus;
424
425 css_init_done = 1;
426
427 ctl_set_bit(6, 28);
428
429 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
430 struct subchannel *sch;
431
432 if (cio_is_console(irq))
433 sch = cio_get_console_subchannel();
434 else {
435 sch = css_alloc_subchannel(irq);
436 if (IS_ERR(sch))
437 ret = PTR_ERR(sch);
438 else
439 ret = 0;
440 if (ret == -ENOMEM)
441 panic("Out of memory in "
442 "init_channel_subsystem\n");
443 /* -ENXIO: no more subchannels. */
444 if (ret == -ENXIO)
445 break;
446 if (ret)
447 continue;
448 }
449 /*
450 * We register ALL valid subchannels in ioinfo, even those
451 * that have been present before init_channel_subsystem.
452 * These subchannels can't have been registered yet (kmalloc
453 * not working) so we do it now. This is true e.g. for the
454 * console subchannel.
455 */
456 css_register_subchannel(sch);
457 }
458 return 0;
459
460out_bus:
461 bus_unregister(&css_bus_type);
462out:
463 return ret;
464}
465
466/*
467 * Find a driver for a subchannel. Drivers are matched by subchannel
468 * type, with the exception that the console subchannel driver has its own
469 * subchannel type although the device is an i/o subchannel.
470 */
471static int
472css_bus_match (struct device *dev, struct device_driver *drv)
473{
474 struct subchannel *sch = container_of (dev, struct subchannel, dev);
475 struct css_driver *driver = container_of (drv, struct css_driver, drv);
476
477 if (sch->st == driver->subchannel_type)
478 return 1;
479
480 return 0;
481}
482
483struct bus_type css_bus_type = {
484 .name = "css",
485 .match = &css_bus_match,
486};
487
488subsys_initcall(init_channel_subsystem);
489
490/*
491 * Register root devices for some drivers. The release function must not be
492 * in the device drivers, so we do it here.
493 */
494static void
495s390_root_dev_release(struct device *dev)
496{
497 kfree(dev);
498}
499
500struct device *
501s390_root_dev_register(const char *name)
502{
503 struct device *dev;
504 int ret;
505
506 if (!strlen(name))
507 return ERR_PTR(-EINVAL);
508 dev = kmalloc(sizeof(struct device), GFP_KERNEL);
509 if (!dev)
510 return ERR_PTR(-ENOMEM);
511 memset(dev, 0, sizeof(struct device));
512 strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE));
513 dev->release = s390_root_dev_release;
514 ret = device_register(dev);
515 if (ret) {
516 kfree(dev);
517 return ERR_PTR(ret);
518 }
519 return dev;
520}
521
522void
523s390_root_dev_unregister(struct device *dev)
524{
525 if (dev)
526 device_unregister(dev);
527}
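/*
 * Usage sketch (illustrative only): a driver that needs a common parent
 * device for the objects it registers could do the following, where
 * "mydriver" is a made-up name:
 *
 *	struct device *root = s390_root_dev_register("mydriver");
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *	...
 *	s390_root_dev_unregister(root);
 */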
528
529int
530css_enqueue_subchannel_slow(unsigned long schid)
531{
532 struct slow_subchannel *new_slow_sch;
533 unsigned long flags;
534
535 new_slow_sch = kmalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
536 if (!new_slow_sch)
537 return -ENOMEM;
538 memset(new_slow_sch, 0, sizeof(struct slow_subchannel));
539 new_slow_sch->schid = schid;
540 spin_lock_irqsave(&slow_subchannel_lock, flags);
541 list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
542 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
543 return 0;
544}
545
546void
547css_clear_subchannel_slow_list(void)
548{
549 unsigned long flags;
550
551 spin_lock_irqsave(&slow_subchannel_lock, flags);
552 while (!list_empty(&slow_subchannels_head)) {
553 struct slow_subchannel *slow_sch =
554 list_entry(slow_subchannels_head.next,
555 struct slow_subchannel, slow_list);
556
557 list_del_init(slow_subchannels_head.next);
558 kfree(slow_sch);
559 }
560 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
561}
562
563
564
565int
566css_slow_subchannels_exist(void)
567{
568 return (!list_empty(&slow_subchannels_head));
569}
570
571MODULE_LICENSE("GPL");
572EXPORT_SYMBOL(css_bus_type);
573EXPORT_SYMBOL(s390_root_dev_register);
574EXPORT_SYMBOL(s390_root_dev_unregister);
575EXPORT_SYMBOL_GPL(css_characteristics_avail);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
new file mode 100644
index 000000000000..2004a6c49388
--- /dev/null
+++ b/drivers/s390/cio/css.h
@@ -0,0 +1,155 @@
1#ifndef _CSS_H
2#define _CSS_H
3
4#include <linux/wait.h>
5#include <linux/workqueue.h>
6
7#include <asm/cio.h>
8
9/*
10 * path grouping stuff
11 */
12#define SPID_FUNC_SINGLE_PATH 0x00
13#define SPID_FUNC_MULTI_PATH 0x80
14#define SPID_FUNC_ESTABLISH 0x00
15#define SPID_FUNC_RESIGN 0x40
16#define SPID_FUNC_DISBAND 0x20
17
18#define SNID_STATE1_RESET 0
19#define SNID_STATE1_UNGROUPED 2
20#define SNID_STATE1_GROUPED 3
21
22#define SNID_STATE2_NOT_RESVD 0
23#define SNID_STATE2_RESVD_ELSE 2
24#define SNID_STATE2_RESVD_SELF 3
25
26#define SNID_STATE3_MULTI_PATH 1
27#define SNID_STATE3_SINGLE_PATH 0
28
29struct path_state {
30 __u8 state1 : 2; /* path state value 1 */
31 __u8 state2 : 2; /* path state value 2 */
32 __u8 state3 : 1; /* path state value 3 */
33 __u8 resvd : 3; /* reserved */
34} __attribute__ ((packed));
35
36struct pgid {
37 union {
38 __u8 fc; /* SPID function code */
39 struct path_state ps; /* SNID path state */
40 } inf;
41 __u32 cpu_addr : 16; /* CPU address */
42 __u32 cpu_id : 24; /* CPU identification */
43 __u32 cpu_model : 16; /* CPU model */
44 __u32 tod_high; /* high word TOD clock */
45} __attribute__ ((packed));
46
47extern struct pgid global_pgid;
48
49#define MAX_CIWS 8
50
51/*
52 * sense-id response buffer layout
53 */
54struct senseid {
55 /* common part */
56 __u8 reserved; /* always 0x'FF' */
57 __u16 cu_type; /* control unit type */
58 __u8 cu_model; /* control unit model */
59 __u16 dev_type; /* device type */
60 __u8 dev_model; /* device model */
61 __u8 unused; /* padding byte */
62 /* extended part */
63 struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */
64} __attribute__ ((packed,aligned(4)));
65
66struct ccw_device_private {
67 int state; /* device state */
68 atomic_t onoff;
69 unsigned long registered;
70 __u16 devno; /* device number */
71 __u16 irq; /* subchannel number */
72 __u8 imask; /* lpm mask for SNID/SID/SPGID */
73 int iretry; /* retry counter SNID/SID/SPGID */
74 struct {
75 unsigned int fast:1; /* post with "channel end" */
76 unsigned int repall:1; /* report every interrupt status */
77 unsigned int pgroup:1; /* do path grouping */
78 unsigned int force:1; /* allow forced online */
79 } __attribute__ ((packed)) options;
80 struct {
81 unsigned int pgid_single:1; /* use single path for Set PGID */
82 unsigned int esid:1; /* Ext. SenseID supported by HW */
83 unsigned int dosense:1; /* delayed SENSE required */
84 unsigned int doverify:1; /* delayed path verification */
85 unsigned int donotify:1; /* call notify function */
86 unsigned int recog_done:1; /* dev. recog. complete */
87 unsigned int fake_irb:1; /* deliver faked irb */
88 } __attribute__((packed)) flags;
89 unsigned long intparm; /* user interruption parameter */
90 struct qdio_irq *qdio_data;
91 struct irb irb; /* device status */
92 struct senseid senseid; /* SenseID info */
93 struct pgid pgid; /* path group ID */
94 struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
95 struct work_struct kick_work;
96 wait_queue_head_t wait_q;
97 struct timer_list timer;
98 void *cmb; /* measurement information */
99 struct list_head cmb_list; /* list of measured devices */
100 u64 cmb_start_time; /* clock value of cmb reset */
101 void *cmb_wait; /* deferred cmb enable/disable */
102};
103
104/*
105 * A css driver handles all subchannels of one type.
106 * Currently, we only care about I/O subchannels (type 0), these
107 * have a ccw_device connected to them.
108 */
109struct css_driver {
110 unsigned int subchannel_type;
111 struct device_driver drv;
112 void (*irq)(struct device *);
113 int (*notify)(struct device *, int);
114 void (*verify)(struct device *);
115 void (*termination)(struct device *);
116};
117
118/*
119 * all css_drivers have the css_bus_type
120 */
121extern struct bus_type css_bus_type;
122extern struct css_driver io_subchannel_driver;
123
124int css_probe_device(int irq);
125extern struct subchannel * get_subchannel_by_schid(int irq);
126extern unsigned int highest_subchannel;
127extern int css_init_done;
128
129#define __MAX_SUBCHANNELS 65536
130
131extern struct bus_type css_bus_type;
132extern struct device css_bus_device;
133
134/* Some helper functions for disconnected state. */
135int device_is_disconnected(struct subchannel *);
136void device_set_disconnected(struct subchannel *);
137void device_trigger_reprobe(struct subchannel *);
138
139/* Helper functions for vary on/off. */
140int device_is_online(struct subchannel *);
141void device_set_waiting(struct subchannel *);
142
143/* Machine check helper function. */
144void device_kill_pending_timer(struct subchannel *);
145
146/* Helper functions to build lists for the slow path. */
147int css_enqueue_subchannel_slow(unsigned long schid);
148void css_walk_subchannel_slow_list(void (*fn)(unsigned long));
149void css_clear_subchannel_slow_list(void);
150int css_slow_subchannels_exist(void);
151extern int need_rescan;
152
153extern struct workqueue_struct *slow_path_wq;
154extern struct work_struct slow_path_work;
155#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
new file mode 100644
index 000000000000..df0325505e4e
--- /dev/null
+++ b/drivers/s390/cio/device.c
@@ -0,0 +1,1135 @@
1/*
2 * drivers/s390/cio/device.c
3 * bus driver for ccw devices
4 * $Revision: 1.131 $
5 *
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation
8 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
9 * Cornelia Huck (cohuck@de.ibm.com)
10 * Martin Schwidefsky (schwidefsky@de.ibm.com)
11 */
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/spinlock.h>
16#include <linux/errno.h>
17#include <linux/err.h>
18#include <linux/slab.h>
19#include <linux/list.h>
20#include <linux/device.h>
21#include <linux/workqueue.h>
22
23#include <asm/ccwdev.h>
24#include <asm/cio.h>
25
26#include "cio.h"
27#include "css.h"
28#include "device.h"
29#include "ioasm.h"
30
31/******************* bus type handling ***********************/
32
33/* The Linux driver model distinguishes between a bus type and
34 * the bus itself. Of course we only have one channel
35 * subsystem driver and one channel subsystem per machine, but
36 * we still use the abstraction. T.R. says it's a good idea. */
37static int
38ccw_bus_match (struct device * dev, struct device_driver * drv)
39{
40 struct ccw_device *cdev = to_ccwdev(dev);
41 struct ccw_driver *cdrv = to_ccwdrv(drv);
42 const struct ccw_device_id *ids = cdrv->ids, *found;
43
44 if (!ids)
45 return 0;
46
47 found = ccw_device_id_match(ids, &cdev->id);
48 if (!found)
49 return 0;
50
51 cdev->id.driver_info = found->driver_info;
52
53 return 1;
54}
55
56/*
57 * Hotplugging interface for ccw devices.
58 * Heavily modeled on pci and usb hotplug.
59 */
60static int
61ccw_hotplug (struct device *dev, char **envp, int num_envp,
62 char *buffer, int buffer_size)
63{
64 struct ccw_device *cdev = to_ccwdev(dev);
65 int i = 0;
66 int length = 0;
67
68 if (!cdev)
69 return -ENODEV;
70
71 /* what we want to pass to /sbin/hotplug */
72
73 envp[i++] = buffer;
74 length += scnprintf(buffer, buffer_size - length, "CU_TYPE=%04X",
75 cdev->id.cu_type);
76 if ((buffer_size - length <= 0) || (i >= num_envp))
77 return -ENOMEM;
78 ++length;
79 buffer += length;
80
81 envp[i++] = buffer;
82 length += scnprintf(buffer, buffer_size - length, "CU_MODEL=%02X",
83 cdev->id.cu_model);
84 if ((buffer_size - length <= 0) || (i >= num_envp))
85 return -ENOMEM;
86 ++length;
87 buffer += length;
88
89 /* The next two can be zero, that's ok for us */
90 envp[i++] = buffer;
91 length += scnprintf(buffer, buffer_size - length, "DEV_TYPE=%04X",
92 cdev->id.dev_type);
93 if ((buffer_size - length <= 0) || (i >= num_envp))
94 return -ENOMEM;
95 ++length;
96 buffer += length;
97
98 envp[i++] = buffer;
99 length += scnprintf(buffer, buffer_size - length, "DEV_MODEL=%02X",
100 cdev->id.dev_model);
101 if ((buffer_size - length <= 0) || (i >= num_envp))
102 return -ENOMEM;
103
104 envp[i] = 0;
105
106 return 0;
107}
108
109struct bus_type ccw_bus_type = {
110 .name = "ccw",
111 .match = &ccw_bus_match,
112 .hotplug = &ccw_hotplug,
113};
114
115static int io_subchannel_probe (struct device *);
116static int io_subchannel_remove (struct device *);
117void io_subchannel_irq (struct device *);
118static int io_subchannel_notify(struct device *, int);
119static void io_subchannel_verify(struct device *);
120static void io_subchannel_ioterm(struct device *);
121static void io_subchannel_shutdown(struct device *);
122
123struct css_driver io_subchannel_driver = {
124 .subchannel_type = SUBCHANNEL_TYPE_IO,
125 .drv = {
126 .name = "io_subchannel",
127 .bus = &css_bus_type,
128 .probe = &io_subchannel_probe,
129 .remove = &io_subchannel_remove,
130 .shutdown = &io_subchannel_shutdown,
131 },
132 .irq = io_subchannel_irq,
133 .notify = io_subchannel_notify,
134 .verify = io_subchannel_verify,
135 .termination = io_subchannel_ioterm,
136};
137
138struct workqueue_struct *ccw_device_work;
139struct workqueue_struct *ccw_device_notify_work;
140static wait_queue_head_t ccw_device_init_wq;
141static atomic_t ccw_device_init_count;
142
143static int __init
144init_ccw_bus_type (void)
145{
146 int ret;
147
148 init_waitqueue_head(&ccw_device_init_wq);
149 atomic_set(&ccw_device_init_count, 0);
150
151 ccw_device_work = create_singlethread_workqueue("cio");
152 if (!ccw_device_work)
153 return -ENOMEM; /* FIXME: better errno ? */
154 ccw_device_notify_work = create_singlethread_workqueue("cio_notify");
155 if (!ccw_device_notify_work) {
156 ret = -ENOMEM; /* FIXME: better errno ? */
157 goto out_err;
158 }
159 slow_path_wq = create_singlethread_workqueue("kslowcrw");
160 if (!slow_path_wq) {
161 ret = -ENOMEM; /* FIXME: better errno ? */
162 goto out_err;
163 }
164 if ((ret = bus_register (&ccw_bus_type)))
165 goto out_err;
166
167 if ((ret = driver_register(&io_subchannel_driver.drv)))
168 goto out_err;
169
170 wait_event(ccw_device_init_wq,
171 atomic_read(&ccw_device_init_count) == 0);
172 flush_workqueue(ccw_device_work);
173 return 0;
174out_err:
175 if (ccw_device_work)
176 destroy_workqueue(ccw_device_work);
177 if (ccw_device_notify_work)
178 destroy_workqueue(ccw_device_notify_work);
179 if (slow_path_wq)
180 destroy_workqueue(slow_path_wq);
181 return ret;
182}
183
184static void __exit
185cleanup_ccw_bus_type (void)
186{
187 driver_unregister(&io_subchannel_driver.drv);
188 bus_unregister(&ccw_bus_type);
189 destroy_workqueue(ccw_device_notify_work);
190 destroy_workqueue(ccw_device_work);
191}
192
193subsys_initcall(init_ccw_bus_type);
194module_exit(cleanup_ccw_bus_type);
195
196/************************ device handling **************************/
197
198/*
199 * A ccw_device has some interfaces in sysfs in addition to the
200 * standard ones.
201 * The following entries are designed to export the information which
202 * resided in 2.4 in /proc/subchannels. Subchannel and device number
203 * are obvious, so they don't have an entry :)
204 * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
205 */
206static ssize_t
207chpids_show (struct device * dev, char * buf)
208{
209 struct subchannel *sch = to_subchannel(dev);
210 struct ssd_info *ssd = &sch->ssd_info;
211 ssize_t ret = 0;
212 int chp;
213
214 for (chp = 0; chp < 8; chp++)
215 ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
216
217 ret += sprintf (buf+ret, "\n");
218 return min((ssize_t)PAGE_SIZE, ret);
219}
220
221static ssize_t
222pimpampom_show (struct device * dev, char * buf)
223{
224 struct subchannel *sch = to_subchannel(dev);
225 struct pmcw *pmcw = &sch->schib.pmcw;
226
227 return sprintf (buf, "%02x %02x %02x\n",
228 pmcw->pim, pmcw->pam, pmcw->pom);
229}
230
231static ssize_t
232devtype_show (struct device *dev, char *buf)
233{
234 struct ccw_device *cdev = to_ccwdev(dev);
235 struct ccw_device_id *id = &(cdev->id);
236
237 if (id->dev_type != 0)
238 return sprintf(buf, "%04x/%02x\n",
239 id->dev_type, id->dev_model);
240 else
241 return sprintf(buf, "n/a\n");
242}
243
244static ssize_t
245cutype_show (struct device *dev, char *buf)
246{
247 struct ccw_device *cdev = to_ccwdev(dev);
248 struct ccw_device_id *id = &(cdev->id);
249
250 return sprintf(buf, "%04x/%02x\n",
251 id->cu_type, id->cu_model);
252}
253
254static ssize_t
255online_show (struct device *dev, char *buf)
256{
257 struct ccw_device *cdev = to_ccwdev(dev);
258
259 return sprintf(buf, cdev->online ? "1\n" : "0\n");
260}
261
262static void
263ccw_device_remove_disconnected(struct ccw_device *cdev)
264{
265 struct subchannel *sch;
266 /*
267 * Forced offline in disconnected state means
268 * 'throw away device'.
269 */
270 sch = to_subchannel(cdev->dev.parent);
271 device_unregister(&sch->dev);
272 /* Reset intparm to zeroes. */
273 sch->schib.pmcw.intparm = 0;
274 cio_modify(sch);
275 put_device(&sch->dev);
276}
277
278int
279ccw_device_set_offline(struct ccw_device *cdev)
280{
281 int ret;
282
283 if (!cdev)
284 return -ENODEV;
285 if (!cdev->online || !cdev->drv)
286 return -EINVAL;
287
288 if (cdev->drv->set_offline) {
289 ret = cdev->drv->set_offline(cdev);
290 if (ret != 0)
291 return ret;
292 }
293 cdev->online = 0;
294 spin_lock_irq(cdev->ccwlock);
295 ret = ccw_device_offline(cdev);
296 if (ret == -ENODEV) {
297 if (cdev->private->state != DEV_STATE_NOT_OPER) {
298 cdev->private->state = DEV_STATE_OFFLINE;
299 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
300 }
301 spin_unlock_irq(cdev->ccwlock);
302 return ret;
303 }
304 spin_unlock_irq(cdev->ccwlock);
305 if (ret == 0)
306 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
307 else {
308 pr_debug("ccw_device_offline returned %d, device %s\n",
309 ret, cdev->dev.bus_id);
310 cdev->online = 1;
311 }
312 return ret;
313}
314
315int
316ccw_device_set_online(struct ccw_device *cdev)
317{
318 int ret;
319
320 if (!cdev)
321 return -ENODEV;
322 if (cdev->online || !cdev->drv)
323 return -EINVAL;
324
325 spin_lock_irq(cdev->ccwlock);
326 ret = ccw_device_online(cdev);
327 spin_unlock_irq(cdev->ccwlock);
328 if (ret == 0)
329 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
330 else {
331 pr_debug("ccw_device_online returned %d, device %s\n",
332 ret, cdev->dev.bus_id);
333 return ret;
334 }
335 if (cdev->private->state != DEV_STATE_ONLINE)
336 return -ENODEV;
337 if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) {
338 cdev->online = 1;
339 return 0;
340 }
341 spin_lock_irq(cdev->ccwlock);
342 ret = ccw_device_offline(cdev);
343 spin_unlock_irq(cdev->ccwlock);
344 if (ret == 0)
345 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
346 else
347 pr_debug("ccw_device_offline returned %d, device %s\n",
348 ret, cdev->dev.bus_id);
349 return (ret == 0) ? -ENODEV : ret;
350}
351
352static ssize_t
353online_store (struct device *dev, const char *buf, size_t count)
354{
355 struct ccw_device *cdev = to_ccwdev(dev);
356 int i, force, ret;
357 char *tmp;
358
359 if (atomic_compare_and_swap(0, 1, &cdev->private->onoff))
360 return -EAGAIN;
361
362 if (cdev->drv && !try_module_get(cdev->drv->owner)) {
363 atomic_set(&cdev->private->onoff, 0);
364 return -EINVAL;
365 }
366 if (!strncmp(buf, "force\n", count)) {
367 force = 1;
368 i = 1;
369 } else {
370 force = 0;
371 i = simple_strtoul(buf, &tmp, 16);
372 }
373 if (i == 1) {
374 /* Do device recognition, if needed. */
375 if (cdev->id.cu_type == 0) {
376 ret = ccw_device_recognition(cdev);
377 if (ret) {
378 printk(KERN_WARNING"Couldn't start recognition "
379 "for device %s (ret=%d)\n",
380 cdev->dev.bus_id, ret);
381 goto out;
382 }
383 wait_event(cdev->private->wait_q,
384 cdev->private->flags.recog_done);
385 }
386 if (cdev->drv && cdev->drv->set_online)
387 ccw_device_set_online(cdev);
388 } else if (i == 0) {
389 if (cdev->private->state == DEV_STATE_DISCONNECTED)
390 ccw_device_remove_disconnected(cdev);
391 else if (cdev->drv && cdev->drv->set_offline)
392 ccw_device_set_offline(cdev);
393 }
394 if (force && cdev->private->state == DEV_STATE_BOXED) {
395 ret = ccw_device_stlck(cdev);
396 if (ret) {
397 printk(KERN_WARNING"ccw_device_stlck for device %s "
398 "returned %d!\n", cdev->dev.bus_id, ret);
399 goto out;
400 }
401 /* Do device recognition, if needed. */
402 if (cdev->id.cu_type == 0) {
403 cdev->private->state = DEV_STATE_NOT_OPER;
404 ret = ccw_device_recognition(cdev);
405 if (ret) {
406 printk(KERN_WARNING"Couldn't start recognition "
407 "for device %s (ret=%d)\n",
408 cdev->dev.bus_id, ret);
409 goto out;
410 }
411 wait_event(cdev->private->wait_q,
412 cdev->private->flags.recog_done);
413 }
414 if (cdev->drv && cdev->drv->set_online)
415 ccw_device_set_online(cdev);
416 }
417 out:
418 if (cdev->drv)
419 module_put(cdev->drv->owner);
420 atomic_set(&cdev->private->onoff, 0);
421 return count;
422}
423
424static ssize_t
425available_show (struct device *dev, char *buf)
426{
427 struct ccw_device *cdev = to_ccwdev(dev);
428 struct subchannel *sch;
429
430 switch (cdev->private->state) {
431 case DEV_STATE_BOXED:
432 return sprintf(buf, "boxed\n");
433 case DEV_STATE_DISCONNECTED:
434 case DEV_STATE_DISCONNECTED_SENSE_ID:
435 case DEV_STATE_NOT_OPER:
436 sch = to_subchannel(dev->parent);
437 if (!sch->lpm)
438 return sprintf(buf, "no path\n");
439 else
440 return sprintf(buf, "no device\n");
441 default:
442 /* All other states considered fine. */
443 return sprintf(buf, "good\n");
444 }
445}
446
447static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
448static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
449static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
450static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
451static DEVICE_ATTR(online, 0644, online_show, online_store);
452extern struct device_attribute dev_attr_cmb_enable;
453static DEVICE_ATTR(availability, 0444, available_show, NULL);
454
455static struct attribute * subch_attrs[] = {
456 &dev_attr_chpids.attr,
457 &dev_attr_pimpampom.attr,
458 NULL,
459};
460
461static struct attribute_group subch_attr_group = {
462 .attrs = subch_attrs,
463};
464
465static inline int
466subchannel_add_files (struct device *dev)
467{
468 return sysfs_create_group(&dev->kobj, &subch_attr_group);
469}
470
471static struct attribute * ccwdev_attrs[] = {
472 &dev_attr_devtype.attr,
473 &dev_attr_cutype.attr,
474 &dev_attr_online.attr,
475 &dev_attr_cmb_enable.attr,
476 &dev_attr_availability.attr,
477 NULL,
478};
479
480static struct attribute_group ccwdev_attr_group = {
481 .attrs = ccwdev_attrs,
482};
483
484static inline int
485device_add_files (struct device *dev)
486{
487 return sysfs_create_group(&dev->kobj, &ccwdev_attr_group);
488}
489
490static inline void
491device_remove_files(struct device *dev)
492{
493 sysfs_remove_group(&dev->kobj, &ccwdev_attr_group);
494}
495
496 /* This is a simple abstraction for device_register() that sets the
497 * correct bus type and adds the bus-specific files. */
498int
499ccw_device_register(struct ccw_device *cdev)
500{
501 struct device *dev = &cdev->dev;
502 int ret;
503
504 dev->bus = &ccw_bus_type;
505
506 if ((ret = device_add(dev)))
507 return ret;
508
509 set_bit(1, &cdev->private->registered);
510 if ((ret = device_add_files(dev))) {
511 if (test_and_clear_bit(1, &cdev->private->registered))
512 device_del(dev);
513 }
514 return ret;
515}
516
517static struct ccw_device *
518get_disc_ccwdev_by_devno(unsigned int devno, struct ccw_device *sibling)
519{
520 struct ccw_device *cdev;
521 struct list_head *entry;
522 struct device *dev;
523
524 if (!get_bus(&ccw_bus_type))
525 return NULL;
526 down_read(&ccw_bus_type.subsys.rwsem);
527 cdev = NULL;
528 list_for_each(entry, &ccw_bus_type.devices.list) {
529 dev = get_device(container_of(entry,
530 struct device, bus_list));
531 if (!dev)
532 continue;
533 cdev = to_ccwdev(dev);
534 if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
535 (cdev->private->devno == devno) &&
536 (cdev != sibling)) {
537 cdev->private->state = DEV_STATE_NOT_OPER;
538 break;
539 }
540 put_device(dev);
541 cdev = NULL;
542 }
543 up_read(&ccw_bus_type.subsys.rwsem);
544 put_bus(&ccw_bus_type);
545
546 return cdev;
547}
548
549static void
550ccw_device_add_changed(void *data)
551{
552
553 struct ccw_device *cdev;
554
555 cdev = (struct ccw_device *)data;
556 if (device_add(&cdev->dev)) {
557 put_device(&cdev->dev);
558 return;
559 }
560 set_bit(1, &cdev->private->registered);
561 if (device_add_files(&cdev->dev)) {
562 if (test_and_clear_bit(1, &cdev->private->registered))
563 device_unregister(&cdev->dev);
564 }
565}
566
567extern int css_get_ssd_info(struct subchannel *sch);
568
569void
570ccw_device_do_unreg_rereg(void *data)
571{
572 struct ccw_device *cdev;
573 struct subchannel *sch;
574 int need_rename;
575
576 cdev = (struct ccw_device *)data;
577 sch = to_subchannel(cdev->dev.parent);
578 if (cdev->private->devno != sch->schib.pmcw.dev) {
579 /*
580 * The device number has changed. This usually happens only when
581 * a device has been detached under VM and then re-appeared
582 * on another subchannel because of a different attachment
583 * order than before. Ideally, we should just switch
584 * subchannels, but unfortunately, this is not possible with
585 * the current implementation.
586 * Instead, we search for the old subchannel registered for this
587 * device number and deregister it so there are no collisions
588 * with the newly registered ccw_device.
589 * FIXME: Find another solution so the block layer doesn't
590 * possibly get sick...
591 */
592 struct ccw_device *other_cdev;
593
594 need_rename = 1;
595 other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev,
596 cdev);
597 if (other_cdev) {
598 struct subchannel *other_sch;
599
600 other_sch = to_subchannel(other_cdev->dev.parent);
601 if (get_device(&other_sch->dev)) {
602 stsch(other_sch->irq, &other_sch->schib);
603 if (other_sch->schib.pmcw.dnv) {
604 other_sch->schib.pmcw.intparm = 0;
605 cio_modify(other_sch);
606 }
607 device_unregister(&other_sch->dev);
608 }
609 }
610 /* Update ssd info here. */
611 css_get_ssd_info(sch);
612 cdev->private->devno = sch->schib.pmcw.dev;
613 } else
614 need_rename = 0;
615 device_remove_files(&cdev->dev);
616 if (test_and_clear_bit(1, &cdev->private->registered))
617 device_del(&cdev->dev);
618 if (need_rename)
619 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x",
620 sch->schib.pmcw.dev);
621 PREPARE_WORK(&cdev->private->kick_work,
622 ccw_device_add_changed, (void *)cdev);
623 queue_work(ccw_device_work, &cdev->private->kick_work);
624}
625
626static void
627ccw_device_release(struct device *dev)
628{
629 struct ccw_device *cdev;
630
631 cdev = to_ccwdev(dev);
632 kfree(cdev->private);
633 kfree(cdev);
634}
635
636/*
637 * Register recognized device.
638 */
639static void
640io_subchannel_register(void *data)
641{
642 struct ccw_device *cdev;
643 struct subchannel *sch;
644 int ret;
645 unsigned long flags;
646
647 cdev = (struct ccw_device *) data;
648 sch = to_subchannel(cdev->dev.parent);
649
650 if (!list_empty(&sch->dev.children)) {
651 bus_rescan_devices(&ccw_bus_type);
652 goto out;
653 }
654 /* make it known to the system */
655 ret = ccw_device_register(cdev);
656 if (ret) {
657 printk (KERN_WARNING "%s: could not register %s\n",
658 __func__, cdev->dev.bus_id);
659 put_device(&cdev->dev);
660 spin_lock_irqsave(&sch->lock, flags);
661 sch->dev.driver_data = NULL;
662 spin_unlock_irqrestore(&sch->lock, flags);
663 kfree (cdev->private);
664 kfree (cdev);
665 put_device(&sch->dev);
666 if (atomic_dec_and_test(&ccw_device_init_count))
667 wake_up(&ccw_device_init_wq);
668 return;
669 }
670
671 ret = subchannel_add_files(cdev->dev.parent);
672 if (ret)
673 printk(KERN_WARNING "%s: could not add attributes to %s\n",
674 __func__, sch->dev.bus_id);
675 put_device(&cdev->dev);
676out:
677 cdev->private->flags.recog_done = 1;
678 put_device(&sch->dev);
679 wake_up(&cdev->private->wait_q);
680 if (atomic_dec_and_test(&ccw_device_init_count))
681 wake_up(&ccw_device_init_wq);
682}
683
684void
685ccw_device_call_sch_unregister(void *data)
686{
687 struct ccw_device *cdev = data;
688 struct subchannel *sch;
689
690 sch = to_subchannel(cdev->dev.parent);
691 device_unregister(&sch->dev);
692 /* Reset intparm to zeroes. */
693 sch->schib.pmcw.intparm = 0;
694 cio_modify(sch);
695 put_device(&cdev->dev);
696 put_device(&sch->dev);
697}
698
699/*
700 * subchannel recognition done. Called from the state machine.
701 */
702void
703io_subchannel_recog_done(struct ccw_device *cdev)
704{
705 struct subchannel *sch;
706
707 if (css_init_done == 0) {
708 cdev->private->flags.recog_done = 1;
709 return;
710 }
711 switch (cdev->private->state) {
712 case DEV_STATE_NOT_OPER:
713 cdev->private->flags.recog_done = 1;
714 /* Remove device found not operational. */
715 if (!get_device(&cdev->dev))
716 break;
717 sch = to_subchannel(cdev->dev.parent);
718 PREPARE_WORK(&cdev->private->kick_work,
719 ccw_device_call_sch_unregister, (void *) cdev);
720 queue_work(slow_path_wq, &cdev->private->kick_work);
721 if (atomic_dec_and_test(&ccw_device_init_count))
722 wake_up(&ccw_device_init_wq);
723 break;
724 case DEV_STATE_BOXED:
725 /* Device did not respond in time. */
726 case DEV_STATE_OFFLINE:
727 /*
728 * We can't register the device in interrupt context so
729 * we schedule a work item.
730 */
731 if (!get_device(&cdev->dev))
732 break;
733 PREPARE_WORK(&cdev->private->kick_work,
734 io_subchannel_register, (void *) cdev);
735 queue_work(slow_path_wq, &cdev->private->kick_work);
736 break;
737 }
738}
739
740static int
741io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
742{
743 int rc;
744 struct ccw_device_private *priv;
745
746 sch->dev.driver_data = cdev;
747 sch->driver = &io_subchannel_driver;
748 cdev->ccwlock = &sch->lock;
749 /* Init private data. */
750 priv = cdev->private;
751 priv->devno = sch->schib.pmcw.dev;
752 priv->irq = sch->irq;
753 priv->state = DEV_STATE_NOT_OPER;
754 INIT_LIST_HEAD(&priv->cmb_list);
755 init_waitqueue_head(&priv->wait_q);
756 init_timer(&priv->timer);
757
758 /* Set an initial name for the device. */
759 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x",
760 sch->schib.pmcw.dev);
761
762 /* Increase counter of devices currently in recognition. */
763 atomic_inc(&ccw_device_init_count);
764
765 /* Start async. device sensing. */
766 spin_lock_irq(&sch->lock);
767 rc = ccw_device_recognition(cdev);
768 spin_unlock_irq(&sch->lock);
769 if (rc) {
770 if (atomic_dec_and_test(&ccw_device_init_count))
771 wake_up(&ccw_device_init_wq);
772 }
773 return rc;
774}
775
776static int
777io_subchannel_probe (struct device *pdev)
778{
779 struct subchannel *sch;
780 struct ccw_device *cdev;
781 int rc;
782 unsigned long flags;
783
784 sch = to_subchannel(pdev);
785 if (sch->dev.driver_data) {
786 /*
787 * This subchannel already has an associated ccw_device.
788 * Register it and exit. This happens for all early
789 * devices, e.g. the console.
790 */
791 cdev = sch->dev.driver_data;
792 device_initialize(&cdev->dev);
793 ccw_device_register(cdev);
794 subchannel_add_files(&sch->dev);
795 /*
796 * Check if the device is already online. If it is
797 * the reference count needs to be corrected
798 * (see ccw_device_online and css_init_done for the
799 * ugly details).
800 */
801 if (cdev->private->state != DEV_STATE_NOT_OPER &&
802 cdev->private->state != DEV_STATE_OFFLINE &&
803 cdev->private->state != DEV_STATE_BOXED)
804 get_device(&cdev->dev);
805 return 0;
806 }
807 cdev = kmalloc (sizeof(*cdev), GFP_KERNEL);
808 if (!cdev)
809 return -ENOMEM;
810 memset(cdev, 0, sizeof(struct ccw_device));
811 cdev->private = kmalloc(sizeof(struct ccw_device_private),
812 GFP_KERNEL | GFP_DMA);
813 if (!cdev->private) {
814 kfree(cdev);
815 return -ENOMEM;
816 }
817 memset(cdev->private, 0, sizeof(struct ccw_device_private));
818 atomic_set(&cdev->private->onoff, 0);
819 cdev->dev = (struct device) {
820 .parent = pdev,
821 .release = ccw_device_release,
822 };
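	/*
	 * ccw_device_release() (set above) frees both cdev and
	 * cdev->private once the last reference to the device is
	 * dropped.
	 */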
823 INIT_LIST_HEAD(&cdev->private->kick_work.entry);
824 /* Do first half of device_register. */
825 device_initialize(&cdev->dev);
826
827 if (!get_device(&sch->dev)) {
828 if (cdev->dev.release)
829 cdev->dev.release(&cdev->dev);
830 return -ENODEV;
831 }
832
833 rc = io_subchannel_recog(cdev, to_subchannel(pdev));
834 if (rc) {
835 spin_lock_irqsave(&sch->lock, flags);
836 sch->dev.driver_data = NULL;
837 spin_unlock_irqrestore(&sch->lock, flags);
838 if (cdev->dev.release)
839 cdev->dev.release(&cdev->dev);
840 }
841
842 return rc;
843}
844
845static void
846ccw_device_unregister(void *data)
847{
848 struct ccw_device *cdev;
849
850 cdev = (struct ccw_device *)data;
851 if (test_and_clear_bit(1, &cdev->private->registered))
852 device_unregister(&cdev->dev);
853 put_device(&cdev->dev);
854}
855
856static int
857io_subchannel_remove (struct device *dev)
858{
859 struct ccw_device *cdev;
860 unsigned long flags;
861
862 if (!dev->driver_data)
863 return 0;
864 cdev = dev->driver_data;
865 /* Set ccw device to not operational and drop reference. */
866 spin_lock_irqsave(cdev->ccwlock, flags);
867 dev->driver_data = NULL;
868 cdev->private->state = DEV_STATE_NOT_OPER;
869 spin_unlock_irqrestore(cdev->ccwlock, flags);
870 /*
871 * Put unregistration on workqueue to avoid livelocks on the css bus
872 * semaphore.
873 */
874 if (get_device(&cdev->dev)) {
875 PREPARE_WORK(&cdev->private->kick_work,
876 ccw_device_unregister, (void *) cdev);
877 queue_work(ccw_device_work, &cdev->private->kick_work);
878 }
879 return 0;
880}
881
882static int
883io_subchannel_notify(struct device *dev, int event)
884{
885 struct ccw_device *cdev;
886
887 cdev = dev->driver_data;
888 if (!cdev)
889 return 0;
890 if (!cdev->drv)
891 return 0;
892 if (!cdev->online)
893 return 0;
894 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
895}
896
897static void
898io_subchannel_verify(struct device *dev)
899{
900 struct ccw_device *cdev;
901
902 cdev = dev->driver_data;
903 if (cdev)
904 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
905}
906
907static void
908io_subchannel_ioterm(struct device *dev)
909{
910 struct ccw_device *cdev;
911
912 cdev = dev->driver_data;
913 if (!cdev)
914 return;
915 cdev->private->state = DEV_STATE_CLEAR_VERIFY;
916 if (cdev->handler)
917 cdev->handler(cdev, cdev->private->intparm,
918 ERR_PTR(-EIO));
919}
920
921static void
922io_subchannel_shutdown(struct device *dev)
923{
924 struct subchannel *sch;
925 struct ccw_device *cdev;
926 int ret;
927
928 sch = to_subchannel(dev);
929 cdev = dev->driver_data;
930
931 if (cio_is_console(sch->irq))
932 return;
933 if (!sch->schib.pmcw.ena)
934 /* Nothing to do. */
935 return;
936 ret = cio_disable_subchannel(sch);
937 if (ret != -EBUSY)
938 /* Subchannel is disabled, we're done. */
939 return;
940 cdev->private->state = DEV_STATE_QUIESCE;
941 if (cdev->handler)
942 cdev->handler(cdev, cdev->private->intparm,
943 ERR_PTR(-EIO));
944 ret = ccw_device_cancel_halt_clear(cdev);
945 if (ret == -EBUSY) {
946 ccw_device_set_timeout(cdev, HZ/10);
947 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
948 }
949 cio_disable_subchannel(sch);
950}
951
952#ifdef CONFIG_CCW_CONSOLE
953static struct ccw_device console_cdev;
954static struct ccw_device_private console_private;
955static int console_cdev_in_use;
956
957static int
958ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
959{
960 int rc;
961
962 /* Initialize the ccw_device structure. */
963 cdev->dev = (struct device) {
964 .parent = &sch->dev,
965 };
966 /* Initialize the subchannel structure */
967 sch->dev.parent = &css_bus_device;
968 sch->dev.bus = &css_bus_type;
969
970 rc = io_subchannel_recog(cdev, sch);
971 if (rc)
972 return rc;
973
974 /* Now wait for the asynchronous recognition to finish. */
975 spin_lock_irq(cdev->ccwlock);
976 while (!dev_fsm_final_state(cdev))
977 wait_cons_dev();
978 rc = -EIO;
979 if (cdev->private->state != DEV_STATE_OFFLINE)
980 goto out_unlock;
981 ccw_device_online(cdev);
982 while (!dev_fsm_final_state(cdev))
983 wait_cons_dev();
984 if (cdev->private->state != DEV_STATE_ONLINE)
985 goto out_unlock;
986 rc = 0;
987out_unlock:
988 spin_unlock_irq(cdev->ccwlock);
989 return rc;
990}
991
992struct ccw_device *
993ccw_device_probe_console(void)
994{
995 struct subchannel *sch;
996 int ret;
997
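	/*
	 * Only one console device is supported; the xchg() below
	 * lets exactly one caller claim the static console_cdev.
	 */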
998 if (xchg(&console_cdev_in_use, 1) != 0)
999 return NULL;
1000 sch = cio_probe_console();
1001 if (IS_ERR(sch)) {
1002 console_cdev_in_use = 0;
1003 return (void *) sch;
1004 }
1005 memset(&console_cdev, 0, sizeof(struct ccw_device));
1006 memset(&console_private, 0, sizeof(struct ccw_device_private));
1007 console_cdev.private = &console_private;
1008 ret = ccw_device_console_enable(&console_cdev, sch);
1009 if (ret) {
1010 cio_release_console();
1011 console_cdev_in_use = 0;
1012 return ERR_PTR(ret);
1013 }
1014 console_cdev.online = 1;
1015 return &console_cdev;
1016}
1017#endif
1018
1019/*
1020 * Get the ccw_device matching the given bus id, but only if it is owned by cdrv.
1021 */
1022struct ccw_device *
1023get_ccwdev_by_busid(struct ccw_driver *cdrv, const char *bus_id)
1024{
1025 struct device *d, *dev;
1026 struct device_driver *drv;
1027
1028 drv = get_driver(&cdrv->driver);
1029 if (!drv)
1030 return NULL;
1031
1032 down_read(&drv->bus->subsys.rwsem);
1033
1034 dev = NULL;
1035 list_for_each_entry(d, &drv->devices, driver_list) {
1036 dev = get_device(d);
1037
1038 if (dev && !strncmp(bus_id, dev->bus_id, BUS_ID_SIZE))
1039 break;
1040 else if (dev) {
1041 put_device(dev);
1042 dev = NULL;
1043 }
1044 }
1045 up_read(&drv->bus->subsys.rwsem);
1046 put_driver(drv);
1047
1048 return dev ? to_ccwdev(dev) : NULL;
1049}
1050
1051/************************** device driver handling ************************/
1052
1053/* This is the implementation of the ccw_driver class. The probe, remove
1054 * and release methods are initially very similar to the device_driver
1055 * implementations, with the difference that they have ccw_device
1056 * arguments.
1057 *
1058 * A ccw driver also contains the information that is needed for
1059 * device matching.
1060 */
1061static int
1062ccw_device_probe (struct device *dev)
1063{
1064 struct ccw_device *cdev = to_ccwdev(dev);
1065 struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
1066 int ret;
1067
1068 cdev->drv = cdrv; /* to let the driver call _set_online */
1069
1070 ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
1071
1072 if (ret) {
1073 cdev->drv = NULL;
1074 return ret;
1075 }
1076
1077 return 0;
1078}
1079
1080static int
1081ccw_device_remove (struct device *dev)
1082{
1083 struct ccw_device *cdev = to_ccwdev(dev);
1084 struct ccw_driver *cdrv = cdev->drv;
1085 int ret;
1086
1087 pr_debug("removing device %s\n", cdev->dev.bus_id);
1088 if (cdrv->remove)
1089 cdrv->remove(cdev);
1090 if (cdev->online) {
1091 cdev->online = 0;
1092 spin_lock_irq(cdev->ccwlock);
1093 ret = ccw_device_offline(cdev);
1094 spin_unlock_irq(cdev->ccwlock);
1095 if (ret == 0)
1096 wait_event(cdev->private->wait_q,
1097 dev_fsm_final_state(cdev));
1098 else
1099 //FIXME: we can't fail!
1100 pr_debug("ccw_device_offline returned %d, device %s\n",
1101 ret, cdev->dev.bus_id);
1102 }
1103 ccw_device_set_timeout(cdev, 0);
1104 cdev->drv = NULL;
1105 return 0;
1106}
1107
1108int
1109ccw_driver_register (struct ccw_driver *cdriver)
1110{
1111 struct device_driver *drv = &cdriver->driver;
1112
1113 drv->bus = &ccw_bus_type;
1114 drv->name = cdriver->name;
1115 drv->probe = ccw_device_probe;
1116 drv->remove = ccw_device_remove;
1117
1118 return driver_register(drv);
1119}
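/*
 * A minimal (hypothetical) registration by a device driver could look
 * like the sketch below; all foo_* names are illustrative only:
 *
 *	static struct ccw_driver foo_driver = {
 *		.owner	     = THIS_MODULE,
 *		.name	     = "foo",
 *		.probe	     = foo_probe,
 *		.remove	     = foo_remove,
 *		.set_online  = foo_set_online,
 *		.set_offline = foo_set_offline,
 *	};
 *
 *	ret = ccw_driver_register(&foo_driver);
 */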
1120
1121void
1122ccw_driver_unregister (struct ccw_driver *cdriver)
1123{
1124 driver_unregister(&cdriver->driver);
1125}
1126
1127MODULE_LICENSE("GPL");
1128EXPORT_SYMBOL(ccw_device_set_online);
1129EXPORT_SYMBOL(ccw_device_set_offline);
1130EXPORT_SYMBOL(ccw_driver_register);
1131EXPORT_SYMBOL(ccw_driver_unregister);
1132EXPORT_SYMBOL(get_ccwdev_by_busid);
1133EXPORT_SYMBOL(ccw_bus_type);
1134EXPORT_SYMBOL(ccw_device_work);
1135EXPORT_SYMBOL(ccw_device_notify_work);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
new file mode 100644
index 000000000000..a3aa056d7245
--- /dev/null
+++ b/drivers/s390/cio/device.h
@@ -0,0 +1,115 @@
1#ifndef S390_DEVICE_H
2#define S390_DEVICE_H
3
4/*
5 * states of the device statemachine
6 */
7enum dev_state {
8 DEV_STATE_NOT_OPER,
9 DEV_STATE_SENSE_PGID,
10 DEV_STATE_SENSE_ID,
11 DEV_STATE_OFFLINE,
12 DEV_STATE_VERIFY,
13 DEV_STATE_ONLINE,
14 DEV_STATE_W4SENSE,
15 DEV_STATE_DISBAND_PGID,
16 DEV_STATE_BOXED,
17 /* states to wait for i/o completion before doing something */
18 DEV_STATE_CLEAR_VERIFY,
19 DEV_STATE_TIMEOUT_KILL,
20 DEV_STATE_WAIT4IO,
21 DEV_STATE_QUIESCE,
22 /* special states for devices gone not operational */
23 DEV_STATE_DISCONNECTED,
24 DEV_STATE_DISCONNECTED_SENSE_ID,
25 DEV_STATE_CMFCHANGE,
26 /* last element! */
27 NR_DEV_STATES
28};
29
30/*
31 * asynchronous events of the device statemachine
32 */
33enum dev_event {
34 DEV_EVENT_NOTOPER,
35 DEV_EVENT_INTERRUPT,
36 DEV_EVENT_TIMEOUT,
37 DEV_EVENT_VERIFY,
38 /* last element! */
39 NR_DEV_EVENTS
40};
41
42struct ccw_device;
43
44/*
45 * action called through jumptable
46 */
47typedef void (fsm_func_t)(struct ccw_device *, enum dev_event);
48extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];
49
50static inline void
51dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
52{
53 dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
54}
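/*
 * Example: io_subchannel_irq() feeds interrupts into the state machine
 * via dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); which handler runs is
 * determined by the device's current state.
 */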
55
56/*
57 * Returns 1 if the device state is final.
58 */
59static inline int
60dev_fsm_final_state(struct ccw_device *cdev)
61{
62 return (cdev->private->state == DEV_STATE_NOT_OPER ||
63 cdev->private->state == DEV_STATE_OFFLINE ||
64 cdev->private->state == DEV_STATE_ONLINE ||
65 cdev->private->state == DEV_STATE_BOXED);
66}
67
68extern struct workqueue_struct *ccw_device_work;
69extern struct workqueue_struct *ccw_device_notify_work;
70
71void io_subchannel_recog_done(struct ccw_device *cdev);
72
73int ccw_device_cancel_halt_clear(struct ccw_device *);
74
75int ccw_device_register(struct ccw_device *);
76void ccw_device_do_unreg_rereg(void *);
77void ccw_device_call_sch_unregister(void *);
78
79int ccw_device_recognition(struct ccw_device *);
80int ccw_device_online(struct ccw_device *);
81int ccw_device_offline(struct ccw_device *);
82
83/* Function prototypes for device status and basic sense stuff. */
84void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
85void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
86int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
87int ccw_device_do_sense(struct ccw_device *, struct irb *);
88
89/* Function prototypes for sense id stuff. */
90void ccw_device_sense_id_start(struct ccw_device *);
91void ccw_device_sense_id_irq(struct ccw_device *, enum dev_event);
92void ccw_device_sense_id_done(struct ccw_device *, int);
93
94/* Function prototypes for path grouping stuff. */
95void ccw_device_sense_pgid_start(struct ccw_device *);
96void ccw_device_sense_pgid_irq(struct ccw_device *, enum dev_event);
97void ccw_device_sense_pgid_done(struct ccw_device *, int);
98
99void ccw_device_verify_start(struct ccw_device *);
100void ccw_device_verify_irq(struct ccw_device *, enum dev_event);
101void ccw_device_verify_done(struct ccw_device *, int);
102
103void ccw_device_disband_start(struct ccw_device *);
104void ccw_device_disband_irq(struct ccw_device *, enum dev_event);
105void ccw_device_disband_done(struct ccw_device *, int);
106
107int ccw_device_call_handler(struct ccw_device *);
108
109int ccw_device_stlck(struct ccw_device *);
110
111/* qdio needs this. */
112void ccw_device_set_timeout(struct ccw_device *, int);
113
114void retry_set_schib(struct ccw_device *cdev);
115#endif
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
new file mode 100644
index 000000000000..9b7f6f548b1d
--- /dev/null
+++ b/drivers/s390/cio/device_fsm.c
@@ -0,0 +1,1250 @@
1/*
2 * drivers/s390/cio/device_fsm.c
3 * finite state machine for device handling
4 *
5 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
6 * IBM Corporation
7 * Author(s): Cornelia Huck(cohuck@de.ibm.com)
8 * Martin Schwidefsky (schwidefsky@de.ibm.com)
9 */
10
11#include <linux/module.h>
12#include <linux/config.h>
13#include <linux/init.h>
14
15#include <asm/ccwdev.h>
16#include <asm/qdio.h>
17
18#include "cio.h"
19#include "cio_debug.h"
20#include "css.h"
21#include "device.h"
22#include "chsc.h"
23#include "ioasm.h"
24#include "qdio.h"
25
26int
27device_is_online(struct subchannel *sch)
28{
29 struct ccw_device *cdev;
30
31 if (!sch->dev.driver_data)
32 return 0;
33 cdev = sch->dev.driver_data;
34 return (cdev->private->state == DEV_STATE_ONLINE);
35}
36
37int
38device_is_disconnected(struct subchannel *sch)
39{
40 struct ccw_device *cdev;
41
42 if (!sch->dev.driver_data)
43 return 0;
44 cdev = sch->dev.driver_data;
45 return (cdev->private->state == DEV_STATE_DISCONNECTED ||
46 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
47}
48
49void
50device_set_disconnected(struct subchannel *sch)
51{
52 struct ccw_device *cdev;
53
54 if (!sch->dev.driver_data)
55 return;
56 cdev = sch->dev.driver_data;
57 ccw_device_set_timeout(cdev, 0);
58 cdev->private->flags.fake_irb = 0;
59 cdev->private->state = DEV_STATE_DISCONNECTED;
60}
61
62void
63device_set_waiting(struct subchannel *sch)
64{
65 struct ccw_device *cdev;
66
67 if (!sch->dev.driver_data)
68 return;
69 cdev = sch->dev.driver_data;
70 ccw_device_set_timeout(cdev, 10*HZ);
71 cdev->private->state = DEV_STATE_WAIT4IO;
72}
73
74/*
75 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
76 */
77static void
78ccw_device_timeout(unsigned long data)
79{
80 struct ccw_device *cdev;
81
82 cdev = (struct ccw_device *) data;
83 spin_lock_irq(cdev->ccwlock);
84 dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
85 spin_unlock_irq(cdev->ccwlock);
86}
87
88/*
89 * Set (or, with expires == 0, cancel) the device timeout.
90 */
91void
92ccw_device_set_timeout(struct ccw_device *cdev, int expires)
93{
94 if (expires == 0) {
95 del_timer(&cdev->private->timer);
96 return;
97 }
98 if (timer_pending(&cdev->private->timer)) {
99 if (mod_timer(&cdev->private->timer, jiffies + expires))
100 return;
101 }
102 cdev->private->timer.function = ccw_device_timeout;
103 cdev->private->timer.data = (unsigned long) cdev;
104 cdev->private->timer.expires = jiffies + expires;
105 add_timer(&cdev->private->timer);
106}
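/*
 * Usage pattern throughout this file: ccw_device_set_timeout(cdev, 3*HZ)
 * (re-)arms a three second timeout that raises DEV_EVENT_TIMEOUT, while
 * ccw_device_set_timeout(cdev, 0) cancels a pending timer.
 */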
107
108/* Kill any pending timers after machine check. */
109void
110device_kill_pending_timer(struct subchannel *sch)
111{
112 struct ccw_device *cdev;
113
114 if (!sch->dev.driver_data)
115 return;
116 cdev = sch->dev.driver_data;
117 ccw_device_set_timeout(cdev, 0);
118}
119
120/*
121 * Cancel running i/o. This is called repeatedly since halt/clear are
122 * asynchronous operations. We do one try with cio_cancel, three tries
123 * with cio_halt and 255 tries with cio_clear. If everything fails, panic.
124 * Returns 0 if the device is now idle, -ENODEV if it is not operational
125 * and -EBUSY if an interrupt is expected (either from halt/clear or from
126 * a pending status).
127 */
128int
129ccw_device_cancel_halt_clear(struct ccw_device *cdev)
130{
131 struct subchannel *sch;
132 int ret;
133
134 sch = to_subchannel(cdev->dev.parent);
135 ret = stsch(sch->irq, &sch->schib);
136 if (ret || !sch->schib.pmcw.dnv)
137 return -ENODEV;
138 if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
139 /* Not operational or no activity -> done. */
140 return 0;
141 /* Stage 1: cancel io. */
142 if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
143 !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
144 ret = cio_cancel(sch);
145 if (ret != -EINVAL)
146 return ret;
147 /* cancel io unsuccessful. From now on it is asynchronous. */
148 cdev->private->iretry = 3; /* 3 halt retries. */
149 }
150 if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
151 /* Stage 2: halt io. */
152 if (cdev->private->iretry) {
153 cdev->private->iretry--;
154 ret = cio_halt(sch);
155 return (ret == 0) ? -EBUSY : ret;
156 }
157 /* halt io unsuccessful. */
158 cdev->private->iretry = 255; /* 255 clear retries. */
159 }
160 /* Stage 3: clear io. */
161 if (cdev->private->iretry) {
162 cdev->private->iretry--;
163 ret = cio_clear (sch);
164 return (ret == 0) ? -EBUSY : ret;
165 }
166 panic("Can't stop i/o on subchannel.\n");
167}
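/*
 * Note: callers handle the -EBUSY case by re-arming the device timer
 * (typically 3*HZ, see e.g. ccw_device_recog_timeout() below) and
 * calling this function again from the next timeout or interrupt.
 */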
168
169static int
170ccw_device_handle_oper(struct ccw_device *cdev)
171{
172 struct subchannel *sch;
173
174 sch = to_subchannel(cdev->dev.parent);
175 cdev->private->flags.recog_done = 1;
176 /*
177 * Check if cu type and device type still match. If
178 * not, it is certainly another device and we have to
179 * de- and re-register. Also check here for non-matching devno.
180 */
181 if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
182 cdev->id.cu_model != cdev->private->senseid.cu_model ||
183 cdev->id.dev_type != cdev->private->senseid.dev_type ||
184 cdev->id.dev_model != cdev->private->senseid.dev_model ||
185 cdev->private->devno != sch->schib.pmcw.dev) {
186 PREPARE_WORK(&cdev->private->kick_work,
187 ccw_device_do_unreg_rereg, (void *)cdev);
188 queue_work(ccw_device_work, &cdev->private->kick_work);
189 return 0;
190 }
191 cdev->private->flags.donotify = 1;
192 return 1;
193}
194
195/*
196 * The machine won't give us any notification by machine check if a chpid has
197 * been varied online on the SE so we have to find out by magic (i.e. driving
198 * the channel subsystem to device selection and updating our path masks).
199 */
200static inline void
201__recover_lost_chpids(struct subchannel *sch, int old_lpm)
202{
203 int mask, i;
204
205 for (i = 0; i<8; i++) {
206 mask = 0x80 >> i;
207 if (!(sch->lpm & mask))
208 continue;
209 if (old_lpm & mask)
210 continue;
211 chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
212 }
213}
214
215/*
216 * Stop device recognition.
217 */
218static void
219ccw_device_recog_done(struct ccw_device *cdev, int state)
220{
221 struct subchannel *sch;
222 int notify, old_lpm, same_dev;
223
224 sch = to_subchannel(cdev->dev.parent);
225
226 ccw_device_set_timeout(cdev, 0);
227 cio_disable_subchannel(sch);
228 /*
229 * Now that we tried recognition, we have performed device selection
230 * through ssch() and the path information is up to date.
231 */
232 old_lpm = sch->lpm;
233 stsch(sch->irq, &sch->schib);
234 sch->lpm = sch->schib.pmcw.pim &
235 sch->schib.pmcw.pam &
236 sch->schib.pmcw.pom &
237 sch->opm;
238 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
239 /* Force reprobe on all chpids. */
240 old_lpm = 0;
241 if (sch->lpm != old_lpm)
242 __recover_lost_chpids(sch, old_lpm);
243 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
244 if (state == DEV_STATE_NOT_OPER) {
245 cdev->private->flags.recog_done = 1;
246 cdev->private->state = DEV_STATE_DISCONNECTED;
247 return;
248 }
249 /* Boxed devices don't need extra treatment. */
250 }
251 notify = 0;
252 same_dev = 0; /* Keep the compiler quiet... */
253 switch (state) {
254 case DEV_STATE_NOT_OPER:
255 CIO_DEBUG(KERN_WARNING, 2,
256 "SenseID : unknown device %04x on subchannel %04x\n",
257 cdev->private->devno, sch->irq);
258 break;
259 case DEV_STATE_OFFLINE:
260 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
261 same_dev = ccw_device_handle_oper(cdev);
262 notify = 1;
263 }
264 /* fill out sense information */
265 cdev->id = (struct ccw_device_id) {
266 .cu_type = cdev->private->senseid.cu_type,
267 .cu_model = cdev->private->senseid.cu_model,
268 .dev_type = cdev->private->senseid.dev_type,
269 .dev_model = cdev->private->senseid.dev_model,
270 };
271 if (notify) {
272 cdev->private->state = DEV_STATE_OFFLINE;
273 if (same_dev) {
274 /* Get device online again. */
275 ccw_device_online(cdev);
276 wake_up(&cdev->private->wait_q);
277 }
278 return;
279 }
280 /* Issue device info message. */
281 CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
282 "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
283 "%04X/%02X\n", cdev->private->devno,
284 cdev->id.cu_type, cdev->id.cu_model,
285 cdev->id.dev_type, cdev->id.dev_model);
286 break;
287 case DEV_STATE_BOXED:
288 CIO_DEBUG(KERN_WARNING, 2,
289 "SenseID : boxed device %04x on subchannel %04x\n",
290 cdev->private->devno, sch->irq);
291 break;
292 }
293 cdev->private->state = state;
294 io_subchannel_recog_done(cdev);
295 if (state != DEV_STATE_NOT_OPER)
296 wake_up(&cdev->private->wait_q);
297}
298
299/*
300 * Function called from device_id.c after sense id has completed.
301 */
302void
303ccw_device_sense_id_done(struct ccw_device *cdev, int err)
304{
305 switch (err) {
306 case 0:
307 ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
308 break;
309 case -ETIME: /* Sense id stopped by timeout. */
310 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
311 break;
312 default:
313 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
314 break;
315 }
316}
317
318static void
319ccw_device_oper_notify(void *data)
320{
321 struct ccw_device *cdev;
322 struct subchannel *sch;
323 int ret;
324
325 cdev = (struct ccw_device *)data;
326 sch = to_subchannel(cdev->dev.parent);
327 ret = (sch->driver && sch->driver->notify) ?
328 sch->driver->notify(&sch->dev, CIO_OPER) : 0;
329 if (!ret)
330 /* Driver doesn't want device back. */
331 ccw_device_do_unreg_rereg((void *)cdev);
332 else
333 wake_up(&cdev->private->wait_q);
334}
335
336/*
337 * Finished with online/offline processing.
338 */
339static void
340ccw_device_done(struct ccw_device *cdev, int state)
341{
342 struct subchannel *sch;
343
344 sch = to_subchannel(cdev->dev.parent);
345
346 if (state != DEV_STATE_ONLINE)
347 cio_disable_subchannel(sch);
348
349 /* Reset device status. */
350 memset(&cdev->private->irb, 0, sizeof(struct irb));
351
352 cdev->private->state = state;
353
354
355 if (state == DEV_STATE_BOXED)
356 CIO_DEBUG(KERN_WARNING, 2,
357 "Boxed device %04x on subchannel %04x\n",
358 cdev->private->devno, sch->irq);
359
360 if (cdev->private->flags.donotify) {
361 cdev->private->flags.donotify = 0;
362 PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
363 (void *)cdev);
364 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
365 }
366 wake_up(&cdev->private->wait_q);
367
368 if (css_init_done && state != DEV_STATE_ONLINE)
369 put_device (&cdev->dev);
370}
371
372/*
373 * Function called from device_pgid.c after sense path group id has completed.
374 */
375void
376ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
377{
378 struct subchannel *sch;
379
380 sch = to_subchannel(cdev->dev.parent);
381 switch (err) {
382 case 0:
383 /* Start Path Group verification. */
384 sch->vpm = 0; /* Start with no path groups set. */
385 cdev->private->state = DEV_STATE_VERIFY;
386 ccw_device_verify_start(cdev);
387 break;
388 case -ETIME: /* Sense path group id stopped by timeout. */
389 case -EUSERS: /* device is reserved for someone else. */
390 ccw_device_done(cdev, DEV_STATE_BOXED);
391 break;
392 case -EOPNOTSUPP: /* path grouping not supported, just set online. */
393 cdev->private->options.pgroup = 0;
394 ccw_device_done(cdev, DEV_STATE_ONLINE);
395 break;
396 default:
397 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
398 break;
399 }
400}
401
402/*
403 * Start device recognition.
404 */
405int
406ccw_device_recognition(struct ccw_device *cdev)
407{
408 struct subchannel *sch;
409 int ret;
410
411 if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
412 (cdev->private->state != DEV_STATE_BOXED))
413 return -EINVAL;
414 sch = to_subchannel(cdev->dev.parent);
415 ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
416 if (ret != 0)
417 /* Couldn't enable the subchannel for i/o. Sick device. */
418 return ret;
419
420 /* After 60s the device recognition is considered to have failed. */
421 ccw_device_set_timeout(cdev, 60*HZ);
422
423 /*
424 * We used to start here with a sense pgid to find out whether a device
425 * is locked by someone else. Unfortunately, the sense pgid command
426 * code has other meanings on devices predating the path grouping
427 * algorithm, so we start with sense id and box the device after a
428 * timeout (or if sense pgid during path verification detects that the device
429 * is locked, as may happen on newer devices).
430 */
431 cdev->private->flags.recog_done = 0;
432 cdev->private->state = DEV_STATE_SENSE_ID;
433 ccw_device_sense_id_start(cdev);
434 return 0;
435}
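/*
 * The recognition results arrive asynchronously: device_id.c calls
 * ccw_device_sense_id_done(), which funnels into ccw_device_recog_done()
 * and finally io_subchannel_recog_done().
 */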
436
437/*
438 * Handle timeout in device recognition.
439 */
440static void
441ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
442{
443 int ret;
444
445 ret = ccw_device_cancel_halt_clear(cdev);
446 switch (ret) {
447 case 0:
448 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
449 break;
450 case -ENODEV:
451 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
452 break;
453 default:
454 ccw_device_set_timeout(cdev, 3*HZ);
455 }
456}
457
458
459static void
460ccw_device_nopath_notify(void *data)
461{
462 struct ccw_device *cdev;
463 struct subchannel *sch;
464 int ret;
465
466 cdev = (struct ccw_device *)data;
467 sch = to_subchannel(cdev->dev.parent);
468 /* Extra sanity. */
469 if (sch->lpm)
470 return;
471 ret = (sch->driver && sch->driver->notify) ?
472 sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
473 if (!ret) {
474 if (get_device(&sch->dev)) {
475 /* Driver doesn't want to keep device. */
476 cio_disable_subchannel(sch);
477 if (get_device(&cdev->dev)) {
478 PREPARE_WORK(&cdev->private->kick_work,
479 ccw_device_call_sch_unregister,
480 (void *)cdev);
481 queue_work(ccw_device_work,
482 &cdev->private->kick_work);
483 } else
484 put_device(&sch->dev);
485 }
486 } else {
487 cio_disable_subchannel(sch);
488 ccw_device_set_timeout(cdev, 0);
489 cdev->private->flags.fake_irb = 0;
490 cdev->private->state = DEV_STATE_DISCONNECTED;
491 wake_up(&cdev->private->wait_q);
492 }
493}
494
495void
496ccw_device_verify_done(struct ccw_device *cdev, int err)
497{
498 cdev->private->flags.doverify = 0;
499 switch (err) {
500 case -EOPNOTSUPP: /* path grouping not supported, just set online. */
501 cdev->private->options.pgroup = 0;
502 case 0:
503 ccw_device_done(cdev, DEV_STATE_ONLINE);
504 /* Deliver fake irb to device driver, if needed. */
505 if (cdev->private->flags.fake_irb) {
506 memset(&cdev->private->irb, 0, sizeof(struct irb));
507 cdev->private->irb.scsw = (struct scsw) {
508 .cc = 1,
509 .fctl = SCSW_FCTL_START_FUNC,
510 .actl = SCSW_ACTL_START_PEND,
511 .stctl = SCSW_STCTL_STATUS_PEND,
512 };
513 cdev->private->flags.fake_irb = 0;
514 if (cdev->handler)
515 cdev->handler(cdev, cdev->private->intparm,
516 &cdev->private->irb);
517 memset(&cdev->private->irb, 0, sizeof(struct irb));
518 }
519 break;
520 case -ETIME:
521 ccw_device_done(cdev, DEV_STATE_BOXED);
522 break;
523 default:
524 PREPARE_WORK(&cdev->private->kick_work,
525 ccw_device_nopath_notify, (void *)cdev);
526 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
527 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
528 break;
529 }
530}
531
532/*
533 * Get device online.
534 */
535int
536ccw_device_online(struct ccw_device *cdev)
537{
538 struct subchannel *sch;
539 int ret;
540
541 if ((cdev->private->state != DEV_STATE_OFFLINE) &&
542 (cdev->private->state != DEV_STATE_BOXED))
543 return -EINVAL;
544 sch = to_subchannel(cdev->dev.parent);
545 if (css_init_done && !get_device(&cdev->dev))
546 return -ENODEV;
547 ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
548 if (ret != 0) {
549 /* Couldn't enable the subchannel for i/o. Sick device. */
550 if (ret == -ENODEV)
551 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
552 return ret;
553 }
554 /* Do we want to do path grouping? */
555 if (!cdev->private->options.pgroup) {
556 /* No, set state online immediately. */
557 ccw_device_done(cdev, DEV_STATE_ONLINE);
558 return 0;
559 }
560 /* Do a SensePGID first. */
561 cdev->private->state = DEV_STATE_SENSE_PGID;
562 ccw_device_sense_pgid_start(cdev);
563 return 0;
564}
565
566void
567ccw_device_disband_done(struct ccw_device *cdev, int err)
568{
569 switch (err) {
570 case 0:
571 ccw_device_done(cdev, DEV_STATE_OFFLINE);
572 break;
573 case -ETIME:
574 ccw_device_done(cdev, DEV_STATE_BOXED);
575 break;
576 default:
577 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
578 break;
579 }
580}
581
582/*
583 * Shutdown device.
584 */
585int
586ccw_device_offline(struct ccw_device *cdev)
587{
588 struct subchannel *sch;
589
590 sch = to_subchannel(cdev->dev.parent);
591 if (stsch(sch->irq, &sch->schib) || !sch->schib.pmcw.dnv)
592 return -ENODEV;
593 if (cdev->private->state != DEV_STATE_ONLINE) {
594 if (sch->schib.scsw.actl != 0)
595 return -EBUSY;
596 return -EINVAL;
597 }
598 if (sch->schib.scsw.actl != 0)
599 return -EBUSY;
600 /* Are we doing path grouping? */
601 if (!cdev->private->options.pgroup) {
602 /* No, set state offline immediately. */
603 ccw_device_done(cdev, DEV_STATE_OFFLINE);
604 return 0;
605 }
606 /* Start Set Path Group commands. */
607 cdev->private->state = DEV_STATE_DISBAND_PGID;
608 ccw_device_disband_start(cdev);
609 return 0;
610}
611
612/*
613 * Handle timeout in device online/offline process.
614 */
615static void
616ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
617{
618 int ret;
619
620 ret = ccw_device_cancel_halt_clear(cdev);
621 switch (ret) {
622 case 0:
623 ccw_device_done(cdev, DEV_STATE_BOXED);
624 break;
625 case -ENODEV:
626 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
627 break;
628 default:
629 ccw_device_set_timeout(cdev, 3*HZ);
630 }
631}
632
633/*
634 * Handle not oper event in device recognition.
635 */
636static void
637ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
638{
639 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
640}
641
642/*
643 * Handle not operational event while offline.
644 */
645static void
646ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
647{
648 struct subchannel *sch;
649
650 cdev->private->state = DEV_STATE_NOT_OPER;
651 sch = to_subchannel(cdev->dev.parent);
652 if (get_device(&cdev->dev)) {
653 PREPARE_WORK(&cdev->private->kick_work,
654 ccw_device_call_sch_unregister, (void *)cdev);
655 queue_work(ccw_device_work, &cdev->private->kick_work);
656 }
657 wake_up(&cdev->private->wait_q);
658}
659
660/*
661 * Handle not operational event while online.
662 */
663static void
664ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
665{
666 struct subchannel *sch;
667
668 sch = to_subchannel(cdev->dev.parent);
669 if (sch->driver->notify &&
670 sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
671 ccw_device_set_timeout(cdev, 0);
672 cdev->private->flags.fake_irb = 0;
673 cdev->private->state = DEV_STATE_DISCONNECTED;
674 wake_up(&cdev->private->wait_q);
675 return;
676 }
677 cdev->private->state = DEV_STATE_NOT_OPER;
678 cio_disable_subchannel(sch);
679 if (sch->schib.scsw.actl != 0) {
680 // FIXME: not-oper indication to device driver ?
681 ccw_device_call_handler(cdev);
682 }
683 if (get_device(&cdev->dev)) {
684 PREPARE_WORK(&cdev->private->kick_work,
685 ccw_device_call_sch_unregister, (void *)cdev);
686 queue_work(ccw_device_work, &cdev->private->kick_work);
687 }
688 wake_up(&cdev->private->wait_q);
689}
690
691/*
692 * Handle path verification event.
693 */
694static void
695ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
696{
697 struct subchannel *sch;
698
699 if (!cdev->private->options.pgroup)
700 return;
701 if (cdev->private->state == DEV_STATE_W4SENSE) {
702 cdev->private->flags.doverify = 1;
703 return;
704 }
705 sch = to_subchannel(cdev->dev.parent);
706 /*
707 * Since we might not just be coming from an interrupt from the
708 * subchannel we have to update the schib.
709 */
710 stsch(sch->irq, &sch->schib);
711
712 if (sch->schib.scsw.actl != 0 ||
713 (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
714 /*
715 * No final status yet or final status not yet delivered
716 * to the device driver. Can't do path verification now;
717 * delay until the final status has been delivered.
718 */
719 cdev->private->flags.doverify = 1;
720 return;
721 }
722 /* Device is idle, we can do the path verification. */
723 cdev->private->state = DEV_STATE_VERIFY;
724 ccw_device_verify_start(cdev);
725}
726
727/*
728 * Got an interrupt for a normal io (state online).
729 */
730static void
731ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
732{
733 struct irb *irb;
734
735 irb = (struct irb *) __LC_IRB;
736 /* Check for unsolicited interrupt. */
737 if ((irb->scsw.stctl ==
738 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
739 && (!irb->scsw.cc)) {
740 if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
741 !irb->esw.esw0.erw.cons) {
742 /* Unit check but no sense data. Need basic sense. */
743 if (ccw_device_do_sense(cdev, irb) != 0)
744 goto call_handler_unsol;
745 memcpy(irb, &cdev->private->irb, sizeof(struct irb));
746 cdev->private->state = DEV_STATE_W4SENSE;
747 cdev->private->intparm = 0;
748 return;
749 }
750call_handler_unsol:
751 if (cdev->handler)
752 cdev->handler (cdev, 0, irb);
753 return;
754 }
755 /* Accumulate status and find out if a basic sense is needed. */
756 ccw_device_accumulate_irb(cdev, irb);
757 if (cdev->private->flags.dosense) {
758 if (ccw_device_do_sense(cdev, irb) == 0) {
759 cdev->private->state = DEV_STATE_W4SENSE;
760 }
761 return;
762 }
763 /* Call the handler. */
764 if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
765 /* Start delayed path verification. */
766 ccw_device_online_verify(cdev, 0);
767}
768
769/*
770 * Got a timeout in the online state.
771 */
772static void
773ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
774{
775 int ret;
776
777 ccw_device_set_timeout(cdev, 0);
778 ret = ccw_device_cancel_halt_clear(cdev);
779 if (ret == -EBUSY) {
780 ccw_device_set_timeout(cdev, 3*HZ);
781 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
782 return;
783 }
784 if (ret == -ENODEV) {
785 struct subchannel *sch;
786
787 sch = to_subchannel(cdev->dev.parent);
788 if (!sch->lpm) {
789 PREPARE_WORK(&cdev->private->kick_work,
790 ccw_device_nopath_notify, (void *)cdev);
791 queue_work(ccw_device_notify_work,
792 &cdev->private->kick_work);
793 } else
794 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
795 } else if (cdev->handler)
796 cdev->handler(cdev, cdev->private->intparm,
797 ERR_PTR(-ETIMEDOUT));
798}
799
800/*
801 * Got an interrupt for a basic sense.
802 */
803void
804ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
805{
806 struct irb *irb;
807
808 irb = (struct irb *) __LC_IRB;
809 /* Check for unsolicited interrupt. */
810 if (irb->scsw.stctl ==
811 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
812 if (irb->scsw.cc == 1)
813 /* Basic sense hasn't started. Try again. */
814 ccw_device_do_sense(cdev, irb);
815 else {
816 printk(KERN_WARNING "Huh? %s(%s): unsolicited interrupt...\n",
817 __FUNCTION__, cdev->dev.bus_id);
818 if (cdev->handler)
819 cdev->handler (cdev, 0, irb);
820 }
821 return;
822 }
823 /* Add basic sense info to irb. */
824 ccw_device_accumulate_basic_sense(cdev, irb);
825 if (cdev->private->flags.dosense) {
826 /* Another basic sense is needed. */
827 ccw_device_do_sense(cdev, irb);
828 return;
829 }
830 cdev->private->state = DEV_STATE_ONLINE;
831 /* Call the handler. */
832 if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
833 /* Start delayed path verification. */
834 ccw_device_online_verify(cdev, 0);
835}
836
837static void
838ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
839{
840 struct irb *irb;
841
842 irb = (struct irb *) __LC_IRB;
843 /* Accumulate status. We don't do basic sense. */
844 ccw_device_accumulate_irb(cdev, irb);
845 /* Try to start delayed device verification. */
846 ccw_device_online_verify(cdev, 0);
847 /* Note: Don't call handler for cio initiated clear! */
848}
849
850static void
851ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
852{
853 struct subchannel *sch;
854
855 sch = to_subchannel(cdev->dev.parent);
856 ccw_device_set_timeout(cdev, 0);
857 /* OK, i/o is dead now. Call interrupt handler. */
858 cdev->private->state = DEV_STATE_ONLINE;
859 if (cdev->handler)
860 cdev->handler(cdev, cdev->private->intparm,
861 ERR_PTR(-ETIMEDOUT));
862 if (!sch->lpm) {
863 PREPARE_WORK(&cdev->private->kick_work,
864 ccw_device_nopath_notify, (void *)cdev);
865 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
866 } else if (cdev->private->flags.doverify)
867 /* Start delayed path verification. */
868 ccw_device_online_verify(cdev, 0);
869}
870
871static void
872ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
873{
874 int ret;
875
876 ret = ccw_device_cancel_halt_clear(cdev);
877 if (ret == -EBUSY) {
878 ccw_device_set_timeout(cdev, 3*HZ);
879 return;
880 }
881 if (ret == -ENODEV) {
882 struct subchannel *sch;
883
884 sch = to_subchannel(cdev->dev.parent);
885 if (!sch->lpm) {
886 PREPARE_WORK(&cdev->private->kick_work,
887 ccw_device_nopath_notify, (void *)cdev);
888 queue_work(ccw_device_notify_work,
889 &cdev->private->kick_work);
890 } else
891 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
892 return;
893 }
894 //FIXME: Can we get here?
895 cdev->private->state = DEV_STATE_ONLINE;
896 if (cdev->handler)
897 cdev->handler(cdev, cdev->private->intparm,
898 ERR_PTR(-ETIMEDOUT));
899}
900
901static void
902ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
903{
904 struct irb *irb;
905 struct subchannel *sch;
906
907 irb = (struct irb *) __LC_IRB;
908 /*
909 * Accumulate status and find out if a basic sense is needed.
910 * This is fine since we have already adapted the lpm.
911 */
912 ccw_device_accumulate_irb(cdev, irb);
913 if (cdev->private->flags.dosense) {
914 if (ccw_device_do_sense(cdev, irb) == 0) {
915 cdev->private->state = DEV_STATE_W4SENSE;
916 }
917 return;
918 }
919
920 /* Iff device is idle, reset timeout. */
921 sch = to_subchannel(cdev->dev.parent);
922 if (!stsch(sch->irq, &sch->schib))
923 if (sch->schib.scsw.actl == 0)
924 ccw_device_set_timeout(cdev, 0);
925 /* Call the handler. */
926 ccw_device_call_handler(cdev);
927 if (!sch->lpm) {
928 PREPARE_WORK(&cdev->private->kick_work,
929 ccw_device_nopath_notify, (void *)cdev);
930 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
931 } else if (cdev->private->flags.doverify)
932 ccw_device_online_verify(cdev, 0);
933}
934
935static void
936ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
937{
938 int ret;
939 struct subchannel *sch;
940
941 sch = to_subchannel(cdev->dev.parent);
942 ccw_device_set_timeout(cdev, 0);
943 ret = ccw_device_cancel_halt_clear(cdev);
944 if (ret == -EBUSY) {
945 ccw_device_set_timeout(cdev, 3*HZ);
946 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
947 return;
948 }
949 if (ret == -ENODEV) {
950 if (!sch->lpm) {
951 PREPARE_WORK(&cdev->private->kick_work,
952 ccw_device_nopath_notify, (void *)cdev);
953 queue_work(ccw_device_notify_work,
954 &cdev->private->kick_work);
955 } else
956 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
957 return;
958 }
959 if (cdev->handler)
960 cdev->handler(cdev, cdev->private->intparm,
961 ERR_PTR(-ETIMEDOUT));
962 if (!sch->lpm) {
963 PREPARE_WORK(&cdev->private->kick_work,
964 ccw_device_nopath_notify, (void *)cdev);
965 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
966 } else if (cdev->private->flags.doverify)
967 /* Start delayed path verification. */
968 ccw_device_online_verify(cdev, 0);
969}
970
971static void
972ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
973{
974 /* When the I/O has terminated, we have to start verification. */
975 if (cdev->private->options.pgroup)
976 cdev->private->flags.doverify = 1;
977}
978
979static void
980ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
981{
982 struct irb *irb;
983
984 switch (dev_event) {
985 case DEV_EVENT_INTERRUPT:
986 irb = (struct irb *) __LC_IRB;
987 /* Check for unsolicited interrupt. */
988 if ((irb->scsw.stctl ==
989 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
990 (!irb->scsw.cc))
991 /* FIXME: we should restart stlck here, but this
992 * is extremely unlikely ... */
993 goto out_wakeup;
994
995 ccw_device_accumulate_irb(cdev, irb);
996 /* We don't care about basic sense etc. */
997 break;
998 default: /* timeout */
999 break;
1000 }
1001out_wakeup:
1002 wake_up(&cdev->private->wait_q);
1003}
1004
1005static void
1006ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1007{
1008 struct subchannel *sch;
1009
1010 sch = to_subchannel(cdev->dev.parent);
1011 if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
1012 /* Couldn't enable the subchannel for i/o. Sick device. */
1013 return;
1014
1015 /* After 60s the device recognition is considered to have failed. */
1016 ccw_device_set_timeout(cdev, 60*HZ);
1017
1018 cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
1019 ccw_device_sense_id_start(cdev);
1020}
1021
1022void
1023device_trigger_reprobe(struct subchannel *sch)
1024{
1025 struct ccw_device *cdev;
1026
1027 if (!sch->dev.driver_data)
1028 return;
1029 cdev = sch->dev.driver_data;
1030 if (cdev->private->state != DEV_STATE_DISCONNECTED)
1031 return;
1032
1033 /* Update some values. */
1034 if (stsch(sch->irq, &sch->schib))
1035 return;
1036
1037 /*
1038 * The pim, pam, pom values may not be accurate, but they are the best
1039 * we have before performing device selection :/
1040 */
1041 sch->lpm = sch->schib.pmcw.pim &
1042 sch->schib.pmcw.pam &
1043 sch->schib.pmcw.pom &
1044 sch->opm;
1045 /* Re-set some bits in the pmcw that were lost. */
1046 sch->schib.pmcw.isc = 3;
1047 sch->schib.pmcw.csense = 1;
1048 sch->schib.pmcw.ena = 0;
1049 if ((sch->lpm & (sch->lpm - 1)) != 0)
1050 sch->schib.pmcw.mp = 1;
1051 sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
1052 /* We should also update ssd info, but this has to wait. */
1053 ccw_device_start_id(cdev, 0);
1054}
1055
1056static void
1057ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
1058{
1059 struct subchannel *sch;
1060
1061 sch = to_subchannel(cdev->dev.parent);
1062 /*
1063 * An interrupt in state offline means a previous disable was not
1064 * successful. Try again.
1065 */
1066 cio_disable_subchannel(sch);
1067}
1068
1069static void
1070ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
1071{
1072 retry_set_schib(cdev);
1073 cdev->private->state = DEV_STATE_ONLINE;
1074 dev_fsm_event(cdev, dev_event);
1075}
1076
1077
1078static void
1079ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1080{
1081 ccw_device_set_timeout(cdev, 0);
1082 if (dev_event == DEV_EVENT_NOTOPER)
1083 cdev->private->state = DEV_STATE_NOT_OPER;
1084 else
1085 cdev->private->state = DEV_STATE_OFFLINE;
1086 wake_up(&cdev->private->wait_q);
1087}
1088
1089static void
1090ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1091{
1092 int ret;
1093
1094 ret = ccw_device_cancel_halt_clear(cdev);
1095 switch (ret) {
1096 case 0:
1097 cdev->private->state = DEV_STATE_OFFLINE;
1098 wake_up(&cdev->private->wait_q);
1099 break;
1100 case -ENODEV:
1101 cdev->private->state = DEV_STATE_NOT_OPER;
1102 wake_up(&cdev->private->wait_q);
1103 break;
1104 default:
1105 ccw_device_set_timeout(cdev, HZ/10);
1106 }
1107}
1108
1109/*
1110 * No operation action. This is used e.g. to ignore a timeout event in
1111 * state offline.
1112 */
1113static void
1114ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
1115{
1116}
1117
1118/*
1119 * Bug operation action.
1120 */
1121static void
1122ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
1123{
1124 printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
1125 cdev->private->state, dev_event);
1126 BUG();
1127}
1128
1129/*
1130 * device statemachine
1131 */
1132fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1133 [DEV_STATE_NOT_OPER] = {
1134 [DEV_EVENT_NOTOPER] = ccw_device_nop,
1135 [DEV_EVENT_INTERRUPT] = ccw_device_bug,
1136 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1137 [DEV_EVENT_VERIFY] = ccw_device_nop,
1138 },
1139 [DEV_STATE_SENSE_PGID] = {
1140 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1141 [DEV_EVENT_INTERRUPT] = ccw_device_sense_pgid_irq,
1142 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
1143 [DEV_EVENT_VERIFY] = ccw_device_nop,
1144 },
1145 [DEV_STATE_SENSE_ID] = {
1146 [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper,
1147 [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq,
1148 [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout,
1149 [DEV_EVENT_VERIFY] = ccw_device_nop,
1150 },
1151 [DEV_STATE_OFFLINE] = {
1152 [DEV_EVENT_NOTOPER] = ccw_device_offline_notoper,
1153 [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq,
1154 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1155 [DEV_EVENT_VERIFY] = ccw_device_nop,
1156 },
1157 [DEV_STATE_VERIFY] = {
1158 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1159 [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq,
1160 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
1161 [DEV_EVENT_VERIFY] = ccw_device_nop,
1162 },
1163 [DEV_STATE_ONLINE] = {
1164 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1165 [DEV_EVENT_INTERRUPT] = ccw_device_irq,
1166 [DEV_EVENT_TIMEOUT] = ccw_device_online_timeout,
1167 [DEV_EVENT_VERIFY] = ccw_device_online_verify,
1168 },
1169 [DEV_STATE_W4SENSE] = {
1170 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1171 [DEV_EVENT_INTERRUPT] = ccw_device_w4sense,
1172 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1173 [DEV_EVENT_VERIFY] = ccw_device_online_verify,
1174 },
1175 [DEV_STATE_DISBAND_PGID] = {
1176 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1177 [DEV_EVENT_INTERRUPT] = ccw_device_disband_irq,
1178 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
1179 [DEV_EVENT_VERIFY] = ccw_device_nop,
1180 },
1181 [DEV_STATE_BOXED] = {
1182 [DEV_EVENT_NOTOPER] = ccw_device_offline_notoper,
1183 [DEV_EVENT_INTERRUPT] = ccw_device_stlck_done,
1184 [DEV_EVENT_TIMEOUT] = ccw_device_stlck_done,
1185 [DEV_EVENT_VERIFY] = ccw_device_nop,
1186 },
1187 /* states to wait for i/o completion before doing something */
1188 [DEV_STATE_CLEAR_VERIFY] = {
1189 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1190 [DEV_EVENT_INTERRUPT] = ccw_device_clear_verify,
1191 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1192 [DEV_EVENT_VERIFY] = ccw_device_nop,
1193 },
1194 [DEV_STATE_TIMEOUT_KILL] = {
1195 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1196 [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
1197 [DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout,
1198 [DEV_EVENT_VERIFY] = ccw_device_nop, //FIXME
1199 },
1200 [DEV_STATE_WAIT4IO] = {
1201 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1202 [DEV_EVENT_INTERRUPT] = ccw_device_wait4io_irq,
1203 [DEV_EVENT_TIMEOUT] = ccw_device_wait4io_timeout,
1204 [DEV_EVENT_VERIFY] = ccw_device_wait4io_verify,
1205 },
1206 [DEV_STATE_QUIESCE] = {
1207 [DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
1208 [DEV_EVENT_INTERRUPT] = ccw_device_quiesce_done,
1209 [DEV_EVENT_TIMEOUT] = ccw_device_quiesce_timeout,
1210 [DEV_EVENT_VERIFY] = ccw_device_nop,
1211 },
1212 /* special states for devices gone not operational */
1213 [DEV_STATE_DISCONNECTED] = {
1214 [DEV_EVENT_NOTOPER] = ccw_device_nop,
1215 [DEV_EVENT_INTERRUPT] = ccw_device_start_id,
1216 [DEV_EVENT_TIMEOUT] = ccw_device_bug,
1217 [DEV_EVENT_VERIFY] = ccw_device_nop,
1218 },
1219 [DEV_STATE_DISCONNECTED_SENSE_ID] = {
1220 [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper,
1221 [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq,
1222 [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout,
1223 [DEV_EVENT_VERIFY] = ccw_device_nop,
1224 },
1225 [DEV_STATE_CMFCHANGE] = {
1226 [DEV_EVENT_NOTOPER] = ccw_device_change_cmfstate,
1227 [DEV_EVENT_INTERRUPT] = ccw_device_change_cmfstate,
1228 [DEV_EVENT_TIMEOUT] = ccw_device_change_cmfstate,
1229 [DEV_EVENT_VERIFY] = ccw_device_change_cmfstate,
1230 },
1231};
1232
1233/*
1234 * io_subchannel_irq is called for "real" interrupts or for status
1235 * pending conditions on msch.
1236 */
1237void
1238io_subchannel_irq (struct device *pdev)
1239{
1240 struct ccw_device *cdev;
1241
1242 cdev = to_subchannel(pdev)->dev.driver_data;
1243
1244 CIO_TRACE_EVENT (3, "IRQ");
1245 CIO_TRACE_EVENT (3, pdev->bus_id);
1246 if (cdev)
1247 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1248}
1249
1250EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
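Editor's note: the jumptable above maps (device state, event) pairs to handler functions, and every slot is populated (ccw_device_nop and ccw_device_bug fill the uninteresting ones), so the dispatcher needs no NULL check. A minimal sketch of how such a table is dispatched; the table name dev_jumptable and the helper name are assumptions based on this patch, not a quote of the real dev_fsm_event():

	/* Hedged sketch: dispatch one event through the 2D state/event
	 * table declared above. */
	static inline void
	dev_fsm_event_sketch(struct ccw_device *cdev, enum dev_event dev_event)
	{
		dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
	}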
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
new file mode 100644
index 000000000000..0e68fb511dc9
--- /dev/null
+++ b/drivers/s390/cio/device_id.c
@@ -0,0 +1,355 @@
1/*
2 * drivers/s390/cio/device_id.c
3 *
4 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
5 * IBM Corporation
6 * Author(s): Cornelia Huck(cohuck@de.ibm.com)
7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 *
9 * Sense ID functions.
10 */
11
12#include <linux/module.h>
13#include <linux/config.h>
14#include <linux/init.h>
15
16#include <asm/ccwdev.h>
17#include <asm/delay.h>
18#include <asm/cio.h>
19#include <asm/lowcore.h>
20
21#include "cio.h"
22#include "cio_debug.h"
23#include "css.h"
24#include "device.h"
25#include "ioasm.h"
26
27/*
28 * diag210 is used under VM to get information about a virtual device
29 */
30#ifdef CONFIG_ARCH_S390X
31int
32diag210(struct diag210 * addr)
33{
34 /*
35 * diag 210 needs its data below the 2GB border, so we
36 * use a static data area to be sure
37 */
38 static struct diag210 diag210_tmp;
39 static DEFINE_SPINLOCK(diag210_lock);
40 unsigned long flags;
41 int ccode;
42
43 spin_lock_irqsave(&diag210_lock, flags);
44 diag210_tmp = *addr;
45
46 asm volatile (
47 " lhi %0,-1\n"
48 " sam31\n"
49 " diag %1,0,0x210\n"
50 "0: ipm %0\n"
51 " srl %0,28\n"
52 "1: sam64\n"
53 ".section __ex_table,\"a\"\n"
54 " .align 8\n"
55 " .quad 0b,1b\n"
56 ".previous"
57 : "=&d" (ccode) : "a" (__pa(&diag210_tmp)) : "cc", "memory" );
58
59 *addr = diag210_tmp;
60 spin_unlock_irqrestore(&diag210_lock, flags);
61
62 return ccode;
63}
64#else
65int
66diag210(struct diag210 * addr)
67{
68 int ccode;
69
70 asm volatile (
71 " lhi %0,-1\n"
72 " diag %1,0,0x210\n"
73 "0: ipm %0\n"
74 " srl %0,28\n"
75 "1:\n"
76 ".section __ex_table,\"a\"\n"
77 " .align 4\n"
78 " .long 0b,1b\n"
79 ".previous"
80 : "=&d" (ccode) : "a" (__pa(addr)) : "cc", "memory" );
81
82 return ccode;
83}
84#endif
85
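For illustration, a caller could query z/VM about a device as follows. This is a hedged sketch: devno 0x0190 is a made-up example, and the struct diag210 field names are the ones used below in VM_virtual_device_info().

	/* Sketch: ask z/VM for the virtual device type of devno 0x0190.
	 * On 64 bit the wrapper above copies the parameter block below
	 * the 2GB border before issuing the diagnose. */
	static int query_vdev_type(void)
	{
		struct diag210 d = {
			.vrdcdvno = 0x0190,	/* hypothetical device number */
			.vrdclen  = sizeof(d),
		};

		if (diag210(&d) != 0)
			return -ENODEV;		/* non-zero cc: no such device */
		return d.vrdcvtyp;		/* virtual device type */
	}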
86/*
87 * Input :
88 * devno - device number
89 * ps - pointer to sense ID data area
90 * Output : none
91 */
92static void
93VM_virtual_device_info (__u16 devno, struct senseid *ps)
94{
95 static struct {
96 int vrdcvcla, vrdcvtyp, cu_type;
97 } vm_devices[] = {
98 { 0x08, 0x01, 0x3480 },
99 { 0x08, 0x02, 0x3430 },
100 { 0x08, 0x10, 0x3420 },
101 { 0x08, 0x42, 0x3424 },
102 { 0x08, 0x44, 0x9348 },
103 { 0x08, 0x81, 0x3490 },
104 { 0x08, 0x82, 0x3422 },
105 { 0x10, 0x41, 0x1403 },
106 { 0x10, 0x42, 0x3211 },
107 { 0x10, 0x43, 0x3203 },
108 { 0x10, 0x45, 0x3800 },
109 { 0x10, 0x47, 0x3262 },
110 { 0x10, 0x48, 0x3820 },
111 { 0x10, 0x49, 0x3800 },
112 { 0x10, 0x4a, 0x4245 },
113 { 0x10, 0x4b, 0x4248 },
114 { 0x10, 0x4d, 0x3800 },
115 { 0x10, 0x4e, 0x3820 },
116 { 0x10, 0x4f, 0x3820 },
117 { 0x10, 0x82, 0x2540 },
118 { 0x10, 0x84, 0x3525 },
119 { 0x20, 0x81, 0x2501 },
120 { 0x20, 0x82, 0x2540 },
121 { 0x20, 0x84, 0x3505 },
122 { 0x40, 0x01, 0x3278 },
123 { 0x40, 0x04, 0x3277 },
124 { 0x40, 0x80, 0x2250 },
125 { 0x40, 0xc0, 0x5080 },
126 { 0x80, 0x00, 0x3215 },
127 };
128 struct diag210 diag_data;
129 int ccode, i;
130
131 CIO_TRACE_EVENT (4, "VMvdinf");
132
133 diag_data = (struct diag210) {
134 .vrdcdvno = devno,
135 .vrdclen = sizeof (diag_data),
136 };
137
138 ccode = diag210 (&diag_data);
139 ps->reserved = 0xff;
140
141 /* Special case for bloody osa devices. */
142 if (diag_data.vrdcvcla == 0x02 &&
143 diag_data.vrdcvtyp == 0x20) {
144 ps->cu_type = 0x3088;
145 ps->cu_model = 0x60;
146 return;
147 }
148 for (i = 0; i < sizeof(vm_devices) / sizeof(vm_devices[0]); i++)
149 if (diag_data.vrdcvcla == vm_devices[i].vrdcvcla &&
150 diag_data.vrdcvtyp == vm_devices[i].vrdcvtyp) {
151 ps->cu_type = vm_devices[i].cu_type;
152 return;
153 }
154 CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d):"
155 "vdev class : %02X, vdev type : %04X \n ... "
156 "rdev class : %02X, rdev type : %04X, "
157 "rdev model: %02X\n",
158 devno, ccode,
159 diag_data.vrdcvcla, diag_data.vrdcvtyp,
160 diag_data.vrdcrccl, diag_data.vrdccrty,
161 diag_data.vrdccrmd);
162}
163
164/*
165 * Start Sense ID helper function.
166 * Try to obtain the 'control unit'/'device type' information
167 * associated with the subchannel.
168 */
169static int
170__ccw_device_sense_id_start(struct ccw_device *cdev)
171{
172 struct subchannel *sch;
173 struct ccw1 *ccw;
174 int ret;
175
176 sch = to_subchannel(cdev->dev.parent);
177 /* Setup sense channel program. */
178 ccw = cdev->private->iccws;
179 if (sch->schib.pmcw.pim != 0x80) {
180 /* more than one path installed. */
181 ccw->cmd_code = CCW_CMD_SUSPEND_RECONN;
182 ccw->cda = 0;
183 ccw->count = 0;
184 ccw->flags = CCW_FLAG_SLI | CCW_FLAG_CC;
185 ccw++;
186 }
187 ccw->cmd_code = CCW_CMD_SENSE_ID;
188 ccw->cda = (__u32) __pa (&cdev->private->senseid);
189 ccw->count = sizeof (struct senseid);
190 ccw->flags = CCW_FLAG_SLI;
191
192 /* Reset device status. */
193 memset(&cdev->private->irb, 0, sizeof(struct irb));
194
195 /* Try on every path. */
196 ret = -ENODEV;
197 while (cdev->private->imask != 0) {
198 if ((sch->opm & cdev->private->imask) != 0 &&
199 cdev->private->iretry > 0) {
200 cdev->private->iretry--;
201 ret = cio_start (sch, cdev->private->iccws,
202 cdev->private->imask);
203 /* ret is 0, -EBUSY, -EACCES or -ENODEV */
204 if (ret != -EACCES)
205 return ret;
206 }
207 cdev->private->imask >>= 1;
208 cdev->private->iretry = 5;
209 }
210 return ret;
211}
212
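The loop above steps a single path bit (imask) from 0x80 down to 0x01, allowing up to five attempts per operational path; note that it returns after each cio_start() and is re-entered from the interrupt handler, so the retry state lives in cdev->private. Below is a compressed, self-contained model of the same walk, with the multi-invocation retry collapsed into an inner loop; try_path() is a hypothetical stand-in for cio_start():

	static int try_path(unsigned char mask);	/* hypothetical */

	/* Illustrative model of the path walk. opm is the operational
	 * path mask; bit 0x80 is path 0. */
	static int walk_paths(unsigned char opm)
	{
		int ret = -ENODEV;
		unsigned char imask;
		int iretry;

		for (imask = 0x80; imask != 0; imask >>= 1) {
			if (!(opm & imask))
				continue;		/* path not operational */
			for (iretry = 5; iretry > 0; iretry--) {
				ret = try_path(imask);
				if (ret != -EACCES)	/* 0, -EBUSY or -ENODEV */
					return ret;	/* done or hard failure */
			}
		}
		return ret;
	}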
213void
214ccw_device_sense_id_start(struct ccw_device *cdev)
215{
216 int ret;
217
218 memset (&cdev->private->senseid, 0, sizeof (struct senseid));
219 cdev->private->senseid.cu_type = 0xFFFF;
220 cdev->private->imask = 0x80;
221 cdev->private->iretry = 5;
222 ret = __ccw_device_sense_id_start(cdev);
223 if (ret && ret != -EBUSY)
224 ccw_device_sense_id_done(cdev, ret);
225}
226
227/*
228 * Called from interrupt context to check if a valid answer
229 * to Sense ID was received.
230 */
231static int
232ccw_device_check_sense_id(struct ccw_device *cdev)
233{
234 struct subchannel *sch;
235 struct irb *irb;
236
237 sch = to_subchannel(cdev->dev.parent);
238 irb = &cdev->private->irb;
239 /* Did we get a proper answer ? */
240 if (cdev->private->senseid.cu_type != 0xFFFF &&
241 cdev->private->senseid.reserved == 0xFF) {
242 if (irb->scsw.count < sizeof (struct senseid) - 8)
243 cdev->private->flags.esid = 1;
244 return 0; /* Success */
245 }
246 /* Check the error cases. */
247 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
248 return -ETIME;
249 if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) {
250 /*
251		 * If the device doesn't support the SenseID
252		 * command, further retries wouldn't help ...
253 * NB: We don't check here for intervention required like we
254 * did before, because tape devices with no tape inserted
255 * may present this status *in conjunction with* the
256 * sense id information. So, for intervention required,
257 * we use the "whack it until it talks" strategy...
258 */
259 CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel %04x "
260 "reports cmd reject\n",
261 cdev->private->devno, sch->irq);
262 return -EOPNOTSUPP;
263 }
264 if (irb->esw.esw0.erw.cons) {
265 CIO_MSG_EVENT(2, "SenseID : UC on dev %04x, "
266 "lpum %02X, cnt %02d, sns :"
267 " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
268 cdev->private->devno,
269 irb->esw.esw0.sublog.lpum,
270 irb->esw.esw0.erw.scnt,
271 irb->ecw[0], irb->ecw[1],
272 irb->ecw[2], irb->ecw[3],
273 irb->ecw[4], irb->ecw[5],
274 irb->ecw[6], irb->ecw[7]);
275 return -EAGAIN;
276 }
277 if (irb->scsw.cc == 3) {
278 if ((sch->orb.lpm &
279 sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
280 CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x on"
281 " subchannel %04x is 'not operational'\n",
282 sch->orb.lpm, cdev->private->devno,
283 sch->irq);
284 return -EACCES;
285 }
286 /* Hmm, whatever happened, try again. */
287 CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
288 "subchannel %04x returns status %02X%02X\n",
289 cdev->private->devno, sch->irq,
290 irb->scsw.dstat, irb->scsw.cstat);
291 return -EAGAIN;
292}
293
294/*
295 * Got interrupt for Sense ID.
296 */
297void
298ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
299{
300 struct subchannel *sch;
301 struct irb *irb;
302 int ret;
303
304 sch = to_subchannel(cdev->dev.parent);
305 irb = (struct irb *) __LC_IRB;
306 /* Retry sense id, if needed. */
307 if (irb->scsw.stctl ==
308 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
309 if ((irb->scsw.cc == 1) || !irb->scsw.actl) {
310 ret = __ccw_device_sense_id_start(cdev);
311 if (ret && ret != -EBUSY)
312 ccw_device_sense_id_done(cdev, ret);
313 }
314 return;
315 }
316 if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
317 return;
318 ret = ccw_device_check_sense_id(cdev);
319 memset(&cdev->private->irb, 0, sizeof(struct irb));
320 switch (ret) {
321 /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN or -EACCES */
322 case 0: /* Sense id succeeded. */
323 case -ETIME: /* Sense id stopped by timeout. */
324 ccw_device_sense_id_done(cdev, ret);
325 break;
326 case -EACCES: /* channel is not operational. */
327 sch->lpm &= ~cdev->private->imask;
328 cdev->private->imask >>= 1;
329 cdev->private->iretry = 5;
330 /* fall through. */
331 case -EAGAIN: /* try again. */
332 ret = __ccw_device_sense_id_start(cdev);
333 if (ret == 0 || ret == -EBUSY)
334 break;
335 /* fall through. */
336 default: /* Sense ID failed. Try asking VM. */
337 if (MACHINE_IS_VM) {
338 VM_virtual_device_info (cdev->private->devno,
339 &cdev->private->senseid);
340 if (cdev->private->senseid.cu_type != 0xFFFF) {
341 /* Got the device information from VM. */
342 ccw_device_sense_id_done(cdev, 0);
343 return;
344 }
345 }
346 /*
347		 * If we couldn't identify the device type we
348		 * consider the device "not operational".
349 */
350 ccw_device_sense_id_done(cdev, -ENODEV);
351 break;
352 }
353}
354
355EXPORT_SYMBOL(diag210);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
new file mode 100644
index 000000000000..11e260e0b9c9
--- /dev/null
+++ b/drivers/s390/cio/device_ops.c
@@ -0,0 +1,603 @@
1/*
2 * drivers/s390/cio/device_ops.c
3 *
4 * $Revision: 1.55 $
5 *
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation
8 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
9 * Cornelia Huck (cohuck@de.ibm.com)
10 */
11#include <linux/config.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/errno.h>
15#include <linux/slab.h>
16#include <linux/list.h>
17#include <linux/device.h>
18#include <linux/delay.h>
19
20#include <asm/ccwdev.h>
21#include <asm/idals.h>
22#include <asm/qdio.h>
23
24#include "cio.h"
25#include "cio_debug.h"
26#include "css.h"
27#include "chsc.h"
28#include "device.h"
29#include "qdio.h"
30
31int
32ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
33{
34 /*
35	 * The flag usage is mutually exclusive ...
36 */
37 if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
38 (flags & CCWDEV_REPORT_ALL))
39 return -EINVAL;
40 cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
41 cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
42 cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
43 cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
44 return 0;
45}
46
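A driver typically sets these options once while bringing the device online. An illustrative sketch; the flag combination respects the mutual-exclusion check above:

	/* Sketch: request path grouping plus early (fast) notification. */
	static int my_set_online(struct ccw_device *cdev)
	{
		int ret;

		ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
						   CCWDEV_EARLY_NOTIFICATION);
		if (ret)
			return ret;	/* -EINVAL for conflicting flags */
		/* ... continue with device setup ... */
		return 0;
	}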
47int
48ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
49{
50 struct subchannel *sch;
51 int ret;
52
53 if (!cdev)
54 return -ENODEV;
55 if (cdev->private->state == DEV_STATE_NOT_OPER)
56 return -ENODEV;
57 if (cdev->private->state != DEV_STATE_ONLINE &&
58 cdev->private->state != DEV_STATE_WAIT4IO &&
59 cdev->private->state != DEV_STATE_W4SENSE)
60 return -EINVAL;
61 sch = to_subchannel(cdev->dev.parent);
62 if (!sch)
63 return -ENODEV;
64 ret = cio_clear(sch);
65 if (ret == 0)
66 cdev->private->intparm = intparm;
67 return ret;
68}
69
70int
71ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
72 unsigned long intparm, __u8 lpm, __u8 key,
73 unsigned long flags)
74{
75 struct subchannel *sch;
76 int ret;
77
78 if (!cdev)
79 return -ENODEV;
80 sch = to_subchannel(cdev->dev.parent);
81 if (!sch)
82 return -ENODEV;
83 if (cdev->private->state == DEV_STATE_NOT_OPER)
84 return -ENODEV;
85 if (cdev->private->state == DEV_STATE_VERIFY) {
86 /* Remember to fake irb when finished. */
87 if (!cdev->private->flags.fake_irb) {
88 cdev->private->flags.fake_irb = 1;
89 cdev->private->intparm = intparm;
90 return 0;
91 } else
92 /* There's already a fake I/O around. */
93 return -EBUSY;
94 }
95 if (cdev->private->state != DEV_STATE_ONLINE ||
96 ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
97 !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
98 cdev->private->flags.doverify)
99 return -EBUSY;
100 ret = cio_set_options (sch, flags);
101 if (ret)
102 return ret;
103 ret = cio_start_key (sch, cpa, lpm, key);
104 if (ret == 0)
105 cdev->private->intparm = intparm;
106 return ret;
107}
108
109
110int
111ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
112 unsigned long intparm, __u8 lpm, __u8 key,
113 unsigned long flags, int expires)
114{
115 int ret;
116
117 if (!cdev)
118 return -ENODEV;
119 ccw_device_set_timeout(cdev, expires);
120 ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
121 if (ret != 0)
122 ccw_device_set_timeout(cdev, 0);
123 return ret;
124}
125
126int
127ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
128 unsigned long intparm, __u8 lpm, unsigned long flags)
129{
130 return ccw_device_start_key(cdev, cpa, intparm, lpm,
131 default_storage_key, flags);
132}
133
134int
135ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
136 unsigned long intparm, __u8 lpm, unsigned long flags,
137 int expires)
138{
139 return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
140 default_storage_key, flags,
141 expires);
142}
143
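Putting the start variants together: a driver builds a channel program of chained struct ccw1 entries and hands its address to ccw_device_start() while holding the ccwdev lock. A hedged sketch follows; the no-op command code 0x03 and the intparm value are illustrative, and real code keeps CCWs and data 31-bit addressable (e.g. via GFP_DMA) and must not free them before the completion interrupt arrives:

	/* Sketch: issue a single no-op CCW. noop_ccw must stay allocated
	 * until the driver's interrupt handler reports completion. */
	static struct ccw1 noop_ccw;

	static int issue_noop(struct ccw_device *cdev)
	{
		memset(&noop_ccw, 0, sizeof(noop_ccw));
		noop_ccw.cmd_code = 0x03;	/* no-operation */
		noop_ccw.flags = CCW_FLAG_SLI;	/* tolerate incorrect length */
		/* intparm 0x1234 is echoed back to the interrupt handler */
		return ccw_device_start(cdev, &noop_ccw, 0x1234,
					0 /* any path */, 0);
	}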
144
145int
146ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
147{
148 struct subchannel *sch;
149 int ret;
150
151 if (!cdev)
152 return -ENODEV;
153 if (cdev->private->state == DEV_STATE_NOT_OPER)
154 return -ENODEV;
155 if (cdev->private->state != DEV_STATE_ONLINE &&
156 cdev->private->state != DEV_STATE_WAIT4IO &&
157 cdev->private->state != DEV_STATE_W4SENSE)
158 return -EINVAL;
159 sch = to_subchannel(cdev->dev.parent);
160 if (!sch)
161 return -ENODEV;
162 ret = cio_halt(sch);
163 if (ret == 0)
164 cdev->private->intparm = intparm;
165 return ret;
166}
167
168int
169ccw_device_resume(struct ccw_device *cdev)
170{
171 struct subchannel *sch;
172
173 if (!cdev)
174 return -ENODEV;
175 sch = to_subchannel(cdev->dev.parent);
176 if (!sch)
177 return -ENODEV;
178 if (cdev->private->state == DEV_STATE_NOT_OPER)
179 return -ENODEV;
180 if (cdev->private->state != DEV_STATE_ONLINE ||
181 !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
182 return -EINVAL;
183 return cio_resume(sch);
184}
185
186/*
187 * Pass interrupt to device driver.
188 */
189int
190ccw_device_call_handler(struct ccw_device *cdev)
191{
192 struct subchannel *sch;
193 unsigned int stctl;
194 int ending_status;
195
196 sch = to_subchannel(cdev->dev.parent);
197
198 /*
199	 * We pass the interrupt on to the device driver's handler if:
200	 * - we received ending status, or
201	 * - the handler requested to see all interrupts, or
202	 * - we received an intermediate status, or
203	 * - fast notification was requested (primary status), or
204	 * - the interrupt was unsolicited.
205 */
206 stctl = cdev->private->irb.scsw.stctl;
207 ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
208 (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
209 (stctl == SCSW_STCTL_STATUS_PEND);
210 if (!ending_status &&
211 !cdev->private->options.repall &&
212 !(stctl & SCSW_STCTL_INTER_STATUS) &&
213 !(cdev->private->options.fast &&
214 (stctl & SCSW_STCTL_PRIM_STATUS)))
215 return 0;
216
217 /*
218 * Now we are ready to call the device driver interrupt handler.
219 */
220 if (cdev->handler)
221 cdev->handler(cdev, cdev->private->intparm,
222 &cdev->private->irb);
223
224 /*
225 * Clear the old and now useless interrupt response block.
226 */
227 memset(&cdev->private->irb, 0, sizeof(struct irb));
228
229 return 1;
230}
231
232/*
233 * Search for CIW command in extended sense data.
234 */
235struct ciw *
236ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
237{
238 int ciw_cnt;
239
240 if (cdev->private->flags.esid == 0)
241 return NULL;
242 for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
243 if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
244 return cdev->private->senseid.ciw + ciw_cnt;
245 return NULL;
246}
247
248__u8
249ccw_device_get_path_mask(struct ccw_device *cdev)
250{
251 struct subchannel *sch;
252
253 sch = to_subchannel(cdev->dev.parent);
254 if (!sch)
255 return 0;
256 else
257 return sch->vpm;
258}
259
260static void
261ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
262{
263 if (!ip)
264 /* unsolicited interrupt */
265 return;
266
267 /* Abuse intparm for error reporting. */
268 if (IS_ERR(irb))
269 cdev->private->intparm = -EIO;
270 else if ((irb->scsw.dstat !=
271 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
272 (irb->scsw.cstat != 0)) {
273 /*
274 * We didn't get channel end / device end. Check if path
275 * verification has been started; we can retry after it has
276 * finished. We also retry unit checks except for command reject
277 * or intervention required.
278 */
279 if (cdev->private->flags.doverify ||
280 cdev->private->state == DEV_STATE_VERIFY)
281 cdev->private->intparm = -EAGAIN;
282		else if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
283 !(irb->ecw[0] &
284 (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
285 cdev->private->intparm = -EAGAIN;
286 else
287 cdev->private->intparm = -EIO;
288
289 } else
290 cdev->private->intparm = 0;
291 wake_up(&cdev->private->wait_q);
292}
293
294static inline int
295__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm)
296{
297 int ret;
298 struct subchannel *sch;
299
300 sch = to_subchannel(cdev->dev.parent);
301 do {
302 ret = cio_start (sch, ccw, lpm);
303 if ((ret == -EBUSY) || (ret == -EACCES)) {
304 /* Try again later. */
305 spin_unlock_irq(&sch->lock);
306 msleep(10);
307 spin_lock_irq(&sch->lock);
308 continue;
309 }
310 if (ret != 0)
311 /* Non-retryable error. */
312 break;
313 /* Wait for end of request. */
314 cdev->private->intparm = magic;
315 spin_unlock_irq(&sch->lock);
316 wait_event(cdev->private->wait_q,
317 (cdev->private->intparm == -EIO) ||
318 (cdev->private->intparm == -EAGAIN) ||
319 (cdev->private->intparm == 0));
320 spin_lock_irq(&sch->lock);
321 /* Check at least for channel end / device end */
322 if (cdev->private->intparm == -EIO) {
323 /* Non-retryable error. */
324 ret = -EIO;
325 break;
326 }
327 if (cdev->private->intparm == 0)
328 /* Success. */
329 break;
330 /* Try again later. */
331 spin_unlock_irq(&sch->lock);
332 msleep(10);
333 spin_lock_irq(&sch->lock);
334 } while (1);
335
336 return ret;
337}
338
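The magic values passed to this loop are the EBCDIC encodings of the command names, which makes a pending request easy to identify in a dump; ccw_device_wake_up() later overwrites intparm with 0, -EAGAIN or -EIO. For reference (EBCDIC 'R' = 0xD9, 'C' = 0xC3, 'D' = 0xC4; these defines are editorial, the source uses the literals directly):

	#define RDC_MAGIC 0x00D9C4C3	/* "RDC": read device characteristics */
	#define RCD_MAGIC 0x00D9C3C4	/* "RCD": read configuration data */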
339/**
340 * read_dev_chars() - read device characteristics
341 * @param cdev target ccw device
342 * @param buffer pointer to buffer for rdc data
343 * @param length size of rdc data
344 * @returns 0 for success, negative error value on failure
345 *
346 * Context:
347 * called for online device, lock not held
348 **/
349int
350read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
351{
352 void (*handler)(struct ccw_device *, unsigned long, struct irb *);
353 struct subchannel *sch;
354 int ret;
355 struct ccw1 *rdc_ccw;
356
357 if (!cdev)
358 return -ENODEV;
359 if (!buffer || !length)
360 return -EINVAL;
361 sch = to_subchannel(cdev->dev.parent);
362
363 CIO_TRACE_EVENT (4, "rddevch");
364 CIO_TRACE_EVENT (4, sch->dev.bus_id);
365
366 rdc_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
367 if (!rdc_ccw)
368 return -ENOMEM;
369 memset(rdc_ccw, 0, sizeof(struct ccw1));
370 rdc_ccw->cmd_code = CCW_CMD_RDC;
371 rdc_ccw->count = length;
372 rdc_ccw->flags = CCW_FLAG_SLI;
373 ret = set_normalized_cda (rdc_ccw, (*buffer));
374 if (ret != 0) {
375 kfree(rdc_ccw);
376 return ret;
377 }
378
379 spin_lock_irq(&sch->lock);
380 /* Save interrupt handler. */
381 handler = cdev->handler;
382 /* Temporarily install own handler. */
383 cdev->handler = ccw_device_wake_up;
384 if (cdev->private->state != DEV_STATE_ONLINE)
385 ret = -ENODEV;
386 else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
387 !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
388 cdev->private->flags.doverify)
389 ret = -EBUSY;
390 else
391 /* 0x00D9C4C3 == ebcdic "RDC" */
392 ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3, 0);
393
394 /* Restore interrupt handler. */
395 cdev->handler = handler;
396 spin_unlock_irq(&sch->lock);
397
398 clear_normalized_cda (rdc_ccw);
399 kfree(rdc_ccw);
400
401 return ret;
402}
403
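A caller provides its own buffer. A hedged sketch; the 64-byte size is illustrative, real sizes are device-type specific:

	/* Sketch: read device characteristics into a DMA-capable buffer.
	 * set_normalized_cda() may build an IDAL for it, so kmalloc'ed
	 * memory is required. */
	static int get_rdc(struct ccw_device *cdev)
	{
		void *rdc_data;
		int ret;

		rdc_data = kmalloc(64, GFP_KERNEL | GFP_DMA);
		if (!rdc_data)
			return -ENOMEM;
		ret = read_dev_chars(cdev, &rdc_data, 64);
		if (ret == 0) {
			/* ... interpret the device-specific RDC data ... */
		}
		kfree(rdc_data);
		return ret;
	}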
404/*
405 * Read Configuration data using path mask
406 */
407int
408read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lpm)
409{
410 void (*handler)(struct ccw_device *, unsigned long, struct irb *);
411 struct subchannel *sch;
412 struct ciw *ciw;
413 char *rcd_buf;
414 int ret;
415 struct ccw1 *rcd_ccw;
416
417 if (!cdev)
418 return -ENODEV;
419 if (!buffer || !length)
420 return -EINVAL;
421 sch = to_subchannel(cdev->dev.parent);
422
423 CIO_TRACE_EVENT (4, "rdconf");
424 CIO_TRACE_EVENT (4, sch->dev.bus_id);
425
426 /*
427 * scan for RCD command in extended SenseID data
428 */
429 ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
430 if (!ciw || ciw->cmd == 0)
431 return -EOPNOTSUPP;
432
433 rcd_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
434 if (!rcd_ccw)
435 return -ENOMEM;
436 memset(rcd_ccw, 0, sizeof(struct ccw1));
437 rcd_buf = kmalloc(ciw->count, GFP_KERNEL | GFP_DMA);
438 if (!rcd_buf) {
439 kfree(rcd_ccw);
440 return -ENOMEM;
441 }
442 memset (rcd_buf, 0, ciw->count);
443 rcd_ccw->cmd_code = ciw->cmd;
444 rcd_ccw->cda = (__u32) __pa (rcd_buf);
445 rcd_ccw->count = ciw->count;
446 rcd_ccw->flags = CCW_FLAG_SLI;
447
448 spin_lock_irq(&sch->lock);
449 /* Save interrupt handler. */
450 handler = cdev->handler;
451 /* Temporarily install own handler. */
452 cdev->handler = ccw_device_wake_up;
453 if (cdev->private->state != DEV_STATE_ONLINE)
454 ret = -ENODEV;
455 else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
456 !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
457 cdev->private->flags.doverify)
458 ret = -EBUSY;
459 else
460 /* 0x00D9C3C4 == ebcdic "RCD" */
461 ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4, lpm);
462
463 /* Restore interrupt handler. */
464 cdev->handler = handler;
465 spin_unlock_irq(&sch->lock);
466
467 /*
468 * on success we update the user input parms
469 */
470 if (ret) {
471 kfree (rcd_buf);
472 *buffer = NULL;
473 *length = 0;
474 } else {
475 *length = ciw->count;
476 *buffer = rcd_buf;
477 }
478 kfree(rcd_ccw);
479
480 return ret;
481}
482
483/*
484 * Read Configuration data
485 */
486int
487read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
488{
489 return read_conf_data_lpm (cdev, buffer, length, 0);
490}
491
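In contrast to read_dev_chars(), these variants allocate the result buffer themselves; on success the caller owns it and must free it. A short usage sketch:

	/* Sketch: fetch configuration data, use it, release it. */
	static int dump_conf_data(struct ccw_device *cdev)
	{
		void *buf = NULL;
		int len = 0;
		int ret;

		ret = read_conf_data(cdev, &buf, &len);
		if (ret)
			return ret;	/* buf stays NULL, len stays 0 */
		/* ... parse the len bytes of node-element data in buf ... */
		kfree(buf);
		return 0;
	}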
492/*
493 * Try to break the lock on a boxed device.
494 */
495int
496ccw_device_stlck(struct ccw_device *cdev)
497{
498 void *buf, *buf2;
499 unsigned long flags;
500 struct subchannel *sch;
501 int ret;
502
503 if (!cdev)
504 return -ENODEV;
505
506 if (cdev->drv && !cdev->private->options.force)
507 return -EINVAL;
508
509 sch = to_subchannel(cdev->dev.parent);
510
511 CIO_TRACE_EVENT(2, "stl lock");
512 CIO_TRACE_EVENT(2, cdev->dev.bus_id);
513
514 buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
515 if (!buf)
516 return -ENOMEM;
517 buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
518 if (!buf2) {
519 kfree(buf);
520 return -ENOMEM;
521 }
522 spin_lock_irqsave(&sch->lock, flags);
523 ret = cio_enable_subchannel(sch, 3);
524 if (ret)
525 goto out_unlock;
526 /*
527 * Setup ccw. We chain an unconditional reserve and a release so we
528 * only break the lock.
529 */
530 cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
531 cdev->private->iccws[0].cda = (__u32) __pa(buf);
532 cdev->private->iccws[0].count = 32;
533 cdev->private->iccws[0].flags = CCW_FLAG_CC;
534 cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
535 cdev->private->iccws[1].cda = (__u32) __pa(buf2);
536 cdev->private->iccws[1].count = 32;
537 cdev->private->iccws[1].flags = 0;
538 ret = cio_start(sch, cdev->private->iccws, 0);
539 if (ret) {
540 cio_disable_subchannel(sch); //FIXME: return code?
541 goto out_unlock;
542 }
543 cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
544 spin_unlock_irqrestore(&sch->lock, flags);
545 wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
546 spin_lock_irqsave(&sch->lock, flags);
547 cio_disable_subchannel(sch); //FIXME: return code?
548 if ((cdev->private->irb.scsw.dstat !=
549 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
550 (cdev->private->irb.scsw.cstat != 0))
551 ret = -EIO;
552 /* Clear irb. */
553 memset(&cdev->private->irb, 0, sizeof(struct irb));
554out_unlock:
555 if (buf)
556 kfree(buf);
557 if (buf2)
558 kfree(buf2);
559 spin_unlock_irqrestore(&sch->lock, flags);
560 return ret;
561}
562
563void *
564ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
565{
566 struct subchannel *sch;
567
568 sch = to_subchannel(cdev->dev.parent);
569 return chsc_get_chp_desc(sch, chp_no);
570}
571
572// FIXME: these have to go:
573
574int
575_ccw_device_get_subchannel_number(struct ccw_device *cdev)
576{
577 return cdev->private->irq;
578}
579
580int
581_ccw_device_get_device_number(struct ccw_device *cdev)
582{
583 return cdev->private->devno;
584}
585
586
587MODULE_LICENSE("GPL");
588EXPORT_SYMBOL(ccw_device_set_options);
589EXPORT_SYMBOL(ccw_device_clear);
590EXPORT_SYMBOL(ccw_device_halt);
591EXPORT_SYMBOL(ccw_device_resume);
592EXPORT_SYMBOL(ccw_device_start_timeout);
593EXPORT_SYMBOL(ccw_device_start);
594EXPORT_SYMBOL(ccw_device_start_timeout_key);
595EXPORT_SYMBOL(ccw_device_start_key);
596EXPORT_SYMBOL(ccw_device_get_ciw);
597EXPORT_SYMBOL(ccw_device_get_path_mask);
598EXPORT_SYMBOL(read_conf_data);
599EXPORT_SYMBOL(read_dev_chars);
600EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
601EXPORT_SYMBOL(_ccw_device_get_device_number);
602EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
603EXPORT_SYMBOL_GPL(read_conf_data_lpm);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
new file mode 100644
index 000000000000..0adac8a67331
--- /dev/null
+++ b/drivers/s390/cio/device_pgid.c
@@ -0,0 +1,448 @@
1/*
2 * drivers/s390/cio/device_pgid.c
3 *
4 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
5 * IBM Corporation
6 * Author(s): Cornelia Huck(cohuck@de.ibm.com)
7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 *
9 * Path Group ID functions.
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/init.h>
15
16#include <asm/ccwdev.h>
17#include <asm/cio.h>
18#include <asm/delay.h>
19#include <asm/lowcore.h>
20
21#include "cio.h"
22#include "cio_debug.h"
23#include "css.h"
24#include "device.h"
25
26/*
27 * Start Sense Path Group ID helper function. Used in ccw_device_recog
28 * and ccw_device_sense_pgid.
29 */
30static int
31__ccw_device_sense_pgid_start(struct ccw_device *cdev)
32{
33 struct subchannel *sch;
34 struct ccw1 *ccw;
35 int ret;
36
37 sch = to_subchannel(cdev->dev.parent);
38 /* Setup sense path group id channel program. */
39 ccw = cdev->private->iccws;
40 ccw->cmd_code = CCW_CMD_SENSE_PGID;
41 ccw->cda = (__u32) __pa (&cdev->private->pgid);
42 ccw->count = sizeof (struct pgid);
43 ccw->flags = CCW_FLAG_SLI;
44
45 /* Reset device status. */
46 memset(&cdev->private->irb, 0, sizeof(struct irb));
47 /* Try on every path. */
48 ret = -ENODEV;
49 while (cdev->private->imask != 0) {
50 /* Try every path multiple times. */
51 if (cdev->private->iretry > 0) {
52 cdev->private->iretry--;
53 ret = cio_start (sch, cdev->private->iccws,
54 cdev->private->imask);
55 /* ret is 0, -EBUSY, -EACCES or -ENODEV */
56 if (ret != -EACCES)
57 return ret;
58 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
59 "%04x, lpm %02X, became 'not "
60 "operational'\n",
61 cdev->private->devno, sch->irq,
62 cdev->private->imask);
63
64 }
65 cdev->private->imask >>= 1;
66 cdev->private->iretry = 5;
67 }
68 return ret;
69}
70
71void
72ccw_device_sense_pgid_start(struct ccw_device *cdev)
73{
74 int ret;
75
76 cdev->private->state = DEV_STATE_SENSE_PGID;
77 cdev->private->imask = 0x80;
78 cdev->private->iretry = 5;
79 memset (&cdev->private->pgid, 0, sizeof (struct pgid));
80 ret = __ccw_device_sense_pgid_start(cdev);
81 if (ret && ret != -EBUSY)
82 ccw_device_sense_pgid_done(cdev, ret);
83}
84
85/*
86 * Called from interrupt context to check if a valid answer
87 * to Sense Path Group ID was received.
88 */
89static int
90__ccw_device_check_sense_pgid(struct ccw_device *cdev)
91{
92 struct subchannel *sch;
93 struct irb *irb;
94
95 sch = to_subchannel(cdev->dev.parent);
96 irb = &cdev->private->irb;
97 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
98 return -ETIME;
99 if (irb->esw.esw0.erw.cons &&
100	    (irb->ecw[0] & (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ))) {
101 /*
102 * If the device doesn't support the Sense Path Group ID
103		 * command, further retries wouldn't help ...
104 */
105 return -EOPNOTSUPP;
106 }
107 if (irb->esw.esw0.erw.cons) {
108 CIO_MSG_EVENT(2, "SNID - device %04x, unit check, "
109 "lpum %02X, cnt %02d, sns : "
110 "%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
111 cdev->private->devno,
112 irb->esw.esw0.sublog.lpum,
113 irb->esw.esw0.erw.scnt,
114 irb->ecw[0], irb->ecw[1],
115 irb->ecw[2], irb->ecw[3],
116 irb->ecw[4], irb->ecw[5],
117 irb->ecw[6], irb->ecw[7]);
118 return -EAGAIN;
119 }
120 if (irb->scsw.cc == 3) {
121 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
122 "%04x, lpm %02X, became 'not operational'\n",
123 cdev->private->devno, sch->irq, sch->orb.lpm);
124 return -EACCES;
125 }
126 if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
127 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel %04x "
128 "is reserved by someone else\n",
129 cdev->private->devno, sch->irq);
130 return -EUSERS;
131 }
132 return 0;
133}
134
135/*
136 * Got interrupt for Sense Path Group ID.
137 */
138void
139ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
140{
141 struct subchannel *sch;
142 struct irb *irb;
143 int ret;
144
145 irb = (struct irb *) __LC_IRB;
146 /* Retry sense pgid for cc=1. */
147 if (irb->scsw.stctl ==
148 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
149 if (irb->scsw.cc == 1) {
150 ret = __ccw_device_sense_pgid_start(cdev);
151 if (ret && ret != -EBUSY)
152 ccw_device_sense_pgid_done(cdev, ret);
153 }
154 return;
155 }
156 if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
157 return;
158 sch = to_subchannel(cdev->dev.parent);
159 ret = __ccw_device_check_sense_pgid(cdev);
160 memset(&cdev->private->irb, 0, sizeof(struct irb));
161 switch (ret) {
162 /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
163 case 0: /* Sense Path Group ID successful. */
164 if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET)
165 memcpy(&cdev->private->pgid, &global_pgid,
166 sizeof(struct pgid));
167 ccw_device_sense_pgid_done(cdev, 0);
168 break;
169 case -EOPNOTSUPP: /* Sense Path Group ID not supported */
170 ccw_device_sense_pgid_done(cdev, -EOPNOTSUPP);
171 break;
172 case -ETIME: /* Sense path group id stopped by timeout. */
173 ccw_device_sense_pgid_done(cdev, -ETIME);
174 break;
175 case -EACCES: /* channel is not operational. */
176 sch->lpm &= ~cdev->private->imask;
177 cdev->private->imask >>= 1;
178 cdev->private->iretry = 5;
179 /* Fall through. */
180 case -EAGAIN: /* Try again. */
181 ret = __ccw_device_sense_pgid_start(cdev);
182 if (ret != 0 && ret != -EBUSY)
183 ccw_device_sense_pgid_done(cdev, -ENODEV);
184 break;
185 case -EUSERS: /* device is reserved for someone else. */
186 ccw_device_sense_pgid_done(cdev, -EUSERS);
187 break;
188 }
189}
190
191/*
192 * Path Group ID helper function.
193 */
194static int
195__ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
196{
197 struct subchannel *sch;
198 struct ccw1 *ccw;
199 int ret;
200
201 sch = to_subchannel(cdev->dev.parent);
202
203 /* Setup sense path group id channel program. */
204 cdev->private->pgid.inf.fc = func;
205 ccw = cdev->private->iccws;
206 if (!cdev->private->flags.pgid_single) {
207 cdev->private->pgid.inf.fc |= SPID_FUNC_MULTI_PATH;
208 ccw->cmd_code = CCW_CMD_SUSPEND_RECONN;
209 ccw->cda = 0;
210 ccw->count = 0;
211 ccw->flags = CCW_FLAG_SLI | CCW_FLAG_CC;
212 ccw++;
213 } else
214 cdev->private->pgid.inf.fc |= SPID_FUNC_SINGLE_PATH;
215
216 ccw->cmd_code = CCW_CMD_SET_PGID;
217 ccw->cda = (__u32) __pa (&cdev->private->pgid);
218 ccw->count = sizeof (struct pgid);
219 ccw->flags = CCW_FLAG_SLI;
220
221 /* Reset device status. */
222 memset(&cdev->private->irb, 0, sizeof(struct irb));
223
224 /* Try multiple times. */
225 ret = -ENODEV;
226 if (cdev->private->iretry > 0) {
227 cdev->private->iretry--;
228 ret = cio_start (sch, cdev->private->iccws,
229 cdev->private->imask);
230 /* ret is 0, -EBUSY, -EACCES or -ENODEV */
231 if ((ret != -EACCES) && (ret != -ENODEV))
232 return ret;
233 }
234 /* PGID command failed on this path. Switch it off. */
235 sch->lpm &= ~cdev->private->imask;
236 sch->vpm &= ~cdev->private->imask;
237 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
238 "%04x, lpm %02X, became 'not operational'\n",
239 cdev->private->devno, sch->irq, cdev->private->imask);
240 return ret;
241}
242
243/*
244 * Called from interrupt context to check if a valid answer
245 * to Set Path Group ID was received.
246 */
247static int
248__ccw_device_check_pgid(struct ccw_device *cdev)
249{
250 struct subchannel *sch;
251 struct irb *irb;
252
253 sch = to_subchannel(cdev->dev.parent);
254 irb = &cdev->private->irb;
255 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
256 return -ETIME;
257 if (irb->esw.esw0.erw.cons) {
258 if (irb->ecw[0] & SNS0_CMD_REJECT)
259 return -EOPNOTSUPP;
260 /* Hmm, whatever happened, try again. */
261 CIO_MSG_EVENT(2, "SPID - device %04x, unit check, cnt %02d, "
262 "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
263 cdev->private->devno, irb->esw.esw0.erw.scnt,
264 irb->ecw[0], irb->ecw[1],
265 irb->ecw[2], irb->ecw[3],
266 irb->ecw[4], irb->ecw[5],
267 irb->ecw[6], irb->ecw[7]);
268 return -EAGAIN;
269 }
270 if (irb->scsw.cc == 3) {
271 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
272 "%04x, lpm %02X, became 'not operational'\n",
273 cdev->private->devno, sch->irq,
274 cdev->private->imask);
275 return -EACCES;
276 }
277 return 0;
278}
279
280static void
281__ccw_device_verify_start(struct ccw_device *cdev)
282{
283 struct subchannel *sch;
284 __u8 imask, func;
285 int ret;
286
287 sch = to_subchannel(cdev->dev.parent);
288 while (sch->vpm != sch->lpm) {
289 /* Find first unequal bit in vpm vs. lpm */
290 for (imask = 0x80; imask != 0; imask >>= 1)
291 if ((sch->vpm & imask) != (sch->lpm & imask))
292 break;
293 cdev->private->imask = imask;
294 func = (sch->vpm & imask) ?
295 SPID_FUNC_RESIGN : SPID_FUNC_ESTABLISH;
296 ret = __ccw_device_do_pgid(cdev, func);
297 if (ret == 0 || ret == -EBUSY)
298 return;
299 cdev->private->iretry = 5;
300 }
301 ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
302}
303
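The loop keeps selecting the most significant bit where the verified path mask (vpm) and the logical path mask (lpm) disagree, then establishes the path group on paths present only in lpm and resigns it on paths present only in vpm. An isolated, editorial model of the bit selection:

	/* Illustrative model: report the action for example masks. */
	static void model_verify(unsigned char vpm, unsigned char lpm)
	{
		unsigned char imask;

		for (imask = 0x80; imask != 0; imask >>= 1)
			if ((vpm & imask) != (lpm & imask))
				break;
		if (imask)	/* e.g. vpm=0x80, lpm=0xc0 picks 0x40: ESTABLISH */
			printk("path %02x: %s\n", imask,
			       (vpm & imask) ? "RESIGN" : "ESTABLISH");
	}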
304/*
305 * Got interrupt for Set Path Group ID.
306 */
307void
308ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
309{
310 struct subchannel *sch;
311 struct irb *irb;
312 int ret;
313
314 irb = (struct irb *) __LC_IRB;
315 /* Retry set pgid for cc=1. */
316 if (irb->scsw.stctl ==
317 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
318 if (irb->scsw.cc == 1)
319 __ccw_device_verify_start(cdev);
320 return;
321 }
322 if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
323 return;
324 sch = to_subchannel(cdev->dev.parent);
325 ret = __ccw_device_check_pgid(cdev);
326 memset(&cdev->private->irb, 0, sizeof(struct irb));
327 switch (ret) {
328 /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
329 case 0:
330 /* Establish or Resign Path Group done. Update vpm. */
331 if ((sch->lpm & cdev->private->imask) != 0)
332 sch->vpm |= cdev->private->imask;
333 else
334 sch->vpm &= ~cdev->private->imask;
335 cdev->private->iretry = 5;
336 __ccw_device_verify_start(cdev);
337 break;
338 case -EOPNOTSUPP:
339 /*
340 * One of those strange devices which claim to be able
341 * to do multipathing but not for Set Path Group ID.
342 */
343 if (cdev->private->flags.pgid_single) {
344 ccw_device_verify_done(cdev, -EOPNOTSUPP);
345 break;
346 }
347 cdev->private->flags.pgid_single = 1;
348 /* fall through. */
349 case -EAGAIN: /* Try again. */
350 __ccw_device_verify_start(cdev);
351 break;
352 case -ETIME: /* Set path group id stopped by timeout. */
353 ccw_device_verify_done(cdev, -ETIME);
354 break;
355 case -EACCES: /* channel is not operational. */
356 sch->lpm &= ~cdev->private->imask;
357 sch->vpm &= ~cdev->private->imask;
358 cdev->private->iretry = 5;
359 __ccw_device_verify_start(cdev);
360 break;
361 }
362}
363
364void
365ccw_device_verify_start(struct ccw_device *cdev)
366{
367 cdev->private->flags.pgid_single = 0;
368 cdev->private->iretry = 5;
369 __ccw_device_verify_start(cdev);
370}
371
372static void
373__ccw_device_disband_start(struct ccw_device *cdev)
374{
375 struct subchannel *sch;
376 int ret;
377
378 sch = to_subchannel(cdev->dev.parent);
379 while (cdev->private->imask != 0) {
380 if (sch->lpm & cdev->private->imask) {
381 ret = __ccw_device_do_pgid(cdev, SPID_FUNC_DISBAND);
382 if (ret == 0)
383 return;
384 }
385 cdev->private->iretry = 5;
386 cdev->private->imask >>= 1;
387 }
388	ccw_device_disband_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
389}
390
391/*
392 * Got interrupt for Unset Path Group ID.
393 */
394void
395ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
396{
397 struct subchannel *sch;
398 struct irb *irb;
399 int ret;
400
401 irb = (struct irb *) __LC_IRB;
402	/* Retry disband pgid for cc=1. */
403 if (irb->scsw.stctl ==
404 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
405 if (irb->scsw.cc == 1)
406 __ccw_device_disband_start(cdev);
407 return;
408 }
409 if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
410 return;
411 sch = to_subchannel(cdev->dev.parent);
412 ret = __ccw_device_check_pgid(cdev);
413 memset(&cdev->private->irb, 0, sizeof(struct irb));
414 switch (ret) {
415 /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
416 case 0: /* disband successful. */
417 sch->vpm = 0;
418 ccw_device_disband_done(cdev, ret);
419 break;
420 case -EOPNOTSUPP:
421 /*
422 * One of those strange devices which claim to be able
423 * to do multipathing but not for Unset Path Group ID.
424 */
425 cdev->private->flags.pgid_single = 1;
426 /* fall through. */
427 case -EAGAIN: /* Try again. */
428 __ccw_device_disband_start(cdev);
429 break;
430	case -ETIME:		/* Disband path group stopped by timeout. */
431 ccw_device_disband_done(cdev, -ETIME);
432 break;
433 case -EACCES: /* channel is not operational. */
434 cdev->private->imask >>= 1;
435 cdev->private->iretry = 5;
436 __ccw_device_disband_start(cdev);
437 break;
438 }
439}
440
441void
442ccw_device_disband_start(struct ccw_device *cdev)
443{
444 cdev->private->flags.pgid_single = 0;
445 cdev->private->iretry = 5;
446 cdev->private->imask = 0x80;
447 __ccw_device_disband_start(cdev);
448}
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
new file mode 100644
index 000000000000..4ab2e0d95009
--- /dev/null
+++ b/drivers/s390/cio/device_status.c
@@ -0,0 +1,385 @@
1/*
2 * drivers/s390/cio/device_status.c
3 *
4 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
5 * IBM Corporation
6 * Author(s): Cornelia Huck(cohuck@de.ibm.com)
7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 *
9 * Status accumulation and basic sense functions.
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/init.h>
15
16#include <asm/ccwdev.h>
17#include <asm/cio.h>
18
19#include "cio.h"
20#include "cio_debug.h"
21#include "css.h"
22#include "device.h"
23#include "ioasm.h"
24
25/*
26 * Check for any kind of channel or interface control check but don't
27 * issue the message for the console device
28 */
29static inline void
30ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
31{
32 if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK |
33 SCHN_STAT_CHN_CTRL_CHK |
34 SCHN_STAT_INTF_CTRL_CHK)))
35 return;
36
37 CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
38 "received"
39 " ... device %04X on subchannel %04X, dev_stat "
40 ": %02X sch_stat : %02X\n",
41 cdev->private->devno, cdev->private->irq,
42 cdev->private->irb.scsw.dstat,
43 cdev->private->irb.scsw.cstat);
44
45 if (irb->scsw.cc != 3) {
46 char dbf_text[15];
47
48 sprintf(dbf_text, "chk%x", cdev->private->irq);
49 CIO_TRACE_EVENT(0, dbf_text);
50 CIO_HEX_EVENT(0, &cdev->private->irb, sizeof (struct irb));
51 }
52}
53
54/*
55 * Some paths became not operational (pno bit in scsw is set).
56 */
57static void
58ccw_device_path_notoper(struct ccw_device *cdev)
59{
60 struct subchannel *sch;
61
62 sch = to_subchannel(cdev->dev.parent);
63 stsch (sch->irq, &sch->schib);
64
65 CIO_MSG_EVENT(0, "%s(%04x) - path(s) %02x are "
66 "not operational \n", __FUNCTION__, sch->irq,
67 sch->schib.pmcw.pnom);
68
69 sch->lpm &= ~sch->schib.pmcw.pnom;
70 if (cdev->private->options.pgroup)
71 cdev->private->flags.doverify = 1;
72}
73
74/*
75 * Copy valid bits from the extended control word to device irb.
76 */
77static inline void
78ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
79{
80 /*
81 * Copy extended control bit if it is valid... yes there
82 * are condition that have to be met for the extended control
83 * bit to have meaning. Sick.
84 */
85 cdev->private->irb.scsw.ectl = 0;
86 if ((irb->scsw.stctl & SCSW_STCTL_ALERT_STATUS) &&
87 !(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS))
88 cdev->private->irb.scsw.ectl = irb->scsw.ectl;
89 /* Check if extended control word is valid. */
90 if (!cdev->private->irb.scsw.ectl)
91 return;
92 /* Copy concurrent sense / model dependent information. */
93 memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw));
94}
95
96/*
97 * Check if extended status word is valid.
98 */
99static inline int
100ccw_device_accumulate_esw_valid(struct irb *irb)
101{
102 if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND)
103 return 0;
104 if (irb->scsw.stctl ==
105 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
106 !(irb->scsw.actl & SCSW_ACTL_SUSPENDED))
107 return 0;
108 return 1;
109}
110
111/*
112 * Copy valid bits from the extended status word to device irb.
113 */
114static inline void
115ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
116{
117 struct irb *cdev_irb;
118 struct sublog *cdev_sublog, *sublog;
119
120 if (!ccw_device_accumulate_esw_valid(irb))
121 return;
122
123 cdev_irb = &cdev->private->irb;
124
125 /* Copy last path used mask. */
126 cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
127
128 /* Copy subchannel logout information if esw is of format 0. */
129 if (irb->scsw.eswf) {
130 cdev_sublog = &cdev_irb->esw.esw0.sublog;
131 sublog = &irb->esw.esw0.sublog;
132 /* Copy extended status flags. */
133 cdev_sublog->esf = sublog->esf;
134 /*
135 * Copy fields that have a meaning for channel data check
136 * channel control check and interface control check.
137 */
138 if (irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK |
139 SCHN_STAT_CHN_CTRL_CHK |
140 SCHN_STAT_INTF_CTRL_CHK)) {
141 /* Copy ancillary report bit. */
142 cdev_sublog->arep = sublog->arep;
143 /* Copy field-validity-flags. */
144 cdev_sublog->fvf = sublog->fvf;
145 /* Copy storage access code. */
146 cdev_sublog->sacc = sublog->sacc;
147 /* Copy termination code. */
148 cdev_sublog->termc = sublog->termc;
149 /* Copy sequence code. */
150 cdev_sublog->seqc = sublog->seqc;
151 }
152 /* Copy device status check. */
153 cdev_sublog->devsc = sublog->devsc;
154 /* Copy secondary error. */
155 cdev_sublog->serr = sublog->serr;
156 /* Copy i/o-error alert. */
157 cdev_sublog->ioerr = sublog->ioerr;
158 /* Copy channel path timeout bit. */
159 if (irb->scsw.cstat & SCHN_STAT_INTF_CTRL_CHK)
160 cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
161 /* Copy failing storage address validity flag. */
162 cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
163 if (cdev_irb->esw.esw0.erw.fsavf) {
164 /* ... and copy the failing storage address. */
165 memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
166 sizeof (irb->esw.esw0.faddr));
167 /* ... and copy the failing storage address format. */
168 cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
169 }
170 /* Copy secondary ccw address validity bit. */
171 cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
172 if (irb->esw.esw0.erw.scavf)
173 /* ... and copy the secondary ccw address. */
174 cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;
175
176 }
177 /* FIXME: DCTI for format 2? */
178
179 /* Copy authorization bit. */
180 cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
181 /* Copy path verification required flag. */
182 cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
183 if (irb->esw.esw0.erw.pvrf && cdev->private->options.pgroup)
184 cdev->private->flags.doverify = 1;
185 /* Copy concurrent sense bit. */
186 cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
187 if (irb->esw.esw0.erw.cons)
188 cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
189}
190
191/*
192 * Accumulate status from irb to devstat.
193 */
194void
195ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
196{
197 struct irb *cdev_irb;
198
199 /*
200 * Check if the status pending bit is set in stctl.
201	 * If not, the remaining bits have no meaning and we must ignore them.
202	 * The esw is not meaningful either...
203 */
204 if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND))
205 return;
206
207 /* Check for channel checks and interface control checks. */
208 ccw_device_msg_control_check(cdev, irb);
209
210 /* Check for path not operational. */
211 if (irb->scsw.pno && irb->scsw.fctl != 0 &&
212 (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
213 (irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
214 ccw_device_path_notoper(cdev);
215
216 /*
217 * Don't accumulate unsolicited interrupts.
218 */
219 if ((irb->scsw.stctl ==
220 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
221 (!irb->scsw.cc))
222 return;
223
224 cdev_irb = &cdev->private->irb;
225
226 /* Copy bits which are valid only for the start function. */
227 if (irb->scsw.fctl & SCSW_FCTL_START_FUNC) {
228 /* Copy key. */
229 cdev_irb->scsw.key = irb->scsw.key;
230 /* Copy suspend control bit. */
231 cdev_irb->scsw.sctl = irb->scsw.sctl;
232 /* Accumulate deferred condition code. */
233 cdev_irb->scsw.cc |= irb->scsw.cc;
234 /* Copy ccw format bit. */
235 cdev_irb->scsw.fmt = irb->scsw.fmt;
236 /* Copy prefetch bit. */
237 cdev_irb->scsw.pfch = irb->scsw.pfch;
238 /* Copy initial-status-interruption-control. */
239 cdev_irb->scsw.isic = irb->scsw.isic;
240 /* Copy address limit checking control. */
241 cdev_irb->scsw.alcc = irb->scsw.alcc;
242 /* Copy suppress suspend bit. */
243 cdev_irb->scsw.ssi = irb->scsw.ssi;
244 }
245
246 /* Take care of the extended control bit and extended control word. */
247 ccw_device_accumulate_ecw(cdev, irb);
248
249 /* Accumulate function control. */
250 cdev_irb->scsw.fctl |= irb->scsw.fctl;
251 /* Copy activity control. */
252	cdev_irb->scsw.actl = irb->scsw.actl;
253 /* Accumulate status control. */
254 cdev_irb->scsw.stctl |= irb->scsw.stctl;
255 /*
256 * Copy ccw address if it is valid. This is a bit simplified
257 * but should be close enough for all practical purposes.
258 */
259 if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) ||
260 ((irb->scsw.stctl ==
261 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
262 (irb->scsw.actl & SCSW_ACTL_DEVACT) &&
263 (irb->scsw.actl & SCSW_ACTL_SCHACT)) ||
264 (irb->scsw.actl & SCSW_ACTL_SUSPENDED))
265 cdev_irb->scsw.cpa = irb->scsw.cpa;
266 /* Accumulate device status, but not the device busy flag. */
267 cdev_irb->scsw.dstat &= ~DEV_STAT_BUSY;
268 cdev_irb->scsw.dstat |= irb->scsw.dstat;
269 /* Accumulate subchannel status. */
270 cdev_irb->scsw.cstat |= irb->scsw.cstat;
271 /* Copy residual count if it is valid. */
272 if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
273 (irb->scsw.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) == 0)
274 cdev_irb->scsw.count = irb->scsw.count;
275
276 /* Take care of bits in the extended status word. */
277 ccw_device_accumulate_esw(cdev, irb);
278
279 /*
280 * Check whether we must issue a SENSE CCW ourselves if there is no
281 * concurrent sense facility installed for the subchannel.
282 * No sense is required if no delayed sense is pending
283 * and we did not get a unit check without sense information.
284 *
285 * Note: We should check for ioinfo[irq]->flags.consns but VM
286 * violates the ESA/390 architecture and doesn't present an
287 * operand exception for virtual devices without concurrent
288 * sense facility available/supported when enabling the
289 * concurrent sense facility.
290 */
291 if ((cdev_irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
292 !(cdev_irb->esw.esw0.erw.cons))
293 cdev->private->flags.dosense = 1;
294}
295
296/*
297 * Do a basic sense.
298 */
299int
300ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
301{
302 struct subchannel *sch;
303
304 sch = to_subchannel(cdev->dev.parent);
305
306 /* A sense is required, can we do it now ? */
307 if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
308 /*
309		 * we received a unit check but we have no final
310 * status yet, therefore we must delay the SENSE
311 * processing. We must not report this intermediate
312 * status to the device interrupt handler.
313 */
314 return -EBUSY;
315
316 /*
317 * We have ending status but no sense information. Do a basic sense.
318 */
319 sch = to_subchannel(cdev->dev.parent);
320 sch->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE;
321 sch->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw);
322 sch->sense_ccw.count = SENSE_MAX_COUNT;
323 sch->sense_ccw.flags = CCW_FLAG_SLI;
324
325 return cio_start (sch, &sch->sense_ccw, 0xff);
326}
327
328/*
329 * Add information from basic sense to devstat.
330 */
331void
332ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
333{
334 /*
335 * Check if the status pending bit is set in stctl.
336	 * If not, the remaining bits have no meaning and we must ignore them.
337	 * The esw is not meaningful either...
338 */
339 if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND))
340 return;
341
342 /* Check for channel checks and interface control checks. */
343 ccw_device_msg_control_check(cdev, irb);
344
345 /* Check for path not operational. */
346 if (irb->scsw.pno && irb->scsw.fctl != 0 &&
347 (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
348 (irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
349 ccw_device_path_notoper(cdev);
350
351 if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
352 (irb->scsw.dstat & DEV_STAT_CHN_END)) {
353 cdev->private->irb.esw.esw0.erw.cons = 1;
354 cdev->private->flags.dosense = 0;
355 }
356 /* Check if path verification is required. */
357 if (ccw_device_accumulate_esw_valid(irb) &&
358 irb->esw.esw0.erw.pvrf && cdev->private->options.pgroup)
359 cdev->private->flags.doverify = 1;
360}
361
362/*
363 * This function accumulates the status into the private devstat and
364 * starts a basic sense if one is needed.
365 */
366int
367ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
368{
369 ccw_device_accumulate_irb(cdev, irb);
370 if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
371 return -EBUSY;
372 /* Check for basic sense. */
373 if (cdev->private->flags.dosense &&
374 !(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) {
375 cdev->private->irb.esw.esw0.erw.cons = 1;
376 cdev->private->flags.dosense = 0;
377 return 0;
378 }
379 if (cdev->private->flags.dosense) {
380 ccw_device_do_sense(cdev, irb);
381 return -EBUSY;
382 }
383 return 0;
384}
385
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
new file mode 100644
index 000000000000..c874607d9a80
--- /dev/null
+++ b/drivers/s390/cio/ioasm.h
@@ -0,0 +1,228 @@
1#ifndef S390_CIO_IOASM_H
2#define S390_CIO_IOASM_H
3
4/*
5 * TPI info structure
6 */
7struct tpi_info {
8 __u32 reserved1 : 16; /* reserved 0x00000001 */
9 __u32 irq : 16; /* aka. subchannel number */
10 __u32 intparm; /* interruption parameter */
11 __u32 adapter_IO : 1;
12 __u32 reserved2 : 1;
13 __u32 isc : 3;
14 __u32 reserved3 : 12;
15 __u32 int_type : 3;
16 __u32 reserved4 : 12;
17} __attribute__ ((packed));
18
19
20/*
21 * Some S390 specific IO instructions as inline functions
22 */
23
24extern __inline__ int stsch(int irq, volatile struct schib *addr)
25{
26 int ccode;
27
28 __asm__ __volatile__(
29 " lr 1,%1\n"
30 " stsch 0(%2)\n"
31 " ipm %0\n"
32 " srl %0,28"
33 : "=d" (ccode)
34 : "d" (irq | 0x10000), "a" (addr)
35 : "cc", "1" );
36 return ccode;
37}
38
39extern __inline__ int msch(int irq, volatile struct schib *addr)
40{
41 int ccode;
42
43 __asm__ __volatile__(
44 " lr 1,%1\n"
45 " msch 0(%2)\n"
46 " ipm %0\n"
47 " srl %0,28"
48 : "=d" (ccode)
49 : "d" (irq | 0x10000L), "a" (addr)
50 : "cc", "1" );
51 return ccode;
52}
53
54extern __inline__ int msch_err(int irq, volatile struct schib *addr)
55{
56 int ccode;
57
58 __asm__ __volatile__(
59 " lhi %0,%3\n"
60 " lr 1,%1\n"
61 " msch 0(%2)\n"
62 "0: ipm %0\n"
63 " srl %0,28\n"
64 "1:\n"
65#ifdef CONFIG_ARCH_S390X
66 ".section __ex_table,\"a\"\n"
67 " .align 8\n"
68 " .quad 0b,1b\n"
69 ".previous"
70#else
71 ".section __ex_table,\"a\"\n"
72 " .align 4\n"
73 " .long 0b,1b\n"
74 ".previous"
75#endif
76 : "=&d" (ccode)
77 : "d" (irq | 0x10000L), "a" (addr), "K" (-EIO)
78 : "cc", "1" );
79 return ccode;
80}
81
82extern __inline__ int tsch(int irq, volatile struct irb *addr)
83{
84 int ccode;
85
86 __asm__ __volatile__(
87 " lr 1,%1\n"
88 " tsch 0(%2)\n"
89 " ipm %0\n"
90 " srl %0,28"
91 : "=d" (ccode)
92 : "d" (irq | 0x10000L), "a" (addr)
93 : "cc", "1" );
94 return ccode;
95}
96
97extern __inline__ int tpi( volatile struct tpi_info *addr)
98{
99 int ccode;
100
101 __asm__ __volatile__(
102 " tpi 0(%1)\n"
103 " ipm %0\n"
104 " srl %0,28"
105 : "=d" (ccode)
106 : "a" (addr)
107 : "cc", "1" );
108 return ccode;
109}
110
111extern __inline__ int ssch(int irq, volatile struct orb *addr)
112{
113 int ccode;
114
115 __asm__ __volatile__(
116 " lr 1,%1\n"
117 " ssch 0(%2)\n"
118 " ipm %0\n"
119 " srl %0,28"
120 : "=d" (ccode)
121 : "d" (irq | 0x10000L), "a" (addr)
122 : "cc", "1" );
123 return ccode;
124}
125
126extern __inline__ int rsch(int irq)
127{
128 int ccode;
129
130 __asm__ __volatile__(
131 " lr 1,%1\n"
132 " rsch\n"
133 " ipm %0\n"
134 " srl %0,28"
135 : "=d" (ccode)
136 : "d" (irq | 0x10000L)
137 : "cc", "1" );
138 return ccode;
139}
140
141extern __inline__ int csch(int irq)
142{
143 int ccode;
144
145 __asm__ __volatile__(
146 " lr 1,%1\n"
147 " csch\n"
148 " ipm %0\n"
149 " srl %0,28"
150 : "=d" (ccode)
151 : "d" (irq | 0x10000L)
152 : "cc", "1" );
153 return ccode;
154}
155
156extern __inline__ int hsch(int irq)
157{
158 int ccode;
159
160 __asm__ __volatile__(
161 " lr 1,%1\n"
162 " hsch\n"
163 " ipm %0\n"
164 " srl %0,28"
165 : "=d" (ccode)
166 : "d" (irq | 0x10000L)
167 : "cc", "1" );
168 return ccode;
169}
170
171extern __inline__ int xsch(int irq)
172{
173 int ccode;
174
175 __asm__ __volatile__(
176 " lr 1,%1\n"
177 " .insn rre,0xb2760000,%1,0\n"
178 " ipm %0\n"
179 " srl %0,28"
180 : "=d" (ccode)
181 : "d" (irq | 0x10000L)
182 : "cc", "1" );
183 return ccode;
184}
185
186extern __inline__ int chsc(void *chsc_area)
187{
188 int cc;
189
190 __asm__ __volatile__ (
191 ".insn rre,0xb25f0000,%1,0 \n\t"
192 "ipm %0 \n\t"
193 "srl %0,28 \n\t"
194 : "=d" (cc)
195 : "d" (chsc_area)
196 : "cc" );
197
198 return cc;
199}
200
201extern __inline__ int iac( void)
202{
203 int ccode;
204
205 __asm__ __volatile__(
206 " iac 1\n"
207 " ipm %0\n"
208 " srl %0,28"
209 : "=d" (ccode) : : "cc", "1" );
210 return ccode;
211}
212
213extern __inline__ int rchp(int chpid)
214{
215 int ccode;
216
217 __asm__ __volatile__(
218 " lr 1,%1\n"
219 " rchp\n"
220 " ipm %0\n"
221 " srl %0,28"
222 : "=d" (ccode)
223 : "d" (chpid)
224 : "cc", "1" );
225 return ccode;
226}
227
228#endif
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
new file mode 100644
index 000000000000..bbe9f45d1438
--- /dev/null
+++ b/drivers/s390/cio/qdio.c
@@ -0,0 +1,3468 @@
1/*
2 *
3 * linux/drivers/s390/cio/qdio.c
4 *
5 * Linux for S/390 QDIO base support, Hipersocket base support
6 * version 2
7 *
8 * Copyright 2000,2002 IBM Corporation
9 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
10 * 2.6 cio integration by Cornelia Huck <cohuck@de.ibm.com>
11 *
12 * Restriction: only 63 iqdio subchannels can have their own indicator;
13 * after that, subsequent subchannels share one indicator
14 *
15 *
16 *
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 */
32
33#include <linux/config.h>
34#include <linux/module.h>
35#include <linux/init.h>
36
37#include <linux/slab.h>
38#include <linux/kernel.h>
39#include <linux/proc_fs.h>
40#include <linux/timer.h>
41
42#include <asm/ccwdev.h>
43#include <asm/io.h>
44#include <asm/atomic.h>
45#include <asm/semaphore.h>
46#include <asm/timex.h>
47
48#include <asm/debug.h>
49#include <asm/qdio.h>
50
51#include "cio.h"
52#include "css.h"
53#include "device.h"
54#include "airq.h"
55#include "qdio.h"
56#include "ioasm.h"
57#include "chsc.h"
58
59#define VERSION_QDIO_C "$Revision: 1.98 $"
60
61/****************** MODULE PARAMETER VARIABLES ********************/
62MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
63MODULE_DESCRIPTION("QDIO base support version 2, " \
64 "Copyright 2000 IBM Corporation");
65MODULE_LICENSE("GPL");
66
67/******************** HERE WE GO ***********************************/
68
69static const char version[] = "QDIO base support version 2 ("
70 VERSION_QDIO_C "/" VERSION_QDIO_H "/" VERSION_CIO_QDIO_H ")";
71
72#ifdef QDIO_PERFORMANCE_STATS
73static int proc_perf_file_registration;
74static unsigned long i_p_c, i_p_nc, o_p_c, o_p_nc, ii_p_c, ii_p_nc;
75static struct qdio_perf_stats perf_stats;
76#endif /* QDIO_PERFORMANCE_STATS */
77
78static int hydra_thinints;
79static int omit_svs;
80
81static int indicator_used[INDICATORS_PER_CACHELINE];
82static __u32 * volatile indicators;
83static __u32 volatile spare_indicator;
84static atomic_t spare_indicator_usecount;
85
86static debug_info_t *qdio_dbf_setup;
87static debug_info_t *qdio_dbf_sbal;
88static debug_info_t *qdio_dbf_trace;
89static debug_info_t *qdio_dbf_sense;
90#ifdef CONFIG_QDIO_DEBUG
91static debug_info_t *qdio_dbf_slsb_out;
92static debug_info_t *qdio_dbf_slsb_in;
93#endif /* CONFIG_QDIO_DEBUG */
94
95/* iQDIO stuff: */
96static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
97 during a while loop */
98static DEFINE_SPINLOCK(ttiq_list_lock);
99static int register_thinint_result;
100static void tiqdio_tl(unsigned long);
101static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0);
102
103/* not a macro, as one of the arguments is atomic_read */
104static inline int
105qdio_min(int a,int b)
106{
107 if (a<b)
108 return a;
109 else
110 return b;
111}
112
113/***************** SCRUBBER HELPER ROUTINES **********************/
114
115static inline volatile __u64
116qdio_get_micros(void)
117{
118 return (get_clock() >> 10); /* time>>12 is microseconds */
119}
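/*
 * On the arithmetic above: bit 51 of the TOD clock is incremented
 * every microsecond, so get_clock() >> 12 would be whole microseconds;
 * shifting by 10 keeps two more bits, i.e. the value returned is in
 * units of 1/4 microsecond (1 ms of elapsed time shows up as 4000).
 */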
120
121/*
122 * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve
123 * the q in any case, so that we'll not be interrupted when we are in
124 * qdio_mark_tiq... shouldn't have a really bad impact, as reserving almost
125 * always works (famous last words)
126 */
127static inline int
128qdio_reserve_q(struct qdio_q *q)
129{
130 return atomic_add_return(1,&q->use_count) - 1;
131}
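/*
 * Worked example: atomic_add_return yields the new counter value, so
 * qdio_reserve_q returns the use_count as it was *before* the
 * increment. 0 -> 1 returns 0 ("we got the queue exclusively"),
 * 1 -> 2 returns nonzero ("someone else holds it"). Callers treat a
 * nonzero return as busy, immediately call qdio_release_q to undo
 * their increment, and retry via the tasklet later.
 */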
132
133static inline void
134qdio_release_q(struct qdio_q *q)
135{
136 atomic_dec(&q->use_count);
137}
138
139static inline void
140qdio_set_slsb(volatile char *slsb, unsigned char value)
141{
142 xchg((char*)slsb,value);
143}
144
145static inline int
146qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
147 unsigned int gpr3)
148{
149 int cc;
150
151 QDIO_DBF_TEXT4(0,trace,"sigasync");
152 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
153
154#ifdef QDIO_PERFORMANCE_STATS
155 perf_stats.siga_syncs++;
156#endif /* QDIO_PERFORMANCE_STATS */
157
158 cc = do_siga_sync(q->irq, gpr2, gpr3);
159 if (cc)
160 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
161
162 return cc;
163}
164
165static inline int
166qdio_siga_sync_q(struct qdio_q *q)
167{
168 if (q->is_input_q)
169 return qdio_siga_sync(q, 0, q->mask);
170 return qdio_siga_sync(q, q->mask, 0);
171}
172
173/*
174 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
175 * an access exception
176 */
177static inline int
178qdio_siga_output(struct qdio_q *q)
179{
180 int cc;
181 __u32 busy_bit;
182 __u64 start_time=0;
183
184#ifdef QDIO_PERFORMANCE_STATS
185 perf_stats.siga_outs++;
186#endif /* QDIO_PERFORMANCE_STATS */
187
188 QDIO_DBF_TEXT4(0,trace,"sigaout");
189 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
190
191 for (;;) {
192 cc = do_siga_output(q->irq, q->mask, &busy_bit);
193//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
194 if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
195 if (!start_time)
196 start_time=NOW;
197 if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE)
198 break;
199 } else
200 break;
201 }
202
203 if ((cc==2) && (busy_bit))
204 cc |= QDIO_SIGA_ERROR_B_BIT_SET;
205
206 if (cc)
207 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
208
209 return cc;
210}
211
212static inline int
213qdio_siga_input(struct qdio_q *q)
214{
215 int cc;
216
217 QDIO_DBF_TEXT4(0,trace,"sigain");
218 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
219
220#ifdef QDIO_PERFORMANCE_STATS
221 perf_stats.siga_ins++;
222#endif /* QDIO_PERFORMANCE_STATS */
223
224 cc = do_siga_input(q->irq, q->mask);
225
226 if (cc)
227 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
228
229 return cc;
230}
231
232/* locked by the locks in qdio_activate and qdio_cleanup */
233static __u32 * volatile
234qdio_get_indicator(void)
235{
236 int i;
237
238 for (i=1;i<INDICATORS_PER_CACHELINE;i++)
239 if (!indicator_used[i]) {
240 indicator_used[i]=1;
241 return indicators+i;
242 }
243 atomic_inc(&spare_indicator_usecount);
244 return (__u32 * volatile) &spare_indicator;
245}
246
247/* locked by the locks in qdio_activate and qdio_cleanup */
248static void
249qdio_put_indicator(__u32 *addr)
250{
251 int i;
252
253 if ( (addr) && (addr!=&spare_indicator) ) {
254 i=addr-indicators;
255 indicator_used[i]=0;
256 }
257 if (addr == &spare_indicator)
258 atomic_dec(&spare_indicator_usecount);
259}
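/*
 * Each subchannel gets one __u32 slot of the indicators array for
 * itself; note that slot 0 is never handed out (the search starts at
 * i=1). Once all slots are in use, subchannels share the single
 * spare_indicator, which is reference-counted so that
 * __tiqdio_inbound_processing can tell shared from exclusive
 * indicators -- presumably the "only 63 subchannels have their own
 * indicator" restriction mentioned in the header comment.
 */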
260
261static inline void
262tiqdio_clear_summary_bit(__u32 *location)
263{
264 QDIO_DBF_TEXT5(0,trace,"clrsummb");
265 QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
266
267 xchg(location,0);
268}
269
270static inline void
271tiqdio_set_summary_bit(__u32 *location)
272{
273 QDIO_DBF_TEXT5(0,trace,"setsummb");
274 QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
275
276 xchg(location,-1);
277}
278
279static inline void
280tiqdio_sched_tl(void)
281{
282 tasklet_hi_schedule(&tiqdio_tasklet);
283}
284
285static inline void
286qdio_mark_tiq(struct qdio_q *q)
287{
288 unsigned long flags;
289
290 QDIO_DBF_TEXT4(0,trace,"mark iq");
291 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
292
293 spin_lock_irqsave(&ttiq_list_lock,flags);
294 if (unlikely(atomic_read(&q->is_in_shutdown)))
295 goto out_unlock;
296
297 if (!q->is_input_q)
298 goto out_unlock;
299
300 if ((q->list_prev) || (q->list_next))
301 goto out_unlock;
302
303 if (!tiq_list) {
304 tiq_list=q;
305 q->list_prev=q;
306 q->list_next=q;
307 } else {
308 q->list_next=tiq_list;
309 q->list_prev=tiq_list->list_prev;
310 tiq_list->list_prev->list_next=q;
311 tiq_list->list_prev=q;
312 }
313 spin_unlock_irqrestore(&ttiq_list_lock,flags);
314
315 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
316 tiqdio_sched_tl();
317 return;
318out_unlock:
319 spin_unlock_irqrestore(&ttiq_list_lock,flags);
320 return;
321}
322
323static inline void
324qdio_mark_q(struct qdio_q *q)
325{
326 QDIO_DBF_TEXT4(0,trace,"mark q");
327 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
328
329 if (unlikely(atomic_read(&q->is_in_shutdown)))
330 return;
331
332 tasklet_schedule(&q->tasklet);
333}
334
335static inline int
336qdio_stop_polling(struct qdio_q *q)
337{
338#ifdef QDIO_USE_PROCESSING_STATE
339 int gsf;
340
341 if (!atomic_swap(&q->polling,0))
342 return 1;
343
344 QDIO_DBF_TEXT4(0,trace,"stoppoll");
345 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
346
347 /* show the card that we are not polling anymore */
348 if (!q->is_input_q)
349 return 1;
350
351 gsf=GET_SAVED_FRONTIER(q);
352 set_slsb(&q->slsb.acc.val[(gsf+QDIO_MAX_BUFFERS_PER_Q-1)&
353 (QDIO_MAX_BUFFERS_PER_Q-1)],
354 SLSB_P_INPUT_NOT_INIT);
355 /*
356 * we don't issue this SYNC_MEMORY, as we trust Rick T and
357 * moreover will not use the PROCESSING state under VM, so
358 * q->polling was 0 anyway
359 */
360 /*SYNC_MEMORY;*/
361 if (q->slsb.acc.val[gsf]!=SLSB_P_INPUT_PRIMED)
362 return 1;
363 /*
364 * set our summary bit again, as otherwise there is a
365 * small window we can miss between resetting it and
366 * checking for PRIMED state
367 */
368 if (q->is_thinint_q)
369 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
370 return 0;
371
372#else /* QDIO_USE_PROCESSING_STATE */
373 return 1;
374#endif /* QDIO_USE_PROCESSING_STATE */
375}
376
377/*
378 * see the comment in do_QDIO and before qdio_reserve_q about the
379 * sophisticated locking outside of unmark_q, so that we don't need to
380 * disable the interrupts :-)
381*/
382static inline void
383qdio_unmark_q(struct qdio_q *q)
384{
385 unsigned long flags;
386
387 QDIO_DBF_TEXT4(0,trace,"unmark q");
388 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
389
390 if ((!q->list_prev)||(!q->list_next))
391 return;
392
393 if ((q->is_thinint_q)&&(q->is_input_q)) {
394 /* iQDIO */
395 spin_lock_irqsave(&ttiq_list_lock,flags);
396		/* in case cleanup has done this already and simultaneously
397 * qdio_unmark_q is called from the interrupt handler, we've
398 * got to check this in this specific case again */
399 if ((!q->list_prev)||(!q->list_next))
400 goto out;
401 if (q->list_next==q) {
402 /* q was the only interesting q */
403 tiq_list=NULL;
404 q->list_next=NULL;
405 q->list_prev=NULL;
406 } else {
407 q->list_next->list_prev=q->list_prev;
408 q->list_prev->list_next=q->list_next;
409 tiq_list=q->list_next;
410 q->list_next=NULL;
411 q->list_prev=NULL;
412 }
413out:
414 spin_unlock_irqrestore(&ttiq_list_lock,flags);
415 }
416}
417
418static inline unsigned long
419tiqdio_clear_global_summary(void)
420{
421 unsigned long time;
422
423 QDIO_DBF_TEXT5(0,trace,"clrglobl");
424
425 time = do_clear_global_summary();
426
427 QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long));
428
429 return time;
430}
431
432
433/************************* OUTBOUND ROUTINES *******************************/
434
435inline static int
436qdio_get_outbound_buffer_frontier(struct qdio_q *q)
437{
438 int f,f_mod_no;
439 volatile char *slsb;
440 int first_not_to_check;
441 char dbf_text[15];
442
443 QDIO_DBF_TEXT4(0,trace,"getobfro");
444 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
445
446 slsb=&q->slsb.acc.val[0];
447 f_mod_no=f=q->first_to_check;
448 /*
449 * f points to already processed elements, so f+no_used is correct...
450 * ... but: we don't check 128 buffers, as otherwise
451 * qdio_has_outbound_q_moved would return 0
452 */
453 first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
454 (QDIO_MAX_BUFFERS_PER_Q-1));
455
456 if ((!q->is_iqdio_q)&&(!q->hydra_gives_outbound_pcis))
457 SYNC_MEMORY;
458
459check_next:
460 if (f==first_not_to_check)
461 goto out;
462
463 switch(slsb[f_mod_no]) {
464
465 /* the adapter has not fetched the output yet */
466 case SLSB_CU_OUTPUT_PRIMED:
467 QDIO_DBF_TEXT5(0,trace,"outpprim");
468 break;
469
470 /* the adapter got it */
471 case SLSB_P_OUTPUT_EMPTY:
472 atomic_dec(&q->number_of_buffers_used);
473 f++;
474 f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
475 QDIO_DBF_TEXT5(0,trace,"outpempt");
476 goto check_next;
477
478 case SLSB_P_OUTPUT_ERROR:
479 QDIO_DBF_TEXT3(0,trace,"outperr");
480 sprintf(dbf_text,"%x-%x-%x",f_mod_no,
481 q->sbal[f_mod_no]->element[14].sbalf.value,
482 q->sbal[f_mod_no]->element[15].sbalf.value);
483 QDIO_DBF_TEXT3(1,trace,dbf_text);
484 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
485
486 /* kind of process the buffer */
487 set_slsb(&q->slsb.acc.val[f_mod_no], SLSB_P_OUTPUT_NOT_INIT);
488
489 /*
490 * we increment the frontier, as this buffer
491 * was processed obviously
492 */
493 atomic_dec(&q->number_of_buffers_used);
494 f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
495
496 if (q->qdio_error)
497 q->error_status_flags|=
498 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
499 q->qdio_error=SLSB_P_OUTPUT_ERROR;
500 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
501
502 break;
503
504 /* no new buffers */
505 default:
506 QDIO_DBF_TEXT5(0,trace,"outpni");
507 }
508out:
509 return (q->first_to_check=f_mod_no);
510}
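/*
 * Worked example of the frontier arithmetic above, with
 * QDIO_MAX_BUFFERS_PER_Q == 128: the queue is a ring, so all SLSB
 * indices are taken modulo 128 via "& (QDIO_MAX_BUFFERS_PER_Q-1)".
 * If first_to_check is 126 and 5 buffers are in use, f runs
 * 126, 127, 128, 129, 130 while f_mod_no wraps 126, 127, 0, 1, 2;
 * only the unwrapped f is compared against first_not_to_check
 * (126+5 == 131), while f_mod_no indexes the SLSB.
 */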
511
512/* all buffers are processed */
513inline static int
514qdio_is_outbound_q_done(struct qdio_q *q)
515{
516 int no_used;
517#ifdef CONFIG_QDIO_DEBUG
518 char dbf_text[15];
519#endif
520
521 no_used=atomic_read(&q->number_of_buffers_used);
522
523#ifdef CONFIG_QDIO_DEBUG
524 if (no_used) {
525 sprintf(dbf_text,"oqisnt%02x",no_used);
526 QDIO_DBF_TEXT4(0,trace,dbf_text);
527 } else {
528 QDIO_DBF_TEXT4(0,trace,"oqisdone");
529 }
530 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
531#endif /* CONFIG_QDIO_DEBUG */
532 return (no_used==0);
533}
534
535inline static int
536qdio_has_outbound_q_moved(struct qdio_q *q)
537{
538 int i;
539
540 i=qdio_get_outbound_buffer_frontier(q);
541
542 if ( (i!=GET_SAVED_FRONTIER(q)) ||
543 (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
544 SAVE_FRONTIER(q,i);
545 QDIO_DBF_TEXT4(0,trace,"oqhasmvd");
546 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
547 return 1;
548 } else {
549 QDIO_DBF_TEXT4(0,trace,"oqhsntmv");
550 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
551 return 0;
552 }
553}
554
555inline static void
556qdio_kick_outbound_q(struct qdio_q *q)
557{
558 int result;
559#ifdef CONFIG_QDIO_DEBUG
560 char dbf_text[15];
561
562 QDIO_DBF_TEXT4(0,trace,"kickoutq");
563 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
564#endif /* CONFIG_QDIO_DEBUG */
565
566 if (!q->siga_out)
567 return;
568
569 /* here's the story with cc=2 and busy bit set (thanks, Rick):
570 * VM's CP could present us cc=2 and busy bit set on SIGA-write
571 * during reconfiguration of their Guest LAN (only in HIPERS mode,
572 * QDIO mode is asynchronous -- cc=2 and busy bit there will take
573 * the queues down immediately; and not being under VM we have a
574 * problem on cc=2 and busy bit set right away).
575 *
576	 * Therefore, if such a condition occurs, qdio_siga_output will
577	 * retry constantly for a short time. If it doesn't change, it will
578	 * increase the busy_siga_counter, save the timestamp, and
579	 * schedule the queue for later processing (via mark_q, using the
580	 * queue tasklet). __qdio_outbound_processing will check the
581	 * counter. If non-zero, it will call qdio_kick_outbound_q as many
582	 * times as the value of the counter. This will attempt further
583	 * SIGA instructions. For each successful SIGA, the counter is
584	 * decreased; for failing SIGAs, the counter remains the
585	 * same.
586	 * After some time of no movement, qdio_kick_outbound_q will
587	 * finally fail and report corresponding error codes to the
588	 * upper layer module, so it can take the queues down.
589	 *
590	 * Note that this is a change from the original HiperSockets design
591	 * (which said cc=2 and busy bit means take the queues down), but
592	 * back then Guest LAN didn't exist... excessive cc=2 with busy bit
593	 * conditions will still take the queues down, but the threshold is
594	 * higher due to the Guest LAN environment.
595 */
596
597
598 result=qdio_siga_output(q);
599
600 switch (result) {
601 case 0:
602 /* went smooth this time, reset timestamp */
603#ifdef CONFIG_QDIO_DEBUG
604 QDIO_DBF_TEXT3(0,trace,"cc2reslv");
605 sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
606 atomic_read(&q->busy_siga_counter));
607 QDIO_DBF_TEXT3(0,trace,dbf_text);
608#endif /* CONFIG_QDIO_DEBUG */
609 q->timing.busy_start=0;
610 break;
611 case (2|QDIO_SIGA_ERROR_B_BIT_SET):
612 /* cc=2 and busy bit: */
613 atomic_inc(&q->busy_siga_counter);
614
615 /* if the last siga was successful, save
616 * timestamp here */
617 if (!q->timing.busy_start)
618 q->timing.busy_start=NOW;
619
620 /* if we're in time, don't touch error_status_flags
621 * and siga_error */
622 if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
623 qdio_mark_q(q);
624 break;
625 }
626 QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
627#ifdef CONFIG_QDIO_DEBUG
628 sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
629 atomic_read(&q->busy_siga_counter));
630 QDIO_DBF_TEXT3(0,trace,dbf_text);
631#endif /* CONFIG_QDIO_DEBUG */
632 /* else fallthrough and report error */
633 default:
634 /* for plain cc=1, 2 or 3: */
635 if (q->siga_error)
636 q->error_status_flags|=
637 QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
638 q->error_status_flags|=
639 QDIO_STATUS_LOOK_FOR_ERROR;
640 q->siga_error=result;
641 }
642}
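/*
 * Illustrative timeline for the cc=2/busy bit handling above:
 *   t0: SIGA-w returns cc=2 with busy bit -> busy_siga_counter is
 *       incremented, busy_start is set to t0, the queue is marked
 *       for the tasklet.
 *   t0..t0+QDIO_BUSY_BIT_GIVE_UP: __qdio_outbound_processing retries
 *       one SIGA per counter tick; a clean SIGA (cc=0) resets
 *       busy_start and the condition is resolved.
 *   beyond t0+QDIO_BUSY_BIT_GIVE_UP: the case falls through to
 *       default, which sets QDIO_STATUS_LOOK_FOR_ERROR and records
 *       the cc in siga_error, so the upper layer takes the queues
 *       down.
 */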
643
644inline static void
645qdio_kick_outbound_handler(struct qdio_q *q)
646{
647 int start, end, real_end, count;
648#ifdef CONFIG_QDIO_DEBUG
649 char dbf_text[15];
650#endif
651
652 start = q->first_element_to_kick;
653 /* last_move_ftc was just updated */
654 real_end = GET_SAVED_FRONTIER(q);
655 end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)&
656 (QDIO_MAX_BUFFERS_PER_Q-1);
657 count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)&
658 (QDIO_MAX_BUFFERS_PER_Q-1);
659
660#ifdef CONFIG_QDIO_DEBUG
661 QDIO_DBF_TEXT4(0,trace,"kickouth");
662 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
663
664 sprintf(dbf_text,"s=%2xc=%2x",start,count);
665 QDIO_DBF_TEXT4(0,trace,dbf_text);
666#endif /* CONFIG_QDIO_DEBUG */
667
668 if (q->state==QDIO_IRQ_STATE_ACTIVE)
669 q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
670 q->error_status_flags,
671 q->qdio_error,q->siga_error,q->q_no,start,count,
672 q->int_parm);
673
674 /* for the next time: */
675 q->first_element_to_kick=real_end;
676 q->qdio_error=0;
677 q->siga_error=0;
678 q->error_status_flags=0;
679}
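/*
 * On the index arithmetic in qdio_kick_outbound_handler: end is the
 * last buffer actually processed (real_end-1 modulo 128) and count is
 * end-start+1 modulo 128. E.g. start == 10 and real_end == 14 give
 * end == 13 and count == (13+129-10) & 127 == 4, so buffers
 * 10, 11, 12 and 13 are reported to the handler.
 */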
680
681static inline void
682__qdio_outbound_processing(struct qdio_q *q)
683{
684 int siga_attempts;
685
686 QDIO_DBF_TEXT4(0,trace,"qoutproc");
687 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
688
689 if (unlikely(qdio_reserve_q(q))) {
690 qdio_release_q(q);
691#ifdef QDIO_PERFORMANCE_STATS
692 o_p_c++;
693#endif /* QDIO_PERFORMANCE_STATS */
694 /* as we're sissies, we'll check next time */
695 if (likely(!atomic_read(&q->is_in_shutdown))) {
696 qdio_mark_q(q);
697 QDIO_DBF_TEXT4(0,trace,"busy,agn");
698 }
699 return;
700 }
701#ifdef QDIO_PERFORMANCE_STATS
702 o_p_nc++;
703 perf_stats.tl_runs++;
704#endif /* QDIO_PERFORMANCE_STATS */
705
706 /* see comment in qdio_kick_outbound_q */
707 siga_attempts=atomic_read(&q->busy_siga_counter);
708 while (siga_attempts) {
709 atomic_dec(&q->busy_siga_counter);
710 qdio_kick_outbound_q(q);
711 siga_attempts--;
712 }
713
714 if (qdio_has_outbound_q_moved(q))
715 qdio_kick_outbound_handler(q);
716
717 if (q->is_iqdio_q) {
718 /*
719		 * for asynchronous queues, we'd better check whether the
720		 * fill level is too high. for synchronous queues, the fill
721		 * level will never get that high.
722 */
723 if (atomic_read(&q->number_of_buffers_used)>
724 IQDIO_FILL_LEVEL_TO_POLL)
725 qdio_mark_q(q);
726
727 } else if (!q->hydra_gives_outbound_pcis)
728 if (!qdio_is_outbound_q_done(q))
729 qdio_mark_q(q);
730
731 qdio_release_q(q);
732}
733
734static void
735qdio_outbound_processing(struct qdio_q *q)
736{
737 __qdio_outbound_processing(q);
738}
739
740/************************* INBOUND ROUTINES *******************************/
741
742
743inline static int
744qdio_get_inbound_buffer_frontier(struct qdio_q *q)
745{
746 int f,f_mod_no;
747 volatile char *slsb;
748 int first_not_to_check;
749#ifdef CONFIG_QDIO_DEBUG
750 char dbf_text[15];
751#endif /* CONFIG_QDIO_DEBUG */
752#ifdef QDIO_USE_PROCESSING_STATE
753 int last_position=-1;
754#endif /* QDIO_USE_PROCESSING_STATE */
755
756 QDIO_DBF_TEXT4(0,trace,"getibfro");
757 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
758
759 slsb=&q->slsb.acc.val[0];
760 f_mod_no=f=q->first_to_check;
761 /*
762 * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved
763 * would return 0
764 */
765 first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
766 (QDIO_MAX_BUFFERS_PER_Q-1));
767
768 /*
769	 * we don't use this one, as a PCI (or we ourselves, after a
770	 * thin interrupt) will sync the queues
771 */
772 /* SYNC_MEMORY;*/
773
774check_next:
775 f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
776 if (f==first_not_to_check)
777 goto out;
778 switch (slsb[f_mod_no]) {
779
780 /* CU_EMPTY means frontier is reached */
781 case SLSB_CU_INPUT_EMPTY:
782 QDIO_DBF_TEXT5(0,trace,"inptempt");
783 break;
784
785 /* P_PRIMED means set slsb to P_PROCESSING and move on */
786 case SLSB_P_INPUT_PRIMED:
787 QDIO_DBF_TEXT5(0,trace,"inptprim");
788
789#ifdef QDIO_USE_PROCESSING_STATE
790 /*
791		 * when running under VM, polling the input queues would
792		 * kill VM in terms of CP overhead
793 */
794 if (q->siga_sync) {
795 set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
796 } else {
797 /* set the previous buffer to NOT_INIT. The current
798 * buffer will be set to PROCESSING at the end of
799 * this function to avoid further interrupts. */
800 if (last_position>=0)
801 set_slsb(&slsb[last_position],
802 SLSB_P_INPUT_NOT_INIT);
803 atomic_set(&q->polling,1);
804 last_position=f_mod_no;
805 }
806#else /* QDIO_USE_PROCESSING_STATE */
807 set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
808#endif /* QDIO_USE_PROCESSING_STATE */
809 /*
810 * not needed, as the inbound queue will be synced on the next
811		 * siga-r; alternatively, tiqdio_is_inbound_q_done will do the siga-s
812 */
813 /*SYNC_MEMORY;*/
814 f++;
815 atomic_dec(&q->number_of_buffers_used);
816 goto check_next;
817
818 case SLSB_P_INPUT_NOT_INIT:
819 case SLSB_P_INPUT_PROCESSING:
820 QDIO_DBF_TEXT5(0,trace,"inpnipro");
821 break;
822
823 /* P_ERROR means frontier is reached, break and report error */
824 case SLSB_P_INPUT_ERROR:
825#ifdef CONFIG_QDIO_DEBUG
826 sprintf(dbf_text,"inperr%2x",f_mod_no);
827 QDIO_DBF_TEXT3(1,trace,dbf_text);
828#endif /* CONFIG_QDIO_DEBUG */
829 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
830
831 /* kind of process the buffer */
832 set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
833
834 if (q->qdio_error)
835 q->error_status_flags|=
836 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
837 q->qdio_error=SLSB_P_INPUT_ERROR;
838 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
839
840 /* we increment the frontier, as this buffer
841 * was processed obviously */
842 f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
843 atomic_dec(&q->number_of_buffers_used);
844
845#ifdef QDIO_USE_PROCESSING_STATE
846 last_position=-1;
847#endif /* QDIO_USE_PROCESSING_STATE */
848
849 break;
850
851 /* everything else means frontier not changed (HALTED or so) */
852 default:
853 break;
854 }
855out:
856 q->first_to_check=f_mod_no;
857
858#ifdef QDIO_USE_PROCESSING_STATE
859 if (last_position>=0)
860 set_slsb(&slsb[last_position],SLSB_P_INPUT_PROCESSING);
861#endif /* QDIO_USE_PROCESSING_STATE */
862
863 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
864
865 return q->first_to_check;
866}
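/*
 * Example of the QDIO_USE_PROCESSING_STATE handling above: suppose
 * buffers 5, 6 and 7 are PRIMED and q->siga_sync is not set. The scan
 * sets 5 and then 6 to NOT_INIT as soon as their successor is seen,
 * remembers the newest buffer in last_position, and only after the
 * scan marks 7 as PROCESSING. The adapter thus always sees the most
 * recently polled buffer in the PROCESSING state, which avoids
 * further interrupts for it, until qdio_stop_polling sets it back to
 * NOT_INIT when polling ends.
 */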
867
868inline static int
869qdio_has_inbound_q_moved(struct qdio_q *q)
870{
871 int i;
872
873#ifdef QDIO_PERFORMANCE_STATS
874 static int old_pcis=0;
875 static int old_thinints=0;
876
877 if ((old_pcis==perf_stats.pcis)&&(old_thinints==perf_stats.thinints))
878 perf_stats.start_time_inbound=NOW;
879 else
880 old_pcis=perf_stats.pcis;
881#endif /* QDIO_PERFORMANCE_STATS */
882
883 i=qdio_get_inbound_buffer_frontier(q);
884 if ( (i!=GET_SAVED_FRONTIER(q)) ||
885 (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
886 SAVE_FRONTIER(q,i);
887 if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis))
888 SAVE_TIMESTAMP(q);
889
890 QDIO_DBF_TEXT4(0,trace,"inhasmvd");
891 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
892 return 1;
893 } else {
894 QDIO_DBF_TEXT4(0,trace,"inhsntmv");
895 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
896 return 0;
897 }
898}
899
900/* means, no more buffers to be filled */
901inline static int
902tiqdio_is_inbound_q_done(struct qdio_q *q)
903{
904 int no_used;
905#ifdef CONFIG_QDIO_DEBUG
906 char dbf_text[15];
907#endif
908
909 no_used=atomic_read(&q->number_of_buffers_used);
910
911 /* propagate the change from 82 to 80 through VM */
912 SYNC_MEMORY;
913
914#ifdef CONFIG_QDIO_DEBUG
915 if (no_used) {
916 sprintf(dbf_text,"iqisnt%02x",no_used);
917 QDIO_DBF_TEXT4(0,trace,dbf_text);
918 } else {
919 QDIO_DBF_TEXT4(0,trace,"iniqisdo");
920 }
921 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
922#endif /* CONFIG_QDIO_DEBUG */
923
924 if (!no_used)
925 return 1;
926
927 if (!q->siga_sync)
928 /* we'll check for more primed buffers in qeth_stop_polling */
929 return 0;
930
931 if (q->slsb.acc.val[q->first_to_check]!=SLSB_P_INPUT_PRIMED)
932		/*
933		 * nothing more to do, if the next buffer is not PRIMED.
934		 * note that we did a SYNC_MEMORY before, so a
935		 * synchronization has taken place.
936		 * we will return 0 below, as there is nothing to do
937		 * (stop_polling is not necessary, as we have not been
938		 * using the PROCESSING state)
939		 */
940 return 0;
941
942 /*
943	 * ok, the next input buffer is primed. that means that device state
944 * change indicator and adapter local summary are set, so we will find
945 * it next time.
946 * we will return 0 below, as there is nothing to do, except scheduling
947 * ourselves for the next time.
948 */
949 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
950 tiqdio_sched_tl();
951 return 0;
952}
953
954inline static int
955qdio_is_inbound_q_done(struct qdio_q *q)
956{
957 int no_used;
958#ifdef CONFIG_QDIO_DEBUG
959 char dbf_text[15];
960#endif
961
962 no_used=atomic_read(&q->number_of_buffers_used);
963
964 /*
965 * we need that one for synchronization with the adapter, as it
966 * does a kind of PCI avoidance
967 */
968 SYNC_MEMORY;
969
970 if (!no_used) {
971 QDIO_DBF_TEXT4(0,trace,"inqisdnA");
972 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
974 return 1;
975 }
976
977 if (q->slsb.acc.val[q->first_to_check]==SLSB_P_INPUT_PRIMED) {
978 /* we got something to do */
979 QDIO_DBF_TEXT4(0,trace,"inqisntA");
980 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
981 return 0;
982 }
983
984 /* on VM, we don't poll, so the q is always done here */
985 if (q->siga_sync)
986 return 1;
987 if (q->hydra_gives_outbound_pcis)
988 return 1;
989
990 /*
991 * at this point we know, that inbound first_to_check
992 * has (probably) not moved (see qdio_inbound_processing)
993 */
994 if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) {
995#ifdef CONFIG_QDIO_DEBUG
996 QDIO_DBF_TEXT4(0,trace,"inqisdon");
997 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
998 sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
999 QDIO_DBF_TEXT4(0,trace,dbf_text);
1000#endif /* CONFIG_QDIO_DEBUG */
1001 return 1;
1002 } else {
1003#ifdef CONFIG_QDIO_DEBUG
1004 QDIO_DBF_TEXT4(0,trace,"inqisntd");
1005 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1006 sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
1007 QDIO_DBF_TEXT4(0,trace,dbf_text);
1008#endif /* CONFIG_QDIO_DEBUG */
1009 return 0;
1010 }
1011}
1012
1013inline static void
1014qdio_kick_inbound_handler(struct qdio_q *q)
1015{
1016 int count, start, end, real_end, i;
1017#ifdef CONFIG_QDIO_DEBUG
1018 char dbf_text[15];
1019#endif
1020
1021 QDIO_DBF_TEXT4(0,trace,"kickinh");
1022 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1023
1024 start=q->first_element_to_kick;
1025 real_end=q->first_to_check;
1026 end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1027
1028 i=start;
1029 count=0;
1030 while (1) {
1031 count++;
1032 if (i==end)
1033 break;
1034 i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1035 }
1036
1037#ifdef CONFIG_QDIO_DEBUG
1038 sprintf(dbf_text,"s=%2xc=%2x",start,count);
1039 QDIO_DBF_TEXT4(0,trace,dbf_text);
1040#endif /* CONFIG_QDIO_DEBUG */
1041
1042 if (likely(q->state==QDIO_IRQ_STATE_ACTIVE))
1043 q->handler(q->cdev,
1044 QDIO_STATUS_INBOUND_INT|q->error_status_flags,
1045 q->qdio_error,q->siga_error,q->q_no,start,count,
1046 q->int_parm);
1047
1048 /* for the next time: */
1049 q->first_element_to_kick=real_end;
1050 q->qdio_error=0;
1051 q->siga_error=0;
1052 q->error_status_flags=0;
1053
1054#ifdef QDIO_PERFORMANCE_STATS
1055 perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound;
1056 perf_stats.inbound_cnt++;
1057#endif /* QDIO_PERFORMANCE_STATS */
1058}
1059
1060static inline void
1061__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
1062{
1063 struct qdio_irq *irq_ptr;
1064 struct qdio_q *oq;
1065 int i;
1066
1067 QDIO_DBF_TEXT4(0,trace,"iqinproc");
1068 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1069
1070 /*
1071 * we first want to reserve the q, so that we know, that we don't
1072 * interrupt ourselves and call qdio_unmark_q, as is_in_shutdown might
1073 * be set
1074 */
1075 if (unlikely(qdio_reserve_q(q))) {
1076 qdio_release_q(q);
1077#ifdef QDIO_PERFORMANCE_STATS
1078 ii_p_c++;
1079#endif /* QDIO_PERFORMANCE_STATS */
1080 /*
1081 * as we might just be about to stop polling, we make
1082 * sure that we check again at least once more
1083 */
1084 tiqdio_sched_tl();
1085 return;
1086 }
1087#ifdef QDIO_PERFORMANCE_STATS
1088 ii_p_nc++;
1089#endif /* QDIO_PERFORMANCE_STATS */
1090 if (unlikely(atomic_read(&q->is_in_shutdown))) {
1091 qdio_unmark_q(q);
1092 goto out;
1093 }
1094
1095 /*
1096 * we reset spare_ind_was_set, when the queue does not use the
1097 * spare indicator
1098 */
1099 if (spare_ind_was_set)
1100 spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator);
1101
1102 if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set)
1103 goto out;
1104 /*
1105 * q->dev_st_chg_ind is the indicator, be it shared or not.
1106 * only clear it, if indicator is non-shared
1107 */
1108 if (!spare_ind_was_set)
1109 tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
1110
1111 if (q->hydra_gives_outbound_pcis) {
1112 if (!q->siga_sync_done_on_thinints) {
1113 SYNC_MEMORY_ALL;
1114 } else if ((!q->siga_sync_done_on_outb_tis)&&
1115 (q->hydra_gives_outbound_pcis)) {
1116 SYNC_MEMORY_ALL_OUTB;
1117 }
1118 } else {
1119 SYNC_MEMORY;
1120 }
1121 /*
1122 * maybe we have to do work on our outbound queues... at least
1123 * we have to check the outbound-int-capable thinint-capable
1124 * queues
1125 */
1126 if (q->hydra_gives_outbound_pcis) {
1127 irq_ptr = (struct qdio_irq*)q->irq_ptr;
1128 for (i=0;i<irq_ptr->no_output_qs;i++) {
1129 oq = irq_ptr->output_qs[i];
1130#ifdef QDIO_PERFORMANCE_STATS
1131 perf_stats.tl_runs--;
1132#endif /* QDIO_PERFORMANCE_STATS */
1133 if (!qdio_is_outbound_q_done(oq))
1134 __qdio_outbound_processing(oq);
1135 }
1136 }
1137
1138 if (!qdio_has_inbound_q_moved(q))
1139 goto out;
1140
1141 qdio_kick_inbound_handler(q);
1142 if (tiqdio_is_inbound_q_done(q))
1143 if (!qdio_stop_polling(q)) {
1144 /*
1145 * we set the flags to get into the stuff next time,
1146 * see also comment in qdio_stop_polling
1147 */
1148 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1149 tiqdio_sched_tl();
1150 }
1151out:
1152 qdio_release_q(q);
1153}
1154
1155static void
1156tiqdio_inbound_processing(struct qdio_q *q)
1157{
1158 __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount));
1159}
1160
1161static inline void
1162__qdio_inbound_processing(struct qdio_q *q)
1163{
1164 int q_laps=0;
1165
1166 QDIO_DBF_TEXT4(0,trace,"qinproc");
1167 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1168
1169 if (unlikely(qdio_reserve_q(q))) {
1170 qdio_release_q(q);
1171#ifdef QDIO_PERFORMANCE_STATS
1172 i_p_c++;
1173#endif /* QDIO_PERFORMANCE_STATS */
1174 /* as we're sissies, we'll check next time */
1175 if (likely(!atomic_read(&q->is_in_shutdown))) {
1176 qdio_mark_q(q);
1177 QDIO_DBF_TEXT4(0,trace,"busy,agn");
1178 }
1179 return;
1180 }
1181#ifdef QDIO_PERFORMANCE_STATS
1182 i_p_nc++;
1183 perf_stats.tl_runs++;
1184#endif /* QDIO_PERFORMANCE_STATS */
1185
1186again:
1187 if (qdio_has_inbound_q_moved(q)) {
1188 qdio_kick_inbound_handler(q);
1189 if (!qdio_stop_polling(q)) {
1190 q_laps++;
1191 if (q_laps<QDIO_Q_LAPS)
1192 goto again;
1193 }
1194 qdio_mark_q(q);
1195 } else {
1196 if (!qdio_is_inbound_q_done(q))
1197 /* means poll time is not yet over */
1198 qdio_mark_q(q);
1199 }
1200
1201 qdio_release_q(q);
1202}
1203
1204static void
1205qdio_inbound_processing(struct qdio_q *q)
1206{
1207 __qdio_inbound_processing(q);
1208}
1209
1210/************************* MAIN ROUTINES *******************************/
1211
1212#ifdef QDIO_USE_PROCESSING_STATE
1213static inline int
1214tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
1215{
1216 if (!q) {
1217 tiqdio_sched_tl();
1218 return 0;
1219 }
1220
1221 /*
1222 * under VM, we have not used the PROCESSING state, so no
1223 * need to stop polling
1224 */
1225 if (q->siga_sync)
1226 return 2;
1227
1228 if (unlikely(qdio_reserve_q(q))) {
1229 qdio_release_q(q);
1230#ifdef QDIO_PERFORMANCE_STATS
1231 ii_p_c++;
1232#endif /* QDIO_PERFORMANCE_STATS */
1233 /*
1234 * as we might just be about to stop polling, we make
1235 * sure that we check again at least once more
1236 */
1237
1238 /*
1239 * sanity -- we'd get here without setting the
1240 * dev st chg ind
1241 */
1242 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1243 tiqdio_sched_tl();
1244 return 0;
1245 }
1246 if (qdio_stop_polling(q)) {
1247 qdio_release_q(q);
1248 return 2;
1249 }
1250 if (q_laps<QDIO_Q_LAPS-1) {
1251 qdio_release_q(q);
1252 return 3;
1253 }
1254 /*
1255 * we set the flags to get into the stuff
1256 * next time, see also comment in qdio_stop_polling
1257 */
1258 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1259 tiqdio_sched_tl();
1260 qdio_release_q(q);
1261 return 1;
1262
1263}
1264#endif /* QDIO_USE_PROCESSING_STATE */
1265
1266static inline void
1267tiqdio_inbound_checks(void)
1268{
1269 struct qdio_q *q;
1270 int spare_ind_was_set=0;
1271#ifdef QDIO_USE_PROCESSING_STATE
1272 int q_laps=0;
1273#endif /* QDIO_USE_PROCESSING_STATE */
1274
1275 QDIO_DBF_TEXT4(0,trace,"iqdinbck");
1276 QDIO_DBF_TEXT5(0,trace,"iqlocsum");
1277
1278#ifdef QDIO_USE_PROCESSING_STATE
1279again:
1280#endif /* QDIO_USE_PROCESSING_STATE */
1281
1282 /* when the spare indicator is used and set, save that and clear it */
1283 if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) {
1284 spare_ind_was_set = 1;
1285 tiqdio_clear_summary_bit((__u32*)&spare_indicator);
1286 }
1287
1288 q=(struct qdio_q*)tiq_list;
1289 do {
1290 if (!q)
1291 break;
1292 __tiqdio_inbound_processing(q, spare_ind_was_set);
1293 q=(struct qdio_q*)q->list_next;
1294 } while (q!=(struct qdio_q*)tiq_list);
1295
1296#ifdef QDIO_USE_PROCESSING_STATE
1297 q=(struct qdio_q*)tiq_list;
1298 do {
1299 int ret;
1300
1301 ret = tiqdio_reset_processing_state(q, q_laps);
1302 switch (ret) {
1303 case 0:
1304 return;
1305 case 1:
1306 q_laps++;
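			/* fall through */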
1307 case 2:
1308 q = (struct qdio_q*)q->list_next;
1309 break;
1310 default:
1311 q_laps++;
1312 goto again;
1313 }
1314 } while (q!=(struct qdio_q*)tiq_list);
1315#endif /* QDIO_USE_PROCESSING_STATE */
1316}
1317
1318static void
1319tiqdio_tl(unsigned long data)
1320{
1321 QDIO_DBF_TEXT4(0,trace,"iqdio_tl");
1322
1323#ifdef QDIO_PERFORMANCE_STATS
1324 perf_stats.tl_runs++;
1325#endif /* QDIO_PERFORMANCE_STATS */
1326
1327 tiqdio_inbound_checks();
1328}
1329
1330/********************* GENERAL HELPER_ROUTINES ***********************/
1331
1332static void
1333qdio_release_irq_memory(struct qdio_irq *irq_ptr)
1334{
1335 int i;
1336
1337 for (i=0;i<QDIO_MAX_QUEUES_PER_IRQ;i++) {
1338 if (!irq_ptr->input_qs[i])
1339 goto next;
1340
1341 if (irq_ptr->input_qs[i]->slib)
1342 kfree(irq_ptr->input_qs[i]->slib);
1343 kfree(irq_ptr->input_qs[i]);
1344
1345next:
1346 if (!irq_ptr->output_qs[i])
1347 continue;
1348
1349 if (irq_ptr->output_qs[i]->slib)
1350 kfree(irq_ptr->output_qs[i]->slib);
1351 kfree(irq_ptr->output_qs[i]);
1352
1353 }
1354 kfree(irq_ptr->qdr);
1355 kfree(irq_ptr);
1356}
1357
1358static void
1359qdio_set_impl_params(struct qdio_irq *irq_ptr,
1360 unsigned int qib_param_field_format,
1361 /* pointer to 128 bytes or NULL, if no param field */
1362 unsigned char *qib_param_field,
1363		     unsigned int no_input_qs,
1364		     unsigned int no_output_qs,
1365		     /* pointers to no_queues*128 words of data or NULL */
1366		     unsigned long *input_slib_elements,
1367 unsigned long *output_slib_elements)
1368{
1369 int i,j;
1370
1371 if (!irq_ptr)
1372 return;
1373
1374 irq_ptr->qib.pfmt=qib_param_field_format;
1375 if (qib_param_field)
1376 memcpy(irq_ptr->qib.parm,qib_param_field,
1377 QDIO_MAX_BUFFERS_PER_Q);
1378
1379 if (input_slib_elements)
1380 for (i=0;i<no_input_qs;i++) {
1381 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1382 irq_ptr->input_qs[i]->slib->slibe[j].parms=
1383 input_slib_elements[
1384 i*QDIO_MAX_BUFFERS_PER_Q+j];
1385 }
1386 if (output_slib_elements)
1387 for (i=0;i<no_output_qs;i++) {
1388 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1389 irq_ptr->output_qs[i]->slib->slibe[j].parms=
1390 output_slib_elements[
1391 i*QDIO_MAX_BUFFERS_PER_Q+j];
1392 }
1393}
1394
1395static int
1396qdio_alloc_qs(struct qdio_irq *irq_ptr,
1397 int no_input_qs, int no_output_qs)
1398{
1399 int i;
1400 struct qdio_q *q;
1401 int result=-ENOMEM;
1402
1403 for (i=0;i<no_input_qs;i++) {
1404 q=kmalloc(sizeof(struct qdio_q),GFP_KERNEL);
1405
1406 if (!q) {
1407 QDIO_PRINT_ERR("kmalloc of q failed!\n");
1408 goto out;
1409 }
1410
1411 memset(q,0,sizeof(struct qdio_q));
1412
1413 q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL);
1414 if (!q->slib) {
1415 QDIO_PRINT_ERR("kmalloc of slib failed!\n");
1416 goto out;
1417 }
1418
1419 irq_ptr->input_qs[i]=q;
1420 }
1421
1422 for (i=0;i<no_output_qs;i++) {
1423 q=kmalloc(sizeof(struct qdio_q),GFP_KERNEL);
1424
1425 if (!q) {
1426 goto out;
1427 }
1428
1429 memset(q,0,sizeof(struct qdio_q));
1430
1431 q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL);
1432 if (!q->slib) {
1433 QDIO_PRINT_ERR("kmalloc of slib failed!\n");
1434 goto out;
1435 }
1436
1437 irq_ptr->output_qs[i]=q;
1438 }
1439
1440 result=0;
1441out:
1442 return result;
1443}
1444
1445static void
1446qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1447 int no_input_qs, int no_output_qs,
1448 qdio_handler_t *input_handler,
1449 qdio_handler_t *output_handler,
1450 unsigned long int_parm,int q_format,
1451 unsigned long flags,
1452 void **inbound_sbals_array,
1453 void **outbound_sbals_array)
1454{
1455 struct qdio_q *q;
1456 int i,j;
1457 char dbf_text[20]; /* see qdio_initialize */
1458 void *ptr;
1459 int available;
1460
1461 sprintf(dbf_text,"qfqs%4x",cdev->private->irq);
1462 QDIO_DBF_TEXT0(0,setup,dbf_text);
1463 for (i=0;i<no_input_qs;i++) {
1464 q=irq_ptr->input_qs[i];
1465
1466 memset(q,0,((char*)&q->slib)-((char*)q));
1467 sprintf(dbf_text,"in-q%4x",i);
1468 QDIO_DBF_TEXT0(0,setup,dbf_text);
1469 QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
1470
1471 memset(q->slib,0,PAGE_SIZE);
1472 q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
1473
1474 available=0;
1475
1476 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1477 q->sbal[j]=*(inbound_sbals_array++);
1478
1479 q->queue_type=q_format;
1480 q->int_parm=int_parm;
1481 q->irq=irq_ptr->irq;
1482 q->irq_ptr = irq_ptr;
1483 q->cdev = cdev;
1484 q->mask=1<<(31-i);
1485 q->q_no=i;
1486 q->is_input_q=1;
1487 q->first_to_check=0;
1488 q->last_move_ftc=0;
1489 q->handler=input_handler;
1490 q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind;
1491
1492 q->tasklet.data=(unsigned long)q;
1493 /* q->is_thinint_q isn't valid at this time, but
1494 * irq_ptr->is_thinint_irq is */
1495 q->tasklet.func=(void(*)(unsigned long))
1496 ((irq_ptr->is_thinint_irq)?&tiqdio_inbound_processing:
1497 &qdio_inbound_processing);
1498
1499 /* actually this is not used for inbound queues. yet. */
1500 atomic_set(&q->busy_siga_counter,0);
1501 q->timing.busy_start=0;
1502
1503/* for (j=0;j<QDIO_STATS_NUMBER;j++)
1504 q->timing.last_transfer_times[j]=(qdio_get_micros()/
1505 QDIO_STATS_NUMBER)*j;
1506 q->timing.last_transfer_index=QDIO_STATS_NUMBER-1;
1507*/
1508
1509 /* fill in slib */
1510 if (i>0) irq_ptr->input_qs[i-1]->slib->nsliba=
1511 (unsigned long)(q->slib);
1512 q->slib->sla=(unsigned long)(q->sl);
1513 q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
1514
1515 /* fill in sl */
1516 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1517 q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
1518
1519 QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
1520 ptr=(void*)q->sl;
1521 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1522 ptr=(void*)&q->slsb;
1523 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1524 ptr=(void*)q->sbal[0];
1525 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1526
1527 /* fill in slsb */
1528 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) {
1529 set_slsb(&q->slsb.acc.val[j],
1530 SLSB_P_INPUT_NOT_INIT);
1531/* q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/
1532 }
1533 }
1534
1535 for (i=0;i<no_output_qs;i++) {
1536 q=irq_ptr->output_qs[i];
1537 memset(q,0,((char*)&q->slib)-((char*)q));
1538
1539 sprintf(dbf_text,"outq%4x",i);
1540 QDIO_DBF_TEXT0(0,setup,dbf_text);
1541 QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
1542
1543 memset(q->slib,0,PAGE_SIZE);
1544 q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
1545
1546 available=0;
1547
1548 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1549 q->sbal[j]=*(outbound_sbals_array++);
1550
1551 q->queue_type=q_format;
1552 q->int_parm=int_parm;
1553 q->is_input_q=0;
1554 q->irq=irq_ptr->irq;
1555 q->cdev = cdev;
1556 q->irq_ptr = irq_ptr;
1557 q->mask=1<<(31-i);
1558 q->q_no=i;
1559 q->first_to_check=0;
1560 q->last_move_ftc=0;
1561 q->handler=output_handler;
1562
1563 q->tasklet.data=(unsigned long)q;
1564 q->tasklet.func=(void(*)(unsigned long))
1565 &qdio_outbound_processing;
1566
1567 atomic_set(&q->busy_siga_counter,0);
1568 q->timing.busy_start=0;
1569
1570 /* fill in slib */
1571 if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba=
1572 (unsigned long)(q->slib);
1573 q->slib->sla=(unsigned long)(q->sl);
1574 q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
1575
1576 /* fill in sl */
1577 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1578 q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
1579
1580 QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
1581 ptr=(void*)q->sl;
1582 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1583 ptr=(void*)&q->slsb;
1584 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1585 ptr=(void*)q->sbal[0];
1586 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1587
1588 /* fill in slsb */
1589 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) {
1590 set_slsb(&q->slsb.acc.val[j],
1591 SLSB_P_OUTPUT_NOT_INIT);
1592/* q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/
1593 }
1594 }
1595}
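/*
 * On the queue masks set up above: q->mask = 1 << (31-i) assigns each
 * queue one bit of the 32-bit SIGA queue mask, most significant bit
 * first -- queue 0 gets 0x80000000, queue 1 gets 0x40000000, and so
 * on. These are the masks that qdio_siga_sync and friends pass as
 * their gpr2 (output) resp. gpr3 (input) arguments to address
 * individual queues of the subchannel.
 */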
1596
1597static void
1598qdio_fill_thresholds(struct qdio_irq *irq_ptr,
1599 unsigned int no_input_qs,
1600 unsigned int no_output_qs,
1601 unsigned int min_input_threshold,
1602 unsigned int max_input_threshold,
1603 unsigned int min_output_threshold,
1604 unsigned int max_output_threshold)
1605{
1606 int i;
1607 struct qdio_q *q;
1608
1609 for (i=0;i<no_input_qs;i++) {
1610 q=irq_ptr->input_qs[i];
1611 q->timing.threshold=max_input_threshold;
1612/* for (j=0;j<QDIO_STATS_CLASSES;j++) {
1613 q->threshold_classes[j].threshold=
1614 min_input_threshold+
1615 (max_input_threshold-min_input_threshold)/
1616 QDIO_STATS_CLASSES;
1617 }
1618 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
1619 }
1620 for (i=0;i<no_output_qs;i++) {
1621 q=irq_ptr->output_qs[i];
1622 q->timing.threshold=max_output_threshold;
1623/* for (j=0;j<QDIO_STATS_CLASSES;j++) {
1624 q->threshold_classes[j].threshold=
1625 min_output_threshold+
1626 (max_output_threshold-min_output_threshold)/
1627 QDIO_STATS_CLASSES;
1628 }
1629 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
1630 }
1631}
1632
1633static int
1634tiqdio_thinint_handler(void)
1635{
1636 QDIO_DBF_TEXT4(0,trace,"thin_int");
1637
1638#ifdef QDIO_PERFORMANCE_STATS
1639 perf_stats.thinints++;
1640 perf_stats.start_time_inbound=NOW;
1641#endif /* QDIO_PERFORMANCE_STATS */
1642
1643 /* SVS only when needed:
1644 * issue SVS to benefit from iqdio interrupt avoidance
1645 * (SVS clears AISOI)*/
1646 if (!omit_svs)
1647 tiqdio_clear_global_summary();
1648
1649 tiqdio_inbound_checks();
1650 return 0;
1651}
1652
1653static void
1654qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
1655{
1656 int i;
1657#ifdef CONFIG_QDIO_DEBUG
1658 char dbf_text[15];
1659
1660 QDIO_DBF_TEXT5(0,trace,"newstate");
1661 sprintf(dbf_text,"%4x%4x",irq_ptr->irq,state);
1662 QDIO_DBF_TEXT5(0,trace,dbf_text);
1663#endif /* CONFIG_QDIO_DEBUG */
1664
1665 irq_ptr->state=state;
1666 for (i=0;i<irq_ptr->no_input_qs;i++)
1667 irq_ptr->input_qs[i]->state=state;
1668 for (i=0;i<irq_ptr->no_output_qs;i++)
1669 irq_ptr->output_qs[i]->state=state;
1670 mb();
1671}
1672
1673static inline void
1674qdio_irq_check_sense(int irq, struct irb *irb)
1675{
1676 char dbf_text[15];
1677
1678 if (irb->esw.esw0.erw.cons) {
1679 sprintf(dbf_text,"sens%4x",irq);
1680 QDIO_DBF_TEXT2(1,trace,dbf_text);
1681 QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
1682
1683 QDIO_PRINT_WARN("sense data available on qdio channel.\n");
1684 HEXDUMP16(WARN,"irb: ",irb);
1685 HEXDUMP16(WARN,"sense data: ",irb->ecw);
1686 }
1687
1688}
1689
1690static inline void
1691qdio_handle_pci(struct qdio_irq *irq_ptr)
1692{
1693 int i;
1694 struct qdio_q *q;
1695
1696#ifdef QDIO_PERFORMANCE_STATS
1697 perf_stats.pcis++;
1698 perf_stats.start_time_inbound=NOW;
1699#endif /* QDIO_PERFORMANCE_STATS */
1700 for (i=0;i<irq_ptr->no_input_qs;i++) {
1701 q=irq_ptr->input_qs[i];
1702 if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
1703 qdio_mark_q(q);
1704 else {
1705#ifdef QDIO_PERFORMANCE_STATS
1706 perf_stats.tl_runs--;
1707#endif /* QDIO_PERFORMANCE_STATS */
1708 __qdio_inbound_processing(q);
1709 }
1710 }
1711 if (!irq_ptr->hydra_gives_outbound_pcis)
1712 return;
1713 for (i=0;i<irq_ptr->no_output_qs;i++) {
1714 q=irq_ptr->output_qs[i];
1715#ifdef QDIO_PERFORMANCE_STATS
1716 perf_stats.tl_runs--;
1717#endif /* QDIO_PERFORMANCE_STATS */
1718 if (qdio_is_outbound_q_done(q))
1719 continue;
1720 if (!irq_ptr->sync_done_on_outb_pcis)
1721 SYNC_MEMORY;
1722 __qdio_outbound_processing(q);
1723 }
1724}
1725
1726static void qdio_establish_handle_irq(struct ccw_device*, int, int);
1727
1728static inline void
1729qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
1730 int cstat, int dstat)
1731{
1732 struct qdio_irq *irq_ptr;
1733 struct qdio_q *q;
1734 char dbf_text[15];
1735
1736 irq_ptr = cdev->private->qdio_data;
1737
1738 QDIO_DBF_TEXT2(1, trace, "ick2");
1739 sprintf(dbf_text,"%s", cdev->dev.bus_id);
1740 QDIO_DBF_TEXT2(1,trace,dbf_text);
1741 QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int));
1742 QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
1743 QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
1744 QDIO_PRINT_ERR("received check condition on activate " \
1745 "queues on device %s (cs=x%x, ds=x%x).\n",
1746 cdev->dev.bus_id, cstat, dstat);
1747 if (irq_ptr->no_input_qs) {
1748 q=irq_ptr->input_qs[0];
1749 } else if (irq_ptr->no_output_qs) {
1750 q=irq_ptr->output_qs[0];
1751 } else {
1752 QDIO_PRINT_ERR("oops... no queue registered for device %s!?\n",
1753 cdev->dev.bus_id);
1754 goto omit_handler_call;
1755 }
1756 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
1757 QDIO_STATUS_LOOK_FOR_ERROR,
1758 0,0,0,-1,-1,q->int_parm);
1759omit_handler_call:
1760 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED);
1761
1762}
1763
1764static void
1765qdio_call_shutdown(void *data)
1766{
1767 struct ccw_device *cdev;
1768
1769 cdev = (struct ccw_device *)data;
1770 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1771 put_device(&cdev->dev);
1772}
1773
1774static void
1775qdio_timeout_handler(struct ccw_device *cdev)
1776{
1777 struct qdio_irq *irq_ptr;
1778 char dbf_text[15];
1779
1780 QDIO_DBF_TEXT2(0, trace, "qtoh");
1781 sprintf(dbf_text, "%s", cdev->dev.bus_id);
1782 QDIO_DBF_TEXT2(0, trace, dbf_text);
1783
1784 irq_ptr = cdev->private->qdio_data;
1785 sprintf(dbf_text, "state:%d", irq_ptr->state);
1786 QDIO_DBF_TEXT2(0, trace, dbf_text);
1787
1788 switch (irq_ptr->state) {
1789 case QDIO_IRQ_STATE_INACTIVE:
1790 QDIO_PRINT_ERR("establish queues on irq %04x: timed out\n",
1791 irq_ptr->irq);
1792 QDIO_DBF_TEXT2(1,setup,"eq:timeo");
1793 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
1794 break;
1795 case QDIO_IRQ_STATE_CLEANUP:
1796 QDIO_PRINT_INFO("Did not get interrupt on cleanup, irq=0x%x.\n",
1797 irq_ptr->irq);
1798 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
1799 break;
1800 case QDIO_IRQ_STATE_ESTABLISHED:
1801 case QDIO_IRQ_STATE_ACTIVE:
1802 /* I/O has been terminated by common I/O layer. */
1803 QDIO_PRINT_INFO("Queues on irq %04x killed by cio.\n",
1804 irq_ptr->irq);
1805 QDIO_DBF_TEXT2(1, trace, "cio:term");
1806 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
1807 if (get_device(&cdev->dev)) {
1808 /* Can't call shutdown from interrupt context. */
1809 PREPARE_WORK(&cdev->private->kick_work,
1810 qdio_call_shutdown, (void *)cdev);
1811 queue_work(ccw_device_work, &cdev->private->kick_work);
1812 }
1813 break;
1814 default:
1815 BUG();
1816 }
1817 ccw_device_set_timeout(cdev, 0);
1818 wake_up(&cdev->private->wait_q);
1819}
1820
1821static void
1822qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1823{
1824 struct qdio_irq *irq_ptr;
1825 int cstat,dstat;
1826 char dbf_text[15];
1827
1828#ifdef CONFIG_QDIO_DEBUG
1829 QDIO_DBF_TEXT4(0, trace, "qint");
1830 sprintf(dbf_text, "%s", cdev->dev.bus_id);
1831 QDIO_DBF_TEXT4(0, trace, dbf_text);
1832#endif /* CONFIG_QDIO_DEBUG */
1833
1834 if (!intparm) {
1835 QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \
1836 "handler, device %s\n", cdev->dev.bus_id);
1837 return;
1838 }
1839
1840 irq_ptr = cdev->private->qdio_data;
1841 if (!irq_ptr) {
1842 QDIO_DBF_TEXT2(1, trace, "uint");
1843 sprintf(dbf_text,"%s", cdev->dev.bus_id);
1844 QDIO_DBF_TEXT2(1,trace,dbf_text);
1845 QDIO_PRINT_ERR("received interrupt on unused device %s!\n",
1846 cdev->dev.bus_id);
1847 return;
1848 }
1849
1850 if (IS_ERR(irb)) {
1851 /* Currently running i/o is in error. */
1852 switch (PTR_ERR(irb)) {
1853 case -EIO:
1854 QDIO_PRINT_ERR("i/o error on device %s\n",
1855 cdev->dev.bus_id);
1856 return;
1857 case -ETIMEDOUT:
1858 qdio_timeout_handler(cdev);
1859 return;
1860 default:
1861 QDIO_PRINT_ERR("unknown error state %ld on device %s\n",
1862 PTR_ERR(irb), cdev->dev.bus_id);
1863 return;
1864 }
1865 }
1866
1867 qdio_irq_check_sense(irq_ptr->irq, irb);
1868
1869#ifdef CONFIG_QDIO_DEBUG
1870 sprintf(dbf_text, "state:%d", irq_ptr->state);
1871 QDIO_DBF_TEXT4(0, trace, dbf_text);
1872#endif /* CONFIG_QDIO_DEBUG */
1873
1874 cstat = irb->scsw.cstat;
1875 dstat = irb->scsw.dstat;
1876
1877 switch (irq_ptr->state) {
1878 case QDIO_IRQ_STATE_INACTIVE:
1879 qdio_establish_handle_irq(cdev, cstat, dstat);
1880 break;
1881
1882 case QDIO_IRQ_STATE_CLEANUP:
1883 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1884 break;
1885
1886 case QDIO_IRQ_STATE_ESTABLISHED:
1887 case QDIO_IRQ_STATE_ACTIVE:
1888 if (cstat & SCHN_STAT_PCI) {
1889 qdio_handle_pci(irq_ptr);
1890 break;
1891 }
1892
1893 if ((cstat&~SCHN_STAT_PCI)||dstat) {
1894 qdio_handle_activate_check(cdev, intparm, cstat, dstat);
1895 break;
1896 }
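		/* fall through: no PCI and no error in these states
		 * means an unexpected interrupt */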
1897 default:
1898 QDIO_PRINT_ERR("got interrupt for queues in state %d on " \
1899 "device %s?!\n",
1900 irq_ptr->state, cdev->dev.bus_id);
1901 }
1902 wake_up(&cdev->private->wait_q);
1903
1904}
1905
1906int
1907qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
1908 unsigned int queue_number)
1909{
1910 int cc;
1911 struct qdio_q *q;
1912 struct qdio_irq *irq_ptr;
1913 void *ptr;
1914#ifdef CONFIG_QDIO_DEBUG
1915 char dbf_text[15]="SyncXXXX";
1916#endif
1917
1918 irq_ptr = cdev->private->qdio_data;
1919 if (!irq_ptr)
1920 return -ENODEV;
1921
1922#ifdef CONFIG_QDIO_DEBUG
1923 *((int*)(&dbf_text[4])) = irq_ptr->irq;
1924 QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
1925 *((int*)(&dbf_text[0]))=flags;
1926 *((int*)(&dbf_text[4]))=queue_number;
1927 QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
1928#endif /* CONFIG_QDIO_DEBUG */
1929
1930 if (flags&QDIO_FLAG_SYNC_INPUT) {
1931 q=irq_ptr->input_qs[queue_number];
1932 if (!q)
1933 return -EINVAL;
1934 cc = do_siga_sync(q->irq, 0, q->mask);
1935 } else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
1936 q=irq_ptr->output_qs[queue_number];
1937 if (!q)
1938 return -EINVAL;
1939 cc = do_siga_sync(q->irq, q->mask, 0);
1940 } else
1941 return -EINVAL;
1942
1943 ptr=&cc;
1944 if (cc)
1945 QDIO_DBF_HEX3(0,trace,&ptr,sizeof(int));
1946
1947 return cc;
1948}
1949
1950static unsigned char
1951qdio_check_siga_needs(int sch)
1952{
1953 int result;
1954 unsigned char qdioac;
1955
1956 struct {
1957 struct chsc_header request;
1958 u16 reserved1;
1959 u16 first_sch;
1960 u16 reserved2;
1961 u16 last_sch;
1962 u32 reserved3;
1963 struct chsc_header response;
1964 u32 reserved4;
1965 u8 flags;
1966 u8 reserved5;
1967 u16 sch;
1968 u8 qfmt;
1969 u8 reserved6;
1970 u8 qdioac;
1971 u8 sch_class;
1972 u8 reserved7;
1973 u8 icnt;
1974 u8 reserved8;
1975 u8 ocnt;
1976 } *ssqd_area;
1977
1978 ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1979 if (!ssqd_area) {
1980 QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
1981 "SIGAs for sch x%x.\n", sch);
1982		return CHSC_FLAG_SIGA_INPUT_NECESSARY |
1983			CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
1984			CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
1985 }
1986 ssqd_area->request = (struct chsc_header) {
1987 .length = 0x0010,
1988 .code = 0x0024,
1989 };
1990
1991 ssqd_area->first_sch = sch;
1992 ssqd_area->last_sch = sch;
1993
1994 result=chsc(ssqd_area);
1995
1996 if (result) {
1997 QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
1998 "SIGAs for sch x%x.\n",
1999 result,sch);
2000		qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2001			CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2002			CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2003 goto out;
2004 }
2005
2006 if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
2007 QDIO_PRINT_WARN("response upon checking SIGA needs " \
2008 "is 0x%x. Using all SIGAs for sch x%x.\n",
2009 ssqd_area->response.code, sch);
2010		qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2011			CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2012			CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2013 goto out;
2014 }
2015 if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
2016 !(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
2017 (ssqd_area->sch != sch)) {
2018 QDIO_PRINT_WARN("huh? problems checking out sch x%x... " \
2019 "using all SIGAs.\n",sch);
2020 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2021 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2022 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
2023 goto out;
2024 }
2025
2026 qdioac = ssqd_area->qdioac;
2027out:
2028 free_page ((unsigned long) ssqd_area);
2029 return qdioac;
2030}
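/*
 * Sketch of how the returned qdioac byte is meant to be consumed (the
 * variable names are made up for illustration; only the
 * CHSC_FLAG_SIGA_* masks are real):
 */
#if 0	/* illustration only, not built */
	unsigned char qdioac = qdio_check_siga_needs(sch);
	int need_siga_in, need_siga_out, need_siga_sync;

	need_siga_in   = qdioac & CHSC_FLAG_SIGA_INPUT_NECESSARY;
	need_siga_out  = qdioac & CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
	need_siga_sync = qdioac & CHSC_FLAG_SIGA_SYNC_NECESSARY;
#endif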
2031
2032static unsigned int
2033tiqdio_check_chsc_availability(void)
2034{
2035 char dbf_text[15];
2036
2037 if (!css_characteristics_avail)
2038 return -EIO;
2039
2040 /* Check for bit 41. */
2041 if (!css_general_characteristics.aif) {
2042 QDIO_PRINT_WARN("Adapter interruption facility not " \
2043 "installed.\n");
2044 return -ENOENT;
2045 }
2046
2047 /* Check for bits 107 and 108. */
2048 if (!css_chsc_characteristics.scssc ||
2049 !css_chsc_characteristics.scsscf) {
2050 QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \
2051 "not available.\n");
2052 return -ENOENT;
2053 }
2054
2055 /* Check for OSA/FCP thin interrupts (bit 67). */
2056 hydra_thinints = css_general_characteristics.aif_osa;
2057 sprintf(dbf_text,"hydrati%1x", hydra_thinints);
2058 QDIO_DBF_TEXT0(0,setup,dbf_text);
2059
2060 /* Check for aif time delay disablement fac (bit 56). If installed,
2061 * omit svs even under lpar (good point by rick again) */
2062 omit_svs = css_general_characteristics.aif_tdd;
2063 sprintf(dbf_text,"omitsvs%1x", omit_svs);
2064 QDIO_DBF_TEXT0(0,setup,dbf_text);
2065 return 0;
2066}
2067
2068
2069static int
2070tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2071{
2072 unsigned long real_addr_local_summary_bit;
2073 unsigned long real_addr_dev_st_chg_ind;
2074 void *ptr;
2075 char dbf_text[15];
2076
2077 unsigned int resp_code;
2078 int result;
2079
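	/* CHSC command area for set-channel-subsystem-characteristics
	 * (command code 0x0021), used to register the summary and
	 * device-state-change indicator addresses for thin interrupts */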
2080 struct {
2081 struct chsc_header request;
2082 u16 operation_code;
2083 u16 reserved1;
2084 u32 reserved2;
2085 u32 reserved3;
2086 u64 summary_indicator_addr;
2087 u64 subchannel_indicator_addr;
2088 u32 ks:4;
2089 u32 kc:4;
2090 u32 reserved4:21;
2091 u32 isc:3;
2092 u32 word_with_d_bit;
2093 /* set to 0x10000000 to enable
2094 * time delay disablement facility */
2095 u32 reserved5;
2096 u32 subsystem_id;
2097 u32 reserved6[1004];
2098 struct chsc_header response;
2099 u32 reserved7;
2100 } *scssc_area;
2101
2102 if (!irq_ptr->is_thinint_irq)
2103 return -ENODEV;
2104
2105 if (reset_to_zero) {
2106 real_addr_local_summary_bit=0;
2107 real_addr_dev_st_chg_ind=0;
2108 } else {
2109 real_addr_local_summary_bit=
2110 virt_to_phys((volatile void *)indicators);
2111 real_addr_dev_st_chg_ind=
2112 virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
2113 }
2114
2115 scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
2116 if (!scssc_area) {
2117 QDIO_PRINT_WARN("No memory for setting indicators on " \
2118 "subchannel x%x.\n", irq_ptr->irq);
2119 return -ENOMEM;
2120 }
2121 scssc_area->request = (struct chsc_header) {
2122 .length = 0x0fe0,
2123 .code = 0x0021,
2124 };
2125 scssc_area->operation_code = 0;
2126
2127 scssc_area->summary_indicator_addr = real_addr_local_summary_bit;
2128 scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind;
2129 scssc_area->ks = QDIO_STORAGE_KEY;
2130 scssc_area->kc = QDIO_STORAGE_KEY;
2131 scssc_area->isc = TIQDIO_THININT_ISC;
2132 scssc_area->subsystem_id = (1<<16) + irq_ptr->irq;
2133 /* enables the time delay disablement facility. Don't care
2134 * whether it is really there (i.e. we haven't checked for
2135 * it) */
2136 if (css_general_characteristics.aif_tdd)
2137 scssc_area->word_with_d_bit = 0x10000000;
2138 else
2139 QDIO_PRINT_WARN("Time delay disablement facility " \
2140 "not available\n");
2141
2142
2143
2144 result = chsc(scssc_area);
2145 if (result) {
2146 QDIO_PRINT_WARN("could not set indicators on irq x%x, " \
2147 "cc=%i.\n",irq_ptr->irq,result);
2148 result = -EIO;
2149 goto out;
2150 }
2151
2152 resp_code = scssc_area->response.code;
2153 if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
2154 QDIO_PRINT_WARN("response upon setting indicators " \
2155 "is 0x%x.\n",resp_code);
2156 sprintf(dbf_text,"sidR%4x",resp_code);
2157 QDIO_DBF_TEXT1(0,trace,dbf_text);
2158 QDIO_DBF_TEXT1(0,setup,dbf_text);
2159 ptr=&scssc_area->response;
2160 QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN);
2161 result = -EIO;
2162 goto out;
2163 }
2164
2165 QDIO_DBF_TEXT2(0,setup,"setscind");
2166 QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit,
2167 sizeof(unsigned long));
2168 QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long));
2169 result = 0;
2170out:
2171 free_page ((unsigned long) scssc_area);
2172 return result;
2173
2174}
2175
2176static int
2177tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
2178{
2179 unsigned int resp_code;
2180 int result;
2181 void *ptr;
2182 char dbf_text[15];
2183
2184 struct {
2185 struct chsc_header request;
2186 u16 operation_code;
2187 u16 reserved1;
2188 u32 reserved2;
2189 u32 reserved3;
2190 u32 reserved4[2];
2191 u32 delay_target;
2192 u32 reserved5[1009];
2193 struct chsc_header response;
2194 u32 reserved6;
2195 } *scsscf_area;
2196
2197 if (!irq_ptr->is_thinint_irq)
2198 return -ENODEV;
2199
2200 scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
2201 if (!scsscf_area) {
2202 QDIO_PRINT_WARN("No memory for setting delay target on " \
2203 "subchannel x%x.\n", irq_ptr->irq);
2204 return -ENOMEM;
2205 }
2206 scsscf_area->request = (struct chsc_header) {
2207 .length = 0x0fe0,
2208 .code = 0x1027,
2209 };
2210
2211 scsscf_area->delay_target = delay_target<<16;
2212
2213 result=chsc(scsscf_area);
2214 if (result) {
2215 QDIO_PRINT_WARN("could not set delay target on irq x%x, " \
2216 "cc=%i. Continuing.\n",irq_ptr->irq,result);
2217 result = -EIO;
2218 goto out;
2219 }
2220
2221 resp_code = scsscf_area->response.code;
2222 if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
2223 QDIO_PRINT_WARN("response upon setting delay target " \
2224 "is 0x%x. Continuing.\n",resp_code);
2225 sprintf(dbf_text,"sdtR%4x",resp_code);
2226 QDIO_DBF_TEXT1(0,trace,dbf_text);
2227 QDIO_DBF_TEXT1(0,setup,dbf_text);
2228 ptr=&scsscf_area->response;
2229 QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN);
2230 }
2231 QDIO_DBF_TEXT2(0,trace,"delytrgt");
2232 QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long));
2233 result = 0; /* not critical */
2234out:
2235 free_page ((unsigned long) scsscf_area);
2236 return result;
2237}
2238
2239int
2240qdio_cleanup(struct ccw_device *cdev, int how)
2241{
2242 struct qdio_irq *irq_ptr;
2243 char dbf_text[15];
2244 int rc;
2245
2246 irq_ptr = cdev->private->qdio_data;
2247 if (!irq_ptr)
2248 return -ENODEV;
2249
2250 sprintf(dbf_text,"qcln%4x",irq_ptr->irq);
2251 QDIO_DBF_TEXT1(0,trace,dbf_text);
2252 QDIO_DBF_TEXT0(0,setup,dbf_text);
2253
2254 rc = qdio_shutdown(cdev, how);
2255 if ((rc == 0) || (rc == -EINPROGRESS))
2256 rc = qdio_free(cdev);
2257 return rc;
2258}
2259
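/*
 * Shut a subchannel down: mark all queues as going away, kill the
 * tasklets, wait for pending handlers, then halt or clear the
 * subchannel (depending on "how") and restore the original interrupt
 * handler.  Returns -EINPROGRESS if a queue is still in use when the
 * wait times out.
 */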
2260int
2261qdio_shutdown(struct ccw_device *cdev, int how)
2262{
2263 struct qdio_irq *irq_ptr;
2264 int i;
2265 int result = 0;
2266 int rc;
2267 unsigned long flags;
2268 int timeout;
2269 char dbf_text[15];
2270
2271 irq_ptr = cdev->private->qdio_data;
2272 if (!irq_ptr)
2273 return -ENODEV;
2274
2275 down(&irq_ptr->setting_up_sema);
2276
2277 sprintf(dbf_text,"qsqs%4x",irq_ptr->irq);
2278 QDIO_DBF_TEXT1(0,trace,dbf_text);
2279 QDIO_DBF_TEXT0(0,setup,dbf_text);
2280
2281 /* mark all qs as uninteresting */
2282 for (i=0;i<irq_ptr->no_input_qs;i++)
2283 atomic_set(&irq_ptr->input_qs[i]->is_in_shutdown,1);
2284
2285 for (i=0;i<irq_ptr->no_output_qs;i++)
2286 atomic_set(&irq_ptr->output_qs[i]->is_in_shutdown,1);
2287
2288 tasklet_kill(&tiqdio_tasklet);
2289
2290 for (i=0;i<irq_ptr->no_input_qs;i++) {
2291 qdio_unmark_q(irq_ptr->input_qs[i]);
2292 tasklet_kill(&irq_ptr->input_qs[i]->tasklet);
2293 wait_event_interruptible_timeout(cdev->private->wait_q,
2294 !atomic_read(&irq_ptr->
2295 input_qs[i]->
2296 use_count),
2297 QDIO_NO_USE_COUNT_TIMEOUT);
2298 if (atomic_read(&irq_ptr->input_qs[i]->use_count))
2299 result=-EINPROGRESS;
2300 }
2301
2302 for (i=0;i<irq_ptr->no_output_qs;i++) {
2303 tasklet_kill(&irq_ptr->output_qs[i]->tasklet);
2304 wait_event_interruptible_timeout(cdev->private->wait_q,
2305 !atomic_read(&irq_ptr->
2306 output_qs[i]->
2307 use_count),
2308 QDIO_NO_USE_COUNT_TIMEOUT);
2309 if (atomic_read(&irq_ptr->output_qs[i]->use_count))
2310 result=-EINPROGRESS;
2311 }
2312
2313 /* cleanup subchannel */
2314 spin_lock_irqsave(get_ccwdev_lock(cdev),flags);
2315 if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) {
2316 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
2317 timeout=QDIO_CLEANUP_CLEAR_TIMEOUT;
2318 } else if (how&QDIO_FLAG_CLEANUP_USING_HALT) {
2319 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
2320 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
2321 } else { /* default behaviour */
2322 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
2323 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
2324 }
2325 if (rc == -ENODEV) {
2326 /* No need to wait for device no longer present. */
2327 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2328 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2329 } else if (((void *)cdev->handler != (void *)qdio_handler) && rc == 0) {
2330 /*
2331		 * Whoever put another handler there has to cope with the
2332		 * interrupt themselves. Might happen if qdio_shutdown was
2333 * called on already shutdown queues, but this shouldn't have
2334 * bad side effects.
2335 */
2336 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2337 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2338 } else if (rc == 0) {
2339 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
2340 ccw_device_set_timeout(cdev, timeout);
2341 spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
2342
2343 wait_event(cdev->private->wait_q,
2344 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
2345 irq_ptr->state == QDIO_IRQ_STATE_ERR);
2346 } else {
2347		QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for "
2348				"device %s\n", rc, cdev->dev.bus_id);
2349 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2350 result = rc;
2351 goto out;
2352 }
2353 if (irq_ptr->is_thinint_irq) {
2354 qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind);
2355 tiqdio_set_subchannel_ind(irq_ptr,1);
2356 /* reset adapter interrupt indicators */
2357 }
2358
2359 /* exchange int handlers, if necessary */
2360 if ((void*)cdev->handler == (void*)qdio_handler)
2361 cdev->handler=irq_ptr->original_int_handler;
2362
2363 /* Ignore errors. */
2364 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2365 ccw_device_set_timeout(cdev, 0);
2366out:
2367 up(&irq_ptr->setting_up_sema);
2368 return result;
2369}
2370
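/* counterpart of qdio_allocate: detach the qdio_irq from the device
 * and release the queue memory; qdio_shutdown should have been run
 * before */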
2371int
2372qdio_free(struct ccw_device *cdev)
2373{
2374 struct qdio_irq *irq_ptr;
2375 char dbf_text[15];
2376
2377 irq_ptr = cdev->private->qdio_data;
2378 if (!irq_ptr)
2379 return -ENODEV;
2380
2381 down(&irq_ptr->setting_up_sema);
2382
2383 sprintf(dbf_text,"qfqs%4x",irq_ptr->irq);
2384 QDIO_DBF_TEXT1(0,trace,dbf_text);
2385 QDIO_DBF_TEXT0(0,setup,dbf_text);
2386
2387	cdev->private->qdio_data = NULL;
2388
2389 up(&irq_ptr->setting_up_sema);
2390
2391 qdio_release_irq_memory(irq_ptr);
2392 module_put(THIS_MODULE);
2393 return 0;
2394}
2395
2396static inline void
2397qdio_allocate_do_dbf(struct qdio_initialize *init_data)
2398{
2399	char dbf_text[20]; /* larger than needed in case a sprintf emits more than 8 chars */
2400
2401 sprintf(dbf_text,"qfmt:%x",init_data->q_format);
2402 QDIO_DBF_TEXT0(0,setup,dbf_text);
2403 QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8);
2404 sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format);
2405 QDIO_DBF_TEXT0(0,setup,dbf_text);
2406 QDIO_DBF_HEX0(0,setup,&init_data->qib_param_field,sizeof(char*));
2407 QDIO_DBF_HEX0(0,setup,&init_data->input_slib_elements,sizeof(long*));
2408 QDIO_DBF_HEX0(0,setup,&init_data->output_slib_elements,sizeof(long*));
2409 sprintf(dbf_text,"miit%4x",init_data->min_input_threshold);
2410 QDIO_DBF_TEXT0(0,setup,dbf_text);
2411 sprintf(dbf_text,"mait%4x",init_data->max_input_threshold);
2412 QDIO_DBF_TEXT0(0,setup,dbf_text);
2413 sprintf(dbf_text,"miot%4x",init_data->min_output_threshold);
2414 QDIO_DBF_TEXT0(0,setup,dbf_text);
2415 sprintf(dbf_text,"maot%4x",init_data->max_output_threshold);
2416 QDIO_DBF_TEXT0(0,setup,dbf_text);
2417 sprintf(dbf_text,"niq:%4x",init_data->no_input_qs);
2418 QDIO_DBF_TEXT0(0,setup,dbf_text);
2419 sprintf(dbf_text,"noq:%4x",init_data->no_output_qs);
2420 QDIO_DBF_TEXT0(0,setup,dbf_text);
2421 QDIO_DBF_HEX0(0,setup,&init_data->input_handler,sizeof(void*));
2422 QDIO_DBF_HEX0(0,setup,&init_data->output_handler,sizeof(void*));
2423 QDIO_DBF_HEX0(0,setup,&init_data->int_parm,sizeof(long));
2424 QDIO_DBF_HEX0(0,setup,&init_data->flags,sizeof(long));
2425 QDIO_DBF_HEX0(0,setup,&init_data->input_sbal_addr_array,sizeof(void*));
2426 QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
2427}
2428
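/* fill one format-0 queue descriptor in the QDR with the addresses of
 * the queue's SLIB, SL and SLSB plus the storage keys */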
2429static inline void
2430qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
2431{
2432 irq_ptr->input_qs[i]->is_iqdio_q = iqfmt;
2433 irq_ptr->input_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
2434
2435 irq_ptr->qdr->qdf0[i].sliba=(unsigned long)(irq_ptr->input_qs[i]->slib);
2436
2437 irq_ptr->qdr->qdf0[i].sla=(unsigned long)(irq_ptr->input_qs[i]->sl);
2438
2439 irq_ptr->qdr->qdf0[i].slsba=
2440 (unsigned long)(&irq_ptr->input_qs[i]->slsb.acc.val[0]);
2441
2442 irq_ptr->qdr->qdf0[i].akey=QDIO_STORAGE_KEY;
2443 irq_ptr->qdr->qdf0[i].bkey=QDIO_STORAGE_KEY;
2444 irq_ptr->qdr->qdf0[i].ckey=QDIO_STORAGE_KEY;
2445 irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY;
2446}
2447
2448static inline void
2449qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
2450 int j, int iqfmt)
2451{
2452 irq_ptr->output_qs[i]->is_iqdio_q = iqfmt;
2453 irq_ptr->output_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
2454
2455 irq_ptr->qdr->qdf0[i+j].sliba=(unsigned long)(irq_ptr->output_qs[i]->slib);
2456
2457 irq_ptr->qdr->qdf0[i+j].sla=(unsigned long)(irq_ptr->output_qs[i]->sl);
2458
2459 irq_ptr->qdr->qdf0[i+j].slsba=
2460 (unsigned long)(&irq_ptr->output_qs[i]->slsb.acc.val[0]);
2461
2462 irq_ptr->qdr->qdf0[i+j].akey=QDIO_STORAGE_KEY;
2463 irq_ptr->qdr->qdf0[i+j].bkey=QDIO_STORAGE_KEY;
2464 irq_ptr->qdr->qdf0[i+j].ckey=QDIO_STORAGE_KEY;
2465 irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY;
2466}
2467
2468
2469static inline void
2470qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
2471{
2472 int i;
2473
2474 for (i=0;i<irq_ptr->no_input_qs;i++) {
2475 irq_ptr->input_qs[i]->siga_sync=
2476 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
2477 irq_ptr->input_qs[i]->siga_in=
2478 irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
2479 irq_ptr->input_qs[i]->siga_out=
2480 irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
2481 irq_ptr->input_qs[i]->siga_sync_done_on_thinints=
2482 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
2483 irq_ptr->input_qs[i]->hydra_gives_outbound_pcis=
2484 irq_ptr->hydra_gives_outbound_pcis;
2485 irq_ptr->input_qs[i]->siga_sync_done_on_outb_tis=
2486 ((irq_ptr->qdioac&
2487 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2488 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
2489 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2490 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
2491
2492 }
2493}
2494
2495static inline void
2496qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
2497{
2498 int i;
2499
2500 for (i=0;i<irq_ptr->no_output_qs;i++) {
2501 irq_ptr->output_qs[i]->siga_sync=
2502 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
2503 irq_ptr->output_qs[i]->siga_in=
2504 irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
2505 irq_ptr->output_qs[i]->siga_out=
2506 irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
2507 irq_ptr->output_qs[i]->siga_sync_done_on_thinints=
2508 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
2509 irq_ptr->output_qs[i]->hydra_gives_outbound_pcis=
2510 irq_ptr->hydra_gives_outbound_pcis;
2511 irq_ptr->output_qs[i]->siga_sync_done_on_outb_tis=
2512 ((irq_ptr->qdioac&
2513 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2514 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
2515 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2516 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
2517
2518 }
2519}
2520
2521static inline int
2522qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2523 int dstat)
2524{
2525 char dbf_text[15];
2526 struct qdio_irq *irq_ptr;
2527
2528 irq_ptr = cdev->private->qdio_data;
2529
2530 if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
2531 sprintf(dbf_text,"ick1%4x",irq_ptr->irq);
2532 QDIO_DBF_TEXT2(1,trace,dbf_text);
2533 QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
2534 QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
2535 QDIO_PRINT_ERR("received check condition on establish " \
2536 "queues on irq 0x%x (cs=x%x, ds=x%x).\n",
2537 irq_ptr->irq,cstat,dstat);
2538 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
2539 }
2540
2541 if (!(dstat & DEV_STAT_DEV_END)) {
2542 QDIO_DBF_TEXT2(1,setup,"eq:no de");
2543 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2544 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2545 QDIO_PRINT_ERR("establish queues on irq %04x: didn't get "
2546 "device end: dstat=%02x, cstat=%02x\n",
2547 irq_ptr->irq, dstat, cstat);
2548 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2549 return 1;
2550 }
2551
2552 if (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) {
2553 QDIO_DBF_TEXT2(1,setup,"eq:badio");
2554 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2555 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2556 QDIO_PRINT_ERR("establish queues on irq %04x: got "
2557 "the following devstat: dstat=%02x, "
2558 "cstat=%02x\n",
2559 irq_ptr->irq, dstat, cstat);
2560 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2561 return 1;
2562 }
2563 return 0;
2564}
2565
2566static void
2567qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
2568{
2569 struct qdio_irq *irq_ptr;
2570 char dbf_text[15];
2571
2572 irq_ptr = cdev->private->qdio_data;
2573
2574 sprintf(dbf_text,"qehi%4x",cdev->private->irq);
2575 QDIO_DBF_TEXT0(0,setup,dbf_text);
2576 QDIO_DBF_TEXT0(0,trace,dbf_text);
2577
2578 if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) {
2579 ccw_device_set_timeout(cdev, 0);
2580 return;
2581 }
2582
2583 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED);
2584 ccw_device_set_timeout(cdev, 0);
2585}
2586
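/* convenience wrapper: qdio_allocate followed by qdio_establish,
 * undoing the allocation again if the establish step fails */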
2587int
2588qdio_initialize(struct qdio_initialize *init_data)
2589{
2590 int rc;
2591 char dbf_text[15];
2592
2593 sprintf(dbf_text,"qini%4x",init_data->cdev->private->irq);
2594 QDIO_DBF_TEXT0(0,setup,dbf_text);
2595 QDIO_DBF_TEXT0(0,trace,dbf_text);
2596
2597 rc = qdio_allocate(init_data);
2598 if (rc == 0) {
2599 rc = qdio_establish(init_data);
2600 if (rc != 0)
2601 qdio_free(init_data->cdev);
2602 }
2603
2604 return rc;
2605}
2606
2607
2608int
2609qdio_allocate(struct qdio_initialize *init_data)
2610{
2611 struct qdio_irq *irq_ptr;
2612 char dbf_text[15];
2613
2614 sprintf(dbf_text,"qalc%4x",init_data->cdev->private->irq);
2615 QDIO_DBF_TEXT0(0,setup,dbf_text);
2616 QDIO_DBF_TEXT0(0,trace,dbf_text);
2617 if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
2618 (init_data->no_output_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
2619 ((init_data->no_input_qs) && (!init_data->input_handler)) ||
2620 ((init_data->no_output_qs) && (!init_data->output_handler)) )
2621 return -EINVAL;
2622
2623 if (!init_data->input_sbal_addr_array)
2624 return -EINVAL;
2625
2626 if (!init_data->output_sbal_addr_array)
2627 return -EINVAL;
2628
2629 qdio_allocate_do_dbf(init_data);
2630
2631 /* create irq */
2632 irq_ptr=kmalloc(sizeof(struct qdio_irq), GFP_KERNEL | GFP_DMA);
2633
2634 QDIO_DBF_TEXT0(0,setup,"irq_ptr:");
2635 QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*));
2636
2637 if (!irq_ptr) {
2638 QDIO_PRINT_ERR("kmalloc of irq_ptr failed!\n");
2639 return -ENOMEM;
2640 }
2641
2642 memset(irq_ptr,0,sizeof(struct qdio_irq));
2643
2644 init_MUTEX(&irq_ptr->setting_up_sema);
2645
2646 /* QDR must be in DMA area since CCW data address is only 32 bit */
2647 irq_ptr->qdr=kmalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA);
2648 if (!(irq_ptr->qdr)) {
2649 kfree(irq_ptr);
2650 QDIO_PRINT_ERR("kmalloc of irq_ptr->qdr failed!\n");
2651 return -ENOMEM;
2652 }
2653 QDIO_DBF_TEXT0(0,setup,"qdr:");
2654 QDIO_DBF_HEX0(0,setup,&irq_ptr->qdr,sizeof(void*));
2655
2656 if (qdio_alloc_qs(irq_ptr,
2657 init_data->no_input_qs,
2658 init_data->no_output_qs)) {
2659 qdio_release_irq_memory(irq_ptr);
2660 return -ENOMEM;
2661 }
2662
2663 init_data->cdev->private->qdio_data = irq_ptr;
2664
2665 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE);
2666
2667 return 0;
2668}
2669
2670int qdio_fill_irq(struct qdio_initialize *init_data)
2671{
2672 int i;
2673 char dbf_text[15];
2674 struct ciw *ciw;
2675 int is_iqdio;
2676 struct qdio_irq *irq_ptr;
2677
2678 irq_ptr = init_data->cdev->private->qdio_data;
2679
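	/* zero only the part of the structure in front of the qdr
	 * member, so the allocations made in qdio_allocate are kept */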
2680 memset(irq_ptr,0,((char*)&irq_ptr->qdr)-((char*)irq_ptr));
2681
2682 /* wipes qib.ac, required by ar7063 */
2683 memset(irq_ptr->qdr,0,sizeof(struct qdr));
2684
2685 irq_ptr->int_parm=init_data->int_parm;
2686
2687 irq_ptr->irq = init_data->cdev->private->irq;
2688 irq_ptr->no_input_qs=init_data->no_input_qs;
2689 irq_ptr->no_output_qs=init_data->no_output_qs;
2690
2691 if (init_data->q_format==QDIO_IQDIO_QFMT) {
2692 irq_ptr->is_iqdio_irq=1;
2693 irq_ptr->is_thinint_irq=1;
2694 } else {
2695 irq_ptr->is_iqdio_irq=0;
2696 irq_ptr->is_thinint_irq=hydra_thinints;
2697 }
2698 sprintf(dbf_text,"is_i_t%1x%1x",
2699 irq_ptr->is_iqdio_irq,irq_ptr->is_thinint_irq);
2700 QDIO_DBF_TEXT2(0,setup,dbf_text);
2701
2702 if (irq_ptr->is_thinint_irq) {
2703 irq_ptr->dev_st_chg_ind=qdio_get_indicator();
2704 QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
2705 if (!irq_ptr->dev_st_chg_ind) {
2706 QDIO_PRINT_WARN("no indicator location available " \
2707 "for irq 0x%x\n",irq_ptr->irq);
2708 qdio_release_irq_memory(irq_ptr);
2709 return -ENOBUFS;
2710 }
2711 }
2712
2713 /* defaults */
2714 irq_ptr->equeue.cmd=DEFAULT_ESTABLISH_QS_CMD;
2715 irq_ptr->equeue.count=DEFAULT_ESTABLISH_QS_COUNT;
2716 irq_ptr->aqueue.cmd=DEFAULT_ACTIVATE_QS_CMD;
2717 irq_ptr->aqueue.count=DEFAULT_ACTIVATE_QS_COUNT;
2718
2719 qdio_fill_qs(irq_ptr, init_data->cdev,
2720 init_data->no_input_qs,
2721 init_data->no_output_qs,
2722 init_data->input_handler,
2723 init_data->output_handler,init_data->int_parm,
2724 init_data->q_format,init_data->flags,
2725 init_data->input_sbal_addr_array,
2726 init_data->output_sbal_addr_array);
2727
2728 if (!try_module_get(THIS_MODULE)) {
2729 QDIO_PRINT_CRIT("try_module_get() failed!\n");
2730 qdio_release_irq_memory(irq_ptr);
2731 return -EINVAL;
2732 }
2733
2734 qdio_fill_thresholds(irq_ptr,init_data->no_input_qs,
2735 init_data->no_output_qs,
2736 init_data->min_input_threshold,
2737 init_data->max_input_threshold,
2738 init_data->min_output_threshold,
2739 init_data->max_output_threshold);
2740
2741 /* fill in qdr */
2742 irq_ptr->qdr->qfmt=init_data->q_format;
2743 irq_ptr->qdr->iqdcnt=init_data->no_input_qs;
2744 irq_ptr->qdr->oqdcnt=init_data->no_output_qs;
2745 irq_ptr->qdr->iqdsz=sizeof(struct qdesfmt0)/4; /* size in words */
2746 irq_ptr->qdr->oqdsz=sizeof(struct qdesfmt0)/4;
2747
2748 irq_ptr->qdr->qiba=(unsigned long)&irq_ptr->qib;
2749 irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;
2750
2751 /* fill in qib */
2752 irq_ptr->qib.qfmt=init_data->q_format;
2753 if (init_data->no_input_qs)
2754 irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
2755 if (init_data->no_output_qs)
2756 irq_ptr->qib.osliba=(unsigned long)(irq_ptr->output_qs[0]->slib);
2757 memcpy(irq_ptr->qib.ebcnam,init_data->adapter_name,8);
2758
2759 qdio_set_impl_params(irq_ptr,init_data->qib_param_field_format,
2760 init_data->qib_param_field,
2761 init_data->no_input_qs,
2762 init_data->no_output_qs,
2763 init_data->input_slib_elements,
2764 init_data->output_slib_elements);
2765
2766 /* first input descriptors, then output descriptors */
2767 is_iqdio = (init_data->q_format == QDIO_IQDIO_QFMT) ? 1 : 0;
2768 for (i=0;i<init_data->no_input_qs;i++)
2769 qdio_allocate_fill_input_desc(irq_ptr, i, is_iqdio);
2770
2771 for (i=0;i<init_data->no_output_qs;i++)
2772 qdio_allocate_fill_output_desc(irq_ptr, i,
2773 init_data->no_input_qs,
2774 is_iqdio);
2775
2776 /* qdr, qib, sls, slsbs, slibs, sbales filled. */
2777
2778 /* get qdio commands */
2779 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
2780 if (!ciw) {
2781 QDIO_DBF_TEXT2(1,setup,"no eq");
2782 QDIO_PRINT_INFO("No equeue CIW found for QDIO commands. "
2783 "Trying to use default.\n");
2784 } else
2785 irq_ptr->equeue = *ciw;
2786 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
2787 if (!ciw) {
2788 QDIO_DBF_TEXT2(1,setup,"no aq");
2789 QDIO_PRINT_INFO("No aqueue CIW found for QDIO commands. "
2790 "Trying to use default.\n");
2791 } else
2792 irq_ptr->aqueue = *ciw;
2793
2794 /* Set new interrupt handler. */
2795 irq_ptr->original_int_handler = init_data->cdev->handler;
2796 init_data->cdev->handler = qdio_handler;
2797
2798 return 0;
2799}
2800
2801int
2802qdio_establish(struct qdio_initialize *init_data)
2803{
2804 struct qdio_irq *irq_ptr;
2805 unsigned long saveflags;
2806 int result, result2;
2807 struct ccw_device *cdev;
2808 char dbf_text[20];
2809
2810 cdev=init_data->cdev;
2811 irq_ptr = cdev->private->qdio_data;
2812 if (!irq_ptr)
2813 return -EINVAL;
2814
2815 if (cdev->private->state != DEV_STATE_ONLINE)
2816 return -EINVAL;
2817
2818 down(&irq_ptr->setting_up_sema);
2819
2820 qdio_fill_irq(init_data);
2821
2822 /* the thinint CHSC stuff */
2823 if (irq_ptr->is_thinint_irq) {
2824
2825 result = tiqdio_set_subchannel_ind(irq_ptr,0);
2826 if (result) {
2827 up(&irq_ptr->setting_up_sema);
2828 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
2829 return result;
2830 }
2831 tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
2832 }
2833
2834 sprintf(dbf_text,"qest%4x",cdev->private->irq);
2835 QDIO_DBF_TEXT0(0,setup,dbf_text);
2836 QDIO_DBF_TEXT0(0,trace,dbf_text);
2837
2838 /* establish q */
2839 irq_ptr->ccw.cmd_code=irq_ptr->equeue.cmd;
2840 irq_ptr->ccw.flags=CCW_FLAG_SLI;
2841 irq_ptr->ccw.count=irq_ptr->equeue.count;
2842 irq_ptr->ccw.cda=QDIO_GET_ADDR(irq_ptr->qdr);
2843
2844 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
2845
2846 ccw_device_set_options(cdev, 0);
2847 result=ccw_device_start_timeout(cdev,&irq_ptr->ccw,
2848 QDIO_DOING_ESTABLISH,0, 0,
2849 QDIO_ESTABLISH_TIMEOUT);
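	/* if the start fails, it is retried once below before giving up */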
2850 if (result) {
2851 result2=ccw_device_start_timeout(cdev,&irq_ptr->ccw,
2852 QDIO_DOING_ESTABLISH,0,0,
2853 QDIO_ESTABLISH_TIMEOUT);
2854 sprintf(dbf_text,"eq:io%4x",result);
2855 QDIO_DBF_TEXT2(1,setup,dbf_text);
2856 if (result2) {
2857			sprintf(dbf_text,"eq:io%4x",result2);
2858 QDIO_DBF_TEXT2(1,setup,dbf_text);
2859 }
2860 QDIO_PRINT_WARN("establish queues on irq %04x: do_IO " \
2861 "returned %i, next try returned %i\n",
2862 irq_ptr->irq,result,result2);
2863 result=result2;
2864 if (result)
2865 ccw_device_set_timeout(cdev, 0);
2866 }
2867
2868 spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
2869
2870 if (result) {
2871 up(&irq_ptr->setting_up_sema);
2872 qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR);
2873 return result;
2874 }
2875
2876 wait_event_interruptible_timeout(cdev->private->wait_q,
2877 irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
2878 irq_ptr->state == QDIO_IRQ_STATE_ERR,
2879 QDIO_ESTABLISH_TIMEOUT);
2880
2881 if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED)
2882 result = 0;
2883 else {
2884 up(&irq_ptr->setting_up_sema);
2885 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
2886 return -EIO;
2887 }
2888
2889 irq_ptr->qdioac=qdio_check_siga_needs(irq_ptr->irq);
2890 /* if this gets set once, we're running under VM and can omit SVSes */
2891 if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
2892 omit_svs=1;
2893
2894 sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac);
2895 QDIO_DBF_TEXT2(0,setup,dbf_text);
2896
2897 sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac);
2898 QDIO_DBF_TEXT2(0,setup,dbf_text);
2899
2900 irq_ptr->hydra_gives_outbound_pcis=
2901 irq_ptr->qib.ac&QIB_AC_OUTBOUND_PCI_SUPPORTED;
2902 irq_ptr->sync_done_on_outb_pcis=
2903 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS;
2904
2905 qdio_initialize_set_siga_flags_input(irq_ptr);
2906 qdio_initialize_set_siga_flags_output(irq_ptr);
2907
2908 up(&irq_ptr->setting_up_sema);
2909
2910 return result;
2911
2912}
2913
2914int
2915qdio_activate(struct ccw_device *cdev, int flags)
2916{
2917 struct qdio_irq *irq_ptr;
2918 int i,result=0,result2;
2919 unsigned long saveflags;
2920 char dbf_text[20]; /* see qdio_initialize */
2921
2922 irq_ptr = cdev->private->qdio_data;
2923 if (!irq_ptr)
2924 return -ENODEV;
2925
2926 if (cdev->private->state != DEV_STATE_ONLINE)
2927 return -EINVAL;
2928
2929 down(&irq_ptr->setting_up_sema);
2930 if (irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) {
2931 result=-EBUSY;
2932 goto out;
2933 }
2934
2935 sprintf(dbf_text,"qact%4x", irq_ptr->irq);
2936 QDIO_DBF_TEXT2(0,setup,dbf_text);
2937 QDIO_DBF_TEXT2(0,trace,dbf_text);
2938
2939 /* activate q */
2940 irq_ptr->ccw.cmd_code=irq_ptr->aqueue.cmd;
2941 irq_ptr->ccw.flags=CCW_FLAG_SLI;
2942 irq_ptr->ccw.count=irq_ptr->aqueue.count;
2943 irq_ptr->ccw.cda=QDIO_GET_ADDR(0);
2944
2945 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
2946
2947 ccw_device_set_timeout(cdev, 0);
2948 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
2949 result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE,
2950 0, DOIO_DENY_PREFETCH);
2951 if (result) {
2952 result2=ccw_device_start(cdev,&irq_ptr->ccw,
2953 QDIO_DOING_ACTIVATE,0,0);
2954 sprintf(dbf_text,"aq:io%4x",result);
2955 QDIO_DBF_TEXT2(1,setup,dbf_text);
2956 if (result2) {
2957			sprintf(dbf_text,"aq:io%4x",result2);
2958 QDIO_DBF_TEXT2(1,setup,dbf_text);
2959 }
2960 QDIO_PRINT_WARN("activate queues on irq %04x: do_IO " \
2961 "returned %i, next try returned %i\n",
2962 irq_ptr->irq,result,result2);
2963 result=result2;
2964 }
2965
2966 spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
2967 if (result)
2968 goto out;
2969
2970 for (i=0;i<irq_ptr->no_input_qs;i++) {
2971 if (irq_ptr->is_thinint_irq) {
2972 /*
2973 * that way we know, that, if we will get interrupted
2974 * by tiqdio_inbound_processing, qdio_unmark_q will
2975 * not be called
2976 */
2977 qdio_reserve_q(irq_ptr->input_qs[i]);
2978 qdio_mark_tiq(irq_ptr->input_qs[i]);
2979 qdio_release_q(irq_ptr->input_qs[i]);
2980 }
2981 }
2982
2983 if (flags&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) {
2984 for (i=0;i<irq_ptr->no_input_qs;i++) {
2985 irq_ptr->input_qs[i]->is_input_q|=
2986 QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT;
2987 }
2988 }
2989
2990 wait_event_interruptible_timeout(cdev->private->wait_q,
2991 ((irq_ptr->state ==
2992 QDIO_IRQ_STATE_STOPPED) ||
2993 (irq_ptr->state ==
2994 QDIO_IRQ_STATE_ERR)),
2995 QDIO_ACTIVATE_TIMEOUT);
2996
2997 switch (irq_ptr->state) {
2998 case QDIO_IRQ_STATE_STOPPED:
2999 case QDIO_IRQ_STATE_ERR:
3000 up(&irq_ptr->setting_up_sema);
3001 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3002 down(&irq_ptr->setting_up_sema);
3003 result = -EIO;
3004 break;
3005 default:
3006 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
3007 result = 0;
3008 }
3009 out:
3010 up(&irq_ptr->setting_up_sema);
3011
3012 return result;
3013}
3014
3015/* buffers filled forwards again to make Rick happy */
3016static inline void
3017qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3018 unsigned int count, struct qdio_buffer *buffers)
3019{
3020 for (;;) {
3021 set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_INPUT_EMPTY);
3022 count--;
3023 if (!count) break;
3024 qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
3025 }
3026
3027 /* not necessary, as the queues are synced during the SIGA read */
3028 /*SYNC_MEMORY;*/
3029}
3030
3031static inline void
3032qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3033 unsigned int count, struct qdio_buffer *buffers)
3034{
3035 for (;;) {
3036 set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_OUTPUT_PRIMED);
3037 count--;
3038 if (!count) break;
3039 qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
3040 }
3041
3042 /* SIGA write will sync the queues */
3043 /*SYNC_MEMORY;*/
3044}
3045
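/* inbound direction of do_QDIO: mark "count" buffers starting at qidx
 * as empty for the adapter; a SIGA read is only issued if the queue
 * was completely empty before (and QDIO_FLAG_DONT_SIGA is not set) */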
3046static inline void
3047do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
3048 unsigned int qidx, unsigned int count,
3049 struct qdio_buffer *buffers)
3050{
3051 int used_elements;
3052
3053 /* This is the inbound handling of queues */
3054 used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
3055
3056 qdio_do_qdio_fill_input(q,qidx,count,buffers);
3057
3058 if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
3059 (callflags&QDIO_FLAG_UNDER_INTERRUPT))
3060 atomic_swap(&q->polling,0);
3061
3062 if (used_elements)
3063 return;
3064 if (callflags&QDIO_FLAG_DONT_SIGA)
3065 return;
3066 if (q->siga_in) {
3067 int result;
3068
3069 result=qdio_siga_input(q);
3070 if (result) {
3071 if (q->siga_error)
3072 q->error_status_flags|=
3073 QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
3074 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
3075 q->siga_error=result;
3076 }
3077 }
3078
3079 qdio_mark_q(q);
3080}
3081
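/* outbound direction of do_QDIO: prime "count" buffers for the
 * adapter; iqdio issues one SIGA per SBAL, otherwise a SIGA is only
 * needed when the previously used buffer is no longer primed */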
3082static inline void
3083do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3084 unsigned int qidx, unsigned int count,
3085 struct qdio_buffer *buffers)
3086{
3087 int used_elements;
3088
3089 /* This is the outbound handling of queues */
3090#ifdef QDIO_PERFORMANCE_STATS
3091 perf_stats.start_time_outbound=NOW;
3092#endif /* QDIO_PERFORMANCE_STATS */
3093
3094 qdio_do_qdio_fill_output(q,qidx,count,buffers);
3095
3096 used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
3097
3098 if (callflags&QDIO_FLAG_DONT_SIGA) {
3099#ifdef QDIO_PERFORMANCE_STATS
3100 perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
3101 perf_stats.outbound_cnt++;
3102#endif /* QDIO_PERFORMANCE_STATS */
3103 return;
3104 }
3105 if (q->is_iqdio_q) {
3106 /* one siga for every sbal */
3107 while (count--)
3108 qdio_kick_outbound_q(q);
3109
3110 __qdio_outbound_processing(q);
3111 } else {
3112		/* under VM, we do a SIGA sync unconditionally
3113		 * (this is SYNC_MEMORY spelled out, so that the
3114		 * else branch below is visible) */
3115		if (unlikely(q->siga_sync))
3116			qdio_siga_sync_q(q);
3117		else {
3118			/* w/o shadow queues (no SIGA sync needed),
3119			 * we try to fast-requeue buffers */
3120 if (q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
3121 &(QDIO_MAX_BUFFERS_PER_Q-1)]!=
3122 SLSB_CU_OUTPUT_PRIMED) {
3123 qdio_kick_outbound_q(q);
3124 } else {
3125 QDIO_DBF_TEXT3(0,trace, "fast-req");
3126#ifdef QDIO_PERFORMANCE_STATS
3127 perf_stats.fast_reqs++;
3128#endif /* QDIO_PERFORMANCE_STATS */
3129 }
3130 }
3131 /*
3132 * only marking the q could take too long,
3133 * the upper layer module could do a lot of
3134 * traffic in that time
3135 */
3136 __qdio_outbound_processing(q);
3137 }
3138
3139#ifdef QDIO_PERFORMANCE_STATS
3140 perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
3141 perf_stats.outbound_cnt++;
3142#endif /* QDIO_PERFORMANCE_STATS */
3143}
3144
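/*
 * do_QDIO -- hand "count" buffers starting at qidx on the given queue
 * to the adapter.  A hypothetical caller sketch (names invented, not
 * from this driver):
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, 0, idx, 1, bufs);
 *
 * callflags must contain QDIO_FLAG_SYNC_INPUT or QDIO_FLAG_SYNC_OUTPUT
 * to select the direction.
 */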
3145/* count must be 1 in iqdio */
3146int
3147do_QDIO(struct ccw_device *cdev,unsigned int callflags,
3148 unsigned int queue_number, unsigned int qidx,
3149 unsigned int count,struct qdio_buffer *buffers)
3150{
3151 struct qdio_irq *irq_ptr;
3152#ifdef CONFIG_QDIO_DEBUG
3153 char dbf_text[20];
3154
3155 sprintf(dbf_text,"doQD%04x",cdev->private->irq);
3156 QDIO_DBF_TEXT3(0,trace,dbf_text);
3157#endif /* CONFIG_QDIO_DEBUG */
3158
3159	if ( (qidx>=QDIO_MAX_BUFFERS_PER_Q) ||
3160	     (count>QDIO_MAX_BUFFERS_PER_Q) ||
3161	     (queue_number>=QDIO_MAX_QUEUES_PER_IRQ) )
3162 return -EINVAL;
3163
3164 if (count==0)
3165 return 0;
3166
3167 irq_ptr = cdev->private->qdio_data;
3168 if (!irq_ptr)
3169 return -ENODEV;
3170
3171#ifdef CONFIG_QDIO_DEBUG
3172 if (callflags&QDIO_FLAG_SYNC_INPUT)
3173 QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number],
3174 sizeof(void*));
3175 else
3176 QDIO_DBF_HEX3(0,trace,&irq_ptr->output_qs[queue_number],
3177 sizeof(void*));
3178 sprintf(dbf_text,"flag%04x",callflags);
3179 QDIO_DBF_TEXT3(0,trace,dbf_text);
3180 sprintf(dbf_text,"qi%02xct%02x",qidx,count);
3181 QDIO_DBF_TEXT3(0,trace,dbf_text);
3182#endif /* CONFIG_QDIO_DEBUG */
3183
3184 if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE)
3185 return -EBUSY;
3186
3187 if (callflags&QDIO_FLAG_SYNC_INPUT)
3188 do_qdio_handle_inbound(irq_ptr->input_qs[queue_number],
3189 callflags, qidx, count, buffers);
3190 else if (callflags&QDIO_FLAG_SYNC_OUTPUT)
3191 do_qdio_handle_outbound(irq_ptr->output_qs[queue_number],
3192 callflags, qidx, count, buffers);
3193 else {
3194 QDIO_DBF_TEXT3(1,trace,"doQD:inv");
3195 return -EINVAL;
3196 }
3197 return 0;
3198}
3199
3200#ifdef QDIO_PERFORMANCE_STATS
3201static int
3202qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
3203 int buffer_length, int *eof, void *data)
3204{
3205 int c=0;
3206
3207	/* we are always called with buffer_length=4k, so we
3208	   deliver everything on the first read */
3209 if (offset>0)
3210 return 0;
3211
3212#define _OUTP_IT(x...) c+=sprintf(buffer+c,x)
3213 _OUTP_IT("i_p_nc/c=%lu/%lu\n",i_p_nc,i_p_c);
3214 _OUTP_IT("ii_p_nc/c=%lu/%lu\n",ii_p_nc,ii_p_c);
3215 _OUTP_IT("o_p_nc/c=%lu/%lu\n",o_p_nc,o_p_c);
3216 _OUTP_IT("Number of tasklet runs (total) : %u\n",
3217 perf_stats.tl_runs);
3218 _OUTP_IT("\n");
3219 _OUTP_IT("Number of SIGA sync's issued : %u\n",
3220 perf_stats.siga_syncs);
3221 _OUTP_IT("Number of SIGA in's issued : %u\n",
3222 perf_stats.siga_ins);
3223 _OUTP_IT("Number of SIGA out's issued : %u\n",
3224 perf_stats.siga_outs);
3225 _OUTP_IT("Number of PCIs caught : %u\n",
3226 perf_stats.pcis);
3227 _OUTP_IT("Number of adapter interrupts caught : %u\n",
3228 perf_stats.thinints);
3229 _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %u\n",
3230 perf_stats.fast_reqs);
3231 _OUTP_IT("\n");
3232 _OUTP_IT("Total time of all inbound actions (us) incl. UL : %u\n",
3233 perf_stats.inbound_time);
3234 _OUTP_IT("Number of inbound transfers : %u\n",
3235 perf_stats.inbound_cnt);
3236 _OUTP_IT("Total time of all outbound do_QDIOs (us) : %u\n",
3237 perf_stats.outbound_time);
3238 _OUTP_IT("Number of do_QDIOs outbound : %u\n",
3239 perf_stats.outbound_cnt);
3240 _OUTP_IT("\n");
3241
3242 return c;
3243}
3244
3245static struct proc_dir_entry *qdio_perf_proc_file;
3246#endif /* QDIO_PERFORMANCE_STATS */
3247
3248static void
3249qdio_add_procfs_entry(void)
3250{
3251#ifdef QDIO_PERFORMANCE_STATS
3252 proc_perf_file_registration=0;
3253 qdio_perf_proc_file=create_proc_entry(QDIO_PERF,
3254 S_IFREG|0444,&proc_root);
3255 if (qdio_perf_proc_file) {
3256 qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read;
3257 } else proc_perf_file_registration=-1;
3258
3259 if (proc_perf_file_registration)
3260 QDIO_PRINT_WARN("was not able to register perf. " \
3261 "proc-file (%i).\n",
3262 proc_perf_file_registration);
3263#endif /* QDIO_PERFORMANCE_STATS */
3264}
3265
3266static void
3267qdio_remove_procfs_entry(void)
3268{
3269#ifdef QDIO_PERFORMANCE_STATS
3270 perf_stats.tl_runs=0;
3271
3272 if (!proc_perf_file_registration) /* means if it went ok earlier */
3273 remove_proc_entry(QDIO_PERF,&proc_root);
3274#endif /* QDIO_PERFORMANCE_STATS */
3275}
3276
3277static void
3278tiqdio_register_thinints(void)
3279{
3280 char dbf_text[20];
3281 register_thinint_result=
3282 s390_register_adapter_interrupt(&tiqdio_thinint_handler);
3283 if (register_thinint_result) {
3284 sprintf(dbf_text,"regthn%x",(register_thinint_result&0xff));
3285 QDIO_DBF_TEXT0(0,setup,dbf_text);
3286 QDIO_PRINT_ERR("failed to register adapter handler " \
3287 "(rc=%i).\nAdapter interrupts might " \
3288 "not work. Continuing.\n",
3289 register_thinint_result);
3290 }
3291}
3292
3293static void
3294tiqdio_unregister_thinints(void)
3295{
3296 if (!register_thinint_result)
3297 s390_unregister_adapter_interrupt(&tiqdio_thinint_handler);
3298}
3299
3300static int
3301qdio_get_qdio_memory(void)
3302{
3303 int i;
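	/* indicator 0 doubles as the local summary bit (see
	 * tiqdio_set_subchannel_ind), so mark it used up front */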
3304 indicator_used[0]=1;
3305
3306 for (i=1;i<INDICATORS_PER_CACHELINE;i++)
3307 indicator_used[i]=0;
3308 indicators=(__u32*)kmalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE),
3309 GFP_KERNEL);
3310 if (!indicators) return -ENOMEM;
3311 memset(indicators,0,sizeof(__u32)*(INDICATORS_PER_CACHELINE));
3312 return 0;
3313}
3314
3315static void
3316qdio_release_qdio_memory(void)
3317{
3318	/* kfree(NULL) is a no-op */
3319	kfree(indicators);
3320}
3321
3322static void
3323qdio_unregister_dbf_views(void)
3324{
3325 if (qdio_dbf_setup)
3326 debug_unregister(qdio_dbf_setup);
3327 if (qdio_dbf_sbal)
3328 debug_unregister(qdio_dbf_sbal);
3329 if (qdio_dbf_sense)
3330 debug_unregister(qdio_dbf_sense);
3331 if (qdio_dbf_trace)
3332 debug_unregister(qdio_dbf_trace);
3333#ifdef CONFIG_QDIO_DEBUG
3334 if (qdio_dbf_slsb_out)
3335 debug_unregister(qdio_dbf_slsb_out);
3336 if (qdio_dbf_slsb_in)
3337 debug_unregister(qdio_dbf_slsb_in);
3338#endif /* CONFIG_QDIO_DEBUG */
3339}
3340
3341static int
3342qdio_register_dbf_views(void)
3343{
3344 qdio_dbf_setup=debug_register(QDIO_DBF_SETUP_NAME,
3345 QDIO_DBF_SETUP_INDEX,
3346 QDIO_DBF_SETUP_NR_AREAS,
3347 QDIO_DBF_SETUP_LEN);
3348 if (!qdio_dbf_setup)
3349 goto oom;
3350 debug_register_view(qdio_dbf_setup,&debug_hex_ascii_view);
3351 debug_set_level(qdio_dbf_setup,QDIO_DBF_SETUP_LEVEL);
3352
3353 qdio_dbf_sbal=debug_register(QDIO_DBF_SBAL_NAME,
3354 QDIO_DBF_SBAL_INDEX,
3355 QDIO_DBF_SBAL_NR_AREAS,
3356 QDIO_DBF_SBAL_LEN);
3357 if (!qdio_dbf_sbal)
3358 goto oom;
3359
3360 debug_register_view(qdio_dbf_sbal,&debug_hex_ascii_view);
3361 debug_set_level(qdio_dbf_sbal,QDIO_DBF_SBAL_LEVEL);
3362
3363 qdio_dbf_sense=debug_register(QDIO_DBF_SENSE_NAME,
3364 QDIO_DBF_SENSE_INDEX,
3365 QDIO_DBF_SENSE_NR_AREAS,
3366 QDIO_DBF_SENSE_LEN);
3367 if (!qdio_dbf_sense)
3368 goto oom;
3369
3370 debug_register_view(qdio_dbf_sense,&debug_hex_ascii_view);
3371 debug_set_level(qdio_dbf_sense,QDIO_DBF_SENSE_LEVEL);
3372
3373 qdio_dbf_trace=debug_register(QDIO_DBF_TRACE_NAME,
3374 QDIO_DBF_TRACE_INDEX,
3375 QDIO_DBF_TRACE_NR_AREAS,
3376 QDIO_DBF_TRACE_LEN);
3377 if (!qdio_dbf_trace)
3378 goto oom;
3379
3380 debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view);
3381 debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL);
3382
3383#ifdef CONFIG_QDIO_DEBUG
3384 qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME,
3385 QDIO_DBF_SLSB_OUT_INDEX,
3386 QDIO_DBF_SLSB_OUT_NR_AREAS,
3387 QDIO_DBF_SLSB_OUT_LEN);
3388 if (!qdio_dbf_slsb_out)
3389 goto oom;
3390 debug_register_view(qdio_dbf_slsb_out,&debug_hex_ascii_view);
3391 debug_set_level(qdio_dbf_slsb_out,QDIO_DBF_SLSB_OUT_LEVEL);
3392
3393 qdio_dbf_slsb_in=debug_register(QDIO_DBF_SLSB_IN_NAME,
3394 QDIO_DBF_SLSB_IN_INDEX,
3395 QDIO_DBF_SLSB_IN_NR_AREAS,
3396 QDIO_DBF_SLSB_IN_LEN);
3397 if (!qdio_dbf_slsb_in)
3398 goto oom;
3399 debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view);
3400 debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL);
3401#endif /* CONFIG_QDIO_DEBUG */
3402 return 0;
3403oom:
3404 QDIO_PRINT_ERR("not enough memory for dbf.\n");
3405 qdio_unregister_dbf_views();
3406 return -ENOMEM;
3407}
3408
3409static int __init
3410init_QDIO(void)
3411{
3412 int res;
3413#ifdef QDIO_PERFORMANCE_STATS
3414 void *ptr;
3415#endif /* QDIO_PERFORMANCE_STATS */
3416
3417 printk("qdio: loading %s\n",version);
3418
3419 res=qdio_get_qdio_memory();
3420 if (res)
3421 return res;
3422
3423 res = qdio_register_dbf_views();
3424 if (res)
3425 return res;
3426
3427 QDIO_DBF_TEXT0(0,setup,"initQDIO");
3428
3429#ifdef QDIO_PERFORMANCE_STATS
3430 memset((void*)&perf_stats,0,sizeof(perf_stats));
3431 QDIO_DBF_TEXT0(0,setup,"perfstat");
3432 ptr=&perf_stats;
3433 QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*));
3434#endif /* QDIO_PERFORMANCE_STATS */
3435
3436 qdio_add_procfs_entry();
3437
3438 if (tiqdio_check_chsc_availability())
3439 QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n");
3440
3441 tiqdio_register_thinints();
3442
3443 return 0;
3444}
3445
3446static void __exit
3447cleanup_QDIO(void)
3448{
3449 tiqdio_unregister_thinints();
3450 qdio_remove_procfs_entry();
3451 qdio_release_qdio_memory();
3452 qdio_unregister_dbf_views();
3453
3454 printk("qdio: %s: module removed\n",version);
3455}
3456
3457module_init(init_QDIO);
3458module_exit(cleanup_QDIO);
3459
3460EXPORT_SYMBOL(qdio_allocate);
3461EXPORT_SYMBOL(qdio_establish);
3462EXPORT_SYMBOL(qdio_initialize);
3463EXPORT_SYMBOL(qdio_activate);
3464EXPORT_SYMBOL(do_QDIO);
3465EXPORT_SYMBOL(qdio_shutdown);
3466EXPORT_SYMBOL(qdio_free);
3467EXPORT_SYMBOL(qdio_cleanup);
3468EXPORT_SYMBOL(qdio_synchronize);
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
new file mode 100644
index 000000000000..9ad14db24143
--- /dev/null
+++ b/drivers/s390/cio/qdio.h
@@ -0,0 +1,648 @@
1#ifndef _CIO_QDIO_H
2#define _CIO_QDIO_H
3
4#define VERSION_CIO_QDIO_H "$Revision: 1.26 $"
5
6#ifdef CONFIG_QDIO_DEBUG
7#define QDIO_VERBOSE_LEVEL 9
8#else /* CONFIG_QDIO_DEBUG */
9#define QDIO_VERBOSE_LEVEL 5
10#endif /* CONFIG_QDIO_DEBUG */
11
12#define QDIO_USE_PROCESSING_STATE
13
14#ifdef CONFIG_QDIO_PERF_STATS
15#define QDIO_PERFORMANCE_STATS
16#endif /* CONFIG_QDIO_PERF_STATS */
17
18#define QDIO_MINIMAL_BH_RELIEF_TIME 16
19#define QDIO_TIMER_POLL_VALUE 1
20#define IQDIO_TIMER_POLL_VALUE 1
21
22/*
23 * unfortunately this can't be (QDIO_MAX_BUFFERS_PER_Q*4/3) or so -- as
24 * we never know whether we'll get initiative again, e.g. to give the
25 * transmit skb's back to the stack, however the stack may be waiting for
26 * them... therefore we define 4 as threshold to start polling (which
27 * will stop as soon as the asynchronous queue catches up)
28 * btw, this only applies to the asynchronous HiperSockets queue
29 */
30#define IQDIO_FILL_LEVEL_TO_POLL 4
31
32#define TIQDIO_THININT_ISC 3
33#define TIQDIO_DELAY_TARGET 0
34#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */
35#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */
36#define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used as we */
37#define IQDIO_GLOBAL_LAPS_INT 1 /* don't use the global summary */
38#define IQDIO_LOCAL_LAPS 4
39#define IQDIO_LOCAL_LAPS_INT 1
40#define IQDIO_GLOBAL_SUMMARY_CC_MASK 2
41/*#define IQDIO_IQDC_INT_PARM 0x1234*/
42
43#define QDIO_Q_LAPS 5
44
45#define QDIO_STORAGE_KEY 0
46
47#define L2_CACHELINE_SIZE 256
48#define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE/sizeof(__u32))
49
50#define QDIO_PERF "qdio_perf"
51
52/* must be a power of 2 */
53/*#define QDIO_STATS_NUMBER 4
54
55#define QDIO_STATS_CLASSES 2
56#define QDIO_STATS_COUNT_NEEDED 2*/
57
58#define QDIO_NO_USE_COUNT_TIMEOUT (1*HZ) /* wait for 1 sec on each q before
59					    giving up waiting for the queue's
60					    use_count to reach 0 */
61
62#define QDIO_ESTABLISH_TIMEOUT (1*HZ)
63#define QDIO_ACTIVATE_TIMEOUT ((5*HZ)>>10)
64#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ)
65#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ)
66
67enum qdio_irq_states {
68 QDIO_IRQ_STATE_INACTIVE,
69 QDIO_IRQ_STATE_ESTABLISHED,
70 QDIO_IRQ_STATE_ACTIVE,
71 QDIO_IRQ_STATE_STOPPED,
72 QDIO_IRQ_STATE_CLEANUP,
73 QDIO_IRQ_STATE_ERR,
74 NR_QDIO_IRQ_STATES,
75};
76
77/* used as intparm in do_IO: */
78#define QDIO_DOING_SENSEID 0
79#define QDIO_DOING_ESTABLISH 1
80#define QDIO_DOING_ACTIVATE 2
81#define QDIO_DOING_CLEANUP 3
82
83/************************* DEBUG FACILITY STUFF *********************/
84
85#define QDIO_DBF_HEX(ex,name,level,addr,len) \
86 do { \
87 if (ex) \
88 debug_exception(qdio_dbf_##name,level,(void*)(addr),len); \
89 else \
90 debug_event(qdio_dbf_##name,level,(void*)(addr),len); \
91 } while (0)
92#define QDIO_DBF_TEXT(ex,name,level,text) \
93 do { \
94 if (ex) \
95 debug_text_exception(qdio_dbf_##name,level,text); \
96 else \
97 debug_text_event(qdio_dbf_##name,level,text); \
98 } while (0)
99
100
101#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len)
102#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len)
103#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len)
104#ifdef CONFIG_QDIO_DEBUG
105#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len)
106#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len)
107#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len)
108#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len)
109#else /* CONFIG_QDIO_DEBUG */
110#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0)
111#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0)
112#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0)
113#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0)
114#endif /* CONFIG_QDIO_DEBUG */
115
116#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text)
117#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text)
118#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text)
119#ifdef CONFIG_QDIO_DEBUG
120#define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text)
121#define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text)
122#define QDIO_DBF_TEXT5(ex,name,text) QDIO_DBF_TEXT(ex,name,5,text)
123#define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text)
124#else /* CONFIG_QDIO_DEBUG */
125#define QDIO_DBF_TEXT3(ex,name,text) do {} while (0)
126#define QDIO_DBF_TEXT4(ex,name,text) do {} while (0)
127#define QDIO_DBF_TEXT5(ex,name,text) do {} while (0)
128#define QDIO_DBF_TEXT6(ex,name,text) do {} while (0)
129#endif /* CONFIG_QDIO_DEBUG */
130
131#define QDIO_DBF_SETUP_NAME "qdio_setup"
132#define QDIO_DBF_SETUP_LEN 8
133#define QDIO_DBF_SETUP_INDEX 2
134#define QDIO_DBF_SETUP_NR_AREAS 1
135#ifdef CONFIG_QDIO_DEBUG
136#define QDIO_DBF_SETUP_LEVEL 6
137#else /* CONFIG_QDIO_DEBUG */
138#define QDIO_DBF_SETUP_LEVEL 2
139#endif /* CONFIG_QDIO_DEBUG */
140
141#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */
142#define QDIO_DBF_SBAL_LEN 256
143#define QDIO_DBF_SBAL_INDEX 2
144#define QDIO_DBF_SBAL_NR_AREAS 2
145#ifdef CONFIG_QDIO_DEBUG
146#define QDIO_DBF_SBAL_LEVEL 6
147#else /* CONFIG_QDIO_DEBUG */
148#define QDIO_DBF_SBAL_LEVEL 2
149#endif /* CONFIG_QDIO_DEBUG */
150
151#define QDIO_DBF_TRACE_NAME "qdio_trace"
152#define QDIO_DBF_TRACE_LEN 8
153#define QDIO_DBF_TRACE_NR_AREAS 2
154#ifdef CONFIG_QDIO_DEBUG
155#define QDIO_DBF_TRACE_INDEX 4
156#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */
157#else /* CONFIG_QDIO_DEBUG */
158#define QDIO_DBF_TRACE_INDEX 2
159#define QDIO_DBF_TRACE_LEVEL 2
160#endif /* CONFIG_QDIO_DEBUG */
161
162#define QDIO_DBF_SENSE_NAME "qdio_sense"
163#define QDIO_DBF_SENSE_LEN 64
164#define QDIO_DBF_SENSE_INDEX 1
165#define QDIO_DBF_SENSE_NR_AREAS 1
166#ifdef CONFIG_QDIO_DEBUG
167#define QDIO_DBF_SENSE_LEVEL 6
168#else /* CONFIG_QDIO_DEBUG */
169#define QDIO_DBF_SENSE_LEVEL 2
170#endif /* CONFIG_QDIO_DEBUG */
171
172#ifdef CONFIG_QDIO_DEBUG
173#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT
174
175#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out"
176#define QDIO_DBF_SLSB_OUT_LEN QDIO_MAX_BUFFERS_PER_Q
177#define QDIO_DBF_SLSB_OUT_INDEX 8
178#define QDIO_DBF_SLSB_OUT_NR_AREAS 1
179#define QDIO_DBF_SLSB_OUT_LEVEL 6
180
181#define QDIO_DBF_SLSB_IN_NAME "qdio_slsb_in"
182#define QDIO_DBF_SLSB_IN_LEN QDIO_MAX_BUFFERS_PER_Q
183#define QDIO_DBF_SLSB_IN_INDEX 8
184#define QDIO_DBF_SLSB_IN_NR_AREAS 1
185#define QDIO_DBF_SLSB_IN_LEVEL 6
186#endif /* CONFIG_QDIO_DEBUG */
187
188#define QDIO_PRINTK_HEADER QDIO_NAME ": "
189
190#if QDIO_VERBOSE_LEVEL>8
191#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x)
192#else
193#define QDIO_PRINT_STUPID(x...)
194#endif
195
196#if QDIO_VERBOSE_LEVEL>7
197#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x)
198#else
199#define QDIO_PRINT_ALL(x...)
200#endif
201
202#if QDIO_VERBOSE_LEVEL>6
203#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x)
204#else
205#define QDIO_PRINT_INFO(x...)
206#endif
207
208#if QDIO_VERBOSE_LEVEL>5
209#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x)
210#else
211#define QDIO_PRINT_WARN(x...)
212#endif
213
214#if QDIO_VERBOSE_LEVEL>4
215#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x)
216#else
217#define QDIO_PRINT_ERR(x...)
218#endif
219
220#if QDIO_VERBOSE_LEVEL>3
221#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x)
222#else
223#define QDIO_PRINT_CRIT(x...)
224#endif
225
226#if QDIO_VERBOSE_LEVEL>2
227#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x)
228#else
229#define QDIO_PRINT_ALERT(x...)
230#endif
231
232#if QDIO_VERBOSE_LEVEL>1
233#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x)
234#else
235#define QDIO_PRINT_EMERG(x...)
236#endif
237
238#define HEXDUMP16(importance,header,ptr) \
239QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \
240 "%02x %02x %02x %02x %02x %02x %02x %02x " \
241 "%02x %02x %02x %02x\n",*(((char*)ptr)), \
242 *(((char*)ptr)+1),*(((char*)ptr)+2), \
243 *(((char*)ptr)+3),*(((char*)ptr)+4), \
244 *(((char*)ptr)+5),*(((char*)ptr)+6), \
245 *(((char*)ptr)+7),*(((char*)ptr)+8), \
246 *(((char*)ptr)+9),*(((char*)ptr)+10), \
247 *(((char*)ptr)+11),*(((char*)ptr)+12), \
248 *(((char*)ptr)+13),*(((char*)ptr)+14), \
249 *(((char*)ptr)+15)); \
250QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
251 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
252 *(((char*)ptr)+16),*(((char*)ptr)+17), \
253 *(((char*)ptr)+18),*(((char*)ptr)+19), \
254 *(((char*)ptr)+20),*(((char*)ptr)+21), \
255 *(((char*)ptr)+22),*(((char*)ptr)+23), \
256 *(((char*)ptr)+24),*(((char*)ptr)+25), \
257 *(((char*)ptr)+26),*(((char*)ptr)+27), \
258 *(((char*)ptr)+28),*(((char*)ptr)+29), \
259 *(((char*)ptr)+30),*(((char*)ptr)+31));
260
261/****************** END OF DEBUG FACILITY STUFF *********************/
262
263/*
264 * Some instructions as assembly
265 */
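/*
 * All three SIGA flavours below load a function code into gpr 0
 * (0 = output/write, 1 = input/read, 2 = sync), the subchannel number
 * (0x10000 | irq) into gpr 1 and the queue mask(s) into gpr 2 (and,
 * for sync, gpr 3), then extract the condition code via ipm/srl.
 */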
266extern __inline__ int
267do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2)
268{
269 int cc;
270
271#ifndef CONFIG_ARCH_S390X
272 asm volatile (
273 "lhi 0,2 \n\t"
274 "lr 1,%1 \n\t"
275 "lr 2,%2 \n\t"
276 "lr 3,%3 \n\t"
277 "siga 0 \n\t"
278 "ipm %0 \n\t"
279 "srl %0,28 \n\t"
280 : "=d" (cc)
281 : "d" (0x10000|irq), "d" (mask1), "d" (mask2)
282 : "cc", "0", "1", "2", "3"
283 );
284#else /* CONFIG_ARCH_S390X */
285 asm volatile (
286 "lghi 0,2 \n\t"
287 "llgfr 1,%1 \n\t"
288 "llgfr 2,%2 \n\t"
289 "llgfr 3,%3 \n\t"
290 "siga 0 \n\t"
291 "ipm %0 \n\t"
292 "srl %0,28 \n\t"
293 : "=d" (cc)
294 : "d" (0x10000|irq), "d" (mask1), "d" (mask2)
295 : "cc", "0", "1", "2", "3"
296 );
297#endif /* CONFIG_ARCH_S390X */
298 return cc;
299}
300
301extern __inline__ int
302do_siga_input(unsigned int irq, unsigned int mask)
303{
304 int cc;
305
306#ifndef CONFIG_ARCH_S390X
307 asm volatile (
308 "lhi 0,1 \n\t"
309 "lr 1,%1 \n\t"
310 "lr 2,%2 \n\t"
311 "siga 0 \n\t"
312 "ipm %0 \n\t"
313 "srl %0,28 \n\t"
314 : "=d" (cc)
315 : "d" (0x10000|irq), "d" (mask)
316 : "cc", "0", "1", "2", "memory"
317 );
318#else /* CONFIG_ARCH_S390X */
319 asm volatile (
320 "lghi 0,1 \n\t"
321 "llgfr 1,%1 \n\t"
322 "llgfr 2,%2 \n\t"
323 "siga 0 \n\t"
324 "ipm %0 \n\t"
325 "srl %0,28 \n\t"
326 : "=d" (cc)
327 : "d" (0x10000|irq), "d" (mask)
328 : "cc", "0", "1", "2", "memory"
329 );
330#endif /* CONFIG_ARCH_S390X */
331
332 return cc;
333}
334
335extern __inline__ int
336do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb)
337{
338 int cc;
339 __u32 busy_bit;
340
341#ifndef CONFIG_ARCH_S390X
342 asm volatile (
343 "lhi 0,0 \n\t"
344 "lr 1,%2 \n\t"
345 "lr 2,%3 \n\t"
346 "siga 0 \n\t"
347 "0:"
348 "ipm %0 \n\t"
349 "srl %0,28 \n\t"
350 "srl 0,31 \n\t"
351 "lr %1,0 \n\t"
352 "1: \n\t"
353 ".section .fixup,\"ax\"\n\t"
354 "2: \n\t"
355 "lhi %0,%4 \n\t"
356 "bras 1,3f \n\t"
357 ".long 1b \n\t"
358 "3: \n\t"
359 "l 1,0(1) \n\t"
360 "br 1 \n\t"
361 ".previous \n\t"
362 ".section __ex_table,\"a\"\n\t"
363 ".align 4 \n\t"
364 ".long 0b,2b \n\t"
365 ".previous \n\t"
366 : "=d" (cc), "=d" (busy_bit)
367 : "d" (0x10000|irq), "d" (mask),
368 "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
369 : "cc", "0", "1", "2", "memory"
370 );
371#else /* CONFIG_ARCH_S390X */
372 asm volatile (
373 "lghi 0,0 \n\t"
374 "llgfr 1,%2 \n\t"
375 "llgfr 2,%3 \n\t"
376 "siga 0 \n\t"
377 "0:"
378 "ipm %0 \n\t"
379 "srl %0,28 \n\t"
380 "srl 0,31 \n\t"
381 "llgfr %1,0 \n\t"
382 "1: \n\t"
383 ".section .fixup,\"ax\"\n\t"
384 "lghi %0,%4 \n\t"
385 "jg 1b \n\t"
386 ".previous\n\t"
387 ".section __ex_table,\"a\"\n\t"
388 ".align 8 \n\t"
389 ".quad 0b,1b \n\t"
390 ".previous \n\t"
391 : "=d" (cc), "=d" (busy_bit)
392 : "d" (0x10000|irq), "d" (mask),
393 "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
394 : "cc", "0", "1", "2", "memory"
395 );
396#endif /* CONFIG_ARCH_S390X */
397
398 (*bb) = busy_bit;
399 return cc;
400}
401
402extern __inline__ unsigned long
403do_clear_global_summary(void)
404{
405
406 unsigned long time;
407
408#ifndef CONFIG_ARCH_S390X
409 asm volatile (
410 "lhi 1,3 \n\t"
411 ".insn rre,0xb2650000,2,0 \n\t"
412 "lr %0,3 \n\t"
413 : "=d" (time) : : "cc", "1", "2", "3"
414 );
415#else /* CONFIG_ARCH_S390X */
416 asm volatile (
417 "lghi 1,3 \n\t"
418 ".insn rre,0xb2650000,2,0 \n\t"
419 "lgr %0,3 \n\t"
420 : "=d" (time) : : "cc", "1", "2", "3"
421 );
422#endif /* CONFIG_ARCH_S390X */
423
424 return time;
425}
426
427/*
428 * QDIO device commands returned by extended Sense-ID
429 */
430#define DEFAULT_ESTABLISH_QS_CMD 0x1b
431#define DEFAULT_ESTABLISH_QS_COUNT 0x1000
432#define DEFAULT_ACTIVATE_QS_CMD 0x1f
433#define DEFAULT_ACTIVATE_QS_COUNT 0
434
435/*
436 * additional CIWs returned by extended Sense-ID
437 */
438#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
439#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
440
441#define QDIO_CHSC_RESPONSE_CODE_OK 1
442/* flags for st qdio sch data */
443#define CHSC_FLAG_QDIO_CAPABILITY 0x80
444#define CHSC_FLAG_VALIDITY 0x40
445
446#define CHSC_FLAG_SIGA_INPUT_NECESSARY 0x40
447#define CHSC_FLAG_SIGA_OUTPUT_NECESSARY 0x20
448#define CHSC_FLAG_SIGA_SYNC_NECESSARY 0x10
449#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08
450#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04
451
452#ifdef QDIO_PERFORMANCE_STATS
453struct qdio_perf_stats {
454 unsigned int tl_runs;
455
456 unsigned int siga_outs;
457 unsigned int siga_ins;
458 unsigned int siga_syncs;
459 unsigned int pcis;
460 unsigned int thinints;
461 unsigned int fast_reqs;
462
463 __u64 start_time_outbound;
464 unsigned int outbound_cnt;
465 unsigned int outbound_time;
466 __u64 start_time_inbound;
467 unsigned int inbound_cnt;
468 unsigned int inbound_time;
469};
470#endif /* QDIO_PERFORMANCE_STATS */
471
472#define atomic_swap(a,b) xchg((int*)a.counter,b)
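/* relies on textual substitution: a must be the address of an atomic_t,
 * e.g. atomic_swap(&q->polling,0) becomes xchg((int*)&q->polling.counter,0) */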
473
474/* unlikely(): keep the siga-sync case out of the fast path */
475#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q)
476#define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \
477 qdio_siga_sync(q,~0U,~0U)
478#define SYNC_MEMORY_ALL_OUTB if (unlikely(q->siga_sync)) \
479 qdio_siga_sync(q,~0U,0)
480
481#define NOW qdio_get_micros()
482#define SAVE_TIMESTAMP(q) q->timing.last_transfer_time=NOW
483#define GET_SAVED_TIMESTAMP(q) (q->timing.last_transfer_time)
484#define SAVE_FRONTIER(q,val) q->last_move_ftc=val
485#define GET_SAVED_FRONTIER(q) (q->last_move_ftc)
486
487#define MY_MODULE_STRING(x) #x
488
489#ifdef CONFIG_ARCH_S390X
490#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x)
491#else /* CONFIG_ARCH_S390X */
492#define QDIO_GET_ADDR(x) ((__u32)(long)x)
493#endif /* CONFIG_ARCH_S390X */
494
495#ifdef CONFIG_QDIO_DEBUG
496#define set_slsb(x,y) \
497 if(q->queue_type==QDIO_TRACE_QTYPE) { \
498 if(q->is_input_q) { \
499 QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
500 } else { \
501 QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
502 } \
503 } \
504 qdio_set_slsb(x,y); \
505 if(q->queue_type==QDIO_TRACE_QTYPE) { \
506 if(q->is_input_q) { \
507 QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
508 } else { \
509 QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
510 } \
511 }
512#else /* CONFIG_QDIO_DEBUG */
513#define set_slsb(x,y) qdio_set_slsb(x,y)
514#endif /* CONFIG_QDIO_DEBUG */
515
516struct qdio_q {
517 volatile struct slsb slsb;
518
519 char unused[QDIO_MAX_BUFFERS_PER_Q];
520
521 __u32 * volatile dev_st_chg_ind;
522
523 int is_input_q;
524 int irq;
525 struct ccw_device *cdev;
526
527 unsigned int is_iqdio_q;
528 unsigned int is_thinint_q;
529
530 /* bit 0 means queue 0, bit 1 means queue 1, ... */
531 unsigned int mask;
532 unsigned int q_no;
533
534 qdio_handler_t (*handler);
535
536	/* points to the next buffer to be checked for having
537	 * been processed by the card (outbound),
538	 * or to the next buffer the program should check (inbound) */
539 volatile int first_to_check;
540	/* the value of first_to_check at the last move: */
541 volatile int last_move_ftc;
542
543 atomic_t number_of_buffers_used;
544 atomic_t polling;
545
546 unsigned int siga_in;
547 unsigned int siga_out;
548 unsigned int siga_sync;
549 unsigned int siga_sync_done_on_thinints;
550 unsigned int siga_sync_done_on_outb_tis;
551 unsigned int hydra_gives_outbound_pcis;
552
553 /* used to save beginning position when calling dd_handlers */
554 int first_element_to_kick;
555
556 atomic_t use_count;
557 atomic_t is_in_shutdown;
558
559 void *irq_ptr;
560
561#ifdef QDIO_USE_TIMERS_FOR_POLLING
562 struct timer_list timer;
563 atomic_t timer_already_set;
564 spinlock_t timer_lock;
565#else /* QDIO_USE_TIMERS_FOR_POLLING */
566 struct tasklet_struct tasklet;
567#endif /* QDIO_USE_TIMERS_FOR_POLLING */
568
569 enum qdio_irq_states state;
570
571 /* used to store the error condition during a data transfer */
572 unsigned int qdio_error;
573 unsigned int siga_error;
574 unsigned int error_status_flags;
575
576 /* list of interesting queues */
577 volatile struct qdio_q *list_next;
578 volatile struct qdio_q *list_prev;
579
580 struct sl *sl;
581 volatile struct sbal *sbal[QDIO_MAX_BUFFERS_PER_Q];
582
583 struct qdio_buffer *qdio_buffers[QDIO_MAX_BUFFERS_PER_Q];
584
585 unsigned long int_parm;
586
587 /*struct {
588 int in_bh_check_limit;
589 int threshold;
590 } threshold_classes[QDIO_STATS_CLASSES];*/
591
592 struct {
593 /* inbound: the time to stop polling
594 outbound: the time to kick peer */
595 int threshold; /* the real value */
596
597 /* outbound: last time of do_QDIO
598 inbound: last time of noticing incoming data */
599 /*__u64 last_transfer_times[QDIO_STATS_NUMBER];
600 int last_transfer_index; */
601
602 __u64 last_transfer_time;
603 __u64 busy_start;
604 } timing;
605 atomic_t busy_siga_counter;
606 unsigned int queue_type;
607
608 /* leave this member at the end. won't be cleared in qdio_fill_qs */
609 struct slib *slib; /* a page is allocated under this pointer,
610 sl points into this page, offset PAGE_SIZE/2
611 (after slib) */
612} __attribute__ ((aligned(256)));
613
614struct qdio_irq {
615 __u32 * volatile dev_st_chg_ind;
616
617 unsigned long int_parm;
618 int irq;
619
620 unsigned int is_iqdio_irq;
621 unsigned int is_thinint_irq;
622 unsigned int hydra_gives_outbound_pcis;
623 unsigned int sync_done_on_outb_pcis;
624
625 enum qdio_irq_states state;
626
627 unsigned int no_input_qs;
628 unsigned int no_output_qs;
629
630 unsigned char qdioac;
631
632 struct ccw1 ccw;
633
634 struct ciw equeue;
635 struct ciw aqueue;
636
637 struct qib qib;
638
639 void (*original_int_handler) (struct ccw_device *,
640 unsigned long, struct irb *);
641
642 /* leave these four members together at the end. won't be cleared in qdio_fill_irq */
643 struct qdr *qdr;
644 struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
645 struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
646 struct semaphore setting_up_sema;
647};
648#endif
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
new file mode 100644
index 000000000000..15edebbead7f
--- /dev/null
+++ b/drivers/s390/crypto/Makefile
@@ -0,0 +1,6 @@
1#
2# S/390 crypto devices
3#
4
5z90crypt-objs := z90main.o z90hardware.o
6obj-$(CONFIG_Z90CRYPT) += z90crypt.o
diff --git a/drivers/s390/crypto/z90common.h b/drivers/s390/crypto/z90common.h
new file mode 100644
index 000000000000..bcabac7a7c46
--- /dev/null
+++ b/drivers/s390/crypto/z90common.h
@@ -0,0 +1,168 @@
1/*
2 * linux/drivers/s390/crypto/z90common.h
3 *
4 * z90crypt 1.3.2
5 *
6 * Copyright (C) 2001, 2004 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#ifndef _Z90COMMON_H_
28#define _Z90COMMON_H_
29
30#define VERSION_Z90COMMON_H "$Revision: 1.16 $"
31
32
33#define RESPBUFFSIZE 256
34#define PCI_FUNC_KEY_DECRYPT 0x5044
35#define PCI_FUNC_KEY_ENCRYPT 0x504B
36extern int ext_bitlens;
37
38enum devstat {
39 DEV_GONE,
40 DEV_ONLINE,
41 DEV_QUEUE_FULL,
42 DEV_EMPTY,
43 DEV_NO_WORK,
44 DEV_BAD_MESSAGE,
45 DEV_TSQ_EXCEPTION,
46 DEV_RSQ_EXCEPTION,
47 DEV_SEN_EXCEPTION,
48 DEV_REC_EXCEPTION
49};
50
51enum hdstat {
52 HD_NOT_THERE,
53 HD_BUSY,
54 HD_DECONFIGURED,
55 HD_CHECKSTOPPED,
56 HD_ONLINE,
57 HD_TSQ_EXCEPTION
58};
59
60#define Z90C_NO_DEVICES 1
61#define Z90C_AMBIGUOUS_DOMAIN 2
62#define Z90C_INCORRECT_DOMAIN 3
63#define ENOTINIT 4
64
65#define SEN_BUSY 7
66#define SEN_USER_ERROR 8
67#define SEN_QUEUE_FULL 11
68#define SEN_NOT_AVAIL 16
69#define SEN_PAD_ERROR 17
70#define SEN_RETRY 18
71#define SEN_RELEASED 24
72
73#define REC_EMPTY 4
74#define REC_BUSY 6
75#define REC_OPERAND_INV 8
76#define REC_OPERAND_SIZE 9
77#define REC_EVEN_MOD 10
78#define REC_NO_WORK 11
79#define REC_HARDWAR_ERR 12
80#define REC_NO_RESPONSE 13
81#define REC_RETRY_DEV 14
82#define REC_USER_GONE 15
83#define REC_BAD_MESSAGE 16
84#define REC_INVALID_PAD 17
85#define REC_USE_PCICA 18
86
87#define WRONG_DEVICE_TYPE 20
88
89#define REC_FATAL_ERROR 32
90#define SEN_FATAL_ERROR 33
91#define TSQ_FATAL_ERROR 34
92#define RSQ_FATAL_ERROR 35
93
94#define Z90CRYPT_NUM_TYPES 5
95#define PCICA 0
96#define PCICC 1
97#define PCIXCC_MCL2 2
98#define PCIXCC_MCL3 3
99#define CEX2C 4
100#define NILDEV -1
101#define ANYDEV -1
102#define PCIXCC_UNK -2
103
104enum hdevice_type {
105 PCICC_HW = 3,
106 PCICA_HW = 4,
107 PCIXCC_HW = 5,
108 OTHER_HW = 6,
109 CEX2C_HW = 7
110};
111
112struct CPRBX {
113 unsigned short cprb_len;
114 unsigned char cprb_ver_id;
115 unsigned char pad_000[3];
116 unsigned char func_id[2];
117 unsigned char cprb_flags[4];
118 unsigned int req_parml;
119 unsigned int req_datal;
120 unsigned int rpl_msgbl;
121 unsigned int rpld_parml;
122 unsigned int rpl_datal;
123 unsigned int rpld_datal;
124 unsigned int req_extbl;
125 unsigned char pad_001[4];
126 unsigned int rpld_extbl;
127 unsigned char req_parmb[16];
128 unsigned char req_datab[16];
129 unsigned char rpl_parmb[16];
130 unsigned char rpl_datab[16];
131 unsigned char req_extb[16];
132 unsigned char rpl_extb[16];
133 unsigned short ccp_rtcode;
134 unsigned short ccp_rscode;
135 unsigned int mac_data_len;
136 unsigned char logon_id[8];
137 unsigned char mac_value[8];
138 unsigned char mac_content_flgs;
139 unsigned char pad_002;
140 unsigned short domain;
141 unsigned char pad_003[12];
142 unsigned char pad_004[36];
143};
144
145#ifndef DEV_NAME
146#define DEV_NAME "z90crypt"
147#endif
148#define PRINTK(fmt, args...) \
149 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
150#define PRINTKN(fmt, args...) \
151 printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
152#define PRINTKW(fmt, args...) \
153 printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
154#define PRINTKC(fmt, args...) \
155 printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
156
157#ifdef Z90CRYPT_DEBUG
158#define PDEBUG(fmt, args...) \
159 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
160#else
161#define PDEBUG(fmt, args...) do {} while (0)
162#endif
163
164#define UMIN(a,b) ((a) < (b) ? (a) : (b))
165#define IS_EVEN(x) ((x) == (2 * ((x) / 2)))
166
167
168#endif
diff --git a/drivers/s390/crypto/z90crypt.h b/drivers/s390/crypto/z90crypt.h
new file mode 100644
index 000000000000..82a1d97001d7
--- /dev/null
+++ b/drivers/s390/crypto/z90crypt.h
@@ -0,0 +1,258 @@
1/*
2 * linux/drivers/s390/crypto/z90crypt.h
3 *
4 * z90crypt 1.3.2
5 *
6 * Copyright (C) 2001, 2004 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#ifndef _Z90CRYPT_H_
28#define _Z90CRYPT_H_
29
30#include <linux/ioctl.h>
31
32#define VERSION_Z90CRYPT_H "$Revision: 1.11 $"
33
34#define z90crypt_VERSION 1
35#define z90crypt_RELEASE 3 // 2 = PCIXCC, 3 = rewrite for coding standards
36#define z90crypt_VARIANT 2 // 2 = added PCIXCC MCL3 and CEX2C support
37
38/**
39 * When not building with the sparse checker, __user expands to nothing.
40 */
41#ifdef __CHECKER__
42# define __user __attribute__((noderef, address_space(1)))
43#else
44# define __user
45#endif
46
47/**
48 * struct ica_rsa_modexpo
49 *
50 * Requirements:
51 * - outputdatalength is at least as large as inputdatalength.
52 * - All key parts are right justified in their fields, padded on
53 * the left with zeroes.
54 * - length(b_key) = inputdatalength
55 * - length(n_modulus) = inputdatalength
56 */
57struct ica_rsa_modexpo {
58 char __user * inputdata;
59 unsigned int inputdatalength;
60 char __user * outputdata;
61 unsigned int outputdatalength;
62 char __user * b_key;
63 char __user * n_modulus;
64};
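
A caller has to satisfy the requirements above exactly, so a minimal user-space sketch of preparing a request may help; the helper name and buffer parameters are hypothetical, and a 128-byte (1024-bit) modulus is assumed:

	#include <string.h>

	/* Hypothetical helper: point a struct ica_rsa_modexpo at
	 * caller-owned buffers for a 1024-bit operation. The exponent
	 * and modulus buffers must already be right-justified and
	 * left-padded with zeroes, as the requirements above demand. */
	static void setup_modexpo_1024(struct ica_rsa_modexpo *req,
				       char *in, char *out,
				       char *exponent, char *modulus)
	{
		memset(req, 0, sizeof(*req));
		req->inputdata = in;		/* 128 bytes of input */
		req->inputdatalength = 128;
		req->outputdata = out;
		req->outputdatalength = 128;	/* >= inputdatalength */
		req->b_key = exponent;		/* length(b_key) == 128 */
		req->n_modulus = modulus;	/* length(n_modulus) == 128 */
	}
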
65
66/**
67 * struct ica_rsa_modexpo_crt
68 *
69 * Requirements:
70 * - inputdatalength is even.
71 * - outputdatalength is at least as large as inputdatalength.
72 * - All key parts are right justified in their fields, padded on
73 * the left with zeroes.
74 * - length(bp_key) = inputdatalength/2 + 8
75 * - length(bq_key) = inputdatalength/2
76 * - length(np_prime) = inputdatalength/2 + 8
77 * - length(nq_prime) = inputdatalength/2
78 * - length(u_mult_inv) = inputdatalength/2 + 8
79 */
80struct ica_rsa_modexpo_crt {
81 char __user * inputdata;
82 unsigned int inputdatalength;
83 char __user * outputdata;
84 unsigned int outputdatalength;
85 char __user * bp_key;
86 char __user * bq_key;
87 char __user * np_prime;
88 char __user * nq_prime;
89 char __user * u_mult_inv;
90};
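
The five key-part lengths above follow mechanically from inputdatalength, and the halving is why inputdatalength must be even; a sketch of the arithmetic, with hypothetical helper names:

	/* Hypothetical helpers: key-part lengths for the CRT form.
	 * bp_key, np_prime and u_mult_inv carry 8 extra bytes each. */
	static inline unsigned int crt_long_len(unsigned int inputdatalength)
	{
		return inputdatalength / 2 + 8;
	}

	static inline unsigned int crt_short_len(unsigned int inputdatalength)
	{
		return inputdatalength / 2;	/* bq_key and nq_prime */
	}
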
91
92#define Z90_IOCTL_MAGIC 'z' // NOTE: Need to allocate from linux folks
93
94/**
95 * Interface notes:
96 *
97 * The ioctl()s which are implemented (along with relevant details)
98 * are:
99 *
100 * ICARSAMODEXPO
101 * Perform an RSA operation using a Modulus-Exponent pair
102 * This takes an ica_rsa_modexpo struct as its arg.
103 *
104 * NOTE: please refer to the comments preceding this structure
105 * for the implementation details for the contents of the
106 * block
107 *
108 * ICARSACRT
109 * Perform an RSA operation using a Chinese-Remainder Theorem key
110 * This takes an ica_rsa_modexpo_crt struct as its arg.
111 *
112 * NOTE: please refer to the comments preceding this structure
113 * for the implementation details for the contents of the
114 * block
115 *
116 * Z90STAT_TOTALCOUNT
117 * Return an integer count of all device types together.
118 *
119 * Z90STAT_PCICACOUNT
120 * Return an integer count of all PCICAs.
121 *
122 * Z90STAT_PCICCCOUNT
123 * Return an integer count of all PCICCs.
124 *
125 * Z90STAT_PCIXCCMCL2COUNT
126 * Return an integer count of all MCL2 PCIXCCs.
127 *
128 * Z90STAT_PCIXCCMCL3COUNT
129 * Return an integer count of all MCL3 PCIXCCs.
130 *
131 * Z90STAT_CEX2CCOUNT
132 * Return an integer count of all CEX2Cs.
133 *
134 * Z90STAT_REQUESTQ_COUNT
135 * Return an integer count of the number of entries waiting to be
136 * sent to a device.
137 *
138 * Z90STAT_PENDINGQ_COUNT
139 * Return an integer count of the number of entries sent to a
140 * device awaiting the reply.
141 *
142 * Z90STAT_TOTALOPEN_COUNT
143 * Return an integer count of the number of open file handles.
144 *
145 * Z90STAT_DOMAIN_INDEX
146 * Return the integer value of the Cryptographic Domain.
147 *
148 * Z90STAT_STATUS_MASK
149 * Return a 64-element array of unsigned chars giving the status of
150 * all devices.
151 * 0x01: PCICA
152 * 0x02: PCICC
153 * 0x03: PCIXCC_MCL2
154 * 0x04: PCIXCC_MCL3
155 * 0x05: CEX2C
156 * 0x0d: device is disabled via the proc filesystem
157 *
158 * Z90STAT_QDEPTH_MASK
159 * Return a 64-element array of unsigned chars giving the queue
160 * depth of all devices.
161 *
162 * Z90STAT_PERDEV_REQCNT
163 * Return a 64-element array of unsigned integers giving the number
164 * of successfully completed requests per device since the device
165 * was detected and made available.
166 *
167 * ICAZ90STATUS (deprecated)
168 * Return assorted device driver status in an ica_z90_status struct
169 * This takes an ica_z90_status struct as its arg.
170 *
171 * NOTE: this ioctl() is deprecated, and has been replaced with
172 * single ioctl()s for each type of status being requested
173 *
174 * Z90STAT_PCIXCCCOUNT (deprecated)
175 * Return an integer count of all PCIXCCs (MCL2 + MCL3).
176 * This is DEPRECATED now that MCL3 PCIXCCs are treated differently from
177 * MCL2 PCIXCCs.
178 *
179 * Z90QUIESCE (not recommended)
180 * Quiesce the driver. This is intended to stop all new
181 * requests from being processed. Its use is NOT recommended,
182 * except in circumstances where there is no other way to stop
183 * callers from accessing the driver. Its original use was to
184 * allow the driver to be "drained" of work in preparation for
185 * a system shutdown.
186 *
187 * NOTE: once issued, this ban on new work cannot be undone
188 * except by unloading and reloading the driver.
189 */
190
191/**
192 * Supported ioctl calls
193 */
194#define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, Z90_IOCTL_MAGIC, 0x05, 0)
195#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, Z90_IOCTL_MAGIC, 0x06, 0)
196
197/* DEPRECATED status calls (bound for removal at some point) */
198#define ICAZ90STATUS _IOR(Z90_IOCTL_MAGIC, 0x10, struct ica_z90_status)
199#define Z90STAT_PCIXCCCOUNT _IOR(Z90_IOCTL_MAGIC, 0x43, int)
200
201/* unrelated to ICA callers */
202#define Z90QUIESCE _IO(Z90_IOCTL_MAGIC, 0x11)
203
204/* New status calls */
205#define Z90STAT_TOTALCOUNT _IOR(Z90_IOCTL_MAGIC, 0x40, int)
206#define Z90STAT_PCICACOUNT _IOR(Z90_IOCTL_MAGIC, 0x41, int)
207#define Z90STAT_PCICCCOUNT _IOR(Z90_IOCTL_MAGIC, 0x42, int)
208#define Z90STAT_PCIXCCMCL2COUNT _IOR(Z90_IOCTL_MAGIC, 0x4b, int)
209#define Z90STAT_PCIXCCMCL3COUNT _IOR(Z90_IOCTL_MAGIC, 0x4c, int)
210#define Z90STAT_CEX2CCOUNT _IOR(Z90_IOCTL_MAGIC, 0x4d, int)
211#define Z90STAT_REQUESTQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x44, int)
212#define Z90STAT_PENDINGQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x45, int)
213#define Z90STAT_TOTALOPEN_COUNT _IOR(Z90_IOCTL_MAGIC, 0x46, int)
214#define Z90STAT_DOMAIN_INDEX _IOR(Z90_IOCTL_MAGIC, 0x47, int)
215#define Z90STAT_STATUS_MASK _IOR(Z90_IOCTL_MAGIC, 0x48, char[64])
216#define Z90STAT_QDEPTH_MASK _IOR(Z90_IOCTL_MAGIC, 0x49, char[64])
217#define Z90STAT_PERDEV_REQCNT _IOR(Z90_IOCTL_MAGIC, 0x4a, int[64])
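
As a usage illustration, everything above is driven through a single character device from user space; a minimal sketch, assuming the node is /dev/z90crypt (matching the DEV_NAME in z90common.h) and eliding most error handling:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "z90crypt.h"	/* the ioctl numbers defined above */

	int main(void)
	{
		int fd, count = 0;

		fd = open("/dev/z90crypt", O_RDWR);
		if (fd < 0)
			return 1;

		/* total count of crypto devices, all types together */
		if (ioctl(fd, Z90STAT_TOTALCOUNT, &count) == 0)
			printf("%d crypto device(s) available\n", count);

		/* an RSA request would be ioctl(fd, ICARSAMODEXPO, &mex),
		 * with the struct ica_rsa_modexpo prepared as shown earlier */

		close(fd);
		return 0;
	}
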
218
219/**
220 * local errno definitions
221 */
222#define ENOBUFF 129 // filp->private_data->...>work_elem_p->buffer is NULL
223#define EWORKPEND 130 // user issues ioctl while another pending
224#define ERELEASED 131 // user released while ioctl pending
225#define EQUIESCE 132 // z90crypt quiescing (no more work allowed)
226#define ETIMEOUT 133 // request timed out
227#define EUNKNOWN 134 // some unrecognized error occurred (retry may succeed)
228#define EGETBUFF 135 // Error getting buffer or hardware lacks capability
229 // (retry in software)
230
231/**
232 * DEPRECATED STRUCTURES
233 */
234
235/**
236 * This structure is DEPRECATED and the corresponding ioctl() has been
237 * replaced with individual ioctl()s for each piece of data!
238 * This structure will NOT survive past version 1.3.1, so switch to the
239 * new ioctl()s.
240 */
241#define MASK_LENGTH 64 // mask length
242struct ica_z90_status {
243 int totalcount;
244 int leedslitecount; // PCICA
245 int leeds2count; // PCICC
246 // int PCIXCCCount; is not in struct for backward compatibility
247 int requestqWaitCount;
248 int pendingqWaitCount;
249 int totalOpenCount;
250 int cryptoDomain;
251 // status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3,
252 // 5=CEX2C
253 unsigned char status[MASK_LENGTH];
254 // qdepth: # work elements waiting for each device
255 unsigned char qdepth[MASK_LENGTH];
256};
257
258#endif /* _Z90CRYPT_H_ */
diff --git a/drivers/s390/crypto/z90hardware.c b/drivers/s390/crypto/z90hardware.c
new file mode 100644
index 000000000000..beb6a5e0da22
--- /dev/null
+++ b/drivers/s390/crypto/z90hardware.c
@@ -0,0 +1,2243 @@
1/*
2 * linux/drivers/s390/crypto/z90hardware.c
3 *
4 * z90crypt 1.3.2
5 *
6 * Copyright (C) 2001, 2004 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <asm/uaccess.h>
28#include <linux/compiler.h>
29#include <linux/delay.h>
30#include <linux/init.h>
31#include <linux/module.h>
32#include "z90crypt.h"
33#include "z90common.h"
34
35#define VERSION_Z90HARDWARE_C "$Revision: 1.33 $"
36
37char z90hardware_version[] __initdata =
38 "z90hardware.o (" VERSION_Z90HARDWARE_C "/"
39 VERSION_Z90COMMON_H "/" VERSION_Z90CRYPT_H ")";
40
41struct cca_token_hdr {
42 unsigned char token_identifier;
43 unsigned char version;
44 unsigned short token_length;
45 unsigned char reserved[4];
46};
47
48#define CCA_TKN_HDR_ID_EXT 0x1E
49
50struct cca_private_ext_ME_sec {
51 unsigned char section_identifier;
52 unsigned char version;
53 unsigned short section_length;
54 unsigned char private_key_hash[20];
55 unsigned char reserved1[4];
56 unsigned char key_format;
57 unsigned char reserved2;
58 unsigned char key_name_hash[20];
59 unsigned char key_use_flags[4];
60 unsigned char reserved3[6];
61 unsigned char reserved4[24];
62 unsigned char confounder[24];
63 unsigned char exponent[128];
64 unsigned char modulus[128];
65};
66
67#define CCA_PVT_USAGE_ALL 0x80
68
69struct cca_public_sec {
70 unsigned char section_identifier;
71 unsigned char version;
72 unsigned short section_length;
73 unsigned char reserved[2];
74 unsigned short exponent_len;
75 unsigned short modulus_bit_len;
76 unsigned short modulus_byte_len;
77 unsigned char exponent[3];
78};
79
80struct cca_private_ext_ME {
81 struct cca_token_hdr pvtMEHdr;
82 struct cca_private_ext_ME_sec pvtMESec;
83 struct cca_public_sec pubMESec;
84};
85
86struct cca_public_key {
87 struct cca_token_hdr pubHdr;
88 struct cca_public_sec pubSec;
89};
90
91struct cca_pvt_ext_CRT_sec {
92 unsigned char section_identifier;
93 unsigned char version;
94 unsigned short section_length;
95 unsigned char private_key_hash[20];
96 unsigned char reserved1[4];
97 unsigned char key_format;
98 unsigned char reserved2;
99 unsigned char key_name_hash[20];
100 unsigned char key_use_flags[4];
101 unsigned short p_len;
102 unsigned short q_len;
103 unsigned short dp_len;
104 unsigned short dq_len;
105 unsigned short u_len;
106 unsigned short mod_len;
107 unsigned char reserved3[4];
108 unsigned short pad_len;
109 unsigned char reserved4[52];
110 unsigned char confounder[8];
111};
112
113#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08
114#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
115
116struct cca_private_ext_CRT {
117 struct cca_token_hdr pvtCrtHdr;
118 struct cca_pvt_ext_CRT_sec pvtCrtSec;
119 struct cca_public_sec pubCrtSec;
120};
121
122struct ap_status_word {
123 unsigned char q_stat_flags;
124 unsigned char response_code;
125 unsigned char reserved[2];
126};
127
128#define AP_Q_STATUS_EMPTY 0x80
129#define AP_Q_STATUS_REPLIES_WAITING 0x40
130#define AP_Q_STATUS_ARRAY_FULL 0x20
131
132#define AP_RESPONSE_NORMAL 0x00
133#define AP_RESPONSE_Q_NOT_AVAIL 0x01
134#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
135#define AP_RESPONSE_DECONFIGURED 0x03
136#define AP_RESPONSE_CHECKSTOPPED 0x04
137#define AP_RESPONSE_BUSY 0x05
138#define AP_RESPONSE_Q_FULL 0x10
139#define AP_RESPONSE_NO_PENDING_REPLY 0x10
140#define AP_RESPONSE_INDEX_TOO_BIG 0x11
141#define AP_RESPONSE_NO_FIRST_PART 0x13
142#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
143
144#define AP_MAX_CDX_BITL 4
145#define AP_RQID_RESERVED_BITL 4
146#define SKIP_BITL (AP_MAX_CDX_BITL + AP_RQID_RESERVED_BITL)
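
These widths fix how an AP queue number is composed everywhere below (testq, resetq, sen and rec all receive one); a one-line sketch of the composition, with a hypothetical helper name:

	/* Device index in the high bits, cryptographic domain index in
	 * the low SKIP_BITL (8) bits, exactly as the callers compute it. */
	static inline int ap_q_nr(int device_nr, int cdx)
	{
		return (device_nr << SKIP_BITL) + cdx;
	}
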
147
148struct type4_hdr {
149 unsigned char reserved1;
150 unsigned char msg_type_code;
151 unsigned short msg_len;
152 unsigned char request_code;
153 unsigned char msg_fmt;
154 unsigned short reserved2;
155};
156
157#define TYPE4_TYPE_CODE 0x04
158#define TYPE4_REQU_CODE 0x40
159
160#define TYPE4_SME_LEN 0x0188
161#define TYPE4_LME_LEN 0x0308
162#define TYPE4_SCR_LEN 0x01E0
163#define TYPE4_LCR_LEN 0x03A0
164
165#define TYPE4_SME_FMT 0x00
166#define TYPE4_LME_FMT 0x10
167#define TYPE4_SCR_FMT 0x40
168#define TYPE4_LCR_FMT 0x50
169
170struct type4_sme {
171 struct type4_hdr header;
172 unsigned char message[128];
173 unsigned char exponent[128];
174 unsigned char modulus[128];
175};
176
177struct type4_lme {
178 struct type4_hdr header;
179 unsigned char message[256];
180 unsigned char exponent[256];
181 unsigned char modulus[256];
182};
183
184struct type4_scr {
185 struct type4_hdr header;
186 unsigned char message[128];
187 unsigned char dp[72];
188 unsigned char dq[64];
189 unsigned char p[72];
190 unsigned char q[64];
191 unsigned char u[72];
192};
193
194struct type4_lcr {
195 struct type4_hdr header;
196 unsigned char message[256];
197 unsigned char dp[136];
198 unsigned char dq[128];
199 unsigned char p[136];
200 unsigned char q[128];
201 unsigned char u[136];
202};
203
204union type4_msg {
205 struct type4_sme sme;
206 struct type4_lme lme;
207 struct type4_scr scr;
208 struct type4_lcr lcr;
209};
210
211struct type84_hdr {
212 unsigned char reserved1;
213 unsigned char code;
214 unsigned short len;
215 unsigned char reserved2[4];
216};
217
218#define TYPE84_RSP_CODE 0x84
219
220struct type6_hdr {
221 unsigned char reserved1;
222 unsigned char type;
223 unsigned char reserved2[2];
224 unsigned char right[4];
225 unsigned char reserved3[2];
226 unsigned char reserved4[2];
227 unsigned char apfs[4];
228 unsigned int offset1;
229 unsigned int offset2;
230 unsigned int offset3;
231 unsigned int offset4;
232 unsigned char agent_id[16];
233 unsigned char rqid[2];
234 unsigned char reserved5[2];
235 unsigned char function_code[2];
236 unsigned char reserved6[2];
237 unsigned int ToCardLen1;
238 unsigned int ToCardLen2;
239 unsigned int ToCardLen3;
240 unsigned int ToCardLen4;
241 unsigned int FromCardLen1;
242 unsigned int FromCardLen2;
243 unsigned int FromCardLen3;
244 unsigned int FromCardLen4;
245};
246
247struct CPRB {
248 unsigned char cprb_len[2];
249 unsigned char cprb_ver_id;
250 unsigned char pad_000;
251 unsigned char srpi_rtcode[4];
252 unsigned char srpi_verb;
253 unsigned char flags;
254 unsigned char func_id[2];
255 unsigned char checkpoint_flag;
256 unsigned char resv2;
257 unsigned char req_parml[2];
258 unsigned char req_parmp[4];
259 unsigned char req_datal[4];
260 unsigned char req_datap[4];
261 unsigned char rpl_parml[2];
262 unsigned char pad_001[2];
263 unsigned char rpl_parmp[4];
264 unsigned char rpl_datal[4];
265 unsigned char rpl_datap[4];
266 unsigned char ccp_rscode[2];
267 unsigned char ccp_rtcode[2];
268 unsigned char repd_parml[2];
269 unsigned char mac_data_len[2];
270 unsigned char repd_datal[4];
271 unsigned char req_pc[2];
272 unsigned char res_origin[8];
273 unsigned char mac_value[8];
274 unsigned char logon_id[8];
275 unsigned char usage_domain[2];
276 unsigned char resv3[18];
277 unsigned char svr_namel[2];
278 unsigned char svr_name[8];
279};
280
281struct type6_msg {
282 struct type6_hdr header;
283 struct CPRB CPRB;
284};
285
286union request_msg {
287 union type4_msg t4msg;
288 struct type6_msg t6msg;
289};
290
291struct request_msg_ext {
292 int q_nr;
293 unsigned char *psmid;
294 union request_msg reqMsg;
295};
296
297struct type82_hdr {
298 unsigned char reserved1;
299 unsigned char type;
300 unsigned char reserved2[2];
301 unsigned char reply_code;
302 unsigned char reserved3[3];
303};
304
305#define TYPE82_RSP_CODE 0x82
306
307#define REPLY_ERROR_MACHINE_FAILURE 0x10
308#define REPLY_ERROR_PREEMPT_FAILURE 0x12
309#define REPLY_ERROR_CHECKPT_FAILURE 0x14
310#define REPLY_ERROR_MESSAGE_TYPE 0x20
311#define REPLY_ERROR_INVALID_COMM_CD 0x21
312#define REPLY_ERROR_INVALID_MSG_LEN 0x23
313#define REPLY_ERROR_RESERVD_FIELD 0x24
314#define REPLY_ERROR_FORMAT_FIELD 0x29
315#define REPLY_ERROR_INVALID_COMMAND 0x30
316#define REPLY_ERROR_MALFORMED_MSG 0x40
317#define REPLY_ERROR_RESERVED_FIELDO 0x50
318#define REPLY_ERROR_WORD_ALIGNMENT 0x60
319#define REPLY_ERROR_MESSAGE_LENGTH 0x80
320#define REPLY_ERROR_OPERAND_INVALID 0x82
321#define REPLY_ERROR_OPERAND_SIZE 0x84
322#define REPLY_ERROR_EVEN_MOD_IN_OPND 0x85
323#define REPLY_ERROR_RESERVED_FIELD 0x88
324#define REPLY_ERROR_TRANSPORT_FAIL 0x90
325#define REPLY_ERROR_PACKET_TRUNCATED 0xA0
326#define REPLY_ERROR_ZERO_BUFFER_LEN 0xB0
327
328struct type86_hdr {
329 unsigned char reserved1;
330 unsigned char type;
331 unsigned char format;
332 unsigned char reserved2;
333 unsigned char reply_code;
334 unsigned char reserved3[3];
335};
336
337#define TYPE86_RSP_CODE 0x86
338#define TYPE86_FMT2 0x02
339
340struct type86_fmt2_msg {
341 struct type86_hdr hdr;
342 unsigned char reserved[4];
343 unsigned char apfs[4];
344 unsigned int count1;
345 unsigned int offset1;
346 unsigned int count2;
347 unsigned int offset2;
348 unsigned int count3;
349 unsigned int offset3;
350 unsigned int count4;
351 unsigned int offset4;
352};
353
354static struct type6_hdr static_type6_hdr = {
355 0x00,
356 0x06,
357 {0x00,0x00},
358 {0x00,0x00,0x00,0x00},
359 {0x00,0x00},
360 {0x00,0x00},
361 {0x00,0x00,0x00,0x00},
362 0x00000058,
363 0x00000000,
364 0x00000000,
365 0x00000000,
366 {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
367 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
368 {0x00,0x00},
369 {0x00,0x00},
370 {0x50,0x44},
371 {0x00,0x00},
372 0x00000000,
373 0x00000000,
374 0x00000000,
375 0x00000000,
376 0x00000000,
377 0x00000000,
378 0x00000000,
379 0x00000000
380};
381
382static struct type6_hdr static_type6_hdrX = {
383 0x00,
384 0x06,
385 {0x00,0x00},
386 {0x00,0x00,0x00,0x00},
387 {0x00,0x00},
388 {0x00,0x00},
389 {0x00,0x00,0x00,0x00},
390 0x00000058,
391 0x00000000,
392 0x00000000,
393 0x00000000,
394 {0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
395 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
396 {0x00,0x00},
397 {0x00,0x00},
398 {0x50,0x44},
399 {0x00,0x00},
400 0x00000000,
401 0x00000000,
402 0x00000000,
403 0x00000000,
404 0x00000000,
405 0x00000000,
406 0x00000000,
407 0x00000000
408};
409
410static struct CPRB static_cprb = {
411 {0x70,0x00},
412 0x41,
413 0x00,
414 {0x00,0x00,0x00,0x00},
415 0x00,
416 0x00,
417 {0x54,0x32},
418 0x01,
419 0x00,
420 {0x00,0x00},
421 {0x00,0x00,0x00,0x00},
422 {0x00,0x00,0x00,0x00},
423 {0x00,0x00,0x00,0x00},
424 {0x00,0x00},
425 {0x00,0x00},
426 {0x00,0x00,0x00,0x00},
427 {0x00,0x00,0x00,0x00},
428 {0x00,0x00,0x00,0x00},
429 {0x00,0x00},
430 {0x00,0x00},
431 {0x00,0x00},
432 {0x00,0x00},
433 {0x00,0x00,0x00,0x00},
434 {0x00,0x00},
435 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
436 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
437 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
438 {0x00,0x00},
439 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
440 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
441 0x00,0x00},
442 {0x08,0x00},
443 {0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20}
444};
445
446struct function_and_rules_block {
447 unsigned char function_code[2];
448 unsigned char ulen[2];
449 unsigned char only_rule[8];
450};
451
452static struct function_and_rules_block static_pkd_function_and_rules = {
453 {0x50,0x44},
454 {0x0A,0x00},
455 {'P','K','C','S','-','1','.','2'}
456};
457
458static struct function_and_rules_block static_pke_function_and_rules = {
459 {0x50,0x4B},
460 {0x0A,0x00},
461 {'P','K','C','S','-','1','.','2'}
462};
463
464struct T6_keyBlock_hdr {
465 unsigned char blen[2];
466 unsigned char ulen[2];
467 unsigned char flags[2];
468};
469
470static struct T6_keyBlock_hdr static_T6_keyBlock_hdr = {
471 {0x89,0x01},
472 {0x87,0x01},
473 {0x00}
474};
475
476static struct CPRBX static_cprbx = {
477 0x00DC,
478 0x02,
479 {0x00,0x00,0x00},
480 {0x54,0x32},
481 {0x00,0x00,0x00,0x00},
482 0x00000000,
483 0x00000000,
484 0x00000000,
485 0x00000000,
486 0x00000000,
487 0x00000000,
488 0x00000000,
489 {0x00,0x00,0x00,0x00},
490 0x00000000,
491 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
492 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
493 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
494 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
495 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
496 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
497 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
498 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
499 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
500 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
501 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
502 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
503 0x0000,
504 0x0000,
505 0x00000000,
506 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
507 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
508 0x00,
509 0x00,
510 0x0000,
511 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
512 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
513 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
514 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}
515};
516
517static struct function_and_rules_block static_pkd_function_and_rulesX_MCL2 = {
518 {0x50,0x44},
519 {0x00,0x0A},
520 {'P','K','C','S','-','1','.','2'}
521};
522
523static struct function_and_rules_block static_pke_function_and_rulesX_MCL2 = {
524 {0x50,0x4B},
525 {0x00,0x0A},
526 {'Z','E','R','O','-','P','A','D'}
527};
528
529static struct function_and_rules_block static_pkd_function_and_rulesX = {
530 {0x50,0x44},
531 {0x00,0x0A},
532 {'Z','E','R','O','-','P','A','D'}
533};
534
535static struct function_and_rules_block static_pke_function_and_rulesX = {
536 {0x50,0x4B},
537 {0x00,0x0A},
538 {'M','R','P',' ',' ',' ',' ',' '}
539};
540
541struct T6_keyBlock_hdrX {
542 unsigned short blen;
543 unsigned short ulen;
544 unsigned char flags[2];
545};
546
547static unsigned char static_pad[256] = {
5480x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
5490x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
5500xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
5510x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
5520x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
5530x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
5540x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
5550xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
5560x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
5570x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
5580x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
5590x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
5600x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
5610x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
5620x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
5630x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
564};
565
566static struct cca_private_ext_ME static_pvt_me_key = {
567 {
568 0x1E,
569 0x00,
570 0x0183,
571 {0x00,0x00,0x00,0x00}
572 },
573
574 {
575 0x02,
576 0x00,
577 0x016C,
578 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
579 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
580 0x00,0x00,0x00,0x00},
581 {0x00,0x00,0x00,0x00},
582 0x00,
583 0x00,
584 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
585 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
586 0x00,0x00,0x00,0x00},
587 {0x80,0x00,0x00,0x00},
588 {0x00,0x00,0x00,0x00,0x00,0x00},
589 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
590 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
591 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
592 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
593 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
594 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
595 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
596 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
597 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
598 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
599 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
600 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
601 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
602 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
603 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
604 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
605 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
606 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
607 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
608 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
609 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
610 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
611 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
612 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
613 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
614 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
615 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
616 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
617 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
618 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
619 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
620 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
621 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
622 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
623 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
624 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
625 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
626 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}
627 },
628
629 {
630 0x04,
631 0x00,
632 0x000F,
633 {0x00,0x00},
634 0x0003,
635 0x0000,
636 0x0000,
637 {0x01,0x00,0x01}
638 }
639};
640
641static struct cca_public_key static_public_key = {
642 {
643 0x1E,
644 0x00,
645 0x0000,
646 {0x00,0x00,0x00,0x00}
647 },
648
649 {
650 0x04,
651 0x00,
652 0x0000,
653 {0x00,0x00},
654 0x0000,
655 0x0000,
656 0x0000,
657 {0x01,0x00,0x01}
658 }
659};
660
661#define FIXED_TYPE6_ME_LEN 0x0000025F
662
663#define FIXED_TYPE6_ME_EN_LEN 0x000000F0
664
665#define FIXED_TYPE6_ME_LENX 0x000002CB
666
667#define FIXED_TYPE6_ME_EN_LENX 0x0000015C
668
669static struct cca_public_sec static_cca_pub_sec = {
670 0x04,
671 0x00,
672 0x000f,
673 {0x00,0x00},
674 0x0003,
675 0x0000,
676 0x0000,
677 {0x01,0x00,0x01}
678};
679
680#define FIXED_TYPE6_CR_LEN 0x00000177
681
682#define FIXED_TYPE6_CR_LENX 0x000001E3
683
684#define MAX_RESPONSE_SIZE 0x00000710
685
686#define MAX_RESPONSEX_SIZE 0x0000077C
687
688#define RESPONSE_CPRB_SIZE 0x000006B8
689#define RESPONSE_CPRBX_SIZE 0x00000724
690
691#define CALLER_HEADER 12
692
693static unsigned char static_PKE_function_code[2] = {0x50, 0x4B};
694
695static inline int
696testq(int q_nr, int *q_depth, int *dev_type, struct ap_status_word *stat)
697{
698 int ccode;
699
700 asm volatile
701#ifdef __s390x__
702 (" llgfr 0,%4 \n"
703 " slgr 1,1 \n"
704 " lgr 2,1 \n"
705 "0: .long 0xb2af0000 \n"
706 "1: ipm %0 \n"
707 " srl %0,28 \n"
708 " iihh %0,0 \n"
709 " iihl %0,0 \n"
710 " lgr %1,1 \n"
711 " lgr %3,2 \n"
712 " srl %3,24 \n"
713 " sll 2,24 \n"
714 " srl 2,24 \n"
715 " lgr %2,2 \n"
716 "2: \n"
717 ".section .fixup,\"ax\" \n"
718 "3: \n"
719 " lhi %0,%h5 \n"
720 " jg 2b \n"
721 ".previous \n"
722 ".section __ex_table,\"a\" \n"
723 " .align 8 \n"
724 " .quad 0b,3b \n"
725 " .quad 1b,3b \n"
726 ".previous"
727 :"=d" (ccode),"=d" (*stat),"=d" (*q_depth), "=d" (*dev_type)
728 :"d" (q_nr), "K" (DEV_TSQ_EXCEPTION)
729 :"cc","0","1","2","memory");
730#else
731 (" lr 0,%4 \n"
732 " slr 1,1 \n"
733 " lr 2,1 \n"
734 "0: .long 0xb2af0000 \n"
735 "1: ipm %0 \n"
736 " srl %0,28 \n"
737 " lr %1,1 \n"
738 " lr %3,2 \n"
739 " srl %3,24 \n"
740 " sll 2,24 \n"
741 " srl 2,24 \n"
742 " lr %2,2 \n"
743 "2: \n"
744 ".section .fixup,\"ax\" \n"
745 "3: \n"
746 " lhi %0,%h5 \n"
747 " bras 1,4f \n"
748 " .long 2b \n"
749 "4: \n"
750 " l 1,0(1) \n"
751 " br 1 \n"
752 ".previous \n"
753 ".section __ex_table,\"a\" \n"
754 " .align 4 \n"
755 " .long 0b,3b \n"
756 " .long 1b,3b \n"
757 ".previous"
758 :"=d" (ccode),"=d" (*stat),"=d" (*q_depth), "=d" (*dev_type)
759 :"d" (q_nr), "K" (DEV_TSQ_EXCEPTION)
760 :"cc","0","1","2","memory");
761#endif
762 return ccode;
763}
764
765static inline int
766resetq(int q_nr, struct ap_status_word *stat_p)
767{
768 int ccode;
769
770 asm volatile
771#ifdef __s390x__
772 (" llgfr 0,%2 \n"
773 " lghi 1,1 \n"
774 " sll 1,24 \n"
775 " or 0,1 \n"
776 " slgr 1,1 \n"
777 " lgr 2,1 \n"
778 "0: .long 0xb2af0000 \n"
779 "1: ipm %0 \n"
780 " srl %0,28 \n"
781 " iihh %0,0 \n"
782 " iihl %0,0 \n"
783 " lgr %1,1 \n"
784 "2: \n"
785 ".section .fixup,\"ax\" \n"
786 "3: \n"
787 " lhi %0,%h3 \n"
788 " jg 2b \n"
789 ".previous \n"
790 ".section __ex_table,\"a\" \n"
791 " .align 8 \n"
792 " .quad 0b,3b \n"
793 " .quad 1b,3b \n"
794 ".previous"
795 :"=d" (ccode),"=d" (*stat_p)
796 :"d" (q_nr), "K" (DEV_RSQ_EXCEPTION)
797 :"cc","0","1","2","memory");
798#else
799 (" lr 0,%2 \n"
800 " lhi 1,1 \n"
801 " sll 1,24 \n"
802 " or 0,1 \n"
803 " slr 1,1 \n"
804 " lr 2,1 \n"
805 "0: .long 0xb2af0000 \n"
806 "1: ipm %0 \n"
807 " srl %0,28 \n"
808 " lr %1,1 \n"
809 "2: \n"
810 ".section .fixup,\"ax\" \n"
811 "3: \n"
812 " lhi %0,%h3 \n"
813 " bras 1,4f \n"
814 " .long 2b \n"
815 "4: \n"
816 " l 1,0(1) \n"
817 " br 1 \n"
818 ".previous \n"
819 ".section __ex_table,\"a\" \n"
820 " .align 4 \n"
821 " .long 0b,3b \n"
822 " .long 1b,3b \n"
823 ".previous"
824 :"=d" (ccode),"=d" (*stat_p)
825 :"d" (q_nr), "K" (DEV_RSQ_EXCEPTION)
826 :"cc","0","1","2","memory");
827#endif
828 return ccode;
829}
830
831static inline int
832sen(int msg_len, unsigned char *msg_ext, struct ap_status_word *stat)
833{
834 int ccode;
835
836 asm volatile
837#ifdef __s390x__
838 (" lgr 6,%3 \n"
839 " llgfr 7,%2 \n"
840 " llgt 0,0(6) \n"
841 " lghi 1,64 \n"
842 " sll 1,24 \n"
843 " or 0,1 \n"
844 " la 6,4(6) \n"
845 " llgt 2,0(6) \n"
846 " llgt 3,4(6) \n"
847 " la 6,8(6) \n"
848 " slr 1,1 \n"
849 "0: .long 0xb2ad0026 \n"
850 "1: brc 2,0b \n"
851 " ipm %0 \n"
852 " srl %0,28 \n"
853 " iihh %0,0 \n"
854 " iihl %0,0 \n"
855 " lgr %1,1 \n"
856 "2: \n"
857 ".section .fixup,\"ax\" \n"
858 "3: \n"
859 " lhi %0,%h4 \n"
860 " jg 2b \n"
861 ".previous \n"
862 ".section __ex_table,\"a\" \n"
863 " .align 8 \n"
864 " .quad 0b,3b \n"
865 " .quad 1b,3b \n"
866 ".previous"
867 :"=d" (ccode),"=d" (*stat)
868 :"d" (msg_len),"a" (msg_ext), "K" (DEV_SEN_EXCEPTION)
869 :"cc","0","1","2","3","6","7","memory");
870#else
871 (" lr 6,%3 \n"
872 " lr 7,%2 \n"
873 " l 0,0(6) \n"
874 " lhi 1,64 \n"
875 " sll 1,24 \n"
876 " or 0,1 \n"
877 " la 6,4(6) \n"
878 " l 2,0(6) \n"
879 " l 3,4(6) \n"
880 " la 6,8(6) \n"
881 " slr 1,1 \n"
882 "0: .long 0xb2ad0026 \n"
883 "1: brc 2,0b \n"
884 " ipm %0 \n"
885 " srl %0,28 \n"
886 " lr %1,1 \n"
887 "2: \n"
888 ".section .fixup,\"ax\" \n"
889 "3: \n"
890 " lhi %0,%h4 \n"
891 " bras 1,4f \n"
892 " .long 2b \n"
893 "4: \n"
894 " l 1,0(1) \n"
895 " br 1 \n"
896 ".previous \n"
897 ".section __ex_table,\"a\" \n"
898 " .align 4 \n"
899 " .long 0b,3b \n"
900 " .long 1b,3b \n"
901 ".previous"
902 :"=d" (ccode),"=d" (*stat)
903 :"d" (msg_len),"a" (msg_ext), "K" (DEV_SEN_EXCEPTION)
904 :"cc","0","1","2","3","6","7","memory");
905#endif
906 return ccode;
907}
908
909static inline int
910rec(int q_nr, int buff_l, unsigned char *rsp, unsigned char *id,
911 struct ap_status_word *st)
912{
913 int ccode;
914
915 asm volatile
916#ifdef __s390x__
917 (" llgfr 0,%2 \n"
918 " lgr 3,%4 \n"
919 " lgr 6,%3 \n"
920 " llgfr 7,%5 \n"
921 " lghi 1,128 \n"
922 " sll 1,24 \n"
923 " or 0,1 \n"
924 " slgr 1,1 \n"
925 " lgr 2,1 \n"
926 " lgr 4,1 \n"
927 " lgr 5,1 \n"
928 "0: .long 0xb2ae0046 \n"
929 "1: brc 2,0b \n"
930 " brc 4,0b \n"
931 " ipm %0 \n"
932 " srl %0,28 \n"
933 " iihh %0,0 \n"
934 " iihl %0,0 \n"
935 " lgr %1,1 \n"
936 " st 4,0(3) \n"
937 " st 5,4(3) \n"
938 "2: \n"
939 ".section .fixup,\"ax\" \n"
940 "3: \n"
941 " lhi %0,%h6 \n"
942 " jg 2b \n"
943 ".previous \n"
944 ".section __ex_table,\"a\" \n"
945 " .align 8 \n"
946 " .quad 0b,3b \n"
947 " .quad 1b,3b \n"
948 ".previous"
949 :"=d"(ccode),"=d"(*st)
950 :"d" (q_nr), "d" (rsp), "d" (id), "d" (buff_l), "K" (DEV_REC_EXCEPTION)
951 :"cc","0","1","2","3","4","5","6","7","memory");
952#else
953 (" lr 0,%2 \n"
954 " lr 3,%4 \n"
955 " lr 6,%3 \n"
956 " lr 7,%5 \n"
957 " lhi 1,128 \n"
958 " sll 1,24 \n"
959 " or 0,1 \n"
960 " slr 1,1 \n"
961 " lr 2,1 \n"
962 " lr 4,1 \n"
963 " lr 5,1 \n"
964 "0: .long 0xb2ae0046 \n"
965 "1: brc 2,0b \n"
966 " brc 4,0b \n"
967 " ipm %0 \n"
968 " srl %0,28 \n"
969 " lr %1,1 \n"
970 " st 4,0(3) \n"
971 " st 5,4(3) \n"
972 "2: \n"
973 ".section .fixup,\"ax\" \n"
974 "3: \n"
975 " lhi %0,%h6 \n"
976 " bras 1,4f \n"
977 " .long 2b \n"
978 "4: \n"
979 " l 1,0(1) \n"
980 " br 1 \n"
981 ".previous \n"
982 ".section __ex_table,\"a\" \n"
983 " .align 4 \n"
984 " .long 0b,3b \n"
985 " .long 1b,3b \n"
986 ".previous"
987 :"=d"(ccode),"=d"(*st)
988 :"d" (q_nr), "d" (rsp), "d" (id), "d" (buff_l), "K" (DEV_REC_EXCEPTION)
989 :"cc","0","1","2","3","4","5","6","7","memory");
990#endif
991 return ccode;
992}
993
994static inline void
995itoLe2(int *i_p, unsigned char *lechars)
996{
997 *lechars = *((unsigned char *) i_p + sizeof(int) - 1); /* low byte first */
998 *(lechars + 1) = *((unsigned char *) i_p + sizeof(int) - 2); /* then next byte */
999}
1000
1001static inline void
1002le2toI(unsigned char *lechars, int *i_p)
1003{
1004 unsigned char *ic_p;
1005 *i_p = 0;
1006 ic_p = (unsigned char *) i_p;
1007 *(ic_p + 2) = *(lechars + 1); /* high byte of the 16-bit value */
1008 *(ic_p + 3) = *(lechars); /* low byte */
1009}
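
These two helpers exist because the CPRB length fields (see the itoLe2 calls further down) are stored low byte first, while s390 is big-endian; a round-trip sketch, correct only on a big-endian host as in this driver:

	/* Sketch: round-trip through the helpers. Only the low 16 bits
	 * of the int survive, by design, since the CPRB length fields
	 * are two bytes wide. */
	static void le2_roundtrip_example(void)
	{
		int len = 0x0187, back;
		unsigned char le[2];

		itoLe2(&len, le);	/* le[0] == 0x87, le[1] == 0x01 */
		le2toI(le, &back);	/* back == 0x0187 */
	}
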
1010
1011static inline int
1012is_empty(unsigned char *ptr, int len)
1013{
1014 return !memcmp(ptr, (unsigned char *) &static_pvt_me_key+60, len); /* a run of zeroes */
1015}
1016
1017enum hdstat
1018query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type)
1019{
1020 int q_nr, i, t_depth, t_dev_type;
1021 enum devstat ccode;
1022 struct ap_status_word stat_word;
1023 enum hdstat stat;
1024 int break_out;
1025
1026 q_nr = (deviceNr << SKIP_BITL) + cdx;
1027 stat = HD_BUSY;
1028 ccode = testq(q_nr, &t_depth, &t_dev_type, &stat_word);
1029 PDEBUG("ccode %d response_code %02X\n", ccode, stat_word.response_code);
1030 break_out = 0;
1031 for (i = 0; i < resetNr; i++) {
1032 if (ccode > 3) {
1033 PRINTKC("Exception testing device %d\n", deviceNr);
1034 return HD_TSQ_EXCEPTION;
1035 }
1036 switch (ccode) {
1037 case 0:
1038 PDEBUG("t_dev_type %d\n", t_dev_type);
1039 break_out = 1;
1040 stat = HD_ONLINE;
1041 *q_depth = t_depth + 1;
1042 switch (t_dev_type) {
1043 case OTHER_HW:
1044 stat = HD_NOT_THERE;
1045 *dev_type = NILDEV;
1046 break;
1047 case PCICA_HW:
1048 *dev_type = PCICA;
1049 break;
1050 case PCICC_HW:
1051 *dev_type = PCICC;
1052 break;
1053 case PCIXCC_HW:
1054 *dev_type = PCIXCC_UNK;
1055 break;
1056 case CEX2C_HW:
1057 *dev_type = CEX2C;
1058 break;
1059 default:
1060 *dev_type = NILDEV;
1061 break;
1062 }
1063 PDEBUG("available device %d: Q depth = %d, dev "
1064 "type = %d, stat = %02X%02X%02X%02X\n",
1065 deviceNr, *q_depth, *dev_type,
1066 stat_word.q_stat_flags,
1067 stat_word.response_code,
1068 stat_word.reserved[0],
1069 stat_word.reserved[1]);
1070 break;
1071 case 3:
1072 switch (stat_word.response_code) {
1073 case AP_RESPONSE_NORMAL:
1074 stat = HD_ONLINE;
1075 break_out = 1;
1076 *q_depth = t_depth + 1;
1077 *dev_type = t_dev_type;
1078 PDEBUG("cc3, available device "
1079 "%d: Q depth = %d, dev "
1080 "type = %d, stat = "
1081 "%02X%02X%02X%02X\n",
1082 deviceNr, *q_depth,
1083 *dev_type,
1084 stat_word.q_stat_flags,
1085 stat_word.response_code,
1086 stat_word.reserved[0],
1087 stat_word.reserved[1]);
1088 break;
1089 case AP_RESPONSE_Q_NOT_AVAIL:
1090 stat = HD_NOT_THERE;
1091 break_out = 1;
1092 break;
1093 case AP_RESPONSE_RESET_IN_PROGRESS:
1094 PDEBUG("device %d in reset\n",
1095 deviceNr);
1096 break;
1097 case AP_RESPONSE_DECONFIGURED:
1098 stat = HD_DECONFIGURED;
1099 break_out = 1;
1100 break;
1101 case AP_RESPONSE_CHECKSTOPPED:
1102 stat = HD_CHECKSTOPPED;
1103 break_out = 1;
1104 break;
1105 case AP_RESPONSE_BUSY:
1106 PDEBUG("device %d busy\n",
1107 deviceNr);
1108 break;
1109 default:
1110 break;
1111 }
1112 break;
1113 default:
1114 stat = HD_NOT_THERE;
1115 break_out = 1;
1116 break;
1117 }
1118 if (break_out)
1119 break;
1120
1121 udelay(5);
1122
1123 ccode = testq(q_nr, &t_depth, &t_dev_type, &stat_word);
1124 }
1125 return stat;
1126}
1127
1128enum devstat
1129reset_device(int deviceNr, int cdx, int resetNr)
1130{
1131 int q_nr, ccode = 0, dummy_qdepth, dummy_devType, i;
1132 struct ap_status_word stat_word;
1133 enum devstat stat;
1134 int break_out;
1135
1136 q_nr = (deviceNr << SKIP_BITL) + cdx;
1137 stat = DEV_GONE;
1138 ccode = resetq(q_nr, &stat_word);
1139 if (ccode > 3)
1140 return DEV_RSQ_EXCEPTION;
1141
1142 break_out = 0;
1143 for (i = 0; i < resetNr; i++) {
1144 switch (ccode) {
1145 case 0:
1146 stat = DEV_ONLINE;
1147 if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
1148 break_out = 1;
1149 break;
1150 case 3:
1151 switch (stat_word.response_code) {
1152 case AP_RESPONSE_NORMAL:
1153 stat = DEV_ONLINE;
1154 if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
1155 break_out = 1;
1156 break;
1157 case AP_RESPONSE_Q_NOT_AVAIL:
1158 case AP_RESPONSE_DECONFIGURED:
1159 case AP_RESPONSE_CHECKSTOPPED:
1160 stat = DEV_GONE;
1161 break_out = 1;
1162 break;
1163 case AP_RESPONSE_RESET_IN_PROGRESS:
1164 case AP_RESPONSE_BUSY:
1165 default:
1166 break;
1167 }
1168 break;
1169 default:
1170 stat = DEV_GONE;
1171 break_out = 1;
1172 break;
1173 }
1174 if (break_out == 1)
1175 break;
1176 udelay(5);
1177
1178 ccode = testq(q_nr, &dummy_qdepth, &dummy_devType, &stat_word);
1179 if (ccode > 3) {
1180 stat = DEV_TSQ_EXCEPTION;
1181 break;
1182 }
1183 }
1184 PDEBUG("Number of testq's needed for reset: %d\n", i);
1185
1186 if (i >= resetNr) {
1187 stat = DEV_GONE;
1188 }
1189
1190 return stat;
1191}
1192
1193#ifdef DEBUG_HYDRA_MSGS
1194static inline void
1195print_buffer(unsigned char *buffer, int bufflen)
1196{
1197 int i;
1198 for (i = 0; i < bufflen; i += 16) {
1199 PRINTK("%04X: %02X%02X%02X%02X %02X%02X%02X%02X "
1200 "%02X%02X%02X%02X %02X%02X%02X%02X\n", i,
1201 buffer[i+0], buffer[i+1], buffer[i+2], buffer[i+3],
1202 buffer[i+4], buffer[i+5], buffer[i+6], buffer[i+7],
1203 buffer[i+8], buffer[i+9], buffer[i+10], buffer[i+11],
1204 buffer[i+12], buffer[i+13], buffer[i+14], buffer[i+15]);
1205 }
1206}
1207#endif
1208
1209enum devstat
1210send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext)
1211{
1212 struct ap_status_word stat_word;
1213 enum devstat stat;
1214 int ccode;
1215
1216 ((struct request_msg_ext *) msg_ext)->q_nr =
1217 (dev_nr << SKIP_BITL) + cdx;
1218 PDEBUG("msg_len passed to sen: %d\n", msg_len);
1219 PDEBUG("q number passed to sen: %02x%02x%02x%02x\n",
1220 msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3]);
1221 stat = DEV_GONE;
1222
1223#ifdef DEBUG_HYDRA_MSGS
1224 PRINTK("Request header: %02X%02X%02X%02X %02X%02X%02X%02X "
1225 "%02X%02X%02X%02X\n",
1226 msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3],
1227 msg_ext[4], msg_ext[5], msg_ext[6], msg_ext[7],
1228 msg_ext[8], msg_ext[9], msg_ext[10], msg_ext[11]);
1229 print_buffer(msg_ext+CALLER_HEADER, msg_len);
1230#endif
1231
1232 ccode = sen(msg_len, msg_ext, &stat_word);
1233 if (ccode > 3)
1234 return DEV_SEN_EXCEPTION;
1235
1236 PDEBUG("nq cc: %u, st: %02x%02x%02x%02x\n",
1237 ccode, stat_word.q_stat_flags, stat_word.response_code,
1238 stat_word.reserved[0], stat_word.reserved[1]);
1239 switch (ccode) {
1240 case 0:
1241 stat = DEV_ONLINE;
1242 break;
1243 case 1:
1244 stat = DEV_GONE;
1245 break;
1246 case 3:
1247 switch (stat_word.response_code) {
1248 case AP_RESPONSE_NORMAL:
1249 stat = DEV_ONLINE;
1250 break;
1251 case AP_RESPONSE_Q_FULL:
1252 stat = DEV_QUEUE_FULL;
1253 break;
1254 default:
1255 stat = DEV_GONE;
1256 break;
1257 }
1258 break;
1259 default:
1260 stat = DEV_GONE;
1261 break;
1262 }
1263
1264 return stat;
1265}
1266
1267enum devstat
1268receive_from_AP(int dev_nr, int cdx, int resplen, unsigned char *resp,
1269 unsigned char *psmid)
1270{
1271 int ccode;
1272 struct ap_status_word stat_word;
1273 enum devstat stat;
1274
1275 memset(resp, 0x00, 8);
1276
1277 ccode = rec((dev_nr << SKIP_BITL) + cdx, resplen, resp, psmid,
1278 &stat_word);
1279 if (ccode > 3)
1280 return DEV_REC_EXCEPTION;
1281
1282 PDEBUG("dq cc: %u, st: %02x%02x%02x%02x\n",
1283 ccode, stat_word.q_stat_flags, stat_word.response_code,
1284 stat_word.reserved[0], stat_word.reserved[1]);
1285
1286 stat = DEV_GONE;
1287 switch (ccode) {
1288 case 0:
1289 stat = DEV_ONLINE;
1290#ifdef DEBUG_HYDRA_MSGS
1291 print_buffer(resp, resplen);
1292#endif
1293 break;
1294 case 3:
1295 switch (stat_word.response_code) {
1296 case AP_RESPONSE_NORMAL:
1297 stat = DEV_ONLINE;
1298 break;
1299 case AP_RESPONSE_NO_PENDING_REPLY:
1300 if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
1301 stat = DEV_EMPTY;
1302 else
1303 stat = DEV_NO_WORK;
1304 break;
1305 case AP_RESPONSE_INDEX_TOO_BIG:
1306 case AP_RESPONSE_NO_FIRST_PART:
1307 case AP_RESPONSE_MESSAGE_TOO_BIG:
1308 stat = DEV_BAD_MESSAGE;
1309 break;
1310 default:
1311 break;
1312 }
1313 break;
1314 default:
1315 break;
1316 }
1317
1318 return stat;
1319}
1320
1321static inline int
1322pad_msg(unsigned char *buffer, int totalLength, int msgLength)
1323{
1324 int pad_len;
1325
1326 for (pad_len = 0; pad_len < (totalLength - msgLength); pad_len++)
1327 if (buffer[pad_len] != 0x00)
1328 break;
1329 pad_len -= 3; /* reserve the 0x00, 0x02 lead-in and the trailing 0x00 */
1330 if (pad_len < 8) /* PKCS #1 requires at least 8 pad bytes */
1331 return SEN_PAD_ERROR;
1332
1333 buffer[0] = 0x00;
1334 buffer[1] = 0x02; /* PKCS #1 block type 2 */
1335
1336 memcpy(buffer+2, static_pad, pad_len);
1337
1338 buffer[pad_len + 2] = 0x00;
1339
1340 return 0;
1341}
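
On success the region in front of the message has the PKCS #1 block type 2 shape, which is also why fewer than 8 pad bytes is rejected above; the resulting layout, sketched as a comment and assuming the leading region was zero-filled on entry:

	/*
	 * buffer[0]                0x00
	 * buffer[1]                0x02	block type 2
	 * buffer[2 .. pad_len+1]   non-zero bytes from static_pad
	 * buffer[pad_len+2]        0x00	separator
	 * buffer[pad_len+3 ..]     the msgLength-byte message
	 *
	 * with pad_len == totalLength - msgLength - 3, and pad_len >= 8
	 * enforced above.
	 */
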
1342
1343static inline int
1344is_common_public_key(unsigned char *key, int len)
1345{
1346 int i;
1347
1348 for (i = 0; i < len; i++)
1349 if (key[i])
1350 break;
1351 key += i;
1352 len -= i;
1353 if (((len == 1) && (key[0] == 3)) || /* e == 3 */
1354 ((len == 3) && (key[0] == 1) && (key[1] == 0) && (key[2] == 1))) /* e == 65537 */
1355 return 1;
1356
1357 return 0;
1358}
1359
1360static int
1361ICAMEX_msg_to_type4MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p,
1362 union type4_msg *z90cMsg_p)
1363{
1364 int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len;
1365 unsigned char *mod_tgt, *exp_tgt, *inp_tgt;
1366 union type4_msg *tmp_type4_msg;
1367
1368 mod_len = icaMex_p->inputdatalength;
1369
1370 msg_size = ((mod_len <= 128) ? TYPE4_SME_LEN : TYPE4_LME_LEN) +
1371 CALLER_HEADER;
1372
1373 memset(z90cMsg_p, 0, msg_size);
1374
1375 tmp_type4_msg = (union type4_msg *)
1376 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
1377
1378 tmp_type4_msg->sme.header.msg_type_code = TYPE4_TYPE_CODE;
1379 tmp_type4_msg->sme.header.request_code = TYPE4_REQU_CODE;
1380
1381 if (mod_len <= 128) {
1382 tmp_type4_msg->sme.header.msg_fmt = TYPE4_SME_FMT;
1383 tmp_type4_msg->sme.header.msg_len = TYPE4_SME_LEN;
1384 mod_tgt = tmp_type4_msg->sme.modulus;
1385 mod_tgt_len = sizeof(tmp_type4_msg->sme.modulus);
1386 exp_tgt = tmp_type4_msg->sme.exponent;
1387 exp_tgt_len = sizeof(tmp_type4_msg->sme.exponent);
1388 inp_tgt = tmp_type4_msg->sme.message;
1389 inp_tgt_len = sizeof(tmp_type4_msg->sme.message);
1390 } else {
1391 tmp_type4_msg->lme.header.msg_fmt = TYPE4_LME_FMT;
1392 tmp_type4_msg->lme.header.msg_len = TYPE4_LME_LEN;
1393 mod_tgt = tmp_type4_msg->lme.modulus;
1394 mod_tgt_len = sizeof(tmp_type4_msg->lme.modulus);
1395 exp_tgt = tmp_type4_msg->lme.exponent;
1396 exp_tgt_len = sizeof(tmp_type4_msg->lme.exponent);
1397 inp_tgt = tmp_type4_msg->lme.message;
1398 inp_tgt_len = sizeof(tmp_type4_msg->lme.message);
1399 }
1400
1401 mod_tgt += (mod_tgt_len - mod_len);
1402 if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len))
1403 return SEN_RELEASED;
1404 if (is_empty(mod_tgt, mod_len))
1405 return SEN_USER_ERROR;
1406 exp_tgt += (exp_tgt_len - mod_len);
1407 if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len))
1408 return SEN_RELEASED;
1409 if (is_empty(exp_tgt, mod_len))
1410 return SEN_USER_ERROR;
1411 inp_tgt += (inp_tgt_len - mod_len);
1412 if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len))
1413 return SEN_RELEASED;
1414 if (is_empty(inp_tgt, mod_len))
1415 return SEN_USER_ERROR;
1416
1417 *z90cMsg_l_p = msg_size - CALLER_HEADER;
1418
1419 return 0;
1420}
1421
1422static int
1423ICACRT_msg_to_type4CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p,
1424 int *z90cMsg_l_p, union type4_msg *z90cMsg_p)
1425{
1426 int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len,
1427 dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len;
1428 unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt;
1429 union type4_msg *tmp_type4_msg;
1430
1431 mod_len = icaMsg_p->inputdatalength;
1432 short_len = mod_len / 2;
1433 long_len = mod_len / 2 + 8;
1434
1435 tmp_size = ((mod_len <= 128) ? TYPE4_SCR_LEN : TYPE4_LCR_LEN) +
1436 CALLER_HEADER;
1437
1438 memset(z90cMsg_p, 0, tmp_size);
1439
1440 tmp_type4_msg = (union type4_msg *)
1441 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
1442
1443 tmp_type4_msg->scr.header.msg_type_code = TYPE4_TYPE_CODE;
1444 tmp_type4_msg->scr.header.request_code = TYPE4_REQU_CODE;
1445 if (mod_len <= 128) {
1446 tmp_type4_msg->scr.header.msg_fmt = TYPE4_SCR_FMT;
1447 tmp_type4_msg->scr.header.msg_len = TYPE4_SCR_LEN;
1448 p_tgt = tmp_type4_msg->scr.p;
1449 p_tgt_len = sizeof(tmp_type4_msg->scr.p);
1450 q_tgt = tmp_type4_msg->scr.q;
1451 q_tgt_len = sizeof(tmp_type4_msg->scr.q);
1452 dp_tgt = tmp_type4_msg->scr.dp;
1453 dp_tgt_len = sizeof(tmp_type4_msg->scr.dp);
1454 dq_tgt = tmp_type4_msg->scr.dq;
1455 dq_tgt_len = sizeof(tmp_type4_msg->scr.dq);
1456 u_tgt = tmp_type4_msg->scr.u;
1457 u_tgt_len = sizeof(tmp_type4_msg->scr.u);
1458 inp_tgt = tmp_type4_msg->scr.message;
1459 inp_tgt_len = sizeof(tmp_type4_msg->scr.message);
1460 } else {
1461 tmp_type4_msg->lcr.header.msg_fmt = TYPE4_LCR_FMT;
1462 tmp_type4_msg->lcr.header.msg_len = TYPE4_LCR_LEN;
1463 p_tgt = tmp_type4_msg->lcr.p;
1464 p_tgt_len = sizeof(tmp_type4_msg->lcr.p);
1465 q_tgt = tmp_type4_msg->lcr.q;
1466 q_tgt_len = sizeof(tmp_type4_msg->lcr.q);
1467 dp_tgt = tmp_type4_msg->lcr.dp;
1468 dp_tgt_len = sizeof(tmp_type4_msg->lcr.dp);
1469 dq_tgt = tmp_type4_msg->lcr.dq;
1470 dq_tgt_len = sizeof(tmp_type4_msg->lcr.dq);
1471 u_tgt = tmp_type4_msg->lcr.u;
1472 u_tgt_len = sizeof(tmp_type4_msg->lcr.u);
1473 inp_tgt = tmp_type4_msg->lcr.message;
1474 inp_tgt_len = sizeof(tmp_type4_msg->lcr.message);
1475 }
1476
1477 p_tgt += (p_tgt_len - long_len);
1478 if (copy_from_user(p_tgt, icaMsg_p->np_prime, long_len))
1479 return SEN_RELEASED;
1480 if (is_empty(p_tgt, long_len))
1481 return SEN_USER_ERROR;
1482 q_tgt += (q_tgt_len - short_len);
1483 if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len))
1484 return SEN_RELEASED;
1485 if (is_empty(q_tgt, short_len))
1486 return SEN_USER_ERROR;
1487 dp_tgt += (dp_tgt_len - long_len);
1488 if (copy_from_user(dp_tgt, icaMsg_p->bp_key, long_len))
1489 return SEN_RELEASED;
1490 if (is_empty(dp_tgt, long_len))
1491 return SEN_USER_ERROR;
1492 dq_tgt += (dq_tgt_len - short_len);
1493 if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len))
1494 return SEN_RELEASED;
1495 if (is_empty(dq_tgt, short_len))
1496 return SEN_USER_ERROR;
1497 u_tgt += (u_tgt_len - long_len);
1498 if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv, long_len))
1499 return SEN_RELEASED;
1500 if (is_empty(u_tgt, long_len))
1501 return SEN_USER_ERROR;
1502 inp_tgt += (inp_tgt_len - mod_len);
1503 if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len))
1504 return SEN_RELEASED;
1505 if (is_empty(inp_tgt, mod_len))
1506 return SEN_USER_ERROR;
1507
1508 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
1509
1510 return 0;
1511}
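/*
 * Note: for CRT requests the operands derived from the two primes get
 * different field widths -- p, dp and u use long_len, q and dq use
 * short_len, with 8 bytes of headroom on the long side. Worked example
 * for a 1024-bit key (mod_len = 128 bytes):
 *     short_len = 128 / 2     = 64  (q, dq)
 *     long_len  = 128 / 2 + 8 = 72  (p, dp, u)
 * matching the copy sizes used in the function above.
 */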
1512
1513static int
1514ICAMEX_msg_to_type6MEX_de_msg(struct ica_rsa_modexpo *icaMsg_p, int cdx,
1515 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
1516{
1517 int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l;
1518 unsigned char *temp;
1519 struct type6_hdr *tp6Hdr_p;
1520 struct CPRB *cprb_p;
1521 struct cca_private_ext_ME *key_p;
1522 static int deprecated_msg_count = 0;
1523
1524 mod_len = icaMsg_p->inputdatalength;
1525 tmp_size = FIXED_TYPE6_ME_LEN + mod_len;
1526 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
1527 parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
1528 tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
1529
1530 memset(z90cMsg_p, 0, tmp_size);
1531
1532 temp = (unsigned char *)z90cMsg_p + CALLER_HEADER;
1533 memcpy(temp, &static_type6_hdr, sizeof(struct type6_hdr));
1534 tp6Hdr_p = (struct type6_hdr *)temp;
1535 tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
1536 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
1537
1538 temp += sizeof(struct type6_hdr);
1539 memcpy(temp, &static_cprb, sizeof(struct CPRB));
1540 cprb_p = (struct CPRB *) temp;
1541 cprb_p->usage_domain[0]= (unsigned char)cdx;
1542 itoLe2(&parmBlock_l, cprb_p->req_parml);
1543 itoLe2((int *)&(tp6Hdr_p->FromCardLen1), cprb_p->rpl_parml);
1544
1545 temp += sizeof(struct CPRB);
1546 memcpy(temp, &static_pkd_function_and_rules,
1547 sizeof(struct function_and_rules_block));
1548
1549 temp += sizeof(struct function_and_rules_block);
1550 vud_len = 2 + icaMsg_p->inputdatalength;
1551 itoLe2(&vud_len, temp);
1552
1553 temp += 2;
1554 if (copy_from_user(temp, icaMsg_p->inputdata, mod_len))
1555 return SEN_RELEASED;
1556 if (is_empty(temp, mod_len))
1557 return SEN_USER_ERROR;
1558
1559 temp += mod_len;
1560 memcpy(temp, &static_T6_keyBlock_hdr, sizeof(struct T6_keyBlock_hdr));
1561
1562 temp += sizeof(struct T6_keyBlock_hdr);
1563 memcpy(temp, &static_pvt_me_key, sizeof(struct cca_private_ext_ME));
1564 key_p = (struct cca_private_ext_ME *)temp;
1565 temp = key_p->pvtMESec.exponent + sizeof(key_p->pvtMESec.exponent)
1566 - mod_len;
1567 if (copy_from_user(temp, icaMsg_p->b_key, mod_len))
1568 return SEN_RELEASED;
1569 if (is_empty(temp, mod_len))
1570 return SEN_USER_ERROR;
1571
1572 if (is_common_public_key(temp, mod_len)) {
1573 if (deprecated_msg_count < 20) {
1574 PRINTK("Common public key used for modex decrypt\n");
1575 deprecated_msg_count++;
1576 if (deprecated_msg_count == 20)
1577 PRINTK("No longer issuing messages about common"
1578 " public key for modex decrypt.\n");
1579 }
1580 return SEN_NOT_AVAIL;
1581 }
1582
1583 temp = key_p->pvtMESec.modulus + sizeof(key_p->pvtMESec.modulus)
1584 - mod_len;
1585 if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
1586 return SEN_RELEASED;
1587 if (is_empty(temp, mod_len))
1588 return SEN_USER_ERROR;
1589
1590 key_p->pubMESec.modulus_bit_len = 8 * mod_len;
1591
1592 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
1593
1594 return 0;
1595}
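/*
 * Note: itoLe2() and le2toI() (defined earlier in this file) are assumed
 * here to move the low 16 bits of an int to and from a little-endian byte
 * pair, the encoding the CPRB uses for its length fields. A portable
 * sketch of that assumed behaviour (hypothetical names):
 */
#if 0 /* illustrative sketch, not part of the original driver */
static void itoLe2_sketch(int *i_p, unsigned char *lechars)
{
	lechars[0] = (unsigned char)(*i_p & 0xff);	   /* LSB first */
	lechars[1] = (unsigned char)((*i_p >> 8) & 0xff);
}

static void le2toI_sketch(unsigned char *lechars, int *i_p)
{
	*i_p = lechars[0] | (lechars[1] << 8);
}
#endif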
1596
1597static int
1598ICAMEX_msg_to_type6MEX_en_msg(struct ica_rsa_modexpo *icaMsg_p, int cdx,
1599 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
1600{
1601 int mod_len, vud_len, exp_len, key_len;
1602 int pad_len, tmp_size, total_CPRB_len, parmBlock_l, i;
1603 unsigned char *temp_exp, *exp_p, *temp;
1604 struct type6_hdr *tp6Hdr_p;
1605 struct CPRB *cprb_p;
1606 struct cca_public_key *key_p;
1607 struct T6_keyBlock_hdr *keyb_p;
1608
1609 temp_exp = kmalloc(256, GFP_KERNEL);
1610 if (!temp_exp)
1611 return EGETBUFF;
1612 mod_len = icaMsg_p->inputdatalength;
1613 if (copy_from_user(temp_exp, icaMsg_p->b_key, mod_len)) {
1614 kfree(temp_exp);
1615 return SEN_RELEASED;
1616 }
1617 if (is_empty(temp_exp, mod_len)) {
1618 kfree(temp_exp);
1619 return SEN_USER_ERROR;
1620 }
1621
1622 exp_p = temp_exp;
1623 for (i = 0; i < mod_len; i++)
1624 if (exp_p[i])
1625 break;
1626 if (i >= mod_len) {
1627 kfree(temp_exp);
1628 return SEN_USER_ERROR;
1629 }
1630
1631 exp_len = mod_len - i;
1632 exp_p += i;
1633
1634 PDEBUG("exp_len after computation: %08x\n", exp_len);
1635 tmp_size = FIXED_TYPE6_ME_EN_LEN + 2 * mod_len + exp_len;
1636 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
1637 parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
1638 tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
1639
1640 vud_len = 2 + mod_len;
1641 memset(z90cMsg_p, 0, tmp_size);
1642
1643 temp = (unsigned char *)z90cMsg_p + CALLER_HEADER;
1644 memcpy(temp, &static_type6_hdr, sizeof(struct type6_hdr));
1645 tp6Hdr_p = (struct type6_hdr *)temp;
1646 tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
1647 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
1648 memcpy(tp6Hdr_p->function_code, static_PKE_function_code,
1649 sizeof(static_PKE_function_code));
1650 temp += sizeof(struct type6_hdr);
1651 memcpy(temp, &static_cprb, sizeof(struct CPRB));
1652 cprb_p = (struct CPRB *) temp;
1653 cprb_p->usage_domain[0]= (unsigned char)cdx;
1654 itoLe2((int *)&(tp6Hdr_p->FromCardLen1), cprb_p->rpl_parml);
1655 temp += sizeof(struct CPRB);
1656 memcpy(temp, &static_pke_function_and_rules,
1657 sizeof(struct function_and_rules_block));
1658 temp += sizeof(struct function_and_rules_block);
1659 temp += 2;
1660 if (copy_from_user(temp, icaMsg_p->inputdata, mod_len)) {
1661 kfree(temp_exp);
1662 return SEN_RELEASED;
1663 }
1664 if (is_empty(temp, mod_len)) {
1665 kfree(temp_exp);
1666 return SEN_USER_ERROR;
1667 }
1668 if ((temp[0] != 0x00) || (temp[1] != 0x02)) {
1669 kfree(temp_exp);
1670 return SEN_NOT_AVAIL;
1671 }
1672 for (i = 2; i < mod_len; i++)
1673 if (temp[i] == 0x00)
1674 break;
1675 if ((i < 9) || (i > (mod_len - 2))) {
1676 kfree(temp_exp);
1677 return SEN_NOT_AVAIL;
1678 }
1679 pad_len = i + 1;
1680 vud_len = mod_len - pad_len;
1681 memmove(temp, temp+pad_len, vud_len);
1682 temp -= 2;
1683 vud_len += 2;
1684 itoLe2(&vud_len, temp);
1685 temp += (vud_len);
1686 keyb_p = (struct T6_keyBlock_hdr *)temp;
1687 temp += sizeof(struct T6_keyBlock_hdr);
1688 memcpy(temp, &static_public_key, sizeof(static_public_key));
1689 key_p = (struct cca_public_key *)temp;
1690 temp = key_p->pubSec.exponent;
1691 memcpy(temp, exp_p, exp_len);
1692 kfree(temp_exp);
1693 temp += exp_len;
1694 if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
1695 return SEN_RELEASED;
1696 if (is_empty(temp, mod_len))
1697 return SEN_USER_ERROR;
1698 key_p->pubSec.modulus_bit_len = 8 * mod_len;
1699 key_p->pubSec.modulus_byte_len = mod_len;
1700 key_p->pubSec.exponent_len = exp_len;
1701 key_p->pubSec.section_length = CALLER_HEADER + mod_len + exp_len;
1702 key_len = key_p->pubSec.section_length + sizeof(struct cca_token_hdr);
1703 key_p->pubHdr.token_length = key_len;
1704 key_len += 4;
1705 itoLe2(&key_len, keyb_p->ulen);
1706 key_len += 2;
1707 itoLe2(&key_len, keyb_p->blen);
1708 parmBlock_l -= pad_len;
1709 itoLe2(&parmBlock_l, cprb_p->req_parml);
1710 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
1711
1712 return 0;
1713}
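/*
 * Note: the temp[0]/temp[1] tests and the scan for the first zero byte
 * above parse PKCS#1 v1.5 block type 2 padding, which this encrypt path
 * strips before handing the payload to the card:
 *
 *     0x00 | 0x02 | PS (nonzero padding bytes) | 0x00 | data
 *
 * The function rejects blocks whose separator appears too early (i < 9)
 * or within the last two bytes. Worked example for mod_len = 128 with
 * 40 padding bytes: the separator sits at index i = 42, so pad_len = 43
 * and vud_len = 128 - 43 = 85 data bytes (plus 2 for the length field
 * prepended just after).
 */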
1714
1715static int
1716ICACRT_msg_to_type6CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
1717 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
1718{
1719 int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l, short_len;
1720 int long_len, pad_len, keyPartsLen, tmp_l;
1721 unsigned char *tgt_p, *temp;
1722 struct type6_hdr *tp6Hdr_p;
1723 struct CPRB *cprb_p;
1724 struct cca_token_hdr *keyHdr_p;
1725 struct cca_pvt_ext_CRT_sec *pvtSec_p;
1726 struct cca_public_sec *pubSec_p;
1727
1728 mod_len = icaMsg_p->inputdatalength;
1729 short_len = mod_len / 2;
1730 long_len = 8 + short_len;
1731 keyPartsLen = 3 * long_len + 2 * short_len;
1732 pad_len = (8 - (keyPartsLen % 8)) % 8;
1733 keyPartsLen += pad_len + mod_len;
1734 tmp_size = FIXED_TYPE6_CR_LEN + keyPartsLen + mod_len;
1735 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
1736 parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
1737 vud_len = 2 + mod_len;
1738 tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
1739
1740 memset(z90cMsg_p, 0, tmp_size);
1741 tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
1742 memcpy(tgt_p, &static_type6_hdr, sizeof(struct type6_hdr));
1743 tp6Hdr_p = (struct type6_hdr *)tgt_p;
1744 tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
1745 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
1746 tgt_p += sizeof(struct type6_hdr);
1747 cprb_p = (struct CPRB *) tgt_p;
1748 memcpy(tgt_p, &static_cprb, sizeof(struct CPRB));
1749 cprb_p->usage_domain[0]= *((unsigned char *)(&(cdx))+3);
1750 itoLe2(&parmBlock_l, cprb_p->req_parml);
1751 memcpy(cprb_p->rpl_parml, cprb_p->req_parml,
1752 sizeof(cprb_p->req_parml));
1753 tgt_p += sizeof(struct CPRB);
1754 memcpy(tgt_p, &static_pkd_function_and_rules,
1755 sizeof(struct function_and_rules_block));
1756 tgt_p += sizeof(struct function_and_rules_block);
1757 itoLe2(&vud_len, tgt_p);
1758 tgt_p += 2;
1759 if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len))
1760 return SEN_RELEASED;
1761 if (is_empty(tgt_p, mod_len))
1762 return SEN_USER_ERROR;
1763 tgt_p += mod_len;
1764 tmp_l = sizeof(struct T6_keyBlock_hdr) + sizeof(struct cca_token_hdr) +
1765 sizeof(struct cca_pvt_ext_CRT_sec) + 0x0F + keyPartsLen;
1766 itoLe2(&tmp_l, tgt_p);
1767 temp = tgt_p + 2;
1768 tmp_l -= 2;
1769 itoLe2(&tmp_l, temp);
1770 tgt_p += sizeof(struct T6_keyBlock_hdr);
1771 keyHdr_p = (struct cca_token_hdr *)tgt_p;
1772 keyHdr_p->token_identifier = CCA_TKN_HDR_ID_EXT;
1773 tmp_l -= 4;
1774 keyHdr_p->token_length = tmp_l;
1775 tgt_p += sizeof(struct cca_token_hdr);
1776 pvtSec_p = (struct cca_pvt_ext_CRT_sec *)tgt_p;
1777 pvtSec_p->section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
1778 pvtSec_p->section_length =
1779 sizeof(struct cca_pvt_ext_CRT_sec) + keyPartsLen;
1780 pvtSec_p->key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
1781 pvtSec_p->key_use_flags[0] = CCA_PVT_USAGE_ALL;
1782 pvtSec_p->p_len = long_len;
1783 pvtSec_p->q_len = short_len;
1784 pvtSec_p->dp_len = long_len;
1785 pvtSec_p->dq_len = short_len;
1786 pvtSec_p->u_len = long_len;
1787 pvtSec_p->mod_len = mod_len;
1788 pvtSec_p->pad_len = pad_len;
1789 tgt_p += sizeof(struct cca_pvt_ext_CRT_sec);
1790 if (copy_from_user(tgt_p, icaMsg_p->np_prime, long_len))
1791 return SEN_RELEASED;
1792 if (is_empty(tgt_p, long_len))
1793 return SEN_USER_ERROR;
1794 tgt_p += long_len;
1795 if (copy_from_user(tgt_p, icaMsg_p->nq_prime, short_len))
1796 return SEN_RELEASED;
1797 if (is_empty(tgt_p, short_len))
1798 return SEN_USER_ERROR;
1799 tgt_p += short_len;
1800 if (copy_from_user(tgt_p, icaMsg_p->bp_key, long_len))
1801 return SEN_RELEASED;
1802 if (is_empty(tgt_p, long_len))
1803 return SEN_USER_ERROR;
1804 tgt_p += long_len;
1805 if (copy_from_user(tgt_p, icaMsg_p->bq_key, short_len))
1806 return SEN_RELEASED;
1807 if (is_empty(tgt_p, short_len))
1808 return SEN_USER_ERROR;
1809 tgt_p += short_len;
1810 if (copy_from_user(tgt_p, icaMsg_p->u_mult_inv, long_len))
1811 return SEN_RELEASED;
1812 if (is_empty(tgt_p, long_len))
1813 return SEN_USER_ERROR;
1814 tgt_p += long_len;
1815 tgt_p += pad_len;
1816 memset(tgt_p, 0xFF, mod_len);
1817 tgt_p += mod_len;
1818 memcpy(tgt_p, &static_cca_pub_sec, sizeof(struct cca_public_sec));
1819 pubSec_p = (struct cca_public_sec *) tgt_p;
1820 pubSec_p->modulus_bit_len = 8 * mod_len;
1821 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
1822
1823 return 0;
1824}
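/*
 * Note: pad_len rounds the concatenated CRT key parts up to an 8-byte
 * boundary. Worked examples:
 *     mod_len = 128: short_len = 64, long_len = 72,
 *                    keyPartsLen = 3*72 + 2*64 = 344, 344 % 8 = 0,
 *                    so pad_len = 0;
 *     mod_len = 100: short_len = 50, long_len = 58,
 *                    keyPartsLen = 3*58 + 2*50 = 274, 274 % 8 = 2,
 *                    so pad_len = (8 - 2) % 8 = 6.
 */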
1825
1826static int
1827ICAMEX_msg_to_type6MEX_msgX(struct ica_rsa_modexpo *icaMsg_p, int cdx,
1828 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p,
1829 int dev_type)
1830{
1831 int mod_len, exp_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l;
1832 int key_len, i;
1833 unsigned char *temp_exp, *tgt_p, *temp, *exp_p;
1834 struct type6_hdr *tp6Hdr_p;
1835 struct CPRBX *cprbx_p;
1836 struct cca_public_key *key_p;
1837 struct T6_keyBlock_hdrX *keyb_p;
1838
1839 temp_exp = kmalloc(256, GFP_KERNEL);
1840 if (!temp_exp)
1841 return EGETBUFF;
1842 mod_len = icaMsg_p->inputdatalength;
1843 if (copy_from_user(temp_exp, icaMsg_p->b_key, mod_len)) {
1844 kfree(temp_exp);
1845 return SEN_RELEASED;
1846 }
1847 if (is_empty(temp_exp, mod_len)) {
1848 kfree(temp_exp);
1849 return SEN_USER_ERROR;
1850 }
1851 exp_p = temp_exp;
1852 for (i = 0; i < mod_len; i++)
1853 if (exp_p[i])
1854 break;
1855 if (i >= mod_len) {
1856 kfree(temp_exp);
1857 return SEN_USER_ERROR;
1858 }
1859 exp_len = mod_len - i;
1860 exp_p += i;
1861 PDEBUG("exp_len after computation: %08x\n", exp_len);
1862 tmp_size = FIXED_TYPE6_ME_EN_LENX + 2 * mod_len + exp_len;
1863 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
1864 parmBlock_l = total_CPRB_len - sizeof(struct CPRBX);
1865 tmp_size = tmp_size + CALLER_HEADER;
1866 vud_len = 2 + mod_len;
1867 memset(z90cMsg_p, 0, tmp_size);
1868 tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
1869 memcpy(tgt_p, &static_type6_hdrX, sizeof(struct type6_hdr));
1870 tp6Hdr_p = (struct type6_hdr *)tgt_p;
1871 tp6Hdr_p->ToCardLen1 = total_CPRB_len;
1872 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRBX_SIZE;
1873 memcpy(tp6Hdr_p->function_code, static_PKE_function_code,
1874 sizeof(static_PKE_function_code));
1875 tgt_p += sizeof(struct type6_hdr);
1876 memcpy(tgt_p, &static_cprbx, sizeof(struct CPRBX));
1877 cprbx_p = (struct CPRBX *) tgt_p;
1878 cprbx_p->domain = (unsigned short)cdx;
1879 cprbx_p->rpl_msgbl = RESPONSE_CPRBX_SIZE;
1880 tgt_p += sizeof(struct CPRBX);
1881 if (dev_type == PCIXCC_MCL2)
1882 memcpy(tgt_p, &static_pke_function_and_rulesX_MCL2,
1883 sizeof(struct function_and_rules_block));
1884 else
1885 memcpy(tgt_p, &static_pke_function_and_rulesX,
1886 sizeof(struct function_and_rules_block));
1887 tgt_p += sizeof(struct function_and_rules_block);
1888
1889 tgt_p += 2;
1890 if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len)) {
1891 kfree(temp_exp);
1892 return SEN_RELEASED;
1893 }
1894 if (is_empty(tgt_p, mod_len)) {
1895 kfree(temp_exp);
1896 return SEN_USER_ERROR;
1897 }
1898 tgt_p -= 2;
1899 *((short *)tgt_p) = (short) vud_len;
1900 tgt_p += vud_len;
1901 keyb_p = (struct T6_keyBlock_hdrX *)tgt_p;
1902 tgt_p += sizeof(struct T6_keyBlock_hdrX);
1903 memcpy(tgt_p, &static_public_key, sizeof(static_public_key));
1904 key_p = (struct cca_public_key *)tgt_p;
1905 temp = key_p->pubSec.exponent;
1906 memcpy(temp, exp_p, exp_len);
1907 kfree(temp_exp);
1908 temp += exp_len;
1909 if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
1910 return SEN_RELEASED;
1911 if (is_empty(temp, mod_len))
1912 return SEN_USER_ERROR;
1913 key_p->pubSec.modulus_bit_len = 8 * mod_len;
1914 key_p->pubSec.modulus_byte_len = mod_len;
1915 key_p->pubSec.exponent_len = exp_len;
1916 key_p->pubSec.section_length = CALLER_HEADER + mod_len + exp_len;
1917 key_len = key_p->pubSec.section_length + sizeof(struct cca_token_hdr);
1918 key_p->pubHdr.token_length = key_len;
1919 key_len += 4;
1920 keyb_p->ulen = (unsigned short)key_len;
1921 key_len += 2;
1922 keyb_p->blen = (unsigned short)key_len;
1923 cprbx_p->req_parml = parmBlock_l;
1924 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
1925
1926 return 0;
1927}
1928
1929static int
1930ICACRT_msg_to_type6CRT_msgX(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
1931 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p,
1932 int dev_type)
1933{
1934 int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l, short_len;
1935 int long_len, pad_len, keyPartsLen, tmp_l;
1936 unsigned char *tgt_p, *temp;
1937 struct type6_hdr *tp6Hdr_p;
1938 struct CPRBX *cprbx_p;
1939 struct cca_token_hdr *keyHdr_p;
1940 struct cca_pvt_ext_CRT_sec *pvtSec_p;
1941 struct cca_public_sec *pubSec_p;
1942
1943 mod_len = icaMsg_p->inputdatalength;
1944 short_len = mod_len / 2;
1945 long_len = 8 + short_len;
1946 keyPartsLen = 3 * long_len + 2 * short_len;
1947 pad_len = (8 - (keyPartsLen % 8)) % 8;
1948 keyPartsLen += pad_len + mod_len;
1949 tmp_size = FIXED_TYPE6_CR_LENX + keyPartsLen + mod_len;
1950 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
1951 parmBlock_l = total_CPRB_len - sizeof(struct CPRBX);
1952 vud_len = 2 + mod_len;
1953 tmp_size = tmp_size + CALLER_HEADER;
1954 memset(z90cMsg_p, 0, tmp_size);
1955 tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
1956 memcpy(tgt_p, &static_type6_hdrX, sizeof(struct type6_hdr));
1957 tp6Hdr_p = (struct type6_hdr *)tgt_p;
1958 tp6Hdr_p->ToCardLen1 = total_CPRB_len;
1959 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRBX_SIZE;
1960 tgt_p += sizeof(struct type6_hdr);
1961 cprbx_p = (struct CPRBX *) tgt_p;
1962 memcpy(tgt_p, &static_cprbx, sizeof(struct CPRBX));
1963 cprbx_p->domain = (unsigned short)cdx;
1964 cprbx_p->req_parml = parmBlock_l;
1965 cprbx_p->rpl_msgbl = parmBlock_l;
1966 tgt_p += sizeof(struct CPRBX);
1967 if (dev_type == PCIXCC_MCL2)
1968 memcpy(tgt_p, &static_pkd_function_and_rulesX_MCL2,
1969 sizeof(struct function_and_rules_block));
1970 else
1971 memcpy(tgt_p, &static_pkd_function_and_rulesX,
1972 sizeof(struct function_and_rules_block));
1973 tgt_p += sizeof(struct function_and_rules_block);
1974 *((short *)tgt_p) = (short) vud_len;
1975 tgt_p += 2;
1976 if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len))
1977 return SEN_RELEASED;
1978 if (is_empty(tgt_p, mod_len))
1979 return SEN_USER_ERROR;
1980 tgt_p += mod_len;
1981 tmp_l = sizeof(struct T6_keyBlock_hdr) + sizeof(struct cca_token_hdr) +
1982 sizeof(struct cca_pvt_ext_CRT_sec) + 0x0F + keyPartsLen;
1983 *((short *)tgt_p) = (short) tmp_l;
1984 temp = tgt_p + 2;
1985 tmp_l -= 2;
1986 *((short *)temp) = (short) tmp_l;
1987 tgt_p += sizeof(struct T6_keyBlock_hdr);
1988 keyHdr_p = (struct cca_token_hdr *)tgt_p;
1989 keyHdr_p->token_identifier = CCA_TKN_HDR_ID_EXT;
1990 tmp_l -= 4;
1991 keyHdr_p->token_length = tmp_l;
1992 tgt_p += sizeof(struct cca_token_hdr);
1993 pvtSec_p = (struct cca_pvt_ext_CRT_sec *)tgt_p;
1994 pvtSec_p->section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
1995 pvtSec_p->section_length =
1996 sizeof(struct cca_pvt_ext_CRT_sec) + keyPartsLen;
1997 pvtSec_p->key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
1998 pvtSec_p->key_use_flags[0] = CCA_PVT_USAGE_ALL;
1999 pvtSec_p->p_len = long_len;
2000 pvtSec_p->q_len = short_len;
2001 pvtSec_p->dp_len = long_len;
2002 pvtSec_p->dq_len = short_len;
2003 pvtSec_p->u_len = long_len;
2004 pvtSec_p->mod_len = mod_len;
2005 pvtSec_p->pad_len = pad_len;
2006 tgt_p += sizeof(struct cca_pvt_ext_CRT_sec);
2007 if (copy_from_user(tgt_p, icaMsg_p->np_prime, long_len))
2008 return SEN_RELEASED;
2009 if (is_empty(tgt_p, long_len))
2010 return SEN_USER_ERROR;
2011 tgt_p += long_len;
2012 if (copy_from_user(tgt_p, icaMsg_p->nq_prime, short_len))
2013 return SEN_RELEASED;
2014 if (is_empty(tgt_p, short_len))
2015 return SEN_USER_ERROR;
2016 tgt_p += short_len;
2017 if (copy_from_user(tgt_p, icaMsg_p->bp_key, long_len))
2018 return SEN_RELEASED;
2019 if (is_empty(tgt_p, long_len))
2020 return SEN_USER_ERROR;
2021 tgt_p += long_len;
2022 if (copy_from_user(tgt_p, icaMsg_p->bq_key, short_len))
2023 return SEN_RELEASED;
2024 if (is_empty(tgt_p, short_len))
2025 return SEN_USER_ERROR;
2026 tgt_p += short_len;
2027 if (copy_from_user(tgt_p, icaMsg_p->u_mult_inv, long_len))
2028 return SEN_RELEASED;
2029 if (is_empty(tgt_p, long_len))
2030 return SEN_USER_ERROR;
2031 tgt_p += long_len;
2032 tgt_p += pad_len;
2033 memset(tgt_p, 0xFF, mod_len);
2034 tgt_p += mod_len;
2035 memcpy(tgt_p, &static_cca_pub_sec, sizeof(struct cca_public_sec));
2036 pubSec_p = (struct cca_public_sec *) tgt_p;
2037 pubSec_p->modulus_bit_len = 8 * mod_len;
2038 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
2039
2040 return 0;
2041}
2042
2043int
2044convert_request(unsigned char *buffer, int func, unsigned short function,
2045 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p)
2046{
2047 if (dev_type == PCICA) {
2048 if (func == ICARSACRT)
2049 return ICACRT_msg_to_type4CRT_msg(
2050 (struct ica_rsa_modexpo_crt *) buffer,
2051 msg_l_p, (union type4_msg *) msg_p);
2052 else
2053 return ICAMEX_msg_to_type4MEX_msg(
2054 (struct ica_rsa_modexpo *) buffer,
2055 msg_l_p, (union type4_msg *) msg_p);
2056 }
2057 if (dev_type == PCICC) {
2058 if (func == ICARSACRT)
2059 return ICACRT_msg_to_type6CRT_msg(
2060 (struct ica_rsa_modexpo_crt *) buffer,
2061 cdx, msg_l_p, (struct type6_msg *)msg_p);
2062 if (function == PCI_FUNC_KEY_ENCRYPT)
2063 return ICAMEX_msg_to_type6MEX_en_msg(
2064 (struct ica_rsa_modexpo *) buffer,
2065 cdx, msg_l_p, (struct type6_msg *) msg_p);
2066 else
2067 return ICAMEX_msg_to_type6MEX_de_msg(
2068 (struct ica_rsa_modexpo *) buffer,
2069 cdx, msg_l_p, (struct type6_msg *) msg_p);
2070 }
2071 if ((dev_type == PCIXCC_MCL2) ||
2072 (dev_type == PCIXCC_MCL3) ||
2073 (dev_type == CEX2C)) {
2074 if (func == ICARSACRT)
2075 return ICACRT_msg_to_type6CRT_msgX(
2076 (struct ica_rsa_modexpo_crt *) buffer,
2077 cdx, msg_l_p, (struct type6_msg *) msg_p,
2078 dev_type);
2079 else
2080 return ICAMEX_msg_to_type6MEX_msgX(
2081 (struct ica_rsa_modexpo *) buffer,
2082 cdx, msg_l_p, (struct type6_msg *) msg_p,
2083 dev_type);
2084 }
2085
2086 return 0;
2087}
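/*
 * Note: dispatch summary for convert_request() above:
 *     PCICA                         -> type 4 message (raw mod-expo/CRT)
 *     PCICC                         -> type 6 message with CPRB; the
 *                                      mod-expo path further splits on
 *                                      PCI_FUNC_KEY_ENCRYPT vs decrypt
 *     PCIXCC_MCL2/PCIXCC_MCL3/CEX2C -> type 6 message with CPRBX
 * An unrecognized dev_type falls through and returns 0 without building
 * a message, so callers depend on device probing having set dev_type to
 * one of the values handled here.
 */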
2088
 2089static int ext_bitlens_msg_count = 0;
2090static inline void
2091unset_ext_bitlens(void)
2092{
2093 if (!ext_bitlens_msg_count) {
2094 PRINTK("Unable to use coprocessors for extended bitlengths. "
2095 "Using PCICAs (if present) for extended bitlengths. "
2096 "This is not an error.\n");
2097 ext_bitlens_msg_count++;
2098 }
2099 ext_bitlens = 0;
2100}
2101
2102int
2103convert_response(unsigned char *response, unsigned char *buffer,
2104 int *respbufflen_p, unsigned char *resp_buff)
2105{
2106 struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer;
2107 struct type82_hdr *t82h_p = (struct type82_hdr *) response;
2108 struct type84_hdr *t84h_p = (struct type84_hdr *) response;
2109 struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response;
2110 int reply_code, service_rc, service_rs, src_l;
2111 unsigned char *src_p, *tgt_p;
2112 struct CPRB *cprb_p;
2113 struct CPRBX *cprbx_p;
2114
2115 src_p = 0;
2116 reply_code = 0;
2117 service_rc = 0;
2118 service_rs = 0;
2119 src_l = 0;
2120 switch (t82h_p->type) {
2121 case TYPE82_RSP_CODE:
2122 reply_code = t82h_p->reply_code;
2123 src_p = (unsigned char *)t82h_p;
2124 PRINTK("Hardware error: Type 82 Message Header: "
2125 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
2126 src_p[0], src_p[1], src_p[2], src_p[3],
2127 src_p[4], src_p[5], src_p[6], src_p[7]);
2128 break;
2129 case TYPE84_RSP_CODE:
2130 src_l = icaMsg_p->outputdatalength;
2131 src_p = response + (int)t84h_p->len - src_l;
2132 break;
2133 case TYPE86_RSP_CODE:
2134 reply_code = t86m_p->hdr.reply_code;
2135 if (reply_code != 0)
2136 break;
2137 cprb_p = (struct CPRB *)
2138 (response + sizeof(struct type86_fmt2_msg));
2139 cprbx_p = (struct CPRBX *) cprb_p;
2140 if (cprb_p->cprb_ver_id != 0x02) {
2141 le2toI(cprb_p->ccp_rtcode, &service_rc);
2142 if (service_rc != 0) {
2143 le2toI(cprb_p->ccp_rscode, &service_rs);
2144 if ((service_rc == 8) && (service_rs == 66))
2145 PDEBUG("Bad block format on PCICC\n");
2146 else if ((service_rc == 8) && (service_rs == 770)) {
2147 PDEBUG("Invalid key length on PCICC\n");
2148 unset_ext_bitlens();
2149 return REC_USE_PCICA;
2150 }
2151 else if ((service_rc == 8) && (service_rs == 783)) {
2152 PDEBUG("Extended bitlengths not enabled"
2153 "on PCICC\n");
2154 unset_ext_bitlens();
2155 return REC_USE_PCICA;
2156 }
2157 else
2158 PRINTK("service rc/rs: %d/%d\n",
2159 service_rc, service_rs);
2160 return REC_OPERAND_INV;
2161 }
2162 src_p = (unsigned char *)cprb_p + sizeof(struct CPRB);
2163 src_p += 4;
2164 le2toI(src_p, &src_l);
2165 src_l -= 2;
2166 src_p += 2;
2167 } else {
2168 service_rc = (int)cprbx_p->ccp_rtcode;
2169 if (service_rc != 0) {
2170 service_rs = (int) cprbx_p->ccp_rscode;
2171 if ((service_rc == 8) && (service_rs == 66))
2172 PDEBUG("Bad block format on PCXICC\n");
2173 else if ((service_rc == 8) && (service_rs == 770)) {
2174 PDEBUG("Invalid key length on PCIXCC\n");
2175 unset_ext_bitlens();
2176 return REC_USE_PCICA;
2177 }
2178 else if ((service_rc == 8) && (service_rs == 783)) {
2179 PDEBUG("Extended bitlengths not enabled"
2180 "on PCIXCC\n");
2181 unset_ext_bitlens();
2182 return REC_USE_PCICA;
2183 }
2184 else
2185 PRINTK("service rc/rs: %d/%d\n",
2186 service_rc, service_rs);
2187 return REC_OPERAND_INV;
2188 }
2189 src_p = (unsigned char *)
2190 cprbx_p + sizeof(struct CPRBX);
2191 src_p += 4;
2192 src_l = (int)(*((short *) src_p));
2193 src_l -= 2;
2194 src_p += 2;
2195 }
2196 break;
2197 default:
2198 return REC_BAD_MESSAGE;
2199 }
2200
2201 if (reply_code)
2202 switch (reply_code) {
2203 case REPLY_ERROR_OPERAND_INVALID:
2204 return REC_OPERAND_INV;
2205 case REPLY_ERROR_OPERAND_SIZE:
2206 return REC_OPERAND_SIZE;
2207 case REPLY_ERROR_EVEN_MOD_IN_OPND:
2208 return REC_EVEN_MOD;
2209 case REPLY_ERROR_MESSAGE_TYPE:
2210 return WRONG_DEVICE_TYPE;
2211 case REPLY_ERROR_TRANSPORT_FAIL:
2212 PRINTKW("Transport failed (APFS = %02X%02X%02X%02X)\n",
2213 t86m_p->apfs[0], t86m_p->apfs[1],
2214 t86m_p->apfs[2], t86m_p->apfs[3]);
2215 return REC_HARDWAR_ERR;
2216 default:
2217 PRINTKW("reply code = %d\n", reply_code);
2218 return REC_HARDWAR_ERR;
2219 }
2220
2221 if (service_rc != 0)
2222 return REC_OPERAND_INV;
2223
2224 if ((src_l > icaMsg_p->outputdatalength) ||
2225 (src_l > RESPBUFFSIZE) ||
2226 (src_l <= 0))
2227 return REC_OPERAND_SIZE;
2228
2229 PDEBUG("Length returned = %d\n", src_l);
2230 tgt_p = resp_buff + icaMsg_p->outputdatalength - src_l;
2231 memcpy(tgt_p, src_p, src_l);
2232 if ((t82h_p->type == TYPE86_RSP_CODE) && (resp_buff < tgt_p)) {
2233 memset(resp_buff, 0, icaMsg_p->outputdatalength - src_l);
2234 if (pad_msg(resp_buff, icaMsg_p->outputdatalength, src_l))
2235 return REC_INVALID_PAD;
2236 }
2237 *respbufflen_p = icaMsg_p->outputdatalength;
2238 if (*respbufflen_p == 0)
2239 PRINTK("Zero *respbufflen_p\n");
2240
2241 return 0;
2242}
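/*
 * Note: convert_response() recognizes three hardware reply formats:
 *     TYPE82_RSP_CODE - error reply; only reply_code is examined;
 *     TYPE84_RSP_CODE - PCICA reply; the result is the final
 *                       outputdatalength bytes of the message;
 *     TYPE86_RSP_CODE - PCICC/PCIXCC/CEX2C reply carrying an old-format
 *                       CPRB (cprb_ver_id other than 0x02) or a CPRBX
 *                       (cprb_ver_id 0x02), each with its own rc/rs and
 *                       length encoding.
 * In every case the payload is right-justified into resp_buff, and type 86
 * replies are re-padded via pad_msg() when shorter than requested.
 */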
2243
diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c
new file mode 100644
index 000000000000..a98c00c02559
--- /dev/null
+++ b/drivers/s390/crypto/z90main.c
@@ -0,0 +1,3563 @@
1/*
2 * linux/drivers/s390/crypto/z90main.c
3 *
4 * z90crypt 1.3.2
5 *
6 * Copyright (C) 2001, 2004 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <asm/uaccess.h> // copy_(from|to)_user
28#include <linux/compat.h>
29#include <linux/compiler.h>
30#include <linux/delay.h> // mdelay
31#include <linux/init.h>
32#include <linux/interrupt.h> // for tasklets
33#include <linux/ioctl32.h>
34#include <linux/module.h>
35#include <linux/moduleparam.h>
36#include <linux/kobject_uevent.h>
37#include <linux/proc_fs.h>
38#include <linux/syscalls.h>
39#include <linux/version.h>
40#include "z90crypt.h"
41#include "z90common.h"
42#ifndef Z90CRYPT_USE_HOTPLUG
43#include <linux/miscdevice.h>
44#endif
45
46#define VERSION_CODE(vers, rel, seq) (((vers)<<16) | ((rel)<<8) | (seq))
47#if LINUX_VERSION_CODE < VERSION_CODE(2,4,0) /* version < 2.4 */
48# error "This kernel is too old: not supported"
49#endif
50#if LINUX_VERSION_CODE > VERSION_CODE(2,7,0) /* version > 2.6 */
51# error "This kernel is too recent: not supported by this file"
52#endif
53
54#define VERSION_Z90MAIN_C "$Revision: 1.57 $"
55
56static char z90main_version[] __initdata =
57 "z90main.o (" VERSION_Z90MAIN_C "/"
58 VERSION_Z90COMMON_H "/" VERSION_Z90CRYPT_H ")";
59
60extern char z90hardware_version[];
61
62/**
63 * Defaults that may be modified.
64 */
65
66#ifndef Z90CRYPT_USE_HOTPLUG
67/**
68 * You can specify a different minor at compile time.
69 */
70#ifndef Z90CRYPT_MINOR
71#define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
72#endif
73#else
74/**
75 * You can specify a different major at compile time.
76 */
77#ifndef Z90CRYPT_MAJOR
78#define Z90CRYPT_MAJOR 0
79#endif
80#endif
81
82/**
83 * You can specify a different domain at compile time or on the insmod
84 * command line.
85 */
86#ifndef DOMAIN_INDEX
87#define DOMAIN_INDEX -1
88#endif
89
90/**
91 * This is the name under which the device is registered in /proc/modules.
92 */
93#define REG_NAME "z90crypt"
94
95/**
96 * Cleanup should run every CLEANUPTIME seconds and should clean up requests
97 * older than CLEANUPTIME seconds in the past.
98 */
99#ifndef CLEANUPTIME
100#define CLEANUPTIME 20
101#endif
102
103/**
104 * Config should run every CONFIGTIME seconds
105 */
106#ifndef CONFIGTIME
107#define CONFIGTIME 30
108#endif
109
110/**
111 * The first execution of the config task should take place
112 * immediately after initialization
113 */
114#ifndef INITIAL_CONFIGTIME
115#define INITIAL_CONFIGTIME 1
116#endif
117
118/**
119 * Reader should run every READERTIME milliseconds
120 * With the 100Hz patch for s390, z90crypt can lock the system solid while
121 * under heavy load. We'll try to avoid that.
122 */
123#ifndef READERTIME
124#if HZ > 1000
125#define READERTIME 2
126#else
127#define READERTIME 10
128#endif
129#endif
130
131/**
132 * turn long device array index into device pointer
133 */
134#define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])
135
136/**
137 * turn short device array index into long device array index
138 */
139#define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])
140
141/**
142 * turn short device array index into device pointer
143 */
144#define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
145
146/**
147 * Status for a work-element
148 */
149#define STAT_DEFAULT 0x00 // request has not been processed
150
151#define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device
152 // else, device is determined each write
153#define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed
154 // before being sent to the hardware.
155#define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device
156// 0x20 // UNUSED state
157#define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now
158#define STAT_NOWORK 0x00 // bits off: no work on any queue
159#define STAT_RDWRMASK 0x30 // mask for bits 5-4
160
161/**
162 * Macros to check the status RDWRMASK
163 */
164#define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
165#define SET_RDWRMASK(statbyte, newval) \
166 {(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;}
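/*
 * Note: a usage sketch for the two macros above. The read/write state
 * lives only in bits 5-4, so flags such as STAT_ROUTED in the other bits
 * survive the update (hypothetical demo function):
 */
#if 0 /* illustrative sketch, not part of the original driver */
static void rdwr_state_demo(void)
{
	unsigned char statbyte = STAT_ROUTED;		 /* 0x80 */

	SET_RDWRMASK(statbyte, STAT_WRITTEN);		 /* 0x80 -> 0xB0 */
	if (CHK_RDWRMASK(statbyte) == STAT_WRITTEN)
		SET_RDWRMASK(statbyte, STAT_READPEND);	 /* 0xB0 -> 0x90 */
}
#endif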
167
168/**
169 * Audit Trail. Progress of a Work element
170 * audit[0]: Unless noted otherwise, these bits are all set by the process
171 */
172#define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element
173#define FP_BUFFREQ 0x40 // Low Level buffer requested
174#define FP_BUFFGOT 0x20 // Low Level buffer obtained
175#define FP_SENT 0x10 // Work element sent to a crypto device
176 // (may be set by process or by reader task)
177#define FP_PENDING 0x08 // Work element placed on pending queue
178 // (may be set by process or by reader task)
179#define FP_REQUEST 0x04 // Work element placed on request queue
180#define FP_ASLEEP 0x02 // Work element about to sleep
181#define FP_AWAKE 0x01 // Work element has been awakened
182
183/**
184 * audit[1]: These bits are set by the reader task and/or the cleanup task
185 */
186#define FP_NOTPENDING 0x80 // Work element removed from pending queue
187#define FP_AWAKENING 0x40 // Caller about to be awakened
188#define FP_TIMEDOUT 0x20 // Caller timed out
189#define FP_RESPSIZESET 0x10 // Response size copied to work element
190#define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
191#define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
192#define FP_REMREQUEST 0x02 // Work element removed from request queue
193#define FP_SIGNALED 0x01 // Work element was awakened by a signal
194
195/**
196 * audit[2]: unused
197 */
198
199/**
200 * state of the file handle in private_data.status
201 */
202#define STAT_OPEN 0
203#define STAT_CLOSED 1
204
205/**
206 * PID() expands to the process ID of the current process
207 */
208#define PID() (current->pid)
209
210/**
211 * Selected Constants. The number of APs and the number of devices
212 */
213#ifndef Z90CRYPT_NUM_APS
214#define Z90CRYPT_NUM_APS 64
215#endif
216#ifndef Z90CRYPT_NUM_DEVS
217#define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
218#endif
219
220/**
221 * Buffer size for receiving responses. The maximum Response Size
222 * is actually the maximum request size, since in an error condition
223 * the request itself may be returned unchanged.
224 */
225#define MAX_RESPONSE_SIZE 0x0000077C
226
227/**
228 * A count and status-byte mask
229 */
230struct status {
231 int st_count; // # of enabled devices
232 int disabled_count; // # of disabled devices
233 int user_disabled_count; // # of devices disabled via proc fs
234 unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
235};
236
237/**
238 * The array of device indexes is a mechanism for fast indexing into
239 * a long (and sparse) array. For instance, if APs 3, 9 and 47 are
240 * installed, z90CDeviceIndex[0] is 3, z90CDeviceIndex[1] is 9, and
241 * z90CDeviceIndex[2] is 47.
242 */
243struct device_x {
244 int device_index[Z90CRYPT_NUM_DEVS];
245};
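/*
 * Note: reading the example above through the macros defined earlier:
 * with APs 3, 9 and 47 installed, SHRT2LONG(1) == 9 and SHRT2DEVPTR(1)
 * expands to z90crypt.device_p[9], so iterating a short index over
 * 0..st_count-1 visits only the installed devices.
 */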
246
247/**
248 * All devices are arranged in a single array: 64 APs
249 */
250struct device {
251 int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
252 // PCIXCC_MCL3, CEX2C
253 enum devstat dev_stat; // current device status
254 int dev_self_x; // Index in array
255 int disabled; // Set when device is in error
256 int user_disabled; // Set when device is disabled by user
257 int dev_q_depth; // q depth
258 unsigned char * dev_resp_p; // Response buffer address
259 int dev_resp_l; // Response Buffer length
260 int dev_caller_count; // Number of callers
261 int dev_total_req_cnt; // # requests for device since load
262 struct list_head dev_caller_list; // List of callers
263};
264
265/**
266 * There's a struct status and a struct device_x for each device type.
267 */
268struct hdware_block {
269 struct status hdware_mask;
270 struct status type_mask[Z90CRYPT_NUM_TYPES];
271 struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
272 unsigned char device_type_array[Z90CRYPT_NUM_APS];
273};
274
275/**
276 * z90crypt is the topmost data structure in the hierarchy.
277 */
278struct z90crypt {
279 int max_count; // Nr of possible crypto devices
280 struct status mask;
281 int q_depth_array[Z90CRYPT_NUM_DEVS];
282 int dev_type_array[Z90CRYPT_NUM_DEVS];
283 struct device_x overall_device_x; // array device indexes
284 struct device * device_p[Z90CRYPT_NUM_DEVS];
285 int terminating;
286 int domain_established;// TRUE: domain has been found
287 int cdx; // Crypto Domain Index
288 int len; // Length of this data structure
289 struct hdware_block *hdware_info;
290};
291
292/**
293 * An array of these structures is pointed to from dev_caller
294 * The length of the array depends on the device type. For APs,
295 * there are 8.
296 *
297 * The caller buffer is allocated to the user at OPEN. At WRITE,
298 * it contains the request; at READ, the response. The function
299 * send_to_crypto_device converts the request to device-dependent
 300 * form and uses the caller's OPEN-allocated buffer for the response.
301 */
302struct caller {
303 int caller_buf_l; // length of original request
304 unsigned char * caller_buf_p; // Original request on WRITE
305 int caller_dev_dep_req_l; // len device dependent request
306 unsigned char * caller_dev_dep_req_p; // Device dependent form
307 unsigned char caller_id[8]; // caller-supplied message id
308 struct list_head caller_liste;
309 unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE];
310};
311
312/**
313 * Function prototypes from z90hardware.c
314 */
315enum hdstat query_online(int, int, int, int *, int *);
316enum devstat reset_device(int, int, int);
317enum devstat send_to_AP(int, int, int, unsigned char *);
318enum devstat receive_from_AP(int, int, int, unsigned char *, unsigned char *);
319int convert_request(unsigned char *, int, short, int, int, int *,
320 unsigned char *);
321int convert_response(unsigned char *, unsigned char *, int *, unsigned char *);
322
323/**
324 * Low level function prototypes
325 */
326static int create_z90crypt(int *);
327static int refresh_z90crypt(int *);
328static int find_crypto_devices(struct status *);
329static int create_crypto_device(int);
330static int destroy_crypto_device(int);
331static void destroy_z90crypt(void);
332static int refresh_index_array(struct status *, struct device_x *);
333static int probe_device_type(struct device *);
334static int probe_PCIXCC_type(struct device *);
335
336/**
337 * proc fs definitions
338 */
339static struct proc_dir_entry *z90crypt_entry;
340
341/**
342 * data structures
343 */
344
345/**
346 * work_element.opener points back to this structure
347 */
348struct priv_data {
349 pid_t opener_pid;
350 unsigned char status; // 0: open 1: closed
351};
352
353/**
354 * A work element is allocated for each request
355 */
356struct work_element {
357 struct priv_data *priv_data;
358 pid_t pid;
359 int devindex; // index of device processing this w_e
360 // (If request did not specify device,
361 // -1 until placed onto a queue)
362 int devtype;
363 struct list_head liste; // used for requestq and pendingq
364 char buffer[128]; // local copy of user request
365 int buff_size; // size of the buffer for the request
366 char resp_buff[RESPBUFFSIZE];
367 int resp_buff_size;
368 char __user * resp_addr; // address of response in user space
369 unsigned int funccode; // function code of request
370 wait_queue_head_t waitq;
371 unsigned long requestsent; // time at which the request was sent
372 atomic_t alarmrung; // wake-up signal
373 unsigned char caller_id[8]; // pid + counter, for this w_e
374 unsigned char status[1]; // bits to mark status of the request
375 unsigned char audit[3]; // record of work element's progress
376 unsigned char * requestptr; // address of request buffer
377 int retcode; // return code of request
378};
379
380/**
381 * High level function prototypes
382 */
383static int z90crypt_open(struct inode *, struct file *);
384static int z90crypt_release(struct inode *, struct file *);
385static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
386static ssize_t z90crypt_write(struct file *, const char __user *,
387 size_t, loff_t *);
388static int z90crypt_ioctl(struct inode *, struct file *,
389 unsigned int, unsigned long);
390
391static void z90crypt_reader_task(unsigned long);
392static void z90crypt_schedule_reader_task(unsigned long);
393static void z90crypt_config_task(unsigned long);
394static void z90crypt_cleanup_task(unsigned long);
395
396static int z90crypt_status(char *, char **, off_t, int, int *, void *);
397static int z90crypt_status_write(struct file *, const char __user *,
398 unsigned long, void *);
399
400/**
401 * Hotplug support
402 */
403
404#ifdef Z90CRYPT_USE_HOTPLUG
405#define Z90CRYPT_HOTPLUG_ADD 1
406#define Z90CRYPT_HOTPLUG_REMOVE 2
407
408static void z90crypt_hotplug_event(int, int, int);
409#endif
410
411/**
412 * Storage allocated at initialization and used throughout the life of
413 * this insmod
414 */
415#ifdef Z90CRYPT_USE_HOTPLUG
416static int z90crypt_major = Z90CRYPT_MAJOR;
417#endif
418
419static int domain = DOMAIN_INDEX;
420static struct z90crypt z90crypt;
421static int quiesce_z90crypt;
422static spinlock_t queuespinlock;
423static struct list_head request_list;
424static int requestq_count;
425static struct list_head pending_list;
426static int pendingq_count;
427
428static struct tasklet_struct reader_tasklet;
429static struct timer_list reader_timer;
430static struct timer_list config_timer;
431static struct timer_list cleanup_timer;
432static atomic_t total_open;
433static atomic_t z90crypt_step;
434
435static struct file_operations z90crypt_fops = {
436 .owner = THIS_MODULE,
437 .read = z90crypt_read,
438 .write = z90crypt_write,
439 .ioctl = z90crypt_ioctl,
440 .open = z90crypt_open,
441 .release = z90crypt_release
442};
443
444#ifndef Z90CRYPT_USE_HOTPLUG
445static struct miscdevice z90crypt_misc_device = {
446 .minor = Z90CRYPT_MINOR,
447 .name = DEV_NAME,
448 .fops = &z90crypt_fops,
449 .devfs_name = DEV_NAME
450};
451#endif
452
453/**
454 * Documentation values.
455 */
 456MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman "
457 "and Jochen Roehrig");
458MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
459 "Copyright 2001, 2004 IBM Corporation");
460MODULE_LICENSE("GPL");
461module_param(domain, int, 0);
462MODULE_PARM_DESC(domain, "domain index for device");
463
464#ifdef CONFIG_COMPAT
465/**
466 * ioctl32 conversion routines
467 */
468struct ica_rsa_modexpo_32 { // For 32-bit callers
469 compat_uptr_t inputdata;
470 unsigned int inputdatalength;
471 compat_uptr_t outputdata;
472 unsigned int outputdatalength;
473 compat_uptr_t b_key;
474 compat_uptr_t n_modulus;
475};
476
477static int
478trans_modexpo32(unsigned int fd, unsigned int cmd, unsigned long arg,
479 struct file *file)
480{
481 struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
482 struct ica_rsa_modexpo_32 mex32k;
483 struct ica_rsa_modexpo __user *mex64;
484 int ret = 0;
485 unsigned int i;
486
487 if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
488 return -EFAULT;
489 mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
490 if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
491 return -EFAULT;
492 if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
493 return -EFAULT;
494 if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) ||
495 __put_user(mex32k.inputdatalength, &mex64->inputdatalength) ||
496 __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
497 __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
498 __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) ||
499 __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
500 return -EFAULT;
501 ret = sys_ioctl(fd, cmd, (unsigned long)mex64);
502 if (!ret)
503 if (__get_user(i, &mex64->outputdatalength) ||
504 __put_user(i, &mex32u->outputdatalength))
505 ret = -EFAULT;
506 return ret;
507}
508
509struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
510 compat_uptr_t inputdata;
511 unsigned int inputdatalength;
512 compat_uptr_t outputdata;
513 unsigned int outputdatalength;
514 compat_uptr_t bp_key;
515 compat_uptr_t bq_key;
516 compat_uptr_t np_prime;
517 compat_uptr_t nq_prime;
518 compat_uptr_t u_mult_inv;
519};
520
521static int
522trans_modexpo_crt32(unsigned int fd, unsigned int cmd, unsigned long arg,
523 struct file *file)
524{
525 struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
526 struct ica_rsa_modexpo_crt_32 crt32k;
527 struct ica_rsa_modexpo_crt __user *crt64;
528 int ret = 0;
529 unsigned int i;
530
531 if (!access_ok(VERIFY_WRITE, crt32u,
532 sizeof(struct ica_rsa_modexpo_crt_32)))
533 return -EFAULT;
534 crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
535 if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
536 return -EFAULT;
537 if (copy_from_user(&crt32k, crt32u,
538 sizeof(struct ica_rsa_modexpo_crt_32)))
539 return -EFAULT;
540 if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) ||
541 __put_user(crt32k.inputdatalength, &crt64->inputdatalength) ||
542 __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
543 __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
544 __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) ||
545 __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) ||
546 __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) ||
547 __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) ||
548 __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
549 ret = -EFAULT;
550 if (!ret)
551 ret = sys_ioctl(fd, cmd, (unsigned long)crt64);
552 if (!ret)
553 if (__get_user(i, &crt64->outputdatalength) ||
554 __put_user(i, &crt32u->outputdatalength))
555 ret = -EFAULT;
556 return ret;
557}
558
559static int compatible_ioctls[] = {
560 ICAZ90STATUS, Z90QUIESCE, Z90STAT_TOTALCOUNT, Z90STAT_PCICACOUNT,
561 Z90STAT_PCICCCOUNT, Z90STAT_PCIXCCCOUNT, Z90STAT_PCIXCCMCL2COUNT,
562 Z90STAT_PCIXCCMCL3COUNT, Z90STAT_CEX2CCOUNT, Z90STAT_REQUESTQ_COUNT,
563 Z90STAT_PENDINGQ_COUNT, Z90STAT_TOTALOPEN_COUNT, Z90STAT_DOMAIN_INDEX,
564 Z90STAT_STATUS_MASK, Z90STAT_QDEPTH_MASK, Z90STAT_PERDEV_REQCNT,
565};
566
567static void z90_unregister_ioctl32s(void)
568{
569 int i;
570
571 unregister_ioctl32_conversion(ICARSAMODEXPO);
572 unregister_ioctl32_conversion(ICARSACRT);
573
574 for(i = 0; i < ARRAY_SIZE(compatible_ioctls); i++)
575 unregister_ioctl32_conversion(compatible_ioctls[i]);
576}
577
578static int z90_register_ioctl32s(void)
579{
580 int result, i;
581
582 result = register_ioctl32_conversion(ICARSAMODEXPO, trans_modexpo32);
583 if (result == -EBUSY) {
584 unregister_ioctl32_conversion(ICARSAMODEXPO);
585 result = register_ioctl32_conversion(ICARSAMODEXPO,
586 trans_modexpo32);
587 }
588 if (result)
589 return result;
590 result = register_ioctl32_conversion(ICARSACRT, trans_modexpo_crt32);
591 if (result == -EBUSY) {
592 unregister_ioctl32_conversion(ICARSACRT);
593 result = register_ioctl32_conversion(ICARSACRT,
594 trans_modexpo_crt32);
595 }
596 if (result)
597 return result;
598
599 for(i = 0; i < ARRAY_SIZE(compatible_ioctls); i++) {
600 result = register_ioctl32_conversion(compatible_ioctls[i], 0);
601 if (result == -EBUSY) {
602 unregister_ioctl32_conversion(compatible_ioctls[i]);
603 result = register_ioctl32_conversion(
604 compatible_ioctls[i], 0);
605 }
606 if (result)
607 return result;
608 }
609 return 0;
610}
611#else // !CONFIG_COMPAT
612static inline void z90_unregister_ioctl32s(void)
613{
614}
615
616static inline int z90_register_ioctl32s(void)
617{
618 return 0;
619}
620#endif
621
622/**
623 * The module initialization code.
624 */
625static int __init
626z90crypt_init_module(void)
627{
628 int result, nresult;
629 struct proc_dir_entry *entry;
630
631 PDEBUG("PID %d\n", PID());
632
633 if ((domain < -1) || (domain > 15)) {
634 PRINTKW("Invalid param: domain = %d. Not loading.\n", domain);
635 return -EINVAL;
636 }
637
638#ifndef Z90CRYPT_USE_HOTPLUG
639 /* Register as misc device with given minor (or get a dynamic one). */
640 result = misc_register(&z90crypt_misc_device);
641 if (result < 0) {
 642 PRINTKW("misc_register (minor %d) failed with %d\n",
643 z90crypt_misc_device.minor, result);
644 return result;
645 }
646#else
647 /* Register the major (or get a dynamic one). */
648 result = register_chrdev(z90crypt_major, REG_NAME, &z90crypt_fops);
649 if (result < 0) {
650 PRINTKW("register_chrdev (major %d) failed with %d.\n",
651 z90crypt_major, result);
652 return result;
653 }
654
655 if (z90crypt_major == 0)
656 z90crypt_major = result;
657#endif
658
659 PDEBUG("Registered " DEV_NAME " with result %d\n", result);
660
661 result = create_z90crypt(&domain);
662 if (result != 0) {
663 PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
664 domain, result);
665 result = -ENOMEM;
666 goto init_module_cleanup;
667 }
668
 669 /* result is 0 here; the error path above has already returned. */
 670 PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
 671 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
 672 __DATE__, __TIME__);
 673 PRINTKN("%s\n", z90main_version);
 674 PRINTKN("%s\n", z90hardware_version);
 675 PDEBUG("create_z90crypt (domain index %d) successful.\n",
 676 domain);
679
680#ifdef Z90CRYPT_USE_HOTPLUG
681 /* generate hotplug event for device node generation */
682 z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_ADD);
683#endif
684
685 /* Initialize globals. */
686 spin_lock_init(&queuespinlock);
687
688 INIT_LIST_HEAD(&pending_list);
689 pendingq_count = 0;
690
691 INIT_LIST_HEAD(&request_list);
692 requestq_count = 0;
693
694 quiesce_z90crypt = 0;
695
696 atomic_set(&total_open, 0);
697 atomic_set(&z90crypt_step, 0);
698
699 /* Set up the cleanup task. */
700 init_timer(&cleanup_timer);
701 cleanup_timer.function = z90crypt_cleanup_task;
702 cleanup_timer.data = 0;
703 cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
704 add_timer(&cleanup_timer);
705
706 /* Set up the proc file system */
707 entry = create_proc_entry("driver/z90crypt", 0644, 0);
708 if (entry) {
709 entry->nlink = 1;
710 entry->data = 0;
711 entry->read_proc = z90crypt_status;
712 entry->write_proc = z90crypt_status_write;
713 }
714 else
715 PRINTK("Couldn't create z90crypt proc entry\n");
716 z90crypt_entry = entry;
717
718 /* Set up the configuration task. */
719 init_timer(&config_timer);
720 config_timer.function = z90crypt_config_task;
721 config_timer.data = 0;
722 config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
723 add_timer(&config_timer);
724
725 /* Set up the reader task */
726 tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
727 init_timer(&reader_timer);
728 reader_timer.function = z90crypt_schedule_reader_task;
729 reader_timer.data = 0;
730 reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
731 add_timer(&reader_timer);
732
733 if ((result = z90_register_ioctl32s()))
734 goto init_module_cleanup;
735
736 return 0; // success
737
738init_module_cleanup:
739 z90_unregister_ioctl32s();
740
741#ifndef Z90CRYPT_USE_HOTPLUG
742 if ((nresult = misc_deregister(&z90crypt_misc_device)))
743 PRINTK("misc_deregister failed with %d.\n", nresult);
744 else
745 PDEBUG("misc_deregister successful.\n");
746#else
747 if ((nresult = unregister_chrdev(z90crypt_major, REG_NAME)))
748 PRINTK("unregister_chrdev failed with %d.\n", nresult);
749 else
750 PDEBUG("unregister_chrdev successful.\n");
751#endif
752
753 return result; // failure
754}
755
756/**
757 * The module termination code
758 */
759static void __exit
760z90crypt_cleanup_module(void)
761{
762 int nresult;
763
764 PDEBUG("PID %d\n", PID());
765
766 z90_unregister_ioctl32s();
767
768 remove_proc_entry("driver/z90crypt", 0);
769
770#ifndef Z90CRYPT_USE_HOTPLUG
771 if ((nresult = misc_deregister(&z90crypt_misc_device)))
772 PRINTK("misc_deregister failed with %d.\n", nresult);
773 else
774 PDEBUG("misc_deregister successful.\n");
775#else
776 z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_REMOVE);
777
778 if ((nresult = unregister_chrdev(z90crypt_major, REG_NAME)))
779 PRINTK("unregister_chrdev failed with %d.\n", nresult);
780 else
781 PDEBUG("unregister_chrdev successful.\n");
782#endif
783
784 /* Remove the tasks */
785 tasklet_kill(&reader_tasklet);
786 del_timer(&reader_timer);
787 del_timer(&config_timer);
788 del_timer(&cleanup_timer);
789
790 destroy_z90crypt();
791
792 PRINTKN("Unloaded.\n");
793}
794
795/**
796 * Functions running under a process id
797 *
798 * The I/O functions:
799 * z90crypt_open
800 * z90crypt_release
801 * z90crypt_read
802 * z90crypt_write
803 * z90crypt_ioctl
804 * z90crypt_status
805 * z90crypt_status_write
806 * disable_card
807 * enable_card
808 * scan_char
809 * scan_string
810 *
811 * Helper functions:
812 * z90crypt_rsa
813 * z90crypt_prepare
814 * z90crypt_send
815 * z90crypt_process_results
816 *
817 */
818static int
819z90crypt_open(struct inode *inode, struct file *filp)
820{
821 struct priv_data *private_data_p;
822
823 if (quiesce_z90crypt)
824 return -EQUIESCE;
825
826 private_data_p = kmalloc(sizeof(struct priv_data), GFP_KERNEL);
827 if (!private_data_p) {
828 PRINTK("Memory allocate failed\n");
829 return -ENOMEM;
830 }
831
832 memset((void *)private_data_p, 0, sizeof(struct priv_data));
833 private_data_p->status = STAT_OPEN;
834 private_data_p->opener_pid = PID();
835 filp->private_data = private_data_p;
836 atomic_inc(&total_open);
837
838 return 0;
839}
840
841static int
842z90crypt_release(struct inode *inode, struct file *filp)
843{
844 struct priv_data *private_data_p = filp->private_data;
845
846 PDEBUG("PID %d (filp %p)\n", PID(), filp);
847
848 private_data_p->status = STAT_CLOSED;
849 memset(private_data_p, 0, sizeof(struct priv_data));
850 kfree(private_data_p);
851 atomic_dec(&total_open);
852
853 return 0;
854}
855
856/*
 857 * There are two read functions; a compile option selects one:
 858 * without USE_GET_RANDOM_BYTES
 859 * => read() always returns -EPERM;
 860 * otherwise
 861 * => read() uses the get_random_bytes() kernel function
862 */
863#ifndef USE_GET_RANDOM_BYTES
864/**
865 * z90crypt_read will not be supported beyond z90crypt 1.3.1
866 */
867static ssize_t
868z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
869{
870 PDEBUG("filp %p (PID %d)\n", filp, PID());
871 return -EPERM;
872}
873#else // we want to use get_random_bytes
874/**
875 * read() just returns a string of random bytes. Since we have no way
876 * to generate these cryptographically, we just execute get_random_bytes
877 * for the length specified.
878 */
879#include <linux/random.h>
880static ssize_t
881z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
882{
883 unsigned char *temp_buff;
884
885 PDEBUG("filp %p (PID %d)\n", filp, PID());
886
887 if (quiesce_z90crypt)
888 return -EQUIESCE;
 889 if ((ssize_t)count < 0) {
 890 PRINTK("Requested random byte count negative: %ld\n", (long)count);
891 return -EINVAL;
892 }
893 if (count > RESPBUFFSIZE) {
894 PDEBUG("count[%d] > RESPBUFFSIZE", count);
895 return -EINVAL;
896 }
897 if (count == 0)
898 return 0;
899 temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
900 if (!temp_buff) {
901 PRINTK("Memory allocate failed\n");
902 return -ENOMEM;
903 }
904 get_random_bytes(temp_buff, count);
905
906 if (copy_to_user(buf, temp_buff, count) != 0) {
907 kfree(temp_buff);
908 return -EFAULT;
909 }
910 kfree(temp_buff);
911 return count;
912}
913#endif
914
915/**
 917 * Write is not allowed
917 */
918static ssize_t
919z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
920{
921 PDEBUG("filp %p (PID %d)\n", filp, PID());
922 return -EPERM;
923}
924
925/**
926 * New status functions
927 */
928static inline int
929get_status_totalcount(void)
930{
931 return z90crypt.hdware_info->hdware_mask.st_count;
932}
933
934static inline int
935get_status_PCICAcount(void)
936{
937 return z90crypt.hdware_info->type_mask[PCICA].st_count;
938}
939
940static inline int
941get_status_PCICCcount(void)
942{
943 return z90crypt.hdware_info->type_mask[PCICC].st_count;
944}
945
946static inline int
947get_status_PCIXCCcount(void)
948{
949 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count +
950 z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
951}
952
953static inline int
954get_status_PCIXCCMCL2count(void)
955{
956 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count;
957}
958
959static inline int
960get_status_PCIXCCMCL3count(void)
961{
962 return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
963}
964
965static inline int
966get_status_CEX2Ccount(void)
967{
968 return z90crypt.hdware_info->type_mask[CEX2C].st_count;
969}
970
971static inline int
972get_status_requestq_count(void)
973{
974 return requestq_count;
975}
976
977static inline int
978get_status_pendingq_count(void)
979{
980 return pendingq_count;
981}
982
983static inline int
984get_status_totalopen_count(void)
985{
986 return atomic_read(&total_open);
987}
988
989static inline int
990get_status_domain_index(void)
991{
992 return z90crypt.cdx;
993}
994
995static inline unsigned char *
996get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
997{
998 int i, ix;
999
1000 memcpy(status, z90crypt.hdware_info->device_type_array,
1001 Z90CRYPT_NUM_APS);
1002
1003 for (i = 0; i < get_status_totalcount(); i++) {
1004 ix = SHRT2LONG(i);
1005 if (LONG2DEVPTR(ix)->user_disabled)
1006 status[ix] = 0x0d;
1007 }
1008
1009 return status;
1010}
1011
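/*
 * Note on the mask built above: each byte holds the device type code
 * for that AP index (the same 1-5 codes listed in the proc output
 * below: 1 PCICA, 2 PCICC, 3 PCIXCC MCL2, 4 PCIXCC MCL3, 5 CEX2C),
 * except that a user-disabled device is overwritten with 0x0d, which
 * the "%01x" formatting in sprintcl() below renders as the letter 'd'
 * in the status listing.
 */
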
1012static inline unsigned char *
1013get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
1014{
1015 int i, ix;
1016
1017 memset(qdepth, 0, Z90CRYPT_NUM_APS);
1018
1019 for (i = 0; i < get_status_totalcount(); i++) {
1020 ix = SHRT2LONG(i);
1021 qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count;
1022 }
1023
1024 return qdepth;
1025}
1026
1027static inline unsigned int *
1028get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
1029{
1030 int i, ix;
1031
1032 memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));
1033
1034 for (i = 0; i < get_status_totalcount(); i++) {
1035 ix = SHRT2LONG(i);
1036 reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt;
1037 }
1038
1039 return reqcnt;
1040}
1041
1042static inline void
1043init_work_element(struct work_element *we_p,
1044 struct priv_data *priv_data, pid_t pid)
1045{
1046 int step;
1047
1048 we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
1049 /* Come up with a unique id for this caller. */
1050 step = atomic_inc_return(&z90crypt_step);
1051 memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
1052 memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));
1053 we_p->pid = pid;
1054 we_p->priv_data = priv_data;
1055 we_p->status[0] = STAT_DEFAULT;
1056 we_p->audit[0] = 0x00;
1057 we_p->audit[1] = 0x00;
1058 we_p->audit[2] = 0x00;
1059 we_p->resp_buff_size = 0;
1060 we_p->retcode = 0;
1061 we_p->devindex = -1;
1062 we_p->devtype = -1;
1063 atomic_set(&we_p->alarmrung, 0);
1064 init_waitqueue_head(&we_p->waitq);
1065 INIT_LIST_HEAD(&(we_p->liste));
1066}
1067
1068static inline int
1069allocate_work_element(struct work_element **we_pp,
1070 struct priv_data *priv_data_p, pid_t pid)
1071{
1072 struct work_element *we_p;
1073
1074 we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL);
1075 if (!we_p)
1076 return -ENOMEM;
1077 init_work_element(we_p, priv_data_p, pid);
1078 *we_pp = we_p;
1079 return 0;
1080}
1081
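/*
 * Layout note, derived from init_work_element() above and
 * get_crypto_request_buffer() below: each work element occupies a
 * single zeroed page,
 *
 *	| struct work_element | struct caller | copied-in request data |
 *
 * with requestptr pointing just past the work_element. The 8-byte
 * caller_id doubles as the PSMID and is the 4-byte PID followed by the
 * 4-byte step counter; on (big-endian) s390, e.g. PID 1234 at step 7
 * gives 00 00 04 D2 00 00 00 07.
 */
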
1082static inline void
1083remove_device(struct device *device_p)
1084{
1085 if (!device_p || (device_p->disabled != 0))
1086 return;
1087 device_p->disabled = 1;
1088 z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
1089 z90crypt.hdware_info->hdware_mask.disabled_count++;
1090}
1091
1092/**
1093 * Bitlength limits for each card
1094 *
1095 * There are new MCLs which allow more bitlengths. See the table for details.
1096 * The MCL must be applied and the newer bitlengths enabled for these to work.
1097 *
1098 * Card Type Old limit New limit
1099 * PCICC 512-1024 512-2048
1100 * PCIXCC_MCL2 512-2048 no change (applying this MCL == card is MCL3+)
1101 * PCIXCC_MCL3 512-2048 128-2048
1102 * CEX2C 512-2048 128-2048
1103 *
1104 * ext_bitlens (extended bitlengths) is a global, since you should not apply an
1105 * MCL to just one card in a machine. We assume, at first, that all cards have
1106 * these capabilities.
1107 */
1108int ext_bitlens = 1; // This is global
1109#define PCIXCC_MIN_MOD_SIZE 16 // 128 bits
1110#define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits
1111#define PCICC_MIN_MOD_SIZE 64 // 512 bits
1112#define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits
1113#define MAX_MOD_SIZE 256 // 2048 bits
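/*
 * The *_MOD_SIZE values above are byte counts; multiply by 8 for the
 * bitlengths in the table, e.g. MAX_MOD_SIZE = 256 bytes = 2048 bits
 * and PCIXCC_MIN_MOD_SIZE = 16 bytes = 128 bits. The request's
 * inputdatalength is compared against these byte counts below.
 */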
1114
1115static inline int
1116select_device_type(int *dev_type_p, int bytelength)
1117{
1118 static int count = 0;
1119 int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, index_to_use;
1120 struct status *stat;
1121 if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
1122 (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
1123 (*dev_type_p != CEX2C) && (*dev_type_p != ANYDEV))
1124 return -1;
1125 if (*dev_type_p != ANYDEV) {
1126 stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
1127 if (stat->st_count >
1128 (stat->disabled_count + stat->user_disabled_count))
1129 return 0;
1130 return -1;
1131 }
1132
1133 /* Assumption: PCICA, PCIXCC_MCL3, and CEX2C are all similar in speed */
1134 stat = &z90crypt.hdware_info->type_mask[PCICA];
1135 PCICA_avail = stat->st_count -
1136 (stat->disabled_count + stat->user_disabled_count);
1137 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3];
1138 PCIXCC_MCL3_avail = stat->st_count -
1139 (stat->disabled_count + stat->user_disabled_count);
1140 stat = &z90crypt.hdware_info->type_mask[CEX2C];
1141 CEX2C_avail = stat->st_count -
1142 (stat->disabled_count + stat->user_disabled_count);
1143 if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail) {
1144 /**
1145 * bitlength is a factor; PCICA is the most capable, even with
1146 * the new MCL.
1147 */
1148 if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
1149 (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
1150 if (!PCICA_avail)
1151 return -1;
1152 else {
1153 *dev_type_p = PCICA;
1154 return 0;
1155 }
1156 }
1157
1158 index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
1159 CEX2C_avail);
1160 if (index_to_use < PCICA_avail)
1161 *dev_type_p = PCICA;
1162 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
1163 *dev_type_p = PCIXCC_MCL3;
1164 else
1165 *dev_type_p = CEX2C;
1166 count++;
1167 return 0;
1168 }
1169
1170 /* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */
1171 if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE)
1172 return -1;
1173 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2];
1174 if (stat->st_count >
1175 (stat->disabled_count + stat->user_disabled_count)) {
1176 *dev_type_p = PCIXCC_MCL2;
1177 return 0;
1178 }
1179
1180 /**
1181 * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE
1182 * (if we don't have the MCL applied and the newer bitlengths enabled)
1183 * cannot go to a PCICC
1184 */
1185 if ((bytelength < PCICC_MIN_MOD_SIZE) ||
1186 (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) {
1187 return -1;
1188 }
1189 stat = &z90crypt.hdware_info->type_mask[PCICC];
1190 if (stat->st_count >
1191 (stat->disabled_count + stat->user_disabled_count)) {
1192 *dev_type_p = PCICC;
1193 return 0;
1194 }
1195
1196 return -1;
1197}
1198
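/*
 * Worked example of the round-robin above (illustrative numbers): with
 * 2 PCICAs, 1 PCIXCC MCL3 and 1 CEX2C available, successive ANYDEV
 * requests compute index_to_use = count % 4 = 0, 1, 2, 3, ... and so
 * pick PCICA, PCICA, PCIXCC_MCL3, CEX2C, PCICA, ... Requests shorter
 * than PCIXCC_MIN_MOD_SIZE (or OLD_PCIXCC_MIN_MOD_SIZE when
 * ext_bitlens is off) skip the rotation and go to a PCICA, if any.
 */
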
1199/**
1200 * Try the selected number, then the selected type (can be ANYDEV)
1201 */
1202static inline int
1203select_device(int *dev_type_p, int *device_nr_p, int bytelength)
1204{
1205 int i, indx, devTp, low_count, low_indx;
1206 struct device_x *index_p;
1207 struct device *dev_ptr;
1208
1209 PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p);
1210 if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) {
1211 PDEBUG("trying index = %d\n", *device_nr_p);
1212 dev_ptr = z90crypt.device_p[*device_nr_p];
1213
1214 if (dev_ptr &&
1215 (dev_ptr->dev_stat != DEV_GONE) &&
1216 (dev_ptr->disabled == 0) &&
1217 (dev_ptr->user_disabled == 0)) {
1218 PDEBUG("selected by number, index = %d\n",
1219 *device_nr_p);
1220 *dev_type_p = dev_ptr->dev_type;
1221 return *device_nr_p;
1222 }
1223 }
1224 *device_nr_p = -1;
1225 PDEBUG("trying type = %d\n", *dev_type_p);
1226 devTp = *dev_type_p;
1227 if (select_device_type(&devTp, bytelength) == -1) {
1228 PDEBUG("failed to select by type\n");
1229 return -1;
1230 }
1231 PDEBUG("selected type = %d\n", devTp);
1232 index_p = &z90crypt.hdware_info->type_x_addr[devTp];
1233 low_count = 0x0000FFFF;
1234 low_indx = -1;
1235 for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) {
1236 indx = index_p->device_index[i];
1237 dev_ptr = z90crypt.device_p[indx];
1238 if (dev_ptr &&
1239 (dev_ptr->dev_stat != DEV_GONE) &&
1240 (dev_ptr->disabled == 0) &&
1241 (dev_ptr->user_disabled == 0) &&
1242 (devTp == dev_ptr->dev_type) &&
1243 (low_count > dev_ptr->dev_caller_count)) {
1244 low_count = dev_ptr->dev_caller_count;
1245 low_indx = indx;
1246 }
1247 }
1248 *device_nr_p = low_indx;
1249 return low_indx;
1250}
1251
1252static inline int
1253send_to_crypto_device(struct work_element *we_p)
1254{
1255 struct caller *caller_p;
1256 struct device *device_p;
1257 int dev_nr;
1258 int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength;
1259
1260 if (!we_p->requestptr)
1261 return SEN_FATAL_ERROR;
1262 caller_p = (struct caller *)we_p->requestptr;
1263 dev_nr = we_p->devindex;
1264 if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) {
1265 if (z90crypt.hdware_info->hdware_mask.st_count != 0)
1266 return SEN_RETRY;
1267 else
1268 return SEN_NOT_AVAIL;
1269 }
1270 we_p->devindex = dev_nr;
1271 device_p = z90crypt.device_p[dev_nr];
1272 if (!device_p)
1273 return SEN_NOT_AVAIL;
1274 if (device_p->dev_type != we_p->devtype)
1275 return SEN_RETRY;
1276 if (device_p->dev_caller_count >= device_p->dev_q_depth)
1277 return SEN_QUEUE_FULL;
1278 PDEBUG("device number prior to send: %d\n", dev_nr);
1279 switch (send_to_AP(dev_nr, z90crypt.cdx,
1280 caller_p->caller_dev_dep_req_l,
1281 caller_p->caller_dev_dep_req_p)) {
1282 case DEV_SEN_EXCEPTION:
1283 PRINTKC("Exception during send to device %d\n", dev_nr);
1284 z90crypt.terminating = 1;
1285 return SEN_FATAL_ERROR;
1286 case DEV_GONE:
1287 PRINTK("Device %d not available\n", dev_nr);
1288 remove_device(device_p);
1289 return SEN_NOT_AVAIL;
1290 case DEV_EMPTY:
1291 return SEN_NOT_AVAIL;
1292 case DEV_NO_WORK:
1293 return SEN_FATAL_ERROR;
1294 case DEV_BAD_MESSAGE:
1295 return SEN_USER_ERROR;
1296 case DEV_QUEUE_FULL:
1297 return SEN_QUEUE_FULL;
1298 default:
1299 case DEV_ONLINE:
1300 break;
1301 }
1302 list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list));
1303 device_p->dev_caller_count++;
1304 return 0;
1305}
1306
1307/**
1308 * Send puts the user's work on one of two queues:
1309 * the pending queue if the send was successful
1310 * the request queue if the send failed because the device was full or busy
1311 */
1312static inline int
1313z90crypt_send(struct work_element *we_p, const char *buf)
1314{
1315 int rv;
1316
1317 PDEBUG("PID %d\n", PID());
1318
1319 if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) {
1320 PDEBUG("PID %d tried to send more work but has outstanding "
1321 "work.\n", PID());
1322 return -EWORKPEND;
1323 }
1324 we_p->devindex = -1; // Reset device number
1325 spin_lock_irq(&queuespinlock);
1326 rv = send_to_crypto_device(we_p);
1327 switch (rv) {
1328 case 0:
1329 we_p->requestsent = jiffies;
1330 we_p->audit[0] |= FP_SENT;
1331 list_add_tail(&we_p->liste, &pending_list);
1332 ++pendingq_count;
1333 we_p->audit[0] |= FP_PENDING;
1334 break;
1335 case SEN_BUSY:
1336 case SEN_QUEUE_FULL:
1337 rv = 0;
1338 we_p->devindex = -1; // any device will do
1339 we_p->requestsent = jiffies;
1340 list_add_tail(&we_p->liste, &request_list);
1341 ++requestq_count;
1342 we_p->audit[0] |= FP_REQUEST;
1343 break;
1344 case SEN_RETRY:
1345 rv = -ERESTARTSYS;
1346 break;
1347 case SEN_NOT_AVAIL:
1348 PRINTK("*** No devices available.\n");
1349 rv = we_p->retcode = -ENODEV;
1350 we_p->status[0] |= STAT_FAILED;
1351 break;
1352 case REC_OPERAND_INV:
1353 case REC_OPERAND_SIZE:
1354 case REC_EVEN_MOD:
1355 case REC_INVALID_PAD:
1356 rv = we_p->retcode = -EINVAL;
1357 we_p->status[0] |= STAT_FAILED;
1358 break;
1359 default:
1360 we_p->retcode = rv;
1361 we_p->status[0] |= STAT_FAILED;
1362 break;
1363 }
1364 if (rv != -ERESTARTSYS)
1365 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1366 spin_unlock_irq(&queuespinlock);
1367 if (rv == 0)
1368 tasklet_schedule(&reader_tasklet);
1369 return rv;
1370}
1371
1372/**
1373 * process_results copies the results of the user's work back to user space.
1374 */
1375static inline int
1376z90crypt_process_results(struct work_element *we_p, char __user *buf)
1377{
1378 int rv;
1379
1380 PDEBUG("we_p %p (PID %d)\n", we_p, PID());
1381
1382 LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++;
1383 SET_RDWRMASK(we_p->status[0], STAT_READPEND);
1384
1385 rv = 0;
1386 if (!we_p->buffer) {
1387 PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
1388 we_p, PID());
1389 rv = -ENOBUFF;
1390 }
1391
1392 if (!rv)
1393 if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) {
1394 PDEBUG("copy_to_user failed: rv = %d\n", rv);
1395 rv = -EFAULT;
1396 }
1397
1398 if (!rv)
1399 rv = we_p->retcode;
1400 if (!rv)
1401 if (we_p->resp_buff_size
1402 && copy_to_user(we_p->resp_addr, we_p->resp_buff,
1403 we_p->resp_buff_size))
1404 rv = -EFAULT;
1405
1406 SET_RDWRMASK(we_p->status[0], STAT_NOWORK);
1407 return rv;
1408}
1409
1410static unsigned char NULL_psmid[8] =
1411{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1412
1413/**
1414 * Used in device configuration functions
1415 */
1416#define MAX_RESET 90
1417
1418/**
1419 * This is used only for PCICC support
1420 */
1421static inline int
1422is_PKCS11_padded(unsigned char *buffer, int length)
1423{
1424 int i;
1425 if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
1426 return 0;
1427 for (i = 2; i < length; i++)
1428 if (buffer[i] != 0xFF)
1429 break;
1430 if ((i < 10) || (i == length))
1431 return 0;
1432 if (buffer[i] != 0x00)
1433 return 0;
1434 return 1;
1435}
1436
1437/**
1438 * This is used only for PCICC support
1439 */
1440static inline int
1441is_PKCS12_padded(unsigned char *buffer, int length)
1442{
1443 int i;
1444 if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
1445 return 0;
1446 for (i = 2; i < length; i++)
1447 if (buffer[i] == 0x00)
1448 break;
1449 if ((i < 10) || (i == length))
1450 return 0;
1451 if (buffer[i] != 0x00)
1452 return 0;
1453 return 1;
1454}
1455
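/*
 * What the two checks above match are PKCS #1 v1.5 encryption blocks;
 * despite the names, "PKCS11"/"PKCS12" here mean block types 1 and 2,
 * not the PKCS #11/#12 standards:
 *
 *	type 1:  00 01 FF FF ... FF 00 <data>   (all-FF padding string)
 *	type 2:  00 02 NZ NZ ... NZ 00 <data>   (NZ = nonzero random byte)
 *
 * Both require the padding string to be at least 8 bytes long
 * (i >= 10) and to end in a 0x00 separator before the end of the
 * buffer.
 */
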
1456/**
1457 * builds struct caller and converts message from generic format to
1458 * device-dependent format
1459 * func is ICARSAMODEXPO or ICARSACRT
1460 * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
1461 */
1462static inline int
1463build_caller(struct work_element *we_p, short function)
1464{
1465 int rv;
1466 struct caller *caller_p = (struct caller *)we_p->requestptr;
1467
1468 if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
1469 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1470 (we_p->devtype != CEX2C))
1471 return SEN_NOT_AVAIL;
1472
1473 memcpy(caller_p->caller_id, we_p->caller_id,
1474 sizeof(caller_p->caller_id));
1475 caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req;
1476 caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE;
1477 caller_p->caller_buf_p = we_p->buffer;
1478 INIT_LIST_HEAD(&(caller_p->caller_liste));
1479
1480 rv = convert_request(we_p->buffer, we_p->funccode, function,
1481 z90crypt.cdx, we_p->devtype,
1482 &caller_p->caller_dev_dep_req_l,
1483 caller_p->caller_dev_dep_req_p);
1484 if (rv) {
1485 if (rv == SEN_NOT_AVAIL)
1486 PDEBUG("request can't be processed on available hardware\n");
1487 else
1488 PRINTK("Error from convert_request: %d\n", rv);
1489 }
1490 else
1491 memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id,8);
1492 return rv;
1493}
1494
1495static inline void
1496unbuild_caller(struct device *device_p, struct caller *caller_p)
1497{
1498 if (!caller_p)
1499 return;
1500 if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
1501 if (!list_empty(&caller_p->caller_liste)) {
1502 list_del_init(&caller_p->caller_liste);
1503 device_p->dev_caller_count--;
1504 }
1505 memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
1506}
1507
1508static inline int
1509get_crypto_request_buffer(struct work_element *we_p)
1510{
1511 struct ica_rsa_modexpo *mex_p;
1512 struct ica_rsa_modexpo_crt *crt_p;
1513 unsigned char *temp_buffer;
1514 short function;
1515 int rv;
1516
1517 mex_p = (struct ica_rsa_modexpo *) we_p->buffer;
1518 crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer;
1519
1520 PDEBUG("device type input = %d\n", we_p->devtype);
1521
1522 if (z90crypt.terminating)
1523 return REC_NO_RESPONSE;
1524 if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) {
1525 PRINTK("psmid zeroes\n");
1526 return SEN_FATAL_ERROR;
1527 }
1528 if (!we_p->buffer) {
1529 PRINTK("buffer pointer NULL\n");
1530 return SEN_USER_ERROR;
1531 }
1532 if (!we_p->requestptr) {
1533 PRINTK("caller pointer NULL\n");
1534 return SEN_USER_ERROR;
1535 }
1536
1537 if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
1538 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1539 (we_p->devtype != CEX2C) && (we_p->devtype != ANYDEV)) {
1540 PRINTK("invalid device type\n");
1541 return SEN_USER_ERROR;
1542 }
1543
1544 if ((mex_p->inputdatalength < 1) ||
1545 (mex_p->inputdatalength > MAX_MOD_SIZE)) {
1546 PRINTK("inputdatalength[%d] is not valid\n",
1547 mex_p->inputdatalength);
1548 return SEN_USER_ERROR;
1549 }
1550
1551 if (mex_p->outputdatalength < mex_p->inputdatalength) {
1552 PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
1553 mex_p->outputdatalength, mex_p->inputdatalength);
1554 return SEN_USER_ERROR;
1555 }
1556
1557 if (!mex_p->inputdata || !mex_p->outputdata) {
1558 PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
1559 mex_p->inputdata, mex_p->outputdata);
1560 return SEN_USER_ERROR;
1561 }
1562
1563 /**
1564 * As long as outputdatalength is big enough, we can set the
1565 * outputdatalength equal to the inputdatalength, since that is the
1566 * number of bytes we will copy in any case
1567 */
1568 mex_p->outputdatalength = mex_p->inputdatalength;
1569
1570 rv = 0;
1571 switch (we_p->funccode) {
1572 case ICARSAMODEXPO:
1573 if (!mex_p->b_key || !mex_p->n_modulus)
1574 rv = SEN_USER_ERROR;
1575 break;
1576 case ICARSACRT:
1577 if (!IS_EVEN(crt_p->inputdatalength)) {
1578 PRINTK("inputdatalength[%d] is odd, CRT form\n",
1579 crt_p->inputdatalength);
1580 rv = SEN_USER_ERROR;
1581 break;
1582 }
1583 if (!crt_p->bp_key ||
1584 !crt_p->bq_key ||
1585 !crt_p->np_prime ||
1586 !crt_p->nq_prime ||
1587 !crt_p->u_mult_inv) {
1588 PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
1589 crt_p->bp_key, crt_p->bq_key,
1590 crt_p->np_prime, crt_p->nq_prime,
1591 crt_p->u_mult_inv);
1592 rv = SEN_USER_ERROR;
1593 }
1594 break;
1595 default:
1596 PRINTK("bad func = %d\n", we_p->funccode);
1597 rv = SEN_USER_ERROR;
1598 break;
1599 }
1600 if (rv != 0)
1601 return rv;
1602
1603 if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0)
1604 return SEN_NOT_AVAIL;
1605
1606 temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) +
1607 sizeof(struct caller);
1608 if (copy_from_user(temp_buffer, mex_p->inputdata,
1609 mex_p->inputdatalength) != 0)
1610 return SEN_RELEASED;
1611
1612 function = PCI_FUNC_KEY_ENCRYPT;
1613 switch (we_p->devtype) {
1614 /* PCICA does everything with a simple RSA mod-expo operation */
1615 case PCICA:
1616 function = PCI_FUNC_KEY_ENCRYPT;
1617 break;
1618 /**
1619 * PCIXCC_MCL2 does all Mod-Expo form with a simple RSA mod-expo
1620 * operation, and all CRT forms with a PKCS-1.2 format decrypt.
1621 * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple RSA
1622 * mod-expo operation
1623 */
1624 case PCIXCC_MCL2:
1625 if (we_p->funccode == ICARSAMODEXPO)
1626 function = PCI_FUNC_KEY_ENCRYPT;
1627 else
1628 function = PCI_FUNC_KEY_DECRYPT;
1629 break;
1630 case PCIXCC_MCL3:
1631 case CEX2C:
1632 if (we_p->funccode == ICARSAMODEXPO)
1633 function = PCI_FUNC_KEY_ENCRYPT;
1634 else
1635 function = PCI_FUNC_KEY_DECRYPT;
1636 break;
1637 /**
1638 * PCICC does everything as a PKCS-1.2 format request
1639 */
1640 case PCICC:
1641 /* PCICC cannot handle input that is PKCS#1.1 padded */
1642 if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) {
1643 return SEN_NOT_AVAIL;
1644 }
1645 if (we_p->funccode == ICARSAMODEXPO) {
1646 if (is_PKCS12_padded(temp_buffer,
1647 mex_p->inputdatalength))
1648 function = PCI_FUNC_KEY_ENCRYPT;
1649 else
1650 function = PCI_FUNC_KEY_DECRYPT;
1651 } else
1652 /* all CRT forms are decrypts */
1653 function = PCI_FUNC_KEY_DECRYPT;
1654 break;
1655 }
1656 PDEBUG("function: %04x\n", function);
1657 rv = build_caller(we_p, function);
1658 PDEBUG("rv from build_caller = %d\n", rv);
1659 return rv;
1660}
1661
1662static inline int
1663z90crypt_prepare(struct work_element *we_p, unsigned int funccode,
1664 const char __user *buffer)
1665{
1666 int rv;
1667
1668 we_p->devindex = -1;
1669 if (funccode == ICARSAMODEXPO)
1670 we_p->buff_size = sizeof(struct ica_rsa_modexpo);
1671 else
1672 we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt);
1673
1674 if (copy_from_user(we_p->buffer, buffer, we_p->buff_size))
1675 return -EFAULT;
1676
1677 we_p->audit[0] |= FP_COPYFROM;
1678 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1679 we_p->funccode = funccode;
1680 we_p->devtype = -1;
1681 we_p->audit[0] |= FP_BUFFREQ;
1682 rv = get_crypto_request_buffer(we_p);
1683 switch (rv) {
1684 case 0:
1685 we_p->audit[0] |= FP_BUFFGOT;
1686 break;
1687 case SEN_USER_ERROR:
1688 rv = -EINVAL;
1689 break;
1690 case SEN_QUEUE_FULL:
1691 rv = 0;
1692 break;
1693 case SEN_RELEASED:
1694 rv = -EFAULT;
1695 break;
1696 case REC_NO_RESPONSE:
1697 rv = -ENODEV;
1698 break;
1699 case SEN_NOT_AVAIL:
1700 case EGETBUFF:
1701 rv = -EGETBUFF;
1702 break;
1703 default:
1704 PRINTK("rv = %d\n", rv);
1705 rv = -EGETBUFF;
1706 break;
1707 }
1708 if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN)
1709 SET_RDWRMASK(we_p->status[0], STAT_DEFAULT);
1710 return rv;
1711}
1712
1713static inline void
1714purge_work_element(struct work_element *we_p)
1715{
1716 struct list_head *lptr;
1717
1718 spin_lock_irq(&queuespinlock);
1719 list_for_each(lptr, &request_list) {
1720 if (lptr == &we_p->liste) {
1721 list_del_init(lptr);
1722 requestq_count--;
1723 break;
1724 }
1725 }
1726 list_for_each(lptr, &pending_list) {
1727 if (lptr == &we_p->liste) {
1728 list_del_init(lptr);
1729 pendingq_count--;
1730 break;
1731 }
1732 }
1733 spin_unlock_irq(&queuespinlock);
1734}
1735
1736/**
1737 * Build the request and send it.
1738 */
1739static inline int
1740z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
1741 unsigned int cmd, unsigned long arg)
1742{
1743 struct work_element *we_p;
1744 int rv;
1745
1746 if ((rv = allocate_work_element(&we_p, private_data_p, pid))) {
1747 PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid);
1748 return rv;
1749 }
1750 if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg)))
1751 PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv);
1752 if (!rv)
1753 if ((rv = z90crypt_send(we_p, (const char *)arg)))
1754 PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv);
1755 if (!rv) {
1756 we_p->audit[0] |= FP_ASLEEP;
1757 wait_event(we_p->waitq, atomic_read(&we_p->alarmrung));
1758 we_p->audit[0] |= FP_AWAKE;
1759 rv = we_p->retcode;
1760 }
1761 if (!rv)
1762 rv = z90crypt_process_results(we_p, (char __user *)arg);
1763
1764 if ((we_p->status[0] & STAT_FAILED)) {
1765 switch (rv) {
1766 /**
1767 * EINVAL *after* receive is almost always a padding error or
1768 * length error issued by a coprocessor (not an accelerator).
1769 * We convert this return value to -EGETBUFF which should
1770 * trigger a fallback to software.
1771 */
1772 case -EINVAL:
1773 if (we_p->devtype != PCICA)
1774 rv = -EGETBUFF;
1775 break;
1776 case -ETIMEOUT:
1777 if (z90crypt.mask.st_count > 0)
1778 rv = -ERESTARTSYS; // retry with another card
1779 else
1780 rv = -ENODEV; // no cards left
1781 /* fall through to clean up request queue */
1782 case -ERESTARTSYS:
1783 case -ERELEASED:
1784 switch (CHK_RDWRMASK(we_p->status[0])) {
1785 case STAT_WRITTEN:
1786 purge_work_element(we_p);
1787 break;
1788 case STAT_READPEND:
1789 case STAT_NOWORK:
1790 default:
1791 break;
1792 }
1793 break;
1794 default:
1795 we_p->status[0] ^= STAT_FAILED;
1796 break;
1797 }
1798 }
1799 free_page((long)we_p);
1800 return rv;
1801}
1802
1803/**
1804 * This function is a little long, but it's really just one large switch
1805 * statement.
1806 */
1807static int
1808z90crypt_ioctl(struct inode *inode, struct file *filp,
1809 unsigned int cmd, unsigned long arg)
1810{
1811 struct priv_data *private_data_p = filp->private_data;
1812 unsigned char *status;
1813 unsigned char *qdepth;
1814 unsigned int *reqcnt;
1815 struct ica_z90_status *pstat;
1816 int ret, i, loopLim, tempstat;
1817 static int deprecated_msg_count1 = 0;
1818 static int deprecated_msg_count2 = 0;
1819
1820 PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd);
1821 PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
1822 cmd,
1823 !_IOC_DIR(cmd) ? "NO"
1824 : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW"
1825 : ((_IOC_DIR(cmd) == _IOC_READ) ? "RD"
1826 : "WR")),
1827 _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd));
1828
1829 if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) {
1830 PRINTK("cmd 0x%08X contains bad magic\n", cmd);
1831 return -ENOTTY;
1832 }
1833
1834 ret = 0;
1835 switch (cmd) {
1836 case ICARSAMODEXPO:
1837 case ICARSACRT:
1838 if (quiesce_z90crypt) {
1839 ret = -EQUIESCE;
1840 break;
1841 }
1842 ret = -ENODEV; // Default if no devices
1843 loopLim = z90crypt.hdware_info->hdware_mask.st_count -
1844 (z90crypt.hdware_info->hdware_mask.disabled_count +
1845 z90crypt.hdware_info->hdware_mask.user_disabled_count);
1846 for (i = 0; i < loopLim; i++) {
1847 ret = z90crypt_rsa(private_data_p, PID(), cmd, arg);
1848 if (ret != -ERESTARTSYS)
1849 break;
1850 }
1851 if (ret == -ERESTARTSYS)
1852 ret = -ENODEV;
1853 break;
1854
1855 case Z90STAT_TOTALCOUNT:
1856 tempstat = get_status_totalcount();
1857 if (copy_to_user((int __user *)arg, &tempstat,sizeof(int)) != 0)
1858 ret = -EFAULT;
1859 break;
1860
1861 case Z90STAT_PCICACOUNT:
1862 tempstat = get_status_PCICAcount();
1863 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1864 ret = -EFAULT;
1865 break;
1866
1867 case Z90STAT_PCICCCOUNT:
1868 tempstat = get_status_PCICCcount();
1869 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1870 ret = -EFAULT;
1871 break;
1872
1873 case Z90STAT_PCIXCCMCL2COUNT:
1874 tempstat = get_status_PCIXCCMCL2count();
1875 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1876 ret = -EFAULT;
1877 break;
1878
1879 case Z90STAT_PCIXCCMCL3COUNT:
1880 tempstat = get_status_PCIXCCMCL3count();
1881 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1882 ret = -EFAULT;
1883 break;
1884
1885 case Z90STAT_CEX2CCOUNT:
1886 tempstat = get_status_CEX2Ccount();
1887 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1888 ret = -EFAULT;
1889 break;
1890
1891 case Z90STAT_REQUESTQ_COUNT:
1892 tempstat = get_status_requestq_count();
1893 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1894 ret = -EFAULT;
1895 break;
1896
1897 case Z90STAT_PENDINGQ_COUNT:
1898 tempstat = get_status_pendingq_count();
1899 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1900 ret = -EFAULT;
1901 break;
1902
1903 case Z90STAT_TOTALOPEN_COUNT:
1904 tempstat = get_status_totalopen_count();
1905 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1906 ret = -EFAULT;
1907 break;
1908
1909 case Z90STAT_DOMAIN_INDEX:
1910 tempstat = get_status_domain_index();
1911 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1912 ret = -EFAULT;
1913 break;
1914
1915 case Z90STAT_STATUS_MASK:
1916 status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1917 if (!status) {
1918 PRINTK("kmalloc for status failed!\n");
1919 ret = -ENOMEM;
1920 break;
1921 }
1922 get_status_status_mask(status);
1923 if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS)
1924 != 0)
1925 ret = -EFAULT;
1926 kfree(status);
1927 break;
1928
1929 case Z90STAT_QDEPTH_MASK:
1930 qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1931 if (!qdepth) {
1932 PRINTK("kmalloc for qdepth failed!\n");
1933 ret = -ENOMEM;
1934 break;
1935 }
1936 get_status_qdepth_mask(qdepth);
1937 if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0)
1938 ret = -EFAULT;
1939 kfree(qdepth);
1940 break;
1941
1942 case Z90STAT_PERDEV_REQCNT:
1943 reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL);
1944 if (!reqcnt) {
1945 PRINTK("kmalloc for reqcnt failed!\n");
1946 ret = -ENOMEM;
1947 break;
1948 }
1949 get_status_perdevice_reqcnt(reqcnt);
1950 if (copy_to_user((char __user *) arg, reqcnt,
1951 Z90CRYPT_NUM_APS * sizeof(int)) != 0)
1952 ret = -EFAULT;
1953 kfree(reqcnt);
1954 break;
1955
1956 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1957 case ICAZ90STATUS:
1958 if (deprecated_msg_count1 < 20) {
1959 PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
1960 deprecated_msg_count1++;
1961 if (deprecated_msg_count1 == 20)
1962 PRINTK("No longer issuing messages related to "
1963 "deprecated call to ICAZ90STATUS.\n");
1964 }
1965
1966 pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL);
1967 if (!pstat) {
1968 PRINTK("kmalloc for pstat failed!\n");
1969 ret = -ENOMEM;
1970 break;
1971 }
1972
1973 pstat->totalcount = get_status_totalcount();
1974 pstat->leedslitecount = get_status_PCICAcount();
1975 pstat->leeds2count = get_status_PCICCcount();
1976 pstat->requestqWaitCount = get_status_requestq_count();
1977 pstat->pendingqWaitCount = get_status_pendingq_count();
1978 pstat->totalOpenCount = get_status_totalopen_count();
1979 pstat->cryptoDomain = get_status_domain_index();
1980 get_status_status_mask(pstat->status);
1981 get_status_qdepth_mask(pstat->qdepth);
1982
1983 if (copy_to_user((struct ica_z90_status __user *) arg, pstat,
1984 sizeof(struct ica_z90_status)) != 0)
1985 ret = -EFAULT;
1986 kfree(pstat);
1987 break;
1988
1989 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1990 case Z90STAT_PCIXCCCOUNT:
1991 if (deprecated_msg_count2 < 20) {
1992 PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n");
1993 deprecated_msg_count2++;
1994 if (deprecated_msg_count2 == 20)
1995 PRINTK("No longer issuing messages about depre"
1996 "cated ioctl Z90STAT_PCIXCCCOUNT.\n");
1997 }
1998
1999 tempstat = get_status_PCIXCCcount();
2000 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
2001 ret = -EFAULT;
2002 break;
2003
2004 case Z90QUIESCE:
2005 if (current->euid != 0) {
2006 PRINTK("QUIESCE fails: euid %d\n",
2007 current->euid);
2008 ret = -EACCES;
2009 } else {
2010 PRINTK("QUIESCE device from PID %d\n", PID());
2011 quiesce_z90crypt = 1;
2012 }
2013 break;
2014
2015 default:
2016 /* user passed an invalid IOCTL number */
2017 PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd);
2018 ret = -ENOTTY;
2019 break;
2020 }
2021
2022 return ret;
2023}
2024
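/*
 * Illustrative user-space sketch (not part of the driver) of the ioctl
 * interface above; the device node name and header location are
 * assumptions for the example:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <asm/z90crypt.h>   // ICARSAMODEXPO, Z90STAT_* numbers
 *
 *	int fd = open("/dev/z90crypt", O_RDWR);
 *	int devices = 0;
 *	ioctl(fd, Z90STAT_TOTALCOUNT, &devices);
 *
 *	struct ica_rsa_modexpo mex;
 *	// caller fills in inputdata/inputdatalength, outputdata/
 *	// outputdatalength, b_key and n_modulus before the call
 *	if (ioctl(fd, ICARSAMODEXPO, &mex) < 0)
 *		; // errno as mapped by z90crypt_rsa above
 *	close(fd);
 */
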
2025static inline int
2026sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
2027{
2028 int hl, i;
2029
2030 hl = 0;
2031 for (i = 0; i < len; i++)
2032 hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
2033 hl += sprintf(outaddr+hl, " ");
2034
2035 return hl;
2036}
2037
2038static inline int
2039sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len)
2040{
2041 int hl, inl, c, cx;
2042
2043 hl = sprintf(outaddr, " ");
2044 inl = 0;
2045 for (c = 0; c < (len / 16); c++) {
2046 hl += sprintcl(outaddr+hl, addr+inl, 16);
2047 inl += 16;
2048 }
2049
2050 cx = len%16;
2051 if (cx) {
2052 hl += sprintcl(outaddr+hl, addr+inl, cx);
2053 inl += cx;
2054 }
2055
2056 hl += sprintf(outaddr+hl, "\n");
2057
2058 return hl;
2059}
2060
2061static inline int
2062sprinthx(unsigned char *title, unsigned char *outaddr,
2063 unsigned char *addr, unsigned int len)
2064{
2065 int hl, inl, r, rx;
2066
2067 hl = sprintf(outaddr, "\n%s\n", title);
2068 inl = 0;
2069 for (r = 0; r < (len / 64); r++) {
2070 hl += sprintrw(outaddr+hl, addr+inl, 64);
2071 inl += 64;
2072 }
2073 rx = len % 64;
2074 if (rx) {
2075 hl += sprintrw(outaddr+hl, addr+inl, rx);
2076 inl += rx;
2077 }
2078
2079 hl += sprintf(outaddr+hl, "\n");
2080
2081 return hl;
2082}
2083
2084static inline int
2085sprinthx4(unsigned char *title, unsigned char *outaddr,
2086 unsigned int *array, unsigned int len)
2087{
2088 int hl, r;
2089
2090 hl = sprintf(outaddr, "\n%s\n", title);
2091
2092 for (r = 0; r < len; r++) {
2093 if ((r % 8) == 0)
2094 hl += sprintf(outaddr+hl, " ");
2095 hl += sprintf(outaddr+hl, "%08X ", array[r]);
2096 if ((r % 8) == 7)
2097 hl += sprintf(outaddr+hl, "\n");
2098 }
2099
2100 hl += sprintf(outaddr+hl, "\n");
2101
2102 return hl;
2103}
2104
2105static int
2106z90crypt_status(char *resp_buff, char **start, off_t offset,
2107 int count, int *eof, void *data)
2108{
2109 unsigned char *workarea;
2110 int len;
2111
2112 /* resp_buff is a page. Use the right half for a work area */
2113 workarea = resp_buff+2000;
2114 len = 0;
2115 len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n",
2116 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT);
2117 len += sprintf(resp_buff+len, "Cryptographic domain: %d\n",
2118 get_status_domain_index());
2119 len += sprintf(resp_buff+len, "Total device count: %d\n",
2120 get_status_totalcount());
2121 len += sprintf(resp_buff+len, "PCICA count: %d\n",
2122 get_status_PCICAcount());
2123 len += sprintf(resp_buff+len, "PCICC count: %d\n",
2124 get_status_PCICCcount());
2125 len += sprintf(resp_buff+len, "PCIXCC MCL2 count: %d\n",
2126 get_status_PCIXCCMCL2count());
2127 len += sprintf(resp_buff+len, "PCIXCC MCL3 count: %d\n",
2128 get_status_PCIXCCMCL3count());
2129 len += sprintf(resp_buff+len, "CEX2C count: %d\n",
2130 get_status_CEX2Ccount());
2131 len += sprintf(resp_buff+len, "requestq count: %d\n",
2132 get_status_requestq_count());
2133 len += sprintf(resp_buff+len, "pendingq count: %d\n",
2134 get_status_pendingq_count());
2135 len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
2136 get_status_totalopen_count());
2137 len += sprinthx(
2138 "Online devices: 1: PCICA, 2: PCICC, 3: PCIXCC (MCL2), "
2139 "4: PCIXCC (MCL3), 5: CEX2C",
2140 resp_buff+len,
2141 get_status_status_mask(workarea),
2142 Z90CRYPT_NUM_APS);
2143 len += sprinthx("Waiting work element counts",
2144 resp_buff+len,
2145 get_status_qdepth_mask(workarea),
2146 Z90CRYPT_NUM_APS);
2147 len += sprinthx4(
2148 "Per-device successfully completed request counts",
2149 resp_buff+len,
2150 get_status_perdevice_reqcnt((unsigned int *)workarea),
2151 Z90CRYPT_NUM_APS);
2152 *eof = 1;
2153 memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int));
2154 return len;
2155}
2156
2157static inline void
2158disable_card(int card_index)
2159{
2160 struct device *devp;
2161
2162 devp = LONG2DEVPTR(card_index);
2163 if (!devp || devp->user_disabled)
2164 return;
2165 devp->user_disabled = 1;
2166 z90crypt.hdware_info->hdware_mask.user_disabled_count++;
2167 if (devp->dev_type == -1)
2168 return;
2169 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
2170}
2171
2172static inline void
2173enable_card(int card_index)
2174{
2175 struct device *devp;
2176
2177 devp = LONG2DEVPTR(card_index);
2178 if (!devp || !devp->user_disabled)
2179 return;
2180 devp->user_disabled = 0;
2181 z90crypt.hdware_info->hdware_mask.user_disabled_count--;
2182 if (devp->dev_type == -1)
2183 return;
2184 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
2185}
2186
2187static inline int
2188scan_char(unsigned char *bf, unsigned int len,
2189 unsigned int *offs, unsigned int *p_eof, unsigned char c)
2190{
2191 unsigned int i, found;
2192
2193 found = 0;
2194 for (i = 0; i < len; i++) {
2195 if (bf[i] == c) {
2196 found = 1;
2197 break;
2198 }
2199 if (bf[i] == '\0') {
2200 *p_eof = 1;
2201 break;
2202 }
2203 if (bf[i] == '\n') {
2204 break;
2205 }
2206 }
2207 *offs = i+1;
2208 return found;
2209}
2210
2211static inline int
2212scan_string(unsigned char *bf, unsigned int len,
2213 unsigned int *offs, unsigned int *p_eof, unsigned char *s)
2214{
2215 unsigned int temp_len, temp_offs, found, eof;
2216
2217 temp_len = temp_offs = found = eof = 0;
2218 while (!eof && !found) {
2219 found = scan_char(bf+temp_len, len-temp_len,
2220 &temp_offs, &eof, *s);
2221
2222 temp_len += temp_offs;
2223 if (eof) {
2224 found = 0;
2225 break;
2226 }
2227
2228 if (found) {
2229 if (len >= temp_offs+strlen(s)) {
2230 found = !strncmp(bf+temp_len-1, s, strlen(s));
2231 if (found) {
2232 *offs = temp_len+strlen(s)-1;
2233 break;
2234 }
2235 } else {
2236 found = 0;
2237 *p_eof = 1;
2238 break;
2239 }
2240 }
2241 }
2242 return found;
2243}
2244
2245static int
2246z90crypt_status_write(struct file *file, const char __user *buffer,
2247 unsigned long count, void *data)
2248{
2249 int i, j, len, offs, found, eof;
2250 unsigned char *lbuf;
2251 unsigned int local_count;
2252
2253#define LBUFSIZE 600
2254 if (count == 0)
2255 return 0;
2256
2257 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
2258 if (!lbuf) {
2259 PRINTK("kmalloc failed!\n");
2260 return 0;
2261 }
2262
2263 local_count = UMIN((unsigned int)count, LBUFSIZE-1);
2264
2265 if (copy_from_user(lbuf, buffer, local_count) != 0) {
2266 kfree(lbuf);
2267 return -EFAULT;
2268 }
2269
2270 lbuf[local_count-1] = '\0';
2271
2272 len = 0;
2273 eof = 0;
2274 found = 0;
2275 while (!eof) {
2276 found = scan_string(lbuf+len, local_count-len, &offs, &eof,
2277 "Online devices");
2278 len += offs;
2279 if (found == 1)
2280 break;
2281 }
2282
2283 if (eof) {
2284 kfree(lbuf);
2285 return count;
2286 }
2287
2288 if (found)
2289 found = scan_char(lbuf+len, local_count-len, &offs, &eof, '\n');
2290
2291 if (!found || eof) {
2292 kfree(lbuf);
2293 return count;
2294 }
2295
2296 len += offs;
2297 j = 0;
2298 for (i = 0; i < 80; i++) {
2299 switch (*(lbuf+len+i)) {
2300 case '\t':
2301 case ' ':
2302 break;
2303 case '\n':
2304 default:
2305 eof = 1;
2306 break;
2307 case '0':
2308 case '1':
2309 case '2':
2310 case '3':
2311 case '4':
2312 case '5':
2313 j++;
2314 break;
2315 case 'd':
2316 case 'D':
2317 disable_card(j);
2318 j++;
2319 break;
2320 case 'e':
2321 case 'E':
2322 enable_card(j);
2323 j++;
2324 break;
2325 }
2326 if (eof)
2327 break;
2328 }
2329
2330 kfree(lbuf);
2331 return count;
2332}
2333
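/*
 * Illustrative use of the write parser above (the proc path is an
 * assumption): write back an "Online devices" header followed by one
 * character per AP index, where a digit leaves the card alone, 'd'
 * disables it and 'e' re-enables it. A minimal sketch:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	const char *cmd = "Online devices\nd111";	// disable card 0
 *	int fd = open("/proc/driver/z90crypt", O_WRONLY);
 *	write(fd, cmd, strlen(cmd));
 *	close(fd);
 */
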
2334/**
2335 * Functions that run under a timer, with no process id
2336 *
2337 * The task functions:
2338 * z90crypt_reader_task
2339 * helper_send_work
2340 * helper_handle_work_element
2341 * helper_receive_rc
2342 * z90crypt_config_task
2343 * z90crypt_cleanup_task
2344 *
2345 * Helper functions:
2346 * z90crypt_schedule_reader_timer
2347 * z90crypt_schedule_reader_task
2348 * z90crypt_schedule_config_task
2349 * z90crypt_schedule_cleanup_task
2350 */
2351static inline int
2352receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
2353 unsigned char *buff, unsigned char __user **dest_p_p)
2354{
2355 int dv, rv;
2356 struct device *dev_ptr;
2357 struct caller *caller_p;
2358 struct ica_rsa_modexpo *icaMsg_p;
2359 struct list_head *ptr, *tptr;
2360
2361 memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));
2362
2363 if (z90crypt.terminating)
2364 return REC_FATAL_ERROR;
2365
2366 caller_p = 0;
2367 dev_ptr = z90crypt.device_p[index];
2368 rv = 0;
2369 do {
2370 if (!dev_ptr || dev_ptr->disabled) {
2371 rv = REC_NO_WORK; // a disabled device can't return work
2372 break;
2373 }
2374 if (dev_ptr->dev_self_x != index) {
2375 PRINTKC("Corrupt dev ptr\n");
2376 z90crypt.terminating = 1;
2377 rv = REC_FATAL_ERROR;
2378 break;
2379 }
2380 if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
2381 dv = DEV_REC_EXCEPTION;
2382 PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
2383 dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
2384 } else {
2385 PDEBUG("Dequeue called for device %d\n", index);
2386 dv = receive_from_AP(index, z90crypt.cdx,
2387 dev_ptr->dev_resp_l,
2388 dev_ptr->dev_resp_p, psmid);
2389 }
2390 switch (dv) {
2391 case DEV_REC_EXCEPTION:
2392 rv = REC_FATAL_ERROR;
2393 z90crypt.terminating = 1;
2394 PRINTKC("Exception in receive from device %d\n",
2395 index);
2396 break;
2397 case DEV_ONLINE:
2398 rv = 0;
2399 break;
2400 case DEV_EMPTY:
2401 rv = REC_EMPTY;
2402 break;
2403 case DEV_NO_WORK:
2404 rv = REC_NO_WORK;
2405 break;
2406 case DEV_BAD_MESSAGE:
2407 case DEV_GONE:
2408 case REC_HARDWAR_ERR:
2409 default:
2410 rv = REC_NO_RESPONSE;
2411 break;
2412 }
2413 if (rv)
2414 break;
2415 if (dev_ptr->dev_caller_count <= 0) {
2416 rv = REC_USER_GONE;
2417 break;
2418 }
2419
2420 list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
2421 caller_p = list_entry(ptr, struct caller, caller_liste);
2422 if (!memcmp(caller_p->caller_id, psmid,
2423 sizeof(caller_p->caller_id))) {
2424 if (!list_empty(&caller_p->caller_liste)) {
2425 list_del_init(ptr);
2426 dev_ptr->dev_caller_count--;
2427 break;
2428 }
2429 }
2430 caller_p = 0;
2431 }
2432 if (!caller_p) {
2433 PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
2434 "%02X%02X%02X in device list\n",
2435 psmid[0], psmid[1], psmid[2], psmid[3],
2436 psmid[4], psmid[5], psmid[6], psmid[7]);
2437 rv = REC_USER_GONE;
2438 break;
2439 }
2440
2441 PDEBUG("caller_p after successful receive: %p\n", caller_p);
2442 rv = convert_response(dev_ptr->dev_resp_p,
2443 caller_p->caller_buf_p, buff_len_p, buff);
2444 switch (rv) {
2445 case REC_USE_PCICA:
2446 break;
2447 case REC_OPERAND_INV:
2448 case REC_OPERAND_SIZE:
2449 case REC_EVEN_MOD:
2450 case REC_INVALID_PAD:
2451 PDEBUG("device %d: 'user error' %d\n", index, rv);
2452 break;
2453 case WRONG_DEVICE_TYPE:
2454 case REC_HARDWAR_ERR:
2455 case REC_BAD_MESSAGE:
2456 PRINTKW("device %d: hardware error %d\n", index, rv);
2457 rv = REC_NO_RESPONSE;
2458 break;
2459 default:
2460 PDEBUG("device %d: rv = %d\n", index, rv);
2461 break;
2462 }
2463 } while (0);
2464
2465 switch (rv) {
2466 case 0:
2467 PDEBUG("Successful receive from device %d\n", index);
2468 icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
2469 *dest_p_p = icaMsg_p->outputdata;
2470 if (*buff_len_p == 0)
2471 PRINTK("Zero *buff_len_p\n");
2472 break;
2473 case REC_NO_RESPONSE:
2474 PRINTKW("Removing device %d from availability\n", index);
2475 remove_device(dev_ptr);
2476 break;
2477 }
2478
2479 if (caller_p)
2480 unbuild_caller(dev_ptr, caller_p);
2481
2482 return rv;
2483}
2484
2485static inline void
2486helper_send_work(int index)
2487{
2488 struct work_element *rq_p;
2489 int rv;
2490
2491 if (list_empty(&request_list))
2492 return;
2493 requestq_count--;
2494 rq_p = list_entry(request_list.next, struct work_element, liste);
2495 list_del_init(&rq_p->liste);
2496 rq_p->audit[1] |= FP_REMREQUEST;
2497 if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
2498 rq_p->devindex = SHRT2LONG(index);
2499 rv = send_to_crypto_device(rq_p);
2500 if (rv == 0) {
2501 rq_p->requestsent = jiffies;
2502 rq_p->audit[0] |= FP_SENT;
2503 list_add_tail(&rq_p->liste, &pending_list);
2504 ++pendingq_count;
2505 rq_p->audit[0] |= FP_PENDING;
2506 } else {
2507 switch (rv) {
2508 case REC_OPERAND_INV:
2509 case REC_OPERAND_SIZE:
2510 case REC_EVEN_MOD:
2511 case REC_INVALID_PAD:
2512 rq_p->retcode = -EINVAL;
2513 break;
2514 case SEN_NOT_AVAIL:
2515 case SEN_RETRY:
2516 case REC_NO_RESPONSE:
2517 default:
2518 if (z90crypt.mask.st_count > 1)
2519 rq_p->retcode =
2520 -ERESTARTSYS;
2521 else
2522 rq_p->retcode = -ENODEV;
2523 break;
2524 }
2525 rq_p->status[0] |= STAT_FAILED;
2526 rq_p->audit[1] |= FP_AWAKENING;
2527 atomic_set(&rq_p->alarmrung, 1);
2528 wake_up(&rq_p->waitq);
2529 }
2530 } else {
2531 if (z90crypt.mask.st_count > 1)
2532 rq_p->retcode = -ERESTARTSYS;
2533 else
2534 rq_p->retcode = -ENODEV;
2535 rq_p->status[0] |= STAT_FAILED;
2536 rq_p->audit[1] |= FP_AWAKENING;
2537 atomic_set(&rq_p->alarmrung, 1);
2538 wake_up(&rq_p->waitq);
2539 }
2540}
2541
2542static inline void
2543helper_handle_work_element(int index, unsigned char psmid[8], int rc,
2544 int buff_len, unsigned char *buff,
2545 unsigned char __user *resp_addr)
2546{
2547 struct work_element *pq_p;
2548 struct list_head *lptr, *tptr;
2549
2550 pq_p = 0;
2551 list_for_each_safe(lptr, tptr, &pending_list) {
2552 pq_p = list_entry(lptr, struct work_element, liste);
2553 if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
2554 list_del_init(lptr);
2555 pendingq_count--;
2556 pq_p->audit[1] |= FP_NOTPENDING;
2557 break;
2558 }
2559 pq_p = 0;
2560 }
2561
2562 if (!pq_p) {
2563 PRINTK("device %d has work but no caller exists on pending Q\n",
2564 SHRT2LONG(index));
2565 return;
2566 }
2567
2568 switch (rc) {
2569 case 0:
2570 pq_p->resp_buff_size = buff_len;
2571 pq_p->audit[1] |= FP_RESPSIZESET;
2572 if (buff_len) {
2573 pq_p->resp_addr = resp_addr;
2574 pq_p->audit[1] |= FP_RESPADDRCOPIED;
2575 memcpy(pq_p->resp_buff, buff, buff_len);
2576 pq_p->audit[1] |= FP_RESPBUFFCOPIED;
2577 }
2578 break;
2579 case REC_OPERAND_INV:
2580 case REC_OPERAND_SIZE:
2581 case REC_EVEN_MOD:
2582 case REC_INVALID_PAD:
2583 PDEBUG("-EINVAL after application error %d\n", rc);
2584 pq_p->retcode = -EINVAL;
2585 pq_p->status[0] |= STAT_FAILED;
2586 break;
2587 case REC_USE_PCICA:
2588 pq_p->retcode = -ERESTARTSYS;
2589 pq_p->status[0] |= STAT_FAILED;
2590 break;
2591 case REC_NO_RESPONSE:
2592 default:
2593 if (z90crypt.mask.st_count > 1)
2594 pq_p->retcode = -ERESTARTSYS;
2595 else
2596 pq_p->retcode = -ENODEV;
2597 pq_p->status[0] |= STAT_FAILED;
2598 break;
2599 }
2600 if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
2601 pq_p->audit[1] |= FP_AWAKENING;
2602 atomic_set(&pq_p->alarmrung, 1);
2603 wake_up(&pq_p->waitq);
2604 }
2605}
2606
2607/**
2608 * return TRUE if the work element should be removed from the queue
2609 */
2610static inline int
2611helper_receive_rc(int index, int *rc_p)
2612{
2613 switch (*rc_p) {
2614 case 0:
2615 case REC_OPERAND_INV:
2616 case REC_OPERAND_SIZE:
2617 case REC_EVEN_MOD:
2618 case REC_INVALID_PAD:
2619 case REC_USE_PCICA:
2620 break;
2621
2622 case REC_BUSY:
2623 case REC_NO_WORK:
2624 case REC_EMPTY:
2625 case REC_RETRY_DEV:
2626 case REC_FATAL_ERROR:
2627 return 0;
2628
2629 case REC_NO_RESPONSE:
2630 break;
2631
2632 default:
2633 PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
2634 *rc_p, SHRT2LONG(index));
2635 *rc_p = REC_NO_RESPONSE;
2636 break;
2637 }
2638 return 1;
2639}
2640
2641static inline void
2642z90crypt_schedule_reader_timer(void)
2643{
2644 if (timer_pending(&reader_timer))
2645 return;
2646 if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
2647 PRINTK("Timer pending while modifying reader timer\n");
2648}
2649
2650static void
2651z90crypt_reader_task(unsigned long ptr)
2652{
2653 int workavail, index, rc, buff_len;
2654 unsigned char psmid[8];
2655 unsigned char __user *resp_addr;
2656 static unsigned char buff[1024];
2657
2658 /**
2659 * we use workavail = 2 to ensure 2 passes with nothing dequeued before
2660 * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
2661 * loop, there is no work remaining on the queues.
2662 */
2663 resp_addr = 0;
2664 workavail = 2;
2665 buff_len = 0;
2666 while (workavail) {
2667 workavail--;
2668 rc = 0;
2669 spin_lock_irq(&queuespinlock);
2670 memset(buff, 0x00, sizeof(buff));
2671
2672 /* Dequeue once from each device in round robin. */
2673 for (index = 0; index < z90crypt.mask.st_count; index++) {
2674 PDEBUG("About to receive.\n");
2675 rc = receive_from_crypto_device(SHRT2LONG(index),
2676 psmid,
2677 &buff_len,
2678 buff,
2679 &resp_addr);
2680 PDEBUG("Dequeued: rc = %d.\n", rc);
2681
2682 if (helper_receive_rc(index, &rc)) {
2683 if (rc != REC_NO_RESPONSE) {
2684 helper_send_work(index);
2685 workavail = 2;
2686 }
2687
2688 helper_handle_work_element(index, psmid, rc,
2689 buff_len, buff,
2690 resp_addr);
2691 }
2692
2693 if (rc == REC_FATAL_ERROR)
2694 PRINTKW("REC_FATAL_ERROR from device %d!\n",
2695 SHRT2LONG(index));
2696 }
2697 spin_unlock_irq(&queuespinlock);
2698 }
2699
2700 if (pendingq_count + requestq_count)
2701 z90crypt_schedule_reader_timer();
2702}
2703
2704static inline void
2705z90crypt_schedule_config_task(unsigned int expiration)
2706{
2707 if (timer_pending(&config_timer))
2708 return;
2709 if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
2710 PRINTK("Timer pending while modifying config timer\n");
2711}
2712
2713static void
2714z90crypt_config_task(unsigned long ptr)
2715{
2716 int rc;
2717
2718 PDEBUG("jiffies %ld\n", jiffies);
2719
2720 if ((rc = refresh_z90crypt(&z90crypt.cdx)))
2721 PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
2722 /* If return was fatal, don't bother reconfiguring */
2723 if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
2724 z90crypt_schedule_config_task(CONFIGTIME);
2725}
2726
2727static inline void
2728z90crypt_schedule_cleanup_task(void)
2729{
2730 if (timer_pending(&cleanup_timer))
2731 return;
2732 if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
2733 PRINTK("Timer pending while modifying cleanup timer\n");
2734}
2735
2736static inline void
2737helper_drain_queues(void)
2738{
2739 struct work_element *pq_p;
2740 struct list_head *lptr, *tptr;
2741
2742 list_for_each_safe(lptr, tptr, &pending_list) {
2743 pq_p = list_entry(lptr, struct work_element, liste);
2744 pq_p->retcode = -ENODEV;
2745 pq_p->status[0] |= STAT_FAILED;
2746 unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2747 (struct caller *)pq_p->requestptr);
2748 list_del_init(lptr);
2749 pendingq_count--;
2750 pq_p->audit[1] |= FP_NOTPENDING;
2751 pq_p->audit[1] |= FP_AWAKENING;
2752 atomic_set(&pq_p->alarmrung, 1);
2753 wake_up(&pq_p->waitq);
2754 }
2755
2756 list_for_each_safe(lptr, tptr, &request_list) {
2757 pq_p = list_entry(lptr, struct work_element, liste);
2758 pq_p->retcode = -ENODEV;
2759 pq_p->status[0] |= STAT_FAILED;
2760 list_del_init(lptr);
2761 requestq_count--;
2762 pq_p->audit[1] |= FP_REMREQUEST;
2763 pq_p->audit[1] |= FP_AWAKENING;
2764 atomic_set(&pq_p->alarmrung, 1);
2765 wake_up(&pq_p->waitq);
2766 }
2767}
2768
2769static inline void
2770helper_timeout_requests(void)
2771{
2772 struct work_element *pq_p;
2773 struct list_head *lptr, *tptr;
2774 long timelimit;
2775
2776 timelimit = jiffies - (CLEANUPTIME * HZ);
2777 /* The list is in strict chronological order */
2778 list_for_each_safe(lptr, tptr, &pending_list) {
2779 pq_p = list_entry(lptr, struct work_element, liste);
2780 if (pq_p->requestsent >= timelimit)
2781 break;
2782 PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
2783 ((struct caller *)pq_p->requestptr)->caller_id[0],
2784 ((struct caller *)pq_p->requestptr)->caller_id[1],
2785 ((struct caller *)pq_p->requestptr)->caller_id[2],
2786 ((struct caller *)pq_p->requestptr)->caller_id[3],
2787 ((struct caller *)pq_p->requestptr)->caller_id[4],
2788 ((struct caller *)pq_p->requestptr)->caller_id[5],
2789 ((struct caller *)pq_p->requestptr)->caller_id[6],
2790 ((struct caller *)pq_p->requestptr)->caller_id[7]);
2791 pq_p->retcode = -ETIMEOUT;
2792 pq_p->status[0] |= STAT_FAILED;
2793 /* get this off any caller queue it may be on */
2794 unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2795 (struct caller *) pq_p->requestptr);
2796 list_del_init(lptr);
2797 pendingq_count--;
2798 pq_p->audit[1] |= FP_TIMEDOUT;
2799 pq_p->audit[1] |= FP_NOTPENDING;
2800 pq_p->audit[1] |= FP_AWAKENING;
2801 atomic_set(&pq_p->alarmrung, 1);
2802 wake_up(&pq_p->waitq);
2803 }
2804
2805 /**
2806 * If pending count is zero, items left on the request queue may
2807 * never be processed.
2808 */
2809 if (pendingq_count <= 0) {
2810 list_for_each_safe(lptr, tptr, &request_list) {
2811 pq_p = list_entry(lptr, struct work_element, liste);
2812 if (pq_p->requestsent >= timelimit)
2813 break;
2814 PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
2815 ((struct caller *)pq_p->requestptr)->caller_id[0],
2816 ((struct caller *)pq_p->requestptr)->caller_id[1],
2817 ((struct caller *)pq_p->requestptr)->caller_id[2],
2818 ((struct caller *)pq_p->requestptr)->caller_id[3],
2819 ((struct caller *)pq_p->requestptr)->caller_id[4],
2820 ((struct caller *)pq_p->requestptr)->caller_id[5],
2821 ((struct caller *)pq_p->requestptr)->caller_id[6],
2822 ((struct caller *)pq_p->requestptr)->caller_id[7]);
2823 pq_p->retcode = -ETIMEOUT;
2824 pq_p->status[0] |= STAT_FAILED;
2825 list_del_init(lptr);
2826 requestq_count--;
2827 pq_p->audit[1] |= FP_TIMEDOUT;
2828 pq_p->audit[1] |= FP_REMREQUEST;
2829 pq_p->audit[1] |= FP_AWAKENING;
2830 atomic_set(&pq_p->alarmrung, 1);
2831 wake_up(&pq_p->waitq);
2832 }
2833 }
2834}
2835
2836static void
2837z90crypt_cleanup_task(unsigned long ptr)
2838{
2839 PDEBUG("jiffies %ld\n", jiffies);
2840 spin_lock_irq(&queuespinlock);
2841 if (z90crypt.mask.st_count <= 0) // no devices!
2842 helper_drain_queues();
2843 else
2844 helper_timeout_requests();
2845 spin_unlock_irq(&queuespinlock);
2846 z90crypt_schedule_cleanup_task();
2847}
2848
2849static void
2850z90crypt_schedule_reader_task(unsigned long ptr)
2851{
2852 tasklet_schedule(&reader_tasklet);
2853}
2854
2855/**
2856 * Lowlevel Functions:
2857 *
2858 * create_z90crypt: creates and initializes basic data structures
2859 * refresh_z90crypt: re-initializes basic data structures
2860 * find_crypto_devices: returns a count and mask of hardware status
2861 * create_crypto_device: builds the descriptor for a device
2862 * destroy_crypto_device: frees the descriptor for a device
2863 * destroy_z90crypt: drains all work, frees all structures
2864 */
2865
2866/**
2867 * build the z90crypt root structure using the given domain index
2868 */
2869static int
2870create_z90crypt(int *cdx_p)
2871{
2872 struct hdware_block *hdware_blk_p;
2873
2874 memset(&z90crypt, 0x00, sizeof(struct z90crypt));
2875 z90crypt.domain_established = 0;
2876 z90crypt.len = sizeof(struct z90crypt);
2877 z90crypt.max_count = Z90CRYPT_NUM_DEVS;
2878 z90crypt.cdx = *cdx_p;
2879
2880 hdware_blk_p = (struct hdware_block *)
2881 kmalloc(sizeof(struct hdware_block), GFP_ATOMIC);
2882 if (!hdware_blk_p) {
2883 PDEBUG("kmalloc for hardware block failed\n");
2884 return ENOMEM;
2885 }
2886 memset(hdware_blk_p, 0x00, sizeof(struct hdware_block));
2887 z90crypt.hdware_info = hdware_blk_p;
2888
2889 return 0;
2890}
2891
2892static inline int
2893helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
2894{
2895 enum hdstat hd_stat;
2896 int q_depth, dev_type;
2897 int indx, chkdom, numdomains;
2898
2899 q_depth = dev_type = numdomains = 0;
2900 for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
2901 for (indx = 0; indx < z90crypt.max_count; indx++) {
2902 hd_stat = HD_NOT_THERE;
2903 numdomains = 0;
2904 for (chkdom = 0; chkdom <= 15; chkdom++) {
2905 hd_stat = query_online(indx, chkdom, MAX_RESET,
2906 &q_depth, &dev_type);
2907 if (hd_stat == HD_TSQ_EXCEPTION) {
2908 z90crypt.terminating = 1;
2909 PRINTKC("exception taken!\n");
2910 break;
2911 }
2912 if (hd_stat == HD_ONLINE) {
2913 cdx_array[numdomains++] = chkdom;
2914 if (*cdx_p == chkdom) {
2915 *correct_cdx_found = 1;
2916 break;
2917 }
2918 }
2919 }
2920 if ((*correct_cdx_found == 1) || (numdomains != 0))
2921 break;
2922 if (z90crypt.terminating)
2923 break;
2924 }
2925 return numdomains;
2926}
2927
2928static inline int
2929probe_crypto_domain(int *cdx_p)
2930{
2931 int cdx_array[16];
2932 char cdx_array_text[53], temp[5];
2933 int correct_cdx_found, numdomains;
2934
2935 correct_cdx_found = 0;
2936 numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);
2937
2938 if (z90crypt.terminating)
2939 return TSQ_FATAL_ERROR;
2940
2941 if (correct_cdx_found)
2942 return 0;
2943
2944 if (numdomains == 0) {
2945 PRINTKW("Unable to find crypto domain: No devices found\n");
2946 return Z90C_NO_DEVICES;
2947 }
2948
2949 if (numdomains == 1) {
2950 if (*cdx_p == -1) {
2951 *cdx_p = cdx_array[0];
2952 return 0;
2953 }
2954 PRINTKW("incorrect domain: specified = %d, found = %d\n",
2955 *cdx_p, cdx_array[0]);
2956 return Z90C_INCORRECT_DOMAIN;
2957 }
2958
2959 numdomains--;
2960 sprintf(cdx_array_text, "%d", cdx_array[numdomains]);
2961 while (numdomains) {
2962 numdomains--;
2963 sprintf(temp, ", %d", cdx_array[numdomains]);
2964 strcat(cdx_array_text, temp);
2965 }
2966
2967 PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n",
2968 *cdx_p, cdx_array_text);
2969 return Z90C_AMBIGUOUS_DOMAIN;
2970}
2971
2972static int
2973refresh_z90crypt(int *cdx_p)
2974{
2975 int i, j, indx, rv;
2976 static struct status local_mask;
2977 struct device *devPtr;
2978 unsigned char oldStat, newStat;
2979 int return_unchanged;
2980
2981 if (z90crypt.len != sizeof(z90crypt))
2982 return ENOTINIT;
2983 if (z90crypt.terminating)
2984 return TSQ_FATAL_ERROR;
2985 rv = 0;
2986 if (!z90crypt.hdware_info->hdware_mask.st_count &&
2987 !z90crypt.domain_established) {
2988 rv = probe_crypto_domain(cdx_p);
2989 if (z90crypt.terminating)
2990 return TSQ_FATAL_ERROR;
2991 if (rv == Z90C_NO_DEVICES)
 2992 return 0; /* try later */
2993 if (rv)
2994 return rv;
2995 z90crypt.cdx = *cdx_p;
2996 z90crypt.domain_established = 1;
2997 }
2998 rv = find_crypto_devices(&local_mask);
2999 if (rv) {
3000 PRINTK("find crypto devices returned %d\n", rv);
3001 return rv;
3002 }
3003 if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
3004 sizeof(struct status))) {
3005 return_unchanged = 1;
3006 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
 3007 /**
 3008 * Check for disabled cards: mark any disabled device as
 3009 * absent in the local mask so the pass below destroys it.
 3010 */
3011 for (j = 0;
3012 j < z90crypt.hdware_info->type_mask[i].st_count;
3013 j++) {
3014 indx = z90crypt.hdware_info->type_x_addr[i].
3015 device_index[j];
3016 devPtr = z90crypt.device_p[indx];
3017 if (devPtr && devPtr->disabled) {
3018 local_mask.st_mask[indx] = HD_NOT_THERE;
3019 return_unchanged = 0;
3020 }
3021 }
3022 }
3023 if (return_unchanged == 1)
3024 return 0;
3025 }
3026
3027 spin_lock_irq(&queuespinlock);
3028 for (i = 0; i < z90crypt.max_count; i++) {
3029 oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
3030 newStat = local_mask.st_mask[i];
3031 if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
3032 destroy_crypto_device(i);
3033 else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
3034 rv = create_crypto_device(i);
 if (rv >= REC_FATAL_ERROR) {
 /* fatal create error: don't leak queuespinlock */
 spin_unlock_irq(&queuespinlock);
 return rv;
 }
3037 if (rv != 0) {
3038 local_mask.st_mask[i] = HD_NOT_THERE;
3039 local_mask.st_count--;
3040 }
3041 }
3042 }
3043 memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
3044 sizeof(local_mask.st_mask));
3045 z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
3046 z90crypt.hdware_info->hdware_mask.disabled_count =
3047 local_mask.disabled_count;
3048 refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
3049 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
3050 refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
3051 &(z90crypt.hdware_info->type_x_addr[i]));
3052 spin_unlock_irq(&queuespinlock);
3053
3054 return rv;
3055}
3056
3057static int
3058find_crypto_devices(struct status *deviceMask)
3059{
3060 int i, q_depth, dev_type;
3061 enum hdstat hd_stat;
3062
3063 deviceMask->st_count = 0;
3064 deviceMask->disabled_count = 0;
3065 deviceMask->user_disabled_count = 0;
3066
3067 for (i = 0; i < z90crypt.max_count; i++) {
3068 hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
3069 &dev_type);
3070 if (hd_stat == HD_TSQ_EXCEPTION) {
3071 z90crypt.terminating = 1;
3072 PRINTKC("Exception during probe for crypto devices\n");
3073 return TSQ_FATAL_ERROR;
3074 }
3075 deviceMask->st_mask[i] = hd_stat;
3076 if (hd_stat == HD_ONLINE) {
3077 PDEBUG("Got an online crypto!: %d\n", i);
3078 PDEBUG("Got a queue depth of %d\n", q_depth);
3079 PDEBUG("Got a device type of %d\n", dev_type);
3080 if (q_depth <= 0)
3081 return TSQ_FATAL_ERROR;
3082 deviceMask->st_count++;
3083 z90crypt.q_depth_array[i] = q_depth;
3084 z90crypt.dev_type_array[i] = dev_type;
3085 }
3086 }
3087
3088 return 0;
3089}
3090
3091static int
3092refresh_index_array(struct status *status_str, struct device_x *index_array)
3093{
3094 int i, count;
3095 enum devstat stat;
3096
3097 i = -1;
3098 count = 0;
3099 do {
3100 stat = status_str->st_mask[++i];
3101 if (stat == DEV_ONLINE)
3102 index_array->device_index[count++] = i;
3103 } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));
3104
3105 return count;
3106}
3107
3108static int
3109create_crypto_device(int index)
3110{
3111 int rv, devstat, total_size;
3112 struct device *dev_ptr;
3113 struct status *type_str_p;
3114 int deviceType;
3115
3116 dev_ptr = z90crypt.device_p[index];
3117 if (!dev_ptr) {
3118 total_size = sizeof(struct device) +
3119 z90crypt.q_depth_array[index] * sizeof(int);
3120
3121 dev_ptr = (struct device *) kmalloc(total_size, GFP_ATOMIC);
3122 if (!dev_ptr) {
3123 PRINTK("kmalloc device %d failed\n", index);
3124 return ENOMEM;
3125 }
3126 memset(dev_ptr, 0, total_size);
3127 dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
3128 if (!dev_ptr->dev_resp_p) {
3129 kfree(dev_ptr);
3130 PRINTK("kmalloc device %d rec buffer failed\n", index);
3131 return ENOMEM;
3132 }
3133 dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
3134 INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
3135 }
3136
3137 devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
3138 if (devstat == DEV_RSQ_EXCEPTION) {
3139 PRINTK("exception during reset device %d\n", index);
3140 kfree(dev_ptr->dev_resp_p);
3141 kfree(dev_ptr);
3142 return RSQ_FATAL_ERROR;
3143 }
3144 if (devstat == DEV_ONLINE) {
3145 dev_ptr->dev_self_x = index;
3146 dev_ptr->dev_type = z90crypt.dev_type_array[index];
3147 if (dev_ptr->dev_type == NILDEV) {
3148 rv = probe_device_type(dev_ptr);
3149 if (rv) {
3150 PRINTK("rv = %d from probe_device_type %d\n",
3151 rv, index);
3152 kfree(dev_ptr->dev_resp_p);
3153 kfree(dev_ptr);
3154 return rv;
3155 }
3156 }
3157 if (dev_ptr->dev_type == PCIXCC_UNK) {
3158 rv = probe_PCIXCC_type(dev_ptr);
3159 if (rv) {
3160 PRINTK("rv = %d from probe_PCIXCC_type %d\n",
3161 rv, index);
3162 kfree(dev_ptr->dev_resp_p);
3163 kfree(dev_ptr);
3164 return rv;
3165 }
3166 }
3167 deviceType = dev_ptr->dev_type;
3168 z90crypt.dev_type_array[index] = deviceType;
3169 if (deviceType == PCICA)
3170 z90crypt.hdware_info->device_type_array[index] = 1;
3171 else if (deviceType == PCICC)
3172 z90crypt.hdware_info->device_type_array[index] = 2;
3173 else if (deviceType == PCIXCC_MCL2)
3174 z90crypt.hdware_info->device_type_array[index] = 3;
3175 else if (deviceType == PCIXCC_MCL3)
3176 z90crypt.hdware_info->device_type_array[index] = 4;
3177 else if (deviceType == CEX2C)
3178 z90crypt.hdware_info->device_type_array[index] = 5;
3179 else
3180 z90crypt.hdware_info->device_type_array[index] = -1;
3181 }
3182
3183 /**
3184 * 'q_depth' returned by the hardware is one less than
3185 * the actual depth
3186 */
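 /* e.g. a hardware report of 7 means the queue actually holds 8 */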
3187 dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
3188 dev_ptr->dev_type = z90crypt.dev_type_array[index];
3189 dev_ptr->dev_stat = devstat;
3190 dev_ptr->disabled = 0;
3191 z90crypt.device_p[index] = dev_ptr;
3192
3193 if (devstat == DEV_ONLINE) {
3194 if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
3195 z90crypt.mask.st_mask[index] = DEV_ONLINE;
3196 z90crypt.mask.st_count++;
3197 }
3198 deviceType = dev_ptr->dev_type;
3199 type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
3200 if (type_str_p->st_mask[index] != DEV_ONLINE) {
3201 type_str_p->st_mask[index] = DEV_ONLINE;
3202 type_str_p->st_count++;
3203 }
3204 }
3205
3206 return 0;
3207}
3208
3209static int
3210destroy_crypto_device(int index)
3211{
3212 struct device *dev_ptr;
3213 int t, disabledFlag;
3214
3215 dev_ptr = z90crypt.device_p[index];
3216
3217 /* remember device type; get rid of device struct */
3218 if (dev_ptr) {
3219 disabledFlag = dev_ptr->disabled;
3220 t = dev_ptr->dev_type;
3221 if (dev_ptr->dev_resp_p)
3222 kfree(dev_ptr->dev_resp_p);
3223 kfree(dev_ptr);
3224 } else {
3225 disabledFlag = 0;
3226 t = -1;
3227 }
3228 z90crypt.device_p[index] = 0;
3229
3230 /* if the type is valid, remove the device from the type_mask */
3231 if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
3232 z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
3233 z90crypt.hdware_info->type_mask[t].st_count--;
3234 if (disabledFlag == 1)
3235 z90crypt.hdware_info->type_mask[t].disabled_count--;
3236 }
3237 if (z90crypt.mask.st_mask[index] != DEV_GONE) {
3238 z90crypt.mask.st_mask[index] = DEV_GONE;
3239 z90crypt.mask.st_count--;
3240 }
3241 z90crypt.hdware_info->device_type_array[index] = 0;
3242
3243 return 0;
3244}
3245
3246static void
3247destroy_z90crypt(void)
3248{
3249 int i;
3250 for (i = 0; i < z90crypt.max_count; i++)
3251 if (z90crypt.device_p[i])
3252 destroy_crypto_device(i);
3253 if (z90crypt.hdware_info)
3254 kfree((void *)z90crypt.hdware_info);
3255 memset((void *)&z90crypt, 0, sizeof(z90crypt));
3256}
3257
3258static unsigned char static_testmsg[384] = {
32590x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
32600x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
32610x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
32620x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
32630x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32640x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32650x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
32660x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32670xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32680x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32690x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32700x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32710x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
32720x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
32730x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
32740x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
32750x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
32760x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
32770x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
32780x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
32790x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
32800xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
32810x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
32820x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
3283};
3284
3285static int
3286probe_device_type(struct device *devPtr)
3287{
3288 int rv, dv, i, index, length;
3289 unsigned char psmid[8];
3290 static unsigned char loc_testmsg[sizeof(static_testmsg)];
3291
3292 index = devPtr->dev_self_x;
3293 rv = 0;
3294 do {
3295 memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
3296 length = sizeof(static_testmsg) - 24;
3297 /* the -24 allows for the header */
3298 dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3299 if (dv) {
3300 PDEBUG("dv returned by send during probe: %d\n", dv);
3301 if (dv == DEV_SEN_EXCEPTION) {
3302 rv = SEN_FATAL_ERROR;
3303 PRINTKC("exception in send to AP %d\n", index);
3304 break;
3305 }
3306 PDEBUG("return value from send_to_AP: %d\n", rv);
3307 switch (dv) {
3308 case DEV_GONE:
3309 PDEBUG("dev %d not available\n", index);
3310 rv = SEN_NOT_AVAIL;
3311 break;
3312 case DEV_ONLINE:
3313 rv = 0;
3314 break;
3315 case DEV_EMPTY:
3316 rv = SEN_NOT_AVAIL;
3317 break;
3318 case DEV_NO_WORK:
3319 rv = SEN_FATAL_ERROR;
3320 break;
3321 case DEV_BAD_MESSAGE:
3322 rv = SEN_USER_ERROR;
3323 break;
3324 case DEV_QUEUE_FULL:
3325 rv = SEN_QUEUE_FULL;
3326 break;
3327 default:
3328 PRINTK("unknown dv=%d for dev %d\n", dv, index);
3329 rv = SEN_NOT_AVAIL;
3330 break;
3331 }
3332 }
3333
3334 if (rv)
3335 break;
3336
3337 for (i = 0; i < 6; i++) {
3338 mdelay(300);
3339 dv = receive_from_AP(index, z90crypt.cdx,
3340 devPtr->dev_resp_l,
3341 devPtr->dev_resp_p, psmid);
3342 PDEBUG("dv returned by DQ = %d\n", dv);
3343 if (dv == DEV_REC_EXCEPTION) {
3344 rv = REC_FATAL_ERROR;
3345 PRINTKC("exception in dequeue %d\n",
3346 index);
3347 break;
3348 }
3349 switch (dv) {
3350 case DEV_ONLINE:
3351 rv = 0;
3352 break;
3353 case DEV_EMPTY:
3354 rv = REC_EMPTY;
3355 break;
3356 case DEV_NO_WORK:
3357 rv = REC_NO_WORK;
3358 break;
3359 case DEV_BAD_MESSAGE:
3360 case DEV_GONE:
3361 default:
3362 rv = REC_NO_RESPONSE;
3363 break;
3364 }
3365 if ((rv != 0) && (rv != REC_NO_WORK))
3366 break;
3367 if (rv == 0)
3368 break;
3369 }
3370 if (rv)
3371 break;
3372 rv = (devPtr->dev_resp_p[0] == 0x00) &&
3373 (devPtr->dev_resp_p[1] == 0x86);
3374 if (rv)
3375 devPtr->dev_type = PCICC;
3376 else
3377 devPtr->dev_type = PCICA;
3378 rv = 0;
3379 } while (0);
3380 /* In a general error case, the card is not marked online */
3381 return rv;
3382}
3383
3384static unsigned char MCL3_testmsg[] = {
33850x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
33860x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33870x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33880x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33890x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
33900x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
33910x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
33920x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
33930x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33940x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33950x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33960x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33970x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33980x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
33990x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
34000x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
34010x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
34020x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
34030x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
34040x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
34050x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
34060x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
34070x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
34080xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
34090x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
34100x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
34110x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
34120x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
34130x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
34140xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
34150xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
34160x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
34170x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
34180xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
34190x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
3420};
3421
3422static int
3423probe_PCIXCC_type(struct device *devPtr)
3424{
3425 int rv, dv, i, index, length;
3426 unsigned char psmid[8];
3427 static unsigned char loc_testmsg[548];
3428 struct CPRBX *cprbx_p;
3429
3430 index = devPtr->dev_self_x;
3431 rv = 0;
3432 do {
3433 memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
3434 length = sizeof(MCL3_testmsg) - 0x0C;
3435 dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3436 if (dv) {
3437 PDEBUG("dv returned = %d\n", dv);
3438 if (dv == DEV_SEN_EXCEPTION) {
3439 rv = SEN_FATAL_ERROR;
3440 PRINTKC("exception in send to AP %d\n", index);
3441 break;
3442 }
3443 PDEBUG("return value from send_to_AP: %d\n", rv);
3444 switch (dv) {
3445 case DEV_GONE:
3446 PDEBUG("dev %d not available\n", index);
3447 rv = SEN_NOT_AVAIL;
3448 break;
3449 case DEV_ONLINE:
3450 rv = 0;
3451 break;
3452 case DEV_EMPTY:
3453 rv = SEN_NOT_AVAIL;
3454 break;
3455 case DEV_NO_WORK:
3456 rv = SEN_FATAL_ERROR;
3457 break;
3458 case DEV_BAD_MESSAGE:
3459 rv = SEN_USER_ERROR;
3460 break;
3461 case DEV_QUEUE_FULL:
3462 rv = SEN_QUEUE_FULL;
3463 break;
3464 default:
3465 PRINTK("unknown dv=%d for dev %d\n", dv, index);
3466 rv = SEN_NOT_AVAIL;
3467 break;
3468 }
3469 }
3470
3471 if (rv)
3472 break;
3473
3474 for (i = 0; i < 6; i++) {
3475 mdelay(300);
3476 dv = receive_from_AP(index, z90crypt.cdx,
3477 devPtr->dev_resp_l,
3478 devPtr->dev_resp_p, psmid);
3479 PDEBUG("dv returned by DQ = %d\n", dv);
3480 if (dv == DEV_REC_EXCEPTION) {
3481 rv = REC_FATAL_ERROR;
3482 PRINTKC("exception in dequeue %d\n",
3483 index);
3484 break;
3485 }
3486 switch (dv) {
3487 case DEV_ONLINE:
3488 rv = 0;
3489 break;
3490 case DEV_EMPTY:
3491 rv = REC_EMPTY;
3492 break;
3493 case DEV_NO_WORK:
3494 rv = REC_NO_WORK;
3495 break;
3496 case DEV_BAD_MESSAGE:
3497 case DEV_GONE:
3498 default:
3499 rv = REC_NO_RESPONSE;
3500 break;
3501 }
3502 if ((rv != 0) && (rv != REC_NO_WORK))
3503 break;
3504 if (rv == 0)
3505 break;
3506 }
3507 if (rv)
3508 break;
3509 cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
3510 if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
3511 devPtr->dev_type = PCIXCC_MCL2;
3512 PDEBUG("device %d is MCL2\n", index);
3513 } else {
3514 devPtr->dev_type = PCIXCC_MCL3;
3515 PDEBUG("device %d is MCL3\n", index);
3516 }
3517 } while (0);
3518 /* In a general error case, the card is not marked online */
3519 return rv;
3520}
3521
3522#ifdef Z90CRYPT_USE_HOTPLUG
3523static void
3524z90crypt_hotplug_event(int dev_major, int dev_minor, int action)
3525{
3526#ifdef CONFIG_HOTPLUG
3527 char *argv[3];
3528 char *envp[6];
3529 char major[20];
3530 char minor[20];
3531
3532 sprintf(major, "MAJOR=%d", dev_major);
3533 sprintf(minor, "MINOR=%d", dev_minor);
3534
3535 argv[0] = hotplug_path;
3536 argv[1] = "z90crypt";
3537 argv[2] = 0;
3538
3539 envp[0] = "HOME=/";
3540 envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
3541
3542 switch (action) {
3543 case Z90CRYPT_HOTPLUG_ADD:
3544 envp[2] = "ACTION=add";
3545 break;
3546 case Z90CRYPT_HOTPLUG_REMOVE:
3547 envp[2] = "ACTION=remove";
3548 break;
3549 default:
3550 BUG();
3551 break;
3552 }
3553 envp[3] = major;
3554 envp[4] = minor;
3555 envp[5] = 0;
3556
3557 call_usermodehelper(argv[0], argv, envp, 0);
3558#endif
3559}
3560#endif
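
/*
 * Illustrative call sites (assumed; the init/cleanup paths are not shown
 * in this extract). Reporting the character device would look like
 *
 *	z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_ADD);
 *	z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_REMOVE);
 *
 * so /sbin/hotplug runs with ACTION=add/remove and the MAJOR/MINOR
 * environment built above; z90crypt_major stands in for the registered
 * major number and is a hypothetical name here.
 */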
3561
3562module_init(z90crypt_init_module);
3563module_exit(z90crypt_cleanup_module);
diff --git a/drivers/s390/ebcdic.c b/drivers/s390/ebcdic.c
new file mode 100644
index 000000000000..99c98da15473
--- /dev/null
+++ b/drivers/s390/ebcdic.c
@@ -0,0 +1,246 @@
1/*
2 * arch/s390/kernel/ebcdic.c
 3 * EBCDIC -> ASCII, ASCII -> EBCDIC conversion tables.
4 *
5 * S390 version
6 * Copyright (C) 1998 IBM Corporation
7 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
8 */
9
10#include <asm/types.h>
11
12/*
13 * ASCII -> EBCDIC
14 */
15__u8 _ascebc[256] =
16{
17 /*00 NL SH SX EX ET NQ AK BL */
18 0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F,
19 /*08 BS HT LF VT FF CR SO SI */
20 0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
21 /*10 DL D1 D2 D3 D4 NK SN EB */
22 0x10, 0x11, 0x12, 0x13, 0x3C, 0x15, 0x32, 0x26,
23 /*18 CN EM SB EC FS GS RS US */
24 0x18, 0x19, 0x3F, 0x27, 0x1C, 0x1D, 0x1E, 0x1F,
25 /*20 SP ! " # $ % & ' */
26 0x40, 0x5A, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D,
27 /*28 ( ) * + , - . / */
28 0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61,
29 /*30 0 1 2 3 4 5 6 7 */
30 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
31 /*38 8 9 : ; < = > ? */
32 0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F,
33 /*40 @ A B C D E F G */
34 0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
35 /*48 H I J K L M N O */
36 0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
37 /*50 P Q R S T U V W */
38 0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6,
39 /*58 X Y Z [ \ ] ^ _ */
40 0xE7, 0xE8, 0xE9, 0xAD, 0xE0, 0xBD, 0x5F, 0x6D,
41 /*60 ` a b c d e f g */
42 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
43 /*68 h i j k l m n o */
44 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
45 /*70 p q r s t u v w */
46 0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
47 /*78 x y z { | } ~ DL */
48 0xA7, 0xA8, 0xA9, 0xC0, 0x4F, 0xD0, 0xA1, 0x07,
49 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
50 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
51 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
52 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
53 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
54 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
55 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
56 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
57 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
58 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
59 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
60 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
61 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
62 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
63 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
64 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0xFF
65};
66
67/*
68 * EBCDIC -> ASCII
69 */
70__u8 _ebcasc[256] =
71{
72 /* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */
73 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
74 /* 0x08 -GE -SPS -RPT VT FF CR SO SI */
75 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
76 /* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC
77 -ENP ->LF */
78 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
79 /* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB
80 -IUS */
81 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
82 /* 0x20 -DS -SOS FS -WUS -BYP LF ETB ESC
83 -INP */
84 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
85 /* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL
86 -SW */
87 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
88 /* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */
89 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
90 /* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */
91 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
92 /* 0x40 SP RSP ä ---- */
93 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
94 /* 0x48 . < ( + | */
95 0x87, 0xA4, 0x9B, 0x2E, 0x3C, 0x28, 0x2B, 0x7C,
96 /* 0x50 & ---- */
97 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
98 /* 0x58 ß ! $ * ) ; */
99 0x8D, 0xE1, 0x21, 0x24, 0x2A, 0x29, 0x3B, 0xAA,
100 /* 0x60 - / ---- Ä ---- ---- ---- */
101 0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F,
102 /* 0x68 ---- , % _ > ? */
103 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
104 /* 0x70 ---- ---- ---- ---- ---- ---- ---- */
105 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
106 /* 0x78 * ` : # @ ' = " */
107 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
108 /* 0x80 * a b c d e f g */
109 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
110 /* 0x88 h i ---- ---- ---- */
111 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
112 /* 0x90 ° j k l m n o p */
113 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
114 /* 0x98 q r ---- ---- */
115 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
116 /* 0xA0 ~ s t u v w x */
117 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
118 /* 0xA8 y z ---- ---- ---- ---- */
119 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
120 /* 0xB0 ^ ---- § ---- */
121 0x5E, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
122 /* 0xB8 ---- [ ] ---- ---- ---- ---- */
123 0xAB, 0x07, 0x5B, 0x5D, 0x07, 0x07, 0x07, 0x07,
124 /* 0xC0 { A B C D E F G */
125 0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
126 /* 0xC8 H I ---- ö ---- */
127 0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
128 /* 0xD0 } J K L M N O P */
129 0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
130 /* 0xD8 Q R ---- ü */
131 0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
132 /* 0xE0 \ S T U V W X */
133 0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
134 /* 0xE8 Y Z ---- Ö ---- ---- ---- */
135 0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07,
136 /* 0xF0 0 1 2 3 4 5 6 7 */
137 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
138 /* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */
139 0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07
140};
141
142/*
143 * EBCDIC (capitals) -> ASCII (small case)
144 */
145__u8 _ebcasc_reduce_case[256] =
146{
147 /* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */
148 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
149
150 /* 0x08 -GE -SPS -RPT VT FF CR SO SI */
151 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
152
153 /* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC
154 -ENP ->LF */
155 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
156
157 /* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB
158 -IUS */
159 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
160
161 /* 0x20 -DS -SOS FS -WUS -BYP LF ETB ESC
162 -INP */
163 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
164
165 /* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL
166 -SW */
167 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
168
169 /* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */
170 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
171
172 /* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */
173 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
174
175 /* 0x40 SP RSP ä ---- */
176 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
177
178 /* 0x48 . < ( + | */
179 0x87, 0xA4, 0x9B, 0x2E, 0x3C, 0x28, 0x2B, 0x7C,
180
181 /* 0x50 & ---- */
182 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
183
184 /* 0x58 ß ! $ * ) ; */
185 0x8D, 0xE1, 0x21, 0x24, 0x2A, 0x29, 0x3B, 0xAA,
186
187 /* 0x60 - / ---- Ä ---- ---- ---- */
188 0x2D, 0x2F, 0x07, 0x84, 0x07, 0x07, 0x07, 0x8F,
189
190 /* 0x68 ---- , % _ > ? */
191 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
192
193 /* 0x70 ---- ---- ---- ---- ---- ---- ---- */
194 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
195
196 /* 0x78 * ` : # @ ' = " */
197 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
198
199 /* 0x80 * a b c d e f g */
200 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
201
202 /* 0x88 h i ---- ---- ---- */
203 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
204
205 /* 0x90 ° j k l m n o p */
206 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
207
208 /* 0x98 q r ---- ---- */
209 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
210
211 /* 0xA0 ~ s t u v w x */
212 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
213
214 /* 0xA8 y z ---- ---- ---- ---- */
215 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
216
217 /* 0xB0 ^ ---- § ---- */
218 0x5E, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
219
220 /* 0xB8 ---- [ ] ---- ---- ---- ---- */
221 0xAB, 0x07, 0x5B, 0x5D, 0x07, 0x07, 0x07, 0x07,
222
223 /* 0xC0 { A B C D E F G */
224 0x7B, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
225
226 /* 0xC8 H I ---- ö ---- */
227 0x68, 0x69, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
228
229 /* 0xD0 } J K L M N O P */
230 0x7D, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
231
232 /* 0xD8 Q R ---- ü */
233 0x71, 0x72, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
234
235 /* 0xE0 \ S T U V W X */
236 0x5C, 0xF6, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
237
238 /* 0xE8 Y Z ---- Ö ---- ---- ---- */
239 0x79, 0x7A, 0xFD, 0x07, 0x94, 0x07, 0x07, 0x07,
240
241 /* 0xF0 0 1 2 3 4 5 6 7 */
242 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
243
244 /* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */
245 0x38, 0x39, 0x07, 0x07, 0x81, 0x07, 0x07, 0x07
246};
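
/*
 * Illustrative sketch (not part of this file): in-place conversion of a
 * buffer using the tables above. The kernel's real helpers live in
 * <asm/ebcdic.h>; this loop is an assumed restatement of what such a
 * helper does -- one table lookup per byte. The reverse direction works
 * the same way with _ebcasc.
 */
static inline void example_ascii_to_ebcdic(unsigned char *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		buf[i] = _ascebc[buf[i]];	/* ASCII byte -> EBCDIC byte */
}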
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
new file mode 100644
index 000000000000..a7efc394515e
--- /dev/null
+++ b/drivers/s390/net/Kconfig
@@ -0,0 +1,108 @@
1menu "S/390 network device drivers"
2 depends on NETDEVICES && ARCH_S390
3
4config LCS
5 tristate "Lan Channel Station Interface"
6 depends on NETDEVICES && (NET_ETHERNET || TR || FDDI)
7 help
8 Select this option if you want to use LCS networking on IBM S/390
9 or zSeries. This device driver supports Token Ring (IEEE 802.5),
 10 FDDI (ANSI X3T9.5) and Ethernet.
11 This option is also available as a module which will be
12 called lcs.ko. If you do not know what it is, it's safe to say "Y".
13
14config CTC
15 tristate "CTC device support"
16 depends on NETDEVICES
17 help
18 Select this option if you want to use channel-to-channel networking
19 on IBM S/390 or zSeries. This device driver supports real CTC
20 coupling using ESCON. It also supports virtual CTCs when running
21 under VM. It will use the channel device configuration if this is
22 available. This option is also available as a module which will be
23 called ctc.ko. If you do not know what it is, it's safe to say "Y".
24
25config IUCV
26 tristate "IUCV support (VM only)"
27 help
28 Select this option if you want to use inter-user communication
29 under VM or VIF. If unsure, say "Y" to enable a fast communication
30 link between VM guests. At boot time the user ID of the guest needs
31 to be passed to the kernel. Note that both kernels need to be
32 compiled with this option and both need to be booted with the user ID
33 of the other VM guest.
34
35config NETIUCV
36 tristate "IUCV network device support (VM only)"
37 depends on IUCV && NETDEVICES
38 help
39 Select this option if you want to use inter-user communication
40 vehicle networking under VM or VIF. It enables a fast communication
41 link between VM guests. Using ifconfig a point-to-point connection
 42 can be established to the Linux for zSeries or S/390 system
43 running on the other VM guest. This option is also available
44 as a module which will be called netiucv.ko. If unsure, say "Y".
45
46config SMSGIUCV
47 tristate "IUCV special message support (VM only)"
48 depends on IUCV
49 help
50 Select this option if you want to be able to receive SMSG messages
51 from other VM guest systems.
52
53config CLAW
54 tristate "CLAW device support"
55 depends on NETDEVICES
56 help
57 This driver supports channel attached CLAW devices.
58 CLAW is Common Link Access for Workstation. Common devices
59 that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices.
 60 To compile this driver as a module, choose M here: the module will
 61 be called claw.ko. To compile it into the kernel, choose Y.
62
63config QETH
64 tristate "Gigabit Ethernet device support"
65 depends on NETDEVICES && IP_MULTICAST && QDIO
66 help
67 This driver supports the IBM S/390 and zSeries OSA Express adapters
68 in QDIO mode (all media types), HiperSockets interfaces and VM GuestLAN
69 interfaces in QDIO and HIPER mode.
70
71 For details please refer to the documentation provided by IBM at
72 <http://www10.software.ibm.com/developerworks/opensource/linux390>
73
74 To compile this driver as a module, choose M here: the
75 module will be called qeth.ko.
76
77
78comment "Gigabit Ethernet default settings"
79 depends on QETH
80
81config QETH_IPV6
82 bool "IPv6 support for gigabit ethernet"
83 depends on (QETH = IPV6) || (QETH && IPV6 = 'y')
84 help
85 If CONFIG_QETH is switched on, this option will include IPv6
86 support in the qeth device driver.
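
# The dependency line above encodes the usual tristate guard:
# "QETH = IPV6" permits matching tristates (y/y or m/m), and
# "QETH && IPV6 = 'y'" additionally permits QETH=m with IPV6 built in,
# so the one forbidden combination is QETH=y with IPV6=m.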
87
88config QETH_VLAN
89 bool "VLAN support for gigabit ethernet"
90 depends on (QETH = VLAN_8021Q) || (QETH && VLAN_8021Q = 'y')
91 help
92 If CONFIG_QETH is switched on, this option will include IEEE
93 802.1q VLAN support in the qeth device driver.
94
95config QETH_PERF_STATS
96 bool "Performance statistics in /proc"
97 depends on QETH
98 help
99 When switched on, this option will add a file in the proc-fs
100 (/proc/qeth_perf_stats) containing performance statistics. It
101 may slightly impact performance, so this is only recommended for
102 internal tuning of the device driver.
103
104config CCWGROUP
105 tristate
106 default (LCS || CTC || QETH)
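# CCWGROUP has no prompt: it simply follows the listed drivers, so
# enabling any of LCS, CTC or QETH pulls in the ccwgroup bus code
# (built-in or modular, matching the strongest selection).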
107
108endmenu
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
new file mode 100644
index 000000000000..7cabb80a2e41
--- /dev/null
+++ b/drivers/s390/net/Makefile
@@ -0,0 +1,14 @@
1#
2# S/390 network devices
3#
4
5ctc-objs := ctcmain.o ctctty.o ctcdbug.o
6
7obj-$(CONFIG_IUCV) += iucv.o
8obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
9obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
10obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
11obj-$(CONFIG_LCS) += lcs.o cu3088.o
12qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o qeth_tso.o
13qeth-$(CONFIG_PROC_FS) += qeth_proc.o
14obj-$(CONFIG_QETH) += qeth.o
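
# The composite objects above (ctc-objs, qeth-y) name the pieces linked
# into the ctc.o and qeth.o modules; the obj-$(CONFIG_...) lines then
# build each composite as built-in or modular per its config symbol.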
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
new file mode 100644
index 000000000000..06804d39a9c6
--- /dev/null
+++ b/drivers/s390/net/claw.c
@@ -0,0 +1,4447 @@
1/*
2 * drivers/s390/net/claw.c
3 * ESCON CLAW network driver
4 *
5 * $Revision: 1.35 $ $Date: 2005/03/24 12:25:38 $
6 *
 7 * Linux for zSeries version
8 * Copyright (C) 2002,2005 IBM Corporation
9 * Author(s) Original code written by:
10 * Kazuo Iimura (iimura@jp.ibm.com)
11 * Rewritten by
12 * Andy Richter (richtera@us.ibm.com)
13 * Marc Price (mwprice@us.ibm.com)
14 *
15 * sysfs parms:
16 * group x.x.rrrr,x.x.wwww
17 * read_buffer nnnnnnn
18 * write_buffer nnnnnn
19 * host_name aaaaaaaa
20 * adapter_name aaaaaaaa
21 * api_type aaaaaaaa
22 *
23 * eg.
24 * group 0.0.0200 0.0.0201
25 * read_buffer 25
26 * write_buffer 20
27 * host_name LINUX390
28 * adapter_name RS6K
29 * api_type TCPIP
30 *
31 * where
32 *
 33 * The device id is decided by the order in which entries
 34 * are added to the group: the first is claw0, the second claw1,
 35 * and so on up to CLAW_MAX_DEV.
36 *
37 * rrrr - the first of 2 consecutive device addresses used for the
38 * CLAW protocol.
39 * The specified address is always used as the input (Read)
40 * channel and the next address is used as the output channel.
41 *
42 * wwww - the second of 2 consecutive device addresses used for
43 * the CLAW protocol.
44 * The specified address is always used as the output
45 * channel and the previous address is used as the input channel.
46 *
47 * read_buffer - specifies number of input buffers to allocate.
48 * write_buffer - specifies number of output buffers to allocate.
49 * host_name - host name
 50 * adapter_name - adapter name
51 * api_type - API type TCPIP or API will be sent and expected
52 * as ws_name
53 *
54 * Note the following requirements:
 55 * 1) host_name must match the configured adapter_name on the remote side
 56 * 2) adapter_name must match the configured host_name on the remote side
57 *
58 * Change History
59 * 1.00 Initial release shipped
60 * 1.10 Changes for Buffer allocation
61 * 1.15 Changed for 2.6 Kernel No longer compiles on 2.4 or lower
62 * 1.25 Added Packing support
63 */
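
/*
 * Example configuration flow (illustrative: the device numbers follow
 * the sample above, and the paths assume the standard ccwgroup sysfs
 * layout provided by the ccwgroup driver core):
 *
 *	echo 0.0.0200,0.0.0201 > /sys/bus/ccwgroup/drivers/claw/group
 *	echo LINUX390 > /sys/bus/ccwgroup/devices/0.0.0200/host_name
 *	echo RS6K > /sys/bus/ccwgroup/devices/0.0.0200/adapter_name
 *	echo TCPIP > /sys/bus/ccwgroup/devices/0.0.0200/api_type
 *	echo 1 > /sys/bus/ccwgroup/devices/0.0.0200/online
 */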
64#include <asm/bitops.h>
65#include <asm/ccwdev.h>
66#include <asm/ccwgroup.h>
67#include <asm/debug.h>
68#include <asm/idals.h>
69#include <asm/io.h>
70
71#include <linux/ctype.h>
72#include <linux/delay.h>
73#include <linux/errno.h>
74#include <linux/if_arp.h>
75#include <linux/init.h>
76#include <linux/interrupt.h>
77#include <linux/ip.h>
78#include <linux/kernel.h>
79#include <linux/module.h>
80#include <linux/netdevice.h>
81#include <linux/etherdevice.h>
82#include <linux/proc_fs.h>
83#include <linux/sched.h>
84#include <linux/signal.h>
85#include <linux/skbuff.h>
86#include <linux/slab.h>
87#include <linux/string.h>
88#include <linux/tcp.h>
89#include <linux/timer.h>
90#include <linux/types.h>
91#include <linux/version.h>
92
93#include "cu3088.h"
94#include "claw.h"
95
96MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
97MODULE_DESCRIPTION("Linux for zSeries CLAW Driver\n" \
98 "Copyright 2000,2005 IBM Corporation\n");
99MODULE_LICENSE("GPL");
100
101/* Debugging is based on DEBUGMSG, IOTRACE, or FUNCTRACE options:
102 DEBUGMSG - Enables output of various debug messages in the code
103 IOTRACE - Enables output of CCW and other IO related traces
104 FUNCTRACE - Enables output of function entry/exit trace
105 Define any combination of above options to enable tracing
106
107 CLAW also uses the s390dbf file system see claw_trace and claw_setup
108*/
109
110/* following enables tracing */
111//#define DEBUGMSG
112//#define IOTRACE
113//#define FUNCTRACE
114
115#ifdef DEBUGMSG
116#define DEBUG
117#endif
118
119#ifdef IOTRACE
120#define DEBUG
121#endif
122
123#ifdef FUNCTRACE
124#define DEBUG
125#endif
126
 127static char debug_buffer[255];
128/**
129 * Debug Facility Stuff
130 */
131static debug_info_t *claw_dbf_setup;
132static debug_info_t *claw_dbf_trace;
133
134/**
135 * CLAW Debug Facility functions
136 */
137static void
138claw_unregister_debug_facility(void)
139{
140 if (claw_dbf_setup)
141 debug_unregister(claw_dbf_setup);
142 if (claw_dbf_trace)
143 debug_unregister(claw_dbf_trace);
144}
145
146static int
147claw_register_debug_facility(void)
148{
149 claw_dbf_setup = debug_register("claw_setup", 1, 1, 8);
150 claw_dbf_trace = debug_register("claw_trace", 1, 2, 8);
151 if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
152 printk(KERN_WARNING "Not enough memory for debug facility.\n");
153 claw_unregister_debug_facility();
154 return -ENOMEM;
155 }
156 debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
157 debug_set_level(claw_dbf_setup, 2);
158 debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
159 debug_set_level(claw_dbf_trace, 2);
160 return 0;
161}
162
163static inline void
164claw_set_busy(struct net_device *dev)
165{
166 ((struct claw_privbk *) dev->priv)->tbusy=1;
167 eieio();
168}
169
170static inline void
171claw_clear_busy(struct net_device *dev)
172{
173 clear_bit(0, &(((struct claw_privbk *) dev->priv)->tbusy));
174 netif_wake_queue(dev);
175 eieio();
176}
177
178static inline int
179claw_check_busy(struct net_device *dev)
180{
181 eieio();
182 return ((struct claw_privbk *) dev->priv)->tbusy;
183}
184
185static inline void
186claw_setbit_busy(int nr,struct net_device *dev)
187{
188 netif_stop_queue(dev);
189 set_bit(nr, (void *)&(((struct claw_privbk *)dev->priv)->tbusy));
190}
191
192static inline void
193claw_clearbit_busy(int nr,struct net_device *dev)
194{
195 clear_bit(nr,(void *)&(((struct claw_privbk *)dev->priv)->tbusy));
196 netif_wake_queue(dev);
197}
198
199static inline int
200claw_test_and_setbit_busy(int nr,struct net_device *dev)
201{
202 netif_stop_queue(dev);
203 return test_and_set_bit(nr,
204 (void *)&(((struct claw_privbk *) dev->priv)->tbusy));
205}
206
207
208/* Functions for the DEV methods */
209
210static int claw_probe(struct ccwgroup_device *cgdev);
211static void claw_remove_device(struct ccwgroup_device *cgdev);
212static void claw_purge_skb_queue(struct sk_buff_head *q);
213static int claw_new_device(struct ccwgroup_device *cgdev);
214static int claw_shutdown_device(struct ccwgroup_device *cgdev);
215static int claw_tx(struct sk_buff *skb, struct net_device *dev);
216static int claw_change_mtu( struct net_device *dev, int new_mtu);
217static int claw_open(struct net_device *dev);
218static void claw_irq_handler(struct ccw_device *cdev,
219 unsigned long intparm, struct irb *irb);
220static void claw_irq_tasklet ( unsigned long data );
221static int claw_release(struct net_device *dev);
222static void claw_write_retry ( struct chbk * p_ch );
223static void claw_write_next ( struct chbk * p_ch );
224static void claw_timer ( struct chbk * p_ch );
225
226/* Functions */
227static int add_claw_reads(struct net_device *dev,
228 struct ccwbk* p_first, struct ccwbk* p_last);
229static void inline ccw_check_return_code (struct ccw_device *cdev,
230 int return_code);
231static void inline ccw_check_unit_check (struct chbk * p_ch,
232 unsigned char sense );
233static int find_link(struct net_device *dev, char *host_name, char *ws_name );
234static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
235static int init_ccw_bk(struct net_device *dev);
236static void probe_error( struct ccwgroup_device *cgdev);
237static struct net_device_stats *claw_stats(struct net_device *dev);
238static int inline pages_to_order_of_mag(int num_of_pages);
239static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
240#ifdef DEBUG
241static void dumpit (char *buf, int len);
242#endif
243/* sysfs Functions */
244static ssize_t claw_hname_show(struct device *dev, char *buf);
245static ssize_t claw_hname_write(struct device *dev,
246 const char *buf, size_t count);
247static ssize_t claw_adname_show(struct device *dev, char *buf);
248static ssize_t claw_adname_write(struct device *dev,
249 const char *buf, size_t count);
250static ssize_t claw_apname_show(struct device *dev, char *buf);
251static ssize_t claw_apname_write(struct device *dev,
252 const char *buf, size_t count);
253static ssize_t claw_wbuff_show(struct device *dev, char *buf);
254static ssize_t claw_wbuff_write(struct device *dev,
255 const char *buf, size_t count);
256static ssize_t claw_rbuff_show(struct device *dev, char *buf);
257static ssize_t claw_rbuff_write(struct device *dev,
258 const char *buf, size_t count);
259static int claw_add_files(struct device *dev);
260static void claw_remove_files(struct device *dev);
261
262/* Functions for System Validate */
263static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw);
264static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
265 __u8 correlator, __u8 rc , char *local_name, char *remote_name);
266static int claw_snd_conn_req(struct net_device *dev, __u8 link);
267static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl);
268static int claw_snd_sys_validate_rsp(struct net_device *dev,
269 struct clawctl * p_ctl, __u32 return_code);
270static int claw_strt_conn_req(struct net_device *dev );
271static void claw_strt_read ( struct net_device *dev, int lock );
272static void claw_strt_out_IO( struct net_device *dev );
273static void claw_free_wrt_buf( struct net_device *dev );
274
275/* Functions for unpack reads */
276static void unpack_read (struct net_device *dev );
277
278/* ccwgroup table */
279
280static struct ccwgroup_driver claw_group_driver = {
281 .owner = THIS_MODULE,
282 .name = "claw",
283 .max_slaves = 2,
284 .driver_id = 0xC3D3C1E6,
285 .probe = claw_probe,
286 .remove = claw_remove_device,
287 .set_online = claw_new_device,
288 .set_offline = claw_shutdown_device,
289};
290
291/*
292*
293* Key functions
294*/
295
296/*----------------------------------------------------------------*
297 * claw_probe *
298 * this function is called for each CLAW device. *
299 *----------------------------------------------------------------*/
300static int
301claw_probe(struct ccwgroup_device *cgdev)
302{
303 int rc;
304 struct claw_privbk *privptr=NULL;
305
306#ifdef FUNCTRACE
307 printk(KERN_INFO "%s Enter\n",__FUNCTION__);
308#endif
309 CLAW_DBF_TEXT(2,setup,"probe");
310 if (!get_device(&cgdev->dev))
311 return -ENODEV;
312#ifdef DEBUGMSG
313 printk(KERN_INFO "claw: variable cgdev =\n");
314 dumpit((char *)cgdev, sizeof(struct ccwgroup_device));
315#endif
316 privptr = kmalloc(sizeof(struct claw_privbk), GFP_KERNEL);
317 if (privptr == NULL) {
318 probe_error(cgdev);
319 put_device(&cgdev->dev);
320 printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n",
321 cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__);
322 CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
323 return -ENOMEM;
324 }
325 memset(privptr,0x00,sizeof(struct claw_privbk));
326 privptr->p_mtc_envelope= kmalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
327 privptr->p_env = kmalloc(sizeof(struct claw_env), GFP_KERNEL);
328 if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) {
329 probe_error(cgdev);
330 put_device(&cgdev->dev);
331 printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n",
332 cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__);
333 CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
334 return -ENOMEM;
335 }
336 memset(privptr->p_mtc_envelope, 0x00, MAX_ENVELOPE_SIZE);
337 memset(privptr->p_env, 0x00, sizeof(struct claw_env));
338 memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8);
339 memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8);
340 memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8);
341 privptr->p_env->packing = 0;
342 privptr->p_env->write_buffers = 5;
343 privptr->p_env->read_buffers = 5;
344 privptr->p_env->read_size = CLAW_FRAME_SIZE;
345 privptr->p_env->write_size = CLAW_FRAME_SIZE;
346 rc = claw_add_files(&cgdev->dev);
347 if (rc) {
348 probe_error(cgdev);
349 put_device(&cgdev->dev);
350 printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n",
351 cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__);
352 CLAW_DBF_TEXT_(2,setup,"probex%d",rc);
353 return rc;
354 }
355 printk(KERN_INFO "claw: sysfs files added for %s\n",cgdev->cdev[0]->dev.bus_id);
356 privptr->p_env->p_priv = privptr;
357 cgdev->cdev[0]->handler = claw_irq_handler;
358 cgdev->cdev[1]->handler = claw_irq_handler;
359 cgdev->dev.driver_data = privptr;
360#ifdef FUNCTRACE
361 printk(KERN_INFO "claw:%s exit on line %d, "
362 "rc = 0\n",__FUNCTION__,__LINE__);
363#endif
364 CLAW_DBF_TEXT(2,setup,"prbext 0");
365
366 return 0;
367} /* end of claw_probe */
368
369/*-------------------------------------------------------------------*
370 * claw_tx *
371 *-------------------------------------------------------------------*/
372
373static int
374claw_tx(struct sk_buff *skb, struct net_device *dev)
375{
376 int rc;
377 struct claw_privbk *privptr=dev->priv;
378 unsigned long saveflags;
379 struct chbk *p_ch;
380
381#ifdef FUNCTRACE
382 printk(KERN_INFO "%s:%s enter\n",dev->name,__FUNCTION__);
383#endif
384 CLAW_DBF_TEXT(4,trace,"claw_tx");
385 p_ch=&privptr->channel[WRITE];
386 if (skb == NULL) {
387 printk(KERN_WARNING "%s: null pointer passed as sk_buffer\n",
388 dev->name);
389 privptr->stats.tx_dropped++;
390#ifdef FUNCTRACE
391 printk(KERN_INFO "%s: %s() exit on line %d, rc = EIO\n",
392 dev->name,__FUNCTION__, __LINE__);
393#endif
394 CLAW_DBF_TEXT_(2,trace,"clawtx%d",-EIO);
395 return -EIO;
396 }
397
398#ifdef IOTRACE
399 printk(KERN_INFO "%s: variable sk_buff=\n",dev->name);
400 dumpit((char *) skb, sizeof(struct sk_buff));
401 printk(KERN_INFO "%s: variable dev=\n",dev->name);
402 dumpit((char *) dev, sizeof(struct net_device));
403#endif
404 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
405 rc=claw_hw_tx( skb, dev, 1 );
406 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
407#ifdef FUNCTRACE
408 printk(KERN_INFO "%s:%s exit on line %d, rc = %d\n",
409 dev->name, __FUNCTION__, __LINE__, rc);
410#endif
411 CLAW_DBF_TEXT_(4,trace,"clawtx%d",rc);
412 return rc;
413} /* end of claw_tx */
414
415/*------------------------------------------------------------------*
416 * pack the collect queue into an skb and return it *
417 * If not packing just return the top skb from the queue *
418 *------------------------------------------------------------------*/
419
420static struct sk_buff *
421claw_pack_skb(struct claw_privbk *privptr)
422{
423 struct sk_buff *new_skb,*held_skb;
424 struct chbk *p_ch = &privptr->channel[WRITE];
425 struct claw_env *p_env = privptr->p_env;
426 int pkt_cnt,pk_ind,so_far;
427
428 new_skb = NULL; /* assume no dice */
429 pkt_cnt = 0;
430 CLAW_DBF_TEXT(4,trace,"PackSKBe");
431 if (skb_queue_len(&p_ch->collect_queue) > 0) {
432 /* some data */
433 held_skb = skb_dequeue(&p_ch->collect_queue);
434 if (p_env->packing != DO_PACKED)
435 return held_skb;
436 if (held_skb)
437 atomic_dec(&held_skb->users);
438 else
439 return NULL;
440 /* get a new SKB we will pack at least one */
441 new_skb = dev_alloc_skb(p_env->write_size);
442 if (new_skb == NULL) {
443 atomic_inc(&held_skb->users);
444 skb_queue_head(&p_ch->collect_queue,held_skb);
445 return NULL;
446 }
447 /* we have packed packet and a place to put it */
448 pk_ind = 1;
449 so_far = 0;
450 new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
451 while ((pk_ind) && (held_skb != NULL)) {
452 if (held_skb->len+so_far <= p_env->write_size-8) {
453 memcpy(skb_put(new_skb,held_skb->len),
454 held_skb->data,held_skb->len);
455 privptr->stats.tx_packets++;
456 so_far += held_skb->len;
457 pkt_cnt++;
458 dev_kfree_skb_irq(held_skb);
459 held_skb = skb_dequeue(&p_ch->collect_queue);
460 if (held_skb)
461 atomic_dec(&held_skb->users);
462 } else {
463 pk_ind = 0;
464 atomic_inc(&held_skb->users);
465 skb_queue_head(&p_ch->collect_queue,held_skb);
466 }
467 }
468#ifdef IOTRACE
469 printk(KERN_INFO "%s: %s() Packed %d len %d\n",
470 p_env->ndev->name,
471 __FUNCTION__,pkt_cnt,new_skb->len);
472#endif
473 }
474 CLAW_DBF_TEXT(4,trace,"PackSKBx");
475 return new_skb;
476}
477
478/*-------------------------------------------------------------------*
479 * claw_change_mtu *
480 * *
481 *-------------------------------------------------------------------*/
482
483static int
484claw_change_mtu(struct net_device *dev, int new_mtu)
485{
486 struct claw_privbk *privptr=dev->priv;
487 int buff_size;
488#ifdef FUNCTRACE
489 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
490#endif
491#ifdef DEBUGMSG
492 printk(KERN_INFO "variable dev =\n");
493 dumpit((char *) dev, sizeof(struct net_device));
494 printk(KERN_INFO "variable new_mtu = %d\n", new_mtu);
495#endif
496 CLAW_DBF_TEXT(4,trace,"setmtu");
497 buff_size = privptr->p_env->write_size;
498 if ((new_mtu < 60) || (new_mtu > buff_size)) {
499#ifdef FUNCTRACE
500 printk(KERN_INFO "%s:%s Exit on line %d, rc=EINVAL\n",
501 dev->name,
502 __FUNCTION__, __LINE__);
503#endif
504 return -EINVAL;
505 }
506 dev->mtu = new_mtu;
507#ifdef FUNCTRACE
508 printk(KERN_INFO "%s:%s Exit on line %d\n",dev->name,
509 __FUNCTION__, __LINE__);
510#endif
511 return 0;
512} /* end of claw_change_mtu */
513
514
515/*-------------------------------------------------------------------*
516 * claw_open *
517 * *
518 *-------------------------------------------------------------------*/
519static int
520claw_open(struct net_device *dev)
521{
522
523 int rc;
524 int i;
525 unsigned long saveflags=0;
526 unsigned long parm;
527 struct claw_privbk *privptr;
528 DECLARE_WAITQUEUE(wait, current);
529 struct timer_list timer;
530 struct ccwbk *p_buf;
531
532#ifdef FUNCTRACE
533 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
534#endif
535 CLAW_DBF_TEXT(4,trace,"open");
 536 if (!dev || (dev->name[0] == 0x00)) {
537 CLAW_DBF_TEXT(2,trace,"BadDev");
538 printk(KERN_WARNING "claw: Bad device at open failing \n");
539 return -ENODEV;
540 }
541 privptr = (struct claw_privbk *)dev->priv;
542 /* allocate and initialize CCW blocks */
543 if (privptr->buffs_alloc == 0) {
544 rc=init_ccw_bk(dev);
545 if (rc) {
546 printk(KERN_INFO "%s:%s Exit on line %d, rc=ENOMEM\n",
547 dev->name,
548 __FUNCTION__, __LINE__);
549 CLAW_DBF_TEXT(2,trace,"openmem");
550 return -ENOMEM;
551 }
552 }
553 privptr->system_validate_comp=0;
554 privptr->release_pend=0;
555 if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
556 privptr->p_env->read_size=DEF_PACK_BUFSIZE;
557 privptr->p_env->write_size=DEF_PACK_BUFSIZE;
558 privptr->p_env->packing=PACKING_ASK;
559 } else {
560 privptr->p_env->packing=0;
561 privptr->p_env->read_size=CLAW_FRAME_SIZE;
562 privptr->p_env->write_size=CLAW_FRAME_SIZE;
563 }
564 claw_set_busy(dev);
565 tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet,
566 (unsigned long) &privptr->channel[READ]);
567 for ( i = 0; i < 2; i++) {
568 CLAW_DBF_TEXT_(2,trace,"opn_ch%d",i);
569 init_waitqueue_head(&privptr->channel[i].wait);
570 /* skb_queue_head_init(&p_ch->io_queue); */
571 if (i == WRITE)
572 skb_queue_head_init(
573 &privptr->channel[WRITE].collect_queue);
574 privptr->channel[i].flag_a = 0;
575 privptr->channel[i].IO_active = 0;
576 privptr->channel[i].flag &= ~CLAW_TIMER;
577 init_timer(&timer);
578 timer.function = (void *)claw_timer;
579 timer.data = (unsigned long)(&privptr->channel[i]);
580 timer.expires = jiffies + 15*HZ;
581 add_timer(&timer);
582 spin_lock_irqsave(get_ccwdev_lock(
583 privptr->channel[i].cdev), saveflags);
584 parm = (unsigned long) &privptr->channel[i];
585 privptr->channel[i].claw_state = CLAW_START_HALT_IO;
586 rc = 0;
587 add_wait_queue(&privptr->channel[i].wait, &wait);
588 rc = ccw_device_halt(
589 (struct ccw_device *)privptr->channel[i].cdev,parm);
590 set_current_state(TASK_INTERRUPTIBLE);
591 spin_unlock_irqrestore(
592 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
593 schedule();
594 set_current_state(TASK_RUNNING);
595 remove_wait_queue(&privptr->channel[i].wait, &wait);
596 if(rc != 0)
597 ccw_check_return_code(privptr->channel[i].cdev, rc);
598 if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
599 del_timer(&timer);
600 }
601 if ((((privptr->channel[READ].last_dstat |
602 privptr->channel[WRITE].last_dstat) &
603 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
604 (((privptr->channel[READ].flag |
605 privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) {
606#ifdef DEBUGMSG
607 printk(KERN_INFO "%s: channel problems during open - read:"
608 " %02x - write: %02x\n",
609 dev->name,
610 privptr->channel[READ].last_dstat,
611 privptr->channel[WRITE].last_dstat);
612#endif
613 printk(KERN_INFO "%s: remote side is not ready\n", dev->name);
614 CLAW_DBF_TEXT(2,trace,"notrdy");
615
616 for ( i = 0; i < 2; i++) {
617 spin_lock_irqsave(
618 get_ccwdev_lock(privptr->channel[i].cdev),
619 saveflags);
620 parm = (unsigned long) &privptr->channel[i];
621 privptr->channel[i].claw_state = CLAW_STOP;
622 rc = ccw_device_halt(
623 (struct ccw_device *)&privptr->channel[i].cdev,
624 parm);
625 spin_unlock_irqrestore(
626 get_ccwdev_lock(privptr->channel[i].cdev),
627 saveflags);
628 if (rc != 0) {
629 ccw_check_return_code(
630 privptr->channel[i].cdev, rc);
631 }
632 }
633 free_pages((unsigned long)privptr->p_buff_ccw,
634 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
635 if (privptr->p_env->read_size < PAGE_SIZE) {
636 free_pages((unsigned long)privptr->p_buff_read,
637 (int)pages_to_order_of_mag(
638 privptr->p_buff_read_num));
639 }
640 else {
641 p_buf=privptr->p_read_active_first;
642 while (p_buf!=NULL) {
643 free_pages((unsigned long)p_buf->p_buffer,
644 (int)pages_to_order_of_mag(
645 privptr->p_buff_pages_perread ));
646 p_buf=p_buf->next;
647 }
648 }
649 if (privptr->p_env->write_size < PAGE_SIZE ) {
650 free_pages((unsigned long)privptr->p_buff_write,
651 (int)pages_to_order_of_mag(
652 privptr->p_buff_write_num));
653 }
654 else {
655 p_buf=privptr->p_write_active_first;
656 while (p_buf!=NULL) {
657 free_pages((unsigned long)p_buf->p_buffer,
658 (int)pages_to_order_of_mag(
659 privptr->p_buff_pages_perwrite ));
660 p_buf=p_buf->next;
661 }
662 }
663 privptr->buffs_alloc = 0;
664 privptr->channel[READ].flag= 0x00;
665 privptr->channel[WRITE].flag = 0x00;
666 privptr->p_buff_ccw=NULL;
667 privptr->p_buff_read=NULL;
668 privptr->p_buff_write=NULL;
669 claw_clear_busy(dev);
670#ifdef FUNCTRACE
671 printk(KERN_INFO "%s:%s Exit on line %d, rc=EIO\n",
672 dev->name,__FUNCTION__,__LINE__);
673#endif
674 CLAW_DBF_TEXT(2,trace,"open EIO");
675 return -EIO;
676 }
677
678 /* Send SystemValidate command */
679
680 claw_clear_busy(dev);
681
682#ifdef FUNCTRACE
683 printk(KERN_INFO "%s:%s Exit on line %d, rc=0\n",
684 dev->name,__FUNCTION__,__LINE__);
685#endif
686 CLAW_DBF_TEXT(4,trace,"openok");
687 return 0;
688} /* end of claw_open */
689
690/*-------------------------------------------------------------------*
691* *
692* claw_irq_handler *
693* *
694*--------------------------------------------------------------------*/
695static void
696claw_irq_handler(struct ccw_device *cdev,
697 unsigned long intparm, struct irb *irb)
698{
699 struct chbk *p_ch = NULL;
700 struct claw_privbk *privptr = NULL;
701 struct net_device *dev = NULL;
702 struct claw_env *p_env;
703 struct chbk *p_ch_r=NULL;
704
705
706#ifdef FUNCTRACE
707 printk(KERN_INFO "%s enter \n",__FUNCTION__);
708#endif
709 CLAW_DBF_TEXT(4,trace,"clawirq");
710 /* Bypass all 'unsolicited interrupts' */
711 if (!cdev->dev.driver_data) {
712 printk(KERN_WARNING "claw: unsolicited interrupt for device:"
713 "%s received c-%02x d-%02x\n",
714 cdev->dev.bus_id,irb->scsw.cstat, irb->scsw.dstat);
715#ifdef FUNCTRACE
716 printk(KERN_INFO "claw: %s() "
717 "exit on line %d\n",__FUNCTION__,__LINE__);
718#endif
719 CLAW_DBF_TEXT(2,trace,"badirq");
720 return;
721 }
722 privptr = (struct claw_privbk *)cdev->dev.driver_data;
723
724 /* Try to extract channel from driver data. */
725 if (privptr->channel[READ].cdev == cdev)
726 p_ch = &privptr->channel[READ];
727 else if (privptr->channel[WRITE].cdev == cdev)
728 p_ch = &privptr->channel[WRITE];
729 else {
730 printk(KERN_WARNING "claw: Can't determine channel for "
731 "interrupt, device %s\n", cdev->dev.bus_id);
732 CLAW_DBF_TEXT(2,trace,"badchan");
733 return;
734 }
735 CLAW_DBF_TEXT_(4,trace,"IRQCH=%d",p_ch->flag);
736
737 dev = (struct net_device *) (p_ch->ndev);
738 p_env=privptr->p_env;
739
740#ifdef IOTRACE
741 printk(KERN_INFO "%s: interrupt for device: %04x "
742 "received c-%02x d-%02x state-%02x\n",
743 dev->name, p_ch->devno, irb->scsw.cstat,
744 irb->scsw.dstat, p_ch->claw_state);
745#endif
746
747 /* Copy interruption response block. */
748 memcpy(p_ch->irb, irb, sizeof(struct irb));
749
750 /* Check for good subchannel return code, otherwise error message */
751 if (irb->scsw.cstat && !(irb->scsw.cstat & SCHN_STAT_PCI)) {
752 printk(KERN_INFO "%s: subchannel check for device: %04x -"
753 " Sch Stat %02x Dev Stat %02x CPA - %04x\n",
754 dev->name, p_ch->devno,
755 irb->scsw.cstat, irb->scsw.dstat,irb->scsw.cpa);
756#ifdef IOTRACE
757 dumpit((char *)irb,sizeof(struct irb));
758 dumpit((char *)(unsigned long)irb->scsw.cpa,
759 sizeof(struct ccw1));
760#endif
761#ifdef FUNCTRACE
762 printk(KERN_INFO "%s:%s Exit on line %d\n",
763 dev->name,__FUNCTION__,__LINE__);
764#endif
765 CLAW_DBF_TEXT(2,trace,"chanchk");
766 /* return; */
767 }
768
769 /* Check the reason-code of a unit check */
770 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
771 ccw_check_unit_check(p_ch, irb->ecw[0]);
772 }
773
774 /* State machine to bring the connection up, down and to restart */
775 p_ch->last_dstat = irb->scsw.dstat;
776
777 switch (p_ch->claw_state) {
778 case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
779#ifdef DEBUGMSG
780 printk(KERN_INFO "%s: CLAW_STOP enter\n", dev->name);
781#endif
782 if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
783 (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
784 (p_ch->irb->scsw.stctl ==
785 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
786#ifdef FUNCTRACE
787 printk(KERN_INFO "%s:%s Exit on line %d\n",
788 dev->name,__FUNCTION__,__LINE__);
789#endif
790 return;
791 }
792 wake_up(&p_ch->wait); /* wake up claw_release */
793
794#ifdef DEBUGMSG
795 printk(KERN_INFO "%s: CLAW_STOP exit\n", dev->name);
796#endif
797#ifdef FUNCTRACE
798 printk(KERN_INFO "%s:%s Exit on line %d\n",
799 dev->name,__FUNCTION__,__LINE__);
800#endif
801 CLAW_DBF_TEXT(4,trace,"stop");
802 return;
803
804 case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */
805#ifdef DEBUGMSG
 806 		printk(KERN_INFO "%s: process CLAW_START_HALT_IO\n",
807 dev->name);
808#endif
809 if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
810 (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
811 (p_ch->irb->scsw.stctl ==
812 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
813#ifdef FUNCTRACE
814 printk(KERN_INFO "%s:%s Exit on line %d\n",
815 dev->name,__FUNCTION__,__LINE__);
816#endif
817 CLAW_DBF_TEXT(4,trace,"haltio");
818 return;
819 }
820 if (p_ch->flag == CLAW_READ) {
821 p_ch->claw_state = CLAW_START_READ;
822 wake_up(&p_ch->wait); /* wake claw_open (READ)*/
823 }
824 else
825 if (p_ch->flag == CLAW_WRITE) {
826 p_ch->claw_state = CLAW_START_WRITE;
827 /* send SYSTEM_VALIDATE */
828 claw_strt_read(dev, LOCK_NO);
829 claw_send_control(dev,
830 SYSTEM_VALIDATE_REQUEST,
831 0, 0, 0,
832 p_env->host_name,
833 p_env->adapter_name );
834 } else {
835 printk(KERN_WARNING "claw: unsolicited "
 836 				"interrupt for device: "
837 "%s received c-%02x d-%02x\n",
838 cdev->dev.bus_id,
839 irb->scsw.cstat,
840 irb->scsw.dstat);
841 return;
842 }
843#ifdef DEBUGMSG
 844 		printk(KERN_INFO "%s: process CLAW_START_HALT_IO exit\n",
845 dev->name);
846#endif
847#ifdef FUNCTRACE
848 printk(KERN_INFO "%s:%s Exit on line %d\n",
849 dev->name,__FUNCTION__,__LINE__);
850#endif
851 CLAW_DBF_TEXT(4,trace,"haltio");
852 return;
853 case CLAW_START_READ:
854 CLAW_DBF_TEXT(4,trace,"ReadIRQ");
855 if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
856 clear_bit(0, (void *)&p_ch->IO_active);
857 if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
858 (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
859 (p_ch->irb->ecw[0]) == 0)
860 {
861 privptr->stats.rx_errors++;
862 printk(KERN_INFO "%s: Restart is "
863 "required after remote "
 864 					"side recovers\n",
865 dev->name);
866 }
867#ifdef FUNCTRACE
868 printk(KERN_INFO "%s:%s Exit on line %d\n",
869 dev->name,__FUNCTION__,__LINE__);
870#endif
871 CLAW_DBF_TEXT(4,trace,"notrdy");
872 return;
873 }
874 if ((p_ch->irb->scsw.cstat & SCHN_STAT_PCI) &&
875 (p_ch->irb->scsw.dstat==0)) {
876 if (test_and_set_bit(CLAW_BH_ACTIVE,
877 (void *)&p_ch->flag_a) == 0) {
878 tasklet_schedule(&p_ch->tasklet);
879 }
880 else {
881 CLAW_DBF_TEXT(4,trace,"PCINoBH");
882 }
883#ifdef FUNCTRACE
884 printk(KERN_INFO "%s:%s Exit on line %d\n",
885 dev->name,__FUNCTION__,__LINE__);
886#endif
887 CLAW_DBF_TEXT(4,trace,"PCI_read");
888 return;
889 }
890 if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
891 (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
892 (p_ch->irb->scsw.stctl ==
893 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
894#ifdef FUNCTRACE
895 printk(KERN_INFO "%s:%s Exit on line %d\n",
896 dev->name,__FUNCTION__,__LINE__);
897#endif
898 CLAW_DBF_TEXT(4,trace,"SPend_rd");
899 return;
900 }
901 clear_bit(0, (void *)&p_ch->IO_active);
902 claw_clearbit_busy(TB_RETRY,dev);
903 if (test_and_set_bit(CLAW_BH_ACTIVE,
904 (void *)&p_ch->flag_a) == 0) {
905 tasklet_schedule(&p_ch->tasklet);
906 }
907 else {
908 CLAW_DBF_TEXT(4,trace,"RdBHAct");
909 }
910
911#ifdef DEBUGMSG
912 printk(KERN_INFO "%s: process CLAW_START_READ exit\n",
913 dev->name);
914#endif
915#ifdef FUNCTRACE
916 printk(KERN_INFO "%s:%s Exit on line %d\n",
917 dev->name,__FUNCTION__,__LINE__);
918#endif
919 CLAW_DBF_TEXT(4,trace,"RdIRQXit");
920 return;
921 case CLAW_START_WRITE:
922 if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
 923 			printk(KERN_INFO "%s: Unit Check Occurred in "
924 "write channel\n",dev->name);
925 clear_bit(0, (void *)&p_ch->IO_active);
926 if (p_ch->irb->ecw[0] & 0x80 ) {
927 printk(KERN_INFO "%s: Resetting Event "
928 "occurred:\n",dev->name);
929 init_timer(&p_ch->timer);
930 p_ch->timer.function =
931 (void *)claw_write_retry;
932 p_ch->timer.data = (unsigned long)p_ch;
933 p_ch->timer.expires = jiffies + 10*HZ;
934 add_timer(&p_ch->timer);
935 printk(KERN_INFO "%s: write connection "
936 "restarting\n",dev->name);
937 }
938#ifdef FUNCTRACE
939 printk(KERN_INFO "%s:%s Exit on line %d\n",
940 dev->name,__FUNCTION__,__LINE__);
941#endif
942 CLAW_DBF_TEXT(4,trace,"rstrtwrt");
943 return;
944 }
945 if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) {
946 clear_bit(0, (void *)&p_ch->IO_active);
947 printk(KERN_INFO "%s: Unit Exception "
 948 				"Occurred in write channel\n",
949 dev->name);
950 }
951 if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
952 (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
953 (p_ch->irb->scsw.stctl ==
954 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
955#ifdef FUNCTRACE
956 printk(KERN_INFO "%s:%s Exit on line %d\n",
957 dev->name,__FUNCTION__,__LINE__);
958#endif
959 CLAW_DBF_TEXT(4,trace,"writeUE");
960 return;
961 }
962 clear_bit(0, (void *)&p_ch->IO_active);
963 if (claw_test_and_setbit_busy(TB_TX,dev)==0) {
964 claw_write_next(p_ch);
965 claw_clearbit_busy(TB_TX,dev);
966 claw_clear_busy(dev);
967 }
968 p_ch_r=(struct chbk *)&privptr->channel[READ];
969 if (test_and_set_bit(CLAW_BH_ACTIVE,
970 (void *)&p_ch_r->flag_a) == 0) {
971 tasklet_schedule(&p_ch_r->tasklet);
972 }
973
974#ifdef DEBUGMSG
975 printk(KERN_INFO "%s: process CLAW_START_WRITE exit\n",
976 dev->name);
977#endif
978#ifdef FUNCTRACE
979 printk(KERN_INFO "%s:%s Exit on line %d\n",
980 dev->name,__FUNCTION__,__LINE__);
981#endif
982 CLAW_DBF_TEXT(4,trace,"StWtExit");
983 return;
984 default:
985 printk(KERN_WARNING "%s: wrong selection code - irq "
986 "state=%d\n",dev->name,p_ch->claw_state);
987#ifdef FUNCTRACE
988 printk(KERN_INFO "%s:%s Exit on line %d\n",
989 dev->name,__FUNCTION__,__LINE__);
990#endif
991 CLAW_DBF_TEXT(2,trace,"badIRQ");
992 return;
993 }
994
995} /* end of claw_irq_handler */
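/*
 * State machine summary (a reading aid derived from the switch above):
 *
 *   CLAW_STOP            final status -> wake up claw_release()
 *   CLAW_START_HALT_IO   final status on the read channel -> go to
 *                        CLAW_START_READ and wake claw_open(); on the
 *                        write channel -> go to CLAW_START_WRITE and
 *                        send SYSTEM_VALIDATE_REQUEST
 *   CLAW_START_READ      unit check -> count rx_errors and wait for the
 *                        remote side; PCI or final status -> schedule
 *                        the read tasklet (unpack_read)
 *   CLAW_START_WRITE     unit check with sense 0x80 -> retry the write
 *                        via a 10s claw_write_retry timer; final status
 *                        -> claw_write_next() and kick the read tasklet
 */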
996
997
998/*-------------------------------------------------------------------*
999* claw_irq_tasklet *
1000* *
1001*--------------------------------------------------------------------*/
1002static void
1003claw_irq_tasklet ( unsigned long data )
1004{
1005 struct chbk * p_ch;
1006 struct net_device *dev;
1007 struct claw_privbk * privptr;
1008
1009 p_ch = (struct chbk *) data;
1010 dev = (struct net_device *)p_ch->ndev;
1011#ifdef FUNCTRACE
1012 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
1013#endif
1014#ifdef DEBUGMSG
1015 printk(KERN_INFO "%s: variable p_ch =\n",dev->name);
1016 dumpit((char *) p_ch, sizeof(struct chbk));
1017#endif
1018 CLAW_DBF_TEXT(4,trace,"IRQtask");
1019
1020 privptr = (struct claw_privbk *) dev->priv;
1021
1022#ifdef DEBUGMSG
1023 printk(KERN_INFO "%s: bh routine - state-%02x\n" ,
1024 dev->name, p_ch->claw_state);
1025#endif
1026
1027 unpack_read(dev);
1028 clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
1029 CLAW_DBF_TEXT(4,trace,"TskletXt");
1030#ifdef FUNCTRACE
1031 printk(KERN_INFO "%s:%s Exit on line %d\n",
1032 dev->name,__FUNCTION__,__LINE__);
1033#endif
1034 return;
1035} /* end of claw_irq_tasklet */
1036
1037/*-------------------------------------------------------------------*
1038* claw_release *
1039* *
1040*--------------------------------------------------------------------*/
1041static int
1042claw_release(struct net_device *dev)
1043{
1044 int rc;
1045 int i;
1046 unsigned long saveflags;
1047 unsigned long parm;
1048 struct claw_privbk *privptr;
1049 DECLARE_WAITQUEUE(wait, current);
1050 struct ccwbk* p_this_ccw;
1051 struct ccwbk* p_buf;
1052
1053 if (!dev)
1054 return 0;
1055 privptr = (struct claw_privbk *) dev->priv;
1056 if (!privptr)
1057 return 0;
1058#ifdef FUNCTRACE
1059 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
1060#endif
1061 CLAW_DBF_TEXT(4,trace,"release");
1062#ifdef DEBUGMSG
1063 printk(KERN_INFO "%s: variable dev =\n",dev->name);
1064 dumpit((char *) dev, sizeof(struct net_device));
1065 printk(KERN_INFO "Priv Buffalloc %d\n",privptr->buffs_alloc);
1066 printk(KERN_INFO "Priv p_buff_ccw = %p\n",&privptr->p_buff_ccw);
1067#endif
1068 privptr->release_pend=1;
1069 claw_setbit_busy(TB_STOP,dev);
1070 for ( i = 1; i >=0 ; i--) {
1071 spin_lock_irqsave(
1072 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
1073 /* del_timer(&privptr->channel[READ].timer); */
1074 privptr->channel[i].claw_state = CLAW_STOP;
1075 privptr->channel[i].IO_active = 0;
1076 parm = (unsigned long) &privptr->channel[i];
1077 if (i == WRITE)
1078 claw_purge_skb_queue(
1079 &privptr->channel[WRITE].collect_queue);
1080 rc = ccw_device_halt (privptr->channel[i].cdev, parm);
1081 if (privptr->system_validate_comp==0x00) /* never opened? */
1082 init_waitqueue_head(&privptr->channel[i].wait);
1083 add_wait_queue(&privptr->channel[i].wait, &wait);
1084 set_current_state(TASK_INTERRUPTIBLE);
1085 spin_unlock_irqrestore(
1086 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
1087 schedule();
1088 set_current_state(TASK_RUNNING);
1089 remove_wait_queue(&privptr->channel[i].wait, &wait);
1090 if (rc != 0) {
1091 ccw_check_return_code(privptr->channel[i].cdev, rc);
1092 }
1093 }
1094 if (privptr->pk_skb != NULL) {
1095 dev_kfree_skb(privptr->pk_skb);
1096 privptr->pk_skb = NULL;
1097 }
1098 if(privptr->buffs_alloc != 1) {
1099#ifdef FUNCTRACE
1100 printk(KERN_INFO "%s:%s Exit on line %d\n",
1101 dev->name,__FUNCTION__,__LINE__);
1102#endif
1103 CLAW_DBF_TEXT(4,trace,"none2fre");
1104 return 0;
1105 }
1106 CLAW_DBF_TEXT(4,trace,"freebufs");
1107 if (privptr->p_buff_ccw != NULL) {
1108 free_pages((unsigned long)privptr->p_buff_ccw,
1109 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
1110 }
1111 CLAW_DBF_TEXT(4,trace,"freeread");
1112 if (privptr->p_env->read_size < PAGE_SIZE) {
1113 if (privptr->p_buff_read != NULL) {
1114 free_pages((unsigned long)privptr->p_buff_read,
1115 (int)pages_to_order_of_mag(privptr->p_buff_read_num));
1116 }
1117 }
1118 else {
1119 p_buf=privptr->p_read_active_first;
1120 while (p_buf!=NULL) {
1121 free_pages((unsigned long)p_buf->p_buffer,
1122 (int)pages_to_order_of_mag(
1123 privptr->p_buff_pages_perread ));
1124 p_buf=p_buf->next;
1125 }
1126 }
1127 CLAW_DBF_TEXT(4,trace,"freewrit");
1128 if (privptr->p_env->write_size < PAGE_SIZE ) {
1129 free_pages((unsigned long)privptr->p_buff_write,
1130 (int)pages_to_order_of_mag(privptr->p_buff_write_num));
1131 }
1132 else {
1133 p_buf=privptr->p_write_active_first;
1134 while (p_buf!=NULL) {
1135 free_pages((unsigned long)p_buf->p_buffer,
1136 (int)pages_to_order_of_mag(
1137 privptr->p_buff_pages_perwrite ));
1138 p_buf=p_buf->next;
1139 }
1140 }
1141 CLAW_DBF_TEXT(4,trace,"clearptr");
1142 privptr->buffs_alloc = 0;
1143 privptr->p_buff_ccw=NULL;
1144 privptr->p_buff_read=NULL;
1145 privptr->p_buff_write=NULL;
1146 privptr->system_validate_comp=0;
1147 privptr->release_pend=0;
1148 /* Remove any writes that were pending and reset all reads */
1149 p_this_ccw=privptr->p_read_active_first;
1150 while (p_this_ccw!=NULL) {
1151 p_this_ccw->header.length=0xffff;
1152 p_this_ccw->header.opcode=0xff;
1153 p_this_ccw->header.flag=0x00;
1154 p_this_ccw=p_this_ccw->next;
1155 }
1156
1157 while (privptr->p_write_active_first!=NULL) {
1158 p_this_ccw=privptr->p_write_active_first;
1159 p_this_ccw->header.flag=CLAW_PENDING;
1160 privptr->p_write_active_first=p_this_ccw->next;
1161 p_this_ccw->next=privptr->p_write_free_chain;
1162 privptr->p_write_free_chain=p_this_ccw;
1163 ++privptr->write_free_count;
1164 }
1165 privptr->p_write_active_last=NULL;
1166 privptr->mtc_logical_link = -1;
1167 privptr->mtc_skipping = 1;
1168 privptr->mtc_offset=0;
1169
1170 if (((privptr->channel[READ].last_dstat |
1171 privptr->channel[WRITE].last_dstat) &
1172 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
1173 printk(KERN_WARNING "%s: channel problems during close - "
1174 "read: %02x - write: %02x\n",
1175 dev->name,
1176 privptr->channel[READ].last_dstat,
1177 privptr->channel[WRITE].last_dstat);
1178 CLAW_DBF_TEXT(2,trace,"badclose");
1179 }
1180#ifdef FUNCTRACE
1181 printk(KERN_INFO "%s:%s Exit on line %d\n",
1182 dev->name,__FUNCTION__,__LINE__);
1183#endif
1184 CLAW_DBF_TEXT(4,trace,"rlsexit");
1185 return 0;
1186} /* end of claw_release */
1187
1188
1189
1190/*-------------------------------------------------------------------*
1191* claw_write_retry *
1192* *
1193*--------------------------------------------------------------------*/
1194
1195static void
1196claw_write_retry ( struct chbk *p_ch )
1197{
1198
1199 struct net_device *dev=p_ch->ndev;
1200
1201
1202#ifdef FUNCTRACE
1203 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
1204 printk(KERN_INFO "claw: variable p_ch =\n");
1205 dumpit((char *) p_ch, sizeof(struct chbk));
1206#endif
1207 CLAW_DBF_TEXT(4,trace,"w_retry");
1208 if (p_ch->claw_state == CLAW_STOP) {
1209#ifdef FUNCTRACE
1210 printk(KERN_INFO "%s:%s Exit on line %d\n",
1211 dev->name,__FUNCTION__,__LINE__);
1212#endif
1213 return;
1214 }
1215#ifdef DEBUGMSG
1216 printk( KERN_INFO "%s:%s state-%02x\n" ,
1217 dev->name,
1218 __FUNCTION__,
1219 p_ch->claw_state);
1220#endif
1221 claw_strt_out_IO( dev );
1222#ifdef FUNCTRACE
1223 printk(KERN_INFO "%s:%s Exit on line %d\n",
1224 dev->name,__FUNCTION__,__LINE__);
1225#endif
1226 CLAW_DBF_TEXT(4,trace,"rtry_xit");
1227 return;
1228} /* end of claw_write_retry */
1229
1230
1231/*-------------------------------------------------------------------*
1232* claw_write_next *
1233* *
1234*--------------------------------------------------------------------*/
1235
1236static void
1237claw_write_next ( struct chbk * p_ch )
1238{
1239
1240 struct net_device *dev;
1241 struct claw_privbk *privptr=NULL;
1242 struct sk_buff *pk_skb;
1243 int rc;
1244
1245#ifdef FUNCTRACE
1246 printk(KERN_INFO "%s:%s Enter \n",p_ch->ndev->name,__FUNCTION__);
1247 printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name);
1248 dumpit((char *) p_ch, sizeof(struct chbk));
1249#endif
1250 CLAW_DBF_TEXT(4,trace,"claw_wrt");
1251 if (p_ch->claw_state == CLAW_STOP)
1252 return;
1253 dev = (struct net_device *) p_ch->ndev;
1254 privptr = (struct claw_privbk *) dev->priv;
1255 claw_free_wrt_buf( dev );
1256 if ((privptr->write_free_count > 0) &&
1257 (skb_queue_len(&p_ch->collect_queue) > 0)) {
1258 pk_skb = claw_pack_skb(privptr);
1259 while (pk_skb != NULL) {
1260 rc = claw_hw_tx( pk_skb, dev,1);
1261 if (privptr->write_free_count > 0) {
1262 pk_skb = claw_pack_skb(privptr);
1263 } else
1264 pk_skb = NULL;
1265 }
1266 }
1267 if (privptr->p_write_active_first!=NULL) {
1268 claw_strt_out_IO(dev);
1269 }
1270
1271#ifdef FUNCTRACE
1272 printk(KERN_INFO "%s:%s Exit on line %d\n",
1273 dev->name,__FUNCTION__,__LINE__);
1274#endif
1275 return;
1276} /* end of claw_write_next */
1277
1278/*-------------------------------------------------------------------*
1279* *
1280* claw_timer *
1281*--------------------------------------------------------------------*/
1282
1283static void
1284claw_timer ( struct chbk * p_ch )
1285{
1286#ifdef FUNCTRACE
1287 printk(KERN_INFO "%s:%s Entry\n",p_ch->ndev->name,__FUNCTION__);
1288 printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name);
1289 dumpit((char *) p_ch, sizeof(struct chbk));
1290#endif
1291 CLAW_DBF_TEXT(4,trace,"timer");
1292 p_ch->flag |= CLAW_TIMER;
1293 wake_up(&p_ch->wait);
1294#ifdef FUNCTRACE
1295 printk(KERN_INFO "%s:%s Exit on line %d\n",
1296 p_ch->ndev->name,__FUNCTION__,__LINE__);
1297#endif
1298 return;
1299} /* end of claw_timer */
1300
1301
1302/*
1303*
1304* internal support functions
1305*/
1306
1307
1308/*-------------------------------------------------------------------*
1309* *
1310* pages_to_order_of_mag *
1311* *
1312* takes a number of pages from 1 to 512 and returns the smallest   *
1313* base-2 order n with 2^n >= num_pages, which is the form           *
1314* __get_free_pages() expects; the maximum supported order is 9      *
1315*--------------------------------------------------------------------*/
1316
1317static inline int
1318pages_to_order_of_mag(int num_of_pages)
1319{
1320 int order_of_mag=1; /* assume 2 pages */
1321 int nump=2;
1322#ifdef FUNCTRACE
1323 printk(KERN_INFO "%s Enter pages = %d \n",__FUNCTION__,num_of_pages);
1324#endif
1325 CLAW_DBF_TEXT_(5,trace,"pages%d",num_of_pages);
1326 if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */
1327 /* 512 pages = 2Meg on 4k page systems */
1328 if (num_of_pages >= 512) {return 9; }
1329 /* we have two or more pages order is at least 1 */
1330 for (nump=2 ;nump <= 512;nump*=2) {
1331 if (num_of_pages <= nump)
1332 break;
1333 order_of_mag +=1;
1334 }
1335 if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */
1336#ifdef FUNCTRACE
1337 printk(KERN_INFO "%s Exit on line %d, order = %d\n",
1338 __FUNCTION__,__LINE__, order_of_mag);
1339#endif
1340 CLAW_DBF_TEXT_(5,trace,"mag%d",order_of_mag);
1341 return order_of_mag;
1342}
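/*
 * Worked examples (order n means __get_free_pages() returns 2^n
 * contiguous pages):
 *
 *   pages_to_order_of_mag(1)   -> 0    (1 page)
 *   pages_to_order_of_mag(2)   -> 1    (2 pages)
 *   pages_to_order_of_mag(3)   -> 2    (4 pages, one unused)
 *   pages_to_order_of_mag(512) -> 9    (512 pages, the maximum)
 *
 * i.e. ceil(log2(num_of_pages)) clamped to 9; later kernels provide
 * get_count_order() for the same computation.
 */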
1343
1344/*-------------------------------------------------------------------*
1345* *
1346* add_claw_reads *
1347* *
1348*--------------------------------------------------------------------*/
1349static int
1350add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1351 struct ccwbk* p_last)
1352{
1353 struct claw_privbk *privptr;
1354 struct ccw1 temp_ccw;
1355 struct endccw * p_end;
1356#ifdef IOTRACE
1357 struct ccwbk* p_buf;
1358#endif
1359#ifdef FUNCTRACE
1360 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
1361#endif
1362#ifdef DEBUGMSG
1363 printk(KERN_INFO "dev\n");
1364 dumpit((char *) dev, sizeof(struct net_device));
1365 printk(KERN_INFO "p_first\n");
1366 dumpit((char *) p_first, sizeof(struct ccwbk));
1367 printk(KERN_INFO "p_last\n");
1368 dumpit((char *) p_last, sizeof(struct ccwbk));
1369#endif
1370 CLAW_DBF_TEXT(4,trace,"addreads");
1371 privptr = dev->priv;
1372 p_end = privptr->p_end_ccw;
1373
1374 	/* first CCW and last CCW contain a new set of read channel programs
1375 	 * to append to the running channel programs
1376 */
1377 if ( p_first==NULL) {
1378#ifdef FUNCTRACE
1379 printk(KERN_INFO "%s:%s Exit on line %d\n",
1380 dev->name,__FUNCTION__,__LINE__);
1381#endif
1382 CLAW_DBF_TEXT(4,trace,"addexit");
1383 return 0;
1384 }
1385
1386 /* set up ending CCW sequence for this segment */
1387 if (p_end->read1) {
1388 p_end->read1=0x00; /* second ending CCW is now active */
1389 /* reset ending CCWs and setup TIC CCWs */
1390 p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1391 p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1392 p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1);
1393 p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1);
1394 p_end->read2_nop2.cda=0;
1395 p_end->read2_nop2.count=1;
1396 }
1397 else {
1398 p_end->read1=0x01; /* first ending CCW is now active */
1399 /* reset ending CCWs and setup TIC CCWs */
1400 p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1401 p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1402 p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
1403 p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
1404 p_end->read1_nop2.cda=0;
1405 p_end->read1_nop2.count=1;
1406 }
1407
1408 if ( privptr-> p_read_active_first ==NULL ) {
1409#ifdef DEBUGMSG
1410 		printk(KERN_INFO "%s:%s p_read_active_first == NULL \n",
1411 dev->name,__FUNCTION__);
1412 printk(KERN_INFO "%s:%s Read active first/last changed \n",
1413 dev->name,__FUNCTION__);
1414#endif
1415 privptr-> p_read_active_first= p_first; /* set new first */
1416 privptr-> p_read_active_last = p_last; /* set new last */
1417 }
1418 else {
1419
1420#ifdef DEBUGMSG
1421 printk(KERN_INFO "%s:%s Read in progress \n",
1422 dev->name,__FUNCTION__);
1423#endif
1424 /* set up TIC ccw */
1425 temp_ccw.cda= (__u32)__pa(&p_first->read);
1426 temp_ccw.count=0;
1427 temp_ccw.flags=0;
1428 temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;
1429
1430
1431 if (p_end->read1) {
1432
1433 		/* The first set of ending CCWs is chained to the new     */
1434 		/* read chain, so the second set is chained to the active */
1435 		/* chain.  Modify the second set to point to the new read */
1436 		/* chain and update its TIC CCW in a single memcpy, so    */
1437 		/* the channel never fetches a CCW that is only half      */
1438 		/* updated.                                                */
1439 memcpy( &p_end->read2_nop2, &temp_ccw ,
1440 sizeof(struct ccw1));
1441 privptr->p_read_active_last->r_TIC_1.cda=
1442 (__u32)__pa(&p_first->read);
1443 privptr->p_read_active_last->r_TIC_2.cda=
1444 (__u32)__pa(&p_first->read);
1445 }
1446 else {
1447 /* make sure we update the CCW so channel doesn't */
1448 /* fetch it when it is only half done */
1449 memcpy( &p_end->read1_nop2, &temp_ccw ,
1450 sizeof(struct ccw1));
1451 privptr->p_read_active_last->r_TIC_1.cda=
1452 (__u32)__pa(&p_first->read);
1453 privptr->p_read_active_last->r_TIC_2.cda=
1454 (__u32)__pa(&p_first->read);
1455 }
1456 /* chain in new set of blocks */
1457 privptr->p_read_active_last->next = p_first;
1458 privptr->p_read_active_last=p_last;
1459 } /* end of if ( privptr-> p_read_active_first ==NULL) */
1460#ifdef IOTRACE
1461 printk(KERN_INFO "%s:%s dump p_last CCW BK \n",dev->name,__FUNCTION__);
1462 dumpit((char *)p_last, sizeof(struct ccwbk));
1463 printk(KERN_INFO "%s:%s dump p_end CCW BK \n",dev->name,__FUNCTION__);
1464 dumpit((char *)p_end, sizeof(struct endccw));
1465
1466 printk(KERN_INFO "%s:%s dump p_first CCW BK \n",dev->name,__FUNCTION__);
1467 dumpit((char *)p_first, sizeof(struct ccwbk));
1468 printk(KERN_INFO "%s:%s Dump Active CCW chain \n",
1469 dev->name,__FUNCTION__);
1470 p_buf=privptr->p_read_active_first;
1471 while (p_buf!=NULL) {
1472 dumpit((char *)p_buf, sizeof(struct ccwbk));
1473 p_buf=p_buf->next;
1474 }
1475#endif
1476#ifdef FUNCTRACE
1477 printk(KERN_INFO "%s:%s Exit on line %d\n",
1478 dev->name,__FUNCTION__,__LINE__);
1479#endif
1480 CLAW_DBF_TEXT(4,trace,"addexit");
1481 return 0;
1482} /* end of add_claw_reads */
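/*
 * Note on the ending CCWs used above: the read channel program keeps
 * two alternate NOP/READFF ending sequences, selected by p_end->read1.
 * A new segment is wired up against the idle ending first; the running
 * chain is then redirected by rewriting a single ccw1 (the memcpy of
 * temp_ccw turns the idle ending's READFF into a TIC to the new chain),
 * so the channel can never fetch a CCW that is only half updated.
 */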
1483
1484/*-------------------------------------------------------------------*
1485 * ccw_check_return_code *
1486 * *
1487 *-------------------------------------------------------------------*/
1488
1489static inline void
1490ccw_check_return_code(struct ccw_device *cdev, int return_code)
1491{
1492#ifdef FUNCTRACE
1493 printk(KERN_INFO "%s: %s() > enter \n",
1494 cdev->dev.bus_id,__FUNCTION__);
1495#endif
1496 CLAW_DBF_TEXT(4,trace,"ccwret");
1497#ifdef DEBUGMSG
1498 printk(KERN_INFO "variable cdev =\n");
1499 dumpit((char *) cdev, sizeof(struct ccw_device));
1500 printk(KERN_INFO "variable return_code = %d\n",return_code);
1501#endif
1502 if (return_code != 0) {
1503 switch (return_code) {
1504 case -EBUSY:
1505 printk(KERN_INFO "%s: Busy !\n",
1506 cdev->dev.bus_id);
1507 break;
1508 case -ENODEV:
1509 printk(KERN_EMERG "%s: Missing device called "
1510 "for IO ENODEV\n", cdev->dev.bus_id);
1511 break;
1512 case -EIO:
1513 printk(KERN_EMERG "%s: Status pending... EIO \n",
1514 cdev->dev.bus_id);
1515 break;
1516 case -EINVAL:
1517 printk(KERN_EMERG "%s: Invalid Dev State EINVAL \n",
1518 cdev->dev.bus_id);
1519 break;
1520 default:
1521 printk(KERN_EMERG "%s: Unknown error in "
1522 "Do_IO %d\n",cdev->dev.bus_id, return_code);
1523 }
1524 }
1525#ifdef FUNCTRACE
1526 printk(KERN_INFO "%s: %s() > exit on line %d\n",
1527 cdev->dev.bus_id,__FUNCTION__,__LINE__);
1528#endif
1529 CLAW_DBF_TEXT(4,trace,"ccwret");
1530} /* end of ccw_check_return_code */
1531
1532/*-------------------------------------------------------------------*
1533* ccw_check_unit_check *
1534*--------------------------------------------------------------------*/
1535
1536static inline void
1537ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1538{
1539 struct net_device *dev = p_ch->ndev;
1540
1541#ifdef FUNCTRACE
1542 printk(KERN_INFO "%s: %s() > enter\n",dev->name,__FUNCTION__);
1543#endif
1544#ifdef DEBUGMSG
1545 printk(KERN_INFO "%s: variable dev =\n",dev->name);
1546 dumpit((char *)dev, sizeof(struct net_device));
1547 printk(KERN_INFO "%s: variable sense =\n",dev->name);
1548 dumpit((char *)&sense, 2);
1549#endif
1550 CLAW_DBF_TEXT(4,trace,"unitchek");
1551
1552 	printk(KERN_INFO "%s: Unit Check with sense byte: 0x%02x\n",
1553 dev->name, sense);
1554
1555 if (sense & 0x40) {
1556 if (sense & 0x01) {
1557 printk(KERN_WARNING "%s: Interface disconnect or "
1558 "Selective reset "
1559 "occurred (remote side)\n", dev->name);
1560 }
1561 else {
1562 			printk(KERN_WARNING "%s: System reset occurred"
1563 " (remote side)\n", dev->name);
1564 }
1565 }
1566 else if (sense & 0x20) {
1567 if (sense & 0x04) {
1568 printk(KERN_WARNING "%s: Data-streaming "
1569 				"timeout\n", dev->name);
1570 }
1571 else {
1572 printk(KERN_WARNING "%s: Data-transfer parity"
1573 " error\n", dev->name);
1574 }
1575 }
1576 else if (sense & 0x10) {
1577 if (sense & 0x20) {
1578 printk(KERN_WARNING "%s: Hardware malfunction "
1579 "(remote side)\n", dev->name);
1580 }
1581 else {
1582 printk(KERN_WARNING "%s: read-data parity error "
1583 "(remote side)\n", dev->name);
1584 }
1585 }
1586
1587#ifdef FUNCTRACE
1588 printk(KERN_INFO "%s: %s() exit on line %d\n",
1589 dev->name,__FUNCTION__,__LINE__);
1590#endif
1591} /* end of ccw_check_unit_check */
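/*
 * Sense-byte decoding used above, as tested by the if/else chain:
 *
 *   0x40|0x01  interface disconnect / selective reset (remote side)
 *   0x40       system reset (remote side)
 *   0x20|0x04  data-streaming timeout
 *   0x20       data-transfer parity error
 *   0x10       read-data parity error (remote side)
 *
 * (The hardware-malfunction message in the 0x10 branch is dead code:
 * its sense & 0x20 test can never be true there, because any sense
 * value with 0x20 set was already consumed by the branch above.)
 */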
1592
1593
1594
1595/*-------------------------------------------------------------------*
1596* Dump buffer format *
1597* *
1598*--------------------------------------------------------------------*/
1599#ifdef DEBUG
1600static void
1601dumpit(char* buf, int len)
1602{
1603
1604 __u32 ct, sw, rm, dup;
1605 char *ptr, *rptr;
1606 char tbuf[82], tdup[82];
1607#if (CONFIG_ARCH_S390X)
1608 char addr[22];
1609#else
1610 char addr[12];
1611#endif
1612 char boff[12];
1613 char bhex[82], duphex[82];
1614 char basc[40];
1615
1616 sw = 0;
1617 rptr =ptr=buf;
1618 rm = 16;
1619 duphex[0] = 0x00;
1620 dup = 0;
1621 for ( ct=0; ct < len; ct++, ptr++, rptr++ ) {
1622 if (sw == 0) {
1623#if (CONFIG_ARCH_S390X)
1624 sprintf(addr, "%16.16lX",(unsigned long)rptr);
1625#else
1626 sprintf(addr, "%8.8X",(__u32)rptr);
1627#endif
1628 sprintf(boff, "%4.4X", (__u32)ct);
1629 bhex[0] = '\0';
1630 basc[0] = '\0';
1631 }
1632 if ((sw == 4) || (sw == 12)) {
1633 strcat(bhex, " ");
1634 }
1635 if (sw == 8) {
1636 strcat(bhex, " ");
1637 }
1638#if (CONFIG_ARCH_S390X)
1639 sprintf(tbuf,"%2.2lX", (unsigned long)*ptr);
1640#else
1641 sprintf(tbuf,"%2.2X", (__u32)*ptr);
1642#endif
1643 tbuf[2] = '\0';
1644 strcat(bhex, tbuf);
1645 if ((0!=isprint(*ptr)) && (*ptr >= 0x20)) {
1646 basc[sw] = *ptr;
1647 }
1648 else {
1649 basc[sw] = '.';
1650 }
1651 basc[sw+1] = '\0';
1652 sw++;
1653 rm--;
1654 if (sw==16) {
1655 if ((strcmp(duphex, bhex)) !=0) {
1656 if (dup !=0) {
1657 sprintf(tdup,"Duplicate as above to"
1658 " %s", addr);
1659 printk( KERN_INFO " "
1660 " --- %s ---\n",tdup);
1661 }
1662 printk( KERN_INFO " %s (+%s) : %s [%s]\n",
1663 addr, boff, bhex, basc);
1664 dup = 0;
1665 strcpy(duphex, bhex);
1666 }
1667 else {
1668 dup++;
1669 }
1670 sw = 0;
1671 rm = 16;
1672 }
1673 } /* endfor */
1674
1675 if (sw != 0) {
1676 for ( ; rm > 0; rm--, sw++ ) {
1677 if ((sw==4) || (sw==12)) strcat(bhex, " ");
1678 if (sw==8) strcat(bhex, " ");
1679 strcat(bhex, " ");
1680 strcat(basc, " ");
1681 }
1682 if (dup !=0) {
1683 sprintf(tdup,"Duplicate as above to %s", addr);
1684 printk( KERN_INFO " --- %s ---\n",
1685 tdup);
1686 }
1687 printk( KERN_INFO " %s (+%s) : %s [%s]\n",
1688 addr, boff, bhex, basc);
1689 }
1690 else {
1691 if (dup >=1) {
1692 sprintf(tdup,"Duplicate as above to %s", addr);
1693 printk( KERN_INFO " --- %s ---\n",
1694 tdup);
1695 }
1696 if (dup !=0) {
1697 printk( KERN_INFO " %s (+%s) : %s [%s]\n",
1698 addr, boff, bhex, basc);
1699 }
1700 }
1701 return;
1702
1703} /* end of dumpit */
1704#endif
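/*
 * dumpit() output, one line per 16 bytes (hypothetical data shown):
 *
 *   00000000F5D2C000 (+0010) : 00010203 04050607  08090A0B 0C0D0E0F [................]
 *
 * i.e. "address (+offset) : hex [ascii]"; consecutive identical
 * 16-byte lines are folded into one "Duplicate as above to ..." line.
 */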
1705
1706/*-------------------------------------------------------------------*
1707* find_link *
1708*--------------------------------------------------------------------*/
1709static int
1710find_link(struct net_device *dev, char *host_name, char *ws_name )
1711{
1712 struct claw_privbk *privptr;
1713 struct claw_env *p_env;
1714 int rc=0;
1715
1716#ifdef FUNCTRACE
1717 printk(KERN_INFO "%s:%s > enter \n",dev->name,__FUNCTION__);
1718#endif
1719 CLAW_DBF_TEXT(2,setup,"findlink");
1720#ifdef DEBUGMSG
1721 printk(KERN_INFO "%s: variable dev = \n",dev->name);
1722 dumpit((char *) dev, sizeof(struct net_device));
1723 printk(KERN_INFO "%s: variable host_name = %s\n",dev->name, host_name);
1724 printk(KERN_INFO "%s: variable ws_name = %s\n",dev->name, ws_name);
1725#endif
1726 privptr=dev->priv;
1727 p_env=privptr->p_env;
1728 switch (p_env->packing)
1729 {
1730 case PACKING_ASK:
1731 if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) ||
1732 (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 ))
1733 rc = EINVAL;
1734 break;
1735 case DO_PACKED:
1736 case PACK_SEND:
1737 if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) ||
1738 (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 ))
1739 rc = EINVAL;
1740 break;
1741 default:
1742 if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) ||
1743 (memcmp(p_env->api_type , ws_name, 8)!=0))
1744 rc = EINVAL;
1745 break;
1746 }
1747
1748#ifdef FUNCTRACE
1749 printk(KERN_INFO "%s:%s Exit on line %d\n",
1750 dev->name,__FUNCTION__,__LINE__);
1751#endif
1752 	return rc;
1753} /* end of find_link */
1754
1755/*-------------------------------------------------------------------*
1756 * claw_hw_tx *
1757 * *
1758 * *
1759 *-------------------------------------------------------------------*/
1760
1761static int
1762claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1763{
1764 int rc=0;
1765 struct claw_privbk *privptr;
1766 struct ccwbk *p_this_ccw;
1767 struct ccwbk *p_first_ccw;
1768 struct ccwbk *p_last_ccw;
1769 __u32 numBuffers;
1770 signed long len_of_data;
1771 unsigned long bytesInThisBuffer;
1772 unsigned char *pDataAddress;
1773 struct endccw *pEnd;
1774 struct ccw1 tempCCW;
1775 struct chbk *p_ch;
1776 struct claw_env *p_env;
1777 int lock;
1778 struct clawph *pk_head;
1779 struct chbk *ch;
1780#ifdef IOTRACE
1781 struct ccwbk *p_buf;
1782#endif
1783#ifdef FUNCTRACE
1784 printk(KERN_INFO "%s: %s() > enter\n",dev->name,__FUNCTION__);
1785#endif
1786 CLAW_DBF_TEXT(4,trace,"hw_tx");
1787#ifdef DEBUGMSG
1788 printk(KERN_INFO "%s: variable dev skb =\n",dev->name);
1789 dumpit((char *) skb, sizeof(struct sk_buff));
1790 printk(KERN_INFO "%s: variable dev =\n",dev->name);
1791 dumpit((char *) dev, sizeof(struct net_device));
1792 printk(KERN_INFO "%s: variable linkid = %ld\n",dev->name,linkid);
1793#endif
1794 privptr = (struct claw_privbk *) (dev->priv);
1795 p_ch=(struct chbk *)&privptr->channel[WRITE];
1796 p_env =privptr->p_env;
1797#ifdef IOTRACE
1798 printk(KERN_INFO "%s: %s() dump sk_buff \n",dev->name,__FUNCTION__);
1799 dumpit((char *)skb ,sizeof(struct sk_buff));
1800#endif
1801 	claw_free_wrt_buf(dev);	/* Clean up free chain if possible */
1802 /* scan the write queue to free any completed write packets */
1803 p_first_ccw=NULL;
1804 p_last_ccw=NULL;
1805 if ((p_env->packing >= PACK_SEND) &&
1806 (skb->cb[1] != 'P')) {
1807 skb_push(skb,sizeof(struct clawph));
1808 pk_head=(struct clawph *)skb->data;
1809 pk_head->len=skb->len-sizeof(struct clawph);
1810 		if (pk_head->len%4) {	/* pad length up to a 4-byte multiple */
1811 			skb_pad(skb,4-(pk_head->len%4));
1812 			skb_put(skb,4-(pk_head->len%4));
1813 			pk_head->len+= 4-(pk_head->len%4);
1814 		}
1815 if (p_env->packing == DO_PACKED)
1816 pk_head->link_num = linkid;
1817 else
1818 pk_head->link_num = 0;
1819 pk_head->flag = 0x00;
1820 skb_pad(skb,4);
1821 skb->cb[1] = 'P';
1822 }
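	/*
	 * Example of the padding above (hypothetical sizes): a payload of
	 * 61 bytes gives pk_head->len = 61, so the pad is 4 - (61 % 4) = 3
	 * and the packed record becomes 64 bytes.
	 */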
1823 if (linkid == 0) {
1824 if (claw_check_busy(dev)) {
1825 if (privptr->write_free_count!=0) {
1826 claw_clear_busy(dev);
1827 }
1828 else {
1829 claw_strt_out_IO(dev );
1830 claw_free_wrt_buf( dev );
1831 if (privptr->write_free_count==0) {
1832#ifdef IOTRACE
1833 printk(KERN_INFO "%s: "
1834 "(claw_check_busy) no free write "
1835 "buffers\n", dev->name);
1836#endif
1837 ch = &privptr->channel[WRITE];
1838 atomic_inc(&skb->users);
1839 skb_queue_tail(&ch->collect_queue, skb);
1840 goto Done;
1841 }
1842 else {
1843 claw_clear_busy(dev);
1844 }
1845 }
1846 }
1847 /* tx lock */
1848 if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
1849#ifdef DEBUGMSG
1850 printk(KERN_INFO "%s: busy (claw_test_and_setbit_"
1851 "busy)\n", dev->name);
1852#endif
1853 ch = &privptr->channel[WRITE];
1854 atomic_inc(&skb->users);
1855 skb_queue_tail(&ch->collect_queue, skb);
1856 claw_strt_out_IO(dev );
1857 rc=-EBUSY;
1858 goto Done2;
1859 }
1860 }
1861 /* See how many write buffers are required to hold this data */
1862 numBuffers= ( skb->len + privptr->p_env->write_size - 1) /
1863 ( privptr->p_env->write_size);
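	/*
	 * Ceiling division: e.g. an skb of 9000 bytes with a 4096-byte
	 * write_size needs (9000 + 4095) / 4096 = 3 buffers.
	 */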
1864
1865 /* If that number of buffers isn't available, give up for now */
1866 if (privptr->write_free_count < numBuffers ||
1867 privptr->p_write_free_chain == NULL ) {
1868
1869 claw_setbit_busy(TB_NOBUFFER,dev);
1870
1871#ifdef DEBUGMSG
1872 printk(KERN_INFO "%s: busy (claw_setbit_busy"
1873 "(TB_NOBUFFER))\n", dev->name);
1874 printk(KERN_INFO " free_count: %d, numBuffers : %d\n",
1875 (int)privptr->write_free_count,(int) numBuffers );
1876#endif
1877 ch = &privptr->channel[WRITE];
1878 atomic_inc(&skb->users);
1879 skb_queue_tail(&ch->collect_queue, skb);
1880 CLAW_DBF_TEXT(2,trace,"clawbusy");
1881 goto Done2;
1882 }
1883 pDataAddress=skb->data;
1884 len_of_data=skb->len;
1885
1886 while (len_of_data > 0) {
1887#ifdef DEBUGMSG
1888 printk(KERN_INFO "%s: %s() length-of-data is %ld \n",
1889 dev->name ,__FUNCTION__,len_of_data);
1890 dumpit((char *)pDataAddress ,64);
1891#endif
1892 p_this_ccw=privptr->p_write_free_chain; /* get a block */
1893 if (p_this_ccw == NULL) { /* lost the race */
1894 ch = &privptr->channel[WRITE];
1895 atomic_inc(&skb->users);
1896 skb_queue_tail(&ch->collect_queue, skb);
1897 goto Done2;
1898 }
1899 privptr->p_write_free_chain=p_this_ccw->next;
1900 p_this_ccw->next=NULL;
1901 --privptr->write_free_count; /* -1 */
1902 		bytesInThisBuffer = (len_of_data < privptr->p_env->write_size) ? len_of_data : privptr->p_env->write_size;	/* cap at one write buffer */
1903 memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer);
1904 len_of_data-=bytesInThisBuffer;
1905 pDataAddress+=(unsigned long)bytesInThisBuffer;
1906 /* setup write CCW */
1907 p_this_ccw->write.cmd_code = (linkid * 8) +1;
1908 if (len_of_data>0) {
1909 p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG;
1910 }
1911 p_this_ccw->write.count=bytesInThisBuffer;
1912 /* now add to end of this chain */
1913 if (p_first_ccw==NULL) {
1914 p_first_ccw=p_this_ccw;
1915 }
1916 if (p_last_ccw!=NULL) {
1917 p_last_ccw->next=p_this_ccw;
1918 /* set up TIC ccws */
1919 p_last_ccw->w_TIC_1.cda=
1920 (__u32)__pa(&p_this_ccw->write);
1921 }
1922 p_last_ccw=p_this_ccw; /* save new last block */
1923#ifdef IOTRACE
1924 printk(KERN_INFO "%s: %s() > CCW and Buffer %ld bytes long \n",
1925 dev->name,__FUNCTION__,bytesInThisBuffer);
1926 dumpit((char *)p_this_ccw, sizeof(struct ccwbk));
1927 dumpit((char *)p_this_ccw->p_buffer, 64);
1928#endif
1929 }
1930
1931 /* FirstCCW and LastCCW now contain a new set of write channel
1932 * programs to append to the running channel program
1933 */
1934
1935 if (p_first_ccw!=NULL) {
1936 /* setup ending ccw sequence for this segment */
1937 pEnd=privptr->p_end_ccw;
1938 if (pEnd->write1) {
1939 pEnd->write1=0x00; /* second end ccw is now active */
1940 /* set up Tic CCWs */
1941 p_last_ccw->w_TIC_1.cda=
1942 (__u32)__pa(&pEnd->write2_nop1);
1943 pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1944 pEnd->write2_nop2.flags =
1945 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1946 pEnd->write2_nop2.cda=0;
1947 pEnd->write2_nop2.count=1;
1948 }
1949 else { /* end of if (pEnd->write1)*/
1950 pEnd->write1=0x01; /* first end ccw is now active */
1951 /* set up Tic CCWs */
1952 p_last_ccw->w_TIC_1.cda=
1953 (__u32)__pa(&pEnd->write1_nop1);
1954 pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1955 pEnd->write1_nop2.flags =
1956 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1957 pEnd->write1_nop2.cda=0;
1958 pEnd->write1_nop2.count=1;
1959 } /* end if if (pEnd->write1) */
1960
1961
1962 if (privptr->p_write_active_first==NULL ) {
1963 privptr->p_write_active_first=p_first_ccw;
1964 privptr->p_write_active_last=p_last_ccw;
1965 }
1966 else {
1967
1968 /* set up Tic CCWs */
1969
1970 tempCCW.cda=(__u32)__pa(&p_first_ccw->write);
1971 tempCCW.count=0;
1972 tempCCW.flags=0;
1973 tempCCW.cmd_code=CCW_CLAW_CMD_TIC;
1974
1975 if (pEnd->write1) {
1976
1977 /*
1978 * first set of ending CCW's is chained to the new write
1979 * chain, so the second set is chained to the active chain
1980 	 * Therefore modify the second set to point to the new write chain.
1981 * make sure we update the CCW atomically
1982 * so channel does not fetch it when it's only half done
1983 */
1984 memcpy( &pEnd->write2_nop2, &tempCCW ,
1985 sizeof(struct ccw1));
1986 privptr->p_write_active_last->w_TIC_1.cda=
1987 (__u32)__pa(&p_first_ccw->write);
1988 }
1989 else {
1990
1991 			/* make sure we update the CCW atomically
1992 			 * so channel does not fetch it when it's only half done
1993 			 */
1994 memcpy(&pEnd->write1_nop2, &tempCCW ,
1995 sizeof(struct ccw1));
1996 privptr->p_write_active_last->w_TIC_1.cda=
1997 (__u32)__pa(&p_first_ccw->write);
1998
1999 } /* end if if (pEnd->write1) */
2000
2001 privptr->p_write_active_last->next=p_first_ccw;
2002 privptr->p_write_active_last=p_last_ccw;
2003 }
2004
2005 } /* endif (p_first_ccw!=NULL) */
2006
2007
2008#ifdef IOTRACE
2009 printk(KERN_INFO "%s: %s() > Dump Active CCW chain \n",
2010 dev->name,__FUNCTION__);
2011 p_buf=privptr->p_write_active_first;
2012 while (p_buf!=NULL) {
2013 dumpit((char *)p_buf, sizeof(struct ccwbk));
2014 p_buf=p_buf->next;
2015 }
2016 p_buf=(struct ccwbk*)privptr->p_end_ccw;
2017 dumpit((char *)p_buf, sizeof(struct endccw));
2018#endif
2019 dev_kfree_skb(skb);
2020 if (linkid==0) {
2021 lock=LOCK_NO;
2022 }
2023 else {
2024 lock=LOCK_YES;
2025 }
2026 claw_strt_out_IO(dev );
2027 /* if write free count is zero , set NOBUFFER */
2028#ifdef DEBUGMSG
2029 printk(KERN_INFO "%s: %s() > free_count is %d\n",
2030 dev->name,__FUNCTION__,
2031 (int) privptr->write_free_count );
2032#endif
2033 if (privptr->write_free_count==0) {
2034 claw_setbit_busy(TB_NOBUFFER,dev);
2035 }
2036Done2:
2037 claw_clearbit_busy(TB_TX,dev);
2038Done:
2039#ifdef FUNCTRACE
2040 printk(KERN_INFO "%s: %s() > exit on line %d, rc = %d \n",
2041 dev->name,__FUNCTION__,__LINE__, rc);
2042#endif
2043 return(rc);
2044} /* end of claw_hw_tx */
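/*
 * Transmit path in brief: claw_hw_tx() copies the skb into one or more
 * free write buffers, OR-ing MORE_to_COME into the write command of
 * every buffer but the last, splices the new CCW chain onto the running
 * write channel program through the double-buffered ending CCWs (the
 * write-side mirror of add_claw_reads), and finally starts or resumes
 * the write channel with claw_strt_out_IO().
 */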
2045
2046/*-------------------------------------------------------------------*
2047* *
2048* init_ccw_bk *
2049* *
2050*--------------------------------------------------------------------*/
2051
2052static int
2053init_ccw_bk(struct net_device *dev)
2054{
2055
2056 __u32 ccw_blocks_required;
2057 __u32 ccw_blocks_perpage;
2058 __u32 ccw_pages_required;
2059 __u32 claw_reads_perpage=1;
2060 __u32 claw_read_pages;
2061 __u32 claw_writes_perpage=1;
2062 __u32 claw_write_pages;
2063 void *p_buff=NULL;
2064 struct ccwbk*p_free_chain;
2065 struct ccwbk*p_buf;
2066 struct ccwbk*p_last_CCWB;
2067 struct ccwbk*p_first_CCWB;
2068 struct endccw *p_endccw=NULL;
2069 addr_t real_address;
2070 struct claw_privbk *privptr=dev->priv;
2071 struct clawh *pClawH=NULL;
2072 addr_t real_TIC_address;
2073 int i,j;
2074#ifdef FUNCTRACE
2075 printk(KERN_INFO "%s: %s() enter \n",dev->name,__FUNCTION__);
2076#endif
2077 CLAW_DBF_TEXT(4,trace,"init_ccw");
2078#ifdef DEBUGMSG
2079 printk(KERN_INFO "%s: variable dev =\n",dev->name);
2080 dumpit((char *) dev, sizeof(struct net_device));
2081#endif
2082
2083 /* initialize statistics field */
2084 privptr->active_link_ID=0;
2085 /* initialize ccwbk pointers */
2086 privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/
2087 privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/
2088 privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/
2089 privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/
2090 privptr->p_read_active_last=NULL; /* pointer to the last read ccw */
2091 privptr->p_end_ccw=NULL; /* pointer to ending ccw */
2092 privptr->p_claw_signal_blk=NULL; /* pointer to signal block */
2093 privptr->buffs_alloc = 0;
2094 memset(&privptr->end_ccw, 0x00, sizeof(struct endccw));
2095 memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl));
2096 /* initialize free write ccwbk counter */
2097 privptr->write_free_count=0; /* number of free bufs on write chain */
2098 p_last_CCWB = NULL;
2099 p_first_CCWB= NULL;
2100 /*
2101 * We need 1 CCW block for each read buffer, 1 for each
2102 * write buffer, plus 1 for ClawSignalBlock
2103 */
2104 ccw_blocks_required =
2105 privptr->p_env->read_buffers+privptr->p_env->write_buffers+1;
2106#ifdef DEBUGMSG
2107 printk(KERN_INFO "%s: %s() "
2108 "ccw_blocks_required=%d\n",
2109 dev->name,__FUNCTION__,
2110 ccw_blocks_required);
2111 printk(KERN_INFO "%s: %s() "
2112 "PAGE_SIZE=0x%x\n",
2113 dev->name,__FUNCTION__,
2114 (unsigned int)PAGE_SIZE);
2115 printk(KERN_INFO "%s: %s() > "
2116 "PAGE_MASK=0x%x\n",
2117 dev->name,__FUNCTION__,
2118 (unsigned int)PAGE_MASK);
2119#endif
2120 /*
2121 * compute number of CCW blocks that will fit in a page
2122 */
2123 ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE;
2124 ccw_pages_required=
2125 (ccw_blocks_required+ccw_blocks_perpage -1) /
2126 ccw_blocks_perpage;
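	/*
	 * Same ceiling division: with, say, 21 CCW blocks fitting in a
	 * page, 65 required blocks round up to (65 + 20) / 21 = 4 pages.
	 */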
2127
2128#ifdef DEBUGMSG
2129 printk(KERN_INFO "%s: %s() > ccw_blocks_perpage=%d\n",
2130 dev->name,__FUNCTION__,
2131 ccw_blocks_perpage);
2132 printk(KERN_INFO "%s: %s() > ccw_pages_required=%d\n",
2133 dev->name,__FUNCTION__,
2134 ccw_pages_required);
2135#endif
2136 	/*
2137 	 * Read and write sizes are set by two constants in claw.h,
2138 	 * 4k and 32k.  Unpacked values other than 4k will not give
2139 	 * good performance.  When packing is in use, the 32k
2140 	 * buffers are selected.
2141 	 */
2142 if (privptr->p_env->read_size < PAGE_SIZE) {
2143 claw_reads_perpage= PAGE_SIZE / privptr->p_env->read_size;
2144 claw_read_pages= (privptr->p_env->read_buffers +
2145 claw_reads_perpage -1) / claw_reads_perpage;
2146 }
2147 else { /* > or equal */
2148 privptr->p_buff_pages_perread=
2149 (privptr->p_env->read_size + PAGE_SIZE - 1) / PAGE_SIZE;
2150 claw_read_pages=
2151 privptr->p_env->read_buffers * privptr->p_buff_pages_perread;
2152 }
2153 if (privptr->p_env->write_size < PAGE_SIZE) {
2154 claw_writes_perpage=
2155 PAGE_SIZE / privptr->p_env->write_size;
2156 claw_write_pages=
2157 (privptr->p_env->write_buffers + claw_writes_perpage -1) /
2158 claw_writes_perpage;
2159
2160 }
2161 else { /* > or equal */
2162 privptr->p_buff_pages_perwrite=
2163 		 (privptr->p_env->write_size + PAGE_SIZE - 1) / PAGE_SIZE;
2164 claw_write_pages=
2165 privptr->p_env->write_buffers * privptr->p_buff_pages_perwrite;
2166 }
2167#ifdef DEBUGMSG
2168 if (privptr->p_env->read_size < PAGE_SIZE) {
2169 printk(KERN_INFO "%s: %s() reads_perpage=%d\n",
2170 dev->name,__FUNCTION__,
2171 claw_reads_perpage);
2172 }
2173 else {
2174 printk(KERN_INFO "%s: %s() pages_perread=%d\n",
2175 dev->name,__FUNCTION__,
2176 privptr->p_buff_pages_perread);
2177 }
2178 printk(KERN_INFO "%s: %s() read_pages=%d\n",
2179 dev->name,__FUNCTION__,
2180 claw_read_pages);
2181 if (privptr->p_env->write_size < PAGE_SIZE) {
2182 printk(KERN_INFO "%s: %s() writes_perpage=%d\n",
2183 dev->name,__FUNCTION__,
2184 claw_writes_perpage);
2185 }
2186 else {
2187 printk(KERN_INFO "%s: %s() pages_perwrite=%d\n",
2188 dev->name,__FUNCTION__,
2189 privptr->p_buff_pages_perwrite);
2190 }
2191 printk(KERN_INFO "%s: %s() write_pages=%d\n",
2192 dev->name,__FUNCTION__,
2193 claw_write_pages);
2194#endif
2195
2196
2197 /*
2198 * allocate ccw_pages_required
2199 */
2200 if (privptr->p_buff_ccw==NULL) {
2201 privptr->p_buff_ccw=
2202 (void *)__get_free_pages(__GFP_DMA,
2203 (int)pages_to_order_of_mag(ccw_pages_required ));
2204 if (privptr->p_buff_ccw==NULL) {
2205 printk(KERN_INFO "%s: %s() "
2206 "__get_free_pages for CCWs failed : "
2207 "pages is %d\n",
2208 dev->name,__FUNCTION__,
2209 ccw_pages_required );
2210#ifdef FUNCTRACE
2211 printk(KERN_INFO "%s: %s() > "
2212 "exit on line %d, rc = ENOMEM\n",
2213 dev->name,__FUNCTION__,
2214 __LINE__);
2215#endif
2216 return -ENOMEM;
2217 }
2218 privptr->p_buff_ccw_num=ccw_pages_required;
2219 }
2220 memset(privptr->p_buff_ccw, 0x00,
2221 privptr->p_buff_ccw_num * PAGE_SIZE);
2222
2223 /*
2224 * obtain ending ccw block address
2225 *
2226 */
2227 privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw;
2228 real_address = (__u32)__pa(privptr->p_end_ccw);
2229 /* Initialize ending CCW block */
2230#ifdef DEBUGMSG
2231 printk(KERN_INFO "%s: %s() begin initialize ending CCW blocks\n",
2232 dev->name,__FUNCTION__);
2233#endif
2234
2235 p_endccw=privptr->p_end_ccw;
2236 p_endccw->real=real_address;
2237 p_endccw->write1=0x00;
2238 p_endccw->read1=0x00;
2239
2240 /* write1_nop1 */
2241 p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
2242 p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2243 p_endccw->write1_nop1.count = 1;
2244 p_endccw->write1_nop1.cda = 0;
2245
2246 /* write1_nop2 */
2247 p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
2248 p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
2249 p_endccw->write1_nop2.count = 1;
2250 p_endccw->write1_nop2.cda = 0;
2251
2252 /* write2_nop1 */
2253 p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
2254 p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2255 p_endccw->write2_nop1.count = 1;
2256 p_endccw->write2_nop1.cda = 0;
2257
2258 /* write2_nop2 */
2259 p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
2260 p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
2261 p_endccw->write2_nop2.count = 1;
2262 p_endccw->write2_nop2.cda = 0;
2263
2264 /* read1_nop1 */
2265 p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
2266 p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2267 p_endccw->read1_nop1.count = 1;
2268 p_endccw->read1_nop1.cda = 0;
2269
2270 /* read1_nop2 */
2271 p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
2272 p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
2273 p_endccw->read1_nop2.count = 1;
2274 p_endccw->read1_nop2.cda = 0;
2275
2276 /* read2_nop1 */
2277 p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
2278 p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2279 p_endccw->read2_nop1.count = 1;
2280 p_endccw->read2_nop1.cda = 0;
2281
2282 /* read2_nop2 */
2283 p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
2284 p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
2285 p_endccw->read2_nop2.count = 1;
2286 p_endccw->read2_nop2.cda = 0;
2287
2288#ifdef IOTRACE
2289 printk(KERN_INFO "%s: %s() dump claw ending CCW BK \n",
2290 dev->name,__FUNCTION__);
2291 dumpit((char *)p_endccw, sizeof(struct endccw));
2292#endif
2293
2294 /*
2295 * Build a chain of CCWs
2296 *
2297 */
2298
2299#ifdef DEBUGMSG
2300 printk(KERN_INFO "%s: %s() Begin build a chain of CCW buffer \n",
2301 dev->name,__FUNCTION__);
2302#endif
2303 p_buff=privptr->p_buff_ccw;
2304
2305 p_free_chain=NULL;
2306 for (i=0 ; i < ccw_pages_required; i++ ) {
2307 real_address = (__u32)__pa(p_buff);
2308 p_buf=p_buff;
2309 for (j=0 ; j < ccw_blocks_perpage ; j++) {
2310 p_buf->next = p_free_chain;
2311 p_free_chain = p_buf;
2312 p_buf->real=(__u32)__pa(p_buf);
2313 ++p_buf;
2314 }
2315 p_buff+=PAGE_SIZE;
2316 }
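	/*
	 * p_free_chain is now a LIFO list of every CCW block, each stamped
	 * with its own real (physical) address; the signal block and the
	 * read and write buffer loops below all draw their ccwbk
	 * structures from this chain.
	 */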
2317#ifdef DEBUGMSG
2318 printk(KERN_INFO "%s: %s() "
2319 "End build a chain of CCW buffer \n",
2320 dev->name,__FUNCTION__);
2321 p_buf=p_free_chain;
2322 while (p_buf!=NULL) {
2323 dumpit((char *)p_buf, sizeof(struct ccwbk));
2324 p_buf=p_buf->next;
2325 }
2326#endif
2327
2328 /*
2329 * Initialize ClawSignalBlock
2330 *
2331 */
2332#ifdef DEBUGMSG
2333 printk(KERN_INFO "%s: %s() "
2334 "Begin initialize ClawSignalBlock \n",
2335 dev->name,__FUNCTION__);
2336#endif
2337 if (privptr->p_claw_signal_blk==NULL) {
2338 privptr->p_claw_signal_blk=p_free_chain;
2339 p_free_chain=p_free_chain->next;
2340 pClawH=(struct clawh *)privptr->p_claw_signal_blk;
2341 pClawH->length=0xffff;
2342 pClawH->opcode=0xff;
2343 pClawH->flag=CLAW_BUSY;
2344 }
2345#ifdef DEBUGMSG
2346 printk(KERN_INFO "%s: %s() > End initialize "
2347 "ClawSignalBlock\n",
2348 dev->name,__FUNCTION__);
2349 dumpit((char *)privptr->p_claw_signal_blk, sizeof(struct ccwbk));
2350#endif
2351
2352 /*
2353 * allocate write_pages_required and add to free chain
2354 */
2355 if (privptr->p_buff_write==NULL) {
2356 if (privptr->p_env->write_size < PAGE_SIZE) {
2357 privptr->p_buff_write=
2358 (void *)__get_free_pages(__GFP_DMA,
2359 (int)pages_to_order_of_mag(claw_write_pages ));
2360 if (privptr->p_buff_write==NULL) {
2361 printk(KERN_INFO "%s: %s() __get_free_pages for write"
2362 " bufs failed : get is for %d pages\n",
2363 dev->name,__FUNCTION__,claw_write_pages );
2364 free_pages((unsigned long)privptr->p_buff_ccw,
2365 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
2366 privptr->p_buff_ccw=NULL;
2367#ifdef FUNCTRACE
2368 printk(KERN_INFO "%s: %s() > exit on line %d,"
2369 "rc = ENOMEM\n",
2370 dev->name,__FUNCTION__,__LINE__);
2371#endif
2372 return -ENOMEM;
2373 }
2374 /*
2375 * Build CLAW write free chain
2376 *
2377 */
2378
2379 memset(privptr->p_buff_write, 0x00,
2380 			       claw_write_pages * PAGE_SIZE);
2381#ifdef DEBUGMSG
2382 printk(KERN_INFO "%s: %s() Begin build claw write free "
2383 "chain \n",dev->name,__FUNCTION__);
2384#endif
2385 privptr->p_write_free_chain=NULL;
2386
2387 p_buff=privptr->p_buff_write;
2388
2389 for (i=0 ; i< privptr->p_env->write_buffers ; i++) {
2390 p_buf = p_free_chain; /* get a CCW */
2391 p_free_chain = p_buf->next;
2392 p_buf->next =privptr->p_write_free_chain;
2393 privptr->p_write_free_chain = p_buf;
2394 p_buf-> p_buffer = (struct clawbuf *)p_buff;
2395 p_buf-> write.cda = (__u32)__pa(p_buff);
2396 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2397 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
2398 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2399 p_buf-> w_read_FF.count = 1;
2400 p_buf-> w_read_FF.cda =
2401 (__u32)__pa(&p_buf-> header.flag);
2402 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
2403 p_buf-> w_TIC_1.flags = 0;
2404 p_buf-> w_TIC_1.count = 0;
2405
2406 if (((unsigned long)p_buff+privptr->p_env->write_size) >=
2407 ((unsigned long)(p_buff+2*
2408 (privptr->p_env->write_size) -1) & PAGE_MASK)) {
2409 p_buff= p_buff+privptr->p_env->write_size;
2410 }
2411 }
2412 }
2413 	else /* Buffers are >= PAGE_SIZE; one buffer per __get_free_pages() */
2414 {
2415 privptr->p_write_free_chain=NULL;
2416 for (i = 0; i< privptr->p_env->write_buffers ; i++) {
2417 p_buff=(void *)__get_free_pages(__GFP_DMA,
2418 (int)pages_to_order_of_mag(
2419 privptr->p_buff_pages_perwrite) );
2420#ifdef IOTRACE
2421 printk(KERN_INFO "%s:%s __get_free_pages "
2422 "for writes buf: get for %d pages\n",
2423 dev->name,__FUNCTION__,
2424 privptr->p_buff_pages_perwrite);
2425#endif
2426 if (p_buff==NULL) {
2427 				printk(KERN_INFO "%s:%s __get_free_pages "
2428 "for writes buf failed : get is for %d pages\n",
2429 dev->name,
2430 __FUNCTION__,
2431 privptr->p_buff_pages_perwrite );
2432 free_pages((unsigned long)privptr->p_buff_ccw,
2433 (int)pages_to_order_of_mag(
2434 privptr->p_buff_ccw_num));
2435 privptr->p_buff_ccw=NULL;
2436 p_buf=privptr->p_buff_write;
2437 while (p_buf!=NULL) {
2438 free_pages((unsigned long)
2439 p_buf->p_buffer,
2440 (int)pages_to_order_of_mag(
2441 privptr->p_buff_pages_perwrite));
2442 p_buf=p_buf->next;
2443 }
2444#ifdef FUNCTRACE
2445 printk(KERN_INFO "%s: %s exit on line %d, rc = ENOMEM\n",
2446 dev->name,
2447 __FUNCTION__,
2448 __LINE__);
2449#endif
2450 return -ENOMEM;
2451 } /* Error on get_pages */
2452 memset(p_buff, 0x00, privptr->p_env->write_size );
2453 p_buf = p_free_chain;
2454 p_free_chain = p_buf->next;
2455 p_buf->next = privptr->p_write_free_chain;
2456 privptr->p_write_free_chain = p_buf;
2457 privptr->p_buff_write = p_buf;
2458 p_buf->p_buffer=(struct clawbuf *)p_buff;
2459 p_buf-> write.cda = (__u32)__pa(p_buff);
2460 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2461 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
2462 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2463 p_buf-> w_read_FF.count = 1;
2464 p_buf-> w_read_FF.cda =
2465 (__u32)__pa(&p_buf-> header.flag);
2466 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
2467 p_buf-> w_TIC_1.flags = 0;
2468 p_buf-> w_TIC_1.count = 0;
2469 } /* for all write_buffers */
2470
2471 } /* else buffers are PAGE_SIZE or bigger */
2472
2473 }
2474 privptr->p_buff_write_num=claw_write_pages;
2475 privptr->write_free_count=privptr->p_env->write_buffers;
2476
2477
2478#ifdef DEBUGMSG
2479 printk(KERN_INFO "%s:%s End build claw write free chain \n",
2480 dev->name,__FUNCTION__);
2481 p_buf=privptr->p_write_free_chain;
2482 while (p_buf!=NULL) {
2483 dumpit((char *)p_buf, sizeof(struct ccwbk));
2484 p_buf=p_buf->next;
2485 }
2486#endif
2487 /*
2488 * allocate read_pages_required and chain to free chain
2489 */
2490 if (privptr->p_buff_read==NULL) {
2491 if (privptr->p_env->read_size < PAGE_SIZE) {
2492 privptr->p_buff_read=
2493 (void *)__get_free_pages(__GFP_DMA,
2494 (int)pages_to_order_of_mag(claw_read_pages) );
2495 if (privptr->p_buff_read==NULL) {
2496 printk(KERN_INFO "%s: %s() "
2497 "__get_free_pages for read buf failed : "
2498 "get is for %d pages\n",
2499 dev->name,__FUNCTION__,claw_read_pages );
2500 free_pages((unsigned long)privptr->p_buff_ccw,
2501 (int)pages_to_order_of_mag(
2502 privptr->p_buff_ccw_num));
2503 			/* free the write pages (write_size < PAGE_SIZE here) */
2504 free_pages((unsigned long)privptr->p_buff_write,
2505 (int)pages_to_order_of_mag(
2506 privptr->p_buff_write_num));
2507 privptr->p_buff_ccw=NULL;
2508 privptr->p_buff_write=NULL;
2509#ifdef FUNCTRACE
2510 printk(KERN_INFO "%s: %s() > exit on line %d, rc ="
2511 " ENOMEM\n",dev->name,__FUNCTION__,__LINE__);
2512#endif
2513 return -ENOMEM;
2514 }
2515 memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE);
2516 privptr->p_buff_read_num=claw_read_pages;
2517 /*
2518 * Build CLAW read free chain
2519 *
2520 */
2521#ifdef DEBUGMSG
2522 printk(KERN_INFO "%s: %s() Begin build claw read free chain \n",
2523 dev->name,__FUNCTION__);
2524#endif
2525 p_buff=privptr->p_buff_read;
2526 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
2527 p_buf = p_free_chain;
2528 p_free_chain = p_buf->next;
2529
2530 if (p_last_CCWB==NULL) {
2531 p_buf->next=NULL;
2532 real_TIC_address=0;
2533 p_last_CCWB=p_buf;
2534 }
2535 else {
2536 p_buf->next=p_first_CCWB;
2537 real_TIC_address=
2538 (__u32)__pa(&p_first_CCWB -> read );
2539 }
2540
2541 p_first_CCWB=p_buf;
2542
2543 p_buf->p_buffer=(struct clawbuf *)p_buff;
2544 /* initialize read command */
2545 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
2546 p_buf-> read.cda = (__u32)__pa(p_buff);
2547 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2548 p_buf-> read.count = privptr->p_env->read_size;
2549
2550 /* initialize read_h command */
2551 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
2552 p_buf-> read_h.cda =
2553 (__u32)__pa(&(p_buf->header));
2554 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2555 p_buf-> read_h.count = sizeof(struct clawh);
2556
2557 /* initialize Signal command */
2558 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
2559 p_buf-> signal.cda =
2560 (__u32)__pa(&(pClawH->flag));
2561 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2562 p_buf-> signal.count = 1;
2563
2564 /* initialize r_TIC_1 command */
2565 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
2566 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
2567 p_buf-> r_TIC_1.flags = 0;
2568 p_buf-> r_TIC_1.count = 0;
2569
2570 /* initialize r_read_FF command */
2571 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
2572 p_buf-> r_read_FF.cda =
2573 (__u32)__pa(&(pClawH->flag));
2574 p_buf-> r_read_FF.flags =
2575 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
2576 p_buf-> r_read_FF.count = 1;
2577
2578 /* initialize r_TIC_2 */
2579 memcpy(&p_buf->r_TIC_2,
2580 &p_buf->r_TIC_1, sizeof(struct ccw1));
2581
2582 /* initialize Header */
2583 p_buf->header.length=0xffff;
2584 p_buf->header.opcode=0xff;
2585 p_buf->header.flag=CLAW_PENDING;
2586
2587 if (((unsigned long)p_buff+privptr->p_env->read_size) >=
2588 ((unsigned long)(p_buff+2*(privptr->p_env->read_size) -1)
2589 & PAGE_MASK) ) {
2590 p_buff= p_buff+privptr->p_env->read_size;
2591 }
2592 else {
2593 p_buff=
2594 (void *)((unsigned long)
2595 (p_buff+2*(privptr->p_env->read_size) -1)
2596 & PAGE_MASK) ;
2597 }
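 /*
  * Note: read_size < PAGE_SIZE in this branch, so buffers are packed
  * within a page but never allowed to straddle a page boundary; when
  * the next buffer would cross one, it is rounded up to the start of
  * the next page.
  */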
2598 } /* for read_buffers */
2599 } /* read_size < PAGE_SIZE */
2600 else { /* read Size >= PAGE_SIZE */
2601
2602#ifdef DEBUGMSG
2603 printk(KERN_INFO "%s: %s() Begin build claw read free chain \n",
2604 dev->name,__FUNCTION__);
2605#endif
2606 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
2607 p_buff = (void *)__get_free_pages(__GFP_DMA,
2608 (int)pages_to_order_of_mag(privptr->p_buff_pages_perread) );
2609 if (p_buff==NULL) {
2610 printk(KERN_INFO "%s: %s() __get_free_pages for read "
2611 "buf failed : get is for %d pages\n",
2612 dev->name,__FUNCTION__,
2613 privptr->p_buff_pages_perread );
2614 free_pages((unsigned long)privptr->p_buff_ccw,
2615 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
2616 /* free the write pages */
2617 p_buf=privptr->p_buff_write;
2618 while (p_buf!=NULL) {
2619 free_pages((unsigned long)p_buf->p_buffer,
2620 (int)pages_to_order_of_mag(
2621 privptr->p_buff_pages_perwrite ));
2622 p_buf=p_buf->next;
2623 }
2624 /* free any read pages already alloc */
2625 p_buf=privptr->p_buff_read;
2626 while (p_buf!=NULL) {
2627 free_pages((unsigned long)p_buf->p_buffer,
2628 (int)pages_to_order_of_mag(
2629 privptr->p_buff_pages_perread ));
2630 p_buf=p_buf->next;
2631 }
2632 privptr->p_buff_ccw=NULL;
2633 privptr->p_buff_write=NULL;
2634#ifdef FUNCTRACE
2635 printk(KERN_INFO "%s: %s() exit on line %d, rc = ENOMEM\n",
2636 dev->name,__FUNCTION__,
2637 __LINE__);
2638#endif
2639 return -ENOMEM;
2640 }
2641 memset(p_buff, 0x00, privptr->p_env->read_size);
2642 p_buf = p_free_chain;
2643 privptr->p_buff_read = p_buf;
2644 p_free_chain = p_buf->next;
2645
2646 if (p_last_CCWB==NULL) {
2647 p_buf->next=NULL;
2648 real_TIC_address=0;
2649 p_last_CCWB=p_buf;
2650 }
2651 else {
2652 p_buf->next=p_first_CCWB;
2653 real_TIC_address=
2654 (addr_t)__pa(
2655 &p_first_CCWB -> read );
2656 }
2657
2658 p_first_CCWB=p_buf;
2659 /* save buff address */
2660 p_buf->p_buffer=(struct clawbuf *)p_buff;
2661 /* initialize read command */
2662 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
2663 p_buf-> read.cda = (__u32)__pa(p_buff);
2664 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2665 p_buf-> read.count = privptr->p_env->read_size;
2666
2667 /* initialize read_h command */
2668 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
2669 p_buf-> read_h.cda =
2670 (__u32)__pa(&(p_buf->header));
2671 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2672 p_buf-> read_h.count = sizeof(struct clawh);
2673
2674 /* initialize Signal command */
2675 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
2676 p_buf-> signal.cda =
2677 (__u32)__pa(&(pClawH->flag));
2678 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2679 p_buf-> signal.count = 1;
2680
2681 /* initialize r_TIC_1 command */
2682 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
2683 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
2684 p_buf-> r_TIC_1.flags = 0;
2685 p_buf-> r_TIC_1.count = 0;
2686
2687 /* initialize r_read_FF command */
2688 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
2689 p_buf-> r_read_FF.cda =
2690 (__u32)__pa(&(pClawH->flag));
2691 p_buf-> r_read_FF.flags =
2692 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
2693 p_buf-> r_read_FF.count = 1;
2694
2695 /* initialize r_TIC_2 */
2696 memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
2697 sizeof(struct ccw1));
2698
2699 /* initialize Header */
2700 p_buf->header.length=0xffff;
2701 p_buf->header.opcode=0xff;
2702 p_buf->header.flag=CLAW_PENDING;
2703
2704 } /* For read_buffers */
2705 } /* read_size >= PAGE_SIZE */
 2706 } /* p_buff_read == NULL */
2707#ifdef DEBUGMSG
2708 printk(KERN_INFO "%s: %s() > End build claw read free chain \n",
2709 dev->name,__FUNCTION__);
2710 p_buf=p_first_CCWB;
2711 while (p_buf!=NULL) {
2712 dumpit((char *)p_buf, sizeof(struct ccwbk));
2713 p_buf=p_buf->next;
2714 }
2715
2716#endif
2717 add_claw_reads( dev ,p_first_CCWB , p_last_CCWB);
2718 privptr->buffs_alloc = 1;
2719#ifdef FUNCTRACE
2720 printk(KERN_INFO "%s: %s() exit on line %d\n",
2721 dev->name,__FUNCTION__,__LINE__);
2722#endif
2723 return 0;
2724} /* end of init_ccw_bk */
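/*
 * Reader's note on the chain built above: every read buffer carries a
 * small channel program of its own -- read (data), read_h (CLAW header),
 * signal (status modifier), r_TIC_1 (transfer-in-channel to the next
 * buffer's read), with r_read_FF and r_TIC_2 as the alternate path.
 * The software header is preset to length 0xffff / opcode 0xff /
 * CLAW_PENDING so unpack_read() can tell an untouched buffer from a
 * completed one.
 */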
2725
2726/*-------------------------------------------------------------------*
2727* *
2728* probe_error *
2729* *
2730*--------------------------------------------------------------------*/
2731
2732static void
2733probe_error( struct ccwgroup_device *cgdev)
2734{
2735 struct claw_privbk *privptr;
2736#ifdef FUNCTRACE
2737 printk(KERN_INFO "%s enter \n",__FUNCTION__);
2738#endif
2739 CLAW_DBF_TEXT(4,trace,"proberr");
2740#ifdef DEBUGMSG
2741 printk(KERN_INFO "%s variable cgdev =\n",__FUNCTION__);
2742 dumpit((char *) cgdev, sizeof(struct ccwgroup_device));
2743#endif
2744 privptr=(struct claw_privbk *)cgdev->dev.driver_data;
2745 if (privptr!=NULL) {
2746 if (privptr->p_env != NULL) {
2747 kfree(privptr->p_env);
2748 privptr->p_env=NULL;
2749 }
2750 if (privptr->p_mtc_envelope!=NULL) {
2751 kfree(privptr->p_mtc_envelope);
2752 privptr->p_mtc_envelope=NULL;
2753 }
2754 kfree(privptr);
2755 privptr=NULL;
2756 }
2757#ifdef FUNCTRACE
2758 printk(KERN_INFO "%s > exit on line %d\n",
2759 __FUNCTION__,__LINE__);
2760#endif
2761
2762 return;
2763} /* probe_error */
2764
2765
2766
2767/*-------------------------------------------------------------------*
2768* claw_process_control *
2769* *
2770* *
2771*--------------------------------------------------------------------*/
2772
2773static int
2774claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2775{
2776
2777 struct clawbuf *p_buf;
2778 struct clawctl ctlbk;
2779 struct clawctl *p_ctlbk;
2780 char temp_host_name[8];
2781 char temp_ws_name[8];
2782 struct claw_privbk *privptr;
2783 struct claw_env *p_env;
2784 struct sysval *p_sysval;
2785 struct conncmd *p_connect=NULL;
2786 int rc;
2787 struct chbk *p_ch = NULL;
2788#ifdef FUNCTRACE
2789 printk(KERN_INFO "%s: %s() > enter \n",
2790 dev->name,__FUNCTION__);
2791#endif
2792 CLAW_DBF_TEXT(2,setup,"clw_cntl");
2793#ifdef DEBUGMSG
2794 printk(KERN_INFO "%s: variable dev =\n",dev->name);
2795 dumpit((char *) dev, sizeof(struct net_device));
2796 printk(KERN_INFO "%s: variable p_ccw =\n",dev->name);
2797 dumpit((char *) p_ccw, sizeof(struct ccwbk *));
2798#endif
2799 udelay(1000); /* Wait a ms for the control packets to
 2800 * catch up to each other */
2801 privptr=dev->priv;
2802 p_env=privptr->p_env;
2803 memcpy( &temp_host_name, p_env->host_name, 8);
2804 memcpy( &temp_ws_name, p_env->adapter_name , 8);
2805 printk(KERN_INFO "%s: CLAW device %.8s: "
2806 "Received Control Packet\n",
2807 dev->name, temp_ws_name);
2808 if (privptr->release_pend==1) {
2809#ifdef FUNCTRACE
2810 printk(KERN_INFO "%s: %s() > "
2811 "exit on line %d, rc=0\n",
2812 dev->name,__FUNCTION__,__LINE__);
2813#endif
2814 return 0;
2815 }
2816 p_buf=p_ccw->p_buffer;
2817 p_ctlbk=&ctlbk;
 2818 if (p_env->packing == DO_PACKED) { /* packing in progress? */
2819 memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
2820 } else {
2821 memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
2822 }
2823#ifdef IOTRACE
2824 printk(KERN_INFO "%s: dump claw control data inbound\n",dev->name);
2825 dumpit((char *)p_ctlbk, sizeof(struct clawctl));
2826#endif
2827 switch (p_ctlbk->command)
2828 {
2829 case SYSTEM_VALIDATE_REQUEST:
2830 if (p_ctlbk->version!=CLAW_VERSION_ID) {
2831 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2832 CLAW_RC_WRONG_VERSION );
2833 printk("%s: %d is wrong version id. "
2834 "Expected %d\n",
2835 dev->name, p_ctlbk->version,
2836 CLAW_VERSION_ID);
2837 }
2838 p_sysval=(struct sysval *)&(p_ctlbk->data);
2839 printk( "%s: Recv Sys Validate Request: "
2840 "Vers=%d,link_id=%d,Corr=%d,WS name=%."
2841 "8s,Host name=%.8s\n",
2842 dev->name, p_ctlbk->version,
2843 p_ctlbk->linkid,
2844 p_ctlbk->correlator,
2845 p_sysval->WS_name,
2846 p_sysval->host_name);
2847 if (0!=memcmp(temp_host_name,p_sysval->host_name,8)) {
2848 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2849 CLAW_RC_NAME_MISMATCH );
2850 CLAW_DBF_TEXT(2,setup,"HSTBAD");
2851 CLAW_DBF_TEXT_(2,setup,"%s",p_sysval->host_name);
2852 CLAW_DBF_TEXT_(2,setup,"%s",temp_host_name);
2853 printk(KERN_INFO "%s: Host name mismatch\n",
2854 dev->name);
2855 printk(KERN_INFO "%s: Received :%s: "
2856 "expected :%s: \n",
2857 dev->name,
2858 p_sysval->host_name,
2859 temp_host_name);
2860 }
2861 if (0!=memcmp(temp_ws_name,p_sysval->WS_name,8)) {
2862 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2863 CLAW_RC_NAME_MISMATCH );
2864 CLAW_DBF_TEXT(2,setup,"WSNBAD");
2865 CLAW_DBF_TEXT_(2,setup,"%s",p_sysval->WS_name);
2866 CLAW_DBF_TEXT_(2,setup,"%s",temp_ws_name);
2867 printk(KERN_INFO "%s: WS name mismatch\n",
2868 dev->name);
2869 printk(KERN_INFO "%s: Received :%s: "
2870 "expected :%s: \n",
2871 dev->name,
2872 p_sysval->WS_name,
2873 temp_ws_name);
2874 }
2875 if (( p_sysval->write_frame_size < p_env->write_size) &&
2876 ( p_env->packing == 0)) {
2877 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2878 CLAW_RC_HOST_RCV_TOO_SMALL );
2879 printk(KERN_INFO "%s: host write size is too "
2880 "small\n", dev->name);
2881 CLAW_DBF_TEXT(2,setup,"wrtszbad");
2882 }
2883 if (( p_sysval->read_frame_size < p_env->read_size) &&
2884 ( p_env->packing == 0)) {
2885 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2886 CLAW_RC_HOST_RCV_TOO_SMALL );
2887 printk(KERN_INFO "%s: host read size is too "
2888 "small\n", dev->name);
2889 CLAW_DBF_TEXT(2,setup,"rdsizbad");
2890 }
2891 claw_snd_sys_validate_rsp(dev, p_ctlbk, 0 );
2892 printk("%s: CLAW device %.8s: System validate"
2893 " completed.\n",dev->name, temp_ws_name);
2894 printk("%s: sys Validate Rsize:%d Wsize:%d\n",dev->name,
2895 p_sysval->read_frame_size,p_sysval->write_frame_size);
2896 privptr->system_validate_comp=1;
2897 if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
2898 p_env->packing = PACKING_ASK;
2899 }
2900 claw_strt_conn_req(dev);
2901 break;
2902
2903 case SYSTEM_VALIDATE_RESPONSE:
2904 p_sysval=(struct sysval *)&(p_ctlbk->data);
2905 printk("%s: Recv Sys Validate Resp: Vers=%d,Corr=%d,RC=%d,"
2906 "WS name=%.8s,Host name=%.8s\n",
2907 dev->name,
2908 p_ctlbk->version,
2909 p_ctlbk->correlator,
2910 p_ctlbk->rc,
2911 p_sysval->WS_name,
2912 p_sysval->host_name);
2913 switch (p_ctlbk->rc)
2914 {
2915 case 0:
2916 printk(KERN_INFO "%s: CLAW device "
2917 "%.8s: System validate "
2918 "completed.\n",
2919 dev->name, temp_ws_name);
2920 if (privptr->system_validate_comp == 0)
2921 claw_strt_conn_req(dev);
2922 privptr->system_validate_comp=1;
2923 break;
2924 case CLAW_RC_NAME_MISMATCH:
2925 printk(KERN_INFO "%s: Sys Validate "
 2926 "Resp : Host or WS name "
2927 "mismatch\n",
2928 dev->name);
2929 break;
2930 case CLAW_RC_WRONG_VERSION:
2931 printk(KERN_INFO "%s: Sys Validate "
2932 "Resp : Wrong version\n",
2933 dev->name);
2934 break;
2935 case CLAW_RC_HOST_RCV_TOO_SMALL:
2936 printk(KERN_INFO "%s: Sys Validate "
2937 "Resp : bad frame size\n",
2938 dev->name);
2939 break;
2940 default:
2941 printk(KERN_INFO "%s: Sys Validate "
2942 "error code=%d \n",
2943 dev->name, p_ctlbk->rc );
2944 break;
2945 }
2946 break;
2947
2948 case CONNECTION_REQUEST:
2949 p_connect=(struct conncmd *)&(p_ctlbk->data);
2950 printk(KERN_INFO "%s: Recv Conn Req: Vers=%d,link_id=%d,"
2951 "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
2952 dev->name,
2953 p_ctlbk->version,
2954 p_ctlbk->linkid,
2955 p_ctlbk->correlator,
2956 p_connect->host_name,
2957 p_connect->WS_name);
2958 if (privptr->active_link_ID!=0 ) {
2959 claw_snd_disc(dev, p_ctlbk);
2960 printk(KERN_INFO "%s: Conn Req error : "
 2961 "a logical link is already active\n",
2962 dev->name);
2963 }
2964 if (p_ctlbk->linkid!=1 ) {
2965 claw_snd_disc(dev, p_ctlbk);
2966 printk(KERN_INFO "%s: Conn Req error : "
2967 "req logical link id is not 1\n",
2968 dev->name);
2969 }
2970 rc=find_link(dev,
2971 p_connect->host_name, p_connect->WS_name);
2972 if (rc!=0) {
2973 claw_snd_disc(dev, p_ctlbk);
2974 printk(KERN_INFO "%s: Conn Req error : "
2975 "req appl name does not match\n",
2976 dev->name);
2977 }
2978 claw_send_control(dev,
2979 CONNECTION_CONFIRM, p_ctlbk->linkid,
2980 p_ctlbk->correlator,
2981 0, p_connect->host_name,
2982 p_connect->WS_name);
2983 if (p_env->packing == PACKING_ASK) {
 2984 printk("%s: Pack ask: requesting packed connection\n",dev->name);
2985 p_env->packing = PACK_SEND;
2986 claw_snd_conn_req(dev,0);
2987 }
2988 printk(KERN_INFO "%s: CLAW device %.8s: Connection "
2989 "completed link_id=%d.\n",
2990 dev->name, temp_ws_name,
2991 p_ctlbk->linkid);
2992 privptr->active_link_ID=p_ctlbk->linkid;
2993 p_ch=&privptr->channel[WRITE];
2994 wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
2995 break;
2996 case CONNECTION_RESPONSE:
2997 p_connect=(struct conncmd *)&(p_ctlbk->data);
 2998 printk(KERN_INFO "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
2999 "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
3000 dev->name,
3001 p_ctlbk->version,
3002 p_ctlbk->linkid,
3003 p_ctlbk->correlator,
3004 p_ctlbk->rc,
3005 p_connect->host_name,
3006 p_connect->WS_name);
3007
3008 if (p_ctlbk->rc !=0 ) {
3009 printk(KERN_INFO "%s: Conn Resp error: rc=%d \n",
3010 dev->name, p_ctlbk->rc);
3011 return 1;
3012 }
3013 rc=find_link(dev,
3014 p_connect->host_name, p_connect->WS_name);
3015 if (rc!=0) {
3016 claw_snd_disc(dev, p_ctlbk);
3017 printk(KERN_INFO "%s: Conn Resp error: "
3018 "req appl name does not match\n",
3019 dev->name);
3020 }
 3021 /* stays negative until CONNECTION_CONFIRM arrives */
3022 privptr->active_link_ID = - (p_ctlbk->linkid);
3023 break;
3024 case CONNECTION_CONFIRM:
3025 p_connect=(struct conncmd *)&(p_ctlbk->data);
3026 printk(KERN_INFO "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
3027 "Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
3028 dev->name,
3029 p_ctlbk->version,
3030 p_ctlbk->linkid,
3031 p_ctlbk->correlator,
3032 p_connect->host_name,
3033 p_connect->WS_name);
3034 if (p_ctlbk->linkid== -(privptr->active_link_ID)) {
3035 privptr->active_link_ID=p_ctlbk->linkid;
3036 if (p_env->packing > PACKING_ASK) {
 3037 printk(KERN_INFO "%s: Connection confirmed: now packing\n",dev->name);
3038 p_env->packing = DO_PACKED;
3039 }
3040 p_ch=&privptr->channel[WRITE];
3041 wake_up(&p_ch->wait);
3042 }
3043 else {
3044 printk(KERN_INFO "%s: Conn confirm: "
3045 "unexpected linkid=%d \n",
3046 dev->name, p_ctlbk->linkid);
3047 claw_snd_disc(dev, p_ctlbk);
3048 }
3049 break;
3050 case DISCONNECT:
3051 printk(KERN_INFO "%s: Disconnect: "
3052 "Vers=%d,link_id=%d,Corr=%d\n",
3053 dev->name, p_ctlbk->version,
3054 p_ctlbk->linkid, p_ctlbk->correlator);
3055 if ((p_ctlbk->linkid == 2) &&
3056 (p_env->packing == PACK_SEND)) {
3057 privptr->active_link_ID = 1;
3058 p_env->packing = DO_PACKED;
3059 }
3060 else
3061 privptr->active_link_ID=0;
3062 break;
3063 case CLAW_ERROR:
3064 printk(KERN_INFO "%s: CLAW ERROR detected\n",
3065 dev->name);
3066 break;
3067 default:
3068 printk(KERN_INFO "%s: Unexpected command code=%d \n",
3069 dev->name, p_ctlbk->command);
3070 break;
3071 }
3072
3073#ifdef FUNCTRACE
3074 printk(KERN_INFO "%s: %s() exit on line %d, rc = 0\n",
3075 dev->name,__FUNCTION__,__LINE__);
3076#endif
3077
3078 return 0;
3079} /* end of claw_process_control */
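/*
 * Reader's note: the switch above implements the CLAW handshake --
 * SYSTEM_VALIDATE_REQUEST/RESPONSE first (names and frame sizes are
 * checked), then CONNECTION_REQUEST/RESPONSE/CONFIRM to bring up
 * logical link 1.  Packing negotiation rides on top of this: an
 * api_type of "PACKED  " moves p_env->packing through
 * PACKING_ASK -> PACK_SEND -> DO_PACKED as the second (packed)
 * connection request is confirmed.
 */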
3080
3081
3082/*-------------------------------------------------------------------*
3083* claw_send_control *
3084* *
3085*--------------------------------------------------------------------*/
3086
3087static int
3088claw_send_control(struct net_device *dev, __u8 type, __u8 link,
3089 __u8 correlator, __u8 rc, char *local_name, char *remote_name)
3090{
3091 struct claw_privbk *privptr;
3092 struct clawctl *p_ctl;
3093 struct sysval *p_sysval;
3094 struct conncmd *p_connect;
3095 struct sk_buff *skb;
3096
3097#ifdef FUNCTRACE
3098 printk(KERN_INFO "%s:%s > enter \n",dev->name,__FUNCTION__);
3099#endif
3100 CLAW_DBF_TEXT(2,setup,"sndcntl");
3101#ifdef DEBUGMSG
3102 printk(KERN_INFO "%s: Sending Control Packet \n",dev->name);
3103 printk(KERN_INFO "%s: variable type = 0x%X, link = "
3104 "%d, correlator = %d, rc = %d\n",
3105 dev->name,type, link, correlator, rc);
3106 printk(KERN_INFO "%s: variable local_name = %s, "
3107 "remote_name = %s\n",dev->name, local_name, remote_name);
3108#endif
3109 privptr=dev->priv;
3110 p_ctl=(struct clawctl *)&privptr->ctl_bk;
3111
3112 p_ctl->command=type;
3113 p_ctl->version=CLAW_VERSION_ID;
3114 p_ctl->linkid=link;
3115 p_ctl->correlator=correlator;
3116 p_ctl->rc=rc;
3117
3118 p_sysval=(struct sysval *)&p_ctl->data;
3119 p_connect=(struct conncmd *)&p_ctl->data;
3120
3121 switch (p_ctl->command) {
3122 case SYSTEM_VALIDATE_REQUEST:
3123 case SYSTEM_VALIDATE_RESPONSE:
3124 memcpy(&p_sysval->host_name, local_name, 8);
3125 memcpy(&p_sysval->WS_name, remote_name, 8);
3126 if (privptr->p_env->packing > 0) {
3127 p_sysval->read_frame_size=DEF_PACK_BUFSIZE;
3128 p_sysval->write_frame_size=DEF_PACK_BUFSIZE;
3129 } else {
 3130 /* how big is the biggest group of packets */
3131 p_sysval->read_frame_size=privptr->p_env->read_size;
3132 p_sysval->write_frame_size=privptr->p_env->write_size;
3133 }
3134 memset(&p_sysval->reserved, 0x00, 4);
3135 break;
3136 case CONNECTION_REQUEST:
3137 case CONNECTION_RESPONSE:
3138 case CONNECTION_CONFIRM:
3139 case DISCONNECT:
3140 memcpy(&p_sysval->host_name, local_name, 8);
3141 memcpy(&p_sysval->WS_name, remote_name, 8);
3142 if (privptr->p_env->packing > 0) {
3143 /* How big is the biggest packet */
3144 p_connect->reserved1[0]=CLAW_FRAME_SIZE;
3145 p_connect->reserved1[1]=CLAW_FRAME_SIZE;
3146 } else {
3147 memset(&p_connect->reserved1, 0x00, 4);
3148 memset(&p_connect->reserved2, 0x00, 4);
3149 }
3150 break;
3151 default:
3152 break;
3153 }
3154
3155 /* write Control Record to the device */
3156
3157
3158 skb = dev_alloc_skb(sizeof(struct clawctl));
3159 if (!skb) {
3160 printk( "%s:%s low on mem, returning...\n",
3161 dev->name,__FUNCTION__);
3162#ifdef DEBUG
3163 printk(KERN_INFO "%s:%s Exit, rc = ENOMEM\n",
3164 dev->name,__FUNCTION__);
3165#endif
3166 return -ENOMEM;
3167 }
3168 memcpy(skb_put(skb, sizeof(struct clawctl)),
3169 p_ctl, sizeof(struct clawctl));
3170#ifdef IOTRACE
3171 printk(KERN_INFO "%s: outbnd claw cntl data \n",dev->name);
3172 dumpit((char *)p_ctl,sizeof(struct clawctl));
3173#endif
3174 if (privptr->p_env->packing >= PACK_SEND)
3175 claw_hw_tx(skb, dev, 1);
3176 else
3177 claw_hw_tx(skb, dev, 0);
3178#ifdef FUNCTRACE
3179 printk(KERN_INFO "%s:%s Exit on line %d\n",
3180 dev->name,__FUNCTION__,__LINE__);
3181#endif
3182
3183 return 0;
3184} /* end of claw_send_control */
3185
3186/*-------------------------------------------------------------------*
3187* claw_snd_conn_req *
3188* *
3189*--------------------------------------------------------------------*/
3190static int
3191claw_snd_conn_req(struct net_device *dev, __u8 link)
3192{
3193 int rc;
3194 struct claw_privbk *privptr=dev->priv;
3195 struct clawctl *p_ctl;
3196
3197#ifdef FUNCTRACE
3198 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
3199#endif
3200 CLAW_DBF_TEXT(2,setup,"snd_conn");
3201#ifdef DEBUGMSG
3202 printk(KERN_INFO "%s: variable link = %X, dev =\n",dev->name, link);
3203 dumpit((char *) dev, sizeof(struct net_device));
3204#endif
3205 rc = 1;
3206 p_ctl=(struct clawctl *)&privptr->ctl_bk;
3207 p_ctl->linkid = link;
3208 if ( privptr->system_validate_comp==0x00 ) {
3209#ifdef FUNCTRACE
3210 printk(KERN_INFO "%s:%s Exit on line %d, rc = 1\n",
3211 dev->name,__FUNCTION__,__LINE__);
3212#endif
3213 return rc;
3214 }
3215 if (privptr->p_env->packing == PACKING_ASK )
3216 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
3217 WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
3218 if (privptr->p_env->packing == PACK_SEND) {
3219 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
3220 WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
3221 }
3222 if (privptr->p_env->packing == 0)
3223 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
3224 HOST_APPL_NAME, privptr->p_env->api_type);
3225#ifdef FUNCTRACE
3226 printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
3227 dev->name,__FUNCTION__,__LINE__, rc);
3228#endif
3229 return rc;
3230
3231} /* end of claw_snd_conn_req */
3232
3233
3234/*-------------------------------------------------------------------*
3235* claw_snd_disc *
3236* *
3237*--------------------------------------------------------------------*/
3238
3239static int
3240claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
3241{
3242 int rc;
3243 struct conncmd * p_connect;
3244
3245#ifdef FUNCTRACE
3246 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
3247#endif
3248 CLAW_DBF_TEXT(2,setup,"snd_dsc");
3249#ifdef DEBUGMSG
3250 printk(KERN_INFO "%s: variable dev =\n",dev->name);
3251 dumpit((char *) dev, sizeof(struct net_device));
3252 printk(KERN_INFO "%s: variable p_ctl",dev->name);
3253 dumpit((char *) p_ctl, sizeof(struct clawctl));
3254#endif
3255 p_connect=(struct conncmd *)&p_ctl->data;
3256
3257 rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid,
3258 p_ctl->correlator, 0,
3259 p_connect->host_name, p_connect->WS_name);
3260#ifdef FUNCTRACE
3261 printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
3262 dev->name,__FUNCTION__, __LINE__, rc);
3263#endif
3264 return rc;
3265} /* end of claw_snd_disc */
3266
3267
3268/*-------------------------------------------------------------------*
3269* claw_snd_sys_validate_rsp *
3270* *
3271*--------------------------------------------------------------------*/
3272
3273static int
3274claw_snd_sys_validate_rsp(struct net_device *dev,
3275 struct clawctl *p_ctl, __u32 return_code)
3276{
3277 struct claw_env * p_env;
3278 struct claw_privbk *privptr;
3279 int rc;
3280
3281#ifdef FUNCTRACE
3282 printk(KERN_INFO "%s:%s Enter\n",
3283 dev->name,__FUNCTION__);
3284#endif
3285 CLAW_DBF_TEXT(2,setup,"chkresp");
3286#ifdef DEBUGMSG
3287 printk(KERN_INFO "%s: variable return_code = %d, dev =\n",
3288 dev->name, return_code);
3289 dumpit((char *) dev, sizeof(struct net_device));
3290 printk(KERN_INFO "%s: variable p_ctl =\n",dev->name);
3291 dumpit((char *) p_ctl, sizeof(struct clawctl));
3292#endif
3293 privptr = dev->priv;
3294 p_env=privptr->p_env;
3295 rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
3296 p_ctl->linkid,
3297 p_ctl->correlator,
3298 return_code,
3299 p_env->host_name,
3300 p_env->adapter_name );
3301#ifdef FUNCTRACE
3302 printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
3303 dev->name,__FUNCTION__,__LINE__, rc);
3304#endif
3305 return rc;
3306} /* end of claw_snd_sys_validate_rsp */
3307
3308/*-------------------------------------------------------------------*
3309* claw_strt_conn_req *
3310* *
3311*--------------------------------------------------------------------*/
3312
3313static int
3314claw_strt_conn_req(struct net_device *dev )
3315{
3316 int rc;
3317
3318#ifdef FUNCTRACE
3319 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
3320#endif
3321 CLAW_DBF_TEXT(2,setup,"conn_req");
3322#ifdef DEBUGMSG
3323 printk(KERN_INFO "%s: variable dev =\n",dev->name);
3324 dumpit((char *) dev, sizeof(struct net_device));
3325#endif
3326 rc=claw_snd_conn_req(dev, 1);
3327#ifdef FUNCTRACE
3328 printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
3329 dev->name,__FUNCTION__,__LINE__, rc);
3330#endif
3331 return rc;
3332} /* end of claw_strt_conn_req */
3333
3334
3335
3336/*-------------------------------------------------------------------*
3337 * claw_stats *
3338 *-------------------------------------------------------------------*/
3339
 3340static struct net_device_stats *
 3341claw_stats(struct net_device *dev)
3342{
3343 struct claw_privbk *privptr;
3344#ifdef FUNCTRACE
3345 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
3346#endif
3347 CLAW_DBF_TEXT(4,trace,"stats");
3348 privptr = dev->priv;
3349#ifdef FUNCTRACE
3350 printk(KERN_INFO "%s:%s Exit on line %d\n",
3351 dev->name,__FUNCTION__,__LINE__);
3352#endif
3353 return &privptr->stats;
3354} /* end of claw_stats */
3355
3356
3357/*-------------------------------------------------------------------*
3358* unpack_read *
3359* *
3360*--------------------------------------------------------------------*/
3361static void
3362unpack_read(struct net_device *dev )
3363{
3364 struct sk_buff *skb;
3365 struct claw_privbk *privptr;
3366 struct claw_env *p_env;
3367 struct ccwbk *p_this_ccw;
3368 struct ccwbk *p_first_ccw;
3369 struct ccwbk *p_last_ccw;
3370 struct clawph *p_packh;
3371 void *p_packd;
3372 struct clawctl *p_ctlrec=NULL;
3373
3374 __u32 len_of_data;
3375 __u32 pack_off;
3376 __u8 link_num;
3377 __u8 mtc_this_frm=0;
3378 __u32 bytes_to_mov;
3379 struct chbk *p_ch = NULL;
3380 int i=0;
3381 int p=0;
3382
3383#ifdef FUNCTRACE
3384 printk(KERN_INFO "%s:%s enter \n",dev->name,__FUNCTION__);
3385#endif
3386 CLAW_DBF_TEXT(4,trace,"unpkread");
3387 p_first_ccw=NULL;
3388 p_last_ccw=NULL;
3389 p_packh=NULL;
3390 p_packd=NULL;
3391 privptr=dev->priv;
3392 p_env = privptr->p_env;
3393 p_this_ccw=privptr->p_read_active_first;
3394 i=0;
3395 while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
3396#ifdef IOTRACE
3397 printk(KERN_INFO "%s p_this_ccw \n",dev->name);
3398 dumpit((char*)p_this_ccw, sizeof(struct ccwbk));
3399 printk(KERN_INFO "%s Inbound p_this_ccw->p_buffer(64)"
3400 " pk=%d \n",dev->name,p_env->packing);
3401 dumpit((char *)p_this_ccw->p_buffer, 64 );
3402#endif
3403 pack_off = 0;
3404 p = 0;
3405 p_this_ccw->header.flag=CLAW_PENDING;
3406 privptr->p_read_active_first=p_this_ccw->next;
3407 p_this_ccw->next=NULL;
3408 p_packh = (struct clawph *)p_this_ccw->p_buffer;
3409 if ((p_env->packing == PACK_SEND) &&
3410 (p_packh->len == 32) &&
3411 (p_packh->link_num == 0)) { /* is it a packed ctl rec? */
3412 p_packh++; /* peek past pack header */
3413 p_ctlrec = (struct clawctl *)p_packh;
 3414 p_packh--; /* undo the peek */
3415 if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
3416 (p_ctlrec->command == CONNECTION_CONFIRM))
3417 p_env->packing = DO_PACKED;
3418 }
3419 if (p_env->packing == DO_PACKED)
3420 link_num=p_packh->link_num;
3421 else
3422 link_num=p_this_ccw->header.opcode / 8;
3423 if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
3424#ifdef DEBUGMSG
3425 printk(KERN_INFO "%s: %s > More_to_come is ON\n",
3426 dev->name,__FUNCTION__);
3427#endif
3428 mtc_this_frm=1;
3429 if (p_this_ccw->header.length!=
3430 privptr->p_env->read_size ) {
 3431 printk(KERN_INFO "%s: Invalid frame detected; "
 3432 "length is %02x\n",
3433 dev->name, p_this_ccw->header.length);
3434 }
3435 }
3436
3437 if (privptr->mtc_skipping) {
3438 /*
3439 * We're in the mode of skipping past a
3440 * multi-frame message
3441 * that we can't process for some reason or other.
3442 * The first frame without the More-To-Come flag is
3443 * the last frame of the skipped message.
3444 */
3445 /* in case of More-To-Come not set in this frame */
3446 if (mtc_this_frm==0) {
3447 privptr->mtc_skipping=0; /* Ok, the end */
3448 privptr->mtc_logical_link=-1;
3449 }
3450#ifdef DEBUGMSG
3451 printk(KERN_INFO "%s:%s goto next "
3452 "frame from MoretoComeSkip \n",
3453 dev->name,__FUNCTION__);
3454#endif
3455 goto NextFrame;
3456 }
3457
3458 if (link_num==0) {
3459 claw_process_control(dev, p_this_ccw);
3460#ifdef DEBUGMSG
3461 printk(KERN_INFO "%s:%s goto next "
3462 "frame from claw_process_control \n",
3463 dev->name,__FUNCTION__);
3464#endif
3465 CLAW_DBF_TEXT(4,trace,"UnpkCntl");
3466 goto NextFrame;
3467 }
3468unpack_next:
3469 if (p_env->packing == DO_PACKED) {
3470 if (pack_off > p_env->read_size)
3471 goto NextFrame;
3472 p_packd = p_this_ccw->p_buffer+pack_off;
3473 p_packh = (struct clawph *) p_packd;
3474 if ((p_packh->len == 0) || /* all done with this frame? */
3475 (p_packh->flag != 0))
3476 goto NextFrame;
3477 bytes_to_mov = p_packh->len;
3478 pack_off += bytes_to_mov+sizeof(struct clawph);
3479 p++;
3480 } else {
3481 bytes_to_mov=p_this_ccw->header.length;
3482 }
3483 if (privptr->mtc_logical_link<0) {
3484#ifdef DEBUGMSG
3485 printk(KERN_INFO "%s: %s mtc_logical_link < 0 \n",
3486 dev->name,__FUNCTION__);
3487#endif
3488
3489 /*
3490 * if More-To-Come is set in this frame then we don't know
3491 * length of entire message, and hence have to allocate
3492 * large buffer */
3493
3494 /* We are starting a new envelope */
3495 privptr->mtc_offset=0;
3496 privptr->mtc_logical_link=link_num;
3497 }
3498
3499 if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) {
3500 /* error */
3501#ifdef DEBUGMSG
3502 printk(KERN_INFO "%s: %s > goto next "
3503 "frame from MoretoComeSkip \n",
3504 dev->name,
3505 __FUNCTION__);
3506 printk(KERN_INFO " bytes_to_mov %d > (MAX_ENVELOPE_"
3507 "SIZE-privptr->mtc_offset %d)\n",
3508 bytes_to_mov,(MAX_ENVELOPE_SIZE- privptr->mtc_offset));
3509#endif
3510 privptr->stats.rx_frame_errors++;
3511 goto NextFrame;
3512 }
3513 if (p_env->packing == DO_PACKED) {
3514 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
3515 p_packd+sizeof(struct clawph), bytes_to_mov);
3516
3517 } else {
3518 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
3519 p_this_ccw->p_buffer, bytes_to_mov);
3520 }
3521#ifdef DEBUGMSG
3522 printk(KERN_INFO "%s: %s() received data \n",
3523 dev->name,__FUNCTION__);
3524 if (p_env->packing == DO_PACKED)
3525 dumpit((char *)p_packd+sizeof(struct clawph),32);
3526 else
3527 dumpit((char *)p_this_ccw->p_buffer, 32);
3528 printk(KERN_INFO "%s: %s() bytelength %d \n",
3529 dev->name,__FUNCTION__,bytes_to_mov);
3530#endif
3531 if (mtc_this_frm==0) {
3532 len_of_data=privptr->mtc_offset+bytes_to_mov;
3533 skb=dev_alloc_skb(len_of_data);
3534 if (skb) {
3535 memcpy(skb_put(skb,len_of_data),
3536 privptr->p_mtc_envelope,
3537 len_of_data);
3538 skb->mac.raw=skb->data;
3539 skb->dev=dev;
3540 skb->protocol=htons(ETH_P_IP);
3541 skb->ip_summed=CHECKSUM_UNNECESSARY;
3542 privptr->stats.rx_packets++;
3543 privptr->stats.rx_bytes+=len_of_data;
3544 netif_rx(skb);
3545#ifdef DEBUGMSG
3546 printk(KERN_INFO "%s: %s() netif_"
3547 "rx(skb) completed \n",
3548 dev->name,__FUNCTION__);
3549#endif
3550 }
3551 else {
3552 privptr->stats.rx_dropped++;
3553 printk(KERN_WARNING "%s: %s() low on memory\n",
3554 dev->name,__FUNCTION__);
3555 }
3556 privptr->mtc_offset=0;
3557 privptr->mtc_logical_link=-1;
3558 }
3559 else {
3560 privptr->mtc_offset+=bytes_to_mov;
3561 }
3562 if (p_env->packing == DO_PACKED)
3563 goto unpack_next;
3564NextFrame:
3565 /*
3566 * Remove ThisCCWblock from active read queue, and add it
3567 * to queue of free blocks to be reused.
3568 */
3569 i++;
3570 p_this_ccw->header.length=0xffff;
3571 p_this_ccw->header.opcode=0xff;
3572 /*
3573 * add this one to the free queue for later reuse
3574 */
3575 if (p_first_ccw==NULL) {
3576 p_first_ccw = p_this_ccw;
3577 }
3578 else {
3579 p_last_ccw->next = p_this_ccw;
3580 }
3581 p_last_ccw = p_this_ccw;
3582 /*
3583 * chain to next block on active read queue
3584 */
3585 p_this_ccw = privptr->p_read_active_first;
3586 CLAW_DBF_TEXT_(4,trace,"rxpkt %d",p);
3587 } /* end of while */
3588
3589 /* check validity */
3590
3591#ifdef IOTRACE
3592 printk(KERN_INFO "%s:%s processed frame is %d \n",
3593 dev->name,__FUNCTION__,i);
3594 printk(KERN_INFO "%s:%s F:%lx L:%lx\n",
3595 dev->name,
3596 __FUNCTION__,
3597 (unsigned long)p_first_ccw,
3598 (unsigned long)p_last_ccw);
3599#endif
3600 CLAW_DBF_TEXT_(4,trace,"rxfrm %d",i);
3601 add_claw_reads(dev, p_first_ccw, p_last_ccw);
3602 p_ch=&privptr->channel[READ];
3603 claw_strt_read(dev, LOCK_YES);
3604#ifdef FUNCTRACE
3605 printk(KERN_INFO "%s: %s exit on line %d\n",
3606 dev->name, __FUNCTION__, __LINE__);
3607#endif
3608 return;
3609} /* end of unpack_read */
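/*
 * Hedged illustration (not part of the driver): the packed-frame walk
 * performed by unpack_read() above.  Each sub-packet is a struct clawph
 * header followed by ph->len data bytes; a zero length or a non-zero
 * flag ends the frame.  This helper and its names are hypothetical.
 */
static int
example_count_packed(unsigned char *frame, int frame_len)
{
        struct clawph *ph;
        int off = 0, n = 0;

        while (off < frame_len) {
                ph = (struct clawph *)(frame + off);
                if (ph->len == 0 || ph->flag != 0)
                        break;  /* end of packed data in this frame */
                off += sizeof(struct clawph) + ph->len;
                n++;
        }
        return n;
}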
3610
3611/*-------------------------------------------------------------------*
3612* claw_strt_read *
3613* *
3614*--------------------------------------------------------------------*/
3615static void
3616claw_strt_read (struct net_device *dev, int lock )
3617{
3618 int rc = 0;
3619 __u32 parm;
3620 unsigned long saveflags = 0;
3621 struct claw_privbk *privptr=dev->priv;
 3622 struct ccwbk *p_ccwbk;
3623 struct chbk *p_ch;
3624 struct clawh *p_clawh;
3625 p_ch=&privptr->channel[READ];
3626
3627#ifdef FUNCTRACE
3628 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
3629 printk(KERN_INFO "%s: variable lock = %d, dev =\n",dev->name, lock);
3630 dumpit((char *) dev, sizeof(struct net_device));
3631#endif
3632 CLAW_DBF_TEXT(4,trace,"StRdNter");
3633 p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
3634 p_clawh->flag=CLAW_IDLE; /* 0x00 */
3635
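 /*
  * Reader's note: p_claw_signal_blk is the flag byte the adapter
  * inspects; it is left CLAW_IDLE unless a read or write is still
  * pending, in which case it is set CLAW_BUSY below.
  */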
3636 if ((privptr->p_write_active_first!=NULL &&
3637 privptr->p_write_active_first->header.flag!=CLAW_PENDING) ||
3638 (privptr->p_read_active_first!=NULL &&
3639 privptr->p_read_active_first->header.flag!=CLAW_PENDING )) {
3640 p_clawh->flag=CLAW_BUSY; /* 0xff */
3641 }
3642#ifdef DEBUGMSG
3643 printk(KERN_INFO "%s:%s state-%02x\n" ,
3644 dev->name,__FUNCTION__, p_ch->claw_state);
3645#endif
3646 if (lock==LOCK_YES) {
3647 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
3648 }
3649 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
3650#ifdef DEBUGMSG
3651 printk(KERN_INFO "%s: HOT READ started in %s\n" ,
3652 dev->name,__FUNCTION__);
3653 p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
3654 dumpit((char *)&p_clawh->flag , 1);
3655#endif
3656 CLAW_DBF_TEXT(4,trace,"HotRead");
3657 p_ccwbk=privptr->p_read_active_first;
3658 parm = (unsigned long) p_ch;
3659 rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
3660 0xff, 0);
3661 if (rc != 0) {
3662 ccw_check_return_code(p_ch->cdev, rc);
3663 }
3664 }
3665 else {
3666#ifdef DEBUGMSG
3667 printk(KERN_INFO "%s: No READ started by %s() In progress\n" ,
3668 dev->name,__FUNCTION__);
3669#endif
3670 CLAW_DBF_TEXT(2,trace,"ReadAct");
3671 }
3672
3673 if (lock==LOCK_YES) {
3674 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
3675 }
3676#ifdef FUNCTRACE
3677 printk(KERN_INFO "%s:%s Exit on line %d\n",
3678 dev->name,__FUNCTION__,__LINE__);
3679#endif
3680 CLAW_DBF_TEXT(4,trace,"StRdExit");
3681 return;
3682} /* end of claw_strt_read */
3683
3684/*-------------------------------------------------------------------*
3685* claw_strt_out_IO *
3686* *
3687*--------------------------------------------------------------------*/
3688
3689static void
3690claw_strt_out_IO( struct net_device *dev )
3691{
3692 int rc = 0;
3693 unsigned long parm;
3694 struct claw_privbk *privptr;
3695 struct chbk *p_ch;
3696 struct ccwbk *p_first_ccw;
3697
3698#ifdef FUNCTRACE
3699 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
3700#endif
3701 if (!dev) {
3702 return;
3703 }
3704 privptr=(struct claw_privbk *)dev->priv;
3705 p_ch=&privptr->channel[WRITE];
3706
3707#ifdef DEBUGMSG
3708 printk(KERN_INFO "%s:%s state-%02x\n" ,
3709 dev->name,__FUNCTION__,p_ch->claw_state);
3710#endif
3711 CLAW_DBF_TEXT(4,trace,"strt_io");
3712 p_first_ccw=privptr->p_write_active_first;
3713
3714 if (p_ch->claw_state == CLAW_STOP)
3715 return;
3716 if (p_first_ccw == NULL) {
3717#ifdef FUNCTRACE
3718 printk(KERN_INFO "%s:%s Exit on line %d\n",
3719 dev->name,__FUNCTION__,__LINE__);
3720#endif
3721 return;
3722 }
3723 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
3724 parm = (unsigned long) p_ch;
3725#ifdef DEBUGMSG
3726 printk(KERN_INFO "%s:%s do_io \n" ,dev->name,__FUNCTION__);
3727 dumpit((char *)p_first_ccw, sizeof(struct ccwbk));
3728#endif
3729 CLAW_DBF_TEXT(2,trace,"StWrtIO");
3730 rc = ccw_device_start (p_ch->cdev,&p_first_ccw->write, parm,
3731 0xff, 0);
3732 if (rc != 0) {
3733 ccw_check_return_code(p_ch->cdev, rc);
3734 }
3735 }
3736 dev->trans_start = jiffies;
3737#ifdef FUNCTRACE
3738 printk(KERN_INFO "%s:%s Exit on line %d\n",
3739 dev->name,__FUNCTION__,__LINE__);
3740#endif
3741
3742 return;
3743} /* end of claw_strt_out_IO */
3744
3745/*-------------------------------------------------------------------*
3746* Free write buffers *
3747* *
3748*--------------------------------------------------------------------*/
3749
3750static void
3751claw_free_wrt_buf( struct net_device *dev )
3752{
3753
3754 struct claw_privbk *privptr=(struct claw_privbk *)dev->priv;
 3755 struct ccwbk *p_first_ccw;
 3756 struct ccwbk *p_last_ccw;
 3757 struct ccwbk *p_this_ccw;
 3758 struct ccwbk *p_next_ccw;
3759#ifdef IOTRACE
 3760 struct ccwbk *p_buf;
3761#endif
3762#ifdef FUNCTRACE
3763 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
3764 printk(KERN_INFO "%s: free count = %d variable dev =\n",
3765 dev->name,privptr->write_free_count);
3766#endif
3767 CLAW_DBF_TEXT(4,trace,"freewrtb");
3768 /* scan the write queue to free any completed write packets */
3769 p_first_ccw=NULL;
3770 p_last_ccw=NULL;
3771#ifdef IOTRACE
3772 printk(KERN_INFO "%s: Dump current CCW chain \n",dev->name );
3773 p_buf=privptr->p_write_active_first;
3774 while (p_buf!=NULL) {
3775 dumpit((char *)p_buf, sizeof(struct ccwbk));
3776 p_buf=p_buf->next;
3777 }
3778 if (p_buf==NULL) {
3779 printk(KERN_INFO "%s: privptr->p_write_"
3780 "active_first==NULL\n",dev->name );
3781 }
3782 p_buf=(struct ccwbk*)privptr->p_end_ccw;
3783 dumpit((char *)p_buf, sizeof(struct endccw));
3784#endif
3785 p_this_ccw=privptr->p_write_active_first;
3786 while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
3787 {
3788 p_next_ccw = p_this_ccw->next;
3789 if (((p_next_ccw!=NULL) &&
3790 (p_next_ccw->header.flag!=CLAW_PENDING)) ||
3791 ((p_this_ccw == privptr->p_write_active_last) &&
3792 (p_this_ccw->header.flag!=CLAW_PENDING))) {
3793 /* The next CCW is OK or this is */
3794 /* the last CCW...free it @A1A */
3795 privptr->p_write_active_first=p_this_ccw->next;
3796 p_this_ccw->header.flag=CLAW_PENDING;
3797 p_this_ccw->next=privptr->p_write_free_chain;
3798 privptr->p_write_free_chain=p_this_ccw;
3799 ++privptr->write_free_count;
3800 privptr->stats.tx_bytes+= p_this_ccw->write.count;
3801 p_this_ccw=privptr->p_write_active_first;
3802 privptr->stats.tx_packets++;
3803 }
3804 else {
3805 break;
3806 }
3807 }
3808 if (privptr->write_free_count!=0) {
3809 claw_clearbit_busy(TB_NOBUFFER,dev);
3810 }
3811 /* whole chain removed? */
3812 if (privptr->p_write_active_first==NULL) {
3813 privptr->p_write_active_last=NULL;
3814#ifdef DEBUGMSG
3815 printk(KERN_INFO "%s:%s p_write_"
3816 "active_first==NULL\n",dev->name,__FUNCTION__);
3817#endif
3818 }
3819#ifdef IOTRACE
3820 printk(KERN_INFO "%s: Dump arranged CCW chain \n",dev->name );
3821 p_buf=privptr->p_write_active_first;
3822 while (p_buf!=NULL) {
3823 dumpit((char *)p_buf, sizeof(struct ccwbk));
3824 p_buf=p_buf->next;
3825 }
3826 if (p_buf==NULL) {
3827 printk(KERN_INFO "%s: privptr->p_write_active_"
3828 "first==NULL\n",dev->name );
3829 }
3830 p_buf=(struct ccwbk*)privptr->p_end_ccw;
3831 dumpit((char *)p_buf, sizeof(struct endccw));
3832#endif
3833
3834 CLAW_DBF_TEXT_(4,trace,"FWC=%d",privptr->write_free_count);
3835#ifdef FUNCTRACE
3836 printk(KERN_INFO "%s:%s Exit on line %d free_count =%d\n",
3837 dev->name,__FUNCTION__, __LINE__,privptr->write_free_count);
3838#endif
3839 return;
3840}
3841
3842/*-------------------------------------------------------------------*
3843* claw free netdevice *
3844* *
3845*--------------------------------------------------------------------*/
3846static void
3847claw_free_netdevice(struct net_device * dev, int free_dev)
3848{
3849 struct claw_privbk *privptr;
3850#ifdef FUNCTRACE
3851 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
3852#endif
3853 CLAW_DBF_TEXT(2,setup,"free_dev");
3854
3855 if (!dev)
3856 return;
3857 CLAW_DBF_TEXT_(2,setup,"%s",dev->name);
3858 privptr = dev->priv;
3859 if (dev->flags & IFF_RUNNING)
3860 claw_release(dev);
3861 if (privptr) {
3862 privptr->channel[READ].ndev = NULL; /* say it's free */
3863 }
3864 dev->priv=NULL;
3865#ifdef MODULE
3866 if (free_dev) {
3867 free_netdev(dev);
3868 }
3869#endif
 3870 CLAW_DBF_TEXT(2,setup,"free_ok");
3871#ifdef FUNCTRACE
3872 printk(KERN_INFO "%s:%s Exit\n",dev->name,__FUNCTION__);
3873#endif
3874}
3875
3876/**
3877 * Claw init netdevice
3878 * Initialize everything of the net device except the name and the
3879 * channel structs.
3880 */
3881static void
3882claw_init_netdevice(struct net_device * dev)
3883{
3884#ifdef FUNCTRACE
3885 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
3886#endif
3887 CLAW_DBF_TEXT(2,setup,"init_dev");
3888 CLAW_DBF_TEXT_(2,setup,"%s",dev->name);
3889 if (!dev) {
3890 printk(KERN_WARNING "claw:%s BAD Device exit line %d\n",
3891 __FUNCTION__,__LINE__);
3892 CLAW_DBF_TEXT(2,setup,"baddev");
3893 return;
3894 }
3895 dev->mtu = CLAW_DEFAULT_MTU_SIZE;
3896 dev->hard_start_xmit = claw_tx;
3897 dev->open = claw_open;
3898 dev->stop = claw_release;
3899 dev->get_stats = claw_stats;
3900 dev->change_mtu = claw_change_mtu;
3901 dev->hard_header_len = 0;
3902 dev->addr_len = 0;
3903 dev->type = ARPHRD_SLIP;
3904 dev->tx_queue_len = 1300;
3905 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
3906 SET_MODULE_OWNER(dev);
3907#ifdef FUNCTRACE
3908 printk(KERN_INFO "%s:%s Exit\n",dev->name,__FUNCTION__);
3909#endif
3910 CLAW_DBF_TEXT(2,setup,"initok");
3911 return;
3912}
3913
3914/**
3915 * Init a new channel in the privptr->channel[i].
3916 *
3917 * @param cdev The ccw_device to be added.
3918 *
3919 * @return 0 on success, !0 on error.
3920 */
3921static int
3922add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
3923{
3924 struct chbk *p_ch;
3925
3926#ifdef FUNCTRACE
3927 printk(KERN_INFO "%s:%s Enter\n",cdev->dev.bus_id,__FUNCTION__);
3928#endif
3929 CLAW_DBF_TEXT_(2,setup,"%s",cdev->dev.bus_id);
3930 privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
3931 p_ch = &privptr->channel[i];
3932 p_ch->cdev = cdev;
3933 snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", cdev->dev.bus_id);
3934 sscanf(cdev->dev.bus_id+4,"%x",&p_ch->devno);
3935 if ((p_ch->irb = kmalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
3936 printk(KERN_WARNING "%s Out of memory in %s for irb\n",
3937 p_ch->id,__FUNCTION__);
3938#ifdef FUNCTRACE
3939 printk(KERN_INFO "%s:%s Exit on line %d\n",
3940 p_ch->id,__FUNCTION__,__LINE__);
3941#endif
3942 return -ENOMEM;
3943 }
3944 memset(p_ch->irb, 0, sizeof (struct irb));
3945#ifdef FUNCTRACE
3946 printk(KERN_INFO "%s:%s Exit on line %d\n",
3947 cdev->dev.bus_id,__FUNCTION__,__LINE__);
3948#endif
3949 return 0;
3950}
3951
3952
3953/**
3954 *
3955 * Setup an interface.
3956 *
3957 * @param cgdev Device to be setup.
3958 *
3959 * @returns 0 on success, !0 on failure.
3960 */
3961static int
3962claw_new_device(struct ccwgroup_device *cgdev)
3963{
3964 struct claw_privbk *privptr;
3965 struct claw_env *p_env;
3966 struct net_device *dev;
3967 int ret;
3968
3969 pr_debug("%s() called\n", __FUNCTION__);
3970 printk(KERN_INFO "claw: add for %s\n",cgdev->cdev[READ]->dev.bus_id);
3971 CLAW_DBF_TEXT(2,setup,"new_dev");
3972 privptr = cgdev->dev.driver_data;
3973 cgdev->cdev[READ]->dev.driver_data = privptr;
3974 cgdev->cdev[WRITE]->dev.driver_data = privptr;
3975 if (!privptr)
3976 return -ENODEV;
3977 p_env = privptr->p_env;
3978 sscanf(cgdev->cdev[READ]->dev.bus_id+4,"%x",
3979 &p_env->devno[READ]);
3980 sscanf(cgdev->cdev[WRITE]->dev.bus_id+4,"%x",
3981 &p_env->devno[WRITE]);
3982 ret = add_channel(cgdev->cdev[0],0,privptr);
3983 if (ret == 0)
3984 ret = add_channel(cgdev->cdev[1],1,privptr);
3985 if (ret != 0) {
3986 printk(KERN_WARNING
3987 "add channel failed "
3988 "with ret = %d\n", ret);
3989 goto out;
3990 }
3991 ret = ccw_device_set_online(cgdev->cdev[READ]);
3992 if (ret != 0) {
3993 printk(KERN_WARNING
3994 "claw: ccw_device_set_online %s READ failed "
3995 "with ret = %d\n",cgdev->cdev[READ]->dev.bus_id,ret);
3996 goto out;
3997 }
3998 ret = ccw_device_set_online(cgdev->cdev[WRITE]);
3999 if (ret != 0) {
4000 printk(KERN_WARNING
4001 "claw: ccw_device_set_online %s WRITE failed "
4002 "with ret = %d\n",cgdev->cdev[WRITE]->dev.bus_id, ret);
4003 goto out;
4004 }
4005 dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
4006 if (!dev) {
4007 printk(KERN_WARNING "%s:alloc_netdev failed\n",__FUNCTION__);
4008 goto out;
4009 }
4010 dev->priv = privptr;
4011 cgdev->dev.driver_data = privptr;
4012 cgdev->cdev[READ]->dev.driver_data = privptr;
4013 cgdev->cdev[WRITE]->dev.driver_data = privptr;
4014 /* sysfs magic */
4015 SET_NETDEV_DEV(dev, &cgdev->dev);
4016 if (register_netdev(dev) != 0) {
4017 claw_free_netdevice(dev, 1);
4018 CLAW_DBF_TEXT(2,trace,"regfail");
4019 goto out;
4020 }
4021 dev->flags &=~IFF_RUNNING;
4022 if (privptr->buffs_alloc == 0) {
4023 ret=init_ccw_bk(dev);
4024 if (ret !=0) {
4025 printk(KERN_WARNING
4026 "claw: init_ccw_bk failed with ret=%d\n", ret);
4027 unregister_netdev(dev);
4028 claw_free_netdevice(dev,1);
4029 CLAW_DBF_TEXT(2,trace,"ccwmem");
4030 goto out;
4031 }
4032 }
4033 privptr->channel[READ].ndev = dev;
4034 privptr->channel[WRITE].ndev = dev;
4035 privptr->p_env->ndev = dev;
4036
4037 printk(KERN_INFO "%s:readsize=%d writesize=%d "
4038 "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
4039 dev->name, p_env->read_size,
4040 p_env->write_size, p_env->read_buffers,
4041 p_env->write_buffers, p_env->devno[READ],
4042 p_env->devno[WRITE]);
4043 printk(KERN_INFO "%s:host_name:%.8s, adapter_name "
4044 ":%.8s api_type: %.8s\n",
4045 dev->name, p_env->host_name,
4046 p_env->adapter_name , p_env->api_type);
4047 return 0;
4048out:
4049 ccw_device_set_offline(cgdev->cdev[1]);
4050 ccw_device_set_offline(cgdev->cdev[0]);
4051
4052 return -ENODEV;
4053}
4054
4055static void
4056claw_purge_skb_queue(struct sk_buff_head *q)
4057{
4058 struct sk_buff *skb;
4059
4060 CLAW_DBF_TEXT(4,trace,"purgque");
4061
4062 while ((skb = skb_dequeue(q))) {
4063 atomic_dec(&skb->users);
4064 dev_kfree_skb_irq(skb);
4065 }
4066}
4067
4068/**
4069 * Shutdown an interface.
4070 *
4071 * @param cgdev Device to be shut down.
4072 *
4073 * @returns 0 on success, !0 on failure.
4074 */
4075static int
4076claw_shutdown_device(struct ccwgroup_device *cgdev)
4077{
4078 struct claw_privbk *priv;
4079 struct net_device *ndev;
4080 int ret;
4081
4082 pr_debug("%s() called\n", __FUNCTION__);
4083 CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id);
4084 priv = cgdev->dev.driver_data;
4085 if (!priv)
4086 return -ENODEV;
4087 ndev = priv->channel[READ].ndev;
4088 if (ndev) {
4089 /* Close the device */
4090 printk(KERN_INFO
 4091 "%s: shutting down\n",ndev->name);
4092 if (ndev->flags & IFF_RUNNING)
4093 ret = claw_release(ndev);
4094 ndev->flags &=~IFF_RUNNING;
4095 unregister_netdev(ndev);
4096 ndev->priv = NULL; /* cgdev data, not ndev's to free */
4097 claw_free_netdevice(ndev, 1);
4098 priv->channel[READ].ndev = NULL;
4099 priv->channel[WRITE].ndev = NULL;
4100 priv->p_env->ndev = NULL;
4101 }
4102 ccw_device_set_offline(cgdev->cdev[1]);
4103 ccw_device_set_offline(cgdev->cdev[0]);
4104 return 0;
4105}
4106
4107static void
4108claw_remove_device(struct ccwgroup_device *cgdev)
4109{
4110 struct claw_privbk *priv;
4111
4112 pr_debug("%s() called\n", __FUNCTION__);
4113 CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id);
4114 priv = cgdev->dev.driver_data;
4115 if (!priv) {
 4116 printk(KERN_WARNING "claw: %s() no private data, exiting\n",__FUNCTION__);
4117 return;
4118 }
4119 printk(KERN_INFO "claw: %s() called %s will be removed.\n",
4120 __FUNCTION__,cgdev->cdev[0]->dev.bus_id);
4121 if (cgdev->state == CCWGROUP_ONLINE)
4122 claw_shutdown_device(cgdev);
4123 claw_remove_files(&cgdev->dev);
4124 if (priv->p_mtc_envelope!=NULL) {
4125 kfree(priv->p_mtc_envelope);
4126 priv->p_mtc_envelope=NULL;
4127 }
4128 if (priv->p_env != NULL) {
4129 kfree(priv->p_env);
4130 priv->p_env=NULL;
4131 }
4132 if (priv->channel[0].irb != NULL) {
4133 kfree(priv->channel[0].irb);
4134 priv->channel[0].irb=NULL;
4135 }
4136 if (priv->channel[1].irb != NULL) {
4137 kfree(priv->channel[1].irb);
4138 priv->channel[1].irb=NULL;
4139 }
4140 kfree(priv);
4141 cgdev->dev.driver_data=NULL;
4142 cgdev->cdev[READ]->dev.driver_data = NULL;
4143 cgdev->cdev[WRITE]->dev.driver_data = NULL;
4144 put_device(&cgdev->dev);
4145}
4146
4147
4148/*
4149 * sysfs attributes
4150 */
4151static ssize_t
4152claw_hname_show(struct device *dev, char *buf)
4153{
4154 struct claw_privbk *priv;
4155 struct claw_env * p_env;
4156
4157 priv = dev->driver_data;
4158 if (!priv)
4159 return -ENODEV;
4160 p_env = priv->p_env;
4161 return sprintf(buf, "%s\n",p_env->host_name);
4162}
4163
4164static ssize_t
4165claw_hname_write(struct device *dev, const char *buf, size_t count)
4166{
4167 struct claw_privbk *priv;
4168 struct claw_env * p_env;
4169
4170 priv = dev->driver_data;
4171 if (!priv)
4172 return -ENODEV;
4173 p_env = priv->p_env;
4174 if (count > MAX_NAME_LEN+1)
4175 return -EINVAL;
4176 memset(p_env->host_name, 0x20, MAX_NAME_LEN);
4177 strncpy(p_env->host_name,buf, count);
4178 p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */
4179 p_env->host_name[MAX_NAME_LEN] = 0x00;
4180 CLAW_DBF_TEXT(2,setup,"HstnSet");
4181 CLAW_DBF_TEXT_(2,setup,"%s",p_env->host_name);
4182
4183 return count;
4184}
4185
4186static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);
4187
4188static ssize_t
4189claw_adname_show(struct device *dev, char *buf)
4190{
4191 struct claw_privbk *priv;
4192 struct claw_env * p_env;
4193
4194 priv = dev->driver_data;
4195 if (!priv)
4196 return -ENODEV;
4197 p_env = priv->p_env;
4198 return sprintf(buf, "%s\n",p_env->adapter_name);
4199}
4200
4201static ssize_t
4202claw_adname_write(struct device *dev, const char *buf, size_t count)
4203{
4204 struct claw_privbk *priv;
4205 struct claw_env * p_env;
4206
4207 priv = dev->driver_data;
4208 if (!priv)
4209 return -ENODEV;
4210 p_env = priv->p_env;
4211 if (count > MAX_NAME_LEN+1)
4212 return -EINVAL;
4213 memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
4214 strncpy(p_env->adapter_name,buf, count);
4215 p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */
4216 p_env->adapter_name[MAX_NAME_LEN] = 0x00;
4217 CLAW_DBF_TEXT(2,setup,"AdnSet");
4218 CLAW_DBF_TEXT_(2,setup,"%s",p_env->adapter_name);
4219
4220 return count;
4221}
4222
4223static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);
4224
4225static ssize_t
4226claw_apname_show(struct device *dev, char *buf)
4227{
4228 struct claw_privbk *priv;
4229 struct claw_env * p_env;
4230
4231 priv = dev->driver_data;
4232 if (!priv)
4233 return -ENODEV;
4234 p_env = priv->p_env;
4235 return sprintf(buf, "%s\n",
4236 p_env->api_type);
4237}
4238
4239static ssize_t
4240claw_apname_write(struct device *dev, const char *buf, size_t count)
4241{
4242 struct claw_privbk *priv;
4243 struct claw_env * p_env;
4244
4245 priv = dev->driver_data;
4246 if (!priv)
4247 return -ENODEV;
4248 p_env = priv->p_env;
4249 if (count > MAX_NAME_LEN+1)
4250 return -EINVAL;
4251 memset(p_env->api_type, 0x20, MAX_NAME_LEN);
4252 strncpy(p_env->api_type,buf, count);
 4253 p_env->api_type[count-1] = 0x20; /* clear extra 0x0a */
4254 p_env->api_type[MAX_NAME_LEN] = 0x00;
4255 if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
4256 p_env->read_size=DEF_PACK_BUFSIZE;
4257 p_env->write_size=DEF_PACK_BUFSIZE;
4258 p_env->packing=PACKING_ASK;
4259 CLAW_DBF_TEXT(2,setup,"PACKING");
4260 }
4261 else {
4262 p_env->packing=0;
4263 p_env->read_size=CLAW_FRAME_SIZE;
4264 p_env->write_size=CLAW_FRAME_SIZE;
4265 CLAW_DBF_TEXT(2,setup,"ApiSet");
4266 }
4267 CLAW_DBF_TEXT_(2,setup,"%s",p_env->api_type);
4268 return count;
4269}
4270
4271static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
4272
4273static ssize_t
4274claw_wbuff_show(struct device *dev, char *buf)
4275{
4276 struct claw_privbk *priv;
4277 struct claw_env * p_env;
4278
4279 priv = dev->driver_data;
4280 if (!priv)
4281 return -ENODEV;
4282 p_env = priv->p_env;
4283 return sprintf(buf, "%d\n", p_env->write_buffers);
4284}
4285
4286static ssize_t
4287claw_wbuff_write(struct device *dev, const char *buf, size_t count)
4288{
4289 struct claw_privbk *priv;
4290 struct claw_env * p_env;
4291 int nnn,max;
4292
4293 priv = dev->driver_data;
4294 if (!priv)
4295 return -ENODEV;
4296 p_env = priv->p_env;
4297 sscanf(buf, "%i", &nnn);
4298 if (p_env->packing) {
4299 max = 64;
4300 }
4301 else {
4302 max = 512;
4303 }
4304 if ((nnn > max ) || (nnn < 2))
4305 return -EINVAL;
4306 p_env->write_buffers = nnn;
4307 CLAW_DBF_TEXT(2,setup,"Wbufset");
4308 CLAW_DBF_TEXT_(2,setup,"WB=%d",p_env->write_buffers);
4309 return count;
4310}
4311
4312static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);
4313
4314static ssize_t
4315claw_rbuff_show(struct device *dev, char *buf)
4316{
4317 struct claw_privbk *priv;
4318 struct claw_env * p_env;
4319
4320 priv = dev->driver_data;
4321 if (!priv)
4322 return -ENODEV;
4323 p_env = priv->p_env;
4324 return sprintf(buf, "%d\n", p_env->read_buffers);
4325}
4326
4327static ssize_t
4328claw_rbuff_write(struct device *dev, const char *buf, size_t count)
4329{
4330 struct claw_privbk *priv;
4331 struct claw_env *p_env;
4332 int nnn,max;
4333
4334 priv = dev->driver_data;
4335 if (!priv)
4336 return -ENODEV;
4337 p_env = priv->p_env;
4338 sscanf(buf, "%i", &nnn);
4339 if (p_env->packing) {
4340 max = 64;
4341 }
4342 else {
4343 max = 512;
4344 }
4345 if ((nnn > max ) || (nnn < 2))
4346 return -EINVAL;
4347 p_env->read_buffers = nnn;
4348 CLAW_DBF_TEXT(2,setup,"Rbufset");
4349 CLAW_DBF_TEXT_(2,setup,"RB=%d",p_env->read_buffers);
4350 return count;
4351}
4352
4353static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
4354
4355static struct attribute *claw_attr[] = {
4356 &dev_attr_read_buffer.attr,
4357 &dev_attr_write_buffer.attr,
4358 &dev_attr_adapter_name.attr,
4359 &dev_attr_api_type.attr,
4360 &dev_attr_host_name.attr,
4361 NULL,
4362};
4363
4364static struct attribute_group claw_attr_group = {
4365 .attrs = claw_attr,
4366};
4367
4368static int
4369claw_add_files(struct device *dev)
4370{
4371 pr_debug("%s() called\n", __FUNCTION__);
4372 CLAW_DBF_TEXT(2,setup,"add_file");
4373 return sysfs_create_group(&dev->kobj, &claw_attr_group);
4374}
4375
4376static void
4377claw_remove_files(struct device *dev)
4378{
4379 pr_debug("%s() called\n", __FUNCTION__);
4380 CLAW_DBF_TEXT(2,setup,"rem_file");
4381 sysfs_remove_group(&dev->kobj, &claw_attr_group);
4382}
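/*
 * Hedged usage sketch: once claw_add_files() has run, the attribute
 * group above surfaces per-device configuration in sysfs, e.g. (the
 * exact device path below is an assumption):
 *
 *   echo TCPIP  > /sys/bus/ccwgroup/drivers/claw/0.0.0100/host_name
 *   echo PACKED > /sys/bus/ccwgroup/drivers/claw/0.0.0100/api_type
 *   echo 32     > /sys/bus/ccwgroup/drivers/claw/0.0.0100/write_buffer
 */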
4383
4384/*--------------------------------------------------------------------*
4385* claw_init and cleanup *
4386*---------------------------------------------------------------------*/
4387
4388static void __exit
4389claw_cleanup(void)
4390{
4391 unregister_cu3088_discipline(&claw_group_driver);
4392 claw_unregister_debug_facility();
4393 printk(KERN_INFO "claw: Driver unloaded\n");
4394
4395}
4396
4397/**
4398 * Initialize module.
4399 * This is called just after the module is loaded.
4400 *
4401 * @return 0 on success, !0 on error.
4402 */
4403static int __init
4404claw_init(void)
4405{
4406 int ret = 0;
4407 printk(KERN_INFO "claw: starting driver "
4408#ifdef MODULE
4409 "module "
4410#else
4411 "compiled into kernel "
4412#endif
4413 " $Revision: 1.35 $ $Date: 2005/03/24 12:25:38 $ \n");
4414
4415
4416#ifdef FUNCTRACE
4417 printk(KERN_INFO "claw: %s() enter \n",__FUNCTION__);
4418#endif
4419 ret = claw_register_debug_facility();
4420 if (ret) {
4421 printk(KERN_WARNING "claw: %s() debug_register failed %d\n",
4422 __FUNCTION__,ret);
4423 return ret;
4424 }
4425 CLAW_DBF_TEXT(2,setup,"init_mod");
4426 ret = register_cu3088_discipline(&claw_group_driver);
4427 if (ret) {
4428 claw_unregister_debug_facility();
 4429 printk(KERN_WARNING "claw: %s() cu3088 register failed %d\n",
4430 __FUNCTION__,ret);
4431 }
4432#ifdef FUNCTRACE
4433 printk(KERN_INFO "claw: %s() exit \n",__FUNCTION__);
4434#endif
4435 return ret;
4436}
4437
4438module_init(claw_init);
4439module_exit(claw_cleanup);
4440
4441
4442
4443/*--------------------------------------------------------------------*
4444* End of File *
4445*---------------------------------------------------------------------*/
4446
4447
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
new file mode 100644
index 000000000000..3df71970f601
--- /dev/null
+++ b/drivers/s390/net/claw.h
@@ -0,0 +1,335 @@
1/*******************************************************
2* Define constants *
3* *
4********************************************************/
5#define VERSION_CLAW_H "$Revision: 1.6 $"
6/*-----------------------------------------------------*
7* CCW command codes for CLAW protocol *
8*------------------------------------------------------*/
9
10#define CCW_CLAW_CMD_WRITE 0x01 /* write - not including link */
11#define CCW_CLAW_CMD_READ 0x02 /* read */
12#define CCW_CLAW_CMD_NOP 0x03 /* NOP */
13#define CCW_CLAW_CMD_SENSE 0x04 /* Sense */
14#define CCW_CLAW_CMD_SIGNAL_SMOD 0x05 /* Signal Status Modifier */
15#define CCW_CLAW_CMD_TIC 0x08 /* TIC */
16#define CCW_CLAW_CMD_READHEADER 0x12 /* read header data */
17#define CCW_CLAW_CMD_READFF 0x22 /* read an FF */
18#define CCW_CLAW_CMD_SENSEID 0xe4 /* Sense ID */
19
20
21/*-----------------------------------------------------*
22* CLAW Unique constants *
23*------------------------------------------------------*/
24
25#define MORE_to_COME_FLAG 0x04 /* OR with write CCW in case of m-t-c */
26#define CLAW_IDLE 0x00 /* flag to indicate CLAW is idle */
27#define CLAW_BUSY 0xff /* flag to indicate CLAW is busy */
28#define CLAW_PENDING 0x00 /* flag to indicate i/o is pending */
29#define CLAW_COMPLETE 0xff /* flag to indicate i/o completed */
30
31/*-----------------------------------------------------*
32* CLAW control command code *
33*------------------------------------------------------*/
34
35#define SYSTEM_VALIDATE_REQUEST 0x01 /* System Validate request */
36#define SYSTEM_VALIDATE_RESPONSE 0x02 /* System Validate response */
37#define CONNECTION_REQUEST 0x21 /* Connection request */
38#define CONNECTION_RESPONSE 0x22 /* Connection response */
39#define CONNECTION_CONFIRM 0x23 /* Connection confirm */
40#define DISCONNECT 0x24 /* Disconnect */
41#define CLAW_ERROR 0x41 /* CLAW error message */
42#define CLAW_VERSION_ID 2 /* CLAW version ID */
43
44/*-----------------------------------------------------*
45* CLAW adapter sense bytes *
46*------------------------------------------------------*/
47
48#define CLAW_ADAPTER_SENSE_BYTE 0x41 /* Stop command issued to adapter */
49
50/*-----------------------------------------------------*
51* CLAW control command return codes *
52*------------------------------------------------------*/
53
54#define CLAW_RC_NAME_MISMATCH 166 /* names do not match */
55#define CLAW_RC_WRONG_VERSION 167 /* wrong CLAW version number */
56#define CLAW_RC_HOST_RCV_TOO_SMALL 180 /* Host maximum receive is */
57 /* less than Linux on zSeries*/
58 /* transmit size */
59
60/*-----------------------------------------------------*
61* CLAW Constants application name *
62*------------------------------------------------------*/
63
64#define HOST_APPL_NAME "TCPIP "
65#define WS_APPL_NAME_IP_LINK "TCPIP "
66#define WS_APPL_NAME_IP_NAME "IP "
67#define WS_APPL_NAME_API_LINK "API "
68#define WS_APPL_NAME_PACKED "PACKED "
69#define WS_NAME_NOT_DEF "NOT_DEF "
70#define PACKING_ASK 1
71#define PACK_SEND 2
72#define DO_PACKED 3
73
74#define MAX_ENVELOPE_SIZE 65536
75#define CLAW_DEFAULT_MTU_SIZE 4096
76#define DEF_PACK_BUFSIZE 32768
77#define READ 0
78#define WRITE 1
79
80#define TB_TX 0 /* sk buffer handling in process */
81#define TB_STOP 1 /* network device stop in process */
82#define TB_RETRY 2 /* retry in process */
83#define TB_NOBUFFER 3 /* no buffer on free queue */
84#define CLAW_MAX_LINK_ID 1
85#define CLAW_MAX_DEV 256 /* max claw devices */
86#define MAX_NAME_LEN 8 /* host name, adapter name length */
87#define CLAW_FRAME_SIZE 4096
88#define CLAW_ID_SIZE (BUS_ID_SIZE+3)
89
90/* state machine codes used in claw_irq_handler */
91
92#define CLAW_STOP 0
93#define CLAW_START_HALT_IO 1
94#define CLAW_START_SENSEID 2
95#define CLAW_START_READ 3
96#define CLAW_START_WRITE 4
97
98/*-----------------------------------------------------*
99* Lock flag *
100*------------------------------------------------------*/
101#define LOCK_YES 0
102#define LOCK_NO 1
103
104/*-----------------------------------------------------*
105* DBF Debug macros *
106*------------------------------------------------------*/
107#define CLAW_DBF_TEXT(level, name, text) \
108 do { \
109 debug_text_event(claw_dbf_##name, level, text); \
110 } while (0)
111
112#define CLAW_DBF_HEX(level,name,addr,len) \
113do { \
114 debug_event(claw_dbf_##name,level,(void*)(addr),len); \
115} while (0)
116
117#define CLAW_DBF_TEXT_(level,name,text...) \
118do { \
119 sprintf(debug_buffer, text); \
120 debug_text_event(claw_dbf_##name,level, debug_buffer);\
121} while (0)
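/*
 * Usage sketch: 'name' selects one of the claw_dbf_* areas, 'level' is
 * the s390 debug feature level. The first two calls appear verbatim in
 * claw.c; the CLAW_DBF_HEX() call is a hypothetical example:
 *
 *	CLAW_DBF_TEXT(2, setup, "add_file");
 *	CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
 *	CLAW_DBF_HEX(4, trace, skb->data, 16);
 *
 * Note that CLAW_DBF_TEXT_() formats into the shared 'debug_buffer', so
 * callers are presumably expected to serialize against each other.
 */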
122
123/*******************************************************
124* Define Control Blocks *
125* *
126********************************************************/
127
128/*------------------------------------------------------*/
129/* CLAW header */
130/*------------------------------------------------------*/
131
132struct clawh {
133 __u16 length; /* length of data read by preceding read CCW */
134 __u8 opcode; /* equivalent read CCW */
135 __u8 flag; /* flag of FF to indicate read was completed */
136};
137
138/*------------------------------------------------------*/
139/* CLAW Packing header 4 bytes */
140/*------------------------------------------------------*/
141struct clawph {
142 __u16 len; /* Length of Packed Data Area */
143 __u8 flag; /* Reserved not used */
144 __u8 link_num; /* Link ID */
145};
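/*
 * Illustrative sketch (an assumption, not code from this driver): a
 * packed data area is taken here to be a sequence of sub-frames, each
 * led by a struct clawph whose 'len' gives the payload size that
 * follows. Walking such an area could then look like:
 *
 *	int off = 0;
 *	while (off + sizeof(struct clawph) <= total_len) {
 *		struct clawph *ph = (struct clawph *)(area + off);
 *		handle_subframe(area + off + sizeof(struct clawph),
 *				ph->len, ph->link_num);
 *		off += sizeof(struct clawph) + ph->len;
 *	}
 *
 * where handle_subframe() is a hypothetical consumer.
 */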
146
147/*------------------------------------------------------*/
148/* CLAW Ending struct ccwbk */
149/*------------------------------------------------------*/
150struct endccw {
151 __u32 real; /* real address of this block */
152 __u8 write1; /* write 1 is active */
153 __u8 read1; /* read 1 is active */
154 __u16 reserved; /* reserved for future use */
155 struct ccw1 write1_nop1;
156 struct ccw1 write1_nop2;
157 struct ccw1 write2_nop1;
158 struct ccw1 write2_nop2;
159 struct ccw1 read1_nop1;
160 struct ccw1 read1_nop2;
161 struct ccw1 read2_nop1;
162 struct ccw1 read2_nop2;
163};
164
165/*------------------------------------------------------*/
166/* CLAW struct ccwbk */
167/*------------------------------------------------------*/
168struct ccwbk {
169 void *next; /* pointer to next ccw block */
170 __u32 real; /* real address of this ccw */
171 void *p_buffer; /* virtual address of data */
172 struct clawh header; /* claw header */
173 struct ccw1 write; /* write CCW */
174 struct ccw1 w_read_FF; /* read FF */
175 struct ccw1 w_TIC_1; /* TIC */
176 struct ccw1 read; /* read CCW */
177 struct ccw1 read_h; /* read header */
178 struct ccw1 signal; /* signal SMOD */
179 struct ccw1 r_TIC_1; /* TIC1 */
180 struct ccw1 r_read_FF; /* read FF */
181 struct ccw1 r_TIC_2; /* TIC2 */
182};
183
184/*------------------------------------------------------*/
185/* CLAW control block */
186/*------------------------------------------------------*/
187struct clawctl {
188 __u8 command; /* control command */
189 __u8 version; /* CLAW protocol version */
190 __u8 linkid; /* link ID */
191 __u8 correlator; /* correlator */
192 __u8 rc; /* return code */
193 __u8 reserved1; /* reserved */
194 __u8 reserved2; /* reserved */
195 __u8 reserved3; /* reserved */
196 __u8 data[24]; /* command specific fields */
197};
198
199/*------------------------------------------------------*/
200/* Data for SYSTEMVALIDATE command */
201/*------------------------------------------------------*/
202struct sysval {
203 char WS_name[8]; /* Workstation System name */
204 char host_name[8]; /* Host system name */
205 __u16 read_frame_size; /* read frame size */
206 __u16 write_frame_size; /* write frame size */
207 __u8 reserved[4]; /* reserved */
208};
209
210/*------------------------------------------------------*/
211/* Data for Connect command */
212/*------------------------------------------------------*/
213struct conncmd {
214 char WS_name[8]; /* Workstation application name */
215 char host_name[8]; /* Host application name */
216 __u16 reserved1[2]; /* read frame size */
217 __u8 reserved2[4]; /* reserved */
218};
219
220/*------------------------------------------------------*/
221/* Data for CLAW error */
222/*------------------------------------------------------*/
223struct clawwerror {
224 char reserved1[8]; /* reserved */
225 char reserved2[8]; /* reserved */
226 char reserved3[8]; /* reserved */
227};
228
229/*------------------------------------------------------*/
230/* Data buffer for CLAW */
231/*------------------------------------------------------*/
232struct clawbuf {
233 char buffer[MAX_ENVELOPE_SIZE]; /* data buffer */
234};
235
236/*------------------------------------------------------*/
237/* Channel control block for read and write channel */
238/*------------------------------------------------------*/
239
240struct chbk {
241 unsigned int devno;
242 int irq;
243 char id[CLAW_ID_SIZE];
244 __u32 IO_active;
245 __u8 claw_state;
246 struct irb *irb;
247 struct ccw_device *cdev; /* pointer to the channel device */
248 struct net_device *ndev;
249 wait_queue_head_t wait;
250 struct tasklet_struct tasklet;
251 struct timer_list timer;
252 unsigned long flag_a; /* atomic flags */
253#define CLAW_BH_ACTIVE 0
254 unsigned long flag_b; /* atomic flags */
255#define CLAW_WRITE_ACTIVE 0
256 __u8 last_dstat;
257 __u8 flag;
258 struct sk_buff_head collect_queue;
259 spinlock_t collect_lock;
260#define CLAW_WRITE 0x02 /* - Set if this is a write channel */
261#define CLAW_READ 0x01 /* - Set if this is a read channel */
262#define CLAW_TIMER 0x80 /* - Set if timer made the wake_up */
263};
264
265/*--------------------------------------------------------------*
266* CLAW environment block *
267*---------------------------------------------------------------*/
268
269struct claw_env {
270 unsigned int devno[2]; /* device number */
271 char host_name[9]; /* Host name */
272 char adapter_name [9]; /* adapter name */
273 char api_type[9]; /* TCPIP, API or PACKED */
274 void *p_priv; /* privptr */
275 __u16 read_buffers; /* read buffer number */
276 __u16 write_buffers; /* write buffer number */
277 __u16 read_size; /* read buffer size */
278 __u16 write_size; /* write buffer size */
279 __u16 dev_id; /* device ident */
280 __u8 packing; /* are we packing? */
281	volatile __u8 queme_switch; /* gate for immediate packing */
282 volatile unsigned long pk_delay; /* Delay for adaptive packing */
283 __u8 in_use; /* device active flag */
284 struct net_device *ndev; /* backward ptr to the net dev*/
285};
286
287/*--------------------------------------------------------------*
288* CLAW main control block *
289*---------------------------------------------------------------*/
290
291struct claw_privbk {
292 void *p_buff_ccw;
293 __u32 p_buff_ccw_num;
294 void *p_buff_read;
295 __u32 p_buff_read_num;
296 __u32 p_buff_pages_perread;
297 void *p_buff_write;
298 __u32 p_buff_write_num;
299 __u32 p_buff_pages_perwrite;
300 long active_link_ID; /* Active logical link ID */
301 struct ccwbk *p_write_free_chain; /* pointer to free ccw chain */
302 struct ccwbk *p_write_active_first; /* ptr to the first write ccw */
303 struct ccwbk *p_write_active_last; /* ptr to the last write ccw */
304 struct ccwbk *p_read_active_first; /* ptr to the first read ccw */
305 struct ccwbk *p_read_active_last; /* ptr to the last read ccw */
306 struct endccw *p_end_ccw; /*ptr to ending ccw */
307 struct ccwbk *p_claw_signal_blk; /* ptr to signal block */
308 __u32 write_free_count; /* number of free bufs for write */
309 struct net_device_stats stats; /* device status */
310 struct chbk channel[2]; /* Channel control blocks */
311 __u8 mtc_skipping;
312 int mtc_offset;
313 int mtc_logical_link;
314 void *p_mtc_envelope;
315 struct sk_buff *pk_skb; /* packing buffer */
316 int pk_cnt;
317 struct clawctl ctl_bk;
318 struct claw_env *p_env;
319 __u8 system_validate_comp;
320 __u8 release_pend;
321 __u8 checksum_received_ip_pkts;
322 __u8 buffs_alloc;
323 struct endccw end_ccw;
324 unsigned long tbusy;
325
326};
327
328
329/************************************************************/
330/* define global constants */
331/************************************************************/
332
333#define CCWBK_SIZE sizeof(struct ccwbk)
334
335
diff --git a/drivers/s390/net/ctcdbug.c b/drivers/s390/net/ctcdbug.c
new file mode 100644
index 000000000000..2c86bfa11b2f
--- /dev/null
+++ b/drivers/s390/net/ctcdbug.c
@@ -0,0 +1,83 @@
1/*
2 *
3 * linux/drivers/s390/net/ctcdbug.c ($Revision: 1.4 $)
4 *
5 * CTC / ESCON network driver - s390 dbf exploit.
6 *
7 * Copyright 2000,2003 IBM Corporation
8 *
9 * Author(s): Original Code written by
10 * Peter Tiedemann (ptiedem@de.ibm.com)
11 *
12 * $Revision: 1.4 $ $Date: 2004/08/04 10:11:59 $
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#include "ctcdbug.h"
30
31/**
32 * Debug Facility Stuff
33 */
34debug_info_t *ctc_dbf_setup = NULL;
35debug_info_t *ctc_dbf_data = NULL;
36debug_info_t *ctc_dbf_trace = NULL;
37
38DEFINE_PER_CPU(char[256], ctc_dbf_txt_buf);
39
40void
41ctc_unregister_dbf_views(void)
42{
43 if (ctc_dbf_setup)
44 debug_unregister(ctc_dbf_setup);
45 if (ctc_dbf_data)
46 debug_unregister(ctc_dbf_data);
47 if (ctc_dbf_trace)
48 debug_unregister(ctc_dbf_trace);
49}
50int
51ctc_register_dbf_views(void)
52{
53 ctc_dbf_setup = debug_register(CTC_DBF_SETUP_NAME,
54 CTC_DBF_SETUP_INDEX,
55 CTC_DBF_SETUP_NR_AREAS,
56 CTC_DBF_SETUP_LEN);
57 ctc_dbf_data = debug_register(CTC_DBF_DATA_NAME,
58 CTC_DBF_DATA_INDEX,
59 CTC_DBF_DATA_NR_AREAS,
60 CTC_DBF_DATA_LEN);
61 ctc_dbf_trace = debug_register(CTC_DBF_TRACE_NAME,
62 CTC_DBF_TRACE_INDEX,
63 CTC_DBF_TRACE_NR_AREAS,
64 CTC_DBF_TRACE_LEN);
65
66 if ((ctc_dbf_setup == NULL) || (ctc_dbf_data == NULL) ||
67 (ctc_dbf_trace == NULL)) {
68 ctc_unregister_dbf_views();
69 return -ENOMEM;
70 }
71 debug_register_view(ctc_dbf_setup, &debug_hex_ascii_view);
72 debug_set_level(ctc_dbf_setup, CTC_DBF_SETUP_LEVEL);
73
74 debug_register_view(ctc_dbf_data, &debug_hex_ascii_view);
75 debug_set_level(ctc_dbf_data, CTC_DBF_DATA_LEVEL);
76
77 debug_register_view(ctc_dbf_trace, &debug_hex_ascii_view);
78 debug_set_level(ctc_dbf_trace, CTC_DBF_TRACE_LEVEL);
79
80 return 0;
81}
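/*
 * Usage sketch (an assumption, mirroring the claw_init() pattern earlier
 * in this diff): a module init routine would register the views first
 * and bail out on failure:
 *
 *	static int __init ctc_example_init(void)	// hypothetical name
 *	{
 *		int ret = ctc_register_dbf_views();
 *		if (ret)
 *			return ret;
 *		// ... remaining driver setup ...
 *		return 0;
 *	}
 *
 * with ctc_unregister_dbf_views() called from the matching __exit routine.
 */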
82
83
diff --git a/drivers/s390/net/ctcdbug.h b/drivers/s390/net/ctcdbug.h
new file mode 100644
index 000000000000..ef8883951720
--- /dev/null
+++ b/drivers/s390/net/ctcdbug.h
@@ -0,0 +1,123 @@
1/*
2 *
3 * linux/drivers/s390/net/ctcdbug.h ($Revision: 1.4 $)
4 *
5 * CTC / ESCON network driver - s390 dbf exploit.
6 *
7 * Copyright 2000,2003 IBM Corporation
8 *
9 * Author(s): Original Code written by
10 * Peter Tiedemann (ptiedem@de.ibm.com)
11 *
12 * $Revision: 1.4 $ $Date: 2004/10/15 09:26:58 $
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29
30#include <asm/debug.h>
31/**
32 * Debug Facility stuff
33 */
34#define CTC_DBF_SETUP_NAME "ctc_setup"
35#define CTC_DBF_SETUP_LEN 16
36#define CTC_DBF_SETUP_INDEX 3
37#define CTC_DBF_SETUP_NR_AREAS 1
38#define CTC_DBF_SETUP_LEVEL 3
39
40#define CTC_DBF_DATA_NAME "ctc_data"
41#define CTC_DBF_DATA_LEN 128
42#define CTC_DBF_DATA_INDEX 3
43#define CTC_DBF_DATA_NR_AREAS 1
44#define CTC_DBF_DATA_LEVEL 2
45
46#define CTC_DBF_TRACE_NAME "ctc_trace"
47#define CTC_DBF_TRACE_LEN 16
48#define CTC_DBF_TRACE_INDEX 2
49#define CTC_DBF_TRACE_NR_AREAS 2
50#define CTC_DBF_TRACE_LEVEL 3
51
52#define DBF_TEXT(name,level,text) \
53 do { \
54 debug_text_event(ctc_dbf_##name,level,text); \
55 } while (0)
56
57#define DBF_HEX(name,level,addr,len) \
58 do { \
59 debug_event(ctc_dbf_##name,level,(void*)(addr),len); \
60 } while (0)
61
62DECLARE_PER_CPU(char[256], ctc_dbf_txt_buf);
63extern debug_info_t *ctc_dbf_setup;
64extern debug_info_t *ctc_dbf_data;
65extern debug_info_t *ctc_dbf_trace;
66
67
68#define DBF_TEXT_(name,level,text...) \
69 do { \
70 char* ctc_dbf_txt_buf = get_cpu_var(ctc_dbf_txt_buf); \
71 sprintf(ctc_dbf_txt_buf, text); \
72 debug_text_event(ctc_dbf_##name,level,ctc_dbf_txt_buf); \
73 put_cpu_var(ctc_dbf_txt_buf); \
74 } while (0)
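/*
 * Example (illustrative): DBF_TEXT_() formats into a per-CPU buffer, so
 * it can be used where DBF_TEXT() would need pre-formatted text:
 *
 *	DBF_TEXT_(trace, 2, "rc=%d", rc);
 *
 * get_cpu_var()/put_cpu_var() disable preemption around the sprintf,
 * which is what makes the shared per-CPU buffer safe to use here.
 */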
75
76#define DBF_SPRINTF(name,level,text...) \
77 do { \
78 debug_sprintf_event(ctc_dbf_trace, level, ##text ); \
79 debug_sprintf_event(ctc_dbf_trace, level, text ); \
80 } while (0)
81
82
83int ctc_register_dbf_views(void);
84
85void ctc_unregister_dbf_views(void);
86
87/**
88 * some more debug stuff
89 */
90
91#define HEXDUMP16(importance,header,ptr) \
92PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
93 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
94 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
95 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
96 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
97 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
98 *(((char*)ptr)+12),*(((char*)ptr)+13), \
99 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
100PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
101 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
102 *(((char*)ptr)+16),*(((char*)ptr)+17), \
103 *(((char*)ptr)+18),*(((char*)ptr)+19), \
104 *(((char*)ptr)+20),*(((char*)ptr)+21), \
105 *(((char*)ptr)+22),*(((char*)ptr)+23), \
106 *(((char*)ptr)+24),*(((char*)ptr)+25), \
107 *(((char*)ptr)+26),*(((char*)ptr)+27), \
108 *(((char*)ptr)+28),*(((char*)ptr)+29), \
109 *(((char*)ptr)+30),*(((char*)ptr)+31));
110
111static inline void
112hex_dump(unsigned char *buf, size_t len)
113{
114 size_t i;
115
116 for (i = 0; i < len; i++) {
117 if (i && !(i % 16))
118 printk("\n");
119 printk("%02x ", *(buf + i));
120 }
121 printk("\n");
122}
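/*
 * Example (illustrative): dump the first 32 bytes of a buffer, printed
 * 16 bytes per line:
 *
 *	hex_dump(skb->data, 32);
 */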
123
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
new file mode 100644
index 000000000000..7266bf5ea659
--- /dev/null
+++ b/drivers/s390/net/ctcmain.c
@@ -0,0 +1,3304 @@
1/*
2 * $Id: ctcmain.c,v 1.72 2005/03/17 10:51:52 ptiedem Exp $
3 *
4 * CTC / ESCON network driver
5 *
6 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
8 * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
9 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
10 *              Peter Tiedemann (ptiedem@de.ibm.com)
11 * Driver Model stuff by : Cornelia Huck <cohuck@de.ibm.com>
12 *
13 * Documentation used:
14 * - Principles of Operation (IBM doc#: SA22-7201-06)
15 * - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
16 * - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
17 * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
18 * - ESCON I/O Interface (IBM doc#: SA22-7202-02)
19 *
20 * and the source of the original CTC driver by:
21 * Dieter Wellerdiek (wel@de.ibm.com)
22 * Martin Schwidefsky (schwidefsky@de.ibm.com)
23 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
24 * Jochen Röhrig (roehrig@de.ibm.com)
25 *
26 * This program is free software; you can redistribute it and/or modify
27 * it under the terms of the GNU General Public License as published by
28 * the Free Software Foundation; either version 2, or (at your option)
29 * any later version.
30 *
31 * This program is distributed in the hope that it will be useful,
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
34 * GNU General Public License for more details.
35 *
36 * You should have received a copy of the GNU General Public License
37 * along with this program; if not, write to the Free Software
38 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
39 *
40 * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.72 $
41 *
42 */
43
44#undef DEBUG
45
46#include <linux/module.h>
47#include <linux/init.h>
48#include <linux/kernel.h>
49#include <linux/slab.h>
50#include <linux/errno.h>
51#include <linux/types.h>
52#include <linux/interrupt.h>
53#include <linux/timer.h>
54#include <linux/sched.h>
55#include <linux/bitops.h>
56
57#include <linux/signal.h>
58#include <linux/string.h>
59
60#include <linux/ip.h>
61#include <linux/if_arp.h>
62#include <linux/tcp.h>
63#include <linux/skbuff.h>
64#include <linux/ctype.h>
65#include <net/dst.h>
66
67#include <asm/io.h>
68#include <asm/ccwdev.h>
69#include <asm/ccwgroup.h>
70#include <asm/uaccess.h>
71
72#include <asm/idals.h>
73
74#include "ctctty.h"
75#include "fsm.h"
76#include "cu3088.h"
77#include "ctcdbug.h"
78
79MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
80MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
81MODULE_LICENSE("GPL");
82
83/**
84 * CCW commands, used in this driver.
85 */
86#define CCW_CMD_WRITE 0x01
87#define CCW_CMD_READ 0x02
88#define CCW_CMD_SET_EXTENDED 0xc3
89#define CCW_CMD_PREPARE 0xe3
90
91#define CTC_PROTO_S390 0
92#define CTC_PROTO_LINUX 1
93#define CTC_PROTO_LINUX_TTY 2
94#define CTC_PROTO_OS390 3
95#define CTC_PROTO_MAX 3
96
97#define CTC_BUFSIZE_LIMIT 65535
98#define CTC_BUFSIZE_DEFAULT 32768
99
100#define CTC_TIMEOUT_5SEC 5000
101
102#define CTC_INITIAL_BLOCKLEN 2
103
104#define READ 0
105#define WRITE 1
106
107#define CTC_ID_SIZE (BUS_ID_SIZE+3)
108
109
110struct ctc_profile {
111 unsigned long maxmulti;
112 unsigned long maxcqueue;
113 unsigned long doios_single;
114 unsigned long doios_multi;
115 unsigned long txlen;
116 unsigned long tx_time;
117 struct timespec send_stamp;
118};
119
120/**
121 * Definition of one channel
122 */
123struct channel {
124
125 /**
126 * Pointer to next channel in list.
127 */
128 struct channel *next;
129 char id[CTC_ID_SIZE];
130 struct ccw_device *cdev;
131
132 /**
133 * Type of this channel.
134 * CTC/A or Escon for valid channels.
135 */
136 enum channel_types type;
137
138 /**
139 * Misc. flags. See CHANNEL_FLAGS_... below
140 */
141 __u32 flags;
142
143 /**
144 * The protocol of this channel
145 */
146 __u16 protocol;
147
148 /**
149 * I/O and irq related stuff
150 */
151 struct ccw1 *ccw;
152 struct irb *irb;
153
154 /**
155 * RX/TX buffer size
156 */
157 int max_bufsize;
158
159 /**
160 * Transmit/Receive buffer.
161 */
162 struct sk_buff *trans_skb;
163
164 /**
165 * Universal I/O queue.
166 */
167 struct sk_buff_head io_queue;
168
169 /**
170 * TX queue for collecting skb's during busy.
171 */
172 struct sk_buff_head collect_queue;
173
174 /**
175 * Amount of data in collect_queue.
176 */
177 int collect_len;
178
179 /**
180 * spinlock for collect_queue and collect_len
181 */
182 spinlock_t collect_lock;
183
184 /**
185	 * Timer for detecting unresponsive
186 * I/O operations.
187 */
188 fsm_timer timer;
189
190 /**
191 * Retry counter for misc. operations.
192 */
193 int retry;
194
195 /**
196 * The finite state machine of this channel
197 */
198 fsm_instance *fsm;
199
200 /**
201 * The corresponding net_device this channel
202 * belongs to.
203 */
204 struct net_device *netdev;
205
206 struct ctc_profile prof;
207
208 unsigned char *trans_skb_data;
209
210 __u16 logflags;
211};
212
213#define CHANNEL_FLAGS_READ 0
214#define CHANNEL_FLAGS_WRITE 1
215#define CHANNEL_FLAGS_INUSE 2
216#define CHANNEL_FLAGS_BUFSIZE_CHANGED 4
217#define CHANNEL_FLAGS_FAILED 8
218#define CHANNEL_FLAGS_WAITIRQ 16
219#define CHANNEL_FLAGS_RWMASK 1
220#define CHANNEL_DIRECTION(f) ((f) & CHANNEL_FLAGS_RWMASK)
221
222#define LOG_FLAG_ILLEGALPKT 1
223#define LOG_FLAG_ILLEGALSIZE 2
224#define LOG_FLAG_OVERRUN 4
225#define LOG_FLAG_NOMEM 8
226
227#define CTC_LOGLEVEL_INFO 1
228#define CTC_LOGLEVEL_NOTICE 2
229#define CTC_LOGLEVEL_WARN 4
230#define CTC_LOGLEVEL_EMERG 8
231#define CTC_LOGLEVEL_ERR 16
232#define CTC_LOGLEVEL_DEBUG 32
233#define CTC_LOGLEVEL_CRIT 64
234
235#define CTC_LOGLEVEL_DEFAULT \
236(CTC_LOGLEVEL_INFO | CTC_LOGLEVEL_NOTICE | CTC_LOGLEVEL_WARN | CTC_LOGLEVEL_CRIT)
237
238#define CTC_LOGLEVEL_MAX ((CTC_LOGLEVEL_CRIT<<1)-1)
239
240static int loglevel = CTC_LOGLEVEL_DEFAULT;
241
242#define ctc_pr_debug(fmt, arg...) \
243do { if (loglevel & CTC_LOGLEVEL_DEBUG) printk(KERN_DEBUG fmt,##arg); } while (0)
244
245#define ctc_pr_info(fmt, arg...) \
246do { if (loglevel & CTC_LOGLEVEL_INFO) printk(KERN_INFO fmt,##arg); } while (0)
247
248#define ctc_pr_notice(fmt, arg...) \
249do { if (loglevel & CTC_LOGLEVEL_NOTICE) printk(KERN_NOTICE fmt,##arg); } while (0)
250
251#define ctc_pr_warn(fmt, arg...) \
252do { if (loglevel & CTC_LOGLEVEL_WARN) printk(KERN_WARNING fmt,##arg); } while (0)
253
254#define ctc_pr_emerg(fmt, arg...) \
255do { if (loglevel & CTC_LOGLEVEL_EMERG) printk(KERN_EMERG fmt,##arg); } while (0)
256
257#define ctc_pr_err(fmt, arg...) \
258do { if (loglevel & CTC_LOGLEVEL_ERR) printk(KERN_ERR fmt,##arg); } while (0)
259
260#define ctc_pr_crit(fmt, arg...) \
261do { if (loglevel & CTC_LOGLEVEL_CRIT) printk(KERN_CRIT fmt,##arg); } while (0)
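/*
 * Example (illustrative): each wrapper checks one bit in 'loglevel'
 * before printing, so messages can be filtered at runtime:
 *
 *	ctc_pr_debug("%s: got packet with length %d\n", dev->name, len);
 *
 * prints only while CTC_LOGLEVEL_DEBUG is set in the module's loglevel.
 */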
262
263/**
264 * Linked list of all detected channels.
265 */
266static struct channel *channels = NULL;
267
268struct ctc_priv {
269 struct net_device_stats stats;
270 unsigned long tbusy;
271 /**
272 * The finite state machine of this interface.
273 */
274 fsm_instance *fsm;
275 /**
276 * The protocol of this device
277 */
278 __u16 protocol;
279 /**
280 * Timer for restarting after I/O Errors
281 */
282 fsm_timer restart_timer;
283
284 int buffer_size;
285
286 struct channel *channel[2];
287};
288
289/**
290 * Definition of our link level header.
291 */
292struct ll_header {
293 __u16 length;
294 __u16 type;
295 __u16 unused;
296};
297#define LL_HEADER_LENGTH (sizeof(struct ll_header))
298
299/**
300 * Compatibility macros for busy handling
301 * of network devices.
302 */
303static __inline__ void
304ctc_clear_busy(struct net_device * dev)
305{
306 clear_bit(0, &(((struct ctc_priv *) dev->priv)->tbusy));
307 if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
308 netif_wake_queue(dev);
309}
310
311static __inline__ int
312ctc_test_and_set_busy(struct net_device * dev)
313{
314 if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
315 netif_stop_queue(dev);
316 return test_and_set_bit(0, &((struct ctc_priv *) dev->priv)->tbusy);
317}
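/*
 * Usage sketch (an assumption about the transmit path, based on how
 * these helpers are paired in the channel actions below): a transmit
 * routine takes the busy bit before touching channel state and the
 * TX-done action releases it once the I/O has completed:
 *
 *	if (ctc_test_and_set_busy(dev))
 *		return -EBUSY;
 *	// ... queue skb and start channel I/O ...
 *	// later, e.g. in ch_action_txdone():
 *	ctc_clear_busy(dev);
 */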
318
319/**
320 * Print Banner.
321 */
322static void
323print_banner(void)
324{
325 static int printed = 0;
326 char vbuf[] = "$Revision: 1.72 $";
327 char *version = vbuf;
328
329 if (printed)
330 return;
331 if ((version = strchr(version, ':'))) {
332 char *p = strchr(version + 1, '$');
333 if (p)
334 *p = '\0';
335 } else
336 version = " ??? ";
337 printk(KERN_INFO "CTC driver Version%s"
338#ifdef DEBUG
339 " (DEBUG-VERSION, " __DATE__ __TIME__ ")"
340#endif
341 " initialized\n", version);
342 printed = 1;
343}
344
345/**
346 * Return type of a detected device.
347 */
348static enum channel_types
349get_channel_type(struct ccw_device_id *id)
350{
351 enum channel_types type = (enum channel_types) id->driver_info;
352
353 if (type == channel_type_ficon)
354 type = channel_type_escon;
355
356 return type;
357}
358
359/**
360 * States of the interface statemachine.
361 */
362enum dev_states {
363 DEV_STATE_STOPPED,
364 DEV_STATE_STARTWAIT_RXTX,
365 DEV_STATE_STARTWAIT_RX,
366 DEV_STATE_STARTWAIT_TX,
367 DEV_STATE_STOPWAIT_RXTX,
368 DEV_STATE_STOPWAIT_RX,
369 DEV_STATE_STOPWAIT_TX,
370 DEV_STATE_RUNNING,
371 /**
372 * MUST be always the last element!!
373 */
374 NR_DEV_STATES
375};
376
377static const char *dev_state_names[] = {
378 "Stopped",
379 "StartWait RXTX",
380 "StartWait RX",
381 "StartWait TX",
382 "StopWait RXTX",
383 "StopWait RX",
384 "StopWait TX",
385 "Running",
386};
387
388/**
389 * Events of the interface statemachine.
390 */
391enum dev_events {
392 DEV_EVENT_START,
393 DEV_EVENT_STOP,
394 DEV_EVENT_RXUP,
395 DEV_EVENT_TXUP,
396 DEV_EVENT_RXDOWN,
397 DEV_EVENT_TXDOWN,
398 DEV_EVENT_RESTART,
399 /**
400 * MUST be always the last element!!
401 */
402 NR_DEV_EVENTS
403};
404
405static const char *dev_event_names[] = {
406 "Start",
407 "Stop",
408 "RX up",
409 "TX up",
410 "RX down",
411 "TX down",
412 "Restart",
413};
414
415/**
416 * Events of the channel statemachine
417 */
418enum ch_events {
419 /**
420 * Events, representing return code of
421 * I/O operations (ccw_device_start, ccw_device_halt et al.)
422 */
423 CH_EVENT_IO_SUCCESS,
424 CH_EVENT_IO_EBUSY,
425 CH_EVENT_IO_ENODEV,
426 CH_EVENT_IO_EIO,
427 CH_EVENT_IO_UNKNOWN,
428
429 CH_EVENT_ATTNBUSY,
430 CH_EVENT_ATTN,
431 CH_EVENT_BUSY,
432
433 /**
434 * Events, representing unit-check
435 */
436 CH_EVENT_UC_RCRESET,
437 CH_EVENT_UC_RSRESET,
438 CH_EVENT_UC_TXTIMEOUT,
439 CH_EVENT_UC_TXPARITY,
440 CH_EVENT_UC_HWFAIL,
441 CH_EVENT_UC_RXPARITY,
442 CH_EVENT_UC_ZERO,
443 CH_EVENT_UC_UNKNOWN,
444
445 /**
446 * Events, representing subchannel-check
447 */
448 CH_EVENT_SC_UNKNOWN,
449
450 /**
451 * Events, representing machine checks
452 */
453 CH_EVENT_MC_FAIL,
454 CH_EVENT_MC_GOOD,
455
456 /**
457 * Event, representing normal IRQ
458 */
459 CH_EVENT_IRQ,
460 CH_EVENT_FINSTAT,
461
462 /**
463 * Event, representing timer expiry.
464 */
465 CH_EVENT_TIMER,
466
467 /**
468 * Events, representing commands from upper levels.
469 */
470 CH_EVENT_START,
471 CH_EVENT_STOP,
472
473 /**
474 * MUST be always the last element!!
475 */
476 NR_CH_EVENTS,
477};
478
479static const char *ch_event_names[] = {
480 "ccw_device success",
481 "ccw_device busy",
482 "ccw_device enodev",
483 "ccw_device ioerr",
484 "ccw_device unknown",
485
486 "Status ATTN & BUSY",
487 "Status ATTN",
488 "Status BUSY",
489
490 "Unit check remote reset",
491 "Unit check remote system reset",
492 "Unit check TX timeout",
493 "Unit check TX parity",
494 "Unit check Hardware failure",
495 "Unit check RX parity",
496 "Unit check ZERO",
497 "Unit check Unknown",
498
499 "SubChannel check Unknown",
500
501 "Machine check failure",
502 "Machine check operational",
503
504 "IRQ normal",
505 "IRQ final",
506
507 "Timer",
508
509 "Start",
510 "Stop",
511};
512
513/**
514 * States of the channel statemachine.
515 */
516enum ch_states {
517 /**
518 * Channel not assigned to any device,
519 * initial state, direction invalid
520 */
521 CH_STATE_IDLE,
522
523 /**
524 * Channel assigned but not operating
525 */
526 CH_STATE_STOPPED,
527 CH_STATE_STARTWAIT,
528 CH_STATE_STARTRETRY,
529 CH_STATE_SETUPWAIT,
530 CH_STATE_RXINIT,
531 CH_STATE_TXINIT,
532 CH_STATE_RX,
533 CH_STATE_TX,
534 CH_STATE_RXIDLE,
535 CH_STATE_TXIDLE,
536 CH_STATE_RXERR,
537 CH_STATE_TXERR,
538 CH_STATE_TERM,
539 CH_STATE_DTERM,
540 CH_STATE_NOTOP,
541
542 /**
543 * MUST be always the last element!!
544 */
545 NR_CH_STATES,
546};
547
548static const char *ch_state_names[] = {
549 "Idle",
550 "Stopped",
551 "StartWait",
552 "StartRetry",
553 "SetupWait",
554 "RX init",
555 "TX init",
556 "RX",
557 "TX",
558 "RX idle",
559 "TX idle",
560 "RX error",
561 "TX error",
562 "Terminating",
563 "Restarting",
564 "Not operational",
565};
566
567#ifdef DEBUG
568/**
569 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
570 *
571 * @param skb The sk_buff to dump.
572 * @param offset Offset relative to skb-data, where to start the dump.
573 */
574static void
575ctc_dump_skb(struct sk_buff *skb, int offset)
576{
577 unsigned char *p = skb->data;
578 __u16 bl;
579 struct ll_header *header;
580 int i;
581
582 if (!(loglevel & CTC_LOGLEVEL_DEBUG))
583 return;
584 p += offset;
585 bl = *((__u16 *) p);
586 p += 2;
587 header = (struct ll_header *) p;
588 p -= 2;
589
590 printk(KERN_DEBUG "dump:\n");
591 printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);
592
593 printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
594 header->length);
595 printk(KERN_DEBUG "h->type=%04x\n", header->type);
596 printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
597 if (bl > 16)
598 bl = 16;
599 printk(KERN_DEBUG "data: ");
600 for (i = 0; i < bl; i++)
601 printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
602 printk("\n");
603}
604#else
605static inline void
606ctc_dump_skb(struct sk_buff *skb, int offset)
607{
608}
609#endif
610
611/**
612 * Unpack a just received skb and hand it over to
613 * upper layers.
614 *
615 * @param ch The channel where this skb has been received.
616 * @param pskb The received skb.
617 */
618static __inline__ void
619ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
620{
621 struct net_device *dev = ch->netdev;
622 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
623 __u16 len = *((__u16 *) pskb->data);
624
625 DBF_TEXT(trace, 4, __FUNCTION__);
626 skb_put(pskb, 2 + LL_HEADER_LENGTH);
627 skb_pull(pskb, 2);
628 pskb->dev = dev;
629 pskb->ip_summed = CHECKSUM_UNNECESSARY;
630 while (len > 0) {
631 struct sk_buff *skb;
632 struct ll_header *header = (struct ll_header *) pskb->data;
633
634 skb_pull(pskb, LL_HEADER_LENGTH);
635 if ((ch->protocol == CTC_PROTO_S390) &&
636 (header->type != ETH_P_IP)) {
637
638#ifndef DEBUG
639 if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
640#endif
641 /**
642 * Check packet type only if we stick strictly
643 * to S/390's protocol of OS390. This only
644 * supports IP. Otherwise allow any packet
645 * type.
646 */
647 ctc_pr_warn(
648 "%s Illegal packet type 0x%04x received, dropping\n",
649 dev->name, header->type);
650 ch->logflags |= LOG_FLAG_ILLEGALPKT;
651#ifndef DEBUG
652 }
653#endif
654#ifdef DEBUG
655 ctc_dump_skb(pskb, -6);
656#endif
657 privptr->stats.rx_dropped++;
658 privptr->stats.rx_frame_errors++;
659 return;
660 }
661 pskb->protocol = ntohs(header->type);
662 if (header->length <= LL_HEADER_LENGTH) {
663#ifndef DEBUG
664 if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
665#endif
666 ctc_pr_warn(
667 "%s Illegal packet size %d "
668 "received (MTU=%d blocklen=%d), "
669 "dropping\n", dev->name, header->length,
670 dev->mtu, len);
671 ch->logflags |= LOG_FLAG_ILLEGALSIZE;
672#ifndef DEBUG
673 }
674#endif
675#ifdef DEBUG
676 ctc_dump_skb(pskb, -6);
677#endif
678 privptr->stats.rx_dropped++;
679 privptr->stats.rx_length_errors++;
680 return;
681 }
682 header->length -= LL_HEADER_LENGTH;
683 len -= LL_HEADER_LENGTH;
684 if ((header->length > skb_tailroom(pskb)) ||
685 (header->length > len)) {
686#ifndef DEBUG
687 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
688#endif
689 ctc_pr_warn(
690 "%s Illegal packet size %d "
691 "(beyond the end of received data), "
692 "dropping\n", dev->name, header->length);
693 ch->logflags |= LOG_FLAG_OVERRUN;
694#ifndef DEBUG
695 }
696#endif
697#ifdef DEBUG
698 ctc_dump_skb(pskb, -6);
699#endif
700 privptr->stats.rx_dropped++;
701 privptr->stats.rx_length_errors++;
702 return;
703 }
704 skb_put(pskb, header->length);
705 pskb->mac.raw = pskb->data;
706 len -= header->length;
707 skb = dev_alloc_skb(pskb->len);
708 if (!skb) {
709#ifndef DEBUG
710 if (!(ch->logflags & LOG_FLAG_NOMEM)) {
711#endif
712 ctc_pr_warn(
713 "%s Out of memory in ctc_unpack_skb\n",
714 dev->name);
715 ch->logflags |= LOG_FLAG_NOMEM;
716#ifndef DEBUG
717 }
718#endif
719 privptr->stats.rx_dropped++;
720 return;
721 }
722 memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
723 skb->mac.raw = skb->data;
724 skb->dev = pskb->dev;
725 skb->protocol = pskb->protocol;
726 pskb->ip_summed = CHECKSUM_UNNECESSARY;
727 if (ch->protocol == CTC_PROTO_LINUX_TTY)
728 ctc_tty_netif_rx(skb);
729 else
730 netif_rx_ni(skb);
731 /**
732 * Successful rx; reset logflags
733 */
734 ch->logflags = 0;
735 dev->last_rx = jiffies;
736 privptr->stats.rx_packets++;
737 privptr->stats.rx_bytes += skb->len;
738 if (len > 0) {
739 skb_pull(pskb, header->length);
740 if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
741#ifndef DEBUG
742 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
743#endif
744 ctc_pr_warn(
745 "%s Overrun in ctc_unpack_skb\n",
746 dev->name);
747 ch->logflags |= LOG_FLAG_OVERRUN;
748#ifndef DEBUG
749 }
750#endif
751 return;
752 }
753 skb_put(pskb, LL_HEADER_LENGTH);
754 }
755 }
756}
757
758/**
759 * Check return code of a preceding ccw_device call, halt_IO etc...
760 *
761 * @param ch The channel, the error belongs to.
762 * @param return_code The error code to inspect.
763 */
764static inline void
765ccw_check_return_code(struct channel *ch, int return_code, char *msg)
766{
767 DBF_TEXT(trace, 5, __FUNCTION__);
768 switch (return_code) {
769 case 0:
770 fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
771 break;
772 case -EBUSY:
773 ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg);
774 fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
775 break;
776 case -ENODEV:
777 ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
778 ch->id, msg);
779 fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
780 break;
781 case -EIO:
782		ctc_pr_emerg("%s (%s): Status pending...\n",
783 ch->id, msg);
784 fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
785 break;
786 default:
787 ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
788 ch->id, msg, return_code);
789 fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
790 }
791}
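/*
 * Example (the pattern used throughout the channel actions below): every
 * ccw_device_start()/ccw_device_halt() result is funneled through this
 * helper so the statemachine receives a matching CH_EVENT_IO_* event:
 *
 *	rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
 *	if (rc != 0)
 *		ccw_check_return_code(ch, rc, "normal RX");
 */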
792
793/**
794 * Check sense of a unit check.
795 *
796 * @param ch The channel, the sense code belongs to.
797 * @param sense The sense code to inspect.
798 */
799static inline void
800ccw_unit_check(struct channel *ch, unsigned char sense)
801{
802 DBF_TEXT(trace, 5, __FUNCTION__);
803 if (sense & SNS0_INTERVENTION_REQ) {
804 if (sense & 0x01) {
805 if (ch->protocol != CTC_PROTO_LINUX_TTY)
806 ctc_pr_debug("%s: Interface disc. or Sel. reset "
807 "(remote)\n", ch->id);
808 fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
809 } else {
810 ctc_pr_debug("%s: System reset (remote)\n", ch->id);
811 fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
812 }
813 } else if (sense & SNS0_EQUIPMENT_CHECK) {
814 if (sense & SNS0_BUS_OUT_CHECK) {
815 ctc_pr_warn("%s: Hardware malfunction (remote)\n",
816 ch->id);
817 fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
818 } else {
819 ctc_pr_warn("%s: Read-data parity error (remote)\n",
820 ch->id);
821 fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
822 }
823 } else if (sense & SNS0_BUS_OUT_CHECK) {
824 if (sense & 0x04) {
825			ctc_pr_warn("%s: Data-streaming timeout\n", ch->id);
826 fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
827 } else {
828 ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
829 fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
830 }
831 } else if (sense & SNS0_CMD_REJECT) {
832 ctc_pr_warn("%s: Command reject\n", ch->id);
833 } else if (sense == 0) {
834 ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
835 fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
836 } else {
837 ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
838 ch->id, sense);
839 fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
840 }
841}
842
843static void
844ctc_purge_skb_queue(struct sk_buff_head *q)
845{
846 struct sk_buff *skb;
847
848 DBF_TEXT(trace, 5, __FUNCTION__);
849
850 while ((skb = skb_dequeue(q))) {
851 atomic_dec(&skb->users);
852 dev_kfree_skb_irq(skb);
853 }
854}
855
856static __inline__ int
857ctc_checkalloc_buffer(struct channel *ch, int warn)
858{
859 DBF_TEXT(trace, 5, __FUNCTION__);
860 if ((ch->trans_skb == NULL) ||
861 (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
862 if (ch->trans_skb != NULL)
863 dev_kfree_skb(ch->trans_skb);
864 clear_normalized_cda(&ch->ccw[1]);
865 ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
866 GFP_ATOMIC | GFP_DMA);
867 if (ch->trans_skb == NULL) {
868 if (warn)
869 ctc_pr_warn(
870 "%s: Couldn't alloc %s trans_skb\n",
871 ch->id,
872 (CHANNEL_DIRECTION(ch->flags) == READ) ?
873 "RX" : "TX");
874 return -ENOMEM;
875 }
876 ch->ccw[1].count = ch->max_bufsize;
877 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
878 dev_kfree_skb(ch->trans_skb);
879 ch->trans_skb = NULL;
880 if (warn)
881 ctc_pr_warn(
882 "%s: set_normalized_cda for %s "
883 "trans_skb failed, dropping packets\n",
884 ch->id,
885 (CHANNEL_DIRECTION(ch->flags) == READ) ?
886 "RX" : "TX");
887 return -ENOMEM;
888 }
889 ch->ccw[1].count = 0;
890 ch->trans_skb_data = ch->trans_skb->data;
891 ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
892 }
893 return 0;
894}
895
896/**
897 * Dummy NOP action for statemachines
898 */
899static void
900fsm_action_nop(fsm_instance * fi, int event, void *arg)
901{
902}
903
904/**
905 * Actions for channel - statemachines.
906 *****************************************************************************/
907
908/**
909 * Normal data has been sent. Free the corresponding
910 * skb (it's in io_queue), reset dev->tbusy and
911 * revert to idle state.
912 *
913 * @param fi An instance of a channel statemachine.
914 * @param event The event, just happened.
915 * @param arg Generic pointer, casted from channel * upon call.
916 */
917static void
918ch_action_txdone(fsm_instance * fi, int event, void *arg)
919{
920 struct channel *ch = (struct channel *) arg;
921 struct net_device *dev = ch->netdev;
922 struct ctc_priv *privptr = dev->priv;
923 struct sk_buff *skb;
924 int first = 1;
925 int i;
926 unsigned long duration;
927 struct timespec done_stamp = xtime;
928
929 DBF_TEXT(trace, 4, __FUNCTION__);
930
931 duration =
932 (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
933 (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
934 if (duration > ch->prof.tx_time)
935 ch->prof.tx_time = duration;
936
937 if (ch->irb->scsw.count != 0)
938 ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
939 dev->name, ch->irb->scsw.count);
940 fsm_deltimer(&ch->timer);
941 while ((skb = skb_dequeue(&ch->io_queue))) {
942 privptr->stats.tx_packets++;
943 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
944 if (first) {
945 privptr->stats.tx_bytes += 2;
946 first = 0;
947 }
948 atomic_dec(&skb->users);
949 dev_kfree_skb_irq(skb);
950 }
951 spin_lock(&ch->collect_lock);
952 clear_normalized_cda(&ch->ccw[4]);
953 if (ch->collect_len > 0) {
954 int rc;
955
956 if (ctc_checkalloc_buffer(ch, 1)) {
957 spin_unlock(&ch->collect_lock);
958 return;
959 }
960 ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data;
961 ch->trans_skb->len = 0;
962 if (ch->prof.maxmulti < (ch->collect_len + 2))
963 ch->prof.maxmulti = ch->collect_len + 2;
964 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
965 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
966 *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
967 i = 0;
968 while ((skb = skb_dequeue(&ch->collect_queue))) {
969 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
970 skb->len);
971 privptr->stats.tx_packets++;
972 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
973 atomic_dec(&skb->users);
974 dev_kfree_skb_irq(skb);
975 i++;
976 }
977 ch->collect_len = 0;
978 spin_unlock(&ch->collect_lock);
979 ch->ccw[1].count = ch->trans_skb->len;
980 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
981 ch->prof.send_stamp = xtime;
982 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
983 (unsigned long) ch, 0xff, 0);
984 ch->prof.doios_multi++;
985 if (rc != 0) {
986 privptr->stats.tx_dropped += i;
987 privptr->stats.tx_errors += i;
988 fsm_deltimer(&ch->timer);
989 ccw_check_return_code(ch, rc, "chained TX");
990 }
991 } else {
992 spin_unlock(&ch->collect_lock);
993 fsm_newstate(fi, CH_STATE_TXIDLE);
994 }
995 ctc_clear_busy(dev);
996}
997
998/**
999 * Initial data is sent.
1000 * Notify device statemachine that we are up and
1001 * running.
1002 *
1003 * @param fi An instance of a channel statemachine.
1004 * @param event The event, just happened.
1005 * @param arg Generic pointer, casted from channel * upon call.
1006 */
1007static void
1008ch_action_txidle(fsm_instance * fi, int event, void *arg)
1009{
1010 struct channel *ch = (struct channel *) arg;
1011
1012 DBF_TEXT(trace, 4, __FUNCTION__);
1013 fsm_deltimer(&ch->timer);
1014 fsm_newstate(fi, CH_STATE_TXIDLE);
1015 fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
1016 ch->netdev);
1017}
1018
1019/**
1020 * Got normal data, check for sanity, queue it up, allocate new buffer
1021 * trigger bottom half, and initiate next read.
1022 *
1023 * @param fi An instance of a channel statemachine.
1024 * @param event The event, just happened.
1025 * @param arg Generic pointer, casted from channel * upon call.
1026 */
1027static void
1028ch_action_rx(fsm_instance * fi, int event, void *arg)
1029{
1030 struct channel *ch = (struct channel *) arg;
1031 struct net_device *dev = ch->netdev;
1032 struct ctc_priv *privptr = dev->priv;
1033 int len = ch->max_bufsize - ch->irb->scsw.count;
1034 struct sk_buff *skb = ch->trans_skb;
1035 __u16 block_len = *((__u16 *) skb->data);
1036 int check_len;
1037 int rc;
1038
1039 DBF_TEXT(trace, 4, __FUNCTION__);
1040 fsm_deltimer(&ch->timer);
1041 if (len < 8) {
1042 ctc_pr_debug("%s: got packet with length %d < 8\n",
1043 dev->name, len);
1044 privptr->stats.rx_dropped++;
1045 privptr->stats.rx_length_errors++;
1046 goto again;
1047 }
1048 if (len > ch->max_bufsize) {
1049 ctc_pr_debug("%s: got packet with length %d > %d\n",
1050 dev->name, len, ch->max_bufsize);
1051 privptr->stats.rx_dropped++;
1052 privptr->stats.rx_length_errors++;
1053 goto again;
1054 }
1055
1056 /**
1057 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
1058 */
1059 switch (ch->protocol) {
1060 case CTC_PROTO_S390:
1061 case CTC_PROTO_OS390:
1062 check_len = block_len + 2;
1063 break;
1064 default:
1065 check_len = block_len;
1066 break;
1067 }
1068 if ((len < block_len) || (len > check_len)) {
1069 ctc_pr_debug("%s: got block length %d != rx length %d\n",
1070 dev->name, block_len, len);
1071#ifdef DEBUG
1072 ctc_dump_skb(skb, 0);
1073#endif
1074 *((__u16 *) skb->data) = len;
1075 privptr->stats.rx_dropped++;
1076 privptr->stats.rx_length_errors++;
1077 goto again;
1078 }
1079 block_len -= 2;
1080 if (block_len > 0) {
1081 *((__u16 *) skb->data) = block_len;
1082 ctc_unpack_skb(ch, skb);
1083 }
1084 again:
1085 skb->data = skb->tail = ch->trans_skb_data;
1086 skb->len = 0;
1087 if (ctc_checkalloc_buffer(ch, 1))
1088 return;
1089 ch->ccw[1].count = ch->max_bufsize;
1090 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
1091 if (rc != 0)
1092 ccw_check_return_code(ch, rc, "normal RX");
1093}
1094
1095static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);
1096
1097/**
1098 * Initialize connection by sending a __u16 of value 0.
1099 *
1100 * @param fi An instance of a channel statemachine.
1101 * @param event The event, just happened.
1102 * @param arg Generic pointer, casted from channel * upon call.
1103 */
1104static void
1105ch_action_firstio(fsm_instance * fi, int event, void *arg)
1106{
1107 struct channel *ch = (struct channel *) arg;
1108 int rc;
1109
1110 DBF_TEXT(trace, 4, __FUNCTION__);
1111
1112 if (fsm_getstate(fi) == CH_STATE_TXIDLE)
1113 ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
1114 fsm_deltimer(&ch->timer);
1115 if (ctc_checkalloc_buffer(ch, 1))
1116 return;
1117 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1118 (ch->protocol == CTC_PROTO_OS390)) {
1119 /* OS/390 resp. z/OS */
1120 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1121 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
1122 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
1123 CH_EVENT_TIMER, ch);
1124 ch_action_rxidle(fi, event, arg);
1125 } else {
1126 struct net_device *dev = ch->netdev;
1127 fsm_newstate(fi, CH_STATE_TXIDLE);
1128 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1129 DEV_EVENT_TXUP, dev);
1130 }
1131 return;
1132 }
1133
1134 /**
1135	 * Don't set up a timer for receiving the initial RX frame
1136 * if in compatibility mode, since VM TCP delays the initial
1137 * frame until it has some data to send.
1138 */
1139 if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
1140 (ch->protocol != CTC_PROTO_S390))
1141 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1142
1143 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
1144 ch->ccw[1].count = 2; /* Transfer only length */
1145
1146 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
1147 ? CH_STATE_RXINIT : CH_STATE_TXINIT);
1148 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
1149 if (rc != 0) {
1150 fsm_deltimer(&ch->timer);
1151 fsm_newstate(fi, CH_STATE_SETUPWAIT);
1152 ccw_check_return_code(ch, rc, "init IO");
1153 }
1154 /**
1155	 * If in compatibility mode we don't set up a timer; we
1156	 * also signal the RX channel up immediately. This enables us
1157	 * to send packets early, which in turn usually triggers a
1158	 * reply from VM TCP that brings the RX channel to its
1159	 * final state.
1160 */
1161 if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
1162 (ch->protocol == CTC_PROTO_S390)) {
1163 struct net_device *dev = ch->netdev;
1164 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
1165 dev);
1166 }
1167}
1168
1169/**
1170 * Got initial data, check it. If OK,
1171 * notify device statemachine that we are up and
1172 * running.
1173 *
1174 * @param fi An instance of a channel statemachine.
1175 * @param event The event, just happened.
1176 * @param arg Generic pointer, casted from channel * upon call.
1177 */
1178static void
1179ch_action_rxidle(fsm_instance * fi, int event, void *arg)
1180{
1181 struct channel *ch = (struct channel *) arg;
1182 struct net_device *dev = ch->netdev;
1183 __u16 buflen;
1184 int rc;
1185
1186 DBF_TEXT(trace, 4, __FUNCTION__);
1187 fsm_deltimer(&ch->timer);
1188 buflen = *((__u16 *) ch->trans_skb->data);
1189#ifdef DEBUG
1190 ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
1191#endif
1192 if (buflen >= CTC_INITIAL_BLOCKLEN) {
1193 if (ctc_checkalloc_buffer(ch, 1))
1194 return;
1195 ch->ccw[1].count = ch->max_bufsize;
1196 fsm_newstate(fi, CH_STATE_RXIDLE);
1197 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1198 (unsigned long) ch, 0xff, 0);
1199 if (rc != 0) {
1200 fsm_newstate(fi, CH_STATE_RXINIT);
1201 ccw_check_return_code(ch, rc, "initial RX");
1202 } else
1203 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1204 DEV_EVENT_RXUP, dev);
1205 } else {
1206 ctc_pr_debug("%s: Initial RX count %d not %d\n",
1207 dev->name, buflen, CTC_INITIAL_BLOCKLEN);
1208 ch_action_firstio(fi, event, arg);
1209 }
1210}
1211
1212/**
1213 * Set channel into extended mode.
1214 *
1215 * @param fi An instance of a channel statemachine.
1216 * @param event The event, just happened.
1217 * @param arg Generic pointer, casted from channel * upon call.
1218 */
1219static void
1220ch_action_setmode(fsm_instance * fi, int event, void *arg)
1221{
1222 struct channel *ch = (struct channel *) arg;
1223 int rc;
1224 unsigned long saveflags;
1225
1226 DBF_TEXT(trace, 4, __FUNCTION__);
1227 fsm_deltimer(&ch->timer);
1228 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1229 fsm_newstate(fi, CH_STATE_SETUPWAIT);
1230 saveflags = 0; /* avoids compiler warning with
1231 spin_unlock_irqrestore */
1232 if (event == CH_EVENT_TIMER) // only for timer not yet locked
1233 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1234 rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
1235 if (event == CH_EVENT_TIMER)
1236 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1237 if (rc != 0) {
1238 fsm_deltimer(&ch->timer);
1239 fsm_newstate(fi, CH_STATE_STARTWAIT);
1240 ccw_check_return_code(ch, rc, "set Mode");
1241 } else
1242 ch->retry = 0;
1243}
1244
1245/**
1246 * Setup channel.
1247 *
1248 * @param fi An instance of a channel statemachine.
1249 * @param event The event, just happened.
1250 * @param arg Generic pointer, casted from channel * upon call.
1251 */
1252static void
1253ch_action_start(fsm_instance * fi, int event, void *arg)
1254{
1255 struct channel *ch = (struct channel *) arg;
1256 unsigned long saveflags;
1257 int rc;
1258 struct net_device *dev;
1259
1260 DBF_TEXT(trace, 4, __FUNCTION__);
1261 if (ch == NULL) {
1262 ctc_pr_warn("ch_action_start ch=NULL\n");
1263 return;
1264 }
1265 if (ch->netdev == NULL) {
1266 ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
1267 return;
1268 }
1269 dev = ch->netdev;
1270
1271#ifdef DEBUG
1272 ctc_pr_debug("%s: %s channel start\n", dev->name,
1273 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1274#endif
1275
1276 if (ch->trans_skb != NULL) {
1277 clear_normalized_cda(&ch->ccw[1]);
1278 dev_kfree_skb(ch->trans_skb);
1279 ch->trans_skb = NULL;
1280 }
1281 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1282 ch->ccw[1].cmd_code = CCW_CMD_READ;
1283 ch->ccw[1].flags = CCW_FLAG_SLI;
1284 ch->ccw[1].count = 0;
1285 } else {
1286 ch->ccw[1].cmd_code = CCW_CMD_WRITE;
1287 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1288 ch->ccw[1].count = 0;
1289 }
1290 if (ctc_checkalloc_buffer(ch, 0)) {
1291 ctc_pr_notice(
1292 "%s: Could not allocate %s trans_skb, delaying "
1293 "allocation until first transfer\n",
1294 dev->name,
1295 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1296 }
1297
1298 ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
1299 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1300 ch->ccw[0].count = 0;
1301 ch->ccw[0].cda = 0;
1302	ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* joint CE + DE */
1303 ch->ccw[2].flags = CCW_FLAG_SLI;
1304 ch->ccw[2].count = 0;
1305 ch->ccw[2].cda = 0;
1306 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
1307 ch->ccw[4].cda = 0;
1308 ch->ccw[4].flags &= ~CCW_FLAG_IDA;
1309
1310 fsm_newstate(fi, CH_STATE_STARTWAIT);
1311 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1312 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1313 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1314 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1315 if (rc != 0) {
1316 if (rc != -EBUSY)
1317 fsm_deltimer(&ch->timer);
1318 ccw_check_return_code(ch, rc, "initial HaltIO");
1319 }
1320#ifdef DEBUG
1321 ctc_pr_debug("ctc: %s(): leaving\n", __func__);
1322#endif
1323}
1324
1325/**
1326 * Shutdown a channel.
1327 *
1328 * @param fi An instance of a channel statemachine.
1329 * @param event The event, just happened.
1330 * @param arg Generic pointer, casted from channel * upon call.
1331 */
1332static void
1333ch_action_haltio(fsm_instance * fi, int event, void *arg)
1334{
1335 struct channel *ch = (struct channel *) arg;
1336 unsigned long saveflags;
1337 int rc;
1338 int oldstate;
1339
1340 DBF_TEXT(trace, 3, __FUNCTION__);
1341 fsm_deltimer(&ch->timer);
1342 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1343 saveflags = 0; /* avoids comp warning with
1344 spin_unlock_irqrestore */
1345 if (event == CH_EVENT_STOP) // only for STOP not yet locked
1346 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1347 oldstate = fsm_getstate(fi);
1348 fsm_newstate(fi, CH_STATE_TERM);
1349 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1350 if (event == CH_EVENT_STOP)
1351 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1352 if (rc != 0) {
1353 if (rc != -EBUSY) {
1354 fsm_deltimer(&ch->timer);
1355 fsm_newstate(fi, oldstate);
1356 }
1357 ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
1358 }
1359}
1360
1361/**
1362 * A channel has successfully been halted.
1363 * Clean up its queue and notify the interface statemachine.
1364 *
1365 * @param fi An instance of a channel statemachine.
1366 * @param event The event, just happened.
1367 * @param arg Generic pointer, casted from channel * upon call.
1368 */
1369static void
1370ch_action_stopped(fsm_instance * fi, int event, void *arg)
1371{
1372 struct channel *ch = (struct channel *) arg;
1373 struct net_device *dev = ch->netdev;
1374
1375 DBF_TEXT(trace, 3, __FUNCTION__);
1376 fsm_deltimer(&ch->timer);
1377 fsm_newstate(fi, CH_STATE_STOPPED);
1378 if (ch->trans_skb != NULL) {
1379 clear_normalized_cda(&ch->ccw[1]);
1380 dev_kfree_skb(ch->trans_skb);
1381 ch->trans_skb = NULL;
1382 }
1383 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1384 skb_queue_purge(&ch->io_queue);
1385 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1386 DEV_EVENT_RXDOWN, dev);
1387 } else {
1388 ctc_purge_skb_queue(&ch->io_queue);
1389 spin_lock(&ch->collect_lock);
1390 ctc_purge_skb_queue(&ch->collect_queue);
1391 ch->collect_len = 0;
1392 spin_unlock(&ch->collect_lock);
1393 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1394 DEV_EVENT_TXDOWN, dev);
1395 }
1396}
1397
1398/**
1399 * A stop command from device statemachine arrived and we are in
1400 * not operational mode. Set state to stopped.
1401 *
1402 * @param fi An instance of a channel statemachine.
1403 * @param event The event, just happened.
1404 * @param arg Generic pointer, casted from channel * upon call.
1405 */
1406static void
1407ch_action_stop(fsm_instance * fi, int event, void *arg)
1408{
1409 fsm_newstate(fi, CH_STATE_STOPPED);
1410}
1411
1412/**
1413 * A machine check for no path, not-operational status or a gone
1414 * device has happened.
1415 * Clean up the queue and notify the interface statemachine.
1416 *
1417 * @param fi An instance of a channel statemachine.
1418 * @param event The event that just happened.
1419 * @param arg Generic pointer, cast from channel * upon call.
1420 */
1421static void
1422ch_action_fail(fsm_instance * fi, int event, void *arg)
1423{
1424 struct channel *ch = (struct channel *) arg;
1425 struct net_device *dev = ch->netdev;
1426
1427 DBF_TEXT(trace, 3, __FUNCTION__);
1428 fsm_deltimer(&ch->timer);
1429 fsm_newstate(fi, CH_STATE_NOTOP);
1430 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1431 skb_queue_purge(&ch->io_queue);
1432 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1433 DEV_EVENT_RXDOWN, dev);
1434 } else {
1435 ctc_purge_skb_queue(&ch->io_queue);
1436 spin_lock(&ch->collect_lock);
1437 ctc_purge_skb_queue(&ch->collect_queue);
1438 ch->collect_len = 0;
1439 spin_unlock(&ch->collect_lock);
1440 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1441 DEV_EVENT_TXDOWN, dev);
1442 }
1443}
1444
1445/**
1446 * Handle error during setup of channel.
1447 *
1448 * @param fi An instance of a channel statemachine.
1449 * @param event The event that just happened.
1450 * @param arg Generic pointer, cast from channel * upon call.
1451 */
1452static void
1453ch_action_setuperr(fsm_instance * fi, int event, void *arg)
1454{
1455 struct channel *ch = (struct channel *) arg;
1456 struct net_device *dev = ch->netdev;
1457
1458 DBF_TEXT(setup, 3, __FUNCTION__);
1459 /**
1460 * Special case: Got UC_RCRESET on setmode.
1461	 * This means that the remote side isn't set up. In this
1462	 * case simply retry after some 10 secs...
1463 */
1464 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1465 ((event == CH_EVENT_UC_RCRESET) ||
1466 (event == CH_EVENT_UC_RSRESET))) {
1467 fsm_newstate(fi, CH_STATE_STARTRETRY);
1468 fsm_deltimer(&ch->timer);
1469 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1470 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1471 int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1472 if (rc != 0)
1473 ccw_check_return_code(
1474 ch, rc, "HaltIO in ch_action_setuperr");
1475 }
1476 return;
1477 }
1478
1479	ctc_pr_debug("%s: Error %s during %s channel setup, state=%s\n",
1480 dev->name, ch_event_names[event],
1481 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
1482 fsm_getstate_str(fi));
1483 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1484 fsm_newstate(fi, CH_STATE_RXERR);
1485 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1486 DEV_EVENT_RXDOWN, dev);
1487 } else {
1488 fsm_newstate(fi, CH_STATE_TXERR);
1489 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1490 DEV_EVENT_TXDOWN, dev);
1491 }
1492}
1493
1494/**
1495 * Restart a channel after an error.
1496 *
1497 * @param fi An instance of a channel statemachine.
1498 * @param event The event that just happened.
1499 * @param arg Generic pointer, cast from channel * upon call.
1500 */
1501static void
1502ch_action_restart(fsm_instance * fi, int event, void *arg)
1503{
1504 unsigned long saveflags;
1505 int oldstate;
1506 int rc;
1507
1508 struct channel *ch = (struct channel *) arg;
1509 struct net_device *dev = ch->netdev;
1510
1511 DBF_TEXT(trace, 3, __FUNCTION__);
1512 fsm_deltimer(&ch->timer);
1513 ctc_pr_debug("%s: %s channel restart\n", dev->name,
1514 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1515 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1516 oldstate = fsm_getstate(fi);
1517 fsm_newstate(fi, CH_STATE_STARTWAIT);
1518 saveflags = 0; /* avoids compiler warning with
1519 spin_unlock_irqrestore */
1520 if (event == CH_EVENT_TIMER) // only for timer not yet locked
1521 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1522 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1523 if (event == CH_EVENT_TIMER)
1524 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1525 if (rc != 0) {
1526 if (rc != -EBUSY) {
1527 fsm_deltimer(&ch->timer);
1528 fsm_newstate(fi, oldstate);
1529 }
1530 ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
1531 }
1532}
1533
1534/**
1535 * Handle error during RX initial handshake (exchange of
1536 * 0-length block header)
1537 *
1538 * @param fi An instance of a channel statemachine.
1539 * @param event The event that just happened.
1540 * @param arg Generic pointer, cast from channel * upon call.
1541 */
1542static void
1543ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
1544{
1545 struct channel *ch = (struct channel *) arg;
1546 struct net_device *dev = ch->netdev;
1547
1548 DBF_TEXT(setup, 3, __FUNCTION__);
1549 if (event == CH_EVENT_TIMER) {
1550 fsm_deltimer(&ch->timer);
1551 ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
1552 if (ch->retry++ < 3)
1553 ch_action_restart(fi, event, arg);
1554 else {
1555 fsm_newstate(fi, CH_STATE_RXERR);
1556 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1557 DEV_EVENT_RXDOWN, dev);
1558 }
1559 } else
1560 ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
1561}
1562
1563/**
1564 * Notify device statemachine if we gave up initialization
1565 * of RX channel.
1566 *
1567 * @param fi An instance of a channel statemachine.
1568 * @param event The event that just happened.
1569 * @param arg Generic pointer, cast from channel * upon call.
1570 */
1571static void
1572ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
1573{
1574 struct channel *ch = (struct channel *) arg;
1575 struct net_device *dev = ch->netdev;
1576
1577 DBF_TEXT(setup, 3, __FUNCTION__);
1578 fsm_newstate(fi, CH_STATE_RXERR);
1579 ctc_pr_warn("%s: RX initialization failed\n", dev->name);
1580 ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
1581 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1582}
1583
1584/**
1585 * Handle RX Unit check remote reset (remote disconnected)
1586 *
1587 * @param fi An instance of a channel statemachine.
1588 * @param event The event that just happened.
1589 * @param arg Generic pointer, cast from channel * upon call.
1590 */
1591static void
1592ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
1593{
1594 struct channel *ch = (struct channel *) arg;
1595 struct channel *ch2;
1596 struct net_device *dev = ch->netdev;
1597
1598 DBF_TEXT(trace, 3, __FUNCTION__);
1599 fsm_deltimer(&ch->timer);
1600 ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
1601 dev->name);
1602
1603 /**
1604 * Notify device statemachine
1605 */
1606 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1607 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
1608
1609 fsm_newstate(fi, CH_STATE_DTERM);
1610 ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
1611 fsm_newstate(ch2->fsm, CH_STATE_DTERM);
1612
1613 ccw_device_halt(ch->cdev, (unsigned long) ch);
1614 ccw_device_halt(ch2->cdev, (unsigned long) ch2);
1615}
1616
1617/**
1618 * Handle error during TX channel initialization.
1619 *
1620 * @param fi An instance of a channel statemachine.
1621 * @param event The event that just happened.
1622 * @param arg Generic pointer, cast from channel * upon call.
1623 */
1624static void
1625ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
1626{
1627 struct channel *ch = (struct channel *) arg;
1628 struct net_device *dev = ch->netdev;
1629
1630 DBF_TEXT(setup, 2, __FUNCTION__);
1631 if (event == CH_EVENT_TIMER) {
1632 fsm_deltimer(&ch->timer);
1633 ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
1634 if (ch->retry++ < 3)
1635 ch_action_restart(fi, event, arg);
1636 else {
1637 fsm_newstate(fi, CH_STATE_TXERR);
1638 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1639 DEV_EVENT_TXDOWN, dev);
1640 }
1641 } else
1642 ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
1643}
1644
1645/**
1646 * Handle TX timeout by retrying operation.
1647 *
1648 * @param fi An instance of a channel statemachine.
1649 * @param event The event that just happened.
1650 * @param arg Generic pointer, cast from channel * upon call.
1651 */
1652static void
1653ch_action_txretry(fsm_instance * fi, int event, void *arg)
1654{
1655 struct channel *ch = (struct channel *) arg;
1656 struct net_device *dev = ch->netdev;
1657 unsigned long saveflags;
1658
1659 DBF_TEXT(trace, 4, __FUNCTION__);
1660 fsm_deltimer(&ch->timer);
1661 if (ch->retry++ > 3) {
1662 ctc_pr_debug("%s: TX retry failed, restarting channel\n",
1663 dev->name);
1664 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1665 DEV_EVENT_TXDOWN, dev);
1666 ch_action_restart(fi, event, arg);
1667 } else {
1668 struct sk_buff *skb;
1669
1670 ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
1671 if ((skb = skb_peek(&ch->io_queue))) {
1672 int rc = 0;
1673
1674 clear_normalized_cda(&ch->ccw[4]);
1675 ch->ccw[4].count = skb->len;
1676 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1677 ctc_pr_debug(
1678 "%s: IDAL alloc failed, chan restart\n",
1679 dev->name);
1680 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1681 DEV_EVENT_TXDOWN, dev);
1682 ch_action_restart(fi, event, arg);
1683 return;
1684 }
1685 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1686 saveflags = 0; /* avoids compiler warning with
1687 spin_unlock_irqrestore */
1688 if (event == CH_EVENT_TIMER) // only for TIMER not yet locked
1689 spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
1690 saveflags);
1691 rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1692 (unsigned long) ch, 0xff, 0);
1693 if (event == CH_EVENT_TIMER)
1694 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1695 saveflags);
1696 if (rc != 0) {
1697 fsm_deltimer(&ch->timer);
1698 ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
1699 ctc_purge_skb_queue(&ch->io_queue);
1700 }
1701 }
1702 }
1703
1704}
1705
1706/**
1707 * Handle fatal errors during an I/O command.
1708 *
1709 * @param fi An instance of a channel statemachine.
1710 * @param event The event that just happened.
1711 * @param arg Generic pointer, cast from channel * upon call.
1712 */
1713static void
1714ch_action_iofatal(fsm_instance * fi, int event, void *arg)
1715{
1716 struct channel *ch = (struct channel *) arg;
1717 struct net_device *dev = ch->netdev;
1718
1719 DBF_TEXT(trace, 3, __FUNCTION__);
1720 fsm_deltimer(&ch->timer);
1721 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1722 ctc_pr_debug("%s: RX I/O error\n", dev->name);
1723 fsm_newstate(fi, CH_STATE_RXERR);
1724 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1725 DEV_EVENT_RXDOWN, dev);
1726 } else {
1727 ctc_pr_debug("%s: TX I/O error\n", dev->name);
1728 fsm_newstate(fi, CH_STATE_TXERR);
1729 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1730 DEV_EVENT_TXDOWN, dev);
1731 }
1732}
1733
1734static void
1735ch_action_reinit(fsm_instance *fi, int event, void *arg)
1736{
1737 struct channel *ch = (struct channel *)arg;
1738 struct net_device *dev = ch->netdev;
1739 struct ctc_priv *privptr = dev->priv;
1740
1741 DBF_TEXT(trace, 4, __FUNCTION__);
1742 ch_action_iofatal(fi, event, arg);
1743 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
1744}
1745
1746
1747/**
1748 * The statemachine for a channel.
1749 */
1750static const fsm_node ch_fsm[] = {
1751 {CH_STATE_STOPPED, CH_EVENT_STOP, fsm_action_nop },
1752 {CH_STATE_STOPPED, CH_EVENT_START, ch_action_start },
1753 {CH_STATE_STOPPED, CH_EVENT_FINSTAT, fsm_action_nop },
1754 {CH_STATE_STOPPED, CH_EVENT_MC_FAIL, fsm_action_nop },
1755
1756 {CH_STATE_NOTOP, CH_EVENT_STOP, ch_action_stop },
1757 {CH_STATE_NOTOP, CH_EVENT_START, fsm_action_nop },
1758 {CH_STATE_NOTOP, CH_EVENT_FINSTAT, fsm_action_nop },
1759 {CH_STATE_NOTOP, CH_EVENT_MC_FAIL, fsm_action_nop },
1760 {CH_STATE_NOTOP, CH_EVENT_MC_GOOD, ch_action_start },
1761
1762 {CH_STATE_STARTWAIT, CH_EVENT_STOP, ch_action_haltio },
1763 {CH_STATE_STARTWAIT, CH_EVENT_START, fsm_action_nop },
1764 {CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ch_action_setmode },
1765 {CH_STATE_STARTWAIT, CH_EVENT_TIMER, ch_action_setuperr },
1766 {CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1767 {CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1768 {CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1769
1770 {CH_STATE_STARTRETRY, CH_EVENT_STOP, ch_action_haltio },
1771 {CH_STATE_STARTRETRY, CH_EVENT_TIMER, ch_action_setmode },
1772 {CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, fsm_action_nop },
1773 {CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL, ch_action_fail },
1774
1775 {CH_STATE_SETUPWAIT, CH_EVENT_STOP, ch_action_haltio },
1776 {CH_STATE_SETUPWAIT, CH_EVENT_START, fsm_action_nop },
1777 {CH_STATE_SETUPWAIT, CH_EVENT_FINSTAT, ch_action_firstio },
1778 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RCRESET, ch_action_setuperr },
1779 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ch_action_setuperr },
1780 {CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ch_action_setmode },
1781 {CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1782 {CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1783 {CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1784
1785 {CH_STATE_RXINIT, CH_EVENT_STOP, ch_action_haltio },
1786 {CH_STATE_RXINIT, CH_EVENT_START, fsm_action_nop },
1787 {CH_STATE_RXINIT, CH_EVENT_FINSTAT, ch_action_rxidle },
1788 {CH_STATE_RXINIT, CH_EVENT_UC_RCRESET, ch_action_rxiniterr },
1789 {CH_STATE_RXINIT, CH_EVENT_UC_RSRESET, ch_action_rxiniterr },
1790 {CH_STATE_RXINIT, CH_EVENT_TIMER, ch_action_rxiniterr },
1791 {CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ch_action_rxinitfail },
1792 {CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1793 {CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1794 {CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ch_action_firstio },
1795 {CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1796
1797 {CH_STATE_RXIDLE, CH_EVENT_STOP, ch_action_haltio },
1798 {CH_STATE_RXIDLE, CH_EVENT_START, fsm_action_nop },
1799 {CH_STATE_RXIDLE, CH_EVENT_FINSTAT, ch_action_rx },
1800 {CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ch_action_rxdisc },
1801// {CH_STATE_RXIDLE, CH_EVENT_UC_RSRESET, ch_action_rxretry },
1802 {CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1803 {CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1804 {CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1805 {CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ch_action_rx },
1806
1807 {CH_STATE_TXINIT, CH_EVENT_STOP, ch_action_haltio },
1808 {CH_STATE_TXINIT, CH_EVENT_START, fsm_action_nop },
1809 {CH_STATE_TXINIT, CH_EVENT_FINSTAT, ch_action_txidle },
1810 {CH_STATE_TXINIT, CH_EVENT_UC_RCRESET, ch_action_txiniterr },
1811 {CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ch_action_txiniterr },
1812 {CH_STATE_TXINIT, CH_EVENT_TIMER, ch_action_txiniterr },
1813 {CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1814 {CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1815 {CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1816
1817 {CH_STATE_TXIDLE, CH_EVENT_STOP, ch_action_haltio },
1818 {CH_STATE_TXIDLE, CH_EVENT_START, fsm_action_nop },
1819 {CH_STATE_TXIDLE, CH_EVENT_FINSTAT, ch_action_firstio },
1820 {CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, fsm_action_nop },
1821 {CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, fsm_action_nop },
1822 {CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1823 {CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1824 {CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1825
1826 {CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop },
1827 {CH_STATE_TERM, CH_EVENT_START, ch_action_restart },
1828 {CH_STATE_TERM, CH_EVENT_FINSTAT, ch_action_stopped },
1829 {CH_STATE_TERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1830 {CH_STATE_TERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1831 {CH_STATE_TERM, CH_EVENT_MC_FAIL, ch_action_fail },
1832
1833 {CH_STATE_DTERM, CH_EVENT_STOP, ch_action_haltio },
1834 {CH_STATE_DTERM, CH_EVENT_START, ch_action_restart },
1835 {CH_STATE_DTERM, CH_EVENT_FINSTAT, ch_action_setmode },
1836 {CH_STATE_DTERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1837 {CH_STATE_DTERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1838 {CH_STATE_DTERM, CH_EVENT_MC_FAIL, ch_action_fail },
1839
1840 {CH_STATE_TX, CH_EVENT_STOP, ch_action_haltio },
1841 {CH_STATE_TX, CH_EVENT_START, fsm_action_nop },
1842 {CH_STATE_TX, CH_EVENT_FINSTAT, ch_action_txdone },
1843 {CH_STATE_TX, CH_EVENT_UC_RCRESET, ch_action_txretry },
1844 {CH_STATE_TX, CH_EVENT_UC_RSRESET, ch_action_txretry },
1845 {CH_STATE_TX, CH_EVENT_TIMER, ch_action_txretry },
1846 {CH_STATE_TX, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1847 {CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_reinit },
1848 {CH_STATE_TX, CH_EVENT_MC_FAIL, ch_action_fail },
1849
1850 {CH_STATE_RXERR, CH_EVENT_STOP, ch_action_haltio },
1851 {CH_STATE_TXERR, CH_EVENT_STOP, ch_action_haltio },
1852 {CH_STATE_TXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1853 {CH_STATE_RXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1854};
1855
1856static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
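
/*
 * Illustration (not from the original driver): conceptually, fsm_event()
 * dispatches through a table like ch_fsm above. A minimal sketch, assuming
 * fsm_node carries cond_state, cond_event and function members; the real
 * fsm helpers may precompute a jump table instead of scanning linearly:
 *
 *	static void
 *	example_dispatch(fsm_instance *fi, int event, void *arg,
 *			 const fsm_node *tbl, int len)
 *	{
 *		int state = fsm_getstate(fi);
 *		int i;
 *
 *		for (i = 0; i < len; i++)
 *			if (tbl[i].cond_state == state &&
 *			    tbl[i].cond_event == event) {
 *				tbl[i].function(fi, event, arg);
 *				return;
 *			}
 *	}
 *
 * State/event pairs without a table entry are simply ignored.
 */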
1857
1858/**
1859 * Functions related to setup and device detection.
1860 *****************************************************************************/
1861
1862static inline int
1863less_than(char *id1, char *id2)
1864{
1865 int dev1, dev2, i;
1866
1867 for (i = 0; i < 5; i++) {
1868 id1++;
1869 id2++;
1870 }
1871 dev1 = simple_strtoul(id1, &id1, 16);
1872 dev2 = simple_strtoul(id2, &id2, 16);
1873
1874 return (dev1 < dev2);
1875}
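
/*
 * Note: less_than() skips the first five characters of both ids
 * (presumably the "ch-" prefix plus the leading bus-id characters of
 * the names built in add_channel()) and compares the remaining hex
 * device numbers.
 */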
1876
1877/**
1878 * Add a new channel to the list of channels.
1879 * Keeps the channel list sorted.
1880 *
1881 * @param cdev The ccw_device to be added.
1882 * @param type The type class of the new channel.
1883 *
1884 * @return 0 on success, !0 on error.
1885 */
1886static int
1887add_channel(struct ccw_device *cdev, enum channel_types type)
1888{
1889 struct channel **c = &channels;
1890 struct channel *ch;
1891
1892 DBF_TEXT(trace, 2, __FUNCTION__);
1893 if ((ch =
1894 (struct channel *) kmalloc(sizeof (struct channel),
1895 GFP_KERNEL)) == NULL) {
1896 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1897 return -1;
1898 }
1899 memset(ch, 0, sizeof (struct channel));
1900 if ((ch->ccw = (struct ccw1 *) kmalloc(8*sizeof(struct ccw1),
1901 GFP_KERNEL | GFP_DMA)) == NULL) {
1902 kfree(ch);
1903 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1904 return -1;
1905 }
1906
1907	memset(ch->ccw, 0, 8*sizeof(struct ccw1)); // ensure all flags and counters are reset
1908
1909 /**
1910 * "static" ccws are used in the following way:
1911 *
1912 * ccw[0..2] (Channel program for generic I/O):
1913 * 0: prepare
1914 * 1: read or write (depending on direction) with fixed
1915 * buffer (idal allocated once when buffer is allocated)
1916 * 2: nop
1917 * ccw[3..5] (Channel program for direct write of packets)
1918 * 3: prepare
1919 * 4: write (idal allocated on every write).
1920 * 5: nop
1921 * ccw[6..7] (Channel program for initial channel setup):
1922 * 6: set extended mode
1923 * 7: nop
1924 *
1925	 * ch->ccw[0..5] are initialized in ch_action_start because
1926	 * the channel's direction is not yet known here.
1927 */
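	/*
	 * Illustrative shape of the generic I/O program ccw[0..2] once
	 * ch_action_start has filled it in (a sketch following the layout
	 * above; the exact flags are assumptions, not copied from code):
	 *   ccw[0]: prepare,                          chained to
	 *   ccw[1]: read or write, cda -> fixed buffer, chained to
	 *   ccw[2]: nop, ending the program
	 */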
1928 ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
1929 ch->ccw[6].flags = CCW_FLAG_SLI;
1930
1931 ch->ccw[7].cmd_code = CCW_CMD_NOOP;
1932 ch->ccw[7].flags = CCW_FLAG_SLI;
1933
1934 ch->cdev = cdev;
1935 snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
1936 ch->type = type;
1937 loglevel = CTC_LOGLEVEL_DEFAULT;
1938 ch->fsm = init_fsm(ch->id, ch_state_names,
1939 ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
1940 ch_fsm, CH_FSM_LEN, GFP_KERNEL);
1941 if (ch->fsm == NULL) {
1942 ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
1943 kfree(ch->ccw);
1944 kfree(ch);
1945 return -1;
1946 }
1947 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1948 if ((ch->irb = (struct irb *) kmalloc(sizeof (struct irb),
1949 GFP_KERNEL)) == NULL) {
1950 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1951 kfree_fsm(ch->fsm);
1952 kfree(ch->ccw);
1953 kfree(ch);
1954 return -1;
1955 }
1956 memset(ch->irb, 0, sizeof (struct irb));
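	/*
	 * Walk the list with a pointer-to-pointer so the new channel can
	 * be linked in sorted order without special-casing the list head.
	 */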
1957 while (*c && less_than((*c)->id, ch->id))
1958 c = &(*c)->next;
1959 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
1960 ctc_pr_debug(
1961 "ctc: add_channel: device %s already in list, "
1962 "using old entry\n", (*c)->id);
1963 kfree(ch->irb);
1964 kfree_fsm(ch->fsm);
1965 kfree(ch->ccw);
1966 kfree(ch);
1967 return 0;
1968 }
1969 fsm_settimer(ch->fsm, &ch->timer);
1970 skb_queue_head_init(&ch->io_queue);
1971 skb_queue_head_init(&ch->collect_queue);
1972 ch->next = *c;
1973 *c = ch;
1974 return 0;
1975}
1976
1977/**
1978 * Release a specific channel in the channel list.
1979 *
1980 * @param ch Pointer to channel struct to be released.
1981 */
1982static void
1983channel_free(struct channel *ch)
1984{
1985 ch->flags &= ~CHANNEL_FLAGS_INUSE;
1986 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1987}
1988
1989/**
1990 * Remove a specific channel in the channel list.
1991 *
1992 * @param ch Pointer to channel struct to be released.
1993 */
1994static void
1995channel_remove(struct channel *ch)
1996{
1997 struct channel **c = &channels;
1998
1999 DBF_TEXT(trace, 2, __FUNCTION__);
2000 if (ch == NULL)
2001 return;
2002
2003 channel_free(ch);
2004 while (*c) {
2005 if (*c == ch) {
2006 *c = ch->next;
2007 fsm_deltimer(&ch->timer);
2008 kfree_fsm(ch->fsm);
2009 clear_normalized_cda(&ch->ccw[4]);
2010 if (ch->trans_skb != NULL) {
2011 clear_normalized_cda(&ch->ccw[1]);
2012 dev_kfree_skb(ch->trans_skb);
2013 }
2014 kfree(ch->ccw);
2015 kfree(ch->irb);
2016 kfree(ch);
2017 return;
2018 }
2019 c = &((*c)->next);
2020 }
2021}
2022
2023/**
2024 * Get a specific channel from the channel list.
2025 *
2026 * @param type Type of channel we are interested in.
2027 * @param id Id of channel we are interested in.
2028 * @param direction Direction we want to use this channel for.
2029 *
2030 * @return Pointer to a channel or NULL if no matching channel available.
2031 */
2032static struct channel *
2034channel_get(enum channel_types type, char *id, int direction)
2035{
2036 struct channel *ch = channels;
2037
2038 DBF_TEXT(trace, 3, __FUNCTION__);
2039#ifdef DEBUG
2040 ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
2041 __func__, id, type);
2042#endif
2043
2044 while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
2045#ifdef DEBUG
2046		ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
2047 __func__, ch, ch->id, ch->type);
2048#endif
2049 ch = ch->next;
2050 }
2051#ifdef DEBUG
2052	ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
2053 __func__, ch, ch->id, ch->type);
2054#endif
2055 if (!ch) {
2056 ctc_pr_warn("ctc: %s(): channel with id %s "
2057 "and type %d not found in channel list\n",
2058 __func__, id, type);
2059 } else {
2060 if (ch->flags & CHANNEL_FLAGS_INUSE)
2061 ch = NULL;
2062 else {
2063 ch->flags |= CHANNEL_FLAGS_INUSE;
2064 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
2065 ch->flags |= (direction == WRITE)
2066 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
2067 fsm_newstate(ch->fsm, CH_STATE_STOPPED);
2068 }
2069 }
2070 return ch;
2071}
2072
2073/**
2074 * Return the channel type by name.
2075 *
2076 * @param name Name of network interface.
2077 *
2078 * @return Type class of channel to be used for that interface.
2079 */
2080static inline enum channel_types
2081extract_channel_media(char *name)
2082{
2083 enum channel_types ret = channel_type_unknown;
2084
2085 if (name != NULL) {
2086 if (strncmp(name, "ctc", 3) == 0)
2087 ret = channel_type_parallel;
2088 if (strncmp(name, "escon", 5) == 0)
2089 ret = channel_type_escon;
2090 }
2091 return ret;
2092}
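
/* E.g. "ctc0" yields channel_type_parallel, "escon1" channel_type_escon. */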
2093
2094static long
2095__ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
2096{
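	/* The common I/O layer may hand over an ERR_PTR() instead of a real irb. */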
2097 if (!IS_ERR(irb))
2098 return 0;
2099
2100 switch (PTR_ERR(irb)) {
2101 case -EIO:
2102 ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
2103// CTC_DBF_TEXT(trace, 2, "ckirberr");
2104// CTC_DBF_TEXT_(trace, 2, " rc%d", -EIO);
2105 break;
2106 case -ETIMEDOUT:
2107 ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
2108// CTC_DBF_TEXT(trace, 2, "ckirberr");
2109// CTC_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
2110 break;
2111 default:
2112 ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
2113 cdev->dev.bus_id);
2114// CTC_DBF_TEXT(trace, 2, "ckirberr");
2115// CTC_DBF_TEXT(trace, 2, " rc???");
2116 }
2117 return PTR_ERR(irb);
2118}
2119
2120/**
2121 * Main IRQ handler.
2122 *
2123 * @param cdev The ccw_device the interrupt is for.
2124 * @param intparm interruption parameter.
2125 * @param irb interruption response block.
2126 */
2127static void
2128ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
2129{
2130 struct channel *ch;
2131 struct net_device *dev;
2132 struct ctc_priv *priv;
2133
2134 DBF_TEXT(trace, 5, __FUNCTION__);
2135 if (__ctc_check_irb_error(cdev, irb))
2136 return;
2137
2138 /* Check for unsolicited interrupts. */
2139 if (!cdev->dev.driver_data) {
2140 ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
2141 cdev->dev.bus_id, irb->scsw.cstat,
2142 irb->scsw.dstat);
2143 return;
2144 }
2145
2146 priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
2147 ->dev.driver_data;
2148
2149 /* Try to extract channel from driver data. */
2150 if (priv->channel[READ]->cdev == cdev)
2151 ch = priv->channel[READ];
2152 else if (priv->channel[WRITE]->cdev == cdev)
2153 ch = priv->channel[WRITE];
2154 else {
2155 ctc_pr_err("ctc: Can't determine channel for interrupt, "
2156 "device %s\n", cdev->dev.bus_id);
2157 return;
2158 }
2159
2160 dev = (struct net_device *) (ch->netdev);
2161 if (dev == NULL) {
2162 ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
2163 cdev->dev.bus_id, ch);
2164 return;
2165 }
2166
2167#ifdef DEBUG
2168 ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
2169 dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
2170#endif
2171
2172 /* Copy interruption response block. */
2173 memcpy(ch->irb, irb, sizeof(struct irb));
2174
2175 /* Check for good subchannel return code, otherwise error message */
2176 if (ch->irb->scsw.cstat) {
2177 fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
2178 ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
2179 dev->name, ch->id, ch->irb->scsw.cstat,
2180 ch->irb->scsw.dstat);
2181 return;
2182 }
2183
2184 /* Check the reason-code of a unit check */
2185 if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
2186 ccw_unit_check(ch, ch->irb->ecw[0]);
2187 return;
2188 }
2189 if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
2190 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
2191 fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
2192 else
2193 fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
2194 return;
2195 }
2196 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
2197 fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
2198 return;
2199 }
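	/* Final status (secondary, pending or alert+pending) completes
	 * the running CCW program. */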
2200 if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
2201 (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
2202 (ch->irb->scsw.stctl ==
2203 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
2204 fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
2205 else
2206 fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
2207
2208}
2209
2210/**
2211 * Actions for the interface statemachine.
2212 *****************************************************************************/
2213
2214/**
2215 * Start up channels by sending CH_EVENT_START to each channel.
2216 *
2217 * @param fi An instance of an interface statemachine.
2218 * @param event The event that just happened.
2219 * @param arg Generic pointer, cast from struct net_device * upon call.
2220 */
2221static void
2222dev_action_start(fsm_instance * fi, int event, void *arg)
2223{
2224 struct net_device *dev = (struct net_device *) arg;
2225 struct ctc_priv *privptr = dev->priv;
2226 int direction;
2227
2228 DBF_TEXT(setup, 3, __FUNCTION__);
2229 fsm_deltimer(&privptr->restart_timer);
2230 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2231 for (direction = READ; direction <= WRITE; direction++) {
2232 struct channel *ch = privptr->channel[direction];
2233 fsm_event(ch->fsm, CH_EVENT_START, ch);
2234 }
2235}
2236
2237/**
2238 * Shut down channels by sending CH_EVENT_STOP to each channel.
2239 *
2240 * @param fi An instance of an interface statemachine.
2241 * @param event The event that just happened.
2242 * @param arg Generic pointer, cast from struct net_device * upon call.
2243 */
2244static void
2245dev_action_stop(fsm_instance * fi, int event, void *arg)
2246{
2247 struct net_device *dev = (struct net_device *) arg;
2248 struct ctc_priv *privptr = dev->priv;
2249 int direction;
2250
2251 DBF_TEXT(trace, 3, __FUNCTION__);
2252 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2253 for (direction = READ; direction <= WRITE; direction++) {
2254 struct channel *ch = privptr->channel[direction];
2255 fsm_event(ch->fsm, CH_EVENT_STOP, ch);
2256 }
2257}
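
/*
 * Restart the interface: stop both channels, signal DEV_EVENT_STOP and
 * schedule DEV_EVENT_START via the restart timer.
 */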
2258static void
2259dev_action_restart(fsm_instance *fi, int event, void *arg)
2260{
2261 struct net_device *dev = (struct net_device *)arg;
2262 struct ctc_priv *privptr = dev->priv;
2263
2264 DBF_TEXT(trace, 3, __FUNCTION__);
2265 ctc_pr_debug("%s: Restarting\n", dev->name);
2266 dev_action_stop(fi, event, arg);
2267 fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
2268 fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
2269 DEV_EVENT_START, dev);
2270}
2271
2272/**
2273 * Called from the channel statemachine
2274 * when a channel is up and running.
2275 *
2276 * @param fi An instance of an interface statemachine.
2277 * @param event The event that just happened.
2278 * @param arg Generic pointer, cast from struct net_device * upon call.
2279 */
2280static void
2281dev_action_chup(fsm_instance * fi, int event, void *arg)
2282{
2283 struct net_device *dev = (struct net_device *) arg;
2284 struct ctc_priv *privptr = dev->priv;
2285
2286 DBF_TEXT(trace, 3, __FUNCTION__);
2287 switch (fsm_getstate(fi)) {
2288 case DEV_STATE_STARTWAIT_RXTX:
2289 if (event == DEV_EVENT_RXUP)
2290 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2291 else
2292 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2293 break;
2294 case DEV_STATE_STARTWAIT_RX:
2295 if (event == DEV_EVENT_RXUP) {
2296 fsm_newstate(fi, DEV_STATE_RUNNING);
2297 ctc_pr_info("%s: connected with remote side\n",
2298 dev->name);
2299 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2300 ctc_tty_setcarrier(dev, 1);
2301 ctc_clear_busy(dev);
2302 }
2303 break;
2304 case DEV_STATE_STARTWAIT_TX:
2305 if (event == DEV_EVENT_TXUP) {
2306 fsm_newstate(fi, DEV_STATE_RUNNING);
2307 ctc_pr_info("%s: connected with remote side\n",
2308 dev->name);
2309 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2310 ctc_tty_setcarrier(dev, 1);
2311 ctc_clear_busy(dev);
2312 }
2313 break;
2314 case DEV_STATE_STOPWAIT_TX:
2315 if (event == DEV_EVENT_RXUP)
2316 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2317 break;
2318 case DEV_STATE_STOPWAIT_RX:
2319 if (event == DEV_EVENT_TXUP)
2320 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2321 break;
2322 }
2323}
2324
2325/**
2326 * Called from the channel statemachine
2327 * when a channel has been shut down.
2328 *
2329 * @param fi An instance of an interface statemachine.
2330 * @param event The event that just happened.
2331 * @param arg Generic pointer, cast from struct net_device * upon call.
2332 */
2333static void
2334dev_action_chdown(fsm_instance * fi, int event, void *arg)
2335{
2336 struct net_device *dev = (struct net_device *) arg;
2337 struct ctc_priv *privptr = dev->priv;
2338
2339 DBF_TEXT(trace, 3, __FUNCTION__);
2340 switch (fsm_getstate(fi)) {
2341 case DEV_STATE_RUNNING:
2342 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2343 ctc_tty_setcarrier(dev, 0);
2344 if (event == DEV_EVENT_TXDOWN)
2345 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2346 else
2347 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2348 break;
2349 case DEV_STATE_STARTWAIT_RX:
2350 if (event == DEV_EVENT_TXDOWN)
2351 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2352 break;
2353 case DEV_STATE_STARTWAIT_TX:
2354 if (event == DEV_EVENT_RXDOWN)
2355 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2356 break;
2357 case DEV_STATE_STOPWAIT_RXTX:
2358 if (event == DEV_EVENT_TXDOWN)
2359 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2360 else
2361 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2362 break;
2363 case DEV_STATE_STOPWAIT_RX:
2364 if (event == DEV_EVENT_RXDOWN)
2365 fsm_newstate(fi, DEV_STATE_STOPPED);
2366 break;
2367 case DEV_STATE_STOPWAIT_TX:
2368 if (event == DEV_EVENT_TXDOWN)
2369 fsm_newstate(fi, DEV_STATE_STOPPED);
2370 break;
2371 }
2372}
2373
2374static const fsm_node dev_fsm[] = {
2375 {DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
2376
2377 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2378 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2379 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2380 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2381
2382 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2383 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2384 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2385 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2386 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2387
2388 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2389 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2390 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2391 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2392 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2393
2394 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2395 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2396 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2397 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2398 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2399 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2400
2401 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2402 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2403 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2404 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2405 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2406
2407 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2408 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2409 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2410 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2411 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2412
2413 {DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2414 {DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2415 {DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2416 {DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop },
2417 {DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop },
2418 {DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
2419};
2420
2421static const int DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
2422
2423/**
2424 * Transmit a packet.
2425 * This is a helper function for ctc_tx().
2426 *
2427 * @param ch Channel to be used for sending.
2428 * @param skb Pointer to struct sk_buff of packet to send.
2429 * The link-level header has already been set up
2430 * by ctc_tx().
2431 *
2432 * @return 0 on success, -ERRNO on failure.
2433 */
2434static int
2435transmit_skb(struct channel *ch, struct sk_buff *skb)
2436{
2437 unsigned long saveflags;
2438 struct ll_header header;
2439 int rc = 0;
2440
2441 DBF_TEXT(trace, 5, __FUNCTION__);
2442 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
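		/*
		 * Channel is busy: stage the packet in the collect queue;
		 * it is sent later together with other queued packets.
		 */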
2443 int l = skb->len + LL_HEADER_LENGTH;
2444
2445 spin_lock_irqsave(&ch->collect_lock, saveflags);
2446 if (ch->collect_len + l > ch->max_bufsize - 2)
2447 rc = -EBUSY;
2448 else {
2449 atomic_inc(&skb->users);
2450 header.length = l;
2451 header.type = skb->protocol;
2452 header.unused = 0;
2453 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2454 LL_HEADER_LENGTH);
2455 skb_queue_tail(&ch->collect_queue, skb);
2456 ch->collect_len += l;
2457 }
2458 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2459 } else {
2460 __u16 block_len;
2461 int ccw_idx;
2462 struct sk_buff *nskb;
2463 unsigned long hi;
2464
2465 /**
2466		 * Protect skb against being freed by upper
2467		 * layers.
2468 */
2469 atomic_inc(&skb->users);
2470 ch->prof.txlen += skb->len;
2471 header.length = skb->len + LL_HEADER_LENGTH;
2472 header.type = skb->protocol;
2473 header.unused = 0;
2474 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2475 LL_HEADER_LENGTH);
2476 block_len = skb->len + 2;
2477 *((__u16 *) skb_push(skb, 2)) = block_len;
2478
2479 /**
2480 * IDAL support in CTC is broken, so we have to
2481		 * take care of skbs above 2G ourselves.
2482 */
2483 hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31;
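		/*
		 * Nonzero means the data lies above 2G and is not 31-bit
		 * addressable; bounce it into a GFP_DMA skb below 2G.
		 */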
2484 if (hi) {
2485 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
2486 if (!nskb) {
2487 atomic_dec(&skb->users);
2488 skb_pull(skb, LL_HEADER_LENGTH + 2);
2489 return -ENOMEM;
2490 } else {
2491 memcpy(skb_put(nskb, skb->len),
2492 skb->data, skb->len);
2493 atomic_inc(&nskb->users);
2494 atomic_dec(&skb->users);
2495 dev_kfree_skb_irq(skb);
2496 skb = nskb;
2497 }
2498 }
2499
2500 ch->ccw[4].count = block_len;
2501 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
2502 /**
2503			 * IDAL allocation failed; fall back to copying
2504			 * into trans_skb, which usually has a
2505			 * pre-allocated IDAL.
2506 */
2507 if (ctc_checkalloc_buffer(ch, 1)) {
2508 /**
2509 * Remove our header. It gets added
2510 * again on retransmit.
2511 */
2512 atomic_dec(&skb->users);
2513 skb_pull(skb, LL_HEADER_LENGTH + 2);
2514 return -EBUSY;
2515 }
2516
2517 ch->trans_skb->tail = ch->trans_skb->data;
2518 ch->trans_skb->len = 0;
2519 ch->ccw[1].count = skb->len;
2520 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
2521 skb->len);
2522 atomic_dec(&skb->users);
2523 dev_kfree_skb_irq(skb);
2524 ccw_idx = 0;
2525 } else {
2526 skb_queue_tail(&ch->io_queue, skb);
2527 ccw_idx = 3;
2528 }
2529 ch->retry = 0;
2530 fsm_newstate(ch->fsm, CH_STATE_TX);
2531 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
2532 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
2533 ch->prof.send_stamp = xtime;
2534 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
2535 (unsigned long) ch, 0xff, 0);
2536 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
2537 if (ccw_idx == 3)
2538 ch->prof.doios_single++;
2539 if (rc != 0) {
2540 fsm_deltimer(&ch->timer);
2541 ccw_check_return_code(ch, rc, "single skb TX");
2542 if (ccw_idx == 3)
2543 skb_dequeue_tail(&ch->io_queue);
2544 /**
2545 * Remove our header. It gets added
2546 * again on retransmit.
2547 */
2548 skb_pull(skb, LL_HEADER_LENGTH + 2);
2549 } else {
2550 if (ccw_idx == 0) {
2551 struct net_device *dev = ch->netdev;
2552 struct ctc_priv *privptr = dev->priv;
2553 privptr->stats.tx_packets++;
2554 privptr->stats.tx_bytes +=
2555 skb->len - LL_HEADER_LENGTH;
2556 }
2557 }
2558 }
2559
2560 return rc;
2561}
2562
2563/**
2564 * Interface API for upper network layers
2565 *****************************************************************************/
2566
2567/**
2568 * Open an interface.
2569 * Called from generic network layer when ifconfig up is run.
2570 *
2571 * @param dev Pointer to interface struct.
2572 *
2573 * @return 0 on success. (Currently never fails.)
2574 */
2575static int
2576ctc_open(struct net_device * dev)
2577{
2578 DBF_TEXT(trace, 5, __FUNCTION__);
2579 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
2580 return 0;
2581}
2582
2583/**
2584 * Close an interface.
2585 * Called from generic network layer when ifconfig down is run.
2586 *
2587 * @param dev Pointer to interface struct.
2588 *
2589 * @return 0 on success. (Currently never fails.)
2590 */
2591static int
2592ctc_close(struct net_device * dev)
2593{
2594 DBF_TEXT(trace, 5, __FUNCTION__);
2595 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
2596 return 0;
2597}
2598
2599/**
2600 * Start transmission of a packet.
2601 * Called from generic network device layer.
2602 *
2603 * @param skb Pointer to buffer containing the packet.
2604 * @param dev Pointer to interface struct.
2605 *
2606 * @return 0 if packet consumed, !0 if packet rejected.
2607 * Note: If we return !0, then the packet is freed by
2608 * the generic network layer.
2609 */
2610static int
2611ctc_tx(struct sk_buff *skb, struct net_device * dev)
2612{
2613 int rc = 0;
2614 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2615
2616 DBF_TEXT(trace, 5, __FUNCTION__);
2617 /**
2618 * Some sanity checks ...
2619 */
2620 if (skb == NULL) {
2621 ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
2622 privptr->stats.tx_dropped++;
2623 return 0;
2624 }
2625 if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
2626 ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
2627 dev->name, LL_HEADER_LENGTH + 2);
2628 dev_kfree_skb(skb);
2629 privptr->stats.tx_dropped++;
2630 return 0;
2631 }
2632
2633 /**
2634 * If channels are not running, try to restart them
2635	 * and throw away the packet.
2636 */
2637 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
2638 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
2639 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2640 return -EBUSY;
2641 dev_kfree_skb(skb);
2642 privptr->stats.tx_dropped++;
2643 privptr->stats.tx_errors++;
2644 privptr->stats.tx_carrier_errors++;
2645 return 0;
2646 }
2647
2648 if (ctc_test_and_set_busy(dev))
2649 return -EBUSY;
2650
2651 dev->trans_start = jiffies;
2652 if (transmit_skb(privptr->channel[WRITE], skb) != 0)
2653 rc = 1;
2654 ctc_clear_busy(dev);
2655 return rc;
2656}
2657
2658/**
2659 * Sets MTU of an interface.
2660 *
2661 * @param dev Pointer to interface struct.
2662 * @param new_mtu The new MTU to use for this interface.
2663 *
2664 * @return 0 on success, -EINVAL if MTU is out of valid range.
2665 * (valid range is 576 .. 65527). If VM is on the
2666 * remote side, the maximum MTU is 32760; however, this is
2667 * <em>not</em> checked here.
2668 */
2669static int
2670ctc_change_mtu(struct net_device * dev, int new_mtu)
2671{
2672 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2673
2674 DBF_TEXT(trace, 3, __FUNCTION__);
2675 if ((new_mtu < 576) || (new_mtu > 65527) ||
2676 (new_mtu > (privptr->channel[READ]->max_bufsize -
2677 LL_HEADER_LENGTH - 2)))
2678 return -EINVAL;
2679 dev->mtu = new_mtu;
2680 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2681 return 0;
2682}
2683
2684/**
2685 * Returns interface statistics of a device.
2686 *
2687 * @param dev Pointer to interface struct.
2688 *
2689 * @return Pointer to stats struct of this interface.
2690 */
2691static struct net_device_stats *
2692ctc_stats(struct net_device * dev)
2693{
2694 return &((struct ctc_priv *) dev->priv)->stats;
2695}
2696
2697/*
2698 * sysfs attributes
2699 */
2700static ssize_t
2701buffer_show(struct device *dev, char *buf)
2702{
2703 struct ctc_priv *priv;
2704
2705 priv = dev->driver_data;
2706 if (!priv)
2707 return -ENODEV;
2708 return sprintf(buf, "%d\n",
2709 priv->buffer_size);
2710}
2711
2712static ssize_t
2713buffer_write(struct device *dev, const char *buf, size_t count)
2714{
2715 struct ctc_priv *priv;
2716 struct net_device *ndev;
2717 int bs1;
2718
2719 DBF_TEXT(trace, 3, __FUNCTION__);
2720 priv = dev->driver_data;
2721 if (!priv)
2722 return -ENODEV;
2723 ndev = priv->channel[READ]->netdev;
2724 if (!ndev)
2725 return -ENODEV;
2726	sscanf(buf, "%d", &bs1);
2727
2728 if (bs1 > CTC_BUFSIZE_LIMIT)
2729 return -EINVAL;
2730 if ((ndev->flags & IFF_RUNNING) &&
2731 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
2732 return -EINVAL;
2733 if (bs1 < (576 + LL_HEADER_LENGTH + 2))
2734 return -EINVAL;
2735
2736 priv->buffer_size = bs1;
2737 priv->channel[READ]->max_bufsize =
2738 priv->channel[WRITE]->max_bufsize = bs1;
2739 if (!(ndev->flags & IFF_RUNNING))
2740 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
2741 priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2742 priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2743
2744 return count;
2745
2746}
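
/*
 * Usage sketch (the sysfs path is illustrative, not taken from the driver):
 *
 *	echo 32768 > /sys/devices/.../buffer
 *
 * While the interface is running, the new size must still fit
 * MTU + LL_HEADER_LENGTH + 2; the CHANNEL_FLAGS_BUFSIZE_CHANGED flags
 * signal both channels to reallocate their buffers (handled elsewhere
 * in the driver).
 */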
2747
2748static ssize_t
2749loglevel_show(struct device *dev, char *buf)
2750{
2751 struct ctc_priv *priv;
2752
2753 priv = dev->driver_data;
2754 if (!priv)
2755 return -ENODEV;
2756 return sprintf(buf, "%d\n", loglevel);
2757}
2758
2759static ssize_t
2760loglevel_write(struct device *dev, const char *buf, size_t count)
2761{
2762 struct ctc_priv *priv;
2763 int ll1;
2764
2765 DBF_TEXT(trace, 5, __FUNCTION__);
2766 priv = dev->driver_data;
2767 if (!priv)
2768 return -ENODEV;
2769 sscanf(buf, "%i", &ll1);
2770
2771 if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
2772 return -EINVAL;
2773 loglevel = ll1;
2774 return count;
2775}
2776
2777static void
2778ctc_print_statistics(struct ctc_priv *priv)
2779{
2780 char *sbuf;
2781 char *p;
2782
2783 DBF_TEXT(trace, 4, __FUNCTION__);
2784 if (!priv)
2785 return;
2786 sbuf = (char *)kmalloc(2048, GFP_KERNEL);
2787 if (sbuf == NULL)
2788 return;
2789 p = sbuf;
2790
2791 p += sprintf(p, " Device FSM state: %s\n",
2792 fsm_getstate_str(priv->fsm));
2793 p += sprintf(p, " RX channel FSM state: %s\n",
2794 fsm_getstate_str(priv->channel[READ]->fsm));
2795 p += sprintf(p, " TX channel FSM state: %s\n",
2796 fsm_getstate_str(priv->channel[WRITE]->fsm));
2797 p += sprintf(p, " Max. TX buffer used: %ld\n",
2798 priv->channel[WRITE]->prof.maxmulti);
2799 p += sprintf(p, " Max. chained SKBs: %ld\n",
2800 priv->channel[WRITE]->prof.maxcqueue);
2801 p += sprintf(p, " TX single write ops: %ld\n",
2802 priv->channel[WRITE]->prof.doios_single);
2803 p += sprintf(p, " TX multi write ops: %ld\n",
2804 priv->channel[WRITE]->prof.doios_multi);
2805	p += sprintf(p, "  Net bytes written: %ld\n",
2806 priv->channel[WRITE]->prof.txlen);
2807 p += sprintf(p, " Max. TX IO-time: %ld\n",
2808 priv->channel[WRITE]->prof.tx_time);
2809
2810 ctc_pr_debug("Statistics for %s:\n%s",
2811 priv->channel[WRITE]->netdev->name, sbuf);
2812 kfree(sbuf);
2813 return;
2814}
2815
2816static ssize_t
2817stats_show(struct device *dev, char *buf)
2818{
2819 struct ctc_priv *priv = dev->driver_data;
2820 if (!priv)
2821 return -ENODEV;
2822 ctc_print_statistics(priv);
2823 return sprintf(buf, "0\n");
2824}
2825
2826static ssize_t
2827stats_write(struct device *dev, const char *buf, size_t count)
2828{
2829 struct ctc_priv *priv = dev->driver_data;
2830 if (!priv)
2831 return -ENODEV;
2832 /* Reset statistics */
2833 memset(&priv->channel[WRITE]->prof, 0,
2834 sizeof(priv->channel[WRITE]->prof));
2835 return count;
2836}
2837
2838static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
2839static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
2840static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
2841
2842static int
2843ctc_add_attributes(struct device *dev)
2844{
2845// device_create_file(dev, &dev_attr_buffer);
2846 device_create_file(dev, &dev_attr_loglevel);
2847 device_create_file(dev, &dev_attr_stats);
2848 return 0;
2849}
2850
2851static void
2852ctc_remove_attributes(struct device *dev)
2853{
2854 device_remove_file(dev, &dev_attr_stats);
2855 device_remove_file(dev, &dev_attr_loglevel);
2856// device_remove_file(dev, &dev_attr_buffer);
2857}
2858
2859
2860static void
2861ctc_netdev_unregister(struct net_device * dev)
2862{
2863 struct ctc_priv *privptr;
2864
2865 if (!dev)
2866 return;
2867 privptr = (struct ctc_priv *) dev->priv;
2868 if (privptr->protocol != CTC_PROTO_LINUX_TTY)
2869 unregister_netdev(dev);
2870 else
2871 ctc_tty_unregister_netdev(dev);
2872}
2873
2874static int
2875ctc_netdev_register(struct net_device * dev)
2876{
2877 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2878 if (privptr->protocol != CTC_PROTO_LINUX_TTY)
2879 return register_netdev(dev);
2880 else
2881 return ctc_tty_register_netdev(dev);
2882}
2883
2884static void
2885ctc_free_netdevice(struct net_device * dev, int free_dev)
2886{
2887 struct ctc_priv *privptr;
2888 if (!dev)
2889 return;
2890 privptr = dev->priv;
2891 if (privptr) {
2892 if (privptr->fsm)
2893 kfree_fsm(privptr->fsm);
2894 kfree(privptr);
2895 }
2896#ifdef MODULE
2897 if (free_dev)
2898 free_netdev(dev);
2899#endif
2900}
2901
2902/**
2903 * Initialize all fields of the net device except the name and
2904 * the channel structs.
2905 */
2906static struct net_device *
2907ctc_init_netdevice(struct net_device * dev, int alloc_device,
2908 struct ctc_priv *privptr)
2909{
2910 if (!privptr)
2911 return NULL;
2912
2913 DBF_TEXT(setup, 3, __FUNCTION__);
2914 if (alloc_device) {
2915 dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
2916 if (!dev)
2917 return NULL;
2918 memset(dev, 0, sizeof (struct net_device));
2919 }
2920
2921 dev->priv = privptr;
2922 privptr->fsm = init_fsm("ctcdev", dev_state_names,
2923 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
2924 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2925 if (privptr->fsm == NULL) {
2926 if (alloc_device)
2927 kfree(dev);
2928 return NULL;
2929 }
2930 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2931 fsm_settimer(privptr->fsm, &privptr->restart_timer);
2932 if (dev->mtu == 0)
2933 dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
2934 dev->hard_start_xmit = ctc_tx;
2935 dev->open = ctc_open;
2936 dev->stop = ctc_close;
2937 dev->get_stats = ctc_stats;
2938 dev->change_mtu = ctc_change_mtu;
2939 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2940 dev->addr_len = 0;
2941 dev->type = ARPHRD_SLIP;
2942 dev->tx_queue_len = 100;
2943 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2944 SET_MODULE_OWNER(dev);
2945 return dev;
2946}
2947
2948static ssize_t
2949ctc_proto_show(struct device *dev, char *buf)
2950{
2951 struct ctc_priv *priv;
2952
2953 priv = dev->driver_data;
2954 if (!priv)
2955 return -ENODEV;
2956
2957 return sprintf(buf, "%d\n", priv->protocol);
2958}
2959
2960static ssize_t
2961ctc_proto_store(struct device *dev, const char *buf, size_t count)
2962{
2963 struct ctc_priv *priv;
2964 int value;
2965
2966 DBF_TEXT(trace, 3, __FUNCTION__);
2967 pr_debug("%s() called\n", __FUNCTION__);
2968
2969 priv = dev->driver_data;
2970 if (!priv)
2971 return -ENODEV;
2972	sscanf(buf, "%d", &value);
2973 if ((value < 0) || (value > CTC_PROTO_MAX))
2974 return -EINVAL;
2975 priv->protocol = value;
2976
2977 return count;
2978}
2979
2980static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
2981
2982static ssize_t
2983ctc_type_show(struct device *dev, char *buf)
2984{
2985 struct ccwgroup_device *cgdev;
2986
2987 cgdev = to_ccwgroupdev(dev);
2988 if (!cgdev)
2989 return -ENODEV;
2990
2991 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
2992}
2993
2994static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
2995
2996static struct attribute *ctc_attr[] = {
2997 &dev_attr_protocol.attr,
2998 &dev_attr_type.attr,
2999 &dev_attr_buffer.attr,
3000 NULL,
3001};
3002
3003static struct attribute_group ctc_attr_group = {
3004 .attrs = ctc_attr,
3005};
3006
3007static int
3008ctc_add_files(struct device *dev)
3009{
3010 pr_debug("%s() called\n", __FUNCTION__);
3011
3012 return sysfs_create_group(&dev->kobj, &ctc_attr_group);
3013}
3014
3015static void
3016ctc_remove_files(struct device *dev)
3017{
3018 pr_debug("%s() called\n", __FUNCTION__);
3019
3020 sysfs_remove_group(&dev->kobj, &ctc_attr_group);
3021}
3022
3023/**
3024 * Add ctc-specific attributes.
3025 * Add ctc private data.
3026 *
3027 * @param cgdev pointer to ccwgroup_device just added
3028 *
3029 * @returns 0 on success, !0 on failure.
3030 */
3031
3032static int
3033ctc_probe_device(struct ccwgroup_device *cgdev)
3034{
3035 struct ctc_priv *priv;
3036 int rc;
3037
3038 pr_debug("%s() called\n", __FUNCTION__);
3039 DBF_TEXT(trace, 3, __FUNCTION__);
3040
3041 if (!get_device(&cgdev->dev))
3042 return -ENODEV;
3043
3044 priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL);
3045 if (!priv) {
3046 ctc_pr_err("%s: Out of memory\n", __func__);
3047 put_device(&cgdev->dev);
3048 return -ENOMEM;
3049 }
3050
3051 memset(priv, 0, sizeof (struct ctc_priv));
3052 rc = ctc_add_files(&cgdev->dev);
3053 if (rc) {
3054 kfree(priv);
3055 put_device(&cgdev->dev);
3056 return rc;
3057 }
3058 priv->buffer_size = CTC_BUFSIZE_DEFAULT;
3059 cgdev->cdev[0]->handler = ctc_irq_handler;
3060 cgdev->cdev[1]->handler = ctc_irq_handler;
3061 cgdev->dev.driver_data = priv;
3062
3063 return 0;
3064}
3065
3066/**
3067 *
3068 * Set up an interface.
3069 *
3070 * @param cgdev Device to be setup.
3071 *
3072 * @returns 0 on success, !0 on failure.
3073 */
3074static int
3075ctc_new_device(struct ccwgroup_device *cgdev)
3076{
3077 char read_id[CTC_ID_SIZE];
3078 char write_id[CTC_ID_SIZE];
3079 int direction;
3080 enum channel_types type;
3081 struct ctc_priv *privptr;
3082 struct net_device *dev;
3083 int ret;
3084
3085 pr_debug("%s() called\n", __FUNCTION__);
3086 DBF_TEXT(setup, 3, __FUNCTION__);
3087
3088 privptr = cgdev->dev.driver_data;
3089 if (!privptr)
3090 return -ENODEV;
3091
3092 type = get_channel_type(&cgdev->cdev[0]->id);
3093
3094 snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
3095 snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
3096
3097 if (add_channel(cgdev->cdev[0], type))
3098 return -ENOMEM;
3099 if (add_channel(cgdev->cdev[1], type))
3100 return -ENOMEM;
3101
3102 ret = ccw_device_set_online(cgdev->cdev[0]);
3103 if (ret != 0) {
3104 printk(KERN_WARNING
3105 "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
3106 }
3107
3108 ret = ccw_device_set_online(cgdev->cdev[1]);
3109 if (ret != 0) {
3110 printk(KERN_WARNING
3111 "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
3112 }
3113
3114 dev = ctc_init_netdevice(NULL, 1, privptr);
3115
3116 if (!dev) {
3117 ctc_pr_warn("ctc_init_netdevice failed\n");
3118 goto out;
3119 }
3120
3121 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
3122 strlcpy(dev->name, "ctctty%d", IFNAMSIZ);
3123 else
3124 strlcpy(dev->name, "ctc%d", IFNAMSIZ);
3125
3126 for (direction = READ; direction <= WRITE; direction++) {
3127 privptr->channel[direction] =
3128 channel_get(type, direction == READ ? read_id : write_id,
3129 direction);
3130 if (privptr->channel[direction] == NULL) {
3131 if (direction == WRITE)
3132 channel_free(privptr->channel[READ]);
3133
3134 ctc_free_netdevice(dev, 1);
3135 goto out;
3136 }
3137 privptr->channel[direction]->netdev = dev;
3138 privptr->channel[direction]->protocol = privptr->protocol;
3139 privptr->channel[direction]->max_bufsize = privptr->buffer_size;
3140 }
3141 /* sysfs magic */
3142 SET_NETDEV_DEV(dev, &cgdev->dev);
3143
3144 if (ctc_netdev_register(dev) != 0) {
3145 ctc_free_netdevice(dev, 1);
3146 goto out;
3147 }
3148
3149 ctc_add_attributes(&cgdev->dev);
3150
3151 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
3152
3153 print_banner();
3154
3155 ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
3156 dev->name, privptr->channel[READ]->id,
3157 privptr->channel[WRITE]->id, privptr->protocol);
3158
3159 return 0;
3160out:
3161 ccw_device_set_offline(cgdev->cdev[1]);
3162 ccw_device_set_offline(cgdev->cdev[0]);
3163
3164 return -ENODEV;
3165}
3166
3167/**
3168 * Shut down an interface.
3169 *
3170 * @param cgdev Device to be shut down.
3171 *
3172 * @returns 0 on success, !0 on failure.
3173 */
3174static int
3175ctc_shutdown_device(struct ccwgroup_device *cgdev)
3176{
3177 struct ctc_priv *priv;
3178 struct net_device *ndev;
3179
3180 DBF_TEXT(trace, 3, __FUNCTION__);
3181 pr_debug("%s() called\n", __FUNCTION__);
3182
3183 priv = cgdev->dev.driver_data;
3184 ndev = NULL;
3185 if (!priv)
3186 return -ENODEV;
3187
3188 if (priv->channel[READ]) {
3189 ndev = priv->channel[READ]->netdev;
3190
3191 /* Close the device */
3192 ctc_close(ndev);
3193 ndev->flags &=~IFF_RUNNING;
3194
3195 ctc_remove_attributes(&cgdev->dev);
3196
3197 channel_free(priv->channel[READ]);
3198 }
3199 if (priv->channel[WRITE])
3200 channel_free(priv->channel[WRITE]);
3201
3202 if (ndev) {
3203 ctc_netdev_unregister(ndev);
3204 ndev->priv = NULL;
3205 ctc_free_netdevice(ndev, 1);
3206 }
3207
3208 if (priv->fsm)
3209 kfree_fsm(priv->fsm);
3210
3211 ccw_device_set_offline(cgdev->cdev[1]);
3212 ccw_device_set_offline(cgdev->cdev[0]);
3213
3214 if (priv->channel[READ])
3215 channel_remove(priv->channel[READ]);
3216 if (priv->channel[WRITE])
3217 channel_remove(priv->channel[WRITE]);
3218
3219 priv->channel[READ] = priv->channel[WRITE] = NULL;
3220
3221 return 0;
3222
3223}
3224
3225static void
3226ctc_remove_device(struct ccwgroup_device *cgdev)
3227{
3228 struct ctc_priv *priv;
3229
3230 pr_debug("%s() called\n", __FUNCTION__);
3231 DBF_TEXT(trace, 3, __FUNCTION__);
3232
3233 priv = cgdev->dev.driver_data;
3234 if (!priv)
3235 return;
3236 if (cgdev->state == CCWGROUP_ONLINE)
3237 ctc_shutdown_device(cgdev);
3238 ctc_remove_files(&cgdev->dev);
3239 cgdev->dev.driver_data = NULL;
3240 kfree(priv);
3241 put_device(&cgdev->dev);
3242}
3243
3244static struct ccwgroup_driver ctc_group_driver = {
3245 .owner = THIS_MODULE,
3246 .name = "ctc",
3247 .max_slaves = 2,
3248 .driver_id = 0xC3E3C3,
3249 .probe = ctc_probe_device,
3250 .remove = ctc_remove_device,
3251 .set_online = ctc_new_device,
3252 .set_offline = ctc_shutdown_device,
3253};
3254
3255/**
3256 * Module related routines
3257 *****************************************************************************/
3258
3259/**
3260 * Prepare to be unloaded. Free IRQs and release all resources.
3261 * This is called just before the module is unloaded. It is
3262 * <em>not</em> called if the usage count is !0, so we do not need
3263 * to check for that.
3264 */
3265static void __exit
3266ctc_exit(void)
3267{
3268 unregister_cu3088_discipline(&ctc_group_driver);
3269 ctc_tty_cleanup();
3270 ctc_unregister_dbf_views();
3271 ctc_pr_info("CTC driver unloaded\n");
3272}
3273
3274/**
3275 * Initialize module.
3276 * This is called just after the module is loaded.
3277 *
3278 * @return 0 on success, !0 on error.
3279 */
3280static int __init
3281ctc_init(void)
3282{
3283 int ret = 0;
3284
3285 print_banner();
3286
3287 ret = ctc_register_dbf_views();
3288	if (ret) {
3289 ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
3290 return ret;
3291 }
3292 ctc_tty_init();
3293 ret = register_cu3088_discipline(&ctc_group_driver);
3294 if (ret) {
3295 ctc_tty_cleanup();
3296 ctc_unregister_dbf_views();
3297 }
3298 return ret;
3299}
3300
3301module_init(ctc_init);
3302module_exit(ctc_exit);
3303
3304/* --- This is the END my friend --- */
diff --git a/drivers/s390/net/ctctty.c b/drivers/s390/net/ctctty.c
new file mode 100644
index 000000000000..9257d60c7833
--- /dev/null
+++ b/drivers/s390/net/ctctty.c
@@ -0,0 +1,1276 @@
1/*
2 * $Id: ctctty.c,v 1.26 2004/08/04 11:06:55 mschwide Exp $
3 *
4 * CTC / ESCON network driver, tty interface.
5 *
6 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 *
23 */
24
25#include <linux/config.h>
26#include <linux/module.h>
27#include <linux/tty.h>
28#include <linux/serial_reg.h>
29#include <linux/interrupt.h>
30#include <linux/delay.h>
31#include <asm/uaccess.h>
32#include <linux/devfs_fs_kernel.h>
33#include "ctctty.h"
34#include "ctcdbug.h"
35
36#define CTC_TTY_MAJOR 43
37#define CTC_TTY_MAX_DEVICES 64
38
39#define CTC_ASYNC_MAGIC 0x49344C01 /* for paranoia-checking */
40#define CTC_ASYNC_INITIALIZED 0x80000000 /* port was initialized */
41#define CTC_ASYNC_NORMAL_ACTIVE 0x20000000 /* Normal device active */
42#define CTC_ASYNC_CLOSING 0x08000000 /* Serial port is closing */
43#define CTC_ASYNC_CTS_FLOW 0x04000000 /* Do CTS flow control */
44#define CTC_ASYNC_CHECK_CD 0x02000000 /* i.e., CLOCAL */
45#define CTC_ASYNC_HUP_NOTIFY 0x0001 /* Notify tty on hangups/closes */
46#define CTC_ASYNC_NETDEV_OPEN 0x0002 /* Underlying netdev is open */
47#define CTC_ASYNC_TX_LINESTAT 0x0004 /* Must send line status */
48#define CTC_ASYNC_SPLIT_TERMIOS 0x0008 /* Sep. termios for dialin/out */
49#define CTC_TTY_XMIT_SIZE 1024 /* Default bufsize for write */
50#define CTC_SERIAL_XMIT_MAX 4000 /* Maximum bufsize for write */
51
52/* Private data (similar to async_struct in <linux/serial.h>) */
53typedef struct {
54 int magic;
55 int flags; /* defined in tty.h */
56 int mcr; /* Modem control register */
57 int msr; /* Modem status register */
58 int lsr; /* Line status register */
59 int line;
60 int count; /* # of fd on device */
61 int blocked_open; /* # of blocked opens */
62 struct net_device *netdev;
63 struct sk_buff_head tx_queue; /* transmit queue */
64 struct sk_buff_head rx_queue; /* receive queue */
65 struct tty_struct *tty; /* Pointer to corresponding tty */
66 wait_queue_head_t open_wait;
67 wait_queue_head_t close_wait;
68 struct semaphore write_sem;
69 struct tasklet_struct tasklet;
70 struct timer_list stoptimer;
71} ctc_tty_info;
72
73/* Description of one CTC-tty */
74typedef struct {
75 struct tty_driver *ctc_tty_device; /* tty-device */
76 ctc_tty_info info[CTC_TTY_MAX_DEVICES]; /* Private data */
77} ctc_tty_driver;
78
79static ctc_tty_driver *driver;
80
81/* Leave this unchanged unless you know what you do! */
82#define MODEM_PARANOIA_CHECK
83#define MODEM_DO_RESTART
84
85#define CTC_TTY_NAME "ctctty"
86
87static __u32 ctc_tty_magic = CTC_ASYNC_MAGIC;
88static int ctc_tty_shuttingdown = 0;
89
90static spinlock_t ctc_tty_lock;
91
92/* ctc_tty_try_read() is called from within ctc_tty_rcv_skb()
93 * to stuff incoming data directly into a tty's flip-buffer. If the
94 * flip buffer is full, the packet gets queued up.
95 *
96 * Return:
97 * 1 = Success
98 * 0 = Failure, data has to be buffered and later processed by
99 * ctc_tty_readmodem().
100 */
101static int
102ctc_tty_try_read(ctc_tty_info * info, struct sk_buff *skb)
103{
104 int c;
105 int len;
106 struct tty_struct *tty;
107
108 DBF_TEXT(trace, 5, __FUNCTION__);
109 if ((tty = info->tty)) {
110 if (info->mcr & UART_MCR_RTS) {
111 c = TTY_FLIPBUF_SIZE - tty->flip.count;
112 len = skb->len;
113 if (c >= len) {
114 memcpy(tty->flip.char_buf_ptr, skb->data, len);
115 memset(tty->flip.flag_buf_ptr, 0, len);
116 tty->flip.count += len;
117 tty->flip.char_buf_ptr += len;
118 tty->flip.flag_buf_ptr += len;
119 tty_flip_buffer_push(tty);
120 kfree_skb(skb);
121 return 1;
122 }
123 }
124 }
125 return 0;
126}
127
128/* ctc_tty_readmodem() is called from the tasklet (ctc_tty_task).
129 * It tries to get received data from the receive queue and stuff it
130 * into the tty's flip-buffer.
131 */
132static int
133ctc_tty_readmodem(ctc_tty_info *info)
134{
135 int ret = 1;
136 struct tty_struct *tty;
137
138 DBF_TEXT(trace, 5, __FUNCTION__);
139 if ((tty = info->tty)) {
140 if (info->mcr & UART_MCR_RTS) {
141 int c = TTY_FLIPBUF_SIZE - tty->flip.count;
142 struct sk_buff *skb;
143
144 if ((c > 0) && (skb = skb_dequeue(&info->rx_queue))) {
145 int len = skb->len;
146 if (len > c)
147 len = c;
148 memcpy(tty->flip.char_buf_ptr, skb->data, len);
149 skb_pull(skb, len);
150 memset(tty->flip.flag_buf_ptr, 0, len);
151 tty->flip.count += len;
152 tty->flip.char_buf_ptr += len;
153 tty->flip.flag_buf_ptr += len;
154 tty_flip_buffer_push(tty);
155 if (skb->len > 0)
156 skb_queue_head(&info->rx_queue, skb);
157 else {
158 kfree_skb(skb);
159 ret = skb_queue_len(&info->rx_queue);
160 }
161 }
162 }
163 }
164 return ret;
165}
166
167void
168ctc_tty_setcarrier(struct net_device *netdev, int on)
169{
170 int i;
171
172 DBF_TEXT(trace, 4, __FUNCTION__);
173 if ((!driver) || ctc_tty_shuttingdown)
174 return;
175 for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
176 if (driver->info[i].netdev == netdev) {
177 ctc_tty_info *info = &driver->info[i];
178 if (on)
179 info->msr |= UART_MSR_DCD;
180 else
181 info->msr &= ~UART_MSR_DCD;
182 if ((info->flags & CTC_ASYNC_CHECK_CD) && (!on))
183 tty_hangup(info->tty);
184 }
185}
186
187void
188ctc_tty_netif_rx(struct sk_buff *skb)
189{
190 int i;
191 ctc_tty_info *info = NULL;
192
193 DBF_TEXT(trace, 5, __FUNCTION__);
194 if (!skb)
195 return;
196 if ((!skb->dev) || (!driver) || ctc_tty_shuttingdown) {
197 dev_kfree_skb(skb);
198 return;
199 }
200 for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
201 if (driver->info[i].netdev == skb->dev) {
202 info = &driver->info[i];
203 break;
204 }
205 if (!info) {
206 dev_kfree_skb(skb);
207 return;
208 }
209 if (skb->len < 6) {
210 dev_kfree_skb(skb);
211 return;
212 }
213 if (memcmp(skb->data, &ctc_tty_magic, sizeof(__u32))) {
214 dev_kfree_skb(skb);
215 return;
216 }
217 skb_pull(skb, sizeof(__u32));
218
219 i = *((int *)skb->data);
220 skb_pull(skb, sizeof(info->mcr));
221 if (i & UART_MCR_RTS) {
222 info->msr |= UART_MSR_CTS;
223 if (info->flags & CTC_ASYNC_CTS_FLOW)
224 info->tty->hw_stopped = 0;
225 } else {
226 info->msr &= ~UART_MSR_CTS;
227 if (info->flags & CTC_ASYNC_CTS_FLOW)
228 info->tty->hw_stopped = 1;
229 }
230 if (i & UART_MCR_DTR)
231 info->msr |= UART_MSR_DSR;
232 else
233 info->msr &= ~UART_MSR_DSR;
234 if (skb->len <= 0) {
235 kfree_skb(skb);
236 return;
237 }
238 /* Try to deliver directly via tty-flip-buf if queue is empty */
239 if (skb_queue_empty(&info->rx_queue))
240 if (ctc_tty_try_read(info, skb))
241 return;
242 /* Direct deliver failed or queue wasn't empty.
243 * Queue up for later dequeueing via timer-irq.
244 */
245 skb_queue_tail(&info->rx_queue, skb);
246 /* Schedule dequeuing */
247 tasklet_schedule(&info->tasklet);
248}
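
Taken together, the receive path above and the transmit path in ctc_tty_tint() below imply a simple framing convention: every frame starts with the 4-byte CTC_ASYNC_MAGIC, followed by a snapshot of the sender's modem control register, followed by optional payload. The struct below is purely illustrative and does not appear in the driver, which builds and parses this header with skb_push()/skb_pull() instead:

	/* Hypothetical view of a ctctty frame (illustration only): */
	struct ctc_tty_frame {
		__u32 magic;	/* CTC_ASYNC_MAGIC; filters non-tty traffic    */
		int mcr;	/* sender's UART_MCR_* bits (sizeof(info->mcr)) */
		char data[];	/* optional payload; absent for pure status    */
	};

A frame with an empty payload is a pure line-status update, which is why the receive path frees zero-length skbs right after evaluating the mcr bits.
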
249
250static int
251ctc_tty_tint(ctc_tty_info * info)
252{
253 struct sk_buff *skb = skb_dequeue(&info->tx_queue);
254 int stopped = (info->tty->hw_stopped || info->tty->stopped);
255 int wake = 1;
256 int rc;
257
258 DBF_TEXT(trace, 4, __FUNCTION__);
259 if (!info->netdev) {
260 if (skb)
261 kfree_skb(skb);
262 return 0;
263 }
264 if (info->flags & CTC_ASYNC_TX_LINESTAT) {
265 int skb_res = info->netdev->hard_header_len +
266 sizeof(info->mcr) + sizeof(__u32);
267 /* If we must update line status,
268 * create an empty dummy skb and insert it.
269 */
270 if (skb)
271 skb_queue_head(&info->tx_queue, skb);
272
273 skb = dev_alloc_skb(skb_res);
274 if (!skb) {
275 printk(KERN_WARNING
276 "ctc_tty: Out of memory in %s%d tint\n",
277 CTC_TTY_NAME, info->line);
278 return 1;
279 }
280 skb_reserve(skb, skb_res);
281 stopped = 0;
282 wake = 0;
283 }
284 if (!skb)
285 return 0;
286 if (stopped) {
287 skb_queue_head(&info->tx_queue, skb);
288 return 1;
289 }
290#if 0
291 if (skb->len > 0)
292 printk(KERN_DEBUG "tint: %d %02x\n", skb->len, *(skb->data));
293 else
294 printk(KERN_DEBUG "tint: %d STAT\n", skb->len);
295#endif
296 memcpy(skb_push(skb, sizeof(info->mcr)), &info->mcr, sizeof(info->mcr));
297 memcpy(skb_push(skb, sizeof(__u32)), &ctc_tty_magic, sizeof(__u32));
298 rc = info->netdev->hard_start_xmit(skb, info->netdev);
299 if (rc) {
300 skb_pull(skb, sizeof(info->mcr) + sizeof(__u32));
301 if (skb->len > 0)
302 skb_queue_head(&info->tx_queue, skb);
303 else
304 kfree_skb(skb);
305 } else {
306 struct tty_struct *tty = info->tty;
307
308 info->flags &= ~CTC_ASYNC_TX_LINESTAT;
309 if (tty) {
310 tty_wakeup(tty);
311 }
312 }
313 return (skb_queue_empty(&info->tx_queue) ? 0 : 1);
314}
315
316/************************************************************
317 *
318 * Modem-functions
319 *
320 * mostly "stolen" from original Linux-serial.c and friends.
321 *
322 ************************************************************/
323
324static inline int
325ctc_tty_paranoia_check(ctc_tty_info * info, char *name, const char *routine)
326{
327#ifdef MODEM_PARANOIA_CHECK
328 if (!info) {
329 printk(KERN_WARNING "ctc_tty: null info_struct for %s in %s\n",
330 name, routine);
331 return 1;
332 }
333 if (info->magic != CTC_ASYNC_MAGIC) {
334 printk(KERN_WARNING "ctc_tty: bad magic for info struct %s in %s\n",
335 name, routine);
336 return 1;
337 }
338#endif
339 return 0;
340}
341
342static void
343ctc_tty_inject(ctc_tty_info *info, char c)
344{
345 int skb_res;
346 struct sk_buff *skb;
347
348 DBF_TEXT(trace, 4, __FUNCTION__);
349 if (ctc_tty_shuttingdown)
350 return;
351 skb_res = info->netdev->hard_header_len + sizeof(info->mcr) +
352 sizeof(__u32) + 1;
353 skb = dev_alloc_skb(skb_res);
354 if (!skb) {
355 printk(KERN_WARNING
356 "ctc_tty: Out of memory in %s%d tx_inject\n",
357 CTC_TTY_NAME, info->line);
358 return;
359 }
360 skb_reserve(skb, skb_res);
361 *(skb_put(skb, 1)) = c;
362 skb_queue_head(&info->tx_queue, skb);
363 tasklet_schedule(&info->tasklet);
364}
365
366static void
367ctc_tty_transmit_status(ctc_tty_info *info)
368{
369 DBF_TEXT(trace, 5, __FUNCTION__);
370 if (ctc_tty_shuttingdown)
371 return;
372 info->flags |= CTC_ASYNC_TX_LINESTAT;
373 tasklet_schedule(&info->tasklet);
374}
375
376static void
377ctc_tty_change_speed(ctc_tty_info * info)
378{
379 unsigned int cflag;
380 unsigned int quot;
381 int i;
382
383 DBF_TEXT(trace, 3, __FUNCTION__);
384 if (!info->tty || !info->tty->termios)
385 return;
386 cflag = info->tty->termios->c_cflag;
387
388 quot = i = cflag & CBAUD;
389 if (i & CBAUDEX) {
390 i &= ~CBAUDEX;
391 if (i < 1 || i > 2)
392 info->tty->termios->c_cflag &= ~CBAUDEX;
393 else
394 i += 15;
395 }
396 if (quot) {
397 info->mcr |= UART_MCR_DTR;
398 info->mcr |= UART_MCR_RTS;
399 ctc_tty_transmit_status(info);
400 } else {
401 info->mcr &= ~UART_MCR_DTR;
402 info->mcr &= ~UART_MCR_RTS;
403 ctc_tty_transmit_status(info);
404 return;
405 }
406
407 /* CTS flow control flag and modem status interrupts */
408	if (cflag & CRTSCTS) {
409		info->flags |= CTC_ASYNC_CTS_FLOW;
410	} else {
411		info->flags &= ~CTC_ASYNC_CTS_FLOW;
412	}
413	if (cflag & CLOCAL)
414		info->flags &= ~CTC_ASYNC_CHECK_CD;
415	else
416		info->flags |= CTC_ASYNC_CHECK_CD;
417}
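
In effect the baud bits act purely as a hangup control here: any nonzero CBAUD raises DTR/RTS and transmits the new line status, while B0 drops both, mimicking classic modem semantics. A hypothetical user-space trigger, using nothing but standard termios calls (the device name is an assumption):

	#include <termios.h>
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/ctctty0", O_RDWR);	/* name hypothetical */
		struct termios tio;

		if (fd < 0)
			return 1;
		tcgetattr(fd, &tio);
		cfsetospeed(&tio, B0);		/* zero the CBAUD bits      */
		cfsetispeed(&tio, B0);
		tcsetattr(fd, TCSANOW, &tio);	/* driver drops DTR and RTS */
		close(fd);
		return 0;
	}
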
418
419static int
420ctc_tty_startup(ctc_tty_info * info)
421{
422 DBF_TEXT(trace, 3, __FUNCTION__);
423 if (info->flags & CTC_ASYNC_INITIALIZED)
424 return 0;
425#ifdef CTC_DEBUG_MODEM_OPEN
426 printk(KERN_DEBUG "starting up %s%d ...\n", CTC_TTY_NAME, info->line);
427#endif
428 /*
429 * Now, initialize the UART
430 */
431 info->mcr = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2;
432 if (info->tty)
433 clear_bit(TTY_IO_ERROR, &info->tty->flags);
434 /*
435 * and set the speed of the serial port
436 */
437 ctc_tty_change_speed(info);
438
439 info->flags |= CTC_ASYNC_INITIALIZED;
440 if (!(info->flags & CTC_ASYNC_NETDEV_OPEN))
441 info->netdev->open(info->netdev);
442 info->flags |= CTC_ASYNC_NETDEV_OPEN;
443 return 0;
444}
445
446static void
447ctc_tty_stopdev(unsigned long data)
448{
449 ctc_tty_info *info = (ctc_tty_info *)data;
450
451 if ((!info) || (!info->netdev) ||
452 (info->flags & CTC_ASYNC_INITIALIZED))
453 return;
454 info->netdev->stop(info->netdev);
455 info->flags &= ~CTC_ASYNC_NETDEV_OPEN;
456}
457
458/*
459 * This routine will shutdown a serial port; interrupts are disabled, and
460 * DTR is dropped if the hangup on close termio flag is on.
461 */
462static void
463ctc_tty_shutdown(ctc_tty_info * info)
464{
465 DBF_TEXT(trace, 3, __FUNCTION__);
466 if (!(info->flags & CTC_ASYNC_INITIALIZED))
467 return;
468#ifdef CTC_DEBUG_MODEM_OPEN
469 printk(KERN_DEBUG "Shutting down %s%d ....\n", CTC_TTY_NAME, info->line);
470#endif
471 info->msr &= ~UART_MSR_RI;
472 if (!info->tty || (info->tty->termios->c_cflag & HUPCL))
473 info->mcr &= ~(UART_MCR_DTR | UART_MCR_RTS);
474 if (info->tty)
475 set_bit(TTY_IO_ERROR, &info->tty->flags);
476 mod_timer(&info->stoptimer, jiffies + (10 * HZ));
477 skb_queue_purge(&info->tx_queue);
478 skb_queue_purge(&info->rx_queue);
479 info->flags &= ~CTC_ASYNC_INITIALIZED;
480}
481
482/* ctc_tty_write() is the main send-routine. It is called by the tty
483 * layer whenever user space writes data to the port. Processing done
484 * here:
485 * - The data is cut into chunks of at most CTC_TTY_XMIT_SIZE bytes.
486 * - Each chunk is copied into an sk_buff, with headroom reserved for
487 *   the magic/line-status header prepended later by ctc_tty_tint().
488 * - The skbs are queued on tx_queue and the tasklet is scheduled,
489 *   which performs the actual transmission asynchronously.
490 */
491static int
492ctc_tty_write(struct tty_struct *tty, const u_char * buf, int count)
493{
494 int c;
495 int total = 0;
496 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
497
498 DBF_TEXT(trace, 5, __FUNCTION__);
499	if (ctc_tty_shuttingdown)
500		goto ex;
501	if (!tty)
502		goto ex;
503	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write"))
504		goto ex;
505 if (!info->netdev) {
506 total = -ENODEV;
507 goto ex;
508 }
509 while (1) {
510 struct sk_buff *skb;
511 int skb_res;
512
513 c = (count < CTC_TTY_XMIT_SIZE) ? count : CTC_TTY_XMIT_SIZE;
514 if (c <= 0)
515 break;
516
517		skb_res = info->netdev->hard_header_len + sizeof(info->mcr) +
518			  sizeof(__u32);
519 skb = dev_alloc_skb(skb_res + c);
520 if (!skb) {
521 printk(KERN_WARNING
522 "ctc_tty: Out of memory in %s%d write\n",
523 CTC_TTY_NAME, info->line);
524 break;
525 }
526 skb_reserve(skb, skb_res);
527 memcpy(skb_put(skb, c), buf, c);
528 skb_queue_tail(&info->tx_queue, skb);
529 buf += c;
530 total += c;
531 count -= c;
532 }
533 if (skb_queue_len(&info->tx_queue)) {
534 info->lsr &= ~UART_LSR_TEMT;
535 tasklet_schedule(&info->tasklet);
536 }
537ex:
538 DBF_TEXT(trace, 6, __FUNCTION__);
539 return total;
540}
541
542static int
543ctc_tty_write_room(struct tty_struct *tty)
544{
545 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
546
547 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write_room"))
548 return 0;
549 return CTC_TTY_XMIT_SIZE;
550}
551
552static int
553ctc_tty_chars_in_buffer(struct tty_struct *tty)
554{
555 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
556
557 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_chars_in_buffer"))
558 return 0;
559 return 0;
560}
561
562static void
563ctc_tty_flush_buffer(struct tty_struct *tty)
564{
565 ctc_tty_info *info;
566 unsigned long flags;
567
568 DBF_TEXT(trace, 4, __FUNCTION__);
569 if (!tty)
570 goto ex;
571 spin_lock_irqsave(&ctc_tty_lock, flags);
572 info = (ctc_tty_info *) tty->driver_data;
573 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_buffer")) {
574 spin_unlock_irqrestore(&ctc_tty_lock, flags);
575 goto ex;
576 }
577 skb_queue_purge(&info->tx_queue);
578 info->lsr |= UART_LSR_TEMT;
579 spin_unlock_irqrestore(&ctc_tty_lock, flags);
580 wake_up_interruptible(&tty->write_wait);
581 tty_wakeup(tty);
582ex:
583 DBF_TEXT_(trace, 2, "ex: %s ", __FUNCTION__);
584 return;
585}
586
587static void
588ctc_tty_flush_chars(struct tty_struct *tty)
589{
590 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
591
592 DBF_TEXT(trace, 4, __FUNCTION__);
593 if (ctc_tty_shuttingdown)
594 return;
595 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_chars"))
596 return;
597 if (tty->stopped || tty->hw_stopped || (!skb_queue_len(&info->tx_queue)))
598 return;
599 tasklet_schedule(&info->tasklet);
600}
601
602/*
603 * ------------------------------------------------------------
604 * ctc_tty_throttle()
605 *
606 * This routine is called by the upper-layer tty layer to signal that
607 * incoming characters should be throttled.
608 * ------------------------------------------------------------
609 */
610static void
611ctc_tty_throttle(struct tty_struct *tty)
612{
613 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
614
615 DBF_TEXT(trace, 4, __FUNCTION__);
616 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_throttle"))
617 return;
618 info->mcr &= ~UART_MCR_RTS;
619 if (I_IXOFF(tty))
620 ctc_tty_inject(info, STOP_CHAR(tty));
621 ctc_tty_transmit_status(info);
622}
623
624static void
625ctc_tty_unthrottle(struct tty_struct *tty)
626{
627 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
628
629 DBF_TEXT(trace, 4, __FUNCTION__);
630 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_unthrottle"))
631 return;
632 info->mcr |= UART_MCR_RTS;
633 if (I_IXOFF(tty))
634 ctc_tty_inject(info, START_CHAR(tty));
635 ctc_tty_transmit_status(info);
636}
637
638/*
639 * ------------------------------------------------------------
640 * ctc_tty_ioctl() and friends
641 * ------------------------------------------------------------
642 */
643
644/*
645 * ctc_tty_get_lsr_info - get line status register info
646 *
647 * Purpose: Let the user call ioctl() to learn when the UART is
648 *          physically emptied. On bus types like RS485, the transmitter
649 *          must release the bus after transmitting. This must be done
650 *          when the transmit shift register is empty, not when the
651 *          transmit holding register is empty. This functionality
652 *          allows an RS485 driver to be written in user space.
653 */
654static int
655ctc_tty_get_lsr_info(ctc_tty_info * info, uint __user *value)
656{
657 u_char status;
658 uint result;
659 ulong flags;
660
661 DBF_TEXT(trace, 4, __FUNCTION__);
662 spin_lock_irqsave(&ctc_tty_lock, flags);
663 status = info->lsr;
664 spin_unlock_irqrestore(&ctc_tty_lock, flags);
665 result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
666 put_user(result, value);
667 return 0;
668}
669
670
671static int ctc_tty_tiocmget(struct tty_struct *tty, struct file *file)
672{
673 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
674 u_char control,
675 status;
676 uint result;
677 ulong flags;
678
679 DBF_TEXT(trace, 4, __FUNCTION__);
680 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
681 return -ENODEV;
682 if (tty->flags & (1 << TTY_IO_ERROR))
683 return -EIO;
684
685 control = info->mcr;
686 spin_lock_irqsave(&ctc_tty_lock, flags);
687 status = info->msr;
688 spin_unlock_irqrestore(&ctc_tty_lock, flags);
689 result = ((control & UART_MCR_RTS) ? TIOCM_RTS : 0)
690 | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0)
691 | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0)
692 | ((status & UART_MSR_RI) ? TIOCM_RNG : 0)
693 | ((status & UART_MSR_DSR) ? TIOCM_DSR : 0)
694 | ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
695 return result;
696}
697
698static int
699ctc_tty_tiocmset(struct tty_struct *tty, struct file *file,
700 unsigned int set, unsigned int clear)
701{
702 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
703
704 DBF_TEXT(trace, 4, __FUNCTION__);
705 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
706 return -ENODEV;
707 if (tty->flags & (1 << TTY_IO_ERROR))
708 return -EIO;
709
710 if (set & TIOCM_RTS)
711 info->mcr |= UART_MCR_RTS;
712 if (set & TIOCM_DTR)
713 info->mcr |= UART_MCR_DTR;
714
715 if (clear & TIOCM_RTS)
716 info->mcr &= ~UART_MCR_RTS;
717 if (clear & TIOCM_DTR)
718 info->mcr &= ~UART_MCR_DTR;
719
720 if ((set | clear) & (TIOCM_RTS|TIOCM_DTR))
721 ctc_tty_transmit_status(info);
722 return 0;
723}
724
725static int
726ctc_tty_ioctl(struct tty_struct *tty, struct file *file,
727 uint cmd, ulong arg)
728{
729 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
730 int error;
731 int retval;
732
733 DBF_TEXT(trace, 4, __FUNCTION__);
734 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
735 return -ENODEV;
736 if (tty->flags & (1 << TTY_IO_ERROR))
737 return -EIO;
738 switch (cmd) {
739 case TCSBRK: /* SVID version: non-zero arg --> no break */
740#ifdef CTC_DEBUG_MODEM_IOCTL
741 printk(KERN_DEBUG "%s%d ioctl TCSBRK\n", CTC_TTY_NAME, info->line);
742#endif
743 retval = tty_check_change(tty);
744 if (retval)
745 return retval;
746 tty_wait_until_sent(tty, 0);
747 return 0;
748 case TCSBRKP: /* support for POSIX tcsendbreak() */
749#ifdef CTC_DEBUG_MODEM_IOCTL
750 printk(KERN_DEBUG "%s%d ioctl TCSBRKP\n", CTC_TTY_NAME, info->line);
751#endif
752 retval = tty_check_change(tty);
753 if (retval)
754 return retval;
755 tty_wait_until_sent(tty, 0);
756 return 0;
757 case TIOCGSOFTCAR:
758#ifdef CTC_DEBUG_MODEM_IOCTL
759 printk(KERN_DEBUG "%s%d ioctl TIOCGSOFTCAR\n", CTC_TTY_NAME,
760 info->line);
761#endif
762 error = put_user(C_CLOCAL(tty) ? 1 : 0, (ulong __user *) arg);
763 return error;
764 case TIOCSSOFTCAR:
765#ifdef CTC_DEBUG_MODEM_IOCTL
766 printk(KERN_DEBUG "%s%d ioctl TIOCSSOFTCAR\n", CTC_TTY_NAME,
767 info->line);
768#endif
769 error = get_user(arg, (ulong __user *) arg);
770 if (error)
771 return error;
772 tty->termios->c_cflag =
773 ((tty->termios->c_cflag & ~CLOCAL) |
774 (arg ? CLOCAL : 0));
775 return 0;
776 case TIOCSERGETLSR: /* Get line status register */
777#ifdef CTC_DEBUG_MODEM_IOCTL
778 printk(KERN_DEBUG "%s%d ioctl TIOCSERGETLSR\n", CTC_TTY_NAME,
779 info->line);
780#endif
781 if (access_ok(VERIFY_WRITE, (void __user *) arg, sizeof(uint)))
782 return ctc_tty_get_lsr_info(info, (uint __user *) arg);
783 else
784 return -EFAULT;
785 default:
786#ifdef CTC_DEBUG_MODEM_IOCTL
787 printk(KERN_DEBUG "UNKNOWN ioctl 0x%08x on %s%d\n", cmd,
788 CTC_TTY_NAME, info->line);
789#endif
790 return -ENOIOCTLCMD;
791 }
792 return 0;
793}
794
795static void
796ctc_tty_set_termios(struct tty_struct *tty, struct termios *old_termios)
797{
798 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
799 unsigned int cflag = tty->termios->c_cflag;
800
801 DBF_TEXT(trace, 4, __FUNCTION__);
802 ctc_tty_change_speed(info);
803
804 /* Handle transition to B0 */
805 if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) {
806 info->mcr &= ~(UART_MCR_DTR|UART_MCR_RTS);
807 ctc_tty_transmit_status(info);
808 }
809
810 /* Handle transition from B0 to other */
811 if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
812 info->mcr |= UART_MCR_DTR;
813 if (!(tty->termios->c_cflag & CRTSCTS) ||
814 !test_bit(TTY_THROTTLED, &tty->flags)) {
815 info->mcr |= UART_MCR_RTS;
816 }
817 ctc_tty_transmit_status(info);
818 }
819
820 /* Handle turning off CRTSCTS */
821 if ((old_termios->c_cflag & CRTSCTS) &&
822 !(tty->termios->c_cflag & CRTSCTS))
823 tty->hw_stopped = 0;
824}
825
826/*
827 * ------------------------------------------------------------
828 * ctc_tty_open() and friends
829 * ------------------------------------------------------------
830 */
831static int
832ctc_tty_block_til_ready(struct tty_struct *tty, struct file *filp, ctc_tty_info *info)
833{
834	DECLARE_WAITQUEUE(wait, current);
835 int do_clocal = 0;
836 unsigned long flags;
837 int retval;
838
839 DBF_TEXT(trace, 4, __FUNCTION__);
840 /*
841 * If the device is in the middle of being closed, then block
842 * until it's done, and then try again.
843 */
844 if (tty_hung_up_p(filp) ||
845 (info->flags & CTC_ASYNC_CLOSING)) {
846 if (info->flags & CTC_ASYNC_CLOSING)
847 wait_event(info->close_wait,
848 !(info->flags & CTC_ASYNC_CLOSING));
849#ifdef MODEM_DO_RESTART
850 if (info->flags & CTC_ASYNC_HUP_NOTIFY)
851 return -EAGAIN;
852 else
853 return -ERESTARTSYS;
854#else
855 return -EAGAIN;
856#endif
857 }
858 /*
859 * If non-blocking mode is set, then make the check up front
860 * and then exit.
861 */
862 if ((filp->f_flags & O_NONBLOCK) ||
863 (tty->flags & (1 << TTY_IO_ERROR))) {
864 info->flags |= CTC_ASYNC_NORMAL_ACTIVE;
865 return 0;
866 }
867 if (tty->termios->c_cflag & CLOCAL)
868 do_clocal = 1;
869 /*
870 * Block waiting for the carrier detect and the line to become
871 * free (i.e., not in use by the callout). While we are in
872 * this loop, info->count is dropped by one, so that
873 * ctc_tty_close() knows when to free things. We restore it upon
874 * exit, either normal or abnormal.
875 */
876 retval = 0;
877 add_wait_queue(&info->open_wait, &wait);
878#ifdef CTC_DEBUG_MODEM_OPEN
879 printk(KERN_DEBUG "ctc_tty_block_til_ready before block: %s%d, count = %d\n",
880 CTC_TTY_NAME, info->line, info->count);
881#endif
882 spin_lock_irqsave(&ctc_tty_lock, flags);
883 if (!(tty_hung_up_p(filp)))
884 info->count--;
885 spin_unlock_irqrestore(&ctc_tty_lock, flags);
886 info->blocked_open++;
887 while (1) {
888 set_current_state(TASK_INTERRUPTIBLE);
889 if (tty_hung_up_p(filp) ||
890 !(info->flags & CTC_ASYNC_INITIALIZED)) {
891#ifdef MODEM_DO_RESTART
892 if (info->flags & CTC_ASYNC_HUP_NOTIFY)
893 retval = -EAGAIN;
894 else
895 retval = -ERESTARTSYS;
896#else
897 retval = -EAGAIN;
898#endif
899 break;
900 }
901 if (!(info->flags & CTC_ASYNC_CLOSING) &&
902 (do_clocal || (info->msr & UART_MSR_DCD))) {
903 break;
904 }
905 if (signal_pending(current)) {
906 retval = -ERESTARTSYS;
907 break;
908 }
909#ifdef CTC_DEBUG_MODEM_OPEN
910 printk(KERN_DEBUG "ctc_tty_block_til_ready blocking: %s%d, count = %d\n",
911 CTC_TTY_NAME, info->line, info->count);
912#endif
913 schedule();
914 }
915 current->state = TASK_RUNNING;
916 remove_wait_queue(&info->open_wait, &wait);
917 if (!tty_hung_up_p(filp))
918 info->count++;
919 info->blocked_open--;
920#ifdef CTC_DEBUG_MODEM_OPEN
921 printk(KERN_DEBUG "ctc_tty_block_til_ready after blocking: %s%d, count = %d\n",
922 CTC_TTY_NAME, info->line, info->count);
923#endif
924 if (retval)
925 return retval;
926 info->flags |= CTC_ASYNC_NORMAL_ACTIVE;
927 return 0;
928}
929
930/*
931 * This routine is called whenever a serial port is opened. It
932 * performs the serial-specific initialization for the tty structure
933 * and starts up the port: ctc_tty_startup() opens the underlying
934 * net_device if it is not already open.
935 */
936static int
937ctc_tty_open(struct tty_struct *tty, struct file *filp)
938{
939 ctc_tty_info *info;
940 unsigned long saveflags;
941 int retval,
942 line;
943
944 DBF_TEXT(trace, 3, __FUNCTION__);
945 line = tty->index;
946 if (line < 0 || line > CTC_TTY_MAX_DEVICES)
947 return -ENODEV;
948 info = &driver->info[line];
949 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_open"))
950 return -ENODEV;
951 if (!info->netdev)
952 return -ENODEV;
953#ifdef CTC_DEBUG_MODEM_OPEN
954 printk(KERN_DEBUG "ctc_tty_open %s, count = %d\n", tty->name,
955 info->count);
956#endif
957 spin_lock_irqsave(&ctc_tty_lock, saveflags);
958 info->count++;
959 tty->driver_data = info;
960 info->tty = tty;
961 spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
962 /*
963 * Start up serial port
964 */
965 retval = ctc_tty_startup(info);
966 if (retval) {
967#ifdef CTC_DEBUG_MODEM_OPEN
968 printk(KERN_DEBUG "ctc_tty_open return after startup\n");
969#endif
970 return retval;
971 }
972 retval = ctc_tty_block_til_ready(tty, filp, info);
973 if (retval) {
974#ifdef CTC_DEBUG_MODEM_OPEN
975 printk(KERN_DEBUG "ctc_tty_open return after ctc_tty_block_til_ready \n");
976#endif
977 return retval;
978 }
979#ifdef CTC_DEBUG_MODEM_OPEN
980 printk(KERN_DEBUG "ctc_tty_open %s successful...\n", tty->name);
981#endif
982 return 0;
983}
984
985static void
986ctc_tty_close(struct tty_struct *tty, struct file *filp)
987{
988 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
989 ulong flags;
990 ulong timeout;
991 DBF_TEXT(trace, 3, __FUNCTION__);
992 if (!info || ctc_tty_paranoia_check(info, tty->name, "ctc_tty_close"))
993 return;
994 spin_lock_irqsave(&ctc_tty_lock, flags);
995 if (tty_hung_up_p(filp)) {
996 spin_unlock_irqrestore(&ctc_tty_lock, flags);
997#ifdef CTC_DEBUG_MODEM_OPEN
998 printk(KERN_DEBUG "ctc_tty_close return after tty_hung_up_p\n");
999#endif
1000 return;
1001 }
1002 if ((tty->count == 1) && (info->count != 1)) {
1003 /*
1004 * Uh, oh. tty->count is 1, which means that the tty
1005 * structure will be freed. Info->count should always
1006 * be one in these conditions. If it's greater than
1007 * one, we've got real problems, since it means the
1008 * serial port won't be shutdown.
1009 */
1010 printk(KERN_ERR "ctc_tty_close: bad port count; tty->count is 1, "
1011 "info->count is %d\n", info->count);
1012 info->count = 1;
1013 }
1014 if (--info->count < 0) {
1015 printk(KERN_ERR "ctc_tty_close: bad port count for %s%d: %d\n",
1016 CTC_TTY_NAME, info->line, info->count);
1017 info->count = 0;
1018 }
1019 if (info->count) {
1020		spin_unlock_irqrestore(&ctc_tty_lock, flags);
1021#ifdef CTC_DEBUG_MODEM_OPEN
1022 printk(KERN_DEBUG "ctc_tty_close after info->count != 0\n");
1023#endif
1024 return;
1025 }
1026 info->flags |= CTC_ASYNC_CLOSING;
1027 tty->closing = 1;
1028 /*
1029 * At this point we stop accepting input. To do this, we
1030 * disable the receive line status interrupts, and tell the
1031 * interrupt driver to stop checking the data ready bit in the
1032 * line status register.
1033 */
1034 if (info->flags & CTC_ASYNC_INITIALIZED) {
1035 tty_wait_until_sent(tty, 30*HZ); /* 30 seconds timeout */
1036 /*
1037 * Before we drop DTR, make sure the UART transmitter
1038 * has completely drained; this is especially
1039 * important if there is a transmit FIFO!
1040 */
1041 timeout = jiffies + HZ;
1042 while (!(info->lsr & UART_LSR_TEMT)) {
1043 spin_unlock_irqrestore(&ctc_tty_lock, flags);
1044 msleep(500);
1045 spin_lock_irqsave(&ctc_tty_lock, flags);
1046 if (time_after(jiffies,timeout))
1047 break;
1048 }
1049 }
1050 ctc_tty_shutdown(info);
1051 if (tty->driver->flush_buffer) {
1052 skb_queue_purge(&info->tx_queue);
1053 info->lsr |= UART_LSR_TEMT;
1054 }
1055 tty_ldisc_flush(tty);
1056	info->tty = NULL;
1057 tty->closing = 0;
1058 if (info->blocked_open) {
1059 set_current_state(TASK_INTERRUPTIBLE);
1060 schedule_timeout(HZ/2);
1061 wake_up_interruptible(&info->open_wait);
1062 }
1063 info->flags &= ~(CTC_ASYNC_NORMAL_ACTIVE | CTC_ASYNC_CLOSING);
1064 wake_up_interruptible(&info->close_wait);
1065 spin_unlock_irqrestore(&ctc_tty_lock, flags);
1066#ifdef CTC_DEBUG_MODEM_OPEN
1067 printk(KERN_DEBUG "ctc_tty_close normal exit\n");
1068#endif
1069}
1070
1071/*
1072 * ctc_tty_hangup() --- called by tty_hangup() when a hangup is signaled.
1073 */
1074static void
1075ctc_tty_hangup(struct tty_struct *tty)
1076{
1077 ctc_tty_info *info = (ctc_tty_info *)tty->driver_data;
1078 unsigned long saveflags;
1079 DBF_TEXT(trace, 3, __FUNCTION__);
1080 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_hangup"))
1081 return;
1082 ctc_tty_shutdown(info);
1083 info->count = 0;
1084 info->flags &= ~CTC_ASYNC_NORMAL_ACTIVE;
1085 spin_lock_irqsave(&ctc_tty_lock, saveflags);
1086	info->tty = NULL;
1087 spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
1088 wake_up_interruptible(&info->open_wait);
1089}
1090
1091
1092/*
1093 * For all online tty's, try sending data to
1094 * the lower levels.
1095 */
1096static void
1097ctc_tty_task(unsigned long arg)
1098{
1099 ctc_tty_info *info = (void *)arg;
1100 unsigned long saveflags;
1101 int again;
1102
1103 DBF_TEXT(trace, 3, __FUNCTION__);
1104 spin_lock_irqsave(&ctc_tty_lock, saveflags);
1105 if ((!ctc_tty_shuttingdown) && info) {
1106 again = ctc_tty_tint(info);
1107 if (!again)
1108 info->lsr |= UART_LSR_TEMT;
1109 again |= ctc_tty_readmodem(info);
1110 if (again) {
1111 tasklet_schedule(&info->tasklet);
1112 }
1113 }
1114 spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
1115}
1116
1117static struct tty_operations ctc_ops = {
1118 .open = ctc_tty_open,
1119 .close = ctc_tty_close,
1120 .write = ctc_tty_write,
1121 .flush_chars = ctc_tty_flush_chars,
1122 .write_room = ctc_tty_write_room,
1123 .chars_in_buffer = ctc_tty_chars_in_buffer,
1124 .flush_buffer = ctc_tty_flush_buffer,
1125 .ioctl = ctc_tty_ioctl,
1126 .throttle = ctc_tty_throttle,
1127 .unthrottle = ctc_tty_unthrottle,
1128 .set_termios = ctc_tty_set_termios,
1129 .hangup = ctc_tty_hangup,
1130 .tiocmget = ctc_tty_tiocmget,
1131 .tiocmset = ctc_tty_tiocmset,
1132};
1133
1134int
1135ctc_tty_init(void)
1136{
1137 int i;
1138 ctc_tty_info *info;
1139 struct tty_driver *device;
1140
1141 DBF_TEXT(trace, 2, __FUNCTION__);
1142 driver = kmalloc(sizeof(ctc_tty_driver), GFP_KERNEL);
1143 if (driver == NULL) {
1144		printk(KERN_WARNING "Out of memory in ctc_tty_init\n");
1145 return -ENOMEM;
1146 }
1147 memset(driver, 0, sizeof(ctc_tty_driver));
1148 device = alloc_tty_driver(CTC_TTY_MAX_DEVICES);
1149 if (!device) {
1150 kfree(driver);
1151		printk(KERN_WARNING "Out of memory in ctc_tty_init\n");
1152 return -ENOMEM;
1153 }
1154
1155 device->devfs_name = "ctc/" CTC_TTY_NAME;
1156 device->name = CTC_TTY_NAME;
1157 device->major = CTC_TTY_MAJOR;
1158 device->minor_start = 0;
1159 device->type = TTY_DRIVER_TYPE_SERIAL;
1160 device->subtype = SERIAL_TYPE_NORMAL;
1161 device->init_termios = tty_std_termios;
1162 device->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
1163 device->flags = TTY_DRIVER_REAL_RAW;
1164	device->driver_name = "ctc_tty";
1165 tty_set_operations(device, &ctc_ops);
1166 if (tty_register_driver(device)) {
1167 printk(KERN_WARNING "ctc_tty: Couldn't register serial-device\n");
1168 put_tty_driver(device);
1169 kfree(driver);
1170 return -1;
1171 }
1172 driver->ctc_tty_device = device;
1173 for (i = 0; i < CTC_TTY_MAX_DEVICES; i++) {
1174 info = &driver->info[i];
1175 init_MUTEX(&info->write_sem);
1176 tasklet_init(&info->tasklet, ctc_tty_task,
1177 (unsigned long) info);
1178 info->magic = CTC_ASYNC_MAGIC;
1179 info->line = i;
1180		info->tty = NULL;
1181 info->count = 0;
1182 info->blocked_open = 0;
1183 init_waitqueue_head(&info->open_wait);
1184 init_waitqueue_head(&info->close_wait);
1185 skb_queue_head_init(&info->tx_queue);
1186 skb_queue_head_init(&info->rx_queue);
1187 init_timer(&info->stoptimer);
1188 info->stoptimer.function = ctc_tty_stopdev;
1189 info->stoptimer.data = (unsigned long)info;
1190 info->mcr = UART_MCR_RTS;
1191 }
1192 return 0;
1193}
1194
1195int
1196ctc_tty_register_netdev(struct net_device *dev) {
1197 int ttynum;
1198 char *err;
1199 char *p;
1200
1201 DBF_TEXT(trace, 2, __FUNCTION__);
1202	if (!dev) {		/* dev->name is an array, never NULL */
1203		printk(KERN_WARNING
1204		       "ctc_tty_register_netdev called "
1205		       "with NULL dev\n");
1206		return -1;
1207	}
1208
1209 /*
1210 * If the name is a format string the caller wants us to
1211 * do a name allocation : format string must end with %d
1212 */
1213	if (strchr(dev->name, '%')) {
1214		/* dev_alloc_name() rewrites dev->name in place */
1215		int rc = dev_alloc_name(dev, dev->name);
1216
1217		if (rc < 0) {
1218			printk(KERN_DEBUG "dev_alloc_name returned error %d\n", rc);
1219			return rc;
1220		}
1221	}
1222
1223	for (p = dev->name; *p && ((*p < '0') || (*p > '9')); p++);
1224	ttynum = simple_strtoul(p, &err, 0);
1225	if ((*p == '\0') || (ttynum < 0) ||
1226	    (ttynum >= CTC_TTY_MAX_DEVICES) || (err && *err)) {
1227		printk(KERN_WARNING
1228		       "ctc_tty_register_netdev called "
1229		       "with invalid number in name '%s'\n", dev->name);
1230		return -1;
1231	}
1232 if (driver->info[ttynum].netdev) {
1233 printk(KERN_WARNING
1234 "ctc_tty_register_netdev called "
1235 "for already registered device '%s'\n",
1236 dev->name);
1237 return -1;
1238 }
1239 driver->info[ttynum].netdev = dev;
1240 return 0;
1241}
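
In practice the interface name is generated from the "ctctty%d" template in ctc_new_device(), so the digit scan above recovers the tty slot number from the allocated name. Hypothetical examples of the mapping:

	/*
	 *   "ctctty0"  -> ttynum 0
	 *   "ctctty12" -> ttynum 12
	 *   "ctc3"     -> ttynum 3   (only the trailing number matters)
	 *   "ctcttyX"  -> rejected   (no number in the name)
	 */
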
1242
1243void
1244ctc_tty_unregister_netdev(struct net_device *dev) {
1245 int i;
1246 unsigned long saveflags;
1247 ctc_tty_info *info = NULL;
1248
1249 DBF_TEXT(trace, 2, __FUNCTION__);
1250 spin_lock_irqsave(&ctc_tty_lock, saveflags);
1251 for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
1252 if (driver->info[i].netdev == dev) {
1253 info = &driver->info[i];
1254 break;
1255 }
1256 if (info) {
1257 info->netdev = NULL;
1258 skb_queue_purge(&info->tx_queue);
1259 skb_queue_purge(&info->rx_queue);
1260 }
1261 spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
1262}
1263
1264void
1265ctc_tty_cleanup(void) {
1266 unsigned long saveflags;
1267
1268 DBF_TEXT(trace, 2, __FUNCTION__);
1269 spin_lock_irqsave(&ctc_tty_lock, saveflags);
1270 ctc_tty_shuttingdown = 1;
1271 spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
1272 tty_unregister_driver(driver->ctc_tty_device);
1273 put_tty_driver(driver->ctc_tty_device);
1274 kfree(driver);
1275 driver = NULL;
1276}
diff --git a/drivers/s390/net/ctctty.h b/drivers/s390/net/ctctty.h
new file mode 100644
index 000000000000..84b2f8f23ab3
--- /dev/null
+++ b/drivers/s390/net/ctctty.h
@@ -0,0 +1,37 @@
1/*
2 * $Id: ctctty.h,v 1.4 2003/09/18 08:01:10 mschwide Exp $
3 *
4 * CTC / ESCON network driver, tty interface.
5 *
6 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#ifndef _CTCTTY_H_
25#define _CTCTTY_H_
26
27#include <linux/skbuff.h>
28#include <linux/netdevice.h>
29
30extern int ctc_tty_register_netdev(struct net_device *);
31extern void ctc_tty_unregister_netdev(struct net_device *);
32extern void ctc_tty_netif_rx(struct sk_buff *);
33extern int ctc_tty_init(void);
34extern void ctc_tty_cleanup(void);
35extern void ctc_tty_setcarrier(struct net_device *, int);
36
37#endif
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
new file mode 100644
index 000000000000..1b0a9f16024c
--- /dev/null
+++ b/drivers/s390/net/cu3088.c
@@ -0,0 +1,166 @@
1/*
2 * $Id: cu3088.c,v 1.34 2004/06/15 13:16:27 pavlic Exp $
3 *
4 * CTC / LCS ccw_device driver
5 *
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Arnd Bergmann <arndb@de.ibm.com>
8 * Cornelia Huck <cohuck@de.ibm.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 */
25
26#include <linux/init.h>
27#include <linux/module.h>
28#include <linux/err.h>
29
30#include <asm/ccwdev.h>
31#include <asm/ccwgroup.h>
32
33#include "cu3088.h"
34
35const char *cu3088_type[] = {
36 "not a channel",
37 "CTC/A",
38 "ESCON channel",
39 "FICON channel",
40 "P390 LCS card",
41 "OSA LCS card",
42 "unknown channel type",
43 "unsupported channel type",
44};
45
46/* static definitions */
47
48static struct ccw_device_id cu3088_ids[] = {
49 { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel },
50 { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon },
51 { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon },
52 { CCW_DEVICE(0x3088, 0x01), .driver_info = channel_type_p390 },
53 { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 },
54 { /* end of list */ }
55};
56
57static struct ccw_driver cu3088_driver;
58
59struct device *cu3088_root_dev;
60
61static ssize_t
62group_write(struct device_driver *drv, const char *buf, size_t count)
63{
64 const char *start, *end;
65 char bus_ids[2][BUS_ID_SIZE], *argv[2];
66 int i;
67 int ret;
68 struct ccwgroup_driver *cdrv;
69
70 cdrv = to_ccwgroupdrv(drv);
71 if (!cdrv)
72 return -EINVAL;
73 start = buf;
74 for (i=0; i<2; i++) {
75 static const char delim[] = {',', '\n'};
76 int len;
77
78 if (!(end = strchr(start, delim[i])))
79 return count;
80 len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start + 1);
81		strlcpy(bus_ids[i], start, len);
82 argv[i] = bus_ids[i];
83 start = end + 1;
84 }
85
86 ret = ccwgroup_create(cu3088_root_dev, cdrv->driver_id,
87 &cu3088_driver, 2, argv);
88
89 return (ret == 0) ? count : ret;
90}
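
group_write() implements the user-visible grouping interface: user space writes two comma-separated ccw bus IDs, terminated by a newline, to the discipline driver's "group" sysfs attribute, and the pair is handed to ccwgroup_create(). A hypothetical invocation (the bus IDs are examples, and the exact sysfs path depends on the kernel's ccwgroup layout):

	/*
	 * echo 0.0.0600,0.0.0601 > .../drivers/ctc/group   (illustrative)
	 *
	 * buf = "0.0.0600,0.0.0601\n"
	 * -> argv[0] = "0.0.0600", argv[1] = "0.0.0601"
	 * -> ccwgroup_create(cu3088_root_dev, cdrv->driver_id,
	 *                    &cu3088_driver, 2, argv);
	 */
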
91
92static DRIVER_ATTR(group, 0200, NULL, group_write);
93
94/* Register-unregister for ctc&lcs */
95int
96register_cu3088_discipline(struct ccwgroup_driver *dcp)
97{
98 int rc;
99
100 if (!dcp)
101 return -EINVAL;
102
103 /* Register discipline.*/
104 rc = ccwgroup_driver_register(dcp);
105 if (rc)
106 return rc;
107
108 rc = driver_create_file(&dcp->driver, &driver_attr_group);
109 if (rc)
110 ccwgroup_driver_unregister(dcp);
111
112 return rc;
113
114}
115
116void
117unregister_cu3088_discipline(struct ccwgroup_driver *dcp)
118{
119 if (!dcp)
120 return;
121
122 driver_remove_file(&dcp->driver, &driver_attr_group);
123 ccwgroup_driver_unregister(dcp);
124}
125
126static struct ccw_driver cu3088_driver = {
127 .owner = THIS_MODULE,
128 .ids = cu3088_ids,
129 .name = "cu3088",
130 .probe = ccwgroup_probe_ccwdev,
131 .remove = ccwgroup_remove_ccwdev,
132};
133
134/* module setup */
135static int __init
136cu3088_init (void)
137{
138 int rc;
139
140 cu3088_root_dev = s390_root_dev_register("cu3088");
141 if (IS_ERR(cu3088_root_dev))
142 return PTR_ERR(cu3088_root_dev);
143 rc = ccw_driver_register(&cu3088_driver);
144 if (rc)
145 s390_root_dev_unregister(cu3088_root_dev);
146
147 return rc;
148}
149
150static void __exit
151cu3088_exit (void)
152{
153 ccw_driver_unregister(&cu3088_driver);
154 s390_root_dev_unregister(cu3088_root_dev);
155}
156
157MODULE_DEVICE_TABLE(ccw,cu3088_ids);
158MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
159MODULE_LICENSE("GPL");
160
161module_init(cu3088_init);
162module_exit(cu3088_exit);
163
164EXPORT_SYMBOL_GPL(cu3088_type);
165EXPORT_SYMBOL_GPL(register_cu3088_discipline);
166EXPORT_SYMBOL_GPL(unregister_cu3088_discipline);
diff --git a/drivers/s390/net/cu3088.h b/drivers/s390/net/cu3088.h
new file mode 100644
index 000000000000..0ec49a8b3adc
--- /dev/null
+++ b/drivers/s390/net/cu3088.h
@@ -0,0 +1,41 @@
1#ifndef _CU3088_H
2#define _CU3088_H
3
4/**
5 * Enum for classifying detected devices.
6 */
7enum channel_types {
8 /* Device is not a channel */
9 channel_type_none,
10
11 /* Device is a CTC/A */
12 channel_type_parallel,
13
14	/* Device is an ESCON channel */
15 channel_type_escon,
16
17 /* Device is a FICON channel */
18 channel_type_ficon,
19
20 /* Device is a P390 LCS card */
21 channel_type_p390,
22
23	/* Device is an OSA2 card */
24 channel_type_osa2,
25
26 /* Device is a channel, but we don't know
27 * anything about it */
28 channel_type_unknown,
29
30 /* Device is an unsupported model */
31 channel_type_unsupported,
32
33 /* number of type entries */
34 num_channel_types
35};
36
37extern const char *cu3088_type[num_channel_types];
38extern int register_cu3088_discipline(struct ccwgroup_driver *);
39extern void unregister_cu3088_discipline(struct ccwgroup_driver *);
40
41#endif
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
new file mode 100644
index 000000000000..fa09440d82e5
--- /dev/null
+++ b/drivers/s390/net/fsm.c
@@ -0,0 +1,220 @@
1/**
2 * $Id: fsm.c,v 1.6 2003/10/15 11:37:29 mschwide Exp $
3 *
4 * A generic finite state machine, based on the FSM used in isdn4linux.
5 *
6 */
7
8#include "fsm.h"
9#include <linux/config.h>
10#include <linux/module.h>
11#include <linux/timer.h>
12
13MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
14MODULE_DESCRIPTION("Finite state machine helper functions");
15MODULE_LICENSE("GPL");
16
17fsm_instance *
18init_fsm(char *name, const char **state_names, const char **event_names, int nr_states,
19 int nr_events, const fsm_node *tmpl, int tmpl_len, int order)
20{
21 int i;
22 fsm_instance *this;
23 fsm_function_t *m;
24 fsm *f;
25
26 this = (fsm_instance *)kmalloc(sizeof(fsm_instance), order);
27 if (this == NULL) {
28 printk(KERN_WARNING
29 "fsm(%s): init_fsm: Couldn't alloc instance\n", name);
30 return NULL;
31 }
32 memset(this, 0, sizeof(fsm_instance));
33 strlcpy(this->name, name, sizeof(this->name));
34
35 f = (fsm *)kmalloc(sizeof(fsm), order);
36 if (f == NULL) {
37 printk(KERN_WARNING
38 "fsm(%s): init_fsm: Couldn't alloc fsm\n", name);
39 kfree_fsm(this);
40 return NULL;
41 }
42 memset(f, 0, sizeof(fsm));
43 f->nr_events = nr_events;
44 f->nr_states = nr_states;
45 f->event_names = event_names;
46 f->state_names = state_names;
47 this->f = f;
48
49 m = (fsm_function_t *)kmalloc(
50 sizeof(fsm_function_t) * nr_states * nr_events, order);
51 if (m == NULL) {
52 printk(KERN_WARNING
53 "fsm(%s): init_fsm: Couldn't alloc jumptable\n", name);
54 kfree_fsm(this);
55 return NULL;
56 }
57 memset(m, 0, sizeof(fsm_function_t) * f->nr_states * f->nr_events);
58 f->jumpmatrix = m;
59
60 for (i = 0; i < tmpl_len; i++) {
61 if ((tmpl[i].cond_state >= nr_states) ||
62 (tmpl[i].cond_event >= nr_events) ) {
63 printk(KERN_ERR
64 "fsm(%s): init_fsm: Bad template l=%d st(%ld/%ld) ev(%ld/%ld)\n",
65 name, i, (long)tmpl[i].cond_state, (long)f->nr_states,
66 (long)tmpl[i].cond_event, (long)f->nr_events);
67 kfree_fsm(this);
68 return NULL;
69 } else
70 m[nr_states * tmpl[i].cond_event + tmpl[i].cond_state] =
71 tmpl[i].function;
72 }
73 return this;
74}
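
The jump table built here is a flat nr_states x nr_events array in event-major order; the index expression above is mirrored by the lookup in fsm_event() (see fsm.h). A hypothetical template entry and where it lands:

	/*
	 * With nr_states = 4, the node { 2, 1, my_action } (state 2,
	 * event 1; names hypothetical) is stored at
	 *
	 *     jumpmatrix[4 * 1 + 2] == jumpmatrix[6]
	 *
	 * and fsm_event(fi, 1, arg) in state 2 fetches exactly that slot.
	 */
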
75
76void
77kfree_fsm(fsm_instance *this)
78{
79 if (this) {
80 if (this->f) {
81 if (this->f->jumpmatrix)
82 kfree(this->f->jumpmatrix);
83 kfree(this->f);
84 }
85 kfree(this);
86 } else
87 printk(KERN_WARNING
88 "fsm: kfree_fsm called with NULL argument\n");
89}
90
91#if FSM_DEBUG_HISTORY
92void
93fsm_print_history(fsm_instance *fi)
94{
95 int idx = 0;
96 int i;
97
98 if (fi->history_size >= FSM_HISTORY_SIZE)
99 idx = fi->history_index;
100
101 printk(KERN_DEBUG "fsm(%s): History:\n", fi->name);
102 for (i = 0; i < fi->history_size; i++) {
103 int e = fi->history[idx].event;
104 int s = fi->history[idx++].state;
105 idx %= FSM_HISTORY_SIZE;
106 if (e == -1)
107 printk(KERN_DEBUG " S=%s\n",
108 fi->f->state_names[s]);
109 else
110 printk(KERN_DEBUG " S=%s E=%s\n",
111 fi->f->state_names[s],
112 fi->f->event_names[e]);
113 }
114 fi->history_size = fi->history_index = 0;
115}
116
117void
118fsm_record_history(fsm_instance *fi, int state, int event)
119{
120 fi->history[fi->history_index].state = state;
121 fi->history[fi->history_index++].event = event;
122 fi->history_index %= FSM_HISTORY_SIZE;
123 if (fi->history_size < FSM_HISTORY_SIZE)
124 fi->history_size++;
125}
126#endif
127
128const char *
129fsm_getstate_str(fsm_instance *fi)
130{
131 int st = atomic_read(&fi->state);
132 if (st >= fi->f->nr_states)
133 return "Invalid";
134 return fi->f->state_names[st];
135}
136
137static void
138fsm_expire_timer(fsm_timer *this)
139{
140#if FSM_TIMER_DEBUG
141 printk(KERN_DEBUG "fsm(%s): Timer %p expired\n",
142 this->fi->name, this);
143#endif
144 fsm_event(this->fi, this->expire_event, this->event_arg);
145}
146
147void
148fsm_settimer(fsm_instance *fi, fsm_timer *this)
149{
150 this->fi = fi;
151 this->tl.function = (void *)fsm_expire_timer;
152 this->tl.data = (long)this;
153#if FSM_TIMER_DEBUG
154 printk(KERN_DEBUG "fsm(%s): Create timer %p\n", fi->name,
155 this);
156#endif
157 init_timer(&this->tl);
158}
159
160void
161fsm_deltimer(fsm_timer *this)
162{
163#if FSM_TIMER_DEBUG
164 printk(KERN_DEBUG "fsm(%s): Delete timer %p\n", this->fi->name,
165 this);
166#endif
167 del_timer(&this->tl);
168}
169
170int
171fsm_addtimer(fsm_timer *this, int millisec, int event, void *arg)
172{
173
174#if FSM_TIMER_DEBUG
175 printk(KERN_DEBUG "fsm(%s): Add timer %p %dms\n",
176 this->fi->name, this, millisec);
177#endif
178
179 init_timer(&this->tl);
180 this->tl.function = (void *)fsm_expire_timer;
181 this->tl.data = (long)this;
182 this->expire_event = event;
183 this->event_arg = arg;
184 this->tl.expires = jiffies + (millisec * HZ) / 1000;
185 add_timer(&this->tl);
186 return 0;
187}
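
The expiry computation above is the open-coded millisecond-to-jiffies conversion. On kernels that provide msecs_to_jiffies(), the same line could be written, up to rounding, as:

	this->tl.expires = jiffies + msecs_to_jiffies(millisec);
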
188
189/* FIXME: this function is never used, why? */
190void
191fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg)
192{
193
194#if FSM_TIMER_DEBUG
195 printk(KERN_DEBUG "fsm(%s): Restart timer %p %dms\n",
196 this->fi->name, this, millisec);
197#endif
198
199 del_timer(&this->tl);
200 init_timer(&this->tl);
201 this->tl.function = (void *)fsm_expire_timer;
202 this->tl.data = (long)this;
203 this->expire_event = event;
204 this->event_arg = arg;
205 this->tl.expires = jiffies + (millisec * HZ) / 1000;
206 add_timer(&this->tl);
207}
208
209EXPORT_SYMBOL(init_fsm);
210EXPORT_SYMBOL(kfree_fsm);
211EXPORT_SYMBOL(fsm_settimer);
212EXPORT_SYMBOL(fsm_deltimer);
213EXPORT_SYMBOL(fsm_addtimer);
214EXPORT_SYMBOL(fsm_modtimer);
215EXPORT_SYMBOL(fsm_getstate_str);
216
217#if FSM_DEBUG_HISTORY
218EXPORT_SYMBOL(fsm_print_history);
219EXPORT_SYMBOL(fsm_record_history);
220#endif
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
new file mode 100644
index 000000000000..f9a011001eb6
--- /dev/null
+++ b/drivers/s390/net/fsm.h
@@ -0,0 +1,265 @@
1/* $Id: fsm.h,v 1.1.1.1 2002/03/13 19:33:09 mschwide Exp $
2 */
3#ifndef _FSM_H_
4#define _FSM_H_
5
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <linux/timer.h>
9#include <linux/time.h>
10#include <linux/slab.h>
11#include <linux/sched.h>
12#include <linux/string.h>
13#include <asm/atomic.h>
14
15/**
16 * Define this to get debugging messages.
17 */
18#define FSM_DEBUG 0
19
20/**
21 * Define this to get debugging messages for
22 * timer handling.
23 */
24#define FSM_TIMER_DEBUG 0
25
26/**
27 * Define these to record a history of
28 * events/state changes and print it if an
29 * action function is not found.
30 */
31#define FSM_DEBUG_HISTORY 0
32#define FSM_HISTORY_SIZE 40
33
34struct fsm_instance_t;
35
36/**
37 * Definition of an action function, called by an FSM
38 */
39typedef void (*fsm_function_t)(struct fsm_instance_t *, int, void *);
40
41/**
42 * Internal jump table for an FSM
43 */
44typedef struct {
45 fsm_function_t *jumpmatrix;
46 int nr_events;
47 int nr_states;
48 const char **event_names;
49 const char **state_names;
50} fsm;
51
52#if FSM_DEBUG_HISTORY
53/**
54 * Element of State/Event history used for debugging.
55 */
56typedef struct {
57 int state;
58 int event;
59} fsm_history;
60#endif
61
62/**
63 * Representation of an FSM
64 */
65typedef struct fsm_instance_t {
66 fsm *f;
67 atomic_t state;
68 char name[16];
69 void *userdata;
70 int userint;
71#if FSM_DEBUG_HISTORY
72 int history_index;
73 int history_size;
74 fsm_history history[FSM_HISTORY_SIZE];
75#endif
76} fsm_instance;
77
78/**
79 * Description of a state-event combination
80 */
81typedef struct {
82 int cond_state;
83 int cond_event;
84 fsm_function_t function;
85} fsm_node;
86
87/**
88 * Description of a FSM Timer.
89 */
90typedef struct {
91 fsm_instance *fi;
92 struct timer_list tl;
93 int expire_event;
94 void *event_arg;
95} fsm_timer;
96
97/**
98 * Creates an FSM
99 *
100 * @param name Name of this instance for logging purposes.
101 * @param state_names An array of names for all states for logging purposes.
102 * @param event_names An array of names for all events for logging purposes.
103 * @param nr_states Number of states for this instance.
104 * @param nr_events Number of events for this instance.
105 * @param tmpl An array of fsm_nodes, describing this FSM.
106 * @param tmpl_len Length of the describing array.
107 * @param order Parameter for allocation of the FSM data structs.
108 */
109extern fsm_instance *
110init_fsm(char *name, const char **state_names,
111 const char **event_names,
112 int nr_states, int nr_events, const fsm_node *tmpl,
113 int tmpl_len, int order);
114
115/**
116 * Releases an FSM
117 *
118 * @param fi Pointer to an FSM, previously created with init_fsm.
119 */
120extern void kfree_fsm(fsm_instance *fi);
121
122#if FSM_DEBUG_HISTORY
123extern void
124fsm_print_history(fsm_instance *fi);
125
126extern void
127fsm_record_history(fsm_instance *fi, int state, int event);
128#endif
129
130/**
131 * Emits an event to an FSM.
132 * If an action function is defined for the current state/event combination,
133 * this function is called.
134 *
135 * @param fi    Pointer to the FSM which should receive the event.
136 * @param event The event to be delivered.
137 * @param arg   A generic argument, handed to the action function.
138 *
139 * @return 0 on success,
140 *         1 if the current state or the event is out of range,
141 *         !0 if state and event are in range, but no action is defined.
142 */
143extern __inline__ int
144fsm_event(fsm_instance *fi, int event, void *arg)
145{
146 fsm_function_t r;
147 int state = atomic_read(&fi->state);
148
149 if ((state >= fi->f->nr_states) ||
150 (event >= fi->f->nr_events) ) {
151 printk(KERN_ERR "fsm(%s): Invalid state st(%ld/%ld) ev(%d/%ld)\n",
152 fi->name, (long)state,(long)fi->f->nr_states, event,
153 (long)fi->f->nr_events);
154#if FSM_DEBUG_HISTORY
155 fsm_print_history(fi);
156#endif
157 return 1;
158 }
159 r = fi->f->jumpmatrix[fi->f->nr_states * event + state];
160 if (r) {
161#if FSM_DEBUG
162 printk(KERN_DEBUG "fsm(%s): state %s event %s\n",
163 fi->name, fi->f->state_names[state],
164 fi->f->event_names[event]);
165#endif
166#if FSM_DEBUG_HISTORY
167 fsm_record_history(fi, state, event);
168#endif
169 r(fi, event, arg);
170 return 0;
171 } else {
172#if FSM_DEBUG || FSM_DEBUG_HISTORY
173 printk(KERN_DEBUG "fsm(%s): no function for event %s in state %s\n",
174 fi->name, fi->f->event_names[event],
175 fi->f->state_names[state]);
176#endif
177#if FSM_DEBUG_HISTORY
178 fsm_print_history(fi);
179#endif
180 return !0;
181 }
182}
183
184/**
185 * Modifies the state of an FSM.
186 * This does <em>not</em> trigger an event nor call an action function.
187 *
188 * @param fi       Pointer to FSM
189 * @param newstate The new state for this FSM.
190 */
191extern __inline__ void
192fsm_newstate(fsm_instance *fi, int newstate)
193{
194 atomic_set(&fi->state,newstate);
195#if FSM_DEBUG_HISTORY
196 fsm_record_history(fi, newstate, -1);
197#endif
198#if FSM_DEBUG
199 printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name,
200 fi->f->state_names[newstate]);
201#endif
202}
203
204/**
205 * Retrieves the state of an FSM
206 *
207 * @param fi Pointer to FSM
208 *
209 * @return The current state of the FSM.
210 */
211extern __inline__ int
212fsm_getstate(fsm_instance *fi)
213{
214 return atomic_read(&fi->state);
215}
216
217/**
218 * Retrieves the name of the state of an FSM
219 *
220 * @param fi Pointer to FSM
221 *
222 * @return The current state of the FSM in a human readable form.
223 */
224extern const char *fsm_getstate_str(fsm_instance *fi);
225
226/**
227 * Initializes a timer for an FSM.
228 * This prepares an fsm_timer for usage with fsm_addtimer.
229 *
230 * @param fi Pointer to FSM
231 * @param timer The timer to be initialized.
232 */
233extern void fsm_settimer(fsm_instance *fi, fsm_timer *);
234
235/**
236 * Clears a pending timer of an FSM instance.
237 *
238 * @param timer The timer to clear.
239 */
240extern void fsm_deltimer(fsm_timer *timer);
241
242/**
243 * Adds and starts a timer to an FSM instance.
244 *
245 * @param timer The timer to be added. The field fi of that timer
246 * must have been set to point to the instance.
247 * @param millisec Duration, after which the timer should expire.
248 * @param event Event, to trigger if timer expires.
249 * @param arg Generic argument, provided to expiry function.
250 *
251 * @return 0 on success, -1 if timer is already active.
252 */
253extern int fsm_addtimer(fsm_timer *timer, int millisec, int event, void *arg);
254
255/**
256 * Modifies a timer of an FSM.
257 *
258 * @param timer The timer to modify.
259 * @param millisec Duration, after which the timer should expire.
260 * @param event Event, to trigger if timer expires.
261 * @param arg Generic argument, provided to expiry function.
262 */
263extern void fsm_modtimer(fsm_timer *timer, int millisec, int event, void *arg);
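
/*
 * Timer usage sketch (illustrative): arm a timeout that feeds an event
 * back into the instance when it expires. dev_fsm continues the
 * hypothetical example above; DEV_EVENT_TIMEOUT is likewise made up
 * and assumed to have been added to the event enum and template.
 */
#if 0	/* example only, never compiled */
static fsm_timer dev_timer;

static void
dev_start_with_timeout(void)
{
	/* Bind the timer to the instance once... */
	fsm_settimer(dev_fsm, &dev_timer);
	/* ...then deliver DEV_EVENT_TIMEOUT to dev_fsm after 5000 ms. */
	fsm_addtimer(&dev_timer, 5000, DEV_EVENT_TIMEOUT, NULL);
}

static void
dev_operation_done(void)
{
	/* Completed in time: cancel the pending timeout. */
	fsm_deltimer(&dev_timer);
}
#endif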
264
265#endif /* _FSM_H_ */
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
new file mode 100644
index 000000000000..1ac6563ee3e0
--- /dev/null
+++ b/drivers/s390/net/iucv.c
@@ -0,0 +1,2567 @@
1/*
2 * $Id: iucv.c,v 1.43 2005/02/09 14:47:43 braunu Exp $
3 *
4 * IUCV network driver
5 *
6 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s):
8 * Original source:
9 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
10 * Xenia Tkatschow (xenia@us.ibm.com)
11 * 2Gb awareness and general cleanup:
12 * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
13 *
14 * Documentation used:
15 * The original source
16 * CP Programming Service, IBM document # SC24-5760
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 *
32 * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.43 $
33 *
34 */
35
36/* #define DEBUG */
37
38#include <linux/module.h>
39#include <linux/moduleparam.h>
40#include <linux/config.h>
41
42#include <linux/spinlock.h>
43#include <linux/kernel.h>
44#include <linux/slab.h>
45#include <linux/init.h>
46#include <linux/interrupt.h>
47#include <linux/list.h>
48#include <linux/errno.h>
49#include <linux/err.h>
50#include <linux/device.h>
51#include <asm/atomic.h>
52#include "iucv.h"
53#include <asm/io.h>
54#include <asm/s390_ext.h>
55#include <asm/ebcdic.h>
56#include <asm/smp.h>
57#include <asm/ccwdev.h> /* for root device stuff */
58
59/* FLAGS:
60 * All flags are defined in the field IPFLAGS1 of each function
61 * and can be found in CP Programming Services.
62 * IPSRCCLS - Indicates you have specified a source class
63 * IPFGMCL - Indicates you have specified a target class
64 * IPFGPID - Indicates you have specified a pathid
65 * IPFGMID - Indicates you have specified a message ID
66 * IPANSLST - Indicates that you are using an address list for
67 * reply data
68 * IPBUFLST - Indicates that you are using an address list for
69 * message data
70 */
71
72#define IPSRCCLS 0x01
73#define IPFGMCL 0x01
74#define IPFGPID 0x02
75#define IPFGMID 0x04
76#define IPANSLST 0x08
77#define IPBUFLST 0x40
78
79static int
80iucv_bus_match (struct device *dev, struct device_driver *drv)
81{
82 return 0;
83}
84
85struct bus_type iucv_bus = {
86 .name = "iucv",
87 .match = iucv_bus_match,
88};
89
90struct device *iucv_root;
91
92/* General IUCV interrupt structure */
93typedef struct {
94 __u16 ippathid;
95 __u8 res1;
96 __u8 iptype;
97 __u32 res2;
98 __u8 ipvmid[8];
99 __u8 res3[24];
100} iucv_GeneralInterrupt;
101
102static iucv_GeneralInterrupt *iucv_external_int_buffer = NULL;
103
104/* Spin Lock declaration */
105
106static DEFINE_SPINLOCK(iucv_lock);
107
108static int messagesDisabled = 0;
109
110/***************INTERRUPT HANDLING ***************/
111
112typedef struct {
113 struct list_head queue;
114 iucv_GeneralInterrupt data;
115} iucv_irqdata;
116
117static struct list_head iucv_irq_queue;
118static DEFINE_SPINLOCK(iucv_irq_queue_lock);
119
120/*
121 * Internal function prototypes
122 */
123static void iucv_tasklet_handler(unsigned long);
124static void iucv_irq_handler(struct pt_regs *, __u16);
125
126static DECLARE_TASKLET(iucv_tasklet,iucv_tasklet_handler,0);
127
128/************ FUNCTION ID'S ****************************/
129
130#define ACCEPT 10
131#define CONNECT 11
132#define DECLARE_BUFFER 12
133#define PURGE 9
134#define QUERY 0
135#define QUIESCE 13
136#define RECEIVE 5
137#define REJECT 8
138#define REPLY 6
139#define RESUME 14
140#define RETRIEVE_BUFFER 2
141#define SEND 4
142#define SETMASK 16
143#define SEVER 15
144
145/**
146 * Structure: handler
147 * members: list - list management.
148 * structure: id
149 * userid - 8 char array of machine identification
150 * user_data - 16 char array for user identification
151 * mask - 24 char array used when comparing the two previous fields
152 * interrupt_table - vector of interrupt functions.
153 * pgm_data - pointer to application data that is passed
154 * to the interrupt handlers
155*/
156typedef struct handler_t {
157 struct list_head list;
158 struct {
159 __u8 userid[8];
160 __u8 user_data[16];
161 __u8 mask[24];
162 } id;
163 iucv_interrupt_ops_t *interrupt_table;
164 void *pgm_data;
165} handler;
166
167/**
168 * iucv_handler_table: List of registered handlers.
169 */
170static struct list_head iucv_handler_table;
171
172/**
173 * iucv_pathid_table: an array of *handler pointing into
174 * iucv_handler_table for fast indexing by pathid;
175 */
176static handler **iucv_pathid_table;
177
178static unsigned long max_connections;
179
180/**
181 * iucv_cpuid: contains the logical cpu number of the cpu which
182 * has declared the iucv buffer by issuing DECLARE_BUFFER.
183 * If no cpu has done the initialization iucv_cpuid contains -1.
184 */
185static int iucv_cpuid = -1;
186/**
187 * register_flag: is 0 when external interrupt has not been registered
188 */
189static int register_flag;
190
191/****************FIVE 40-BYTE PARAMETER STRUCTURES******************/
192/* Data struct 1: iparml_control
193 * Used for iucv_accept
194 * iucv_connect
195 * iucv_quiesce
196 * iucv_resume
197 * iucv_sever
198 * iucv_retrieve_buffer
199 * Data struct 2: iparml_dpl (data in parameter list)
200 * Used for iucv_send_prmmsg
201 * iucv_send2way_prmmsg
202 * iucv_send2way_prmmsg_array
203 * iucv_reply_prmmsg
204 * Data struct 3: iparml_db (data in a buffer)
205 * Used for iucv_receive
206 * iucv_receive_array
207 * iucv_reject
208 * iucv_reply
209 * iucv_reply_array
210 * iucv_send
211 * iucv_send_array
212 * iucv_send2way
213 * iucv_send2way_array
214 * iucv_declare_buffer
215 * Data struct 4: iparml_purge
216 * Used for iucv_purge
217 * iucv_query
218 * Data struct 5: iparml_set_mask
219 * Used for iucv_set_mask
220 */
221
222typedef struct {
223 __u16 ippathid;
224 __u8 ipflags1;
225 __u8 iprcode;
226 __u16 ipmsglim;
227 __u16 res1;
228 __u8 ipvmid[8];
229 __u8 ipuser[16];
230 __u8 iptarget[8];
231} iparml_control;
232
233typedef struct {
234 __u16 ippathid;
235 __u8 ipflags1;
236 __u8 iprcode;
237 __u32 ipmsgid;
238 __u32 iptrgcls;
239 __u8 iprmmsg[8];
240 __u32 ipsrccls;
241 __u32 ipmsgtag;
242 __u32 ipbfadr2;
243 __u32 ipbfln2f;
244 __u32 res;
245} iparml_dpl;
246
247typedef struct {
248 __u16 ippathid;
249 __u8 ipflags1;
250 __u8 iprcode;
251 __u32 ipmsgid;
252 __u32 iptrgcls;
253 __u32 ipbfadr1;
254 __u32 ipbfln1f;
255 __u32 ipsrccls;
256 __u32 ipmsgtag;
257 __u32 ipbfadr2;
258 __u32 ipbfln2f;
259 __u32 res;
260} iparml_db;
261
262typedef struct {
263 __u16 ippathid;
264 __u8 ipflags1;
265 __u8 iprcode;
266 __u32 ipmsgid;
267 __u8 ipaudit[3];
268 __u8 res1[5];
269 __u32 res2;
270 __u32 ipsrccls;
271 __u32 ipmsgtag;
272 __u32 res3[3];
273} iparml_purge;
274
275typedef struct {
276 __u8 ipmask;
277 __u8 res1[2];
278 __u8 iprcode;
279 __u32 res2[9];
280} iparml_set_mask;
281
282typedef struct {
283 union {
284 iparml_control p_ctrl;
285 iparml_dpl p_dpl;
286 iparml_db p_db;
287 iparml_purge p_purge;
288 iparml_set_mask p_set_mask;
289 } param;
290 atomic_t in_use;
291 __u32 res;
292} __attribute__ ((aligned(8))) iucv_param;
293#define PARAM_POOL_SIZE (PAGE_SIZE / sizeof(iucv_param))
294
295static iucv_param * iucv_param_pool;
296
297MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
298MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
299MODULE_LICENSE("GPL");
300
301/*
302 * Debugging stuff
303 *******************************************************************************/
304
305
306#ifdef DEBUG
307static int debuglevel = 0;
308
309module_param(debuglevel, int, 0);
310MODULE_PARM_DESC(debuglevel,
311 "Specifies the debug level (0=off ... 3=all)");
312
313static void
314iucv_dumpit(char *title, void *buf, int len)
315{
316 int i;
317 __u8 *p = (__u8 *)buf;
318
319 if (debuglevel < 3)
320 return;
321
322 printk(KERN_DEBUG "%s\n", title);
323 printk(" ");
324 for (i = 0; i < len; i++) {
325 if (!(i % 16) && i != 0)
326 printk ("\n ");
327 else if (!(i % 4) && i != 0)
328 printk(" ");
329 printk("%02X", *p++);
330 }
331 if (len % 16)
332 printk ("\n");
333 return;
334}
335#define iucv_debug(lvl, fmt, args...) \
336do { \
337 if (debuglevel >= lvl) \
338 printk(KERN_DEBUG "%s: " fmt "\n", __FUNCTION__ , ## args); \
339} while (0)
340
341#else
342
343#define iucv_debug(lvl, fmt, args...)
344#define iucv_dumpit(title, buf, len)
345
346#endif
347
348/*
349 * Internal functions
350 *******************************************************************************/
351
352/**
353 * print start banner
354 */
355static void
356iucv_banner(void)
357{
358 char vbuf[] = "$Revision: 1.43 $";
359 char *version = vbuf;
360
361 if ((version = strchr(version, ':'))) {
362 char *p = strchr(version + 1, '$');
363 if (p)
364 *p = '\0';
365 } else
366 version = " ??? ";
367 printk(KERN_INFO
368 "IUCV lowlevel driver Version%s initialized\n", version);
369}
370
371/**
372 * iucv_init - Initialization
373 *
374 * Allocates and initializes various data structures.
375 */
376static int
377iucv_init(void)
378{
379 int ret;
380
381 if (iucv_external_int_buffer)
382 return 0;
383
384 if (!MACHINE_IS_VM) {
385 printk(KERN_ERR "IUCV: IUCV connection needs VM as base\n");
386 return -EPROTONOSUPPORT;
387 }
388
389 ret = bus_register(&iucv_bus);
390 if (ret) {
391 printk(KERN_ERR "IUCV: failed to register bus.\n");
392 return ret;
393 }
394
395 iucv_root = s390_root_dev_register("iucv");
396 if (IS_ERR(iucv_root)) {
397 printk(KERN_ERR "IUCV: failed to register iucv root.\n");
398 bus_unregister(&iucv_bus);
399 return PTR_ERR(iucv_root);
400 }
401
402 /* Note: GFP_DMA is used to get memory below 2G */
403 iucv_external_int_buffer = kmalloc(sizeof(iucv_GeneralInterrupt),
404 GFP_KERNEL|GFP_DMA);
405 if (!iucv_external_int_buffer) {
406 printk(KERN_WARNING
407 "%s: Could not allocate external interrupt buffer\n",
408 __FUNCTION__);
409 s390_root_dev_unregister(iucv_root);
410 bus_unregister(&iucv_bus);
411 return -ENOMEM;
412 }
413 memset(iucv_external_int_buffer, 0, sizeof(iucv_GeneralInterrupt));
414
415 /* Initialize parameter pool */
416 iucv_param_pool = kmalloc(sizeof(iucv_param) * PARAM_POOL_SIZE,
417 GFP_KERNEL|GFP_DMA);
418 if (!iucv_param_pool) {
419 printk(KERN_WARNING "%s: Could not allocate param pool\n",
420 __FUNCTION__);
421 kfree(iucv_external_int_buffer);
422 iucv_external_int_buffer = NULL;
423 s390_root_dev_unregister(iucv_root);
424 bus_unregister(&iucv_bus);
425 return -ENOMEM;
426 }
427 memset(iucv_param_pool, 0, sizeof(iucv_param) * PARAM_POOL_SIZE);
428
429 /* Initialize irq queue */
430 INIT_LIST_HEAD(&iucv_irq_queue);
431
432 /* Initialize handler table */
433 INIT_LIST_HEAD(&iucv_handler_table);
434
435 iucv_banner();
436 return 0;
437}
438
439/**
440 * iucv_exit - De-Initialization
441 *
442 * Frees everything allocated from iucv_init.
443 */
444static int iucv_retrieve_buffer (void);
445
446static void
447iucv_exit(void)
448{
449 iucv_retrieve_buffer();
450 if (iucv_external_int_buffer) {
451 kfree(iucv_external_int_buffer);
452 iucv_external_int_buffer = NULL;
453 }
454 if (iucv_param_pool) {
455 kfree(iucv_param_pool);
456 iucv_param_pool = NULL;
457 }
458 s390_root_dev_unregister(iucv_root);
459 bus_unregister(&iucv_bus);
460 printk(KERN_INFO "IUCV lowlevel driver unloaded\n");
461}
462
463/**
464 * grab_param: - Get a parameter buffer from the pre-allocated pool.
465 *
466 * This function searches for an unused element in the pre-allocated pool
467 * of parameter buffers. If one is found, it marks it "in use" and returns
468 * a pointer to it. The calling function is responsible for releasing it
469 * when it has finished its usage.
470 *
471 * Returns: A pointer to iucv_param.
472 */
473static __inline__ iucv_param *
474grab_param(void)
475{
476 iucv_param *ptr;
477 static int hint = 0;
478
479 ptr = iucv_param_pool + hint;
480 do {
481 ptr++;
482 if (ptr >= iucv_param_pool + PARAM_POOL_SIZE)
483 ptr = iucv_param_pool;
484 } while (atomic_compare_and_swap(0, 1, &ptr->in_use));
485 hint = ptr - iucv_param_pool;
486
487 memset(&ptr->param, 0, sizeof(ptr->param));
488 return ptr;
489}
490
491/**
492 * release_param - Release a parameter buffer.
493 * @p: A pointer to a struct iucv_param, previously obtained by calling
494 * grab_param().
495 *
496 * This function marks the specified parameter buffer "unused".
497 */
498static __inline__ void
499release_param(void *p)
500{
501 atomic_set(&((iucv_param *)p)->in_use, 0);
502}
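
/*
 * Usage pattern (sketch): every IUCV primitive below follows the same
 * grab/fill/call/release sequence on the parameter pool, e.g.
 *
 *	iparml_control *parm = (iparml_control *)grab_param();
 *	parm->ippathid = pathid;
 *	rc = b2f0(SEVER, parm);
 *	release_param(parm);
 *
 * grab_param() never fails; it spins until a pool slot becomes free,
 * so a grabbed buffer must always be paired with release_param().
 */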
503
504/**
505 * iucv_add_handler: - Add a new handler
506 * @new: handler that is being entered into the chain.
507 *
508 * Places the new handler on iucv_handler_table if no identical handler
509 * is already registered.
510 *
511 * Returns: 0 on success, !0 on failure (handler already in chain).
512 */
513static int
514iucv_add_handler (handler *new)
515{
516 ulong flags;
517
518 iucv_debug(1, "entering");
519 iucv_dumpit("handler:", new, sizeof(handler));
520
521 spin_lock_irqsave (&iucv_lock, flags);
522 if (!list_empty(&iucv_handler_table)) {
523 struct list_head *lh;
524
525 /**
526 * Search list for handler with identical id. If one
527 * is found, the new handler is _not_ added.
528 */
529 list_for_each(lh, &iucv_handler_table) {
530 handler *h = list_entry(lh, handler, list);
531 if (!memcmp(&new->id, &h->id, sizeof(h->id))) {
532 iucv_debug(1, "ret 1");
533 spin_unlock_irqrestore (&iucv_lock, flags);
534 return 1;
535 }
536 }
537 }
538 /**
539 * If we get here, no handler was found.
540 */
541 INIT_LIST_HEAD(&new->list);
542 list_add(&new->list, &iucv_handler_table);
543 spin_unlock_irqrestore (&iucv_lock, flags);
544
545 iucv_debug(1, "exiting");
546 return 0;
547}
548
549/**
550 * b2f0:
551 * @code: identifier of IUCV call to CP.
552 * @parm: pointer to 40 byte iparml area passed to CP
553 *
554 * Calls CP to execute IUCV commands.
555 *
556 * Returns: return code from CP's IUCV call
557 */
558static __inline__ ulong
559b2f0(__u32 code, void *parm)
560{
561 iucv_dumpit("iparml before b2f0 call:", parm, sizeof(iucv_param));
562
563 asm volatile (
564 "LRA 1,0(%1)\n\t"
565 "LR 0,%0\n\t"
566 ".long 0xb2f01000"
567 :
568 : "d" (code), "a" (parm)
569 : "0", "1"
570 );
571
572 iucv_dumpit("iparml after b2f0 call:", parm, sizeof(iucv_param));
573
574 return (unsigned long)*((__u8 *)(parm + 3));
575}
576
577/*
578 * Name: iucv_add_pathid
579 * Purpose: Adds a path id to the system.
580 * Input: pathid - pathid that is going to be entered into system
581 * handle - address of handler that the pathid will be associated
582 * with.
583 * pgm_data - token passed in by application.
584 * Output: 0: successful addition of pathid
585 * - EINVAL - pathid entry is being used by another application
586 * - ENOMEM - storage allocation for a new pathid table failed
587*/
588static int
589__iucv_add_pathid(__u16 pathid, handler *handler)
590{
591
592 iucv_debug(1, "entering");
593
594 iucv_debug(1, "handler is pointing to %p", handler);
595
596 if (pathid > (max_connections - 1))
597 return -EINVAL;
598
599 if (iucv_pathid_table[pathid]) {
600 iucv_debug(1, "pathid entry is %p", iucv_pathid_table[pathid]);
601 printk(KERN_WARNING
602 "%s: Pathid being used, error.\n", __FUNCTION__);
603 return -EINVAL;
604 }
605 iucv_pathid_table[pathid] = handler;
606
607 iucv_debug(1, "exiting");
608 return 0;
609} /* end of add_pathid function */
610
611static int
612iucv_add_pathid(__u16 pathid, handler *handler)
613{
614 ulong flags;
615 int rc;
616
617 spin_lock_irqsave (&iucv_lock, flags);
618 rc = __iucv_add_pathid(pathid, handler);
619 spin_unlock_irqrestore (&iucv_lock, flags);
620 return rc;
621}
622
623static void
624iucv_remove_pathid(__u16 pathid)
625{
626 ulong flags;
627
628 if (pathid > (max_connections - 1))
629 return;
630
631 spin_lock_irqsave (&iucv_lock, flags);
632 iucv_pathid_table[pathid] = NULL;
633 spin_unlock_irqrestore (&iucv_lock, flags);
634}
635
636/**
637 * iucv_declare_buffer_cpuid
638 * Register at VM for subsequent IUCV operations. This is executed
639 * on the reserved CPU iucv_cpuid. Called from iucv_declare_buffer().
640 */
641static void
642iucv_declare_buffer_cpuid (void *result)
643{
644 iparml_db *parm;
645
646 parm = (iparml_db *)grab_param();
647 parm->ipbfadr1 = virt_to_phys(iucv_external_int_buffer);
648 if ((*((ulong *)result) = b2f0(DECLARE_BUFFER, parm)) == 1)
649 *((ulong *)result) = parm->iprcode;
650 release_param(parm);
651}
652
653/**
654 * iucv_retrieve_buffer_cpuid:
655 * Unregister IUCV usage at VM. This is always executed on the same
656 * cpu that registered the buffer to VM.
657 * Called from iucv_retrieve_buffer().
658 */
659static void
660iucv_retrieve_buffer_cpuid (void *cpu)
661{
662 iparml_control *parm;
663
664 parm = (iparml_control *)grab_param();
665 b2f0(RETRIEVE_BUFFER, parm);
666 release_param(parm);
667}
668
669/**
670 * Name: iucv_declare_buffer
671 * Purpose: Specifies the guest's real address of the external
672 * interrupt buffer.
673 * Input: void
674 * Output: iprcode - return code from b2f0 call
675 */
676static int
677iucv_declare_buffer (void)
678{
679 unsigned long flags;
680 ulong b2f0_result;
681
682 iucv_debug(1, "entering");
683 b2f0_result = -ENODEV;
684 spin_lock_irqsave (&iucv_lock, flags);
685 if (iucv_cpuid == -1) {
686 /* Reserve any cpu for use by iucv. */
687 iucv_cpuid = smp_get_cpu(CPU_MASK_ALL);
688 spin_unlock_irqrestore (&iucv_lock, flags);
689 smp_call_function_on(iucv_declare_buffer_cpuid,
690 &b2f0_result, 0, 1, iucv_cpuid);
691 if (b2f0_result) {
692 smp_put_cpu(iucv_cpuid);
693 iucv_cpuid = -1;
694 }
695 iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer);
696 } else {
697 spin_unlock_irqrestore (&iucv_lock, flags);
698 b2f0_result = 0;
699 }
700 iucv_debug(1, "exiting");
701 return b2f0_result;
702}
703
704/**
705 * iucv_retrieve_buffer:
706 *
707 * Terminates all use of IUCV.
708 * Returns: return code from CP
709 */
710static int
711iucv_retrieve_buffer (void)
712{
713 iucv_debug(1, "entering");
714 if (iucv_cpuid != -1) {
715 smp_call_function_on(iucv_retrieve_buffer_cpuid,
716 0, 0, 1, iucv_cpuid);
717 /* Release the cpu reserved by iucv_declare_buffer. */
718 smp_put_cpu(iucv_cpuid);
719 iucv_cpuid = -1;
720 }
721 iucv_debug(1, "exiting");
722 return 0;
723}
724
725/**
726 * iucv_remove_handler:
727 * @handler: handler to be removed
728 *
729 * Remove handler when application unregisters.
730 */
731static void
732iucv_remove_handler(handler *handler)
733{
734 unsigned long flags;
735
736 if ((!iucv_pathid_table) || (!handler))
737 return;
738
739 iucv_debug(1, "entering");
740
741 spin_lock_irqsave (&iucv_lock, flags);
742 list_del(&handler->list);
743 if (list_empty(&iucv_handler_table)) {
744 if (register_flag) {
745 unregister_external_interrupt(0x4000, iucv_irq_handler);
746 register_flag = 0;
747 }
748 }
749 spin_unlock_irqrestore (&iucv_lock, flags);
750
751 iucv_debug(1, "exiting");
752 return;
753}
754
755/**
756 * iucv_register_program:
757 * @pgmname: user identification
758 * @userid: machine identification
759 * @pgmmask: Indicates which bits in the pgmname and userid combined will be
760 * used to determine who is given control.
761 * @ops: Address of interrupt handler table.
762 * @pgm_data: Application data to be passed to interrupt handlers.
763 *
764 * Registers an application with IUCV.
765 * Returns:
766 * The address of handler, or NULL on failure.
767 * NOTE on pgmmask:
768 * If pgmname, userid and pgmmask are provided, pgmmask is entered into the
769 * handler as is.
770 * If pgmmask is NULL, the internal mask is set to all 0xff's.
771 * When userid is NULL, the first 8 bytes of the internal mask are forced
772 * to 0x00.
773 * If pgmmask and userid are NULL, the first 8 bytes of the internal mask
774 * are forced to 0x00 and the last 16 bytes to 0xff.
775 */
776
777iucv_handle_t
778iucv_register_program (__u8 pgmname[16],
779 __u8 userid[8],
780 __u8 pgmmask[24],
781 iucv_interrupt_ops_t * ops, void *pgm_data)
782{
783 ulong rc = 0; /* return code from function calls */
784 handler *new_handler;
785
786 iucv_debug(1, "entering");
787
788 if (ops == NULL) {
789 /* interrupt table is not defined */
790 printk(KERN_WARNING "%s: Interrupt table is not defined, "
791 "exiting\n", __FUNCTION__);
792 return NULL;
793 }
794 if (!pgmname) {
795 printk(KERN_WARNING "%s: pgmname not provided\n", __FUNCTION__);
796 return NULL;
797 }
798
799 /* Allocate handler entry */
800 new_handler = (handler *)kmalloc(sizeof(handler), GFP_ATOMIC);
801 if (new_handler == NULL) {
802 printk(KERN_WARNING "%s: storage allocation for new handler "
803 "failed.\n", __FUNCTION__);
804 return NULL;
805 }
806
807 if (!iucv_pathid_table) {
808 if (iucv_init()) {
809 kfree(new_handler);
810 return NULL;
811 }
812
813 max_connections = iucv_query_maxconn();
814 iucv_pathid_table = kmalloc(max_connections * sizeof(handler *),
815 GFP_ATOMIC);
816 if (iucv_pathid_table == NULL) {
817 printk(KERN_WARNING "%s: iucv_pathid_table storage "
818 "allocation failed\n", __FUNCTION__);
819 kfree(new_handler);
820 return NULL;
821 }
822 memset (iucv_pathid_table, 0, max_connections * sizeof(handler *));
823 }
824 memset(new_handler, 0, sizeof (handler));
825 memcpy(new_handler->id.user_data, pgmname,
826 sizeof (new_handler->id.user_data));
827 if (userid) {
828 memcpy (new_handler->id.userid, userid,
829 sizeof (new_handler->id.userid));
830 ASCEBC (new_handler->id.userid,
831 sizeof (new_handler->id.userid));
832 EBC_TOUPPER (new_handler->id.userid,
833 sizeof (new_handler->id.userid));
834
835 if (pgmmask) {
836 memcpy (new_handler->id.mask, pgmmask,
837 sizeof (new_handler->id.mask));
838 } else {
839 memset (new_handler->id.mask, 0xFF,
840 sizeof (new_handler->id.mask));
841 }
842 } else {
843 if (pgmmask) {
844 memcpy (new_handler->id.mask, pgmmask,
845 sizeof (new_handler->id.mask));
846 } else {
847 memset (new_handler->id.mask, 0xFF,
848 sizeof (new_handler->id.mask));
849 }
850 memset (new_handler->id.userid, 0x00,
851 sizeof (new_handler->id.userid));
852 }
853 /* fill in the rest of handler */
854 new_handler->pgm_data = pgm_data;
855 new_handler->interrupt_table = ops;
856
857 /*
858 * Check if someone else is registered with same pgmname, userid
859 * and mask. If someone is already registered with same pgmname,
860 * userid and mask, registration will fail and NULL will be returned
861 * to the application.
862 * If no identical handler is found, the handler is added to the list.
863 */
864 rc = iucv_add_handler(new_handler);
865 if (rc) {
866 printk(KERN_WARNING "%s: Someone already registered with same "
867 "pgmname, userid, pgmmask\n", __FUNCTION__);
868 kfree (new_handler);
869 return NULL;
870 }
871
872 rc = iucv_declare_buffer();
873 if (rc) {
874 char *err = "Unknown";
875 iucv_remove_handler(new_handler);
876 kfree(new_handler);
877 switch(rc) {
878 case 0x03:
879 err = "Directory error";
880 break;
881 case 0x0a:
882 err = "Invalid length";
883 break;
884 case 0x13:
885 err = "Buffer already exists";
886 break;
887 case 0x3e:
888 err = "Buffer overlap";
889 break;
890 case 0x5c:
891 err = "Paging or storage error";
892 break;
893 }
894 printk(KERN_WARNING "%s: iucv_declare_buffer "
895 "returned error 0x%02lx (%s)\n", __FUNCTION__, rc, err);
896 return NULL;
897 }
898 if (!register_flag) {
899 /* request the 0x4000 external interrupt */
900 rc = register_external_interrupt (0x4000, iucv_irq_handler);
901 if (rc) {
902 iucv_remove_handler(new_handler);
903 kfree (new_handler);
904 printk(KERN_WARNING "%s: "
905 "register_external_interrupt returned %ld\n",
906 __FUNCTION__, rc);
907 return NULL;
908
909 }
910 register_flag = 1;
911 }
912 iucv_debug(1, "exiting");
913 return new_handler;
914} /* end of register function */
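
/*
 * Registration sketch (illustrative, not from the original source):
 * register a program that accepts connections from any z/VM user.
 * The callback table field names and interrupt structure types are
 * assumed from the companion iucv.h; all my_* names are hypothetical.
 */
#if 0	/* example only, never compiled */
static void
my_connection_pending(iucv_ConnectionPending *ev, void *pgm_data)
{
	/* e.g. iucv_accept(ev->ippathid, ...) would complete the path */
}

static void
my_message_pending(iucv_MessagePending *ev, void *pgm_data)
{
	/* see the receive sketch further down */
}

static iucv_interrupt_ops_t my_ops = {
	.ConnectionPending = my_connection_pending,
	.MessagePending    = my_message_pending,
};

/* 16-byte program name, blank padded like all IUCV identifiers. */
static __u8 my_pgmname[16] = "MYSERVER        ";
static iucv_handle_t my_handle;

static int
my_setup(void)
{
	/* NULL userid/pgmmask: accept connections from any user. */
	my_handle = iucv_register_program(my_pgmname, NULL, NULL,
					  &my_ops, NULL);
	return my_handle ? 0 : -EIO;
}
#endif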
915
916/**
917 * iucv_unregister_program:
918 * @handle: address of handler
919 *
920 * Unregister application with IUCV.
921 * Returns:
922 * 0 on success, -EINVAL, if specified handle is invalid.
923 */
924
925int
926iucv_unregister_program (iucv_handle_t handle)
927{
928 handler *h = NULL;
929 struct list_head *lh;
930 int i;
931 ulong flags;
932
933 iucv_debug(1, "entering");
934 iucv_debug(1, "address of handler is %p", handle);
935
936 /* Checking if handle is valid */
937 spin_lock_irqsave (&iucv_lock, flags);
938 list_for_each(lh, &iucv_handler_table) {
939 if ((handler *)handle == list_entry(lh, handler, list)) {
940 h = (handler *)handle;
941 break;
942 }
943 }
944 if (!h) {
945 spin_unlock_irqrestore (&iucv_lock, flags);
946 if (handle)
947 printk(KERN_WARNING
948 "%s: Handler not found in iucv_handler_table.\n",
949 __FUNCTION__);
950 else
951 printk(KERN_WARNING
952 "%s: NULL handle passed by application.\n",
953 __FUNCTION__);
954 return -EINVAL;
955 }
956
957 /**
958 * First, walk thru iucv_pathid_table and sever any pathid which is
959 * still pointing to the handler to be removed.
960 */
961 for (i = 0; i < max_connections; i++)
962 if (iucv_pathid_table[i] == h) {
963 spin_unlock_irqrestore (&iucv_lock, flags);
964 iucv_sever(i, h->id.user_data);
965 spin_lock_irqsave(&iucv_lock, flags);
966 }
967 spin_unlock_irqrestore (&iucv_lock, flags);
968
969 iucv_remove_handler(h);
970 kfree(h);
971
972 iucv_debug(1, "exiting");
973 return 0;
974}
975
976/**
977 * iucv_accept:
978 * @pathid: Path identification number
979 * @msglim_reqstd: The number of outstanding messages requested.
980 * @user_data: Data specified by the iucv_connect function.
981 * @flags1: Contains options for this path.
982 * - IPPRTY (0x20) Specifies if you want to send priority message.
983 * - IPRMDATA (0x80) Specifies whether your program can handle a message
984 * in the parameter list.
985 * - IPQUSCE (0x40) Specifies whether you want to quiesce the path being
986 * established.
987 * @handle: Address of handler.
988 * @pgm_data: Application data passed to interrupt handlers.
989 * @flags1_out: Pointer to an int. If not NULL, on return the options for
990 * the path are stored at the given location:
991 * - IPPRTY (0x20) Indicates you may send a priority message.
992 * @msglim: Pointer to an __u16. If not NULL, on return the maximum
993 * number of outstanding messages is stored at the given
994 * location.
995 *
996 * This function is issued after the user receives a Connection Pending external
997 * interrupt and now wishes to complete the IUCV communication path.
998 * Returns:
999 * return code from CP
1000 */
1001int
1002iucv_accept(__u16 pathid, __u16 msglim_reqstd,
1003 __u8 user_data[16], int flags1,
1004 iucv_handle_t handle, void *pgm_data,
1005 int *flags1_out, __u16 * msglim)
1006{
1007 ulong b2f0_result = 0;
1008 ulong flags;
1009 struct list_head *lh;
1010 handler *h = NULL;
1011 iparml_control *parm;
1012
1013 iucv_debug(1, "entering");
1014 iucv_debug(1, "pathid = %d", pathid);
1015
1016 /* Checking if handle is valid */
1017 spin_lock_irqsave (&iucv_lock, flags);
1018 list_for_each(lh, &iucv_handler_table) {
1019 if ((handler *)handle == list_entry(lh, handler, list)) {
1020 h = (handler *)handle;
1021 break;
1022 }
1023 }
1024 spin_unlock_irqrestore (&iucv_lock, flags);
1025
1026 if (!h) {
1027 if (handle)
1028 printk(KERN_WARNING
1029 "%s: Handler not found in iucv_handler_table.\n",
1030 __FUNCTION__);
1031 else
1032 printk(KERN_WARNING
1033 "%s: NULL handle passed by application.\n",
1034 __FUNCTION__);
1035 return -EINVAL;
1036 }
1037
1038 parm = (iparml_control *)grab_param();
1039
1040 parm->ippathid = pathid;
1041 parm->ipmsglim = msglim_reqstd;
1042 if (user_data)
1043 memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
1044
1045 parm->ipflags1 = (__u8)flags1;
1046 b2f0_result = b2f0(ACCEPT, parm);
1047
1048 if (!b2f0_result) {
1049 if (msglim)
1050 *msglim = parm->ipmsglim;
1051 if (pgm_data)
1052 h->pgm_data = pgm_data;
1053 if (flags1_out)
1054 *flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0;
1055 }
1056 release_param(parm);
1057
1058 iucv_debug(1, "exiting");
1059 return b2f0_result;
1060}
1061
1062/**
1063 * iucv_connect:
1064 * @pathid: Path identification number
1065 * @msglim_reqstd: Number of outstanding messages requested
1066 * @user_data: 16-byte user data
1067 * @userid: 8-byte of user identification
1068 * @system_name: 8-byte identifying the system name
1069 * @flags1: Specifies options for this path:
1070 * - IPPRTY (0x20) Specifies if you want to send priority message.
1071 * - IPRMDATA (0x80) Specifies whether your program can handle a message
1072 * in the parameter list.
1073 * - IPQUSCE (0x40) Specifies whether you want to quiesce the path being
1074 * established.
1075 * - IPLOCAL (0x01) Allows an application to force the partner to be on the
1076 * local system. If local is specified then target class
1077 * cannot be specified.
1078 * @flags1_out: Pointer to an int. If not NULL, on return the options for
1079 * the path are stored at the given location:
1080 * - IPPRTY (0x20) Indicates you may send a priority message.
1081 * @msglim: Pointer to an __u16. If not NULL, on return the maximum
1082 * number of outstanding messages is stored at the given
1083 * location.
1084 * @handle: Address of handler.
1085 * @pgm_data: Application data to be passed to interrupt handlers.
1086 *
1087 * This function establishes an IUCV path. Although the connect may complete
1088 * successfully, you are not able to use the path until you receive an IUCV
1089 * Connection Complete external interrupt.
1090 * Returns: return code from CP, or one of the following
1091 * - ENOMEM
1092 * - return code from iucv_declare_buffer
1093 * - EINVAL - invalid handle passed by application
1094 * - EINVAL - pathid address is NULL
1095 * - ENOMEM - pathid table storage allocation failed
1096 * - return code from internal function add_pathid
1097 */
1098int
1099iucv_connect (__u16 *pathid, __u16 msglim_reqstd,
1100 __u8 user_data[16], __u8 userid[8],
1101 __u8 system_name[8], int flags1,
1102 int *flags1_out, __u16 * msglim,
1103 iucv_handle_t handle, void *pgm_data)
1104{
1105 iparml_control *parm;
1106 iparml_control local_parm;
1107 struct list_head *lh;
1108 ulong b2f0_result = 0;
1109 ulong flags;
1110 int add_pathid_result = 0;
1111 handler *h = NULL;
1112 __u8 no_memory[16] = "NO MEMORY";
1113
1114 iucv_debug(1, "entering");
1115
1116 /* Checking if handle is valid */
1117 spin_lock_irqsave (&iucv_lock, flags);
1118 list_for_each(lh, &iucv_handler_table) {
1119 if ((handler *)handle == list_entry(lh, handler, list)) {
1120 h = (handler *)handle;
1121 break;
1122 }
1123 }
1124 spin_unlock_irqrestore (&iucv_lock, flags);
1125
1126 if (!h) {
1127 if (handle)
1128 printk(KERN_WARNING
1129 "%s: Handler not found in iucv_handler_table.\n",
1130 __FUNCTION__);
1131 else
1132 printk(KERN_WARNING
1133 "%s: NULL handle passed by application.\n",
1134 __FUNCTION__);
1135 return -EINVAL;
1136 }
1137
1138 if (pathid == NULL) {
1139 printk(KERN_WARNING "%s: NULL pathid pointer\n",
1140 __FUNCTION__);
1141 return -EINVAL;
1142 }
1143
1144 parm = (iparml_control *)grab_param();
1145
1146 parm->ipmsglim = msglim_reqstd;
1147
1148 if (user_data)
1149 memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
1150
1151 if (userid) {
1152 memcpy(parm->ipvmid, userid, sizeof(parm->ipvmid));
1153 ASCEBC(parm->ipvmid, sizeof(parm->ipvmid));
1154 EBC_TOUPPER(parm->ipvmid, sizeof(parm->ipvmid));
1155 }
1156
1157 if (system_name) {
1158 memcpy(parm->iptarget, system_name, sizeof(parm->iptarget));
1159 ASCEBC(parm->iptarget, sizeof(parm->iptarget));
1160 EBC_TOUPPER(parm->iptarget, sizeof(parm->iptarget));
1161 }
1162
1163 /* In order to establish an IUCV connection, the procedure is:
1164 *
1165 * b2f0(CONNECT)
1166 * take the ippathid from the b2f0 call
1167 * register the handler to the ippathid
1168 *
1169 * Unfortunately, the ConnectionEstablished message gets sent after the
1170 * b2f0(CONNECT) call but before the register is handled.
1171 *
1172 * In order for this race condition to be eliminated, the IUCV Control
1173 * Interrupts must be disabled for the above procedure.
1174 *
1175 * David Kennedy <dkennedy@linuxcare.com>
1176 */
1177
1178 /* Enable everything but IUCV Control messages */
1179 iucv_setmask(~(AllInterrupts));
1180 messagesDisabled = 1;
1181
1182 spin_lock_irqsave (&iucv_lock, flags);
1183 parm->ipflags1 = (__u8)flags1;
1184 b2f0_result = b2f0(CONNECT, parm);
1185 memcpy(&local_parm, parm, sizeof(local_parm));
1186 release_param(parm);
1187 parm = &local_parm;
1188 if (!b2f0_result)
1189 add_pathid_result = __iucv_add_pathid(parm->ippathid, h);
1190 spin_unlock_irqrestore (&iucv_lock, flags);
1191
1192 if (b2f0_result) {
1193 iucv_setmask(~0);
1194 messagesDisabled = 0;
1195 return b2f0_result;
1196 }
1197
1198 *pathid = parm->ippathid;
1199
1200 /* Enable everything again */
1201 iucv_setmask(IUCVControlInterruptsFlag);
1202
1203 if (msglim)
1204 *msglim = parm->ipmsglim;
1205 if (flags1_out)
1206 *flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0;
1207
1208 if (add_pathid_result) {
1209 iucv_sever(*pathid, no_memory);
1210 printk(KERN_WARNING "%s: add_pathid failed with rc ="
1211 " %d\n", __FUNCTION__, add_pathid_result);
1212 return(add_pathid_result);
1213 }
1214
1215 iucv_debug(1, "exiting");
1216 return b2f0_result;
1217}
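
/*
 * Connection sketch (illustrative): open a path to a peer guest using
 * the handle obtained from iucv_register_program(). "PEERUSER" and the
 * my_* names are hypothetical; the path only becomes usable once the
 * ConnectionComplete external interrupt arrives.
 */
#if 0	/* example only, never compiled */
static __u16 my_pathid;

static int
my_connect(iucv_handle_t handle)
{
	static __u8 peer[8] = "PEERUSER";	/* 8 chars, no terminator */

	/* Request up to 10 outstanding messages, no special flags. */
	return iucv_connect(&my_pathid, 10, NULL, peer, NULL, 0,
			    NULL, NULL, handle, NULL);
}
#endif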
1218
1219/**
1220 * iucv_purge:
1221 * @pathid: Path identification number
1222 * @msgid: Message ID of message to purge.
1223 * @srccls: Message class of the message to purge.
1224 * @audit: Pointer to an __u32. If not NULL, on return, information about
1225 * asynchronous errors that may have affected the normal completion
1226 * of this message is stored at the given location.
1227 *
1228 * Cancels a message you have sent.
1229 * Returns: return code from CP
1230 */
1231int
1232iucv_purge (__u16 pathid, __u32 msgid, __u32 srccls, __u32 *audit)
1233{
1234 iparml_purge *parm;
1235 ulong b2f0_result = 0;
1236
1237 iucv_debug(1, "entering");
1238 iucv_debug(1, "pathid = %d", pathid);
1239
1240 parm = (iparml_purge *)grab_param();
1241
1242 parm->ipmsgid = msgid;
1243 parm->ippathid = pathid;
1244 parm->ipsrccls = srccls;
1245 parm->ipflags1 |= (IPSRCCLS | IPFGMID | IPFGPID);
1246 b2f0_result = b2f0(PURGE, parm);
1247
1248 if (!b2f0_result && audit) {
1249 memcpy(audit, parm->ipaudit, sizeof(parm->ipaudit));
1250 /* parm->ipaudit has only 3 bytes */
1251 *audit >>= 8;
1252 }
1253
1254 release_param(parm);
1255
1256 iucv_debug(1, "b2f0_result = %ld", b2f0_result);
1257 iucv_debug(1, "exiting");
1258 return b2f0_result;
1259}
1260
1261/**
1262 * iucv_query_generic:
1263 * @want_maxconn: Flag, describing which value is to be returned.
1264 *
1265 * Helper function for iucv_query_maxconn() and iucv_query_bufsize().
1266 *
1267 * Returns: The buffer size, if want_maxconn is 0; the maximum number of
1268 * connections, if want_maxconn is 1; or an error code < 0 on failure.
1269 */
1270static int
1271iucv_query_generic(int want_maxconn)
1272{
1273 iparml_purge *parm = (iparml_purge *)grab_param();
1274 int bufsize, maxconn;
1275 int ccode;
1276
1277 /**
1278 * Call b2f0 and store R0 (max buffer size),
1279 * R1 (max connections) and CC.
1280 */
1281 asm volatile (
1282 "LRA 1,0(%4)\n\t"
1283 "LR 0,%3\n\t"
1284 ".long 0xb2f01000\n\t"
1285 "IPM %0\n\t"
1286 "SRL %0,28\n\t"
1287 "ST 0,%1\n\t"
1288 "ST 1,%2\n\t"
1289 : "=d" (ccode), "=m" (bufsize), "=m" (maxconn)
1290 : "d" (QUERY), "a" (parm)
1291 : "0", "1", "cc"
1292 );
1293 release_param(parm);
1294
1295 if (ccode)
1296 return -EPERM;
1297 if (want_maxconn)
1298 return maxconn;
1299 return bufsize;
1300}
1301
1302/**
1303 * iucv_query_maxconn:
1304 *
1305 * Determines the maximum number of connections that may be established.
1306 *
1307 * Returns: Maximum number of connections that can be established.
1308 */
1309ulong
1310iucv_query_maxconn(void)
1311{
1312 return iucv_query_generic(1);
1313}
1314
1315/**
1316 * iucv_query_bufsize:
1317 *
1318 * Determines the size of the external interrupt buffer.
1319 *
1320 * Returns: Size of external interrupt buffer.
1321 */
1322ulong
1323iucv_query_bufsize (void)
1324{
1325 return iucv_query_generic(0);
1326}
1327
1328/**
1329 * iucv_quiesce:
1330 * @pathid: Path identification number
1331 * @user_data: 16-byte user data
1332 *
1333 * Temporarily suspends incoming messages on an IUCV path.
1334 * You can later reactivate the path by invoking the iucv_resume function.
1335 * Returns: return code from CP
1336 */
1337int
1338iucv_quiesce (__u16 pathid, __u8 user_data[16])
1339{
1340 iparml_control *parm;
1341 ulong b2f0_result = 0;
1342
1343 iucv_debug(1, "entering");
1344 iucv_debug(1, "pathid = %d", pathid);
1345
1346 parm = (iparml_control *)grab_param();
1347
1348 memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
1349 parm->ippathid = pathid;
1350
1351 b2f0_result = b2f0(QUIESCE, parm);
1352 release_param(parm);
1353
1354 iucv_debug(1, "b2f0_result = %ld", b2f0_result);
1355 iucv_debug(1, "exiting");
1356
1357 return b2f0_result;
1358}
1359
1360/**
1361 * iucv_receive:
1362 * @pathid: Path identification number.
1363 * @buffer: Address of buffer to receive. Must be below 2G.
1364 * @buflen: Length of buffer to receive.
1365 * @msgid: Specifies the message ID.
1366 * @trgcls: Specifies target class.
1367 * @flags1_out: Receives options for path on return.
1368 * - IPNORPY (0x10) Specifies whether a reply is required
1369 * - IPPRTY (0x20) Specifies if you want to send priority message
1370 * - IPRMDATA (0x80) Specifies the data is contained in the parameter list
1371 * @residual_buffer: Receives the address of buffer updated by the number
1372 * of bytes you have received on return.
1373 * @residual_length: On return, receives one of the following values:
1374 * - 0 If the receive buffer is the same length as
1375 * the message.
1376 * - Remaining bytes in buffer If the receive buffer is longer than the
1377 * message.
1378 * - Remaining bytes in message If the receive buffer is shorter than the
1379 * message.
1380 *
1381 * This function receives messages that are being sent to you over established
1382 * paths.
1383 * Returns: return code from the CP IUCV call; always 5 if the receive
1384 * buffer is shorter than the message;
1385 * -EINVAL - buffer address is NULL
1386 */
1387int
1388iucv_receive (__u16 pathid, __u32 msgid, __u32 trgcls,
1389 void *buffer, ulong buflen,
1390 int *flags1_out, ulong * residual_buffer, ulong * residual_length)
1391{
1392 iparml_db *parm;
1393 ulong b2f0_result;
1394 int moved = 0; /* number of bytes moved from parmlist to buffer */
1395
1396 iucv_debug(2, "entering");
1397
1398 if (!buffer)
1399 return -EINVAL;
1400
1401 parm = (iparml_db *)grab_param();
1402
1403 parm->ipbfadr1 = (__u32) (addr_t) buffer;
1404 parm->ipbfln1f = (__u32) ((ulong) buflen);
1405 parm->ipmsgid = msgid;
1406 parm->ippathid = pathid;
1407 parm->iptrgcls = trgcls;
1408 parm->ipflags1 = (IPFGPID | IPFGMID | IPFGMCL);
1409
1410 b2f0_result = b2f0(RECEIVE, parm);
1411
1412 if (!b2f0_result || b2f0_result == 5) {
1413 if (flags1_out) {
1414 iucv_debug(2, "*flags1_out = %d", *flags1_out);
1415 *flags1_out = (parm->ipflags1 & (~0x07));
1416 iucv_debug(2, "*flags1_out = %d", *flags1_out);
1417 }
1418
1419 if (!(parm->ipflags1 & IPRMDATA)) { /*msg not in parmlist */
1420 if (residual_length)
1421 *residual_length = parm->ipbfln1f;
1422
1423 if (residual_buffer)
1424 *residual_buffer = parm->ipbfadr1;
1425 } else {
1426 moved = min_t (unsigned long, buflen, 8);
1427
1428 memcpy ((char *) buffer,
1429 (char *) &parm->ipbfadr1, moved);
1430
1431 if (buflen < 8)
1432 b2f0_result = 5;
1433
1434 if (residual_length)
1435 *residual_length = abs (buflen - 8);
1436
1437 if (residual_buffer)
1438 *residual_buffer = (ulong) (buffer + moved);
1439 }
1440 }
1441 release_param(parm);
1442
1443 iucv_debug(2, "exiting");
1444 return b2f0_result;
1445}
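
/*
 * Receive sketch (illustrative): pull a pending message into a local
 * buffer, typically from the MessagePending callback. The interrupt
 * structure field names are assumed from the companion iucv.h, and
 * the buffer must reside below 2G.
 */
#if 0	/* example only, never compiled */
static void
my_message_pending(iucv_MessagePending *msg, void *pgm_data)
{
	static __u8 rcvbuf[256];
	ulong residual = 0;
	int rc;

	rc = iucv_receive(msg->ippathid, msg->ipmsgid, msg->iptrgcls,
			  rcvbuf, sizeof(rcvbuf), NULL, NULL, &residual);
	if (rc == 5) {
		/* Message longer than rcvbuf; 'residual' bytes remain. */
	}
}
#endif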
1446
1447/*
1448 * Name: iucv_receive_array
1449 * Purpose: This function receives messages that are being sent to you
1450 * over established paths.
1451 * Input: pathid - path identification number
1452 * buffer - address of array of buffers
1453 * buflen - total length of buffers
1454 * msgid - specifies the message ID.
1455 * trgcls - specifies target class
1456 * Output:
1457 * flags1_out: Options for path.
1458 * IPNORPY - 0x10 specifies whether a reply is required
1459 * IPPRTY - 0x20 specifies if you want to send priority message
1460 * IPRMDATA - 0x80 specifies the data is contained in the parameter list
1461 * residual_buffer - address points to the current list entry IUCV
1462 * is working on.
1463 * residual_length -
1464 * Contains one of the following values, if the receive buffer is:
1465 * The same length as the message, this field is zero.
1466 * Longer than the message, this field contains the number of
1467 * bytes remaining in the buffer.
1468 * Shorter than the message, this field contains the residual
1469 * count (that is, the number of bytes remaining in the
1470 * message that does not fit into the buffer). In this case
1471 * b2f0_result = 5.
1472 * Return: b2f0_result - return code from CP
1473 * (-EINVAL) - buffer address is NULL
1474 */
1475int
1476iucv_receive_array (__u16 pathid,
1477 __u32 msgid, __u32 trgcls,
1478 iucv_array_t * buffer, ulong buflen,
1479 int *flags1_out,
1480 ulong * residual_buffer, ulong * residual_length)
1481{
1482 iparml_db *parm;
1483 ulong b2f0_result;
1484 int i = 0, moved = 0, need_to_move = 8, dyn_len;
1485
1486 iucv_debug(2, "entering");
1487
1488 if (!buffer)
1489 return -EINVAL;
1490
1491 parm = (iparml_db *)grab_param();
1492
1493 parm->ipbfadr1 = (__u32) ((ulong) buffer);
1494 parm->ipbfln1f = (__u32) buflen;
1495 parm->ipmsgid = msgid;
1496 parm->ippathid = pathid;
1497 parm->iptrgcls = trgcls;
1498 parm->ipflags1 = (IPBUFLST | IPFGPID | IPFGMID | IPFGMCL);
1499
1500 b2f0_result = b2f0(RECEIVE, parm);
1501
1502 if (!b2f0_result || b2f0_result == 5) {
1503
1504 if (flags1_out) {
1505 iucv_debug(2, "*flags1_out = %d", *flags1_out);
1506 *flags1_out = (parm->ipflags1 & (~0x07));
1507 iucv_debug(2, "*flags1_out = %d", *flags1_out);
1508 }
1509
1510 if (!(parm->ipflags1 & IPRMDATA)) { /*msg not in parmlist */
1511
1512 if (residual_length)
1513 *residual_length = parm->ipbfln1f;
1514
1515 if (residual_buffer)
1516 *residual_buffer = parm->ipbfadr1;
1517
1518 } else {
1519 /* copy msg from parmlist to users array. */
1520
1521 while ((moved < 8) && (moved < buflen)) {
1522 dyn_len =
1523 min_t (unsigned int,
1524 (buffer + i)->length, need_to_move);
1525
1526 memcpy ((char *)((ulong)((buffer + i)->address)),
1527 ((char *) &parm->ipbfadr1) + moved,
1528 dyn_len);
1529
1530 moved += dyn_len;
1531 need_to_move -= dyn_len;
1532
1533 (buffer + i)->address =
1534 (__u32)
1535 ((ulong)(__u8 *) ((ulong)(buffer + i)->address)
1536 + dyn_len);
1537
1538 (buffer + i)->length -= dyn_len;
1539 i++;
1540 }
1541
1542 if (need_to_move) /* buflen < 8 bytes */
1543 b2f0_result = 5;
1544
1545 if (residual_length)
1546 *residual_length = abs (buflen - 8);
1547
1548 if (residual_buffer) {
1549 if (!moved)
1550 *residual_buffer = (ulong) buffer;
1551 else
1552 *residual_buffer =
1553 (ulong) (buffer + (i - 1));
1554 }
1555
1556 }
1557 }
1558 release_param(parm);
1559
1560 iucv_debug(2, "exiting");
1561 return b2f0_result;
1562}
1563
1564/**
1565 * iucv_reject:
1566 * @pathid: Path identification number.
1567 * @msgid: Message ID of the message to reject.
1568 * @trgcls: Target class of the message to reject.
1569 * Returns: return code from CP
1570 *
1571 * Refuses a specified message. Between the time you are notified of a
1572 * message and the time that you complete the message, the message may
1573 * be rejected.
1574 */
1575int
1576iucv_reject (__u16 pathid, __u32 msgid, __u32 trgcls)
1577{
1578 iparml_db *parm;
1579 ulong b2f0_result = 0;
1580
1581 iucv_debug(1, "entering");
1582 iucv_debug(1, "pathid = %d", pathid);
1583
1584 parm = (iparml_db *)grab_param();
1585
1586 parm->ippathid = pathid;
1587 parm->ipmsgid = msgid;
1588 parm->iptrgcls = trgcls;
1589 parm->ipflags1 = (IPFGMCL | IPFGMID | IPFGPID);
1590
1591 b2f0_result = b2f0(REJECT, parm);
1592 release_param(parm);
1593
1594 iucv_debug(1, "b2f0_result = %ld", b2f0_result);
1595 iucv_debug(1, "exiting");
1596
1597 return b2f0_result;
1598}
1599
1600/*
1601 * Name: iucv_reply
1602 * Purpose: This function responds to the two-way messages that you
1603 * receive. You must identify completely the message to
1604 * which you wish to reply, i.e. pathid, msgid, and trgcls.
1605 * Input: pathid - path identification number
1606 * msgid - specifies the message ID.
1607 * trgcls - specifies target class
1608 * flags1 - option for path
1609 * IPPRTY- 0x20 - specifies if you want to send priority message
1610 * buffer - address of reply buffer
1611 * buflen - length of reply buffer
1612 * Output: ipbfadr2 - Address of buffer updated by the number
1613 * of bytes you have moved.
1614 * ipbfln2f - Contains one of the following values:
1615 * If the answer buffer is the same length as the reply, this field
1616 * contains zero.
1617 * If the answer buffer is longer than the reply, this field contains
1618 * the number of bytes remaining in the buffer.
1619 * If the answer buffer is shorter than the reply, this field contains
1620 * a residual count (that is, the number of bytes remaining in the
1621 * reply that does not fit into the buffer). In this
1622 * case b2f0_result = 5.
1623 * Return: b2f0_result - return code from CP
1624 * (-EINVAL) - buffer address is NULL
1625 */
1626int
1627iucv_reply (__u16 pathid,
1628 __u32 msgid, __u32 trgcls,
1629 int flags1,
1630 void *buffer, ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f)
1631{
1632 iparml_db *parm;
1633 ulong b2f0_result;
1634
1635 iucv_debug(2, "entering");
1636
1637 if (!buffer)
1638 return -EINVAL;
1639
1640 parm = (iparml_db *)grab_param();
1641
1642 parm->ipbfadr2 = (__u32) ((ulong) buffer);
1643 parm->ipbfln2f = (__u32) buflen; /* length of message */
1644 parm->ippathid = pathid;
1645 parm->ipmsgid = msgid;
1646 parm->iptrgcls = trgcls;
1647 parm->ipflags1 = (__u8) flags1; /* priority message */
1648
1649 b2f0_result = b2f0(REPLY, parm);
1650
1651 if ((!b2f0_result) || (b2f0_result == 5)) {
1652 if (ipbfadr2)
1653 *ipbfadr2 = parm->ipbfadr2;
1654 if (ipbfln2f)
1655 *ipbfln2f = parm->ipbfln2f;
1656 }
1657 release_param(parm);
1658
1659 iucv_debug(2, "exiting");
1660
1661 return b2f0_result;
1662}
1663
1664/*
1665 * Name: iucv_reply_array
1666 * Purpose: This function responds to the two-way messages that you
1667 * receive. You must identify completely the message to
1668 * which you wish to reply, i.e. pathid, msgid, and trgcls.
1669 * The array identifies a list of addresses and lengths of
1670 * discontiguous buffers that contains the reply data.
1671 * Input: pathid - path identification number
1672 * msgid - specifies the message ID.
1673 * trgcls - specifies target class
1674 * flags1 - option for path
1675 * IPPRTY- specifies if you want to send priority message
1676 * buffer - address of array of reply buffers
1677 * buflen - total length of reply buffers
1678 * Output: ipbfadr2 - Address of buffer which IUCV is currently working on.
1679 * ipbfln2f - Contains one of the following values:
1680 * If the answer buffer is the same length as the reply, this field
1681 * contains zero.
1682 * If the answer buffer is longer than the reply, this field contains
1683 * the number of bytes remaining in the buffer.
1684 * If the answer buffer is shorter than the reply, this field contains
1685 * a residual count (that is, the number of bytes remaining in the
1686 * reply that does not fit into the buffer). In this
1687 * case b2f0_result = 5.
1688 * Return: b2f0_result - return code from CP
1689 * (-EINVAL) - buffer address is NULL
1690*/
1691int
1692iucv_reply_array (__u16 pathid,
1693 __u32 msgid, __u32 trgcls,
1694 int flags1,
1695 iucv_array_t * buffer,
1696 ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f)
1697{
1698 iparml_db *parm;
1699 ulong b2f0_result;
1700
1701 iucv_debug(2, "entering");
1702
1703 if (!buffer)
1704 return -EINVAL;
1705
1706 parm = (iparml_db *)grab_param();
1707
1708 parm->ipbfadr2 = (__u32) ((ulong) buffer);
1709 parm->ipbfln2f = buflen; /* length of message */
1710 parm->ippathid = pathid;
1711 parm->ipmsgid = msgid;
1712 parm->iptrgcls = trgcls;
1713 parm->ipflags1 = (IPANSLST | flags1);
1714
1715 b2f0_result = b2f0(REPLY, parm);
1716
1717 if ((!b2f0_result) || (b2f0_result == 5)) {
1718
1719 if (ipbfadr2)
1720 *ipbfadr2 = parm->ipbfadr2;
1721 if (ipbfln2f)
1722 *ipbfln2f = parm->ipbfln2f;
1723 }
1724 release_param(parm);
1725
1726 iucv_debug(2, "exiting");
1727
1728 return b2f0_result;
1729}
1730
1731/*
1732 * Name: iucv_reply_prmmsg
1733 * Purpose: This function responds to the two-way messages that you
1734 * receive. You must identify completely the message to
1735 * which you wish to reply, i.e. pathid, msgid, and trgcls.
1736 * Prmmsg signifies the data is moved into the
1737 * parameter list.
1738 * Input: pathid - path identification number
1739 * msgid - specifies the message ID.
1740 * trgcls - specifies target class
1741 * flags1 - option for path
1742 * IPPRTY- specifies if you want to send priority message
1743 * prmmsg - 8-bytes of data to be placed into the parameter
1744 * list.
1745 * Output: NA
1746 * Return: b2f0_result - return code from CP
1747*/
1748int
1749iucv_reply_prmmsg (__u16 pathid,
1750 __u32 msgid, __u32 trgcls, int flags1, __u8 prmmsg[8])
1751{
1752 iparml_dpl *parm;
1753 ulong b2f0_result;
1754
1755 iucv_debug(2, "entering");
1756
1757 parm = (iparml_dpl *)grab_param();
1758
1759 parm->ippathid = pathid;
1760 parm->ipmsgid = msgid;
1761 parm->iptrgcls = trgcls;
1762 memcpy(parm->iprmmsg, prmmsg, sizeof (parm->iprmmsg));
1763 parm->ipflags1 = (IPRMDATA | flags1);
1764
1765 b2f0_result = b2f0(REPLY, parm);
1766 release_param(parm);
1767
1768 iucv_debug(2, "exiting");
1769
1770 return b2f0_result;
1771}
1772
1773/**
1774 * iucv_resume:
1775 * @pathid: Path identification number
1776 * @user_data: 16 bytes of user data
1777 *
1778 * This function restores communication over a quiesced path.
1779 * Returns: return code from CP
1780 */
1781int
1782iucv_resume (__u16 pathid, __u8 user_data[16])
1783{
1784 iparml_control *parm;
1785 ulong b2f0_result = 0;
1786
1787 iucv_debug(1, "entering");
1788 iucv_debug(1, "pathid = %d", pathid);
1789
1790 parm = (iparml_control *)grab_param();
1791
1792 memcpy (parm->ipuser, user_data, sizeof (parm->ipuser));
1793 parm->ippathid = pathid;
1794
1795 b2f0_result = b2f0(RESUME, parm);
1796 release_param(parm);
1797
1798 iucv_debug(1, "exiting");
1799
1800 return b2f0_result;
1801}
1802
1803/*
1804 * Name: iucv_send
1805 * Purpose: sends messages
1806 * Input: pathid - ushort, pathid
1807 * msgid - ulong *, id of message returned to caller
1808 * trgcls - ulong, target message class
1809 * srccls - ulong, source message class
1810 * msgtag - ulong, message tag
1811 * flags1 - Contains options for this path.
1812 * IPPRTY - 0x20 - specifies if you want to send a priority message.
1813 * buffer - pointer to buffer
1814 * buflen - ulong, length of buffer
1815 * Output: b2f0_result - return code from b2f0 call
1816 * msgid - returns message id
1817 */
1818int
1819iucv_send (__u16 pathid, __u32 * msgid,
1820 __u32 trgcls, __u32 srccls,
1821 __u32 msgtag, int flags1, void *buffer, ulong buflen)
1822{
1823 iparml_db *parm;
1824 ulong b2f0_result;
1825
1826 iucv_debug(2, "entering");
1827
1828 if (!buffer)
1829 return -EINVAL;
1830
1831 parm = (iparml_db *)grab_param();
1832
1833 parm->ipbfadr1 = (__u32) ((ulong) buffer);
1834 parm->ippathid = pathid;
1835 parm->iptrgcls = trgcls;
1836 parm->ipbfln1f = (__u32) buflen; /* length of message */
1837 parm->ipsrccls = srccls;
1838 parm->ipmsgtag = msgtag;
1839 parm->ipflags1 = (IPNORPY | flags1); /* one way priority message */
1840
1841 b2f0_result = b2f0(SEND, parm);
1842
1843 if ((!b2f0_result) && (msgid))
1844 *msgid = parm->ipmsgid;
1845 release_param(parm);
1846
1847 iucv_debug(2, "exiting");
1848
1849 return b2f0_result;
1850}
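
/*
 * Send sketch (illustrative): a one-way message on an established
 * path; the pathid would come from an earlier iucv_connect(). Source
 * and target class as well as the tag are simply left at zero here.
 */
#if 0	/* example only, never compiled */
static int
my_send_hello(__u16 pathid)
{
	static char hello[] = "hello";	/* buffer must be below 2G */
	__u32 msgid;

	return iucv_send(pathid, &msgid, 0, 0, 0, 0,
			 hello, sizeof(hello));
}
#endif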
1851
1852/*
1853 * Name: iucv_send_array
1854 * Purpose: This function transmits data to another application.
1855 * The buffer parameter holds the address of the array of
1856 * addresses and lengths of discontiguous buffers that hold
1857 * the message text. This is a one-way message and the
1858 * receiver will not reply to the message.
1859 * Input: pathid - path identification number
1860 * trgcls - specifies target class
1861 * srccls - specifies the source message class
1862 * msgtag - specifies a tag to be associated with the message
1863 * flags1 - option for path
1864 * IPPRTY- specifies if you want to send priority message
1865 * buffer - address of array of send buffers
1866 * buflen - total length of send buffers
1867 * Output: msgid - specifies the message ID.
1868 * Return: b2f0_result - return code from CP
1869 * (-EINVAL) - buffer address is NULL
1870 */
1871int
1872iucv_send_array (__u16 pathid,
1873 __u32 * msgid,
1874 __u32 trgcls,
1875 __u32 srccls,
1876 __u32 msgtag, int flags1, iucv_array_t * buffer, ulong buflen)
1877{
1878 iparml_db *parm;
1879 ulong b2f0_result;
1880
1881 iucv_debug(2, "entering");
1882
1883 if (!buffer)
1884 return -EINVAL;
1885
1886 parm = (iparml_db *)grab_param();
1887
1888 parm->ippathid = pathid;
1889 parm->iptrgcls = trgcls;
1890 parm->ipbfadr1 = (__u32) ((ulong) buffer);
1891 parm->ipbfln1f = (__u32) buflen; /* length of message */
1892 parm->ipsrccls = srccls;
1893 parm->ipmsgtag = msgtag;
1894 parm->ipflags1 = (IPNORPY | IPBUFLST | flags1);
1895 b2f0_result = b2f0(SEND, parm);
1896
1897 if ((!b2f0_result) && (msgid))
1898 *msgid = parm->ipmsgid;
1899 release_param(parm);
1900
1901 iucv_debug(2, "exiting");
1902 return b2f0_result;
1903}
1904
1905/*
1906 * Name: iucv_send_prmmsg
1907 * Purpose: This function transmits data to another application.
1908 * Prmmsg specifies that the 8-bytes of data are to be moved
1909 * into the parameter list. This is a one-way message and the
1910 * receiver will not reply to the message.
1911 * Input: pathid - path identification number
1912 * trgcls - specifies target class
1913 * srccls - specifies the source message class
1914 * msgtag - specifies a tag to be associated with the message
1915 * flags1 - option for path
1916 * IPPRTY- specifies if you want to send priority message
1917 * prmmsg - 8-bytes of data to be placed into parameter list
1918 * Output: msgid - specifies the message ID.
1919 * Return: b2f0_result - return code from CP
1920*/
1921int
1922iucv_send_prmmsg (__u16 pathid,
1923 __u32 * msgid,
1924 __u32 trgcls,
1925 __u32 srccls, __u32 msgtag, int flags1, __u8 prmmsg[8])
1926{
1927 iparml_dpl *parm;
1928 ulong b2f0_result;
1929
1930 iucv_debug(2, "entering");
1931
1932 parm = (iparml_dpl *)grab_param();
1933
1934 parm->ippathid = pathid;
1935 parm->iptrgcls = trgcls;
1936 parm->ipsrccls = srccls;
1937 parm->ipmsgtag = msgtag;
1938 parm->ipflags1 = (IPRMDATA | IPNORPY | flags1);
1939 memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
1940
1941 b2f0_result = b2f0(SEND, parm);
1942
1943 if ((!b2f0_result) && (msgid))
1944 *msgid = parm->ipmsgid;
1945 release_param(parm);
1946
1947 iucv_debug(2, "exiting");
1948
1949 return b2f0_result;
1950}
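/*
 * Illustrative sketch (not from the original source): packing a small
 * control token into the parameter list with iucv_send_prmmsg(), so no
 * separate data buffer has to stay pinned. The token layout is
 * hypothetical.
 */
#if 0
static int example_send_token(__u16 pathid, __u32 token)
{
	__u8 prmmsg[8] = { 0 };
	__u32 msgid;

	memcpy(prmmsg, &token, sizeof(token));	/* remaining bytes stay 0 */
	return iucv_send_prmmsg(pathid, &msgid, 0, 0, 0, 0, prmmsg);
}
#endif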
1951
1952/*
1953 * Name: iucv_send2way
1954 * Purpose: This function transmits data to another application.
1955 * Data to be transmitted is in a buffer. The receiver
1956 * of the send is expected to reply to the message and
1957 * a buffer is provided into which IUCV moves the reply
1958 * to this message.
1959 * Input: pathid - path identification number
1960 * trgcls - specifies target class
1961 * srccls - specifies the source message class
1962 * msgtag - specifies a tag associated with the message
1963 * flags1 - option for path
1964 * IPPRTY- specifies if you want to send priority message
1965 * buffer - address of send buffer
1966 * buflen - length of send buffer
1967 * ansbuf - address of buffer to reply with
1968 * anslen - length of buffer to reply with
1969 * Output: msgid - specifies the message ID.
1970 * Return: b2f0_result - return code from CP
1971 * (-EINVAL) - buffer or ansbuf address is NULL
1972 */
1973int
1974iucv_send2way (__u16 pathid,
1975 __u32 * msgid,
1976 __u32 trgcls,
1977 __u32 srccls,
1978 __u32 msgtag,
1979 int flags1,
1980 void *buffer, ulong buflen, void *ansbuf, ulong anslen)
1981{
1982 iparml_db *parm;
1983 ulong b2f0_result;
1984
1985 iucv_debug(2, "entering");
1986
1987 if (!buffer || !ansbuf)
1988 return -EINVAL;
1989
1990 parm = (iparml_db *)grab_param();
1991
1992 parm->ippathid = pathid;
1993 parm->iptrgcls = trgcls;
1994 parm->ipbfadr1 = (__u32) ((ulong) buffer);
1995 parm->ipbfln1f = (__u32) buflen; /* length of message */
1996 parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
1997 parm->ipbfln2f = (__u32) anslen;
1998 parm->ipsrccls = srccls;
1999 parm->ipmsgtag = msgtag;
2000	parm->ipflags1 = flags1;	/* send options as passed in (e.g. IPPRTY) */
2001
2002 b2f0_result = b2f0(SEND, parm);
2003
2004 if ((!b2f0_result) && (msgid))
2005 *msgid = parm->ipmsgid;
2006 release_param(parm);
2007
2008 iucv_debug(2, "exiting");
2009
2010 return b2f0_result;
2011}
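/*
 * Illustrative sketch: a two-way exchange. The reply arrives
 * asynchronously; IUCV fills the answer buffer and signals completion
 * through the MessageComplete interrupt for the returned msgid, so both
 * buffers must remain valid until then. All names are hypothetical.
 */
#if 0
static int example_request(__u16 pathid, void *req, ulong req_len,
			   void *reply, ulong reply_len, __u32 *msgid)
{
	return iucv_send2way(pathid, msgid, 0, 0, 0, 0,
			     req, req_len, reply, reply_len);
}
#endif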
2012
2013/*
2014 * Name: iucv_send2way_array
2015 * Purpose: This function transmits data to another application.
2016 * The contents of buffer are the address of the array of
2017 * addresses and lengths of discontiguous buffers that hold
2018 * the message text. The receiver of the send is expected to
2019 * reply to the message and a buffer is provided into which
2020 * IUCV moves the reply to this message.
2021 * Input: pathid - path identification number
2022 * trgcls - specifies target class
2023 * srccls - specifies the source message class
2024 * msgtag - specifies a tag to be associated with the message
2025 * flags1 - option for path
2026 * IPPRTY- specifies if you want to send priority message
2027 * buffer - address of array of send buffers
2028 * buflen - total length of send buffers
2029 * ansbuf - address of buffer to reply with
2030 * anslen - length of buffer to reply with
2031 * Output: msgid - specifies the message ID.
2032 * Return: b2f0_result - return code from CP
2033 * (-EINVAL) - buffer address is NULL
2034 */
2035int
2036iucv_send2way_array (__u16 pathid,
2037 __u32 * msgid,
2038 __u32 trgcls,
2039 __u32 srccls,
2040 __u32 msgtag,
2041 int flags1,
2042 iucv_array_t * buffer,
2043 ulong buflen, iucv_array_t * ansbuf, ulong anslen)
2044{
2045 iparml_db *parm;
2046 ulong b2f0_result;
2047
2048 iucv_debug(2, "entering");
2049
2050 if (!buffer || !ansbuf)
2051 return -EINVAL;
2052
2053 parm = (iparml_db *)grab_param();
2054
2055 parm->ippathid = pathid;
2056 parm->iptrgcls = trgcls;
2057 parm->ipbfadr1 = (__u32) ((ulong) buffer);
2058 parm->ipbfln1f = (__u32) buflen; /* length of message */
2059 parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
2060 parm->ipbfln2f = (__u32) anslen;
2061 parm->ipsrccls = srccls;
2062 parm->ipmsgtag = msgtag;
2063 parm->ipflags1 = (IPBUFLST | IPANSLST | flags1);
2064 b2f0_result = b2f0(SEND, parm);
2065 if ((!b2f0_result) && (msgid))
2066 *msgid = parm->ipmsgid;
2067 release_param(parm);
2068
2069 iucv_debug(2, "exiting");
2070 return b2f0_result;
2071}
2072
2073/*
2074 * Name: iucv_send2way_prmmsg
2075 * Purpose: This function transmits data to another application.
2076 * Prmmsg specifies that the 8-bytes of data are to be moved
2077 * into the parameter list. This is a two-way message and the
2078 * receiver of the message is expected to reply. A buffer
2079 * is provided into which IUCV moves the reply to this
2080 * message.
2081 * Input: pathid - path identification number
2082 * trgcls - specifies target class
2083 * srccls - specifies the source message class
2084 * msgtag - specifies a tag to be associated with the message
2085 * flags1 - option for path
2086 * IPPRTY- specifies if you want to send priority message
2087 * prmmsg - 8-bytes of data to be placed in parameter list
2088 * ansbuf - address of buffer to reply with
2089 * anslen - length of buffer to reply with
2090 * Output: msgid - specifies the message ID.
2091 * Return: b2f0_result - return code from CP
2092 * (-EINVAL) - buffer address is NULL
2093*/
2094int
2095iucv_send2way_prmmsg (__u16 pathid,
2096 __u32 * msgid,
2097 __u32 trgcls,
2098 __u32 srccls,
2099 __u32 msgtag,
2100 ulong flags1, __u8 prmmsg[8], void *ansbuf, ulong anslen)
2101{
2102 iparml_dpl *parm;
2103 ulong b2f0_result;
2104
2105 iucv_debug(2, "entering");
2106
2107 if (!ansbuf)
2108 return -EINVAL;
2109
2110 parm = (iparml_dpl *)grab_param();
2111
2112 parm->ippathid = pathid;
2113 parm->iptrgcls = trgcls;
2114 parm->ipsrccls = srccls;
2115 parm->ipmsgtag = msgtag;
2116 parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
2117 parm->ipbfln2f = (__u32) anslen;
2118 parm->ipflags1 = (IPRMDATA | flags1); /* message in prmlist */
2119 memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
2120
2121 b2f0_result = b2f0(SEND, parm);
2122
2123 if ((!b2f0_result) && (msgid))
2124 *msgid = parm->ipmsgid;
2125 release_param(parm);
2126
2127 iucv_debug(2, "exiting");
2128
2129 return b2f0_result;
2130}
2131
2132/*
2133 * Name: iucv_send2way_prmmsg_array
2134 * Purpose: This function transmits data to another application.
2135 * Prmmsg specifies that the 8-bytes of data are to be moved
2136 * into the parameter list. This is a two-way message and the
2137 * receiver of the message is expected to reply. A buffer
2138 * is provided into which IUCV moves the reply to this
2139 * message. The contents of ansbuf are the address of the
2140 * array of addresses and lengths of discontiguous buffers
2141 * that contain the reply.
2142 * Input: pathid - path identification number
2143 * trgcls - specifies target class
2144 * srccls - specifies the source message class
2145 * msgtag - specifies a tag to be associated with the message
2146 * flags1 - option for path
2147 * IPPRTY- specifies if you want to send priority message
2148 * prmmsg - 8-bytes of data to be placed into the parameter list
2149 * ansbuf - address of buffer to reply with
2150 * anslen - length of buffer to reply with
2151 * Output: msgid - specifies the message ID.
2152 * Return: b2f0_result - return code from CP
2153 * (-EINVAL) - ansbuf address is NULL
2154 */
2155int
2156iucv_send2way_prmmsg_array (__u16 pathid,
2157 __u32 * msgid,
2158 __u32 trgcls,
2159 __u32 srccls,
2160 __u32 msgtag,
2161 int flags1,
2162 __u8 prmmsg[8],
2163 iucv_array_t * ansbuf, ulong anslen)
2164{
2165 iparml_dpl *parm;
2166 ulong b2f0_result;
2167
2168 iucv_debug(2, "entering");
2169
2170 if (!ansbuf)
2171 return -EINVAL;
2172
2173 parm = (iparml_dpl *)grab_param();
2174
2175 parm->ippathid = pathid;
2176 parm->iptrgcls = trgcls;
2177 parm->ipsrccls = srccls;
2178 parm->ipmsgtag = msgtag;
2179 parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
2180 parm->ipbfln2f = (__u32) anslen;
2181 parm->ipflags1 = (IPRMDATA | IPANSLST | flags1);
2182 memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
2183 b2f0_result = b2f0(SEND, parm);
2184 if ((!b2f0_result) && (msgid))
2185 *msgid = parm->ipmsgid;
2186 release_param(parm);
2187
2188 iucv_debug(2, "exiting");
2189 return b2f0_result;
2190}
2191
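/*
 * Helper executed on the IUCV-owning cpu via smp_call_function_on():
 * issues SETMASK with the mask byte passed in through *result and
 * stores the b2f0 return code back through the same pointer.
 */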
2192void
2193iucv_setmask_cpuid (void *result)
2194{
2195 iparml_set_mask *parm;
2196
2197 iucv_debug(1, "entering");
2198 parm = (iparml_set_mask *)grab_param();
2199 parm->ipmask = *((__u8*)result);
2200 *((ulong *)result) = b2f0(SETMASK, parm);
2201 release_param(parm);
2202
2203 iucv_debug(1, "b2f0_result = %ld", *((ulong *)result));
2204 iucv_debug(1, "exiting");
2205}
2206
2207/*
2208 * Name: iucv_setmask
2209 * Purpose: This function enables or disables the following IUCV
2210 * external interruptions: Nonpriority and priority message
2211 * interrupts, nonpriority and priority reply interrupts.
2212 * Input: SetMaskFlag - options for interrupts
2213 * 0x80 - Nonpriority_MessagePendingInterruptsFlag
2214 * 0x40 - Priority_MessagePendingInterruptsFlag
2215 * 0x20 - Nonpriority_MessageCompletionInterruptsFlag
2216 * 0x10 - Priority_MessageCompletionInterruptsFlag
2217 * 0x08 - IUCVControlInterruptsFlag
2218 * Output: NA
2219 * Return: b2f0_result - return code from CP
2220*/
2221int
2222iucv_setmask (int SetMaskFlag)
2223{
2224 union {
2225 ulong result;
2226 __u8 param;
2227 } u;
2228 int cpu;
2229
2230 u.param = SetMaskFlag;
2231 cpu = get_cpu();
2232 smp_call_function_on(iucv_setmask_cpuid, &u, 0, 1, iucv_cpuid);
2233 put_cpu();
2234
2235 return u.result;
2236}
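/*
 * Illustrative sketch: masking message interrupts around a critical
 * section while leaving control interrupts enabled, then restoring the
 * full mask. The flag constants are the ones defined in iucv.h.
 */
#if 0
static void example_mask_messages(void)
{
	iucv_setmask(IUCVControlInterruptsFlag);	/* only 0x08 stays on */
	/* ... work without message/reply interrupts ... */
	iucv_setmask(AllInterrupts);			/* restore 0xf8 */
}
#endif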
2237
2238/**
2239 * iucv_sever:
2240 * @pathid: Path identification number
2241 * @user_data: 16 bytes of user data
2242 *
2243 * This function terminates an IUCV path.
2244 * Returns: return code from CP
2245 */
2246int
2247iucv_sever(__u16 pathid, __u8 user_data[16])
2248{
2249 iparml_control *parm;
2250 ulong b2f0_result = 0;
2251
2252 iucv_debug(1, "entering");
2253 parm = (iparml_control *)grab_param();
2254
2255 memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
2256 parm->ippathid = pathid;
2257
2258 b2f0_result = b2f0(SEVER, parm);
2259
2260 if (!b2f0_result)
2261 iucv_remove_pathid(pathid);
2262 release_param(parm);
2263
2264 iucv_debug(1, "exiting");
2265 return b2f0_result;
2266}
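/*
 * Illustrative sketch: severing a path with a 16-byte tag the peer can
 * inspect in its ConnectionSevered handler. The tag text is
 * hypothetical; note that the driver itself converts its "NO LISTENER"
 * tag to EBCDIC with ASCEBC() before severing.
 */
#if 0
static int example_teardown(__u16 pathid)
{
	__u8 user_data[16] = "SHUTDOWN";

	return iucv_sever(pathid, user_data);
}
#endif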
2267
2268/*
2269 * Interrupt Handlers
2270 *******************************************************************************/
2271
2272/**
2273 * iucv_irq_handler:
2274 * @regs: Current registers
2275 * @code: irq code
2276 *
2277 * Handles external interrupts coming in from CP.
2278 * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler().
2279 */
2280static void
2281iucv_irq_handler(struct pt_regs *regs, __u16 code)
2282{
2283 iucv_irqdata *irqdata;
2284
2285 irqdata = kmalloc(sizeof(iucv_irqdata), GFP_ATOMIC);
2286 if (!irqdata) {
2287 printk(KERN_WARNING "%s: out of memory\n", __FUNCTION__);
2288 return;
2289 }
2290
2291 memcpy(&irqdata->data, iucv_external_int_buffer,
2292 sizeof(iucv_GeneralInterrupt));
2293
2294 spin_lock(&iucv_irq_queue_lock);
2295 list_add_tail(&irqdata->queue, &iucv_irq_queue);
2296 spin_unlock(&iucv_irq_queue_lock);
2297
2298 tasklet_schedule(&iucv_tasklet);
2299}
2300
2301/**
2302 * iucv_do_int:
2303 * @int_buf: Pointer to copy of external interrupt buffer
2304 *
2305 * The workhorse for handling interrupts queued by iucv_irq_handler().
2306 * This function is called from the bottom half iucv_tasklet_handler().
2307 */
2308static void
2309iucv_do_int(iucv_GeneralInterrupt * int_buf)
2310{
2311 handler *h = NULL;
2312 struct list_head *lh;
2313 ulong flags;
2314 iucv_interrupt_ops_t *interrupt = NULL; /* interrupt addresses */
2315 __u8 temp_buff1[24], temp_buff2[24]; /* masked handler id. */
2316 int rc = 0, j = 0;
2317 __u8 no_listener[16] = "NO LISTENER";
2318
2319 iucv_debug(2, "entering, pathid %d, type %02X",
2320 int_buf->ippathid, int_buf->iptype);
2321 iucv_dumpit("External Interrupt Buffer:",
2322 int_buf, sizeof(iucv_GeneralInterrupt));
2323
2324 ASCEBC (no_listener, 16);
2325
2326	if (int_buf->iptype != 0x01) {
2327 if ((int_buf->ippathid) > (max_connections - 1)) {
2328 printk(KERN_WARNING "%s: Got interrupt with pathid %d"
2329 " > max_connections (%ld)\n", __FUNCTION__,
2330 int_buf->ippathid, max_connections - 1);
2331 } else {
2332			h = iucv_pathid_table[int_buf->ippathid];
2333			interrupt = h ? h->interrupt_table : NULL; /* may be NULL if severed */
2334			if (h) iucv_dumpit("Handler:", h, sizeof(handler));
2335 }
2336 }
2337
2338	/* Dispatch on the interrupt type. */
2339 switch (int_buf->iptype) {
2340 case 0x01: /* connection pending */
2341 if (messagesDisabled) {
2342 iucv_setmask(~0);
2343 messagesDisabled = 0;
2344 }
2345 spin_lock_irqsave(&iucv_lock, flags);
2346 list_for_each(lh, &iucv_handler_table) {
2347 h = list_entry(lh, handler, list);
2348 memcpy(temp_buff1, &(int_buf->ipvmid), 24);
2349 memcpy(temp_buff2, &(h->id.userid), 24);
2350 for (j = 0; j < 24; j++) {
2351 temp_buff1[j] &= (h->id.mask)[j];
2352 temp_buff2[j] &= (h->id.mask)[j];
2353 }
2354
2355 iucv_dumpit("temp_buff1:",
2356 temp_buff1, sizeof(temp_buff1));
2357 iucv_dumpit("temp_buff2",
2358 temp_buff2, sizeof(temp_buff2));
2359
2360 if (!memcmp (temp_buff1, temp_buff2, 24)) {
2361
2362 iucv_debug(2,
2363 "found a matching handler");
2364 break;
2365 } else
2366 h = NULL;
2367 }
2368 spin_unlock_irqrestore (&iucv_lock, flags);
2369 if (h) {
2370 /* ADD PATH TO PATHID TABLE */
2371 rc = iucv_add_pathid(int_buf->ippathid, h);
2372 if (rc) {
2373 iucv_sever (int_buf->ippathid,
2374 no_listener);
2375 iucv_debug(1,
2376 "add_pathid failed, rc = %d",
2377 rc);
2378 } else {
2379 interrupt = h->interrupt_table;
2380 if (interrupt->ConnectionPending) {
2381 EBCASC (int_buf->ipvmid, 8);
2382 interrupt->ConnectionPending(
2383 (iucv_ConnectionPending *)int_buf,
2384 h->pgm_data);
2385 } else
2386 iucv_sever(int_buf->ippathid,
2387 no_listener);
2388 }
2389 } else
2390 iucv_sever(int_buf->ippathid, no_listener);
2391 break;
2392
2393 case 0x02: /*connection complete */
2394 if (messagesDisabled) {
2395 iucv_setmask(~0);
2396 messagesDisabled = 0;
2397 }
2398 if (h) {
2399			if (interrupt->ConnectionComplete)
2400				interrupt->ConnectionComplete(
2401					(iucv_ConnectionComplete *)int_buf,
2402					h->pgm_data);
2403			else
2404				iucv_debug(1,
2405					   "ConnectionComplete not called");
2408 } else
2409 iucv_sever(int_buf->ippathid, no_listener);
2410 break;
2411
2412 case 0x03: /* connection severed */
2413 if (messagesDisabled) {
2414 iucv_setmask(~0);
2415 messagesDisabled = 0;
2416 }
2417 if (h) {
2418 if (interrupt->ConnectionSevered)
2419 interrupt->ConnectionSevered(
2420 (iucv_ConnectionSevered *)int_buf,
2421 h->pgm_data);
2422
2423 else
2424 iucv_sever (int_buf->ippathid, no_listener);
2425 } else
2426 iucv_sever(int_buf->ippathid, no_listener);
2427 break;
2428
2429 case 0x04: /* connection quiesced */
2430 if (messagesDisabled) {
2431 iucv_setmask(~0);
2432 messagesDisabled = 0;
2433 }
2434 if (h) {
2435 if (interrupt->ConnectionQuiesced)
2436 interrupt->ConnectionQuiesced(
2437 (iucv_ConnectionQuiesced *)int_buf,
2438 h->pgm_data);
2439 else
2440 iucv_debug(1,
2441 "ConnectionQuiesced not called");
2442 }
2443 break;
2444
2445 case 0x05: /* connection resumed */
2446 if (messagesDisabled) {
2447 iucv_setmask(~0);
2448 messagesDisabled = 0;
2449 }
2450 if (h) {
2451 if (interrupt->ConnectionResumed)
2452 interrupt->ConnectionResumed(
2453 (iucv_ConnectionResumed *)int_buf,
2454 h->pgm_data);
2455 else
2456 iucv_debug(1,
2457 "ConnectionResumed not called");
2458 }
2459 break;
2460
2461 case 0x06: /* priority message complete */
2462 case 0x07: /* nonpriority message complete */
2463 if (h) {
2464 if (interrupt->MessageComplete)
2465 interrupt->MessageComplete(
2466 (iucv_MessageComplete *)int_buf,
2467 h->pgm_data);
2468 else
2469 iucv_debug(2,
2470 "MessageComplete not called");
2471 }
2472 break;
2473
2474 case 0x08: /* priority message pending */
2475 case 0x09: /* nonpriority message pending */
2476 if (h) {
2477 if (interrupt->MessagePending)
2478 interrupt->MessagePending(
2479 (iucv_MessagePending *) int_buf,
2480 h->pgm_data);
2481 else
2482 iucv_debug(2,
2483 "MessagePending not called");
2484 }
2485 break;
2486 default: /* unknown iucv type */
2487 printk(KERN_WARNING "%s: unknown iucv interrupt\n",
2488 __FUNCTION__);
2489 break;
2490 } /* end switch */
2491
2492 iucv_debug(2, "exiting pathid %d, type %02X",
2493 int_buf->ippathid, int_buf->iptype);
2494
2495 return;
2496}
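/*
 * Sketch (not in the original source) of the masked-identity compare
 * used in the connection-pending case above: both the incoming
 * vmid/user data and the handler's id are ANDed with the handler's
 * 24-byte mask before comparison, so a handler can wildcard any bytes
 * it does not care about.
 */
#if 0
static int example_id_matches(const __u8 *incoming, handler *h)
{
	__u8 a[24], b[24];
	int j;

	memcpy(a, incoming, 24);
	memcpy(b, &h->id.userid, 24);
	for (j = 0; j < 24; j++) {
		a[j] &= h->id.mask[j];
		b[j] &= h->id.mask[j];
	}
	return !memcmp(a, b, 24);
}
#endif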
2497
2498/**
2499 * iucv_tasklet_handler:
2500 *
2501 * This function loops over the queue of irq buffers and runs iucv_do_int()
2502 * on every queue element.
2503 */
2504static void
2505iucv_tasklet_handler(unsigned long ignored)
2506{
2507 struct list_head head;
2508 struct list_head *next;
2509 ulong flags;
2510
2511 spin_lock_irqsave(&iucv_irq_queue_lock, flags);
2512 list_add(&head, &iucv_irq_queue);
2513 list_del_init(&iucv_irq_queue);
2514 spin_unlock_irqrestore (&iucv_irq_queue_lock, flags);
2515
2516 next = head.next;
2517 while (next != &head) {
2518 iucv_irqdata *p = list_entry(next, iucv_irqdata, queue);
2519
2520 next = next->next;
2521 iucv_do_int(&p->data);
2522 kfree(p);
2523 }
2524
2525 return;
2526}
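/*
 * Note (illustrative): the list_add()/list_del_init() pair above is an
 * open-coded O(1) splice that detaches the entire irq queue onto the
 * local list head while the lock is held. With the generic list
 * helpers the same move could be written as:
 */
#if 0
	spin_lock_irqsave(&iucv_irq_queue_lock, flags);
	list_splice_init(&iucv_irq_queue, &head);
	spin_unlock_irqrestore(&iucv_irq_queue_lock, flags);
#endif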
2527
2528subsys_initcall(iucv_init);
2529module_exit(iucv_exit);
2530
2531/**
2532 * Export all public stuff
2533 */
2534EXPORT_SYMBOL (iucv_bus);
2535EXPORT_SYMBOL (iucv_root);
2536EXPORT_SYMBOL (iucv_accept);
2537EXPORT_SYMBOL (iucv_connect);
2538#if 0
2539EXPORT_SYMBOL (iucv_purge);
2540EXPORT_SYMBOL (iucv_query_maxconn);
2541EXPORT_SYMBOL (iucv_query_bufsize);
2542EXPORT_SYMBOL (iucv_quiesce);
2543#endif
2544EXPORT_SYMBOL (iucv_receive);
2545#if 0
2546EXPORT_SYMBOL (iucv_receive_array);
2547#endif
2548EXPORT_SYMBOL (iucv_reject);
2549#if 0
2550EXPORT_SYMBOL (iucv_reply);
2551EXPORT_SYMBOL (iucv_reply_array);
2552EXPORT_SYMBOL (iucv_resume);
2553#endif
2554EXPORT_SYMBOL (iucv_reply_prmmsg);
2555EXPORT_SYMBOL (iucv_send);
2556#if 0
2557EXPORT_SYMBOL (iucv_send2way);
2558EXPORT_SYMBOL (iucv_send2way_array);
2559EXPORT_SYMBOL (iucv_send_array);
2560EXPORT_SYMBOL (iucv_send2way_prmmsg);
2561EXPORT_SYMBOL (iucv_send2way_prmmsg_array);
2562EXPORT_SYMBOL (iucv_send_prmmsg);
2563EXPORT_SYMBOL (iucv_setmask);
2564#endif
2565EXPORT_SYMBOL (iucv_sever);
2566EXPORT_SYMBOL (iucv_register_program);
2567EXPORT_SYMBOL (iucv_unregister_program);
diff --git a/drivers/s390/net/iucv.h b/drivers/s390/net/iucv.h
new file mode 100644
index 000000000000..198330217eff
--- /dev/null
+++ b/drivers/s390/net/iucv.h
@@ -0,0 +1,849 @@
1/*
2 * drivers/s390/net/iucv.h
3 * IUCV base support.
4 *
5 * S390 version
6 * Copyright (C) 2000 IBM Corporation
7 * Author(s):Alan Altmark (Alan_Altmark@us.ibm.com)
8 * Xenia Tkatschow (xenia@us.ibm.com)
9 *
10 *
11 * Functionality:
12 * To explore any of the IUCV functions, one must first register
13 * their program using iucv_register_program(). Once your program has
14 * successfully completed a register, it can exploit the other functions.
15 * For further reference on all IUCV functionality, refer to the
16 * CP Programming Services book, also available on the web
17 * through www.ibm.com/s390/vm/pubs, manual # SC24-5760
18 *
19 * Definition of Return Codes
20 * -All positive return codes including zero are reflected back
21 * from CP except for iucv_register_program. The definition of each
22 * return code can be found in CP Programming Services book.
23 * Also available on the web through www.ibm.com/s390/vm/pubs, manual # SC24-5760
24 * - Return Code of:
25 * (-EINVAL) Invalid value
26 * (-ENOMEM) storage allocation failed
27 * pgmmask defined in iucv_register_program will be set depending on input
28 * parameters.
29 *
30 */
31
32#include <linux/types.h>
33#include <asm/debug.h>
34
35/**
36 * Debug Facility stuff
37 */
38#define IUCV_DBF_SETUP_NAME "iucv_setup"
39#define IUCV_DBF_SETUP_LEN 32
40#define IUCV_DBF_SETUP_INDEX 1
41#define IUCV_DBF_SETUP_NR_AREAS 1
42#define IUCV_DBF_SETUP_LEVEL 3
43
44#define IUCV_DBF_DATA_NAME "iucv_data"
45#define IUCV_DBF_DATA_LEN 128
46#define IUCV_DBF_DATA_INDEX 1
47#define IUCV_DBF_DATA_NR_AREAS 1
48#define IUCV_DBF_DATA_LEVEL 2
49
50#define IUCV_DBF_TRACE_NAME "iucv_trace"
51#define IUCV_DBF_TRACE_LEN 16
52#define IUCV_DBF_TRACE_INDEX 2
53#define IUCV_DBF_TRACE_NR_AREAS 1
54#define IUCV_DBF_TRACE_LEVEL 3
55
56#define IUCV_DBF_TEXT(name,level,text) \
57 do { \
58 debug_text_event(iucv_dbf_##name,level,text); \
59 } while (0)
60
61#define IUCV_DBF_HEX(name,level,addr,len) \
62 do { \
63 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
64 } while (0)
65
66DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
67
68#define IUCV_DBF_TEXT_(name,level,text...) \
69 do { \
70 char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
71 sprintf(iucv_dbf_txt_buf, text); \
72 debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
73 put_cpu_var(iucv_dbf_txt_buf); \
74 } while (0)
75
76#define IUCV_DBF_SPRINTF(name,level,text...) \
77 do { \
78		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
80 } while (0)
81
82/**
83 * some more debug stuff
84 */
85#define IUCV_HEXDUMP16(importance,header,ptr) \
86PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
87 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
88 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
89 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
90 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
91 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
92 *(((char*)ptr)+12),*(((char*)ptr)+13), \
93 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
94PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
95 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
96 *(((char*)ptr)+16),*(((char*)ptr)+17), \
97 *(((char*)ptr)+18),*(((char*)ptr)+19), \
98 *(((char*)ptr)+20),*(((char*)ptr)+21), \
99 *(((char*)ptr)+22),*(((char*)ptr)+23), \
100 *(((char*)ptr)+24),*(((char*)ptr)+25), \
101 *(((char*)ptr)+26),*(((char*)ptr)+27), \
102 *(((char*)ptr)+28),*(((char*)ptr)+29), \
103 *(((char*)ptr)+30),*(((char*)ptr)+31));
104
105static inline void
106iucv_hex_dump(unsigned char *buf, size_t len)
107{
108 size_t i;
109
110 for (i = 0; i < len; i++) {
111 if (i && !(i % 16))
112 printk("\n");
113 printk("%02x ", *(buf + i));
114 }
115 printk("\n");
116}
117/**
118 * end of debug stuff
119 */
120
121#define uchar unsigned char
122#define ushort unsigned short
123#define ulong unsigned long
124#define iucv_handle_t void *
125
126/* flags1:
127 * All flags are defined in the field IPFLAGS1 of each function
128 * and can be found in CP Programming Services.
129 * IPLOCAL - Indicates the connect can only be satisfied on the
130 * local system
131 * IPPRTY - Indicates a priority message
132 * IPQUSCE - Indicates you do not want to receive messages on a
133 * path until an iucv_resume is issued
134 * IPRMDATA - Indicates that the message is in the parameter list
135 */
136#define IPLOCAL 0x01
137#define IPPRTY 0x20
138#define IPQUSCE 0x40
139#define IPRMDATA 0x80
140
141/* flags1_out:
142 * All flags are defined in the output field of IPFLAGS1 for each function
143 * and can be found in CP Programming Services.
144 * IPNORPY - Specifies this is a one-way message and no reply is expected.
145 * IPPRTY - Indicates a priority message is permitted. Defined in flags1.
146 */
147#define IPNORPY 0x10
148
149#define Nonpriority_MessagePendingInterruptsFlag 0x80
150#define Priority_MessagePendingInterruptsFlag 0x40
151#define Nonpriority_MessageCompletionInterruptsFlag 0x20
152#define Priority_MessageCompletionInterruptsFlag 0x10
153#define IUCVControlInterruptsFlag 0x08
154#define AllInterrupts 0xf8
155/*
156 * Mapping of external interrupt buffers should be used with the corresponding
157 * interrupt types.
158 * Names: iucv_ConnectionPending -> connection pending
159 * iucv_ConnectionComplete -> connection complete
160 * iucv_ConnectionSevered -> connection severed
161 * iucv_ConnectionQuiesced -> connection quiesced
162 * iucv_ConnectionResumed -> connection resumed
163 * iucv_MessagePending -> message pending
164 * iucv_MessageComplete -> message complete
165 */
166typedef struct {
167 u16 ippathid;
168 uchar ipflags1;
169 uchar iptype;
170 u16 ipmsglim;
171 u16 res1;
172 uchar ipvmid[8];
173 uchar ipuser[16];
174 u32 res3;
175 uchar ippollfg;
176 uchar res4[3];
177} iucv_ConnectionPending;
178
179typedef struct {
180 u16 ippathid;
181 uchar ipflags1;
182 uchar iptype;
183 u16 ipmsglim;
184 u16 res1;
185 uchar res2[8];
186 uchar ipuser[16];
187 u32 res3;
188 uchar ippollfg;
189 uchar res4[3];
190} iucv_ConnectionComplete;
191
192typedef struct {
193 u16 ippathid;
194 uchar res1;
195 uchar iptype;
196 u32 res2;
197 uchar res3[8];
198 uchar ipuser[16];
199 u32 res4;
200 uchar ippollfg;
201 uchar res5[3];
202} iucv_ConnectionSevered;
203
204typedef struct {
205 u16 ippathid;
206 uchar res1;
207 uchar iptype;
208 u32 res2;
209 uchar res3[8];
210 uchar ipuser[16];
211 u32 res4;
212 uchar ippollfg;
213 uchar res5[3];
214} iucv_ConnectionQuiesced;
215
216typedef struct {
217 u16 ippathid;
218 uchar res1;
219 uchar iptype;
220 u32 res2;
221 uchar res3[8];
222 uchar ipuser[16];
223 u32 res4;
224 uchar ippollfg;
225 uchar res5[3];
226} iucv_ConnectionResumed;
227
228typedef struct {
229 u16 ippathid;
230 uchar ipflags1;
231 uchar iptype;
232 u32 ipmsgid;
233 u32 iptrgcls;
234 union u2 {
235 u32 iprmmsg1_u32;
236 uchar iprmmsg1[4];
237 } ln1msg1;
238 union u1 {
239 u32 ipbfln1f;
240 uchar iprmmsg2[4];
241 } ln1msg2;
242 u32 res1[3];
243 u32 ipbfln2f;
244 uchar ippollfg;
245 uchar res2[3];
246} iucv_MessagePending;
247
248typedef struct {
249 u16 ippathid;
250 uchar ipflags1;
251 uchar iptype;
252 u32 ipmsgid;
253 u32 ipaudit;
254 uchar iprmmsg[8];
255 u32 ipsrccls;
256 u32 ipmsgtag;
257 u32 res;
258 u32 ipbfln2f;
259 uchar ippollfg;
260 uchar res2[3];
261} iucv_MessageComplete;
262
263/*
264 * iucv_interrupt_ops_t: Is a vector of functions that handle
265 * IUCV interrupts.
266 * Parameter list:
267 * eib - is a pointer to a 40-byte area described
268 * with one of the structures above.
269 * pgm_data - this data is strictly for the
270 * interrupt handler that is passed by
271 * the application. This may be an address
272 * or token.
273*/
274typedef struct {
275 void (*ConnectionPending) (iucv_ConnectionPending * eib,
276 void *pgm_data);
277 void (*ConnectionComplete) (iucv_ConnectionComplete * eib,
278 void *pgm_data);
279 void (*ConnectionSevered) (iucv_ConnectionSevered * eib,
280 void *pgm_data);
281 void (*ConnectionQuiesced) (iucv_ConnectionQuiesced * eib,
282 void *pgm_data);
283 void (*ConnectionResumed) (iucv_ConnectionResumed * eib,
284 void *pgm_data);
285 void (*MessagePending) (iucv_MessagePending * eib, void *pgm_data);
286 void (*MessageComplete) (iucv_MessageComplete * eib, void *pgm_data);
287} iucv_interrupt_ops_t;
288
289/*
290 *iucv_array_t : Defines buffer array.
291 * Inside the array may be 31-bit addresses and 31-bit lengths.
292*/
293typedef struct {
294 u32 address;
295 u32 length;
296} iucv_array_t __attribute__ ((aligned (8)));
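/*
 * Illustrative sketch (hypothetical names): describing two
 * discontiguous buffers for iucv_send_array(). Each entry holds a
 * 31-bit address and a length; buflen is the sum of all entry lengths,
 * and the array must stay valid until the message completes.
 */
#if 0
static int example_send_gather(u16 pathid, void *hdr, u32 hdr_len,
			       void *payload, u32 payload_len)
{
	static iucv_array_t vec[2];	/* static: not reentrant, sketch only */
	u32 msgid;

	vec[0].address = (u32)(ulong) hdr;
	vec[0].length = hdr_len;
	vec[1].address = (u32)(ulong) payload;
	vec[1].length = payload_len;
	return iucv_send_array(pathid, &msgid, 0, 0, 0, 0,
			       vec, hdr_len + payload_len);
}
#endif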
297
298extern struct bus_type iucv_bus;
299extern struct device *iucv_root;
300
301/* -prototypes- */
302/*
303 * Name: iucv_register_program
304 * Purpose: Registers an application with IUCV
305 * Input: pgmname - 16-byte program identification
306 * userid - 8-byte user (virtual machine) identification
307 * pgmmask - indicates which bits in the pgmname and userid combined will be
308 * used to determine who is given control
309 * ops - address of vector of interrupt handlers
310 * pgm_data - application data passed to interrupt handlers
311 * Output: NA
312 * Return: address of handler
313 * (0) - Error occurred, registration not completed.
314 * NOTE: Exact cause of failure will be recorded in syslog.
315*/
316iucv_handle_t iucv_register_program (uchar pgmname[16],
317 uchar userid[8],
318 uchar pgmmask[24],
319 iucv_interrupt_ops_t * ops,
320 void *pgm_data);
321
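/*
 * Illustrative sketch: a minimal registration. Unused callbacks may be
 * left NULL; a pgmmask of all 0xff bytes requires an exact match on the
 * combined name/userid. Passing a NULL userid is assumed here to mean
 * "accept any user" - check iucv_register_program() for the exact
 * semantics. All names are hypothetical.
 */
#if 0
static void example_pending(iucv_ConnectionPending *eib, void *pgm_data)
{
	/* iucv_accept() or iucv_sever() the path here */
}

static iucv_interrupt_ops_t example_ops = {
	.ConnectionPending = example_pending,
};

static iucv_handle_t example_register(void)
{
	uchar pgmname[16] = "EXAMPLE";	/* blank padding assumed */
	uchar mask[24];

	memset(mask, 0xff, sizeof(mask));
	return iucv_register_program(pgmname, NULL, mask,
				     &example_ops, NULL);
}
#endif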
322/*
323 * Name: iucv_unregister_program
324 * Purpose: Unregister application with IUCV
325 * Input: address of handler
326 * Output: NA
327 * Return: (0) - Normal return
328 * (-EINVAL) - Internal error, wild pointer
329*/
330int iucv_unregister_program (iucv_handle_t handle);
331
332/*
333 * Name: iucv_accept
334 * Purpose: This function is issued after the user receives a Connection Pending external
335 * interrupt and now wishes to complete the IUCV communication path.
336 * Input: pathid - u16 , Path identification number
337 * msglim_reqstd - u16, The number of outstanding messages requested.
338 * user_data - uchar[16], Data specified by the iucv_connect function.
339 * flags1 - int, Contains options for this path.
340 * -IPPRTY - 0x20- Specifies if you want to send priority message.
341 * -IPRMDATA - 0x80, Specifies whether your program can handle a message
342 * in the parameter list.
343 * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being
344 * established.
345 * handle - iucv_handle_t, Address of handler.
346 * pgm_data - void *, Application data passed to interrupt handlers.
347 * flags1_out - int * Contains information about the path
348 * - IPPRTY - 0x20, Indicates you may send priority messages.
349 * msglim - *u16, Number of outstanding messages.
350 * Output: return code from CP IUCV call.
351*/
352
353int iucv_accept (u16 pathid,
354 u16 msglim_reqstd,
355 uchar user_data[16],
356 int flags1,
357 iucv_handle_t handle,
358 void *pgm_data, int *flags1_out, u16 * msglim);
359
360/*
361 * Name: iucv_connect
362 * Purpose: This function establishes an IUCV path. Although the connect may complete
363 * successfully, you are not able to use the path until you receive an IUCV
364 * Connection Complete external interrupt.
365 * Input: pathid - u16 *, Path identification number
366 * msglim_reqstd - u16, Number of outstanding messages requested
367 * user_data - uchar[16], 16-byte user data
368 * userid - uchar[8], User identification
369 * system_name - uchar[8], 8-byte identifying the system name
370 * flags1 - int, Contains options for this path.
371 * -IPPRTY - 0x20, Specifies if you want to send priority message.
372 * -IPRMDATA - 0x80, Specifies whether your program can handle a message
373 * in the parameter list.
374 * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being
375 * established.
376 * -IPLOCAL - 0X01, Allows an application to force the partner to be on
377 * the local system. If local is specified then target class cannot be
378 * specified.
379 * flags1_out - int * Contains information about the path
380 * - IPPRTY - 0x20, Indicates you may send priority messages.
381 * msglim - * u16, Number of outstanding messages
382 * handle - iucv_handle_t, Address of handler
383 * pgm_data - void *, Application data passed to interrupt handlers
384 * Output: return code from CP IUCV call
385 * rc - return code from iucv_declare_buffer
386 * -EINVAL - Invalid handle passed by application
387 * -EINVAL - Pathid address is NULL
388 * add_pathid_result - Return code from internal function add_pathid
389*/
390int
391 iucv_connect (u16 * pathid,
392 u16 msglim_reqstd,
393 uchar user_data[16],
394 uchar userid[8],
395 uchar system_name[8],
396 int flags1,
397 int *flags1_out,
398 u16 * msglim, iucv_handle_t handle, void *pgm_data);
399
400/*
401 * Name: iucv_purge
402 * Purpose: This function cancels a message that you have sent.
403 * Input: pathid - Path identification number.
404 * msgid - Specifies the message ID of the message to be purged.
405 * srccls - Specifies the source message class.
406 * Output: audit - Contains information about asynchronous error
407 * that may have affected the normal completion
408 * of this message.
409 * Return: Return code from CP IUCV call.
410*/
411int iucv_purge (u16 pathid, u32 msgid, u32 srccls, __u32 *audit);
412/*
413 * Name: iucv_query_maxconn
414 * Purpose: This function determines the maximum number of communication paths you
415 * may establish.
416 * Return: maxconn - ulong, Maximum number of connection the virtual machine may
417 * establish.
418*/
419ulong iucv_query_maxconn (void);
420
421/*
422 * Name: iucv_query_bufsize
423 * Purpose: This function determines how large an external interrupt
424 * buffer IUCV requires to store information.
425 * Return: bufsize - ulong, Size of external interrupt buffer.
426 */
427ulong iucv_query_bufsize (void);
428
429/*
430 * Name: iucv_quiesce
431 * Purpose: This function temporarily suspends incoming messages on an
432 * IUCV path. You can later reactivate the path by invoking
433 * the iucv_resume function.
434 * Input: pathid - Path identification number
435 * user_data - 16-bytes of user data
436 * Output: NA
437 * Return: Return code from CP IUCV call.
438*/
439int iucv_quiesce (u16 pathid, uchar user_data[16]);
440
441/*
442 * Name: iucv_receive
443 * Purpose: This function receives messages that are being sent to you
444 * over established paths. Data will be returned in buffer for length of
445 * buflen.
446 * Input:
447 * pathid - Path identification number.
448 * buffer - Address of buffer to receive.
449 * buflen - Length of buffer to receive.
450 * msgid - Specifies the message ID.
451 * trgcls - Specifies target class.
452 * Output:
453 * flags1_out: int *, Contains information about this path.
454 * IPNORPY - 0x10 Specifies this is a one-way message and no reply is
455 * expected.
456 * IPPRTY - 0x20 Indicates a priority message.
457 * IPRMDATA - 0x80 specifies the data is contained in the parameter list
458 * residual_buffer - address of buffer updated by the number
459 * of bytes you have received.
460 * residual_length -
461 * Contains one of the following values, if the receive buffer is:
462 * The same length as the message, this field is zero.
463 * Longer than the message, this field contains the number of
464 * bytes remaining in the buffer.
465 * Shorter than the message, this field contains the residual
466 * count (that is, the number of bytes remaining in the
467 * message that does not fit into the buffer. In this
468 * case b2f0_result = 5.
469 * Return: Return code from CP IUCV call.
470 * (-EINVAL) - buffer address is pointing to NULL
471*/
472int iucv_receive (u16 pathid,
473 u32 msgid,
474 u32 trgcls,
475 void *buffer,
476 ulong buflen,
477 int *flags1_out,
478 ulong * residual_buffer, ulong * residual_length);
479
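/*
 * Illustrative sketch: completing a pending message from inside a
 * MessagePending handler. The pathid/msgid/trgcls must echo the values
 * from the interrupt buffer. A real handler would also check ipflags1
 * for IPRMDATA (data in the parameter list) and the residual counts;
 * the static 4K buffer is a hypothetical simplification.
 */
#if 0
static void example_pending_cb(iucv_MessagePending *eib, void *pgm_data)
{
	static uchar buf[4096];
	int flags1_out;
	ulong residual_buf, residual_len;

	iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls,
		     buf, sizeof(buf), &flags1_out,
		     &residual_buf, &residual_len);
}
#endif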
480 /*
481 * Name: iucv_receive_array
482 * Purpose: This function receives messages that are being sent to you
483 * over established paths. Data will be returned in first buffer for
484 * length of first buffer.
485 * Input: pathid - Path identification number.
486 * msgid - specifies the message ID.
487 * trgcls - Specifies target class.
488 * buffer - Address of array of buffers.
489 * buflen - Total length of buffers.
490 * Output:
491 * flags1_out: int *, Contains information about this path.
492 * IPNORPY - 0x10 Specifies this is a one-way message and no reply is
493 * expected.
494 * IPPRTY - 0x20 Indicates a priority message.
495 * IPRMDATA - 0x80 specifies the data is contained in the parameter list
496 * residual_buffer - address points to the current list entry IUCV
497 * is working on.
498 * residual_length -
499 * Contains one of the following values, if the receive buffer is:
500 * The same length as the message, this field is zero.
501 * Longer than the message, this field contains the number of
502 * bytes remaining in the buffer.
503 * Shorter than the message, this field contains the residual
504 * count (that is, the number of bytes remaining in the
506 * message that does not fit into the buffer). In this
506 * case b2f0_result = 5.
507 * Return: Return code from CP IUCV call.
508 * (-EINVAL) - Buffer address is NULL.
509 */
510int iucv_receive_array (u16 pathid,
511 u32 msgid,
512 u32 trgcls,
513 iucv_array_t * buffer,
514 ulong buflen,
515 int *flags1_out,
516 ulong * residual_buffer, ulong * residual_length);
517
518/*
519 * Name: iucv_reject
520 * Purpose: The reject function refuses a specified message. Between the
521 * time you are notified of a message and the time that you
522 * complete the message, the message may be rejected.
523 * Input: pathid - Path identification number.
524 * msgid - Specifies the message ID.
525 * trgcls - Specifies target class.
526 * Output: NA
527 * Return: Return code from CP IUCV call.
528*/
529int iucv_reject (u16 pathid, u32 msgid, u32 trgcls);
530
531/*
532 * Name: iucv_reply
533 * Purpose: This function responds to the two-way messages that you
534 * receive. You must identify completely the message to
535 * which you wish to reply. ie, pathid, msgid, and trgcls.
536 * Input: pathid - Path identification number.
537 * msgid - Specifies the message ID.
538 * trgcls - Specifies target class.
539 * flags1 - Option for path.
540 * IPPRTY- 0x20, Specifies if you want to send priority message.
541 * buffer - Address of reply buffer.
542 * buflen - Length of reply buffer.
543 * Output: residual_buffer - Address of buffer updated by the number
544 * of bytes you have moved.
545 * residual_length - Contains one of the following values:
546 * If the answer buffer is the same length as the reply, this field
547 * contains zero.
548 * If the answer buffer is longer than the reply, this field contains
549 * the number of bytes remaining in the buffer.
550 * If the answer buffer is shorter than the reply, this field contains
551 * a residual count (that is, the number of bytes remaining in the
552 * reply that does not fit into the buffer). In this
553 * case b2f0_result = 5.
554 * Return: Return code from CP IUCV call.
555 * (-EINVAL) - Buffer address is NULL.
556*/
557int iucv_reply (u16 pathid,
558 u32 msgid,
559 u32 trgcls,
560 int flags1,
561 void *buffer, ulong buflen, ulong * residual_buffer,
562 ulong * residual_length);
563
564/*
565 * Name: iucv_reply_array
566 * Purpose: This function responds to the two-way messages that you
567 * receive. You must identify completely the message to
568 * which you wish to reply. ie, pathid, msgid, and trgcls.
569 * The array identifies a list of addresses and lengths of
570 * discontiguous buffers that contains the reply data.
571 * Input: pathid - Path identification number
572 * msgid - Specifies the message ID.
573 * trgcls - Specifies target class.
574 * flags1 - Option for path.
575 * IPPRTY- 0x20, Specifies if you want to send priority message.
576 * buffer - Address of array of reply buffers.
577 * buflen - Total length of reply buffers.
578 * Output: residual_buffer - Address of buffer which IUCV is currently working on.
579 * residual_length - Contains one of the following values:
580 * If the answer buffer is the same length as the reply, this field
581 * contains zero.
582 * If the answer buffer is longer than the reply, this field contains
583 * the number of bytes remaining in the buffer.
584 * If the answer buffer is shorter than the reply, this field contains
585 * a residual count (that is, the number of bytes remaining in the
586 * reply that does not fit into the buffer). In this
587 * case b2f0_result = 5.
588 * Return: Return code from CP IUCV call.
589 * (-EINVAL) - Buffer address is NULL.
590*/
591int iucv_reply_array (u16 pathid,
592 u32 msgid,
593 u32 trgcls,
594 int flags1,
595 iucv_array_t * buffer,
596 ulong buflen, ulong * residual_address,
597 ulong * residual_length);
598
599/*
600 * Name: iucv_reply_prmmsg
601 * Purpose: This function responds to the two-way messages that you
602 * receive. You must identify completely the message to
603 * which you wish to reply. ie, pathid, msgid, and trgcls.
604 * Prmmsg signifies the data is moved into the
605 * parameter list.
606 * Input: pathid - Path identification number.
607 * msgid - Specifies the message ID.
608 * trgcls - Specifies target class.
609 * flags1 - Option for path.
610 * IPPRTY- 0x20 Specifies if you want to send priority message.
611 * prmmsg - 8-bytes of data to be placed into the parameter.
612 * list.
613 * Output: NA
614 * Return: Return code from CP IUCV call.
615*/
616int iucv_reply_prmmsg (u16 pathid,
617 u32 msgid, u32 trgcls, int flags1, uchar prmmsg[8]);
618
619/*
620 * Name: iucv_resume
621 * Purpose: This function restores communications over a quiesced path
622 * Input: pathid - Path identification number.
623 * user_data - 16-bytes of user data.
624 * Output: NA
625 * Return: Return code from CP IUCV call.
626*/
627int iucv_resume (u16 pathid, uchar user_data[16]);
628
629/*
630 * Name: iucv_send
631 * Purpose: This function transmits data to another application.
632 * Data to be transmitted is in a buffer and this is a
633 * one-way message and the receiver will not reply to the
634 * message.
635 * Input: pathid - Path identification number.
636 * trgcls - Specifies target class.
637 * srccls - Specifies the source message class.
638 * msgtag - Specifies a tag to be associated with the message.
639 * flags1 - Option for path.
640 * IPPRTY- 0x20 Specifies if you want to send priority message.
641 * buffer - Address of send buffer.
642 * buflen - Length of send buffer.
643 * Output: msgid - Specifies the message ID.
644 * Return: Return code from CP IUCV call.
645 * (-EINVAL) - Buffer address is NULL.
646*/
647int iucv_send (u16 pathid,
648 u32 * msgid,
649 u32 trgcls,
650 u32 srccls, u32 msgtag, int flags1, void *buffer, ulong buflen);
651
652/*
653 * Name: iucv_send_array
654 * Purpose: This function transmits data to another application.
655 * The contents of buffer are the address of the array of
656 * addresses and lengths of discontiguous buffers that hold
657 * the message text. This is a one-way message and the
658 * receiver will not reply to the message.
659 * Input: pathid - Path identification number.
660 * trgcls - Specifies target class.
661 * srccls - Specifies the source message class.
662 * msgtag - Specifies a tag to be associated with the message.
663 * flags1 - Option for path.
664 * IPPRTY- specifies if you want to send priority message.
665 * buffer - Address of array of send buffers.
666 * buflen - Total length of send buffers.
667 * Output: msgid - Specifies the message ID.
668 * Return: Return code from CP IUCV call.
669 * (-EINVAL) - Buffer address is NULL.
670*/
671int iucv_send_array (u16 pathid,
672 u32 * msgid,
673 u32 trgcls,
674 u32 srccls,
675 u32 msgtag,
676 int flags1, iucv_array_t * buffer, ulong buflen);
677
678/*
679 * Name: iucv_send_prmmsg
680 * Purpose: This function transmits data to another application.
681 * Prmmsg specifies that the 8-bytes of data are to be moved
682 * into the parameter list. This is a one-way message and the
683 * receiver will not reply to the message.
684 * Input: pathid - Path identification number.
685 * trgcls - Specifies target class.
686 * srccls - Specifies the source message class.
687 * msgtag - Specifies a tag to be associated with the message.
688 * flags1 - Option for path.
689 * IPPRTY- 0x20 specifies if you want to send priority message.
690 * prmmsg - 8-bytes of data to be placed into parameter list.
691 * Output: msgid - Specifies the message ID.
692 * Return: Return code from CP IUCV call.
693*/
694int iucv_send_prmmsg (u16 pathid,
695 u32 * msgid,
696 u32 trgcls,
697 u32 srccls, u32 msgtag, int flags1, uchar prmmsg[8]);
698
699/*
700 * Name: iucv_send2way
701 * Purpose: This function transmits data to another application.
702 * Data to be transmitted is in a buffer. The receiver
703 * of the send is expected to reply to the message and
704 * a buffer is provided into which IUCV moves the reply
705 * to this message.
706 * Input: pathid - Path identification number.
707 * trgcls - Specifies target class.
708 * srccls - Specifies the source message class.
709 * msgtag - Specifies a tag associated with the message.
710 * flags1 - Option for path.
711 * IPPRTY- 0x20 Specifies if you want to send priority message.
712 * buffer - Address of send buffer.
713 * buflen - Length of send buffer.
714 * ansbuf - Address of buffer into which IUCV moves the reply of
715 * this message.
716 * anslen - Length of the reply buffer.
717 * Output: msgid - Specifies the message ID.
718 * Return: Return code from CP IUCV call.
719 * (-EINVAL) - Buffer or ansbuf address is NULL.
720*/
721int iucv_send2way (u16 pathid,
722 u32 * msgid,
723 u32 trgcls,
724 u32 srccls,
725 u32 msgtag,
726 int flags1,
727 void *buffer, ulong buflen, void *ansbuf, ulong anslen);
728
729/*
730 * Name: iucv_send2way_array
731 * Purpose: This function transmits data to another application.
732 * The contents of buffer are the address of the array of
733 * addresses and lengths of discontiguous buffers that hold
734 * the message text. The receiver of the send is expected to
735 * reply to the message and a buffer is provided into which
736 * IUCV moves the reply to this message.
737 * Input: pathid - Path identification number.
738 * trgcls - Specifies target class.
739 * srccls - Specifies the source message class.
740 * msgtag - Specifies a tag to be associated with the message.
741 * flags1 - Option for path.
742 * IPPRTY- 0x20 Specifies if you want to send priority message.
743 * buffer - Address of array of send buffers.
744 * buflen - Total length of send buffers.
745 * ansbuf - Address of array of buffer into which IUCV moves the reply
746 * of this message.
747 * anslen - Total length of the reply buffers.
748 * Output: msgid - Specifies the message ID.
749 * Return: Return code from CP IUCV call.
750 * (-EINVAL) - Buffer address is NULL.
751*/
752int iucv_send2way_array (u16 pathid,
753 u32 * msgid,
754 u32 trgcls,
755 u32 srccls,
756 u32 msgtag,
757 int flags1,
758 iucv_array_t * buffer,
759 ulong buflen, iucv_array_t * ansbuf, ulong anslen);
760
761/*
762 * Name: iucv_send2way_prmmsg
763 * Purpose: This function transmits data to another application.
764 * Prmmsg specifies that the 8-bytes of data are to be moved
765 * into the parameter list. This is a two-way message and the
766 * receiver of the message is expected to reply. A buffer
767 * is provided into which IUCV moves the reply to this
768 * message.
769 * Input: pathid - Path identification number.
770 * trgcls - Specifies target class.
771 * srccls - Specifies the source message class.
772 * msgtag - Specifies a tag to be associated with the message.
773 * flags1 - Option for path.
774 * IPPRTY- 0x20 Specifies if you want to send priority message.
775 * prmmsg - 8-bytes of data to be placed in parameter list.
776 * ansbuf - Address of buffer into which IUCV moves the reply of
777 * this message.
778 * anslen - Length of the reply buffer.
779 * Output: msgid - Specifies the message ID.
780 * Return: Return code from CP IUCV call.
781 * (-EINVAL) - Buffer address is NULL.
782*/
783int iucv_send2way_prmmsg (u16 pathid,
784 u32 * msgid,
785 u32 trgcls,
786 u32 srccls,
787 u32 msgtag,
788 ulong flags1,
789 uchar prmmsg[8], void *ansbuf, ulong anslen);
790
791/*
792 * Name: iucv_send2way_prmmsg_array
793 * Purpose: This function transmits data to another application.
794 * Prmmsg specifies that the 8-bytes of data are to be moved
795 * into the parameter list. This is a two-way message and the
796 * receiver of the message is expected to reply. A buffer
797 * is provided into which IUCV moves the reply to this
798 * message. The contents of ansbuf are the address of the
799 * array of addresses and lengths of discontiguous buffers
800 * that contain the reply.
801 * Input: pathid - Path identification number.
802 * trgcls - Specifies target class.
803 * srccls - Specifies the source message class.
804 * msgtag - Specifies a tag to be associated with the message.
805 * flags1 - Option for path.
806 * IPPRTY- 0x20 specifies if you want to send priority message.
807 * prmmsg - 8-bytes of data to be placed into the parameter list.
808 * ansbuf - Address of array of buffer into which IUCV moves the reply
809 * of this message.
810 * anslen - Total length of the reply buffers.
811 * Output: msgid - Specifies the message ID.
812 * Return: Return code from CP IUCV call.
813 * (-EINVAL) - Ansbuf address is NULL.
814*/
815int iucv_send2way_prmmsg_array (u16 pathid,
816 u32 * msgid,
817 u32 trgcls,
818 u32 srccls,
819 u32 msgtag,
820 int flags1,
821 uchar prmmsg[8],
822 iucv_array_t * ansbuf, ulong anslen);
823
824/*
825 * Name: iucv_setmask
826 * Purpose: This function enables or disables the following IUCV
827 * external interruptions: Nonpriority and priority message
828 * interrupts, nonpriority and priority reply interrupts.
829 * Input: SetMaskFlag - options for interrupts
830 * 0x80 - Nonpriority_MessagePendingInterruptsFlag
831 * 0x40 - Priority_MessagePendingInterruptsFlag
832 * 0x20 - Nonpriority_MessageCompletionInterruptsFlag
833 * 0x10 - Priority_MessageCompletionInterruptsFlag
834 * 0x08 - IUCVControlInterruptsFlag
835 * Output: NA
836 * Return: Return code from CP IUCV call.
837*/
838int iucv_setmask (int SetMaskFlag);
839
840/*
841 * Name: iucv_sever
842 * Purpose: This function terminates an IUCV path.
843 * Input: pathid - Path identification number.
844 * user_data - 16-bytes of user data.
845 * Output: NA
846 * Return: Return code from CP IUCV call.
847 * (-EINVAL) - Internal error, wild pointer.
848*/
849int iucv_sever (u16 pathid, uchar user_data[16]);
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
new file mode 100644
index 000000000000..0f76e945b984
--- /dev/null
+++ b/drivers/s390/net/lcs.c
@@ -0,0 +1,2347 @@
1/*
2 * linux/drivers/s390/net/lcs.c
3 *
4 * Linux for S/390 Lan Channel Station Network Driver
5 *
6 * Copyright (C) 1999-2001 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation
8 * Author(s): Original Code written by
9 * DJ Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
10 * Rewritten by
11 * Frank Pavlic (pavlic@de.ibm.com) and
12 * Martin Schwidefsky <schwidefsky@de.ibm.com>
13 *
14 * $Revision: 1.96 $ $Date: 2004/11/11 13:42:33 $
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2, or (at your option)
19 * any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29 */
30
31#include <linux/module.h>
32#include <linux/if.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/trdevice.h>
36#include <linux/fddidevice.h>
37#include <linux/inetdevice.h>
38#include <linux/in.h>
39#include <linux/igmp.h>
40#include <linux/delay.h>
41#include <net/arp.h>
42#include <net/ip.h>
43
44#include <asm/debug.h>
45#include <asm/idals.h>
46#include <asm/timex.h>
47#include <linux/device.h>
48#include <asm/ccwgroup.h>
49
50#include "lcs.h"
51#include "cu3088.h"
52
53
54#if !defined(CONFIG_NET_ETHERNET) && \
55 !defined(CONFIG_TR) && !defined(CONFIG_FDDI)
56#error Cannot compile lcs.c without some net devices switched on.
57#endif
58
59/**
60 * initialization string for output
61 */
62#define VERSION_LCS_C "$Revision: 1.96 $"
63
64static char version[] __initdata = "LCS driver ("VERSION_LCS_C "/" VERSION_LCS_H ")";
65static char debug_buffer[255];
66
67/**
68 * Some prototypes.
69 */
70static void lcs_tasklet(unsigned long);
71static void lcs_start_kernel_thread(struct lcs_card *card);
72static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
73static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
74
75/**
76 * Debug Facility Stuff
77 */
78static debug_info_t *lcs_dbf_setup;
79static debug_info_t *lcs_dbf_trace;
80
81/**
82 * LCS Debug Facility functions
83 */
84static void
85lcs_unregister_debug_facility(void)
86{
87 if (lcs_dbf_setup)
88 debug_unregister(lcs_dbf_setup);
89 if (lcs_dbf_trace)
90 debug_unregister(lcs_dbf_trace);
91}
92
93static int
94lcs_register_debug_facility(void)
95{
96 lcs_dbf_setup = debug_register("lcs_setup", 1, 1, 8);
97 lcs_dbf_trace = debug_register("lcs_trace", 1, 2, 8);
98 if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
99 PRINT_ERR("Not enough memory for debug facility.\n");
100 lcs_unregister_debug_facility();
101 return -ENOMEM;
102 }
103 debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view);
104 debug_set_level(lcs_dbf_setup, 4);
105 debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view);
106 debug_set_level(lcs_dbf_trace, 4);
107 return 0;
108}
109
110/**
111 * Allocate io buffers.
112 */
113static int
114lcs_alloc_channel(struct lcs_channel *channel)
115{
116 int cnt;
117
118 LCS_DBF_TEXT(2, setup, "ichalloc");
119 for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
120		/* allocate memory for the io buffer */
121 channel->iob[cnt].data = (void *)
122 kmalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
123 if (channel->iob[cnt].data == NULL)
124 break;
125 memset(channel->iob[cnt].data, 0, LCS_IOBUFFERSIZE);
126 channel->iob[cnt].state = BUF_STATE_EMPTY;
127 }
128 if (cnt < LCS_NUM_BUFFS) {
129 /* Not all io buffers could be allocated. */
130 LCS_DBF_TEXT(2, setup, "echalloc");
131 while (cnt-- > 0)
132 kfree(channel->iob[cnt].data);
133 return -ENOMEM;
134 }
135 return 0;
136}
137
138/**
139 * Free io buffers.
140 */
141static void
142lcs_free_channel(struct lcs_channel *channel)
143{
144 int cnt;
145
146 LCS_DBF_TEXT(2, setup, "ichfree");
147 for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
148 if (channel->iob[cnt].data != NULL)
149 kfree(channel->iob[cnt].data);
150 channel->iob[cnt].data = NULL;
151 }
152}
153
154/*
155 * Cleanup channel.
156 */
157static void
158lcs_cleanup_channel(struct lcs_channel *channel)
159{
160 LCS_DBF_TEXT(3, setup, "cleanch");
161 /* Kill write channel tasklets. */
162 tasklet_kill(&channel->irq_tasklet);
163 /* Free channel buffers. */
164 lcs_free_channel(channel);
165}
166
167/**
168 * LCS free memory for card and channels.
169 */
170static void
171lcs_free_card(struct lcs_card *card)
172{
173 LCS_DBF_TEXT(2, setup, "remcard");
174 LCS_DBF_HEX(2, setup, &card, sizeof(void*));
175 kfree(card);
176}
177
178/**
179 * LCS alloc memory for card and channels
180 */
181static struct lcs_card *
182lcs_alloc_card(void)
183{
184 struct lcs_card *card;
185 int rc;
186
187 LCS_DBF_TEXT(2, setup, "alloclcs");
188
189 card = kmalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA);
190 if (card == NULL)
191 return NULL;
192 memset(card, 0, sizeof(struct lcs_card));
193 card->lan_type = LCS_FRAME_TYPE_AUTO;
194 card->pkt_seq = 0;
195 card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT;
196 /* Allocate io buffers for the read channel. */
197 rc = lcs_alloc_channel(&card->read);
198 if (rc){
199 LCS_DBF_TEXT(2, setup, "iccwerr");
200 lcs_free_card(card);
201 return NULL;
202 }
203 /* Allocate io buffers for the write channel. */
204 rc = lcs_alloc_channel(&card->write);
205 if (rc) {
206 LCS_DBF_TEXT(2, setup, "iccwerr");
207 lcs_cleanup_channel(&card->read);
208 lcs_free_card(card);
209 return NULL;
210 }
211
212#ifdef CONFIG_IP_MULTICAST
213 INIT_LIST_HEAD(&card->ipm_list);
214#endif
215 LCS_DBF_HEX(2, setup, &card, sizeof(void*));
216 return card;
217}
218
219/*
220 * Setup read channel.
221 */
222static void
223lcs_setup_read_ccws(struct lcs_card *card)
224{
225 int cnt;
226
227 LCS_DBF_TEXT(2, setup, "ireadccw");
228 /* Setup read ccws. */
229 memset(card->read.ccws, 0, sizeof (struct ccw1) * (LCS_NUM_BUFFS + 1));
230 for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
231 card->read.ccws[cnt].cmd_code = LCS_CCW_READ;
232 card->read.ccws[cnt].count = LCS_IOBUFFERSIZE;
233 card->read.ccws[cnt].flags =
234 CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI;
235 /*
236 * Note: we have allocated the buffer with GFP_DMA, so
237 * we do not need to do set_normalized_cda.
238 */
239 card->read.ccws[cnt].cda =
240 (__u32) __pa(card->read.iob[cnt].data);
241 ((struct lcs_header *)
242 card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
243 card->read.iob[cnt].callback = lcs_get_frames_cb;
244 card->read.iob[cnt].state = BUF_STATE_READY;
245 card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
246 }
247 card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
248 card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI;
249 card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
250 /* Last ccw is a tic (transfer in channel). */
251 card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
252 card->read.ccws[LCS_NUM_BUFFS].cda =
253 (__u32) __pa(card->read.ccws);
254	/* Set initial state of the read channel. */
255 card->read.state = CH_STATE_INIT;
256
257 card->read.io_idx = 0;
258 card->read.buf_idx = 0;
259}
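/*
 * Illustrative note (not in the original source): the channel program
 * built above is a ring of LCS_NUM_BUFFS READ CCWs chained with CC,
 * closed by a TIC back to the first CCW:
 *
 *   ccws[0] -> ccws[1] -> ... -> ccws[LCS_NUM_BUFFS-1] (SUSPEND) -> TIC
 *      ^-------------------------------------------------------------'
 *
 * PCI on the middle CCWs raises program-controlled interrupts so frames
 * can be processed while the channel keeps running; SUSPEND on the last
 * data CCW parks the channel until buffers have been recycled.
 */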
260
261static void
262lcs_setup_read(struct lcs_card *card)
263{
264 LCS_DBF_TEXT(3, setup, "initread");
265
266 lcs_setup_read_ccws(card);
267 /* Initialize read channel tasklet. */
268 card->read.irq_tasklet.data = (unsigned long) &card->read;
269 card->read.irq_tasklet.func = lcs_tasklet;
270 /* Initialize waitqueue. */
271 init_waitqueue_head(&card->read.wait_q);
272}
273
274/*
275 * Setup write channel.
276 */
277static void
278lcs_setup_write_ccws(struct lcs_card *card)
279{
280 int cnt;
281
282 LCS_DBF_TEXT(3, setup, "iwritccw");
283 /* Setup write ccws. */
 284 memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1));
285 for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
286 card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
287 card->write.ccws[cnt].count = 0;
288 card->write.ccws[cnt].flags =
289 CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI;
290 /*
291 * Note: we have allocated the buffer with GFP_DMA, so
292 * we do not need to do set_normalized_cda.
293 */
294 card->write.ccws[cnt].cda =
295 (__u32) __pa(card->write.iob[cnt].data);
296 }
297 /* Last ccw is a tic (transfer in channel). */
298 card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
299 card->write.ccws[LCS_NUM_BUFFS].cda =
300 (__u32) __pa(card->write.ccws);
301 /* Set initial state of the write channel. */
 302 card->write.state = CH_STATE_INIT;
303
304 card->write.io_idx = 0;
305 card->write.buf_idx = 0;
306}
307
308static void
309lcs_setup_write(struct lcs_card *card)
310{
311 LCS_DBF_TEXT(3, setup, "initwrit");
312
313 lcs_setup_write_ccws(card);
314 /* Initialize write channel tasklet. */
315 card->write.irq_tasklet.data = (unsigned long) &card->write;
316 card->write.irq_tasklet.func = lcs_tasklet;
317 /* Initialize waitqueue. */
318 init_waitqueue_head(&card->write.wait_q);
319}
320
321static void
322lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads)
323{
324 unsigned long flags;
325
326 spin_lock_irqsave(&card->mask_lock, flags);
327 card->thread_allowed_mask = threads;
328 spin_unlock_irqrestore(&card->mask_lock, flags);
329 wake_up(&card->wait_q);
330}
331static inline int
332lcs_threads_running(struct lcs_card *card, unsigned long threads)
333{
334 unsigned long flags;
335 int rc = 0;
336
337 spin_lock_irqsave(&card->mask_lock, flags);
338 rc = (card->thread_running_mask & threads);
339 spin_unlock_irqrestore(&card->mask_lock, flags);
340 return rc;
341}
342
343static int
344lcs_wait_for_threads(struct lcs_card *card, unsigned long threads)
345{
346 return wait_event_interruptible(card->wait_q,
347 lcs_threads_running(card, threads) == 0);
348}
349
350static inline int
351lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread)
352{
353 unsigned long flags;
354
355 spin_lock_irqsave(&card->mask_lock, flags);
356 if ( !(card->thread_allowed_mask & thread) ||
357 (card->thread_start_mask & thread) ) {
358 spin_unlock_irqrestore(&card->mask_lock, flags);
359 return -EPERM;
360 }
361 card->thread_start_mask |= thread;
362 spin_unlock_irqrestore(&card->mask_lock, flags);
363 return 0;
364}
365
366static void
367lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread)
368{
369 unsigned long flags;
370
371 spin_lock_irqsave(&card->mask_lock, flags);
372 card->thread_running_mask &= ~thread;
373 spin_unlock_irqrestore(&card->mask_lock, flags);
374 wake_up(&card->wait_q);
375}
376
377static inline int
378__lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
379{
380 unsigned long flags;
381 int rc = 0;
382
383 spin_lock_irqsave(&card->mask_lock, flags);
384 if (card->thread_start_mask & thread){
385 if ((card->thread_allowed_mask & thread) &&
386 !(card->thread_running_mask & thread)){
387 rc = 1;
388 card->thread_start_mask &= ~thread;
389 card->thread_running_mask |= thread;
390 } else
391 rc = -EPERM;
392 }
393 spin_unlock_irqrestore(&card->mask_lock, flags);
394 return rc;
395}
396
397static int
398lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
399{
400 int rc = 0;
401 wait_event(card->wait_q,
402 (rc = __lcs_do_run_thread(card, thread)) >= 0);
403 return rc;
404}
405
406static int
407lcs_do_start_thread(struct lcs_card *card, unsigned long thread)
408{
409 unsigned long flags;
410 int rc = 0;
411
412 spin_lock_irqsave(&card->mask_lock, flags);
413 LCS_DBF_TEXT_(4, trace, " %02x%02x%02x",
414 (u8) card->thread_start_mask,
415 (u8) card->thread_allowed_mask,
416 (u8) card->thread_running_mask);
417 rc = (card->thread_start_mask & thread);
418 spin_unlock_irqrestore(&card->mask_lock, flags);
419 return rc;
420}
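
/*
 * Illustrative sketch (not live code): typical lifecycle of a driver
 * thread built on the mask helpers above; all names are taken from
 * this file.
 */
#if 0
	/* requesting side, e.g. an irq or net_device callback: */
	if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD))
		schedule_work(&card->kernel_thread_starter);
	/* inside the spawned thread: */
	if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD))
		return 0;	/* not started - nothing to do */
	/* ... do the actual work ... */
	lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD);
#endif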
421
422/**
 423 * Initialize channels, card and state machines.
424 */
425static void
426lcs_setup_card(struct lcs_card *card)
427{
428 LCS_DBF_TEXT(2, setup, "initcard");
429 LCS_DBF_HEX(2, setup, &card, sizeof(void*));
430
431 lcs_setup_read(card);
432 lcs_setup_write(card);
433 /* Set cards initial state. */
434 card->state = DEV_STATE_DOWN;
435 card->tx_buffer = NULL;
436 card->tx_emitted = 0;
437
438 /* Initialize kernel thread task used for LGW commands. */
439 INIT_WORK(&card->kernel_thread_starter,
440 (void *)lcs_start_kernel_thread,card);
441 card->thread_start_mask = 0;
442 card->thread_allowed_mask = 0;
443 card->thread_running_mask = 0;
444 init_waitqueue_head(&card->wait_q);
445 spin_lock_init(&card->lock);
446 spin_lock_init(&card->ipm_lock);
447 spin_lock_init(&card->mask_lock);
448#ifdef CONFIG_IP_MULTICAST
449 INIT_LIST_HEAD(&card->ipm_list);
450#endif
451 INIT_LIST_HEAD(&card->lancmd_waiters);
452}
453
454static inline void
455lcs_clear_multicast_list(struct lcs_card *card)
456{
457#ifdef CONFIG_IP_MULTICAST
458 struct lcs_ipm_list *ipm;
459 unsigned long flags;
460
461 /* Free multicast list. */
462 LCS_DBF_TEXT(3, setup, "clmclist");
463 spin_lock_irqsave(&card->ipm_lock, flags);
464 while (!list_empty(&card->ipm_list)){
465 ipm = list_entry(card->ipm_list.next,
466 struct lcs_ipm_list, list);
467 list_del(&ipm->list);
468 if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED){
469 spin_unlock_irqrestore(&card->ipm_lock, flags);
470 lcs_send_delipm(card, ipm);
471 spin_lock_irqsave(&card->ipm_lock, flags);
472 }
473 kfree(ipm);
474 }
475 spin_unlock_irqrestore(&card->ipm_lock, flags);
476#endif
477}
478/**
 479 * Cleanup channels, card and state machines.
480 */
481static void
482lcs_cleanup_card(struct lcs_card *card)
483{
484
485 LCS_DBF_TEXT(3, setup, "cleancrd");
486 LCS_DBF_HEX(2,setup,&card,sizeof(void*));
487
488 if (card->dev != NULL)
489 free_netdev(card->dev);
490 /* Cleanup channels. */
491 lcs_cleanup_channel(&card->write);
492 lcs_cleanup_channel(&card->read);
493}
494
495/**
496 * Start channel.
497 */
498static int
499lcs_start_channel(struct lcs_channel *channel)
500{
501 unsigned long flags;
502 int rc;
503
504 LCS_DBF_TEXT_(4,trace,"ssch%s", channel->ccwdev->dev.bus_id);
505 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
506 rc = ccw_device_start(channel->ccwdev,
507 channel->ccws + channel->io_idx, 0, 0,
508 DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
509 if (rc == 0)
510 channel->state = CH_STATE_RUNNING;
511 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
512 if (rc) {
513 LCS_DBF_TEXT_(4,trace,"essh%s", channel->ccwdev->dev.bus_id);
514 PRINT_ERR("Error in starting channel, rc=%d!\n", rc);
515 }
516 return rc;
517}
518
519static int
520lcs_clear_channel(struct lcs_channel *channel)
521{
522 unsigned long flags;
523 int rc;
524
525 LCS_DBF_TEXT(4,trace,"clearch");
526 LCS_DBF_TEXT_(4,trace,"%s", channel->ccwdev->dev.bus_id);
527 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
528 rc = ccw_device_clear(channel->ccwdev, (addr_t) channel);
529 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
530 if (rc) {
531 LCS_DBF_TEXT_(4,trace,"ecsc%s", channel->ccwdev->dev.bus_id);
532 return rc;
533 }
534 wait_event(channel->wait_q, (channel->state == CH_STATE_CLEARED));
535 channel->state = CH_STATE_STOPPED;
536 return rc;
537}
538
539
540/**
541 * Stop channel.
542 */
543static int
544lcs_stop_channel(struct lcs_channel *channel)
545{
546 unsigned long flags;
547 int rc;
548
549 if (channel->state == CH_STATE_STOPPED)
550 return 0;
551 LCS_DBF_TEXT(4,trace,"haltsch");
552 LCS_DBF_TEXT_(4,trace,"%s", channel->ccwdev->dev.bus_id);
553 channel->state = CH_STATE_INIT;
554 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
555 rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
556 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
557 if (rc) {
558 LCS_DBF_TEXT_(4,trace,"ehsc%s", channel->ccwdev->dev.bus_id);
559 return rc;
560 }
 561 /* Asynchronous halt initiated. Wait for its completion. */
562 wait_event(channel->wait_q, (channel->state == CH_STATE_HALTED));
563 lcs_clear_channel(channel);
564 return 0;
565}
566
567/**
568 * start read and write channel
569 */
570static int
571lcs_start_channels(struct lcs_card *card)
572{
573 int rc;
574
575 LCS_DBF_TEXT(2, trace, "chstart");
576 /* start read channel */
577 rc = lcs_start_channel(&card->read);
578 if (rc)
579 return rc;
580 /* start write channel */
581 rc = lcs_start_channel(&card->write);
582 if (rc)
583 lcs_stop_channel(&card->read);
584 return rc;
585}
586
587/**
588 * stop read and write channel
589 */
590static int
591lcs_stop_channels(struct lcs_card *card)
592{
593 LCS_DBF_TEXT(2, trace, "chhalt");
594 lcs_stop_channel(&card->read);
595 lcs_stop_channel(&card->write);
596 return 0;
597}
598
599/**
600 * Get empty buffer.
601 */
602static struct lcs_buffer *
603__lcs_get_buffer(struct lcs_channel *channel)
604{
605 int index;
606
607 LCS_DBF_TEXT(5, trace, "_getbuff");
608 index = channel->io_idx;
609 do {
610 if (channel->iob[index].state == BUF_STATE_EMPTY) {
611 channel->iob[index].state = BUF_STATE_LOCKED;
612 return channel->iob + index;
613 }
614 index = (index + 1) & (LCS_NUM_BUFFS - 1);
615 } while (index != channel->io_idx);
616 return NULL;
617}
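
/*
 * Note (illustrative): the "& (LCS_NUM_BUFFS - 1)" wrap above assumes
 * LCS_NUM_BUFFS is a power of two, e.g. with 32 buffers
 * (31 + 1) & 31 == 0; the same ring arithmetic is used throughout
 * this file.
 */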
618
619static struct lcs_buffer *
620lcs_get_buffer(struct lcs_channel *channel)
621{
622 struct lcs_buffer *buffer;
623 unsigned long flags;
624
625 LCS_DBF_TEXT(5, trace, "getbuff");
626 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
627 buffer = __lcs_get_buffer(channel);
628 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
629 return buffer;
630}
631
632/**
633 * Resume channel program if the channel is suspended.
634 */
635static int
636__lcs_resume_channel(struct lcs_channel *channel)
637{
638 int rc;
639
640 if (channel->state != CH_STATE_SUSPENDED)
641 return 0;
642 if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
643 return 0;
644 LCS_DBF_TEXT_(5, trace, "rsch%s", channel->ccwdev->dev.bus_id);
645 rc = ccw_device_resume(channel->ccwdev);
646 if (rc) {
647 LCS_DBF_TEXT_(4, trace, "ersc%s", channel->ccwdev->dev.bus_id);
648 PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc);
649 } else
650 channel->state = CH_STATE_RUNNING;
651 return rc;
652
653}
654
655/**
656 * Make a buffer ready for processing.
657 */
658static inline void
659__lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
660{
661 int prev, next;
662
663 LCS_DBF_TEXT(5, trace, "rdybits");
664 prev = (index - 1) & (LCS_NUM_BUFFS - 1);
665 next = (index + 1) & (LCS_NUM_BUFFS - 1);
666 /* Check if we may clear the suspend bit of this buffer. */
667 if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) {
668 /* Check if we have to set the PCI bit. */
669 if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND))
670 /* Suspend bit of the previous buffer is not set. */
671 channel->ccws[index].flags |= CCW_FLAG_PCI;
672 /* Suspend bit of the next buffer is set. */
673 channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND;
674 }
675}
676
677static int
678lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
679{
680 unsigned long flags;
681 int index, rc;
682
683 LCS_DBF_TEXT(5, trace, "rdybuff");
684 if (buffer->state != BUF_STATE_LOCKED &&
685 buffer->state != BUF_STATE_PROCESSED)
686 BUG();
687 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
688 buffer->state = BUF_STATE_READY;
689 index = buffer - channel->iob;
690 /* Set length. */
691 channel->ccws[index].count = buffer->count;
692 /* Check relevant PCI/suspend bits. */
693 __lcs_ready_buffer_bits(channel, index);
694 rc = __lcs_resume_channel(channel);
695 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
696 return rc;
697}
698
699/**
700 * Mark the buffer as processed. Take care of the suspend bit
701 * of the previous buffer. This function is called from
702 * interrupt context, so the lock must not be taken.
703 */
704static int
705__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
706{
707 int index, prev, next;
708
709 LCS_DBF_TEXT(5, trace, "prcsbuff");
710 if (buffer->state != BUF_STATE_READY)
711 BUG();
712 buffer->state = BUF_STATE_PROCESSED;
713 index = buffer - channel->iob;
714 prev = (index - 1) & (LCS_NUM_BUFFS - 1);
715 next = (index + 1) & (LCS_NUM_BUFFS - 1);
716 /* Set the suspend bit and clear the PCI bit of this buffer. */
717 channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
718 channel->ccws[index].flags &= ~CCW_FLAG_PCI;
719 /* Check the suspend bit of the previous buffer. */
720 if (channel->iob[prev].state == BUF_STATE_READY) {
721 /*
722 * Previous buffer is in state ready. It might have
723 * happened in lcs_ready_buffer that the suspend bit
724 * has not been cleared to avoid an endless loop.
725 * Do it now.
726 */
727 __lcs_ready_buffer_bits(channel, prev);
728 }
729 /* Clear PCI bit of next buffer. */
730 channel->ccws[next].flags &= ~CCW_FLAG_PCI;
731 return __lcs_resume_channel(channel);
732}
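
/*
 * Note (illustrative): lcs_ready_buffer and __lcs_processed_buffer
 * together implement the ring protocol: READY buffers get their suspend
 * bit cleared so the channel runs into them, PROCESSED buffers get it
 * set again so the channel parks when it catches up, and a PCI bit just
 * before a suspended CCW gives the tasklet a chance to refill in time.
 */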
733
734/**
735 * Put a processed buffer back to state empty.
736 */
737static void
738lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
739{
740 unsigned long flags;
741
742 LCS_DBF_TEXT(5, trace, "relbuff");
743 if (buffer->state != BUF_STATE_LOCKED &&
744 buffer->state != BUF_STATE_PROCESSED)
745 BUG();
746 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
747 buffer->state = BUF_STATE_EMPTY;
748 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
749}
750
751/**
752 * Get buffer for a lan command.
753 */
754static struct lcs_buffer *
755lcs_get_lancmd(struct lcs_card *card, int count)
756{
757 struct lcs_buffer *buffer;
758 struct lcs_cmd *cmd;
759
760 LCS_DBF_TEXT(4, trace, "getlncmd");
761 /* Get buffer and wait if none is available. */
762 wait_event(card->write.wait_q,
763 ((buffer = lcs_get_buffer(&card->write)) != NULL));
764 count += sizeof(struct lcs_header);
765 *(__u16 *)(buffer->data + count) = 0;
766 buffer->count = count + sizeof(__u16);
767 buffer->callback = lcs_release_buffer;
768 cmd = (struct lcs_cmd *) buffer->data;
769 cmd->offset = count;
770 cmd->type = LCS_FRAME_TYPE_CONTROL;
771 cmd->slot = 0;
772 return buffer;
773}
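
/*
 * Note (illustrative): frames in a buffer are chained by their "offset"
 * field and a zero __u16 terminates the chain, which is why
 * lcs_get_lancmd reserves sizeof(__u16) behind the command data.
 */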
774
775
776static void
777lcs_get_reply(struct lcs_reply *reply)
778{
779 WARN_ON(atomic_read(&reply->refcnt) <= 0);
780 atomic_inc(&reply->refcnt);
781}
782
783static void
784lcs_put_reply(struct lcs_reply *reply)
785{
786 WARN_ON(atomic_read(&reply->refcnt) <= 0);
787 if (atomic_dec_and_test(&reply->refcnt)) {
788 kfree(reply);
789 }
790
791}
792
793static struct lcs_reply *
794lcs_alloc_reply(struct lcs_cmd *cmd)
795{
796 struct lcs_reply *reply;
797
798 LCS_DBF_TEXT(4, trace, "getreply");
799
800 reply = kmalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
801 if (!reply)
802 return NULL;
803 memset(reply,0,sizeof(struct lcs_reply));
804 atomic_set(&reply->refcnt,1);
805 reply->sequence_no = cmd->sequence_no;
806 reply->received = 0;
807 reply->rc = 0;
808 init_waitqueue_head(&reply->wait_q);
809
810 return reply;
811}
812
813/**
814 * Notifier function for lancmd replies. Called from read irq.
815 */
816static void
817lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
818{
819 struct list_head *l, *n;
820 struct lcs_reply *reply;
821
822 LCS_DBF_TEXT(4, trace, "notiwait");
823 spin_lock(&card->lock);
824 list_for_each_safe(l, n, &card->lancmd_waiters) {
825 reply = list_entry(l, struct lcs_reply, list);
826 if (reply->sequence_no == cmd->sequence_no) {
827 lcs_get_reply(reply);
828 list_del_init(&reply->list);
829 if (reply->callback != NULL)
830 reply->callback(card, cmd);
831 reply->received = 1;
832 reply->rc = cmd->return_code;
833 wake_up(&reply->wait_q);
834 lcs_put_reply(reply);
835 break;
836 }
837 }
838 spin_unlock(&card->lock);
839}
840
841/**
 842 * Timeout function for pending lan commands.
843 */
844void
845lcs_lancmd_timeout(unsigned long data)
846{
847 struct lcs_reply *reply, *list_reply, *r;
848 unsigned long flags;
849
850 LCS_DBF_TEXT(4, trace, "timeout");
851 reply = (struct lcs_reply *) data;
852 spin_lock_irqsave(&reply->card->lock, flags);
853 list_for_each_entry_safe(list_reply, r,
854 &reply->card->lancmd_waiters,list) {
855 if (reply == list_reply) {
856 lcs_get_reply(reply);
857 list_del_init(&reply->list);
858 spin_unlock_irqrestore(&reply->card->lock, flags);
859 reply->received = 1;
860 reply->rc = -ETIME;
861 wake_up(&reply->wait_q);
862 lcs_put_reply(reply);
863 return;
864 }
865 }
866 spin_unlock_irqrestore(&reply->card->lock, flags);
867}
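
/*
 * Note (illustrative): the reply refcount makes this timeout handler
 * safe against the read-irq path; both this function and
 * lcs_notify_lancmd_waiters detach the reply under card->lock and take
 * a reference first, so the loser of the race still sees a valid object.
 */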
868
869static int
870lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
871 void (*reply_callback)(struct lcs_card *, struct lcs_cmd *))
872{
873 struct lcs_reply *reply;
874 struct lcs_cmd *cmd;
875 struct timer_list timer;
876 unsigned long flags;
877 int rc;
878
879 LCS_DBF_TEXT(4, trace, "sendcmd");
880 cmd = (struct lcs_cmd *) buffer->data;
881 cmd->return_code = 0;
882 cmd->sequence_no = card->sequence_no++;
883 reply = lcs_alloc_reply(cmd);
884 if (!reply)
885 return -ENOMEM;
886 reply->callback = reply_callback;
887 reply->card = card;
888 spin_lock_irqsave(&card->lock, flags);
889 list_add_tail(&reply->list, &card->lancmd_waiters);
890 spin_unlock_irqrestore(&card->lock, flags);
891
892 buffer->callback = lcs_release_buffer;
 893 rc = lcs_ready_buffer(&card->write, buffer);
 894 if (rc) {
 /* Undo the list_add_tail above, otherwise the reply would leak. */
 spin_lock_irqsave(&card->lock, flags);
 list_del_init(&reply->list);
 spin_unlock_irqrestore(&card->lock, flags);
 lcs_put_reply(reply);
 895 return rc;
 }
896 init_timer(&timer);
897 timer.function = lcs_lancmd_timeout;
898 timer.data = (unsigned long) reply;
899 timer.expires = jiffies + HZ*card->lancmd_timeout;
900 add_timer(&timer);
901 wait_event(reply->wait_q, reply->received);
902 del_timer_sync(&timer);
903 LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc);
904 rc = reply->rc;
905 lcs_put_reply(reply);
906 return rc ? -EIO : 0;
907}
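
/*
 * Note (illustrative): lcs_send_lancmd is the synchronous command
 * primitive: the command goes out on the write channel, the reply is
 * matched by sequence_no in lcs_notify_lancmd_waiters (read-irq path),
 * and the timer turns a lost reply into -ETIME after
 * lancmd_timeout seconds.
 */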
908
909/**
910 * LCS startup command
911 */
912static int
913lcs_send_startup(struct lcs_card *card, __u8 initiator)
914{
915 struct lcs_buffer *buffer;
916 struct lcs_cmd *cmd;
917
918 LCS_DBF_TEXT(2, trace, "startup");
919 buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
920 cmd = (struct lcs_cmd *) buffer->data;
921 cmd->cmd_code = LCS_CMD_STARTUP;
922 cmd->initiator = initiator;
923 cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE;
924 return lcs_send_lancmd(card, buffer, NULL);
925}
926
927/**
928 * LCS shutdown command
929 */
930static int
931lcs_send_shutdown(struct lcs_card *card)
932{
933 struct lcs_buffer *buffer;
934 struct lcs_cmd *cmd;
935
936 LCS_DBF_TEXT(2, trace, "shutdown");
937 buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
938 cmd = (struct lcs_cmd *) buffer->data;
939 cmd->cmd_code = LCS_CMD_SHUTDOWN;
940 cmd->initiator = LCS_INITIATOR_TCPIP;
941 return lcs_send_lancmd(card, buffer, NULL);
942}
943
944/**
945 * LCS lanstat command
946 */
947static void
948__lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd)
949{
950 LCS_DBF_TEXT(2, trace, "statcb");
951 memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH);
952}
953
954static int
955lcs_send_lanstat(struct lcs_card *card)
956{
957 struct lcs_buffer *buffer;
958 struct lcs_cmd *cmd;
959
960 LCS_DBF_TEXT(2,trace, "cmdstat");
961 buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
962 cmd = (struct lcs_cmd *) buffer->data;
963 /* Setup lanstat command. */
964 cmd->cmd_code = LCS_CMD_LANSTAT;
965 cmd->initiator = LCS_INITIATOR_TCPIP;
966 cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
967 cmd->cmd.lcs_std_cmd.portno = card->portno;
968 return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb);
969}
970
971/**
972 * send stoplan command
973 */
974static int
975lcs_send_stoplan(struct lcs_card *card, __u8 initiator)
976{
977 struct lcs_buffer *buffer;
978 struct lcs_cmd *cmd;
979
980 LCS_DBF_TEXT(2, trace, "cmdstpln");
981 buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
982 cmd = (struct lcs_cmd *) buffer->data;
983 cmd->cmd_code = LCS_CMD_STOPLAN;
984 cmd->initiator = initiator;
985 cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
986 cmd->cmd.lcs_std_cmd.portno = card->portno;
987 return lcs_send_lancmd(card, buffer, NULL);
988}
989
990/**
991 * send startlan command
992 */
993static void
994__lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd)
995{
996 LCS_DBF_TEXT(2, trace, "srtlancb");
997 card->lan_type = cmd->cmd.lcs_std_cmd.lan_type;
998 card->portno = cmd->cmd.lcs_std_cmd.portno;
999}
1000
1001static int
1002lcs_send_startlan(struct lcs_card *card, __u8 initiator)
1003{
1004 struct lcs_buffer *buffer;
1005 struct lcs_cmd *cmd;
1006
1007 LCS_DBF_TEXT(2, trace, "cmdstaln");
1008 buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
1009 cmd = (struct lcs_cmd *) buffer->data;
1010 cmd->cmd_code = LCS_CMD_STARTLAN;
1011 cmd->initiator = initiator;
1012 cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
1013 cmd->cmd.lcs_std_cmd.portno = card->portno;
1014 return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb);
1015}
1016
1017#ifdef CONFIG_IP_MULTICAST
1018/**
1019 * send setipm command (Multicast)
1020 */
1021static int
1022lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
1023{
1024 struct lcs_buffer *buffer;
1025 struct lcs_cmd *cmd;
1026
1027 LCS_DBF_TEXT(2, trace, "cmdsetim");
1028 buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
1029 cmd = (struct lcs_cmd *) buffer->data;
1030 cmd->cmd_code = LCS_CMD_SETIPM;
1031 cmd->initiator = LCS_INITIATOR_TCPIP;
1032 cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
1033 cmd->cmd.lcs_qipassist.portno = card->portno;
1034 cmd->cmd.lcs_qipassist.version = 4;
1035 cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
1036 memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
1037 &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
1038 LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
1039 return lcs_send_lancmd(card, buffer, NULL);
1040}
1041
1042/**
1043 * send delipm command (Multicast)
1044 */
1045static int
1046lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
1047{
1048 struct lcs_buffer *buffer;
1049 struct lcs_cmd *cmd;
1050
1051 LCS_DBF_TEXT(2, trace, "cmddelim");
1052 buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
1053 cmd = (struct lcs_cmd *) buffer->data;
1054 cmd->cmd_code = LCS_CMD_DELIPM;
1055 cmd->initiator = LCS_INITIATOR_TCPIP;
1056 cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
1057 cmd->cmd.lcs_qipassist.portno = card->portno;
1058 cmd->cmd.lcs_qipassist.version = 4;
1059 cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
1060 memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
1061 &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
1062 LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
1063 return lcs_send_lancmd(card, buffer, NULL);
1064}
1065
1066/**
1067 * check if multicast is supported by LCS
1068 */
1069static void
1070__lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd)
1071{
1072 LCS_DBF_TEXT(2, trace, "chkmccb");
1073 card->ip_assists_supported =
1074 cmd->cmd.lcs_qipassist.ip_assists_supported;
1075 card->ip_assists_enabled =
1076 cmd->cmd.lcs_qipassist.ip_assists_enabled;
1077}
1078
1079static int
1080lcs_check_multicast_support(struct lcs_card *card)
1081{
1082 struct lcs_buffer *buffer;
1083 struct lcs_cmd *cmd;
1084 int rc;
1085
1086 LCS_DBF_TEXT(2, trace, "cmdqipa");
1087 /* Send query ipassist. */
1088 buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
1089 cmd = (struct lcs_cmd *) buffer->data;
1090 cmd->cmd_code = LCS_CMD_QIPASSIST;
1091 cmd->initiator = LCS_INITIATOR_TCPIP;
1092 cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
1093 cmd->cmd.lcs_qipassist.portno = card->portno;
1094 cmd->cmd.lcs_qipassist.version = 4;
1095 cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
1096 rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb);
1097 if (rc != 0) {
1098 PRINT_ERR("Query IPAssist failed. Assuming unsupported!\n");
1099 return -EOPNOTSUPP;
1100 }
1101 /* Print out supported assists: IPv6 */
1102 PRINT_INFO("LCS device %s %s IPv6 support\n", card->dev->name,
1103 (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
1104 "with" : "without");
1105 /* Print out supported assist: Multicast */
1106 PRINT_INFO("LCS device %s %s Multicast support\n", card->dev->name,
1107 (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
1108 "with" : "without");
1109 if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT)
1110 return 0;
1111 return -EOPNOTSUPP;
1112}
1113
1114/**
1115 * set or del multicast address on LCS card
1116 */
1117static void
1118lcs_fix_multicast_list(struct lcs_card *card)
1119{
1120 struct list_head failed_list;
1121 struct lcs_ipm_list *ipm, *tmp;
1122 unsigned long flags;
1123 int rc;
1124
1125 LCS_DBF_TEXT(4,trace, "fixipm");
1126 INIT_LIST_HEAD(&failed_list);
1127 spin_lock_irqsave(&card->ipm_lock, flags);
1128list_modified:
1129 list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){
1130 switch (ipm->ipm_state) {
1131 case LCS_IPM_STATE_SET_REQUIRED:
 1132 /* del from ipm_list so no one else can tamper with
1133 * this entry */
1134 list_del_init(&ipm->list);
1135 spin_unlock_irqrestore(&card->ipm_lock, flags);
1136 rc = lcs_send_setipm(card, ipm);
1137 spin_lock_irqsave(&card->ipm_lock, flags);
1138 if (rc) {
1139 PRINT_INFO("Adding multicast address failed."
1140 "Table possibly full!\n");
1141 /* store ipm in failed list -> will be added
1142 * to ipm_list again, so a retry will be done
1143 * during the next call of this function */
1144 list_add_tail(&ipm->list, &failed_list);
1145 } else {
1146 ipm->ipm_state = LCS_IPM_STATE_ON_CARD;
1147 /* re-insert into ipm_list */
1148 list_add_tail(&ipm->list, &card->ipm_list);
1149 }
1150 goto list_modified;
1151 case LCS_IPM_STATE_DEL_REQUIRED:
1152 list_del(&ipm->list);
1153 spin_unlock_irqrestore(&card->ipm_lock, flags);
1154 lcs_send_delipm(card, ipm);
1155 spin_lock_irqsave(&card->ipm_lock, flags);
1156 kfree(ipm);
1157 goto list_modified;
1158 case LCS_IPM_STATE_ON_CARD:
1159 break;
1160 }
1161 }
1162 /* re-insert all entries from the failed_list into ipm_list */
1163 list_for_each_entry(ipm, &failed_list, list) {
1164 list_del_init(&ipm->list);
1165 list_add_tail(&ipm->list, &card->ipm_list);
1166 }
1167 spin_unlock_irqrestore(&card->ipm_lock, flags);
1168 if (card->state == DEV_STATE_UP)
1169 netif_wake_queue(card->dev);
1170}
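
/*
 * Note (illustrative): the "goto list_modified" rescan is deliberate:
 * ipm_lock must be dropped around lcs_send_setipm/lcs_send_delipm
 * because they sleep, and the list may have changed once the lock is
 * re-acquired, so iteration restarts from the head.
 */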
1171
1172/**
 1173 * map an IPv4 multicast address to the corresponding mac address
1174 */
1175static void
1176lcs_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev)
1177{
1178 LCS_DBF_TEXT(4,trace, "getmac");
1179 if (dev->type == ARPHRD_IEEE802_TR)
1180 ip_tr_mc_map(ipm, mac);
1181 else
1182 ip_eth_mc_map(ipm, mac);
1183}
1184
1185/**
 1186 * mark multicast entries for deletion that vanished from the kernel's list
1187 */
1188static inline void
1189lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
1190{
1191 struct ip_mc_list *im4;
1192 struct list_head *l;
1193 struct lcs_ipm_list *ipm;
1194 unsigned long flags;
1195 char buf[MAX_ADDR_LEN];
1196
1197 LCS_DBF_TEXT(4, trace, "remmclst");
1198 spin_lock_irqsave(&card->ipm_lock, flags);
1199 list_for_each(l, &card->ipm_list) {
1200 ipm = list_entry(l, struct lcs_ipm_list, list);
1201 for (im4 = in4_dev->mc_list; im4 != NULL; im4 = im4->next) {
1202 lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
1203 if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
1204 (memcmp(buf, &ipm->ipm.mac_addr,
1205 LCS_MAC_LENGTH) == 0) )
1206 break;
1207 }
1208 if (im4 == NULL)
1209 ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED;
1210 }
1211 spin_unlock_irqrestore(&card->ipm_lock, flags);
1212}
1213
1214static inline struct lcs_ipm_list *
1215lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf)
1216{
1217 struct lcs_ipm_list *tmp, *ipm = NULL;
1218 struct list_head *l;
1219 unsigned long flags;
1220
1221 LCS_DBF_TEXT(4, trace, "chkmcent");
1222 spin_lock_irqsave(&card->ipm_lock, flags);
1223 list_for_each(l, &card->ipm_list) {
1224 tmp = list_entry(l, struct lcs_ipm_list, list);
1225 if ( (tmp->ipm.ip_addr == im4->multiaddr) &&
1226 (memcmp(buf, &tmp->ipm.mac_addr,
1227 LCS_MAC_LENGTH) == 0) ) {
1228 ipm = tmp;
1229 break;
1230 }
1231 }
1232 spin_unlock_irqrestore(&card->ipm_lock, flags);
1233 return ipm;
1234}
1235
1236static inline void
1237lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
1238{
1239
1240 struct ip_mc_list *im4;
1241 struct lcs_ipm_list *ipm;
1242 char buf[MAX_ADDR_LEN];
1243 unsigned long flags;
1244
1245 LCS_DBF_TEXT(4, trace, "setmclst");
1246 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
1247 lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
1248 ipm = lcs_check_addr_entry(card, im4, buf);
1249 if (ipm != NULL)
1250 continue; /* Address already in list. */
1251 ipm = (struct lcs_ipm_list *)
1252 kmalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
1253 if (ipm == NULL) {
1254 PRINT_INFO("Not enough memory to add "
1255 "new multicast entry!\n");
1256 break;
1257 }
1258 memset(ipm, 0, sizeof(struct lcs_ipm_list));
1259 memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH);
1260 ipm->ipm.ip_addr = im4->multiaddr;
1261 ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED;
1262 spin_lock_irqsave(&card->ipm_lock, flags);
1263 list_add(&ipm->list, &card->ipm_list);
1264 spin_unlock_irqrestore(&card->ipm_lock, flags);
1265 }
1266}
1267
1268static int
1269lcs_register_mc_addresses(void *data)
1270{
1271 struct lcs_card *card;
1272 struct in_device *in4_dev;
1273
1274 card = (struct lcs_card *) data;
1275 daemonize("regipm");
1276
1277 if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD))
1278 return 0;
1279 LCS_DBF_TEXT(4, trace, "regmulti");
1280
1281 in4_dev = in_dev_get(card->dev);
1282 if (in4_dev == NULL)
1283 goto out;
1284 read_lock(&in4_dev->mc_list_lock);
1285 lcs_remove_mc_addresses(card,in4_dev);
1286 lcs_set_mc_addresses(card, in4_dev);
1287 read_unlock(&in4_dev->mc_list_lock);
1288 in_dev_put(in4_dev);
1289
1290 lcs_fix_multicast_list(card);
1291out:
1292 lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD);
1293 return 0;
1294}
1295/**
 1296 * set_multicast_list callback of the net device:
 1297 * schedules the thread that updates the card's multicast table
1298 */
1299static void
1300lcs_set_multicast_list(struct net_device *dev)
1301{
1302 struct lcs_card *card;
1303
1304 LCS_DBF_TEXT(4, trace, "setmulti");
1305 card = (struct lcs_card *) dev->priv;
1306
1307 if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) {
1308 schedule_work(&card->kernel_thread_starter);
1309 }
1310}
1311
1312#endif /* CONFIG_IP_MULTICAST */
1313
1314static long
1315lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
1316{
1317 if (!IS_ERR(irb))
1318 return 0;
1319
1320 switch (PTR_ERR(irb)) {
1321 case -EIO:
1322 PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
1323 LCS_DBF_TEXT(2, trace, "ckirberr");
1324 LCS_DBF_TEXT_(2, trace, " rc%d", -EIO);
1325 break;
1326 case -ETIMEDOUT:
1327 PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
1328 LCS_DBF_TEXT(2, trace, "ckirberr");
1329 LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT);
1330 break;
1331 default:
1332 PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
1333 cdev->dev.bus_id);
1334 LCS_DBF_TEXT(2, trace, "ckirberr");
1335 LCS_DBF_TEXT(2, trace, " rc???");
1336 }
1337 return PTR_ERR(irb);
1338}
1339
1340
1341/**
1342 * IRQ Handler for LCS channels
1343 */
1344static void
1345lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1346{
1347 struct lcs_card *card;
1348 struct lcs_channel *channel;
1349 int index;
1350
1351 if (lcs_check_irb_error(cdev, irb))
1352 return;
1353
1354 card = CARD_FROM_DEV(cdev);
1355 if (card->read.ccwdev == cdev)
1356 channel = &card->read;
1357 else
1358 channel = &card->write;
1359
1360 LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id);
1361 LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.cstat, irb->scsw.dstat);
1362 LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.fctl, irb->scsw.actl);
1363
1364 /* How far in the ccw chain have we processed? */
1365 if ((channel->state != CH_STATE_INIT) &&
1366 (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
1367 index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa)
1368 - channel->ccws;
1369 if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) ||
 1370 (irb->scsw.cstat & SCHN_STAT_PCI))
1371 /* Bloody io subsystem tells us lies about cpa... */
1372 index = (index - 1) & (LCS_NUM_BUFFS - 1);
1373 while (channel->io_idx != index) {
1374 __lcs_processed_buffer(channel,
1375 channel->iob + channel->io_idx);
1376 channel->io_idx =
1377 (channel->io_idx + 1) & (LCS_NUM_BUFFS - 1);
1378 }
1379 }
1380
1381 if ((irb->scsw.dstat & DEV_STAT_DEV_END) ||
1382 (irb->scsw.dstat & DEV_STAT_CHN_END) ||
1383 (irb->scsw.dstat & DEV_STAT_UNIT_CHECK))
1384 /* Mark channel as stopped. */
1385 channel->state = CH_STATE_STOPPED;
1386 else if (irb->scsw.actl & SCSW_ACTL_SUSPENDED)
1387 /* CCW execution stopped on a suspend bit. */
1388 channel->state = CH_STATE_SUSPENDED;
1389
1390 if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) {
1391 if (irb->scsw.cc != 0) {
1392 ccw_device_halt(channel->ccwdev, (addr_t) channel);
1393 return;
1394 }
1395 /* The channel has been stopped by halt_IO. */
1396 channel->state = CH_STATE_HALTED;
1397 }
1398
1399 if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
1400 channel->state = CH_STATE_CLEARED;
1401 }
1402 /* Do the rest in the tasklet. */
1403 tasklet_schedule(&channel->irq_tasklet);
1404}
1405
1406/**
1407 * Tasklet for IRQ handler
1408 */
1409static void
1410lcs_tasklet(unsigned long data)
1411{
1412 unsigned long flags;
1413 struct lcs_channel *channel;
1414 struct lcs_buffer *iob;
1415 int buf_idx;
1416 int rc;
1417
1418 channel = (struct lcs_channel *) data;
1419 LCS_DBF_TEXT_(5, trace, "tlet%s",channel->ccwdev->dev.bus_id);
1420
1421 /* Check for processed buffers. */
1422 iob = channel->iob;
1423 buf_idx = channel->buf_idx;
1424 while (iob[buf_idx].state == BUF_STATE_PROCESSED) {
1425 /* Do the callback thing. */
1426 if (iob[buf_idx].callback != NULL)
1427 iob[buf_idx].callback(channel, iob + buf_idx);
1428 buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1);
1429 }
1430 channel->buf_idx = buf_idx;
1431
1432 if (channel->state == CH_STATE_STOPPED)
1433 // FIXME: what if rc != 0 ??
1434 rc = lcs_start_channel(channel);
1435 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1436 if (channel->state == CH_STATE_SUSPENDED &&
1437 channel->iob[channel->io_idx].state == BUF_STATE_READY) {
1438 // FIXME: what if rc != 0 ??
1439 rc = __lcs_resume_channel(channel);
1440 }
1441 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1442
1443 /* Something happened on the channel. Wake up waiters. */
1444 wake_up(&channel->wait_q);
1445}
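
/*
 * Note (illustrative): the tasklet runs everything that must not happen
 * in the irq handler itself: buffer callbacks, restarting a stopped
 * channel and resuming a suspended one.
 */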
1446
1447/**
1448 * Finish current tx buffer and make it ready for transmit.
1449 */
1450static void
1451__lcs_emit_txbuffer(struct lcs_card *card)
1452{
1453 LCS_DBF_TEXT(5, trace, "emittx");
1454 *(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0;
1455 card->tx_buffer->count += 2;
1456 lcs_ready_buffer(&card->write, card->tx_buffer);
1457 card->tx_buffer = NULL;
1458 card->tx_emitted++;
1459}
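
/*
 * Note (illustrative): the zero __u16 written above terminates the frame
 * chain in the buffer; tx_emitted counts buffers on the wire so that
 * lcs_txbuffer_cb can flush a partially filled tx_buffer as soon as the
 * last outstanding one completes.
 */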
1460
1461/**
1462 * Callback for finished tx buffers.
1463 */
1464static void
1465lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
1466{
1467 struct lcs_card *card;
1468
1469 LCS_DBF_TEXT(5, trace, "txbuffcb");
1470 /* Put buffer back to pool. */
1471 lcs_release_buffer(channel, buffer);
1472 card = (struct lcs_card *)
1473 ((char *) channel - offsetof(struct lcs_card, write));
1474 spin_lock(&card->lock);
1475 card->tx_emitted--;
1476 if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
1477 /*
1478 * Last running tx buffer has finished. Submit partially
1479 * filled current buffer.
1480 */
1481 __lcs_emit_txbuffer(card);
1482 spin_unlock(&card->lock);
1483}
1484
1485/**
1486 * Packet transmit function called by network stack
1487 */
1488static int
1489__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
1490 struct net_device *dev)
1491{
1492 struct lcs_header *header;
1493
1494 LCS_DBF_TEXT(5, trace, "hardxmit");
1495 if (skb == NULL) {
1496 card->stats.tx_dropped++;
1497 card->stats.tx_errors++;
1498 return -EIO;
1499 }
1500 if (card->state != DEV_STATE_UP) {
1501 dev_kfree_skb(skb);
1502 card->stats.tx_dropped++;
1503 card->stats.tx_errors++;
1504 card->stats.tx_carrier_errors++;
1505 return 0;
1506 }
1507 if (netif_queue_stopped(dev) ) {
1508 card->stats.tx_dropped++;
1509 return -EBUSY;
1510 }
1511 if (card->tx_buffer != NULL &&
1512 card->tx_buffer->count + sizeof(struct lcs_header) +
1513 skb->len + sizeof(u16) > LCS_IOBUFFERSIZE)
1514 /* skb too big for current tx buffer. */
1515 __lcs_emit_txbuffer(card);
1516 if (card->tx_buffer == NULL) {
1517 /* Get new tx buffer */
1518 card->tx_buffer = lcs_get_buffer(&card->write);
1519 if (card->tx_buffer == NULL) {
1520 card->stats.tx_dropped++;
1521 return -EBUSY;
1522 }
1523 card->tx_buffer->callback = lcs_txbuffer_cb;
1524 card->tx_buffer->count = 0;
1525 }
1526 header = (struct lcs_header *)
1527 (card->tx_buffer->data + card->tx_buffer->count);
1528 card->tx_buffer->count += skb->len + sizeof(struct lcs_header);
1529 header->offset = card->tx_buffer->count;
1530 header->type = card->lan_type;
1531 header->slot = card->portno;
1532 memcpy(header + 1, skb->data, skb->len);
1533 card->stats.tx_bytes += skb->len;
1534 card->stats.tx_packets++;
1535 dev_kfree_skb(skb);
1536 if (card->tx_emitted <= 0)
1537 /* If this is the first tx buffer emit it immediately. */
1538 __lcs_emit_txbuffer(card);
1539 return 0;
1540}
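
/*
 * Note (illustrative): transmit is copy-based: the skb payload is copied
 * behind an lcs_header into the DMA-able channel buffer and the skb is
 * freed right away, so several small packets can share one buffer until
 * it fills up or no buffer is left.
 */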
1541
1542static int
1543lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
1544{
1545 struct lcs_card *card;
1546 int rc;
1547
1548 LCS_DBF_TEXT(5, trace, "pktxmit");
1549 card = (struct lcs_card *) dev->priv;
1550 spin_lock(&card->lock);
1551 rc = __lcs_start_xmit(card, skb, dev);
1552 spin_unlock(&card->lock);
1553 return rc;
1554}
1555
1556/**
1557 * send startlan and lanstat command to make LCS device ready
1558 */
1559static int
1560lcs_startlan_auto(struct lcs_card *card)
1561{
1562 int rc;
1563
1564 LCS_DBF_TEXT(2, trace, "strtauto");
1565#ifdef CONFIG_NET_ETHERNET
1566 card->lan_type = LCS_FRAME_TYPE_ENET;
1567 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1568 if (rc == 0)
1569 return 0;
1570
1571#endif
1572#ifdef CONFIG_TR
1573 card->lan_type = LCS_FRAME_TYPE_TR;
1574 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1575 if (rc == 0)
1576 return 0;
1577#endif
1578#ifdef CONFIG_FDDI
1579 card->lan_type = LCS_FRAME_TYPE_FDDI;
1580 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1581 if (rc == 0)
1582 return 0;
1583#endif
1584 return -EIO;
1585}
1586
1587static int
1588lcs_startlan(struct lcs_card *card)
1589{
1590 int rc, i;
1591
1592 LCS_DBF_TEXT(2, trace, "startlan");
1593 rc = 0;
1594 if (card->portno != LCS_INVALID_PORT_NO) {
1595 if (card->lan_type == LCS_FRAME_TYPE_AUTO)
1596 rc = lcs_startlan_auto(card);
1597 else
1598 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1599 } else {
1600 for (i = 0; i <= 16; i++) {
1601 card->portno = i;
1602 if (card->lan_type != LCS_FRAME_TYPE_AUTO)
1603 rc = lcs_send_startlan(card,
1604 LCS_INITIATOR_TCPIP);
1605 else
1606 /* autodetecting lan type */
1607 rc = lcs_startlan_auto(card);
1608 if (rc == 0)
1609 break;
1610 }
1611 }
1612 if (rc == 0)
1613 return lcs_send_lanstat(card);
1614 return rc;
1615}
1616
1617/**
1618 * LCS detect function
1619 * setup channels and make them I/O ready
1620 */
1621static int
1622lcs_detect(struct lcs_card *card)
1623{
1624 int rc = 0;
1625
1626 LCS_DBF_TEXT(2, setup, "lcsdetct");
1627 /* start/reset card */
1628 if (card->dev)
1629 netif_stop_queue(card->dev);
1630 rc = lcs_stop_channels(card);
1631 if (rc == 0) {
1632 rc = lcs_start_channels(card);
1633 if (rc == 0) {
1634 rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP);
1635 if (rc == 0)
1636 rc = lcs_startlan(card);
1637 }
1638 }
1639 if (rc == 0) {
1640 card->state = DEV_STATE_UP;
1641 } else {
1642 card->state = DEV_STATE_DOWN;
1643 card->write.state = CH_STATE_INIT;
1644 card->read.state = CH_STATE_INIT;
1645 }
1646 return rc;
1647}
1648
1649/**
1650 * reset card
1651 */
1652static int
1653lcs_resetcard(struct lcs_card *card)
1654{
1655 int retries;
1656
1657 LCS_DBF_TEXT(2, trace, "rescard");
1658 for (retries = 0; retries < 10; retries++) {
1659 if (lcs_detect(card) == 0) {
1660 netif_wake_queue(card->dev);
1661 card->state = DEV_STATE_UP;
1662 PRINT_INFO("LCS device %s successfully restarted!\n",
1663 card->dev->name);
1664 return 0;
1665 }
1666 msleep(3000);
1667 }
1668 PRINT_ERR("Error in Reseting LCS card!\n");
1669 return -EIO;
1670}
1671
1672
1673/**
1674 * LCS Stop card
1675 */
1676static int
1677lcs_stopcard(struct lcs_card *card)
1678{
1679 int rc;
1680
1681 LCS_DBF_TEXT(3, setup, "stopcard");
1682
1683 if (card->read.state != CH_STATE_STOPPED &&
1684 card->write.state != CH_STATE_STOPPED &&
1685 card->state == DEV_STATE_UP) {
1686 lcs_clear_multicast_list(card);
1687 rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP);
1688 rc = lcs_send_shutdown(card);
1689 }
1690 rc = lcs_stop_channels(card);
1691 card->state = DEV_STATE_DOWN;
1692
1693 return rc;
1694}
1695
1696/**
1697 * LGW initiated commands
1698 */
1699static int
1700lcs_lgw_startlan_thread(void *data)
1701{
1702 struct lcs_card *card;
1703
1704 card = (struct lcs_card *) data;
1705 daemonize("lgwstpln");
1706
1707 if (!lcs_do_run_thread(card, LCS_STARTLAN_THREAD))
1708 return 0;
1709 LCS_DBF_TEXT(4, trace, "lgwstpln");
1710 if (card->dev)
1711 netif_stop_queue(card->dev);
1712 if (lcs_startlan(card) == 0) {
1713 netif_wake_queue(card->dev);
1714 card->state = DEV_STATE_UP;
1715 PRINT_INFO("LCS Startlan for device %s succeeded!\n",
1716 card->dev->name);
1717
1718 } else
1719 PRINT_ERR("LCS Startlan for device %s failed!\n",
1720 card->dev->name);
1721 lcs_clear_thread_running_bit(card, LCS_STARTLAN_THREAD);
1722 return 0;
1723}
1724
1725/**
1726 * Send startup command initiated by Lan Gateway
1727 */
1728static int
1729lcs_lgw_startup_thread(void *data)
1730{
1731 int rc;
1732
1733 struct lcs_card *card;
1734
1735 card = (struct lcs_card *) data;
1736 daemonize("lgwstaln");
1737
1738 if (!lcs_do_run_thread(card, LCS_STARTUP_THREAD))
1739 return 0;
1740 LCS_DBF_TEXT(4, trace, "lgwstaln");
1741 if (card->dev)
1742 netif_stop_queue(card->dev);
1743 rc = lcs_send_startup(card, LCS_INITIATOR_LGW);
1744 if (rc != 0) {
1745 PRINT_ERR("Startup for LCS device %s initiated " \
1746 "by LGW failed!\nReseting card ...\n",
1747 card->dev->name);
1748 /* do a card reset */
1749 rc = lcs_resetcard(card);
1750 if (rc == 0)
1751 goto Done;
1752 }
1753 rc = lcs_startlan(card);
1754 if (rc == 0) {
1755 netif_wake_queue(card->dev);
1756 card->state = DEV_STATE_UP;
1757 }
1758Done:
1759 if (rc == 0)
1760 PRINT_INFO("LCS Startup for device %s succeeded!\n",
1761 card->dev->name);
1762 else
1763 PRINT_ERR("LCS Startup for device %s failed!\n",
1764 card->dev->name);
1765 lcs_clear_thread_running_bit(card, LCS_STARTUP_THREAD);
1766 return 0;
1767}
1768
1769
1770/**
1771 * send stoplan command initiated by Lan Gateway
1772 */
1773static int
1774lcs_lgw_stoplan_thread(void *data)
1775{
1776 struct lcs_card *card;
1777 int rc;
1778
1779 card = (struct lcs_card *) data;
1780 daemonize("lgwstop");
1781
1782 if (!lcs_do_run_thread(card, LCS_STOPLAN_THREAD))
1783 return 0;
1784 LCS_DBF_TEXT(4, trace, "lgwstop");
1785 if (card->dev)
1786 netif_stop_queue(card->dev);
1787 if (lcs_send_stoplan(card, LCS_INITIATOR_LGW) == 0)
1788 PRINT_INFO("Stoplan for %s initiated by LGW succeeded!\n",
1789 card->dev->name);
1790 else
1791 PRINT_ERR("Stoplan %s initiated by LGW failed!\n",
1792 card->dev->name);
 1793 /* Try to reset the card, stop it on failure. */
1794 rc = lcs_resetcard(card);
1795 if (rc != 0)
1796 rc = lcs_stopcard(card);
1797 lcs_clear_thread_running_bit(card, LCS_STOPLAN_THREAD);
1798 return rc;
1799}
1800
1801/**
1802 * Kernel Thread helper functions for LGW initiated commands
1803 */
1804static void
1805lcs_start_kernel_thread(struct lcs_card *card)
1806{
1807 LCS_DBF_TEXT(5, trace, "krnthrd");
1808 if (lcs_do_start_thread(card, LCS_STARTUP_THREAD))
1809 kernel_thread(lcs_lgw_startup_thread, (void *) card, SIGCHLD);
1810 if (lcs_do_start_thread(card, LCS_STARTLAN_THREAD))
1811 kernel_thread(lcs_lgw_startlan_thread, (void *) card, SIGCHLD);
1812 if (lcs_do_start_thread(card, LCS_STOPLAN_THREAD))
1813 kernel_thread(lcs_lgw_stoplan_thread, (void *) card, SIGCHLD);
1814#ifdef CONFIG_IP_MULTICAST
1815 if (lcs_do_start_thread(card, LCS_SET_MC_THREAD))
1816 kernel_thread(lcs_register_mc_addresses, (void *) card, SIGCHLD);
1817#endif
1818}
1819
1820/**
1821 * Process control frames.
1822 */
1823static void
1824lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
1825{
1826 LCS_DBF_TEXT(5, trace, "getctrl");
1827 if (cmd->initiator == LCS_INITIATOR_LGW) {
1828 switch(cmd->cmd_code) {
1829 case LCS_CMD_STARTUP:
1830 if (!lcs_set_thread_start_bit(card,
1831 LCS_STARTUP_THREAD))
1832 schedule_work(&card->kernel_thread_starter);
1833 break;
1834 case LCS_CMD_STARTLAN:
1835 if (!lcs_set_thread_start_bit(card,
1836 LCS_STARTLAN_THREAD))
1837 schedule_work(&card->kernel_thread_starter);
1838 break;
1839 case LCS_CMD_STOPLAN:
1840 if (!lcs_set_thread_start_bit(card,
1841 LCS_STOPLAN_THREAD))
1842 schedule_work(&card->kernel_thread_starter);
1843 break;
1844 default:
1845 PRINT_INFO("UNRECOGNIZED LGW COMMAND\n");
1846 break;
1847 }
1848 } else
1849 lcs_notify_lancmd_waiters(card, cmd);
1850}
1851
1852/**
1853 * Unpack network packet.
1854 */
1855static void
1856lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
1857{
1858 struct sk_buff *skb;
1859
1860 LCS_DBF_TEXT(5, trace, "getskb");
1861 if (card->dev == NULL ||
1862 card->state != DEV_STATE_UP)
1863 /* The card isn't up. Ignore the packet. */
1864 return;
1865
1866 skb = dev_alloc_skb(skb_len);
1867 if (skb == NULL) {
1868 PRINT_ERR("LCS: alloc_skb failed for device=%s\n",
1869 card->dev->name);
1870 card->stats.rx_dropped++;
1871 return;
1872 }
1873 skb->dev = card->dev;
1874 memcpy(skb_put(skb, skb_len), skb_data, skb_len);
1875 skb->protocol = card->lan_type_trans(skb, card->dev);
1876 card->stats.rx_bytes += skb_len;
1877 card->stats.rx_packets++;
1878 *((__u32 *)skb->cb) = ++card->pkt_seq;
1879 netif_rx(skb);
1880}
1881
1882/**
1883 * LCS main routine to get packets and lancmd replies from the buffers
1884 */
1885static void
1886lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
1887{
1888 struct lcs_card *card;
1889 struct lcs_header *lcs_hdr;
1890 __u16 offset;
1891
1892 LCS_DBF_TEXT(5, trace, "lcsgtpkt");
1893 lcs_hdr = (struct lcs_header *) buffer->data;
1894 if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) {
1895 LCS_DBF_TEXT(4, trace, "-eiogpkt");
1896 return;
1897 }
1898 card = (struct lcs_card *)
1899 ((char *) channel - offsetof(struct lcs_card, read));
1900 offset = 0;
1901 while (lcs_hdr->offset != 0) {
1902 if (lcs_hdr->offset <= 0 ||
1903 lcs_hdr->offset > LCS_IOBUFFERSIZE ||
1904 lcs_hdr->offset < offset) {
1905 /* Offset invalid. */
1906 card->stats.rx_length_errors++;
1907 card->stats.rx_errors++;
1908 return;
1909 }
1910 /* What kind of frame is it? */
1911 if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL)
1912 /* Control frame. */
1913 lcs_get_control(card, (struct lcs_cmd *) lcs_hdr);
1914 else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET ||
1915 lcs_hdr->type == LCS_FRAME_TYPE_TR ||
1916 lcs_hdr->type == LCS_FRAME_TYPE_FDDI)
1917 /* Normal network packet. */
1918 lcs_get_skb(card, (char *)(lcs_hdr + 1),
1919 lcs_hdr->offset - offset -
1920 sizeof(struct lcs_header));
1921 else
1922 /* Unknown frame type. */
1923 ; // FIXME: error message ?
1924 /* Proceed to next frame. */
1925 offset = lcs_hdr->offset;
1926 lcs_hdr->offset = LCS_ILLEGAL_OFFSET;
1927 lcs_hdr = (struct lcs_header *) (buffer->data + offset);
1928 }
1929 /* The buffer is now empty. Make it ready again. */
1930 lcs_ready_buffer(&card->read, buffer);
1931}
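
/*
 * Note (illustrative): consumed headers are overwritten with
 * LCS_ILLEGAL_OFFSET so that the check at the top of this function can
 * recognize a buffer the card has not (re)filled yet.
 */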
1932
1933/**
1934 * get network statistics for ifconfig and other user programs
1935 */
1936static struct net_device_stats *
1937lcs_getstats(struct net_device *dev)
1938{
1939 struct lcs_card *card;
1940
1941 LCS_DBF_TEXT(4, trace, "netstats");
1942 card = (struct lcs_card *) dev->priv;
1943 return &card->stats;
1944}
1945
1946/**
1947 * stop lcs device
1948 * This function will be called by user doing ifconfig xxx down
1949 */
1950static int
1951lcs_stop_device(struct net_device *dev)
1952{
1953 struct lcs_card *card;
1954 int rc;
1955
1956 LCS_DBF_TEXT(2, trace, "stopdev");
1957 card = (struct lcs_card *) dev->priv;
1958 netif_stop_queue(dev);
1959 dev->flags &= ~IFF_UP;
1960 rc = lcs_stopcard(card);
1961 if (rc)
1962 PRINT_ERR("Try it again!\n ");
1963 return rc;
1964}
1965
1966/**
1967 * start lcs device and make it runnable
1968 * This function will be called by user doing ifconfig xxx up
1969 */
1970static int
1971lcs_open_device(struct net_device *dev)
1972{
1973 struct lcs_card *card;
1974 int rc;
1975
1976 LCS_DBF_TEXT(2, trace, "opendev");
1977 card = (struct lcs_card *) dev->priv;
1978 /* initialize statistics */
1979 rc = lcs_detect(card);
1980 if (rc) {
1981 PRINT_ERR("LCS:Error in opening device!\n");
1982
1983 } else {
1984 dev->flags |= IFF_UP;
1985 netif_wake_queue(dev);
1986 card->state = DEV_STATE_UP;
1987 }
1988 return rc;
1989}
1990
1991/**
1992 * show function for portno called by cat or similar things
1993 */
1994static ssize_t
1995lcs_portno_show (struct device *dev, char *buf)
1996{
1997 struct lcs_card *card;
1998
1999 card = (struct lcs_card *)dev->driver_data;
2000
2001 if (!card)
2002 return 0;
2003
2004 return sprintf(buf, "%d\n", card->portno);
2005}
2006
2007/**
2008 * store the value which is piped to file portno
2009 */
2010static ssize_t
2011lcs_portno_store (struct device *dev, const char *buf, size_t count)
2012{
2013 struct lcs_card *card;
2014 int value;
2015
2016 card = (struct lcs_card *)dev->driver_data;
2017
2018 if (!card)
2019 return 0;
2020
2021 sscanf(buf, "%u", &value);
2022 /* TODO: sanity checks */
2023 card->portno = value;
2024
2025 return count;
2026
2027}
2028
2029static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
2030
2031static ssize_t
2032lcs_type_show(struct device *dev, char *buf)
2033{
2034 struct ccwgroup_device *cgdev;
2035
2036 cgdev = to_ccwgroupdev(dev);
2037 if (!cgdev)
2038 return -ENODEV;
2039
2040 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
2041}
2042
2043static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
2044
2045static ssize_t
2046lcs_timeout_show(struct device *dev, char *buf)
2047{
2048 struct lcs_card *card;
2049
2050 card = (struct lcs_card *)dev->driver_data;
2051
2052 return card ? sprintf(buf, "%u\n", card->lancmd_timeout) : 0;
2053}
2054
2055static ssize_t
2056lcs_timeout_store (struct device *dev, const char *buf, size_t count)
2057{
2058 struct lcs_card *card;
2059 int value;
2060
2061 card = (struct lcs_card *)dev->driver_data;
2062
2063 if (!card)
2064 return 0;
2065
2066 sscanf(buf, "%u", &value);
2067 /* TODO: sanity checks */
2068 card->lancmd_timeout = value;
2069
2070 return count;
2071
2072}
2073
 2074static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store);
2075
2076static struct attribute * lcs_attrs[] = {
2077 &dev_attr_portno.attr,
2078 &dev_attr_type.attr,
2079 &dev_attr_lancmd_timeout.attr,
2080 NULL,
2081};
2082
2083static struct attribute_group lcs_attr_group = {
2084 .attrs = lcs_attrs,
2085};
2086
2087/**
2088 * lcs_probe_device is called on establishing a new ccwgroup_device.
2089 */
2090static int
2091lcs_probe_device(struct ccwgroup_device *ccwgdev)
2092{
2093 struct lcs_card *card;
2094 int ret;
2095
2096 if (!get_device(&ccwgdev->dev))
2097 return -ENODEV;
2098
2099 LCS_DBF_TEXT(2, setup, "add_dev");
2100 card = lcs_alloc_card();
2101 if (!card) {
2102 PRINT_ERR("Allocation of lcs card failed\n");
2103 put_device(&ccwgdev->dev);
2104 return -ENOMEM;
2105 }
2106 ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group);
2107 if (ret) {
2108 PRINT_ERR("Creating attributes failed");
2109 lcs_free_card(card);
2110 put_device(&ccwgdev->dev);
2111 return ret;
2112 }
2113 ccwgdev->dev.driver_data = card;
2114 ccwgdev->cdev[0]->handler = lcs_irq;
2115 ccwgdev->cdev[1]->handler = lcs_irq;
2116 return 0;
2117}
2118
2119static int
2120lcs_register_netdev(struct ccwgroup_device *ccwgdev)
2121{
2122 struct lcs_card *card;
2123
2124 LCS_DBF_TEXT(2, setup, "regnetdv");
2125 card = (struct lcs_card *)ccwgdev->dev.driver_data;
2126 if (card->dev->reg_state != NETREG_UNINITIALIZED)
2127 return 0;
2128 SET_NETDEV_DEV(card->dev, &ccwgdev->dev);
2129 return register_netdev(card->dev);
2130}
2131
2132/**
2133 * lcs_new_device will be called by setting the group device online.
2134 */
2135
2136static int
2137lcs_new_device(struct ccwgroup_device *ccwgdev)
2138{
2139 struct lcs_card *card;
2140 struct net_device *dev=NULL;
2141 enum lcs_dev_states recover_state;
2142 int rc;
2143
2144 card = (struct lcs_card *)ccwgdev->dev.driver_data;
2145 if (!card)
2146 return -ENODEV;
2147
2148 LCS_DBF_TEXT(2, setup, "newdev");
2149 LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2150 card->read.ccwdev = ccwgdev->cdev[0];
2151 card->write.ccwdev = ccwgdev->cdev[1];
2152
2153 recover_state = card->state;
2154 ccw_device_set_online(card->read.ccwdev);
2155 ccw_device_set_online(card->write.ccwdev);
2156
2157 LCS_DBF_TEXT(3, setup, "lcsnewdv");
2158
2159 lcs_setup_card(card);
2160 rc = lcs_detect(card);
2161 if (rc) {
2162 LCS_DBF_TEXT(2, setup, "dtctfail");
2163 PRINT_WARN("Detection of LCS card failed with return code "
2164 "%d (0x%x)\n", rc, rc);
2165 lcs_stopcard(card);
2166 goto out;
2167 }
2168 if (card->dev) {
2169 LCS_DBF_TEXT(2, setup, "samedev");
2170 LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2171 goto netdev_out;
2172 }
2173 switch (card->lan_type) {
2174#ifdef CONFIG_NET_ETHERNET
2175 case LCS_FRAME_TYPE_ENET:
2176 card->lan_type_trans = eth_type_trans;
2177 dev = alloc_etherdev(0);
2178 break;
2179#endif
2180#ifdef CONFIG_TR
2181 case LCS_FRAME_TYPE_TR:
2182 card->lan_type_trans = tr_type_trans;
2183 dev = alloc_trdev(0);
2184 break;
2185#endif
2186#ifdef CONFIG_FDDI
2187 case LCS_FRAME_TYPE_FDDI:
2188 card->lan_type_trans = fddi_type_trans;
2189 dev = alloc_fddidev(0);
2190 break;
2191#endif
2192 default:
2193 LCS_DBF_TEXT(3, setup, "errinit");
2194 PRINT_ERR("LCS: Initialization failed\n");
2195 PRINT_ERR("LCS: No device found!\n");
2196 goto out;
2197 }
2198 if (!dev)
2199 goto out;
2200 card->dev = dev;
2201netdev_out:
2202 card->dev->priv = card;
2203 card->dev->open = lcs_open_device;
2204 card->dev->stop = lcs_stop_device;
2205 card->dev->hard_start_xmit = lcs_start_xmit;
2206 card->dev->get_stats = lcs_getstats;
2207 SET_MODULE_OWNER(dev);
2208 if (lcs_register_netdev(ccwgdev) != 0)
2209 goto out;
2210 memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
2211#ifdef CONFIG_IP_MULTICAST
2212 if (!lcs_check_multicast_support(card))
2213 card->dev->set_multicast_list = lcs_set_multicast_list;
2214#endif
2215 netif_stop_queue(card->dev);
2216 lcs_set_allowed_threads(card,0xffffffff);
2217 if (recover_state == DEV_STATE_RECOVER) {
2218 lcs_set_multicast_list(card->dev);
2219 card->dev->flags |= IFF_UP;
2220 netif_wake_queue(card->dev);
2221 card->state = DEV_STATE_UP;
2222 } else
2223 lcs_stopcard(card);
2224
2225 return 0;
2226out:
2227
2228 ccw_device_set_offline(card->read.ccwdev);
2229 ccw_device_set_offline(card->write.ccwdev);
2230 return -ENODEV;
2231}
2232
2233/**
2234 * lcs_shutdown_device, called when setting the group device offline.
2235 */
2236static int
2237lcs_shutdown_device(struct ccwgroup_device *ccwgdev)
2238{
2239 struct lcs_card *card;
2240 enum lcs_dev_states recover_state;
2241 int ret;
2242
2243 LCS_DBF_TEXT(3, setup, "shtdndev");
2244 card = (struct lcs_card *)ccwgdev->dev.driver_data;
2245 if (!card)
2246 return -ENODEV;
2247 lcs_set_allowed_threads(card, 0);
2248 if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD))
2249 return -ERESTARTSYS;
2250 LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2251 recover_state = card->state;
2252
2253 ret = lcs_stop_device(card->dev);
2254 ret = ccw_device_set_offline(card->read.ccwdev);
2255 ret = ccw_device_set_offline(card->write.ccwdev);
2256 if (recover_state == DEV_STATE_UP) {
2257 card->state = DEV_STATE_RECOVER;
2258 }
2259 if (ret)
2260 return ret;
2261 return 0;
2262}
2263
2264/**
2265 * lcs_remove_device, free buffers and card
2266 */
2267static void
2268lcs_remove_device(struct ccwgroup_device *ccwgdev)
2269{
2270 struct lcs_card *card;
2271
2272 card = (struct lcs_card *)ccwgdev->dev.driver_data;
2273 if (!card)
2274 return;
2275
2276 PRINT_INFO("Removing lcs group device ....\n");
2277 LCS_DBF_TEXT(3, setup, "remdev");
2278 LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2279 if (ccwgdev->state == CCWGROUP_ONLINE) {
2280 lcs_shutdown_device(ccwgdev);
2281 }
2282 if (card->dev)
2283 unregister_netdev(card->dev);
2284 sysfs_remove_group(&ccwgdev->dev.kobj, &lcs_attr_group);
2285 lcs_cleanup_card(card);
2286 lcs_free_card(card);
2287 put_device(&ccwgdev->dev);
2288}
2289
2290/**
2291 * LCS ccwgroup driver registration
2292 */
2293static struct ccwgroup_driver lcs_group_driver = {
2294 .owner = THIS_MODULE,
2295 .name = "lcs",
2296 .max_slaves = 2,
2297 .driver_id = 0xD3C3E2,
2298 .probe = lcs_probe_device,
2299 .remove = lcs_remove_device,
2300 .set_online = lcs_new_device,
2301 .set_offline = lcs_shutdown_device,
2302};
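
/* Editor's note: the driver_id above, 0xD3C3E2, is "LCS" in EBCDIC. */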
2303
2304/**
2305 * LCS Module/Kernel initialization function
2306 */
2307static int
2308__init lcs_init_module(void)
2309{
2310 int rc;
2311
2312 PRINT_INFO("Loading %s\n",version);
2313 rc = lcs_register_debug_facility();
2314 LCS_DBF_TEXT(0, setup, "lcsinit");
2315 if (rc) {
2316 PRINT_ERR("Initialization failed\n");
2317 return rc;
2318 }
2319
2320 rc = register_cu3088_discipline(&lcs_group_driver);
2321 if (rc) {
2322 PRINT_ERR("Initialization failed\n");
2323 return rc;
2324 }
2325
2326 return 0;
2327}
2328
2329
2330/**
2331 * LCS module cleanup function
2332 */
2333static void
2334__exit lcs_cleanup_module(void)
2335{
2336 PRINT_INFO("Terminating lcs module.\n");
2337 LCS_DBF_TEXT(0, trace, "cleanup");
2338 unregister_cu3088_discipline(&lcs_group_driver);
2339 lcs_unregister_debug_facility();
2340}
2341
2342module_init(lcs_init_module);
2343module_exit(lcs_cleanup_module);
2344
2345MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>");
2346MODULE_LICENSE("GPL");
2347
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
new file mode 100644
index 000000000000..a7f348ef1b08
--- /dev/null
+++ b/drivers/s390/net/lcs.h
@@ -0,0 +1,321 @@
1/*lcs.h*/
2
3#include <linux/interrupt.h>
4#include <linux/netdevice.h>
5#include <linux/skbuff.h>
6#include <linux/workqueue.h>
7#include <asm/ccwdev.h>
8
9#define VERSION_LCS_H "$Revision: 1.19 $"
10
11#define LCS_DBF_TEXT(level, name, text) \
12 do { \
13 debug_text_event(lcs_dbf_##name, level, text); \
14 } while (0)
15
 16#define LCS_DBF_HEX(level, name, addr, len) \
 17	do { \
 18		debug_event(lcs_dbf_##name, level, (void *)(addr), len); \
 19	} while (0)
 20
 21#define LCS_DBF_TEXT_(level, name, text...) \
 22	do { \
 23		sprintf(debug_buffer, text); \
 24		debug_text_event(lcs_dbf_##name, level, debug_buffer); \
 25	} while (0)
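
/*
 * Editor's note: typical use of the wrappers above, as seen in lcs.c:
 *
 *	LCS_DBF_TEXT(2, setup, "newdev");
 *	LCS_DBF_HEX(3, setup, &card, sizeof(void *));
 *	LCS_DBF_TEXT_(2, trace, "rc %d", rc);	(format string assumed)
 *
 * LCS_DBF_TEXT_ sprintf()s into the shared debug_buffer first, so it is
 * not reentrant.
 */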
26
 27/**
 28 * Some more definitions for debugging and printk output.
 29 */
30#define PRINTK_HEADER " lcs: "
31
 32/**
 33 * sysfs-related macros
 34 */
 35#define CARD_FROM_DEV(cdev) \
 36	((struct lcs_card *) \
 37	 ((struct ccwgroup_device *)cdev->dev.driver_data)->dev.driver_data)
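
/*
 * Editor's sketch: used to reach the card from a ccw_device, e.g. in a
 * sysfs show routine (variable names are illustrative):
 *
 *	struct lcs_card *card = CARD_FROM_DEV(cdev);
 */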
38/**
39 * CCW commands used in this driver
40 */
41#define LCS_CCW_WRITE 0x01
42#define LCS_CCW_READ 0x02
43#define LCS_CCW_TRANSFER 0x08
44
45/**
46 * LCS device status primitives
47 */
48#define LCS_CMD_STARTLAN 0x01
49#define LCS_CMD_STOPLAN 0x02
50#define LCS_CMD_LANSTAT 0x04
51#define LCS_CMD_STARTUP 0x07
52#define LCS_CMD_SHUTDOWN 0x08
53#define LCS_CMD_QIPASSIST 0xb2
54#define LCS_CMD_SETIPM 0xb4
55#define LCS_CMD_DELIPM 0xb5
56
57#define LCS_INITIATOR_TCPIP 0x00
58#define LCS_INITIATOR_LGW 0x01
59#define LCS_STD_CMD_SIZE 16
60#define LCS_MULTICAST_CMD_SIZE 404
61
62/**
 63 * LCS IPASSIST MASKS, only used when multicast is switched on
64 */
65/* Not supported by LCS */
66#define LCS_IPASS_ARP_PROCESSING 0x0001
67#define LCS_IPASS_IN_CHECKSUM_SUPPORT 0x0002
68#define LCS_IPASS_OUT_CHECKSUM_SUPPORT 0x0004
69#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008
70#define LCS_IPASS_IP_FILTERING 0x0010
71/* Supported by lcs 3172 */
72#define LCS_IPASS_IPV6_SUPPORT 0x0020
73#define LCS_IPASS_MULTICAST_SUPPORT 0x0040
74
75/**
76 * LCS sense byte definitions
77 */
78#define LCS_SENSE_INTERFACE_DISCONNECT 0x01
79#define LCS_SENSE_EQUIPMENT_CHECK 0x10
80#define LCS_SENSE_BUS_OUT_CHECK 0x20
81#define LCS_SENSE_INTERVENTION_REQUIRED 0x40
82#define LCS_SENSE_CMD_REJECT 0x80
83#define LCS_SENSE_RESETTING_EVENT 0x0080
84#define LCS_SENSE_DEVICE_ONLINE 0x0020
85
86/**
87 * LCS packet type definitions
88 */
89#define LCS_FRAME_TYPE_CONTROL 0
90#define LCS_FRAME_TYPE_ENET 1
91#define LCS_FRAME_TYPE_TR 2
92#define LCS_FRAME_TYPE_FDDI 7
93#define LCS_FRAME_TYPE_AUTO -1
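
/*
 * Editor's note: lcs_new_device() in lcs.c maps these frame types to the
 * matching net_device allocator and type_trans handler
 * (alloc_etherdev/eth_type_trans, alloc_trdev/tr_type_trans,
 * alloc_fddidev/fddi_type_trans).
 */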
94
95/**
 96 * Some more definitions; to be sorted later.
97 */
98#define LCS_ILLEGAL_OFFSET 0xffff
99#define LCS_IOBUFFERSIZE 0x5000
100#define LCS_NUM_BUFFS 8 /* needs to be power of 2 */
101#define LCS_MAC_LENGTH 6
102#define LCS_INVALID_PORT_NO -1
103#define LCS_LANCMD_TIMEOUT_DEFAULT 5
104
105/**
106 * Multicast state
107 */
108#define LCS_IPM_STATE_SET_REQUIRED 0
109#define LCS_IPM_STATE_DEL_REQUIRED 1
110#define LCS_IPM_STATE_ON_CARD 2
111
112/**
113 * Alternate names for the inbound/outbound checksum IP assist bits.
114 * The remaining IP assist masks are defined above; only used for
115 * multicast.
116 */
117#define LCS_IPASS_INBOUND_CSUM_SUPP	0x0002
118#define LCS_IPASS_OUTBOUND_CSUM_SUPP	0x0004
123
124/**
125 * LCS Buffer states
126 */
127enum lcs_buffer_states {
128 BUF_STATE_EMPTY, /* buffer is empty */
129 BUF_STATE_LOCKED, /* buffer is locked, don't touch */
130 BUF_STATE_READY, /* buffer is ready for read/write */
131 BUF_STATE_PROCESSED,
132};
133
134/**
135 * LCS Channel State Machine declarations
136 */
137enum lcs_channel_states {
138 CH_STATE_INIT,
139 CH_STATE_HALTED,
140 CH_STATE_STOPPED,
141 CH_STATE_RUNNING,
142 CH_STATE_SUSPENDED,
143 CH_STATE_CLEARED,
144};
145
146/**
147 * LCS device state machine
148 */
149enum lcs_dev_states {
150 DEV_STATE_DOWN,
151 DEV_STATE_UP,
152 DEV_STATE_RECOVER,
153};
154
155enum lcs_threads {
156 LCS_SET_MC_THREAD = 1,
157 LCS_STARTLAN_THREAD = 2,
158 LCS_STOPLAN_THREAD = 4,
159 LCS_STARTUP_THREAD = 8,
160};
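
/*
 * Editor's note: these are bit masks, not indices; they are or-ed into the
 * thread_*_mask members of struct lcs_card below, e.g.
 *
 *	lcs_set_allowed_threads(card, 0xffffffff);
 *	if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD))
 *		return -ERESTARTSYS;
 */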
161/**
162 * LCS struct declarations
163 */
164struct lcs_header {
165 __u16 offset;
166 __u8 type;
167 __u8 slot;
168} __attribute__ ((packed));
169
170struct lcs_ip_mac_pair {
171 __u32 ip_addr;
172 __u8 mac_addr[LCS_MAC_LENGTH];
173 __u8 reserved[2];
174} __attribute__ ((packed));
175
176struct lcs_ipm_list {
177 struct list_head list;
178 struct lcs_ip_mac_pair ipm;
179 __u8 ipm_state;
180};
181
182struct lcs_cmd {
183 __u16 offset;
184 __u8 type;
185 __u8 slot;
186 __u8 cmd_code;
187 __u8 initiator;
188 __u16 sequence_no;
189 __u16 return_code;
190 union {
191 struct {
192 __u8 lan_type;
193 __u8 portno;
194 __u16 parameter_count;
195 __u8 operator_flags[3];
196 __u8 reserved[3];
197 } lcs_std_cmd;
198 struct {
199 __u16 unused1;
200 __u16 buff_size;
201 __u8 unused2[6];
202 } lcs_startup;
203 struct {
204 __u8 lan_type;
205 __u8 portno;
206 __u8 unused[10];
207 __u8 mac_addr[LCS_MAC_LENGTH];
208 __u32 num_packets_deblocked;
209 __u32 num_packets_blocked;
210 __u32 num_packets_tx_on_lan;
211 __u32 num_tx_errors_detected;
212 __u32 num_tx_packets_disgarded;
213 __u32 num_packets_rx_from_lan;
214 __u32 num_rx_errors_detected;
215 __u32 num_rx_discarded_nobuffs_avail;
216 __u32 num_rx_packets_too_large;
217 } lcs_lanstat_cmd;
218#ifdef CONFIG_IP_MULTICAST
219 struct {
220 __u8 lan_type;
221 __u8 portno;
222 __u16 num_ip_pairs;
223 __u16 ip_assists_supported;
224 __u16 ip_assists_enabled;
225 __u16 version;
226 struct {
227 struct lcs_ip_mac_pair
228 ip_mac_pair[32];
229 __u32 response_data;
 230			} lcs_ipass_ctlmsg __attribute__ ((packed));
231 } lcs_qipassist __attribute__ ((packed));
232#endif /*CONFIG_IP_MULTICAST */
233 } cmd __attribute__ ((packed));
234} __attribute__ ((packed));
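
/*
 * Editor's sketch (assumed, derived from the fields above): a minimal
 * STARTLAN request would be filled in roughly as
 *
 *	cmd->cmd_code  = LCS_CMD_STARTLAN;
 *	cmd->initiator = LCS_INITIATOR_TCPIP;
 *	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
 *	cmd->cmd.lcs_std_cmd.portno   = card->portno;
 */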
235
236/**
237 * Forward declarations.
238 */
239struct lcs_card;
240struct lcs_channel;
241
242/**
243 * Definition of an lcs buffer.
244 */
245struct lcs_buffer {
246 enum lcs_buffer_states state;
247 void *data;
248 int count;
249 /* Callback for completion notification. */
250 void (*callback)(struct lcs_channel *, struct lcs_buffer *);
251};
252
253struct lcs_reply {
254 struct list_head list;
255 __u16 sequence_no;
256 atomic_t refcnt;
257 /* Callback for completion notification. */
258 void (*callback)(struct lcs_card *, struct lcs_cmd *);
259 wait_queue_head_t wait_q;
260 struct lcs_card *card;
261 int received;
262 int rc;
263};
264
265/**
266 * Definition of an lcs channel
267 */
268struct lcs_channel {
269 enum lcs_channel_states state;
270 struct ccw_device *ccwdev;
271 struct ccw1 ccws[LCS_NUM_BUFFS + 1];
272 wait_queue_head_t wait_q;
273 struct tasklet_struct irq_tasklet;
274 struct lcs_buffer iob[LCS_NUM_BUFFS];
275 int io_idx;
276 int buf_idx;
277};
278
279
280/**
281 * definition of the lcs card
282 */
283struct lcs_card {
284 spinlock_t lock;
285 spinlock_t ipm_lock;
286 enum lcs_dev_states state;
287 struct net_device *dev;
288 struct net_device_stats stats;
289 unsigned short (*lan_type_trans)(struct sk_buff *skb,
290 struct net_device *dev);
291 struct lcs_channel read;
292 struct lcs_channel write;
293 struct lcs_buffer *tx_buffer;
294 int tx_emitted;
295 struct list_head lancmd_waiters;
296 int lancmd_timeout;
297
298 struct work_struct kernel_thread_starter;
299 spinlock_t mask_lock;
300 unsigned long thread_start_mask;
301 unsigned long thread_running_mask;
302 unsigned long thread_allowed_mask;
303 wait_queue_head_t wait_q;
304
305#ifdef CONFIG_IP_MULTICAST
306 struct list_head ipm_list;
307#endif
308 __u8 mac[LCS_MAC_LENGTH];
309 __u16 ip_assists_supported;
310 __u16 ip_assists_enabled;
311 __s8 lan_type;
312 __u32 pkt_seq;
313 __u16 sequence_no;
314 __s16 portno;
315 /* Some info copied from probeinfo */
316 u8 device_forced;
317 u8 max_port_no;
318 u8 hint_port_no;
319 s16 port_protocol_no;
320} __attribute__ ((aligned(8)));
321
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
new file mode 100644
index 000000000000..16e8e69afb10
--- /dev/null
+++ b/drivers/s390/net/netiucv.c
@@ -0,0 +1,2149 @@
1/*
2 * $Id: netiucv.c,v 1.63 2004/07/27 13:36:05 mschwide Exp $
3 *
4 * IUCV network driver
5 *
6 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
8 *
9 * Driverfs integration and all bugs therein by Cornelia Huck(cohuck@de.ibm.com)
10 *
11 * Documentation used:
12 * the source of the original IUCV driver by:
13 * Stefan Hegewald <hegewald@de.ibm.com>
14 * Hartmut Penner <hpenner@de.ibm.com>
15 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
16 * Martin Schwidefsky (schwidefsky@de.ibm.com)
17 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
18 *
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2, or (at your option)
22 * any later version.
23 *
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
28 *
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
32 *
33 * RELEASE-TAG: IUCV network driver $Revision: 1.63 $
34 *
35 */
36
37#undef DEBUG
38
39#include <linux/module.h>
40#include <linux/init.h>
41#include <linux/kernel.h>
42#include <linux/slab.h>
43#include <linux/errno.h>
44#include <linux/types.h>
45#include <linux/interrupt.h>
46#include <linux/timer.h>
47#include <linux/sched.h>
48#include <linux/bitops.h>
49
50#include <linux/signal.h>
51#include <linux/string.h>
52#include <linux/device.h>
53
54#include <linux/ip.h>
55#include <linux/if_arp.h>
56#include <linux/tcp.h>
57#include <linux/skbuff.h>
58#include <linux/ctype.h>
59#include <net/dst.h>
60
61#include <asm/io.h>
62#include <asm/uaccess.h>
63
64#include "iucv.h"
65#include "fsm.h"
66
67MODULE_AUTHOR
68 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
69MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
70
71
72#define PRINTK_HEADER " iucv: " /* for debugging */
73
74static struct device_driver netiucv_driver = {
75 .name = "netiucv",
76 .bus = &iucv_bus,
77};
78
79/**
80 * Per connection profiling data
81 */
82struct connection_profile {
83 unsigned long maxmulti;
84 unsigned long maxcqueue;
85 unsigned long doios_single;
86 unsigned long doios_multi;
87 unsigned long txlen;
88 unsigned long tx_time;
89 struct timespec send_stamp;
90 unsigned long tx_pending;
91 unsigned long tx_max_pending;
92};
93
94/**
95 * Representation of one iucv connection
96 */
97struct iucv_connection {
98 struct iucv_connection *next;
99 iucv_handle_t handle;
100 __u16 pathid;
101 struct sk_buff *rx_buff;
102 struct sk_buff *tx_buff;
103 struct sk_buff_head collect_queue;
104 struct sk_buff_head commit_queue;
105 spinlock_t collect_lock;
106 int collect_len;
107 int max_buffsize;
108 fsm_timer timer;
109 fsm_instance *fsm;
110 struct net_device *netdev;
111 struct connection_profile prof;
112 char userid[9];
113};
114
115/**
116 * Linked list of all connection structs.
117 */
118static struct iucv_connection *iucv_connections;
119
120/**
121 * Representation of event-data for the
122 * connection state machine.
123 */
124struct iucv_event {
125 struct iucv_connection *conn;
126 void *data;
127};
128
129/**
130 * Private part of the network device structure
131 */
132struct netiucv_priv {
133 struct net_device_stats stats;
134 unsigned long tbusy;
135 fsm_instance *fsm;
136 struct iucv_connection *conn;
137 struct device *dev;
138};
139
140/**
141 * Link level header for a packet.
142 */
143typedef struct ll_header_t {
144 __u16 next;
145} ll_header;
146
147#define NETIUCV_HDRLEN (sizeof(ll_header))
148#define NETIUCV_BUFSIZE_MAX 32768
149#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
150#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
151#define NETIUCV_MTU_DEFAULT 9216
152#define NETIUCV_QUEUELEN_DEFAULT 50
153#define NETIUCV_TIMEOUT_5SEC 5000
154
155/**
156 * Compatibility macros for busy handling
157 * of network devices.
158 */
159static __inline__ void netiucv_clear_busy(struct net_device *dev)
160{
161 clear_bit(0, &(((struct netiucv_priv *)dev->priv)->tbusy));
162 netif_wake_queue(dev);
163}
164
165static __inline__ int netiucv_test_and_set_busy(struct net_device *dev)
166{
167 netif_stop_queue(dev);
168 return test_and_set_bit(0, &((struct netiucv_priv *)dev->priv)->tbusy);
169}
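
/*
 * Editor's note: these two bracket the transmit path; see netiucv_tx()
 * below, which does essentially
 *
 *	if (netiucv_test_and_set_busy(dev))
 *		return -EBUSY;
 *	rc = netiucv_transmit_skb(privptr->conn, skb) ? 1 : 0;
 *	netiucv_clear_busy(dev);
 */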
170
171static __u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
172static __u8 iucvMagic[16] = {
173 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
174 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
175};
176
177/**
178 * This mask means the 16-byte IUCV "magic" and the origin userid must
179 * match exactly as specified in order to give connection_pending()
180 * control.
181 */
182static __u8 netiucv_mask[] = {
183 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
184 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
185 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
186};
187
188/**
189 * Convert an iucv userId to its printable
190 * form (strip whitespace at end).
191 *
 192 * @param name An iucv userId
193 *
194 * @returns The printable string (static data!!)
195 */
196static __inline__ char *
197netiucv_printname(char *name)
198{
199 static char tmp[9];
200 char *p = tmp;
201 memcpy(tmp, name, 8);
202 tmp[8] = '\0';
203 while (*p && (!isspace(*p)))
204 p++;
205 *p = '\0';
206 return tmp;
207}
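
/*
 * Editor's note: the returned buffer is static, so the result must be
 * consumed before the next call, e.g.
 *
 *	PRINT_INFO("%s: User %s is currently not available.\n",
 *		   conn->netdev->name, netiucv_printname(conn->userid));
 */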
208
209/**
210 * States of the interface statemachine.
211 */
212enum dev_states {
213 DEV_STATE_STOPPED,
214 DEV_STATE_STARTWAIT,
215 DEV_STATE_STOPWAIT,
216 DEV_STATE_RUNNING,
217 /**
 218	 * MUST always be the last element!
219 */
220 NR_DEV_STATES
221};
222
223static const char *dev_state_names[] = {
224 "Stopped",
225 "StartWait",
226 "StopWait",
227 "Running",
228};
229
230/**
231 * Events of the interface statemachine.
232 */
233enum dev_events {
234 DEV_EVENT_START,
235 DEV_EVENT_STOP,
236 DEV_EVENT_CONUP,
237 DEV_EVENT_CONDOWN,
238 /**
 239	 * MUST always be the last element!
240 */
241 NR_DEV_EVENTS
242};
243
244static const char *dev_event_names[] = {
245 "Start",
246 "Stop",
247 "Connection up",
248 "Connection down",
249};
250
251/**
252 * Events of the connection statemachine
253 */
254enum conn_events {
255 /**
 256	 * Events representing callbacks from
 257	 * the lowlevel iucv layer
258 */
259 CONN_EVENT_CONN_REQ,
260 CONN_EVENT_CONN_ACK,
261 CONN_EVENT_CONN_REJ,
262 CONN_EVENT_CONN_SUS,
263 CONN_EVENT_CONN_RES,
264 CONN_EVENT_RX,
265 CONN_EVENT_TXDONE,
266
267 /**
 268	 * Events representing error return codes from
 269	 * calls to the lowlevel iucv layer (none defined yet)
270 */
271
272 /**
 273	 * Event representing timer expiry.
274 */
275 CONN_EVENT_TIMER,
276
277 /**
 278	 * Events representing commands from upper levels.
279 */
280 CONN_EVENT_START,
281 CONN_EVENT_STOP,
282
283 /**
 284	 * MUST always be the last element!
285 */
286 NR_CONN_EVENTS,
287};
288
289static const char *conn_event_names[] = {
290 "Remote connection request",
291 "Remote connection acknowledge",
292 "Remote connection reject",
293 "Connection suspended",
294 "Connection resumed",
295 "Data received",
296 "Data sent",
297
298 "Timer",
299
300 "Start",
301 "Stop",
302};
303
304/**
305 * States of the connection statemachine.
306 */
307enum conn_states {
308 /**
309 * Connection not assigned to any device,
310 * initial state, invalid
311 */
312 CONN_STATE_INVALID,
313
314 /**
315 * Userid assigned but not operating
316 */
317 CONN_STATE_STOPPED,
318
319 /**
320 * Connection registered,
321 * no connection request sent yet,
322 * no connection request received
323 */
324 CONN_STATE_STARTWAIT,
325
326 /**
327 * Connection registered and connection request sent,
328 * no acknowledge and no connection request received yet.
329 */
330 CONN_STATE_SETUPWAIT,
331
332 /**
333 * Connection up and running idle
334 */
335 CONN_STATE_IDLE,
336
337 /**
338 * Data sent, awaiting CONN_EVENT_TXDONE
339 */
340 CONN_STATE_TX,
341
342 /**
343 * Error during registration.
344 */
345 CONN_STATE_REGERR,
346
347 /**
 348	 * Error during connection setup.
349 */
350 CONN_STATE_CONNERR,
351
352 /**
 353	 * MUST always be the last element!
354 */
355 NR_CONN_STATES,
356};
357
358static const char *conn_state_names[] = {
359 "Invalid",
360 "Stopped",
361 "StartWait",
362 "SetupWait",
363 "Idle",
364 "TX",
366 "Registration error",
367 "Connect error",
368};
369
370
371/**
372 * Debug Facility Stuff
373 */
374static debug_info_t *iucv_dbf_setup = NULL;
375static debug_info_t *iucv_dbf_data = NULL;
376static debug_info_t *iucv_dbf_trace = NULL;
377
378DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
379
380static void
381iucv_unregister_dbf_views(void)
382{
383 if (iucv_dbf_setup)
384 debug_unregister(iucv_dbf_setup);
385 if (iucv_dbf_data)
386 debug_unregister(iucv_dbf_data);
387 if (iucv_dbf_trace)
388 debug_unregister(iucv_dbf_trace);
389}
390static int
391iucv_register_dbf_views(void)
392{
393 iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
394 IUCV_DBF_SETUP_INDEX,
395 IUCV_DBF_SETUP_NR_AREAS,
396 IUCV_DBF_SETUP_LEN);
397 iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
398 IUCV_DBF_DATA_INDEX,
399 IUCV_DBF_DATA_NR_AREAS,
400 IUCV_DBF_DATA_LEN);
401 iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
402 IUCV_DBF_TRACE_INDEX,
403 IUCV_DBF_TRACE_NR_AREAS,
404 IUCV_DBF_TRACE_LEN);
405
406 if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
407 (iucv_dbf_trace == NULL)) {
408 iucv_unregister_dbf_views();
409 return -ENOMEM;
410 }
411 debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
412 debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
413
414 debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
415 debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
416
417 debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
418 debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
419
420 return 0;
421}
422
423/**
424 * Callback-wrappers, called from lowlevel iucv layer.
425 *****************************************************************************/
426
427static void
428netiucv_callback_rx(iucv_MessagePending *eib, void *pgm_data)
429{
430 struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
431 struct iucv_event ev;
432
433 ev.conn = conn;
434 ev.data = (void *)eib;
435
436 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
437}
438
439static void
440netiucv_callback_txdone(iucv_MessageComplete *eib, void *pgm_data)
441{
442 struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
443 struct iucv_event ev;
444
445 ev.conn = conn;
446 ev.data = (void *)eib;
447 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
448}
449
450static void
451netiucv_callback_connack(iucv_ConnectionComplete *eib, void *pgm_data)
452{
453 struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
454 struct iucv_event ev;
455
456 ev.conn = conn;
457 ev.data = (void *)eib;
458 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, &ev);
459}
460
461static void
462netiucv_callback_connreq(iucv_ConnectionPending *eib, void *pgm_data)
463{
464 struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
465 struct iucv_event ev;
466
467 ev.conn = conn;
468 ev.data = (void *)eib;
469 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
470}
471
472static void
473netiucv_callback_connrej(iucv_ConnectionSevered *eib, void *pgm_data)
474{
475 struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
476 struct iucv_event ev;
477
478 ev.conn = conn;
479 ev.data = (void *)eib;
480 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, &ev);
481}
482
483static void
484netiucv_callback_connsusp(iucv_ConnectionQuiesced *eib, void *pgm_data)
485{
486 struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
487 struct iucv_event ev;
488
489 ev.conn = conn;
490 ev.data = (void *)eib;
491 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, &ev);
492}
493
494static void
495netiucv_callback_connres(iucv_ConnectionResumed *eib, void *pgm_data)
496{
497 struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
498 struct iucv_event ev;
499
500 ev.conn = conn;
501 ev.data = (void *)eib;
502 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, &ev);
503}
504
505static iucv_interrupt_ops_t netiucv_ops = {
506 .ConnectionPending = netiucv_callback_connreq,
507 .ConnectionComplete = netiucv_callback_connack,
508 .ConnectionSevered = netiucv_callback_connrej,
509 .ConnectionQuiesced = netiucv_callback_connsusp,
510 .ConnectionResumed = netiucv_callback_connres,
511 .MessagePending = netiucv_callback_rx,
512 .MessageComplete = netiucv_callback_txdone
513};
514
515/**
516 * Dummy NOP action for all statemachines
517 */
518static void
519fsm_action_nop(fsm_instance *fi, int event, void *arg)
520{
521}
522
523/**
524 * Actions of the connection statemachine
525 *****************************************************************************/
526
527/**
528 * Helper function for conn_action_rx()
529 * Unpack a just received skb and hand it over to
530 * upper layers.
531 *
532 * @param conn The connection where this skb has been received.
533 * @param pskb The received skb.
534 */
536static void
537netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb)
538{
539 struct net_device *dev = conn->netdev;
540 struct netiucv_priv *privptr = dev->priv;
541 __u16 offset = 0;
542
543 skb_put(pskb, NETIUCV_HDRLEN);
544 pskb->dev = dev;
545 pskb->ip_summed = CHECKSUM_NONE;
546 pskb->protocol = ntohs(ETH_P_IP);
547
548 while (1) {
549 struct sk_buff *skb;
550 ll_header *header = (ll_header *)pskb->data;
551
552 if (!header->next)
553 break;
554
555 skb_pull(pskb, NETIUCV_HDRLEN);
556 header->next -= offset;
557 offset += header->next;
558 header->next -= NETIUCV_HDRLEN;
559 if (skb_tailroom(pskb) < header->next) {
560 PRINT_WARN("%s: Illegal next field in iucv header: "
561 "%d > %d\n",
562 dev->name, header->next, skb_tailroom(pskb));
563 IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
564 header->next, skb_tailroom(pskb));
565 return;
566 }
567 skb_put(pskb, header->next);
568 pskb->mac.raw = pskb->data;
569 skb = dev_alloc_skb(pskb->len);
570 if (!skb) {
 571			PRINT_WARN("%s: Out of memory in netiucv_unpack_skb\n",
572 dev->name);
573 IUCV_DBF_TEXT(data, 2,
574 "Out of memory in netiucv_unpack_skb\n");
575 privptr->stats.rx_dropped++;
576 return;
577 }
578 memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
579 skb->mac.raw = skb->data;
580 skb->dev = pskb->dev;
581 skb->protocol = pskb->protocol;
 582		skb->ip_summed = CHECKSUM_UNNECESSARY;
583 /*
584 * Since receiving is always initiated from a tasklet (in iucv.c),
585 * we must use netif_rx_ni() instead of netif_rx()
586 */
587 netif_rx_ni(skb);
588 dev->last_rx = jiffies;
589 privptr->stats.rx_packets++;
590 privptr->stats.rx_bytes += skb->len;
591 skb_pull(pskb, header->next);
592 skb_put(pskb, NETIUCV_HDRLEN);
593 }
594}
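
/*
 * Editor's sketch of the buffer layout that netiucv_unpack_skb() above and
 * netiucv_transmit_skb() below agree on (derived from the code, not from a
 * protocol spec):
 *
 *	+------+----------+------+----------+--------+
 *	| next | packet 1 | next | packet 2 | next=0 |
 *	+------+----------+------+----------+--------+
 *
 * Each "next" is a 16-bit ll_header giving the offset, from the start of
 * the buffer, of the following header; a header with next == 0 terminates
 * the chain.
 */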
595
596static void
597conn_action_rx(fsm_instance *fi, int event, void *arg)
598{
599 struct iucv_event *ev = (struct iucv_event *)arg;
600 struct iucv_connection *conn = ev->conn;
601 iucv_MessagePending *eib = (iucv_MessagePending *)ev->data;
602 struct netiucv_priv *privptr =(struct netiucv_priv *)conn->netdev->priv;
603
604 __u32 msglen = eib->ln1msg2.ipbfln1f;
605 int rc;
606
607 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
608
609 if (!conn->netdev) {
610 /* FRITZ: How to tell iucv LL to drop the msg? */
611 PRINT_WARN("Received data for unlinked connection\n");
612 IUCV_DBF_TEXT(data, 2,
613 "Received data for unlinked connection\n");
614 return;
615 }
616 if (msglen > conn->max_buffsize) {
617 /* FRITZ: How to tell iucv LL to drop the msg? */
618 privptr->stats.rx_dropped++;
619 PRINT_WARN("msglen %d > max_buffsize %d\n",
620 msglen, conn->max_buffsize);
621 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
622 msglen, conn->max_buffsize);
623 return;
624 }
625 conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head;
626 conn->rx_buff->len = 0;
627 rc = iucv_receive(conn->pathid, eib->ipmsgid, eib->iptrgcls,
628 conn->rx_buff->data, msglen, NULL, NULL, NULL);
629 if (rc || msglen < 5) {
630 privptr->stats.rx_errors++;
631 PRINT_WARN("iucv_receive returned %08x\n", rc);
632 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
633 return;
634 }
635 netiucv_unpack_skb(conn, conn->rx_buff);
636}
637
638static void
639conn_action_txdone(fsm_instance *fi, int event, void *arg)
640{
641 struct iucv_event *ev = (struct iucv_event *)arg;
642 struct iucv_connection *conn = ev->conn;
643 iucv_MessageComplete *eib = (iucv_MessageComplete *)ev->data;
644 struct netiucv_priv *privptr = NULL;
645 /* Shut up, gcc! skb is always below 2G. */
646 __u32 single_flag = eib->ipmsgtag;
647 __u32 txbytes = 0;
648 __u32 txpackets = 0;
649 __u32 stat_maxcq = 0;
650 struct sk_buff *skb;
651 unsigned long saveflags;
652 ll_header header;
653
654 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
655
656 if (conn && conn->netdev && conn->netdev->priv)
657 privptr = (struct netiucv_priv *)conn->netdev->priv;
658 conn->prof.tx_pending--;
659 if (single_flag) {
660 if ((skb = skb_dequeue(&conn->commit_queue))) {
661 atomic_dec(&skb->users);
662 dev_kfree_skb_any(skb);
663 if (privptr) {
664 privptr->stats.tx_packets++;
665 privptr->stats.tx_bytes +=
666 (skb->len - NETIUCV_HDRLEN
667 - NETIUCV_HDRLEN);
668 }
669 }
670 }
671 conn->tx_buff->data = conn->tx_buff->tail = conn->tx_buff->head;
672 conn->tx_buff->len = 0;
673 spin_lock_irqsave(&conn->collect_lock, saveflags);
674 while ((skb = skb_dequeue(&conn->collect_queue))) {
675 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
676 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
677 NETIUCV_HDRLEN);
678 memcpy(skb_put(conn->tx_buff, skb->len), skb->data, skb->len);
679 txbytes += skb->len;
680 txpackets++;
681 stat_maxcq++;
682 atomic_dec(&skb->users);
683 dev_kfree_skb_any(skb);
684 }
685 if (conn->collect_len > conn->prof.maxmulti)
686 conn->prof.maxmulti = conn->collect_len;
687 conn->collect_len = 0;
688 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
689 if (conn->tx_buff->len) {
690 int rc;
691
692 header.next = 0;
693 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
694 NETIUCV_HDRLEN);
695
696 conn->prof.send_stamp = xtime;
697 rc = iucv_send(conn->pathid, NULL, 0, 0, 0, 0,
698 conn->tx_buff->data, conn->tx_buff->len);
699 conn->prof.doios_multi++;
700 conn->prof.txlen += conn->tx_buff->len;
701 conn->prof.tx_pending++;
702 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
703 conn->prof.tx_max_pending = conn->prof.tx_pending;
704 if (rc) {
705 conn->prof.tx_pending--;
706 fsm_newstate(fi, CONN_STATE_IDLE);
707 if (privptr)
708 privptr->stats.tx_errors += txpackets;
709 PRINT_WARN("iucv_send returned %08x\n", rc);
710 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
711 } else {
712 if (privptr) {
713 privptr->stats.tx_packets += txpackets;
714 privptr->stats.tx_bytes += txbytes;
715 }
716 if (stat_maxcq > conn->prof.maxcqueue)
717 conn->prof.maxcqueue = stat_maxcq;
718 }
719 } else
720 fsm_newstate(fi, CONN_STATE_IDLE);
721}
722
723static void
724conn_action_connaccept(fsm_instance *fi, int event, void *arg)
725{
726 struct iucv_event *ev = (struct iucv_event *)arg;
727 struct iucv_connection *conn = ev->conn;
728 iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
729 struct net_device *netdev = conn->netdev;
730 struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
731 int rc;
732 __u16 msglimit;
733 __u8 udata[16];
734
735 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
736
737 rc = iucv_accept(eib->ippathid, NETIUCV_QUEUELEN_DEFAULT, udata, 0,
738 conn->handle, conn, NULL, &msglimit);
739 if (rc) {
740 PRINT_WARN("%s: IUCV accept failed with error %d\n",
741 netdev->name, rc);
742 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
743 return;
744 }
745 fsm_newstate(fi, CONN_STATE_IDLE);
746 conn->pathid = eib->ippathid;
747 netdev->tx_queue_len = msglimit;
748 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
749}
750
751static void
752conn_action_connreject(fsm_instance *fi, int event, void *arg)
753{
754 struct iucv_event *ev = (struct iucv_event *)arg;
755 struct iucv_connection *conn = ev->conn;
756 struct net_device *netdev = conn->netdev;
757 iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
758 __u8 udata[16];
759
760 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
761
762 iucv_sever(eib->ippathid, udata);
763 if (eib->ippathid != conn->pathid) {
764 PRINT_INFO("%s: IR Connection Pending; "
765 "pathid %d does not match original pathid %d\n",
766 netdev->name, eib->ippathid, conn->pathid);
767 IUCV_DBF_TEXT_(data, 2,
768 "connreject: IR pathid %d, conn. pathid %d\n",
769 eib->ippathid, conn->pathid);
770 iucv_sever(conn->pathid, udata);
771 }
772}
773
774static void
775conn_action_connack(fsm_instance *fi, int event, void *arg)
776{
777 struct iucv_event *ev = (struct iucv_event *)arg;
778 struct iucv_connection *conn = ev->conn;
779 iucv_ConnectionComplete *eib = (iucv_ConnectionComplete *)ev->data;
780 struct net_device *netdev = conn->netdev;
781 struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
782
783 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
784
785 fsm_deltimer(&conn->timer);
786 fsm_newstate(fi, CONN_STATE_IDLE);
787 if (eib->ippathid != conn->pathid) {
788 PRINT_INFO("%s: IR Connection Complete; "
789 "pathid %d does not match original pathid %d\n",
790 netdev->name, eib->ippathid, conn->pathid);
791 IUCV_DBF_TEXT_(data, 2,
792 "connack: IR pathid %d, conn. pathid %d\n",
793 eib->ippathid, conn->pathid);
794 conn->pathid = eib->ippathid;
795 }
796 netdev->tx_queue_len = eib->ipmsglim;
797 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
798}
799
800static void
801conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
802{
803 struct iucv_connection *conn = (struct iucv_connection *)arg;
804 __u8 udata[16];
805
806 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
807
808 fsm_deltimer(&conn->timer);
809 iucv_sever(conn->pathid, udata);
810 fsm_newstate(fi, CONN_STATE_STARTWAIT);
811}
812
813static void
814conn_action_connsever(fsm_instance *fi, int event, void *arg)
815{
816 struct iucv_event *ev = (struct iucv_event *)arg;
817 struct iucv_connection *conn = ev->conn;
818 struct net_device *netdev = conn->netdev;
819 struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
820 __u8 udata[16];
821
822 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
823
824 fsm_deltimer(&conn->timer);
825 iucv_sever(conn->pathid, udata);
826 PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
827 IUCV_DBF_TEXT(data, 2,
828 "conn_action_connsever: Remote dropped connection\n");
829 fsm_newstate(fi, CONN_STATE_STARTWAIT);
830 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
831}
832
833static void
834conn_action_start(fsm_instance *fi, int event, void *arg)
835{
836 struct iucv_event *ev = (struct iucv_event *)arg;
837 struct iucv_connection *conn = ev->conn;
838 __u16 msglimit;
839 int rc;
840
841 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
842
843 if (!conn->handle) {
844 IUCV_DBF_TEXT(trace, 5, "calling iucv_register_program\n");
845 conn->handle =
846 iucv_register_program(iucvMagic, conn->userid,
847 netiucv_mask,
848 &netiucv_ops, conn);
849 fsm_newstate(fi, CONN_STATE_STARTWAIT);
850 if (!conn->handle) {
851 fsm_newstate(fi, CONN_STATE_REGERR);
852 conn->handle = NULL;
853 IUCV_DBF_TEXT(setup, 2,
854 "NULL from iucv_register_program\n");
855 return;
856 }
857
858 PRINT_DEBUG("%s('%s'): registered successfully\n",
859 conn->netdev->name, conn->userid);
860 }
861
862 PRINT_DEBUG("%s('%s'): connecting ...\n",
863 conn->netdev->name, conn->userid);
864
865 /* We must set the state before calling iucv_connect because the callback
866 * handler could be called at any point after the connection request is
867 * sent */
868
869 fsm_newstate(fi, CONN_STATE_SETUPWAIT);
870 rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic,
871 conn->userid, iucv_host, 0, NULL, &msglimit,
872 conn->handle, conn);
873 switch (rc) {
874 case 0:
875 conn->netdev->tx_queue_len = msglimit;
876 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
877 CONN_EVENT_TIMER, conn);
878 return;
879 case 11:
880 PRINT_INFO("%s: User %s is currently not available.\n",
881 conn->netdev->name,
882 netiucv_printname(conn->userid));
883 fsm_newstate(fi, CONN_STATE_STARTWAIT);
884 return;
885 case 12:
886 PRINT_INFO("%s: User %s is currently not ready.\n",
887 conn->netdev->name,
888 netiucv_printname(conn->userid));
889 fsm_newstate(fi, CONN_STATE_STARTWAIT);
890 return;
891 case 13:
892 PRINT_WARN("%s: Too many IUCV connections.\n",
893 conn->netdev->name);
894 fsm_newstate(fi, CONN_STATE_CONNERR);
895 break;
896 case 14:
897 PRINT_WARN(
898 "%s: User %s has too many IUCV connections.\n",
899 conn->netdev->name,
900 netiucv_printname(conn->userid));
901 fsm_newstate(fi, CONN_STATE_CONNERR);
902 break;
903 case 15:
904 PRINT_WARN(
905 "%s: No IUCV authorization in CP directory.\n",
906 conn->netdev->name);
907 fsm_newstate(fi, CONN_STATE_CONNERR);
908 break;
909 default:
910 PRINT_WARN("%s: iucv_connect returned error %d\n",
911 conn->netdev->name, rc);
912 fsm_newstate(fi, CONN_STATE_CONNERR);
913 break;
914 }
915 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
916 IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
917 iucv_unregister_program(conn->handle);
918 conn->handle = NULL;
919}
920
921static void
922netiucv_purge_skb_queue(struct sk_buff_head *q)
923{
924 struct sk_buff *skb;
925
926 while ((skb = skb_dequeue(q))) {
927 atomic_dec(&skb->users);
928 dev_kfree_skb_any(skb);
929 }
930}
931
932static void
933conn_action_stop(fsm_instance *fi, int event, void *arg)
934{
935 struct iucv_event *ev = (struct iucv_event *)arg;
936 struct iucv_connection *conn = ev->conn;
937 struct net_device *netdev = conn->netdev;
938 struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
939
940 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
941
942 fsm_deltimer(&conn->timer);
943 fsm_newstate(fi, CONN_STATE_STOPPED);
944 netiucv_purge_skb_queue(&conn->collect_queue);
 945	if (conn->handle) {
 946		IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
 947		iucv_unregister_program(conn->handle);
	}
948 conn->handle = NULL;
949 netiucv_purge_skb_queue(&conn->commit_queue);
950 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
951}
952
953static void
954conn_action_inval(fsm_instance *fi, int event, void *arg)
955{
956 struct iucv_event *ev = (struct iucv_event *)arg;
957 struct iucv_connection *conn = ev->conn;
958 struct net_device *netdev = conn->netdev;
959
960 PRINT_WARN("%s: Cannot connect without username\n",
961 netdev->name);
962 IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
963}
964
965static const fsm_node conn_fsm[] = {
966 { CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval },
967 { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start },
968
969 { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop },
970 { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop },
971 { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop },
972 { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop },
973 { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop },
974 { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop },
975 { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop },
976
977 { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject },
978 { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
979 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
980 { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject },
981 { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject },
982
983 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack },
984 { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev },
985
986 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever },
987 { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever },
988 { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever },
989
990 { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx },
991 { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx },
992
993 { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone },
994 { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone },
995};
996
997static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
998
999
1000/**
1001 * Actions for interface - statemachine.
1002 *****************************************************************************/
1003
1004/**
1005 * Start the connection by sending CONN_EVENT_START to it.
1006 *
1007 * @param fi An instance of an interface statemachine.
1008 * @param event The event that just happened.
1009 * @param arg   Generic pointer, cast from struct net_device * upon call.
1010 */
1011static void
1012dev_action_start(fsm_instance *fi, int event, void *arg)
1013{
1014 struct net_device *dev = (struct net_device *)arg;
1015 struct netiucv_priv *privptr = dev->priv;
1016 struct iucv_event ev;
1017
1018 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1019
1020 ev.conn = privptr->conn;
1021 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1022 fsm_event(privptr->conn->fsm, CONN_EVENT_START, &ev);
1023}
1024
1025/**
1026 * Shut down the connection by sending CONN_EVENT_STOP to it.
1027 *
1028 * @param fi An instance of an interface statemachine.
1029 * @param event The event that just happened.
1030 * @param arg   Generic pointer, cast from struct net_device * upon call.
1031 */
1032static void
1033dev_action_stop(fsm_instance *fi, int event, void *arg)
1034{
1035 struct net_device *dev = (struct net_device *)arg;
1036 struct netiucv_priv *privptr = dev->priv;
1037 struct iucv_event ev;
1038
1039 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1040
1041 ev.conn = privptr->conn;
1042
1043 fsm_newstate(fi, DEV_STATE_STOPWAIT);
1044 fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1045}
1046
1047/**
1048 * Called from connection statemachine
1049 * when a connection is up and running.
1050 *
1051 * @param fi An instance of an interface statemachine.
1052 * @param event The event that just happened.
1053 * @param arg   Generic pointer, cast from struct net_device * upon call.
1054 */
1055static void
1056dev_action_connup(fsm_instance *fi, int event, void *arg)
1057{
1058 struct net_device *dev = (struct net_device *)arg;
1059 struct netiucv_priv *privptr = dev->priv;
1060
1061 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1062
1063 switch (fsm_getstate(fi)) {
1064 case DEV_STATE_STARTWAIT:
1065 fsm_newstate(fi, DEV_STATE_RUNNING);
1066 PRINT_INFO("%s: connected with remote side %s\n",
1067 dev->name, privptr->conn->userid);
1068 IUCV_DBF_TEXT(setup, 3,
1069 "connection is up and running\n");
1070 break;
1071 case DEV_STATE_STOPWAIT:
1072 PRINT_INFO(
1073 "%s: got connection UP event during shutdown!\n",
1074 dev->name);
1075 IUCV_DBF_TEXT(data, 2,
1076 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1077 break;
1078 }
1079}
1080
1081/**
1082 * Called from connection statemachine
1083 * when a connection has been shutdown.
1084 *
1085 * @param fi An instance of an interface statemachine.
1086 * @param event The event that just happened.
1087 * @param arg   Generic pointer, cast from struct net_device * upon call.
1088 */
1089static void
1090dev_action_conndown(fsm_instance *fi, int event, void *arg)
1091{
1092 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1093
1094 switch (fsm_getstate(fi)) {
1095 case DEV_STATE_RUNNING:
1096 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1097 break;
1098 case DEV_STATE_STOPWAIT:
1099 fsm_newstate(fi, DEV_STATE_STOPPED);
1100 IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1101 break;
1102 }
1103}
1104
1105static const fsm_node dev_fsm[] = {
1106 { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
1107
1108 { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start },
1109 { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
1110
1111 { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
1112 { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
1113
1114 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
1115 { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
1116 { DEV_STATE_RUNNING, DEV_EVENT_CONUP, fsm_action_nop },
1117};
1118
1119static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1120
1121/**
1122 * Transmit a packet.
1123 * This is a helper function for netiucv_tx().
1124 *
1125 * @param conn Connection to be used for sending.
1126 * @param skb Pointer to struct sk_buff of packet to send.
1127 * The linklevel header has already been set up
1128 * by netiucv_tx().
1129 *
1130 * @return 0 on success, -ERRNO on failure.
1131 */
1132static int
1133netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb)
{
1134 unsigned long saveflags;
1135 ll_header header;
1136 int rc = 0;
1137
1138 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1139 int l = skb->len + NETIUCV_HDRLEN;
1140
1141 spin_lock_irqsave(&conn->collect_lock, saveflags);
1142 if (conn->collect_len + l >
1143 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1144 rc = -EBUSY;
1145 IUCV_DBF_TEXT(data, 2,
1146 "EBUSY from netiucv_transmit_skb\n");
1147 } else {
1148 atomic_inc(&skb->users);
1149 skb_queue_tail(&conn->collect_queue, skb);
1150 conn->collect_len += l;
1151 }
1152 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1153 } else {
1154 struct sk_buff *nskb = skb;
1155 /**
1156	 * Copy the skb to a newly allocated skb in lowmem only if the
1157 * data is located above 2G in memory or tailroom is < 2.
1158 */
1159 unsigned long hi =
1160 ((unsigned long)(skb->tail + NETIUCV_HDRLEN)) >> 31;
1161 int copied = 0;
1162 if (hi || (skb_tailroom(skb) < 2)) {
1163 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1164 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1165 if (!nskb) {
1166 PRINT_WARN("%s: Could not allocate tx_skb\n",
1167 conn->netdev->name);
1168 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1169 rc = -ENOMEM;
1170 return rc;
1171 } else {
1172 skb_reserve(nskb, NETIUCV_HDRLEN);
1173 memcpy(skb_put(nskb, skb->len),
1174 skb->data, skb->len);
1175 }
1176 copied = 1;
1177 }
1178 /**
1179 * skb now is below 2G and has enough room. Add headers.
1180 */
1181 header.next = nskb->len + NETIUCV_HDRLEN;
1182 memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1183 header.next = 0;
1184 memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1185
1186 fsm_newstate(conn->fsm, CONN_STATE_TX);
1187 conn->prof.send_stamp = xtime;
1188
1189 rc = iucv_send(conn->pathid, NULL, 0, 0, 1 /* single_flag */,
1190 0, nskb->data, nskb->len);
1191 /* Shut up, gcc! nskb is always below 2G. */
1192 conn->prof.doios_single++;
1193 conn->prof.txlen += skb->len;
1194 conn->prof.tx_pending++;
1195 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1196 conn->prof.tx_max_pending = conn->prof.tx_pending;
1197 if (rc) {
1198 struct netiucv_priv *privptr;
1199 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1200 conn->prof.tx_pending--;
1201 privptr = (struct netiucv_priv *)conn->netdev->priv;
1202 if (privptr)
1203 privptr->stats.tx_errors++;
1204 if (copied)
1205 dev_kfree_skb(nskb);
1206 else {
1207 /**
1208 * Remove our headers. They get added
1209 * again on retransmit.
1210 */
1211 skb_pull(skb, NETIUCV_HDRLEN);
1212 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1213 }
1214 PRINT_WARN("iucv_send returned %08x\n", rc);
1215 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1216 } else {
1217 if (copied)
1218 dev_kfree_skb(skb);
1219 atomic_inc(&nskb->users);
1220 skb_queue_tail(&conn->commit_queue, nskb);
1221 }
1222 }
1223
1224 return rc;
1225}
1226
1227/**
1228 * Interface API for upper network layers
1229 *****************************************************************************/
1230
1231/**
1232 * Open an interface.
1233 * Called from generic network layer when ifconfig up is run.
1234 *
1235 * @param dev Pointer to interface struct.
1236 *
1237 * @return 0 on success, -ERRNO on failure. (Never fails.)
1238 */
1239static int
1240netiucv_open(struct net_device *dev)
{
1241 fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START,dev);
1242 return 0;
1243}
1244
1245/**
1246 * Close an interface.
1247 * Called from generic network layer when ifconfig down is run.
1248 *
1249 * @param dev Pointer to interface struct.
1250 *
1251 * @return 0 on success, -ERRNO on failure. (Never fails.)
1252 */
1253static int
1254netiucv_close(struct net_device *dev)
{
1255 fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_STOP, dev);
1256 return 0;
1257}
1258
1259/**
1260 * Start transmission of a packet.
1261 * Called from generic network device layer.
1262 *
1263 * @param skb Pointer to buffer containing the packet.
1264 * @param dev Pointer to interface struct.
1265 *
1266 * @return 0 if packet consumed, !0 if packet rejected.
1267 * Note: If we return !0, then the packet is freed by
1268 * the generic network layer.
1269 */
1270static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1271{
1272 int rc = 0;
1273 struct netiucv_priv *privptr = dev->priv;
1274
1275 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1276 /**
1277 * Some sanity checks ...
1278 */
1279 if (skb == NULL) {
1280 PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
1281 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1282 privptr->stats.tx_dropped++;
1283 return 0;
1284 }
1285 if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1286 PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
1287 dev->name, NETIUCV_HDRLEN);
1288 IUCV_DBF_TEXT(data, 2,
1289 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1290 dev_kfree_skb(skb);
1291 privptr->stats.tx_dropped++;
1292 return 0;
1293 }
1294
1295 /**
1296 * If connection is not running, try to restart it
1297 * and throw away packet.
1298 */
1299 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1300 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
1301 dev_kfree_skb(skb);
1302 privptr->stats.tx_dropped++;
1303 privptr->stats.tx_errors++;
1304 privptr->stats.tx_carrier_errors++;
1305 return 0;
1306 }
1307
1308 if (netiucv_test_and_set_busy(dev)) {
1309 IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1310 return -EBUSY;
1311 }
1312 dev->trans_start = jiffies;
1313 if (netiucv_transmit_skb(privptr->conn, skb))
1314 rc = 1;
1315 netiucv_clear_busy(dev);
1316 return rc;
1317}
1318
1319/**
1320 * Returns interface statistics of a device.
1321 *
1322 * @param dev Pointer to interface struct.
1323 *
1324 * @return Pointer to stats struct of this interface.
1325 */
1326static struct net_device_stats *
1327netiucv_stats (struct net_device * dev)
1328{
1329 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1330 return &((struct netiucv_priv *)dev->priv)->stats;
1331}
1332
1333/**
1334 * Sets MTU of an interface.
1335 *
1336 * @param dev Pointer to interface struct.
1337 * @param new_mtu The new MTU to use for this interface.
1338 *
1339 * @return 0 on success, -EINVAL if MTU is out of valid range.
1340 * (valid range is 576 .. NETIUCV_MTU_MAX).
1341 */
1342static int
1343netiucv_change_mtu (struct net_device * dev, int new_mtu)
1344{
1345 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1346 if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX)) {
1347 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1348 return -EINVAL;
1349 }
1350 dev->mtu = new_mtu;
1351 return 0;
1352}
1353
1354/**
1355 * attributes in sysfs
1356 *****************************************************************************/
1357
1358static ssize_t
1359user_show (struct device *dev, char *buf)
1360{
1361 struct netiucv_priv *priv = dev->driver_data;
1362
1363 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1364 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1365}
1366
1367static ssize_t
1368user_write (struct device *dev, const char *buf, size_t count)
1369{
1370 struct netiucv_priv *priv = dev->driver_data;
1371 struct net_device *ndev = priv->conn->netdev;
1372 char *p;
1373 char *tmp;
1374 char username[10];
1375 int i;
1376
1377 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1378 if (count>9) {
1379 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
1380 IUCV_DBF_TEXT_(setup, 2,
1381 "%d is length of username\n", (int)count);
1382 return -EINVAL;
1383 }
1384
1385 tmp = strsep((char **) &buf, "\n");
1386 for (i=0, p=tmp; i<8 && *p; i++, p++) {
1387 if (isalnum(*p) || (*p == '$'))
1388 username[i]= *p;
1389 else if (*p == '\n') {
1390 /* trailing lf, grr */
1391 break;
1392 } else {
1393 PRINT_WARN("netiucv: Invalid char %c in username!\n",
1394 *p);
1395 IUCV_DBF_TEXT_(setup, 2,
1396 "username: invalid character %c\n",
1397 *p);
1398 return -EINVAL;
1399 }
1400 }
1401 while (i<9)
1402 username[i++] = ' ';
1403 username[9] = '\0';
1404
1405 if (memcmp(username, priv->conn->userid, 8)) {
1406 /* username changed */
1407 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
1408 PRINT_WARN(
1409 "netiucv: device %s active, connected to %s\n",
1410 dev->bus_id, priv->conn->userid);
1411 PRINT_WARN("netiucv: user cannot be updated\n");
1412 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1413 return -EBUSY;
1414 }
1415 }
1416 memcpy(priv->conn->userid, username, 9);
1417
1418 return count;
1419
1420}
1421
1422static DEVICE_ATTR(user, 0644, user_show, user_write);
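
/*
 * Editor's sketch: from user space the peer z/VM guest is configured with,
 * e.g.,
 *
 *	echo VMUSER1 > /sys/bus/iucv/devices/<device>/user
 *
 * (guest name and device path are illustrative only). Writing fails with
 * -EBUSY while the interface is up, as enforced in user_write() above.
 */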
1423
1424static ssize_t
1425buffer_show (struct device *dev, char *buf)
1426{
1427 struct netiucv_priv *priv = dev->driver_data;
1428
1429 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1430 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1431}
1432
1433static ssize_t
1434buffer_write (struct device *dev, const char *buf, size_t count)
1435{
1436 struct netiucv_priv *priv = dev->driver_data;
1437 struct net_device *ndev = priv->conn->netdev;
1438 char *e;
1439 int bs1;
1440
1441 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1442 if (count >= 39)
1443 return -EINVAL;
1444
1445 bs1 = simple_strtoul(buf, &e, 0);
1446
1447 if (e && (!isspace(*e))) {
1448 PRINT_WARN("netiucv: Invalid character in buffer!\n");
1449 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
1450 return -EINVAL;
1451 }
1452 if (bs1 > NETIUCV_BUFSIZE_MAX) {
1453 PRINT_WARN("netiucv: Given buffer size %d too large.\n",
1454 bs1);
1455 IUCV_DBF_TEXT_(setup, 2,
1456 "buffer_write: buffer size %d too large\n",
1457 bs1);
1458 return -EINVAL;
1459 }
1460 if ((ndev->flags & IFF_RUNNING) &&
1461 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1462 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1463 bs1);
1464 IUCV_DBF_TEXT_(setup, 2,
1465 "buffer_write: buffer size %d too small\n",
1466 bs1);
1467 return -EINVAL;
1468 }
1469 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1470 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1471 bs1);
1472 IUCV_DBF_TEXT_(setup, 2,
1473 "buffer_write: buffer size %d too small\n",
1474 bs1);
1475 return -EINVAL;
1476 }
1477
1478 priv->conn->max_buffsize = bs1;
1479 if (!(ndev->flags & IFF_RUNNING))
1480 ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1481
1482 return count;
1483
1484}
1485
1486static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1487
1488static ssize_t
1489dev_fsm_show (struct device *dev, char *buf)
1490{
1491 struct netiucv_priv *priv = dev->driver_data;
1492
1493 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1494 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1495}
1496
1497static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1498
1499static ssize_t
1500conn_fsm_show (struct device *dev, char *buf)
1501{
1502 struct netiucv_priv *priv = dev->driver_data;
1503
1504 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1505 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1506}
1507
1508static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1509
1510static ssize_t
1511maxmulti_show (struct device *dev, char *buf)
1512{
1513 struct netiucv_priv *priv = dev->driver_data;
1514
1515 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1516 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1517}
1518
1519static ssize_t
1520maxmulti_write (struct device *dev, const char *buf, size_t count)
1521{
1522 struct netiucv_priv *priv = dev->driver_data;
1523
1524 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1525 priv->conn->prof.maxmulti = 0;
1526 return count;
1527}
1528
1529static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1530
1531static ssize_t
1532maxcq_show (struct device *dev, char *buf)
1533{
1534 struct netiucv_priv *priv = dev->driver_data;
1535
1536 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1537 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1538}
1539
1540static ssize_t
1541maxcq_write (struct device *dev, const char *buf, size_t count)
1542{
1543 struct netiucv_priv *priv = dev->driver_data;
1544
1545 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1546 priv->conn->prof.maxcqueue = 0;
1547 return count;
1548}
1549
1550static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1551
1552static ssize_t
1553sdoio_show (struct device *dev, char *buf)
1554{
1555 struct netiucv_priv *priv = dev->driver_data;
1556
1557 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1558 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1559}
1560
1561static ssize_t
1562sdoio_write (struct device *dev, const char *buf, size_t count)
1563{
1564 struct netiucv_priv *priv = dev->driver_data;
1565
1566 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1567 priv->conn->prof.doios_single = 0;
1568 return count;
1569}
1570
1571static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1572
1573static ssize_t
1574mdoio_show (struct device *dev, char *buf)
1575{
1576 struct netiucv_priv *priv = dev->driver_data;
1577
1578 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1579 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1580}
1581
1582static ssize_t
1583mdoio_write (struct device *dev, const char *buf, size_t count)
1584{
1585 struct netiucv_priv *priv = dev->driver_data;
1586
1587 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1588 priv->conn->prof.doios_multi = 0;
1589 return count;
1590}
1591
1592static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1593
1594static ssize_t
1595txlen_show (struct device *dev, char *buf)
1596{
1597 struct netiucv_priv *priv = dev->driver_data;
1598
1599 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1600 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1601}
1602
1603static ssize_t
1604txlen_write (struct device *dev, const char *buf, size_t count)
1605{
1606 struct netiucv_priv *priv = dev->driver_data;
1607
1608 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1609 priv->conn->prof.txlen = 0;
1610 return count;
1611}
1612
1613static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1614
1615static ssize_t
1616txtime_show (struct device *dev, char *buf)
1617{
1618 struct netiucv_priv *priv = dev->driver_data;
1619
1620 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1621 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1622}
1623
1624static ssize_t
1625txtime_write (struct device *dev, const char *buf, size_t count)
1626{
1627 struct netiucv_priv *priv = dev->driver_data;
1628
1629 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1630 priv->conn->prof.tx_time = 0;
1631 return count;
1632}
1633
1634static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1635
1636static ssize_t
1637txpend_show (struct device *dev, char *buf)
1638{
1639 struct netiucv_priv *priv = dev->driver_data;
1640
1641 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1642 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1643}
1644
1645static ssize_t
1646txpend_write (struct device *dev, const char *buf, size_t count)
1647{
1648 struct netiucv_priv *priv = dev->driver_data;
1649
1650 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1651 priv->conn->prof.tx_pending = 0;
1652 return count;
1653}
1654
1655static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1656
1657static ssize_t
1658txmpnd_show (struct device *dev, char *buf)
1659{
1660 struct netiucv_priv *priv = dev->driver_data;
1661
1662 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1663 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1664}
1665
1666static ssize_t
1667txmpnd_write (struct device *dev, const char *buf, size_t count)
1668{
1669 struct netiucv_priv *priv = dev->driver_data;
1670
1671 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1672 priv->conn->prof.tx_max_pending = 0;
1673 return count;
1674}
1675
1676static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1677
1678static struct attribute *netiucv_attrs[] = {
1679 &dev_attr_buffer.attr,
1680 &dev_attr_user.attr,
1681 NULL,
1682};
1683
1684static struct attribute_group netiucv_attr_group = {
1685 .attrs = netiucv_attrs,
1686};
1687
1688static struct attribute *netiucv_stat_attrs[] = {
1689 &dev_attr_device_fsm_state.attr,
1690 &dev_attr_connection_fsm_state.attr,
1691 &dev_attr_max_tx_buffer_used.attr,
1692 &dev_attr_max_chained_skbs.attr,
1693 &dev_attr_tx_single_write_ops.attr,
1694 &dev_attr_tx_multi_write_ops.attr,
1695 &dev_attr_netto_bytes.attr,
1696 &dev_attr_max_tx_io_time.attr,
1697 &dev_attr_tx_pending.attr,
1698 &dev_attr_tx_max_pending.attr,
1699 NULL,
1700};
1701
1702static struct attribute_group netiucv_stat_attr_group = {
1703 .name = "stats",
1704 .attrs = netiucv_stat_attrs,
1705};
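/*
 * Usage sketch (names are illustrative): for a net device "iucv0" the
 * companion struct device is registered as "netiucv0" on the IUCV bus
 * (see netiucv_register_device() below), so this group shows up as a
 * "stats" subdirectory. Reading a file returns the counter; writing
 * any value resets it via the *_write handlers above:
 *
 *   cat /sys/bus/iucv/devices/netiucv0/stats/tx_pending
 *   echo 0 > /sys/bus/iucv/devices/netiucv0/stats/tx_pending
 */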
1706
1707static inline int
1708netiucv_add_files(struct device *dev)
1709{
1710 int ret;
1711
1712 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1713 ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
1714 if (ret)
1715 return ret;
1716 ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
1717 if (ret)
1718 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1719 return ret;
1720}
1721
1722static inline void
1723netiucv_remove_files(struct device *dev)
1724{
1725 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1726 sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
1727 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1728}
1729
1730static int
1731netiucv_register_device(struct net_device *ndev)
1732{
1733 struct netiucv_priv *priv = ndev->priv;
1734 struct device *dev = kmalloc(sizeof(struct device), GFP_KERNEL);
1735 int ret;
1736
1737
1738 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1739
1740 if (dev) {
1741 memset(dev, 0, sizeof(struct device));
1742 snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
1743 dev->bus = &iucv_bus;
1744 dev->parent = iucv_root;
1745 /*
1746 * The release function could be called after the
1747 * module has been unloaded. Its _only_ task is to
1748 * free the struct. Therefore, we specify kfree()
1749 * directly here. (Probably a little bit obfuscated,
1750 * but legitimate ...).
1751 */
1752 dev->release = (void (*)(struct device *))kfree;
1753 dev->driver = &netiucv_driver;
1754 } else
1755 return -ENOMEM;
1756
1757 ret = device_register(dev);
1758
1759 if (ret) {
put_device(dev);
return ret;
}
1761 ret = netiucv_add_files(dev);
1762 if (ret)
1763 goto out_unreg;
1764 priv->dev = dev;
1765 dev->driver_data = priv;
1766 return 0;
1767
1768out_unreg:
1769 device_unregister(dev);
1770 return ret;
1771}
1772
1773static void
1774netiucv_unregister_device(struct device *dev)
1775{
1776 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1777 netiucv_remove_files(dev);
1778 device_unregister(dev);
1779}
1780
1781/**
1782 * Allocate and initialize a new connection structure.
1783 * Add it to the list of netiucv connections;
1784 */
1785static struct iucv_connection *
1786netiucv_new_connection(struct net_device *dev, char *username)
1787{
1788 struct iucv_connection **clist = &iucv_connections;
1789 struct iucv_connection *conn =
1790 (struct iucv_connection *)
1791 kmalloc(sizeof(struct iucv_connection), GFP_KERNEL);
1792
1793 if (conn) {
1794 memset(conn, 0, sizeof(struct iucv_connection));
1795 skb_queue_head_init(&conn->collect_queue);
1796 skb_queue_head_init(&conn->commit_queue);
1797 conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1798 conn->netdev = dev;
1799
1800 conn->rx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
1801 GFP_KERNEL | GFP_DMA);
1802 if (!conn->rx_buff) {
1803 kfree(conn);
1804 return NULL;
1805 }
1806 conn->tx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
1807 GFP_KERNEL | GFP_DMA);
1808 if (!conn->tx_buff) {
1809 kfree_skb(conn->rx_buff);
1810 kfree(conn);
1811 return NULL;
1812 }
1813 conn->fsm = init_fsm("netiucvconn", conn_state_names,
1814 conn_event_names, NR_CONN_STATES,
1815 NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1816 GFP_KERNEL);
1817 if (!conn->fsm) {
1818 kfree_skb(conn->tx_buff);
1819 kfree_skb(conn->rx_buff);
1820 kfree(conn);
1821 return NULL;
1822 }
1823 fsm_settimer(conn->fsm, &conn->timer);
1824 fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1825
1826 if (username) {
1827 memcpy(conn->userid, username, 9);
1828 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1829 }
1830
1831 conn->next = *clist;
1832 *clist = conn;
1833 }
1834 return conn;
1835}
1836
1837/**
1838 * Release a connection structure and remove it from the
1839 * list of netiucv connections.
1840 */
1841static void
1842netiucv_remove_connection(struct iucv_connection *conn)
1843{
1844 struct iucv_connection **clist = &iucv_connections;
1845
1846 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1847 if (conn == NULL)
1848 return;
1849 while (*clist) {
1850 if (*clist == conn) {
1851 *clist = conn->next;
1852 if (conn->handle) {
1853 iucv_unregister_program(conn->handle);
1854 conn->handle = NULL;
1855 }
1856 fsm_deltimer(&conn->timer);
1857 kfree_fsm(conn->fsm);
1858 kfree_skb(conn->rx_buff);
1859 kfree_skb(conn->tx_buff);
1860 return;
1861 }
1862 clist = &((*clist)->next);
1863 }
1864}
1865
1866/**
1867 * Release everything of a net device.
1868 */
1869static void
1870netiucv_free_netdevice(struct net_device *dev)
1871{
1872 struct netiucv_priv *privptr;
1873
1874 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1875
1876 if (!dev)
1877 return;
1878
1879 privptr = (struct netiucv_priv *)dev->priv;
1880 if (privptr) {
1881 if (privptr->conn)
1882 netiucv_remove_connection(privptr->conn);
1883 if (privptr->fsm)
1884 kfree_fsm(privptr->fsm);
1885 privptr->conn = NULL; privptr->fsm = NULL;
1886 /* privptr gets freed by free_netdev() */
1887 }
1888 free_netdev(dev);
1889}
1890
1891/**
1892 * Initialize a net device. (Called from kernel in alloc_netdev())
1893 */
1894static void
1895netiucv_setup_netdevice(struct net_device *dev)
1896{
1897 memset(dev->priv, 0, sizeof(struct netiucv_priv));
1898
1899 dev->mtu = NETIUCV_MTU_DEFAULT;
1900 dev->hard_start_xmit = netiucv_tx;
1901 dev->open = netiucv_open;
1902 dev->stop = netiucv_close;
1903 dev->get_stats = netiucv_stats;
1904 dev->change_mtu = netiucv_change_mtu;
1905 dev->destructor = netiucv_free_netdevice;
1906 dev->hard_header_len = NETIUCV_HDRLEN;
1907 dev->addr_len = 0;
1908 dev->type = ARPHRD_SLIP;
1909 dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
1910 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1911 SET_MODULE_OWNER(dev);
1912}
1913
1914/**
1915 * Allocate and initialize everything of a net device.
1916 */
1917static struct net_device *
1918netiucv_init_netdevice(char *username)
1919{
1920 struct netiucv_priv *privptr;
1921 struct net_device *dev;
1922
1923 dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
1924 netiucv_setup_netdevice);
1925 if (!dev)
1926 return NULL;
1927 if (dev_alloc_name(dev, dev->name) < 0) {
1928 free_netdev(dev);
1929 return NULL;
1930 }
1931
1932 privptr = (struct netiucv_priv *)dev->priv;
1933 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1934 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1935 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
1936 if (!privptr->fsm) {
1937 free_netdev(dev);
1938 return NULL;
1939 }
1940 privptr->conn = netiucv_new_connection(dev, username);
1941 if (!privptr->conn) {
1942 kfree_fsm(privptr->fsm);
1943 free_netdev(dev);
1944 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
1945 return NULL;
1946 }
1947 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1948
1949 return dev;
1950}
1951
1952static ssize_t
1953conn_write(struct device_driver *drv, const char *buf, size_t count)
1954{
1955 char *p;
1956 char username[10];
1957 int i, ret;
1958 struct net_device *dev;
1959
1960 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1961 if (count>9) {
1962 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
1963 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1964 return -EINVAL;
1965 }
1966
1967 for (i=0, p=(char *)buf; i<8 && *p; i++, p++) {
1968 if (isalnum(*p) || (*p == '$'))
1969 username[i]= *p;
1970 else if (*p == '\n') {
1971 /* trailing lf, grr */
1972 break;
1973 } else {
1974 PRINT_WARN("netiucv: Invalid character in username!\n");
1975 IUCV_DBF_TEXT_(setup, 2,
1976 "conn_write: invalid character %c\n", *p);
1977 return -EINVAL;
1978 }
1979 }
1980 while (i<9)
1981 username[i++] = ' ';
1982 username[9] = '\0';
1983 dev = netiucv_init_netdevice(username);
1984 if (!dev) {
1985 PRINT_WARN(
1986 "netiucv: Could not allocate network device structure "
1987 "for user '%s'\n", netiucv_printname(username));
1988 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
1989 return -ENODEV;
1990 }
1991
1992 if ((ret = netiucv_register_device(dev))) {
1993 IUCV_DBF_TEXT_(setup, 2,
1994 "ret %d from netiucv_register_device\n", ret);
1995 goto out_free_ndev;
1996 }
1997
1998 /* sysfs magic */
1999 SET_NETDEV_DEV(dev,
2000 (struct device*)((struct netiucv_priv*)dev->priv)->dev);
2001
2002 if ((ret = register_netdev(dev))) {
2003 netiucv_unregister_device((struct device*)
2004 ((struct netiucv_priv*)dev->priv)->dev);
2005 goto out_free_ndev;
2006 }
2007
2008 PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
2009
2010 return count;
2011
2012out_free_ndev:
2013 PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
2014 IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
2015 netiucv_free_netdevice(dev);
2016 return ret;
2017}
2018
2019DRIVER_ATTR(connection, 0200, NULL, conn_write);
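/*
 * Usage sketch, assuming the driver is registered under the name
 * "netiucv" on the IUCV bus: a new point-to-point interface is created
 * by writing the peer's VM user ID (up to eight alphanumeric or '$'
 * characters, blank-padded by the loop above) to this write-only
 * attribute:
 *
 *   echo "TESTUSER" > /sys/bus/iucv/drivers/netiucv/connection
 */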
2020
2021static ssize_t
2022remove_write (struct device_driver *drv, const char *buf, size_t count)
2023{
2024 struct iucv_connection **clist = &iucv_connections;
2025 struct net_device *ndev;
2026 struct netiucv_priv *priv;
2027 struct device *dev;
2028 char name[IFNAMSIZ];
2029 char *p;
2030 int i;
2031
2032 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2033
2034 if (count >= IFNAMSIZ)
2035 count = IFNAMSIZ-1;
2036
2037 for (i=0, p=(char *)buf; i<count && *p; i++, p++) {
2038 if ((*p == '\n') || (*p == ' ')) {
2039 /* trailing lf, grr */
2040 break;
2041 } else {
2042 name[i]=*p;
2043 }
2044 }
2045 name[i] = '\0';
2046
2047 while (*clist) {
2048 ndev = (*clist)->netdev;
2049 priv = (struct netiucv_priv*)ndev->priv;
2050 dev = priv->dev;
2051
2052 if (strncmp(name, ndev->name, count)) {
2053 clist = &((*clist)->next);
2054 continue;
2055 }
2056 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2057 PRINT_WARN(
2058 "netiucv: net device %s active with peer %s\n",
2059 ndev->name, priv->conn->userid);
2060 PRINT_WARN("netiucv: %s cannot be removed\n",
2061 ndev->name);
2062 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2063 return -EBUSY;
2064 }
2065 unregister_netdev(ndev);
2066 netiucv_unregister_device(dev);
2067 return count;
2068 }
2069 PRINT_WARN("netiucv: net device %s unknown\n", name);
2070 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2071 return -EINVAL;
2072}
2073
2074DRIVER_ATTR(remove, 0200, NULL, remove_write);
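/*
 * Usage sketch (same path assumption as for "connection" above): an
 * interface that is not IFF_UP can be torn down again by writing its
 * name; active interfaces are rejected with -EBUSY by remove_write():
 *
 *   echo "iucv0" > /sys/bus/iucv/drivers/netiucv/remove
 */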
2075
2076static void
2077netiucv_banner(void)
2078{
2079 char vbuf[] = "$Revision: 1.63 $";
2080 char *version = vbuf;
2081
2082 if ((version = strchr(version, ':'))) {
2083 char *p = strchr(version + 1, '$');
2084 if (p)
2085 *p = '\0';
2086 } else
2087 version = " ??? ";
2088 PRINT_INFO("NETIUCV driver Version%s initialized\n", version);
2089}
2090
2091static void __exit
2092netiucv_exit(void)
2093{
2094 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2095 while (iucv_connections) {
2096 struct net_device *ndev = iucv_connections->netdev;
2097 struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv;
2098 struct device *dev = priv->dev;
2099
2100 unregister_netdev(ndev);
2101 netiucv_unregister_device(dev);
2102 }
2103
2104 driver_remove_file(&netiucv_driver, &driver_attr_connection);
2105 driver_remove_file(&netiucv_driver, &driver_attr_remove);
2106 driver_unregister(&netiucv_driver);
2107 iucv_unregister_dbf_views();
2108
2109 PRINT_INFO("NETIUCV driver unloaded\n");
2110 return;
2111}
2112
2113static int __init
2114netiucv_init(void)
2115{
2116 int ret;
2117
2118 ret = iucv_register_dbf_views();
2119 if (ret) {
2120 PRINT_WARN("netiucv_init failed, "
2121 "iucv_register_dbf_views rc = %d\n", ret);
2122 return ret;
2123 }
2124 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2125 ret = driver_register(&netiucv_driver);
2126 if (ret) {
2127 PRINT_ERR("NETIUCV: failed to register driver.\n");
2128 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", ret);
2129 iucv_unregister_dbf_views();
2130 return ret;
2131 }
2132
2133 /* Add entry for specifying connections. */
2134 ret = driver_create_file(&netiucv_driver, &driver_attr_connection);
2135 if (!ret) {
2136 ret = driver_create_file(&netiucv_driver, &driver_attr_remove);
2137 netiucv_banner();
2138 } else {
2139 PRINT_ERR("NETIUCV: failed to add driver attribute.\n");
2140 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret);
2141 driver_unregister(&netiucv_driver);
2142 iucv_unregister_dbf_views();
2143 }
2144 return ret;
2145}
2146
2147module_init(netiucv_init);
2148module_exit(netiucv_exit);
2149MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
new file mode 100644
index 000000000000..a341041a6cf7
--- /dev/null
+++ b/drivers/s390/net/qeth.h
@@ -0,0 +1,1162 @@
1#ifndef __QETH_H__
2#define __QETH_H__
3
4#include <linux/if.h>
5#include <linux/if_arp.h>
6
7#include <linux/if_tr.h>
8#include <linux/trdevice.h>
9#include <linux/etherdevice.h>
10#include <linux/if_vlan.h>
11
12#include <net/ipv6.h>
13#include <linux/in6.h>
14#include <net/if_inet6.h>
15#include <net/addrconf.h>
16
17
18#include <linux/bitops.h>
19
20#include <asm/debug.h>
21#include <asm/qdio.h>
22#include <asm/ccwdev.h>
23#include <asm/ccwgroup.h>
24
25#include "qeth_mpc.h"
26
27#define VERSION_QETH_H "$Revision: 1.135 $"
28
29#ifdef CONFIG_QETH_IPV6
30#define QETH_VERSION_IPV6 ":IPv6"
31#else
32#define QETH_VERSION_IPV6 ""
33#endif
34#ifdef CONFIG_QETH_VLAN
35#define QETH_VERSION_VLAN ":VLAN"
36#else
37#define QETH_VERSION_VLAN ""
38#endif
39
40/**
41 * Debug Facility stuff
42 */
43#define QETH_DBF_SETUP_NAME "qeth_setup"
44#define QETH_DBF_SETUP_LEN 8
45#define QETH_DBF_SETUP_INDEX 3
46#define QETH_DBF_SETUP_NR_AREAS 1
47#define QETH_DBF_SETUP_LEVEL 5
48
49#define QETH_DBF_MISC_NAME "qeth_misc"
50#define QETH_DBF_MISC_LEN 128
51#define QETH_DBF_MISC_INDEX 1
52#define QETH_DBF_MISC_NR_AREAS 1
53#define QETH_DBF_MISC_LEVEL 2
54
55#define QETH_DBF_DATA_NAME "qeth_data"
56#define QETH_DBF_DATA_LEN 96
57#define QETH_DBF_DATA_INDEX 3
58#define QETH_DBF_DATA_NR_AREAS 1
59#define QETH_DBF_DATA_LEVEL 2
60
61#define QETH_DBF_CONTROL_NAME "qeth_control"
62#define QETH_DBF_CONTROL_LEN 256
63#define QETH_DBF_CONTROL_INDEX 3
64#define QETH_DBF_CONTROL_NR_AREAS 2
65#define QETH_DBF_CONTROL_LEVEL 5
66
67#define QETH_DBF_TRACE_NAME "qeth_trace"
68#define QETH_DBF_TRACE_LEN 8
69#define QETH_DBF_TRACE_INDEX 2
70#define QETH_DBF_TRACE_NR_AREAS 2
71#define QETH_DBF_TRACE_LEVEL 3
72extern debug_info_t *qeth_dbf_trace;
73
74#define QETH_DBF_SENSE_NAME "qeth_sense"
75#define QETH_DBF_SENSE_LEN 64
76#define QETH_DBF_SENSE_INDEX 1
77#define QETH_DBF_SENSE_NR_AREAS 1
78#define QETH_DBF_SENSE_LEVEL 2
79
80#define QETH_DBF_QERR_NAME "qeth_qerr"
81#define QETH_DBF_QERR_LEN 8
82#define QETH_DBF_QERR_INDEX 1
83#define QETH_DBF_QERR_NR_AREAS 2
84#define QETH_DBF_QERR_LEVEL 2
85
86#define QETH_DBF_TEXT(name,level,text) \
87 do { \
88 debug_text_event(qeth_dbf_##name,level,text); \
89 } while (0)
90
91#define QETH_DBF_HEX(name,level,addr,len) \
92 do { \
93 debug_event(qeth_dbf_##name,level,(void*)(addr),len); \
94 } while (0)
95
96DECLARE_PER_CPU(char[256], qeth_dbf_txt_buf);
97
98#define QETH_DBF_TEXT_(name,level,text...) \
99 do { \
100 char* dbf_txt_buf = get_cpu_var(qeth_dbf_txt_buf); \
101 sprintf(dbf_txt_buf, text); \
102 debug_text_event(qeth_dbf_##name,level,dbf_txt_buf); \
103 put_cpu_var(qeth_dbf_txt_buf); \
104 } while (0)
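/*
 * Hypothetical call sites, as a sketch: QETH_DBF_TEXT logs a fixed
 * string, while the printf-style QETH_DBF_TEXT_ formats into the
 * per-CPU 256-byte qeth_dbf_txt_buf declared above, so the formatted
 * output must stay short:
 *
 *   QETH_DBF_TEXT(trace, 2, "starthrd");
 *   QETH_DBF_TEXT_(trace, 2, "rc=%d", rc);
 *   QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
 */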
105
106#define QETH_DBF_SPRINTF(name,level,text...) \
107 do { \
108 debug_sprintf_event(qeth_dbf_trace, level, ##text ); \
109 debug_sprintf_event(qeth_dbf_trace, level, text ); \
110 } while (0)
111
112/**
113 * some more debug stuff
114 */
115#define PRINTK_HEADER "qeth: "
116
117#define HEXDUMP16(importance,header,ptr) \
118PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
119 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
120 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
121 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
122 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
123 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
124 *(((char*)ptr)+12),*(((char*)ptr)+13), \
125 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
126PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
127 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
128 *(((char*)ptr)+16),*(((char*)ptr)+17), \
129 *(((char*)ptr)+18),*(((char*)ptr)+19), \
130 *(((char*)ptr)+20),*(((char*)ptr)+21), \
131 *(((char*)ptr)+22),*(((char*)ptr)+23), \
132 *(((char*)ptr)+24),*(((char*)ptr)+25), \
133 *(((char*)ptr)+26),*(((char*)ptr)+27), \
134 *(((char*)ptr)+28),*(((char*)ptr)+29), \
135 *(((char*)ptr)+30),*(((char*)ptr)+31));
136
137static inline void
138qeth_hex_dump(unsigned char *buf, size_t len)
139{
140 size_t i;
141
142 for (i = 0; i < len; i++) {
143 if (i && !(i % 16))
144 printk("\n");
145 printk("%02x ", *(buf + i));
146 }
147 printk("\n");
148}
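/*
 * Example: qeth_hex_dump(buf, 32) prints two lines of sixteen
 * space-separated hex bytes each, since a newline is emitted before
 * every 16th byte and once more at the end.
 */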
149
150#define SENSE_COMMAND_REJECT_BYTE 0
151#define SENSE_COMMAND_REJECT_FLAG 0x80
152#define SENSE_RESETTING_EVENT_BYTE 1
153#define SENSE_RESETTING_EVENT_FLAG 0x80
154
155#define atomic_swap(a,b) xchg((int *)a.counter, b)
156
157/*
158 * Common IO related definitions
159 */
160extern struct device *qeth_root_dev;
161extern struct ccw_driver qeth_ccw_driver;
162extern struct ccwgroup_driver qeth_ccwgroup_driver;
163
164#define CARD_RDEV(card) card->read.ccwdev
165#define CARD_WDEV(card) card->write.ccwdev
166#define CARD_DDEV(card) card->data.ccwdev
167#define CARD_BUS_ID(card) card->gdev->dev.bus_id
168#define CARD_RDEV_ID(card) card->read.ccwdev->dev.bus_id
169#define CARD_WDEV_ID(card) card->write.ccwdev->dev.bus_id
170#define CARD_DDEV_ID(card) card->data.ccwdev->dev.bus_id
171#define CHANNEL_ID(channel) channel->ccwdev->dev.bus_id
172
173#define CARD_FROM_CDEV(cdev) (struct qeth_card *) \
174 ((struct ccwgroup_device *)cdev->dev.driver_data)\
175 ->dev.driver_data;
176
177/**
178 * card stuff
179 */
180#ifdef CONFIG_QETH_PERF_STATS
181struct qeth_perf_stats {
182 unsigned int bufs_rec;
183 unsigned int bufs_sent;
184
185 unsigned int skbs_sent_pack;
186 unsigned int bufs_sent_pack;
187
188 unsigned int sc_dp_p;
189 unsigned int sc_p_dp;
190 /* qdio_input_handler: number of times called, time spent in */
191 __u64 inbound_start_time;
192 unsigned int inbound_cnt;
193 unsigned int inbound_time;
194 /* qeth_send_packet: number of times called, time spent in */
195 __u64 outbound_start_time;
196 unsigned int outbound_cnt;
197 unsigned int outbound_time;
198 /* qdio_output_handler: number of times called, time spent in */
199 __u64 outbound_handler_start_time;
200 unsigned int outbound_handler_cnt;
201 unsigned int outbound_handler_time;
202 /* number of calls to and time spent in do_QDIO for inbound queue */
203 __u64 inbound_do_qdio_start_time;
204 unsigned int inbound_do_qdio_cnt;
205 unsigned int inbound_do_qdio_time;
206 /* number of calls to and time spent in do_QDIO for outbound queues */
207 __u64 outbound_do_qdio_start_time;
208 unsigned int outbound_do_qdio_cnt;
209 unsigned int outbound_do_qdio_time;
210 /* eddp data */
211 unsigned int large_send_bytes;
212 unsigned int large_send_cnt;
213 unsigned int sg_skbs_sent;
214 unsigned int sg_frags_sent;
215};
216#endif /* CONFIG_QETH_PERF_STATS */
217
218/* Routing stuff */
219struct qeth_routing_info {
220 enum qeth_routing_types type;
221};
222
223/* IPA stuff */
224struct qeth_ipa_info {
225 __u32 supported_funcs;
226 __u32 enabled_funcs;
227};
228
229static inline int
230qeth_is_ipa_supported(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func)
231{
232 return (ipa->supported_funcs & func);
233}
234
235static inline int
236qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func)
237{
238 return (ipa->supported_funcs & ipa->enabled_funcs & func);
239}
240
241#define qeth_adp_supported(c,f) \
242 qeth_is_ipa_supported(&c->options.adp, f)
243#define qeth_adp_enabled(c,f) \
244 qeth_is_ipa_enabled(&c->options.adp, f)
245#define qeth_is_supported(c,f) \
246 qeth_is_ipa_supported(&c->options.ipa4, f)
247#define qeth_is_enabled(c,f) \
248 qeth_is_ipa_enabled(&c->options.ipa4, f)
249#ifdef CONFIG_QETH_IPV6
250#define qeth_is_supported6(c,f) \
251 qeth_is_ipa_supported(&c->options.ipa6, f)
252#define qeth_is_enabled6(c,f) \
253 qeth_is_ipa_enabled(&c->options.ipa6, f)
254#else /* CONFIG_QETH_IPV6 */
255#define qeth_is_supported6(c,f) 0
256#define qeth_is_enabled6(c,f) 0
257#endif /* CONFIG_QETH_IPV6 */
258#define qeth_is_ipafunc_supported(c,prot,f) \
259 (prot==QETH_PROT_IPV6)? qeth_is_supported6(c,f):qeth_is_supported(c,f)
260#define qeth_is_ipafunc_enabled(c,prot,f) \
261 (prot==QETH_PROT_IPV6)? qeth_is_enabled6(c,f):qeth_is_enabled(c,f)
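/*
 * Semantics sketch (the bit values below are arbitrary, for
 * illustration only): a function the adapter advertises but that has
 * not been switched on is supported without being enabled.
 *
 *   struct qeth_ipa_info ipa = {
 *           .supported_funcs = 0x06,
 *           .enabled_funcs = 0x02,
 *   };
 *   qeth_is_ipa_supported(&ipa, 0x04);   returns non-zero
 *   qeth_is_ipa_enabled(&ipa, 0x04);     returns 0
 */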
262
263
264#define QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT 0x0101
265#define QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT 0x0101
266#define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108
267#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108
268
269#define QETH_MODELLIST_ARRAY \
270 {{0x1731,0x01,0x1732,0x01,QETH_CARD_TYPE_OSAE,1, \
271 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
272 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
273 QETH_MAX_QUEUES,0}, \
274 {0x1731,0x05,0x1732,0x05,QETH_CARD_TYPE_IQD,0, \
275 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \
276 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \
277 QETH_MAX_QUEUES,0x103}, \
278 {0,0,0,0,0,0,0,0,0}}
279
280#define QETH_REAL_CARD 1
281#define QETH_VLAN_CARD 2
282#define QETH_BUFSIZE 4096
283
284/**
285 * some more defs
286 */
287#define IF_NAME_LEN 16
288#define QETH_TX_TIMEOUT (100 * HZ)
289#define QETH_HEADER_SIZE 32
290#define MAX_PORTNO 15
291#define QETH_FAKE_LL_LEN ETH_HLEN
292#define QETH_FAKE_LL_V6_ADDR_POS 24
293
294/*IPv6 address autoconfiguration stuff*/
295#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
296#define UNIQUE_ID_NOT_BY_CARD 0x10000
297
298/*****************************************************************************/
299/* QDIO queue and buffer handling */
300/*****************************************************************************/
301#define QETH_MAX_QUEUES 4
302#define QETH_IN_BUF_SIZE_DEFAULT 65536
303#define QETH_IN_BUF_COUNT_DEFAULT 16
304#define QETH_IN_BUF_COUNT_MIN 8
305#define QETH_IN_BUF_COUNT_MAX 128
306#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
307#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
308 ((card)->qdio.in_buf_pool.buf_count / 2)
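/*
 * Worked example with the defaults above: in_buf_size = 65536 gives
 * QETH_MAX_BUFFER_ELEMENTS = 65536 >> 12 = 16 elements (one 4 KiB page
 * each), and a pool of QETH_IN_BUF_COUNT_DEFAULT = 16 buffers yields a
 * requeue threshold of 16 / 2 = 8.
 */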
309
310/* number of buffers we have to lag behind before we get a PCI */
311#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
312/*enqueued free buffers left before we get a PCI*/
313#define QETH_PCI_THRESHOLD_B(card) 0
314/*not used unless the microcode gets patched*/
315#define QETH_PCI_TIMER_VALUE(card) 3
316
317#define QETH_MIN_INPUT_THRESHOLD 1
318#define QETH_MAX_INPUT_THRESHOLD 500
319#define QETH_MIN_OUTPUT_THRESHOLD 1
320#define QETH_MAX_OUTPUT_THRESHOLD 300
321
322/* priority queueing */
323#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
324#define QETH_DEFAULT_QUEUE 2
325#define QETH_NO_PRIO_QUEUEING 0
326#define QETH_PRIO_Q_ING_PREC 1
327#define QETH_PRIO_Q_ING_TOS 2
328#define IP_TOS_LOWDELAY 0x10
329#define IP_TOS_HIGHTHROUGHPUT 0x08
330#define IP_TOS_HIGHRELIABILITY 0x04
331#define IP_TOS_NOTIMPORTANT 0x02
332
333/* Packing */
334#define QETH_LOW_WATERMARK_PACK 2
335#define QETH_HIGH_WATERMARK_PACK 5
336#define QETH_WATERMARK_PACK_FUZZ 1
337
338#define QETH_IP_HEADER_SIZE 40
339
340struct qeth_hdr_layer3 {
341 __u8 id;
342 __u8 flags;
343 __u16 inbound_checksum; /*TSO:__u16 seqno */
344 __u32 token; /*TSO: __u32 reserved */
345 __u16 length;
346 __u8 vlan_prio;
347 __u8 ext_flags;
348 __u16 vlan_id;
349 __u16 frame_offset;
350 __u8 dest_addr[16];
351} __attribute__ ((packed));
352
353struct qeth_hdr_layer2 {
354 __u8 id;
355 __u8 flags[3];
356 __u8 port_no;
357 __u8 hdr_length;
358 __u16 pkt_length;
359 __u16 seq_no;
360 __u16 vlan_id;
361 __u32 reserved;
362 __u8 reserved2[16];
363} __attribute__ ((packed));
364
365struct qeth_hdr {
366 union {
367 struct qeth_hdr_layer2 l2;
368 struct qeth_hdr_layer3 l3;
369 } hdr;
370} __attribute__ ((packed));
371
372
373/* flags for qeth_hdr.flags */
374#define QETH_HDR_PASSTHRU 0x10
375#define QETH_HDR_IPV6 0x80
376#define QETH_HDR_CAST_MASK 0x07
377enum qeth_cast_flags {
378 QETH_CAST_UNICAST = 0x06,
379 QETH_CAST_MULTICAST = 0x04,
380 QETH_CAST_BROADCAST = 0x05,
381 QETH_CAST_ANYCAST = 0x07,
382 QETH_CAST_NOCAST = 0x00,
383};
384
385enum qeth_layer2_frame_flags {
386 QETH_LAYER2_FLAG_MULTICAST = 0x01,
387 QETH_LAYER2_FLAG_BROADCAST = 0x02,
388 QETH_LAYER2_FLAG_UNICAST = 0x04,
389 QETH_LAYER2_FLAG_VLAN = 0x10,
390};
391
392enum qeth_header_ids {
393 QETH_HEADER_TYPE_LAYER3 = 0x01,
394 QETH_HEADER_TYPE_LAYER2 = 0x02,
395 QETH_HEADER_TYPE_TSO = 0x03,
396};
397/* flags for qeth_hdr.ext_flags */
398#define QETH_HDR_EXT_VLAN_FRAME 0x01
399#define QETH_HDR_EXT_TOKEN_ID 0x02
400#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04
401#define QETH_HDR_EXT_SRC_MAC_ADDR 0x08
402#define QETH_HDR_EXT_CSUM_HDR_REQ 0x10
403#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
404#define QETH_HDR_EXT_UDP_TSO 0x40 /*bit off for TCP*/
405
406static inline int
407qeth_is_last_sbale(struct qdio_buffer_element *sbale)
408{
409 return (sbale->flags & SBAL_FLAGS_LAST_ENTRY);
410}
411
412enum qeth_qdio_buffer_states {
413 /*
414 * inbound: read out by driver; owned by hardware in order to be filled
415 * outbound: owned by driver in order to be filled
416 */
417 QETH_QDIO_BUF_EMPTY,
418 /*
419 * inbound: filled by hardware; owned by driver in order to be read out
420 * outbound: filled by driver; owned by hardware in order to be sent
421 */
422 QETH_QDIO_BUF_PRIMED,
423};
424
425enum qeth_qdio_info_states {
426 QETH_QDIO_UNINITIALIZED,
427 QETH_QDIO_ALLOCATED,
428 QETH_QDIO_ESTABLISHED,
429};
430
431struct qeth_buffer_pool_entry {
432 struct list_head list;
433 struct list_head init_list;
434 void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
435};
436
437struct qeth_qdio_buffer_pool {
438 struct list_head entry_list;
439 int buf_count;
440};
441
442struct qeth_qdio_buffer {
443 struct qdio_buffer *buffer;
444 volatile enum qeth_qdio_buffer_states state;
445 /* the buffer pool entry currently associated to this buffer */
446 struct qeth_buffer_pool_entry *pool_entry;
447};
448
449struct qeth_qdio_q {
450 struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
451 struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
452 /*
453 * buf_to_init means "buffer must be initialized by driver and must
454 * be made available for hardware" -> state is set to EMPTY
455 */
456 volatile int next_buf_to_init;
457} __attribute__ ((aligned(256)));
458
459/* possible types of qeth large_send support */
460enum qeth_large_send_types {
461 QETH_LARGE_SEND_NO,
462 QETH_LARGE_SEND_EDDP,
463 QETH_LARGE_SEND_TSO,
464};
465
466struct qeth_qdio_out_buffer {
467 struct qdio_buffer *buffer;
468 atomic_t state;
469 volatile int next_element_to_fill;
470 struct sk_buff_head skb_list;
471 struct list_head ctx_list;
472};
473
474struct qeth_card;
475
476enum qeth_out_q_states {
477 QETH_OUT_Q_UNLOCKED,
478 QETH_OUT_Q_LOCKED,
479 QETH_OUT_Q_LOCKED_FLUSH,
480};
481
482struct qeth_qdio_out_q {
483 struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
484 struct qeth_qdio_out_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
485 int queue_no;
486 struct qeth_card *card;
487 atomic_t state;
488 volatile int do_pack;
489 /*
490 * index of buffer to be filled by driver; state EMPTY or PACKING
491 */
492 volatile int next_buf_to_fill;
493 /*
494 * number of buffers that are currently filled (PRIMED)
495 * -> these buffers are hardware-owned
496 */
497 atomic_t used_buffers;
498 /* indicates whether PCI flag must be set (or if one is outstanding) */
499 atomic_t set_pci_flags_count;
500} __attribute__ ((aligned(256)));
501
502struct qeth_qdio_info {
503 volatile enum qeth_qdio_info_states state;
504 /* input */
505 struct qeth_qdio_q *in_q;
506 struct qeth_qdio_buffer_pool in_buf_pool;
507 struct qeth_qdio_buffer_pool init_pool;
508 int in_buf_size;
509
510 /* output */
511 int no_out_queues;
512 struct qeth_qdio_out_q **out_qs;
513
514 /* priority queueing */
515 int do_prio_queueing;
516 int default_out_queue;
517};
518
519enum qeth_send_errors {
520 QETH_SEND_ERROR_NONE,
521 QETH_SEND_ERROR_LINK_FAILURE,
522 QETH_SEND_ERROR_RETRY,
523 QETH_SEND_ERROR_KICK_IT,
524};
525
526#define QETH_ETH_MAC_V4 0x0100 /* like v4 */
527#define QETH_ETH_MAC_V6 0x3333 /* like v6 */
528/* tr mc mac is longer, but that will be enough to detect mc frames */
529#define QETH_TR_MAC_NC 0xc000 /* non-canonical */
530#define QETH_TR_MAC_C 0x0300 /* canonical */
531
532#define DEFAULT_ADD_HHLEN 0
533#define MAX_ADD_HHLEN 1024
534
535/**
536 * buffer stuff for read channel
537 */
538#define QETH_CMD_BUFFER_NO 8
539
540/**
541 * channel state machine
542 */
543enum qeth_channel_states {
544 CH_STATE_UP,
545 CH_STATE_DOWN,
546 CH_STATE_ACTIVATING,
547 CH_STATE_HALTED,
548 CH_STATE_STOPPED,
549};
550/**
551 * card state machine
552 */
553enum qeth_card_states {
554 CARD_STATE_DOWN,
555 CARD_STATE_HARDSETUP,
556 CARD_STATE_SOFTSETUP,
557 CARD_STATE_UP,
558 CARD_STATE_RECOVER,
559};
560
561/**
562 * Protocol versions
563 */
564enum qeth_prot_versions {
565 QETH_PROT_SNA = 0x0001,
566 QETH_PROT_IPV4 = 0x0004,
567 QETH_PROT_IPV6 = 0x0006,
568};
569
570enum qeth_ip_types {
571 QETH_IP_TYPE_NORMAL,
572 QETH_IP_TYPE_VIPA,
573 QETH_IP_TYPE_RXIP,
574 QETH_IP_TYPE_DEL_ALL_MC,
575};
576
577enum qeth_cmd_buffer_state {
578 BUF_STATE_FREE,
579 BUF_STATE_LOCKED,
580 BUF_STATE_PROCESSED,
581};
582/**
583 * IP address and multicast list
584 */
585struct qeth_ipaddr {
586 struct list_head entry;
587 enum qeth_ip_types type;
588 enum qeth_ipa_setdelip_flags set_flags;
589 enum qeth_ipa_setdelip_flags del_flags;
590 int is_multicast;
591 volatile int users;
592 enum qeth_prot_versions proto;
593 unsigned char mac[OSA_ADDR_LEN];
594 union {
595 struct {
596 unsigned int addr;
597 unsigned int mask;
598 } a4;
599 struct {
600 struct in6_addr addr;
601 unsigned int pfxlen;
602 } a6;
603 } u;
604};
605
606struct qeth_ipato_entry {
607 struct list_head entry;
608 enum qeth_prot_versions proto;
609 char addr[16];
610 int mask_bits;
611};
612
613struct qeth_ipato {
614 int enabled;
615 int invert4;
616 int invert6;
617 struct list_head entries;
618};
619
620struct qeth_channel;
621
622struct qeth_cmd_buffer {
623 enum qeth_cmd_buffer_state state;
624 struct qeth_channel *channel;
625 unsigned char *data;
626 int rc;
627 void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
628};
629
630
631/**
632 * definition of a qeth channel, used for read and write
633 */
634struct qeth_channel {
635 enum qeth_channel_states state;
636 struct ccw1 ccw;
637 spinlock_t iob_lock;
638 wait_queue_head_t wait_q;
639 struct tasklet_struct irq_tasklet;
640 struct ccw_device *ccwdev;
641/*command buffer for control data*/
642 struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
643 atomic_t irq_pending;
644 volatile int io_buf_no;
645 volatile int buf_no;
646};
647
648/**
649 * OSA card related definitions
650 */
651struct qeth_token {
652 __u32 issuer_rm_w;
653 __u32 issuer_rm_r;
654 __u32 cm_filter_w;
655 __u32 cm_filter_r;
656 __u32 cm_connection_w;
657 __u32 cm_connection_r;
658 __u32 ulp_filter_w;
659 __u32 ulp_filter_r;
660 __u32 ulp_connection_w;
661 __u32 ulp_connection_r;
662};
663
664struct qeth_seqno {
665 __u32 trans_hdr;
666 __u32 pdu_hdr;
667 __u32 pdu_hdr_ack;
668 __u16 ipa;
669};
670
671struct qeth_reply {
672 struct list_head list;
673 wait_queue_head_t wait_q;
674 int (*callback)(struct qeth_card *,struct qeth_reply *,unsigned long);
675 u32 seqno;
676 unsigned long offset;
677 int received;
678 int rc;
679 void *param;
680 struct qeth_card *card;
681 atomic_t refcnt;
682};
683
684#define QETH_BROADCAST_WITH_ECHO 1
685#define QETH_BROADCAST_WITHOUT_ECHO 2
686
687struct qeth_card_blkt {
688 int time_total;
689 int inter_packet;
690 int inter_packet_jumbo;
691};
692
693
694
695struct qeth_card_info {
696 unsigned short unit_addr2;
697 unsigned short cula;
698 unsigned short chpid;
699 __u16 func_level;
700 char mcl_level[QETH_MCL_LENGTH + 1];
701 int guestlan;
702 int layer2_mac_registered;
703 int portname_required;
704 int portno;
705 char portname[9];
706 enum qeth_card_types type;
707 enum qeth_link_types link_type;
708 int is_multicast_different;
709 int initial_mtu;
710 int max_mtu;
711 int broadcast_capable;
712 int unique_id;
713 struct qeth_card_blkt blkt;
714 __u32 csum_mask;
715};
716
717struct qeth_card_options {
718 struct qeth_routing_info route4;
719 struct qeth_ipa_info ipa4;
720 struct qeth_ipa_info adp; /*Adapter parameters*/
721#ifdef CONFIG_QETH_IPV6
722 struct qeth_routing_info route6;
723 struct qeth_ipa_info ipa6;
724#endif /* CONFIG_QETH_IPV6 */
725 enum qeth_checksum_types checksum_type;
726 int broadcast_mode;
727 int macaddr_mode;
728 int fake_broadcast;
729 int add_hhlen;
730 int fake_ll;
731 int layer2;
732 enum qeth_large_send_types large_send;
733};
734
735/*
736 * thread bits for qeth_card thread masks
737 */
738enum qeth_threads {
739 QETH_SET_IP_THREAD = 1,
740 QETH_RECOVER_THREAD = 2,
741};
742
743struct qeth_card {
744 struct list_head list;
745 enum qeth_card_states state;
746 int lan_online;
747 spinlock_t lock;
748/*hardware and sysfs stuff*/
749 struct ccwgroup_device *gdev;
750 struct qeth_channel read;
751 struct qeth_channel write;
752 struct qeth_channel data;
753
754 struct net_device *dev;
755 struct net_device_stats stats;
756
757 struct qeth_card_info info;
758 struct qeth_token token;
759 struct qeth_seqno seqno;
760 struct qeth_card_options options;
761
762 wait_queue_head_t wait_q;
763#ifdef CONFIG_QETH_VLAN
764 spinlock_t vlanlock;
765 struct vlan_group *vlangrp;
766#endif
767 struct work_struct kernel_thread_starter;
768 spinlock_t thread_mask_lock;
769 volatile unsigned long thread_start_mask;
770 volatile unsigned long thread_allowed_mask;
771 volatile unsigned long thread_running_mask;
772 spinlock_t ip_lock;
773 struct list_head ip_list;
774 struct list_head *ip_tbd_list;
775 struct qeth_ipato ipato;
776 struct list_head cmd_waiter_list;
777 /* QDIO buffer handling */
778 struct qeth_qdio_info qdio;
779#ifdef CONFIG_QETH_PERF_STATS
780 struct qeth_perf_stats perf_stats;
781#endif /* CONFIG_QETH_PERF_STATS */
782 int use_hard_stop;
783 int (*orig_hard_header)(struct sk_buff *,struct net_device *,
784 unsigned short,void *,void *,unsigned);
785};
786
787struct qeth_card_list_struct {
788 struct list_head list;
789 rwlock_t rwlock;
790};
791
792extern struct qeth_card_list_struct qeth_card_list;
793
794/*notifier list */
795struct qeth_notify_list_struct {
796 struct list_head list;
797 struct task_struct *task;
798 int signum;
799};
800extern spinlock_t qeth_notify_lock;
801extern struct list_head qeth_notify_list;
802
803/*some helper functions*/
804
805#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
806
807inline static __u8
808qeth_get_ipa_adp_type(enum qeth_link_types link_type)
809{
810 switch (link_type) {
811 case QETH_LINK_TYPE_HSTR:
812 return 2;
813 default:
814 return 1;
815 }
816}
817
818inline static int
819qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size)
820{
821 struct sk_buff *new_skb = NULL;
822
823 if (skb_headroom(*skb) < size){
824 new_skb = skb_realloc_headroom(*skb, size);
825 if (!new_skb) {
826 PRINT_ERR("qeth_prepare_skb: could "
827 "not realloc headroom for qeth_hdr "
828 "on interface %s\n", QETH_CARD_IFNAME(card));
829 return -ENOMEM;
830 }
831 *skb = new_skb;
832 }
833 return 0;
834}
835static inline struct sk_buff *
836qeth_pskb_unshare(struct sk_buff *skb, int pri)
837{
838 struct sk_buff *nskb;
839 if (!skb_cloned(skb))
840 return skb;
841 nskb = skb_copy(skb, pri);
842 kfree_skb(skb); /* free our shared copy */
843 return nskb;
844}
845
846
847inline static void *
848qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size)
849{
850 void *hdr;
851
852 hdr = (void *) skb_push(*skb, size);
853 /*
854 * sanity check: the Linux memory allocation scheme should
855 * never present us with cases like this one (the qdio header
856 * size plus the first 40 bytes of the packet crossing a 4k boundary)
857 */
858 if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) !=
859 (((unsigned long) hdr + size +
860 QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) {
861 PRINT_ERR("qeth_prepare_skb: misaligned "
862 "packet on interface %s. Discarded.\n",
863 QETH_CARD_IFNAME(card));
864 return NULL;
865 }
866 return hdr;
867}
868
869inline static int
870qeth_get_hlen(__u8 link_type)
871{
872#ifdef CONFIG_QETH_IPV6
873 switch (link_type) {
874 case QETH_LINK_TYPE_HSTR:
875 case QETH_LINK_TYPE_LANE_TR:
876 return sizeof(struct qeth_hdr) + TR_HLEN;
877 default:
878#ifdef CONFIG_QETH_VLAN
879 return sizeof(struct qeth_hdr) + VLAN_ETH_HLEN;
880#else
881 return sizeof(struct qeth_hdr) + ETH_HLEN;
882#endif
883 }
884#else /* CONFIG_QETH_IPV6 */
885#ifdef CONFIG_QETH_VLAN
886 return sizeof(struct qeth_hdr) + VLAN_HLEN;
887#else
888 return sizeof(struct qeth_hdr);
889#endif
890#endif /* CONFIG_QETH_IPV6 */
891}
892
893inline static unsigned short
894qeth_get_netdev_flags(struct qeth_card *card)
895{
896 if (card->options.layer2)
897 return 0;
898 switch (card->info.type) {
899 case QETH_CARD_TYPE_IQD:
900 return IFF_NOARP;
901#ifdef CONFIG_QETH_IPV6
902 default:
903 return 0;
904#else
905 default:
906 return IFF_NOARP;
907#endif
908 }
909}
910
911inline static int
912qeth_get_initial_mtu_for_card(struct qeth_card * card)
913{
914 switch (card->info.type) {
915 case QETH_CARD_TYPE_UNKNOWN:
916 return 1500;
917 case QETH_CARD_TYPE_IQD:
918 return card->info.max_mtu;
919 case QETH_CARD_TYPE_OSAE:
920 switch (card->info.link_type) {
921 case QETH_LINK_TYPE_HSTR:
922 case QETH_LINK_TYPE_LANE_TR:
923 return 2000;
924 default:
925 return 1492;
926 }
927 default:
928 return 1500;
929 }
930}
931
932inline static int
933qeth_get_max_mtu_for_card(int cardtype)
934{
935 switch (cardtype) {
936 case QETH_CARD_TYPE_UNKNOWN:
937 return 61440;
938 case QETH_CARD_TYPE_OSAE:
939 return 61440;
940 case QETH_CARD_TYPE_IQD:
941 return 57344;
942 default:
943 return 1500;
944 }
945}
946
947inline static int
948qeth_get_mtu_out_of_mpc(int cardtype)
949{
950 switch (cardtype) {
951 case QETH_CARD_TYPE_IQD:
952 return 1;
953 default:
954 return 0;
955 }
956}
957
958inline static int
959qeth_get_mtu_outof_framesize(int framesize)
960{
961 switch (framesize) {
962 case 0x4000:
963 return 8192;
964 case 0x6000:
965 return 16384;
966 case 0xa000:
967 return 32768;
968 case 0xffff:
969 return 57344;
970 default:
971 return 0;
972 }
973}
974
975inline static int
976qeth_mtu_is_valid(struct qeth_card * card, int mtu)
977{
978 switch (card->info.type) {
979 case QETH_CARD_TYPE_OSAE:
980 return ((mtu >= 576) && (mtu <= 61440));
981 case QETH_CARD_TYPE_IQD:
982 return ((mtu >= 576) &&
983 (mtu <= card->info.max_mtu + 4096 - 32));
984 case QETH_CARD_TYPE_UNKNOWN:
985 default:
986 return 1;
987 }
988}
989
990inline static int
991qeth_get_arphdr_type(int cardtype, int linktype)
992{
993 switch (cardtype) {
994 case QETH_CARD_TYPE_OSAE:
995 switch (linktype) {
996 case QETH_LINK_TYPE_LANE_TR:
997 case QETH_LINK_TYPE_HSTR:
998 return ARPHRD_IEEE802_TR;
999 default:
1000 return ARPHRD_ETHER;
1001 }
1002 case QETH_CARD_TYPE_IQD:
1003 default:
1004 return ARPHRD_ETHER;
1005 }
1006}
1007
1008#ifdef CONFIG_QETH_PERF_STATS
1009inline static int
1010qeth_get_micros(void)
1011{
1012 return (int) (get_clock() >> 12);
1013}
1014#endif
1015
1016static inline int
1017qeth_get_qdio_q_format(struct qeth_card *card)
1018{
1019 switch (card->info.type) {
1020 case QETH_CARD_TYPE_IQD:
1021 return 2;
1022 default:
1023 return 0;
1024 }
1025}
1026
1027static inline void
1028qeth_ipaddr4_to_string(const __u8 *addr, char *buf)
1029{
1030 sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]);
1031}
1032
1033static inline int
1034qeth_string_to_ipaddr4(const char *buf, __u8 *addr)
1035{
1036 const char *start, *end;
1037 char abuf[4];
1038 char *tmp;
1039 int len;
1040 int i;
1041
1042 start = buf;
1043 for (i = 0; i < 3; i++) {
1044 if (!(end = strchr(start, '.')))
1045 return -EINVAL;
1046 len = end - start;
1047 memset(abuf, 0, 4);
1048 strncpy(abuf, start, len);
1049 addr[i] = simple_strtoul(abuf, &tmp, 10);
1050 start = end + 1;
1051 }
1052 memset(abuf, 0, 4);
1053 strcpy(abuf, start);
1054 addr[3] = simple_strtoul(abuf, &tmp, 10);
1055 return 0;
1056}
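/*
 * Round-trip sketch: "10.0.0.1" parses to addr[] = { 10, 0, 0, 1 }, and
 * qeth_ipaddr4_to_string() above reproduces the dotted-quad form. The
 * parser assumes well-formed input: each component must fit the 4-byte
 * abuf (at most three digits), and values above 255 are not rejected
 * but silently truncated to __u8.
 */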
1057
1058static inline void
1059qeth_ipaddr6_to_string(const __u8 *addr, char *buf)
1060{
1061 sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
1062 ":%02x%02x:%02x%02x:%02x%02x:%02x%02x",
1063 addr[0], addr[1], addr[2], addr[3],
1064 addr[4], addr[5], addr[6], addr[7],
1065 addr[8], addr[9], addr[10], addr[11],
1066 addr[12], addr[13], addr[14], addr[15]);
1067}
1068
1069static inline int
1070qeth_string_to_ipaddr6(const char *buf, __u8 *addr)
1071{
1072 const char *start, *end;
1073 u16 *tmp_addr;
1074 char abuf[5];
1075 char *tmp;
1076 int len;
1077 int i;
1078
1079 tmp_addr = (u16 *)addr;
1080 start = buf;
1081 for (i = 0; i < 7; i++) {
1082 if (!(end = strchr(start, ':')))
1083 return -EINVAL;
1084 len = end - start;
1085 memset(abuf, 0, 5);
1086 strncpy(abuf, start, len);
1087 tmp_addr[i] = simple_strtoul(abuf, &tmp, 16);
1088 start = end + 1;
1089 }
1090 memset(abuf, 0, 5);
1091 strcpy(abuf, start);
1092 tmp_addr[7] = simple_strtoul(abuf, &tmp, 16);
1093 return 0;
1094}
1095
1096static inline void
1097qeth_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
1098 char *buf)
1099{
1100 if (proto == QETH_PROT_IPV4)
1101 return qeth_ipaddr4_to_string(addr, buf);
1102 else if (proto == QETH_PROT_IPV6)
1103 return qeth_ipaddr6_to_string(addr, buf);
1104}
1105
1106static inline int
1107qeth_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
1108 __u8 *addr)
1109{
1110 if (proto == QETH_PROT_IPV4)
1111 return qeth_string_to_ipaddr4(buf, addr);
1112 else if (proto == QETH_PROT_IPV6)
1113 return qeth_string_to_ipaddr6(buf, addr);
1114 else
1115 return -EINVAL;
1116}
1117
1118extern int
1119qeth_setrouting_v4(struct qeth_card *);
1120extern int
1121qeth_setrouting_v6(struct qeth_card *);
1122
1123extern int
1124qeth_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
1125
1126extern void
1127qeth_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions, u8 *, int);
1128
1129extern int
1130qeth_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1131
1132extern void
1133qeth_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1134
1135extern int
1136qeth_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1137
1138extern void
1139qeth_del_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1140
1141extern int
1142qeth_notifier_register(struct task_struct *, int );
1143
1144extern int
1145qeth_notifier_unregister(struct task_struct * );
1146
1147extern void
1148qeth_schedule_recovery(struct qeth_card *);
1149
1150extern int
1151qeth_realloc_buffer_pool(struct qeth_card *, int);
1152
1153extern int
1154qeth_set_large_send(struct qeth_card *);
1155
1156extern void
1157qeth_fill_header(struct qeth_card *, struct qeth_hdr *,
1158 struct sk_buff *, int, int);
1159extern void
1160qeth_flush_buffers(struct qeth_qdio_out_q *, int, int, int);
1161
1162#endif /* __QETH_H__ */
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
new file mode 100644
index 000000000000..7ee1c06ed68a
--- /dev/null
+++ b/drivers/s390/net/qeth_eddp.c
@@ -0,0 +1,643 @@
1/*
2 *
3 * linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.11 $)
4 *
5 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
6 *
7 * Copyright 2004 IBM Corporation
8 *
9 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
10 *
11 * $Revision: 1.11 $ $Date: 2005/03/24 09:04:18 $
12 *
13 */
14#include <linux/config.h>
15#include <linux/errno.h>
16#include <linux/ip.h>
17#include <linux/inetdevice.h>
18#include <linux/netdevice.h>
19#include <linux/kernel.h>
20#include <linux/tcp.h>
21#include <net/tcp.h>
22#include <linux/skbuff.h>
23
24#include <net/ip.h>
25
26#include "qeth.h"
27#include "qeth_mpc.h"
28#include "qeth_eddp.h"
29
30int
31qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
32 struct qeth_eddp_context *ctx)
33{
34 int index = queue->next_buf_to_fill;
35 int elements_needed = ctx->num_elements;
36 int elements_in_buffer;
37 int skbs_in_buffer;
38 int buffers_needed = 0;
39
40 QETH_DBF_TEXT(trace, 5, "eddpcbfc");
41 while(elements_needed > 0) {
42 buffers_needed++;
43 if (atomic_read(&queue->bufs[index].state) !=
44 QETH_QDIO_BUF_EMPTY)
45 return -EBUSY;
46
47 elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
48 queue->bufs[index].next_element_to_fill;
49 skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
50 elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
51 index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
52 }
53 return buffers_needed;
54}
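/*
 * Worked sketch: with 16 elements per buffer, ctx->elements_per_skb = 4
 * and ctx->num_elements = 20, the first empty buffer absorbs four whole
 * skbs (16 elements), the remaining 4 elements are charged to a second
 * buffer, and the function returns buffers_needed = 2.
 */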
55
56static inline void
57qeth_eddp_free_context(struct qeth_eddp_context *ctx)
58{
59 int i;
60
61 QETH_DBF_TEXT(trace, 5, "eddpfctx");
62 for (i = 0; i < ctx->num_pages; ++i)
63 free_page((unsigned long)ctx->pages[i]);
64 kfree(ctx->pages);
65 if (ctx->elements != NULL)
66 kfree(ctx->elements);
67 kfree(ctx);
68}
69
70
71static inline void
72qeth_eddp_get_context(struct qeth_eddp_context *ctx)
73{
74 atomic_inc(&ctx->refcnt);
75}
76
77void
78qeth_eddp_put_context(struct qeth_eddp_context *ctx)
79{
80 if (atomic_dec_return(&ctx->refcnt) == 0)
81 qeth_eddp_free_context(ctx);
82}
83
84void
85qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
86{
87 struct qeth_eddp_context_reference *ref;
88
89 QETH_DBF_TEXT(trace, 6, "eddprctx");
90 while (!list_empty(&buf->ctx_list)){
91 ref = list_entry(buf->ctx_list.next,
92 struct qeth_eddp_context_reference, list);
93 qeth_eddp_put_context(ref->ctx);
94 list_del(&ref->list);
95 kfree(ref);
96 }
97}
98
99static inline int
100qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
101 struct qeth_eddp_context *ctx)
102{
103 struct qeth_eddp_context_reference *ref;
104
105 QETH_DBF_TEXT(trace, 6, "eddprfcx");
106 ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
107 if (ref == NULL)
108 return -ENOMEM;
109 qeth_eddp_get_context(ctx);
110 ref->ctx = ctx;
111 list_add_tail(&ref->list, &buf->ctx_list);
112 return 0;
113}
114
115int
116qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
117 struct qeth_eddp_context *ctx,
118 int index)
119{
120 struct qeth_qdio_out_buffer *buf = NULL;
121 struct qdio_buffer *buffer;
122 int elements = ctx->num_elements;
123 int element = 0;
124 int flush_cnt = 0;
125 int must_refcnt = 1;
126 int i;
127
128 QETH_DBF_TEXT(trace, 5, "eddpfibu");
129 while (elements > 0) {
130 buf = &queue->bufs[index];
131 if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY){
132 /* normally this should not happen since we checked for
133 * available elements in qeth_eddp_check_buffers_for_context
134 */
135 if (element == 0)
136 return -EBUSY;
137 else {
138 PRINT_WARN("could only partially fill eddp "
139 "buffer!\n");
140 goto out;
141 }
142 }
143 /* check if the whole next skb fits into current buffer */
144 if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
145 buf->next_element_to_fill)
146 < ctx->elements_per_skb){
147 /* no -> go to next buffer */
148 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
149 index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
150 flush_cnt++;
151 /* new buffer, so we have to add ctx to the buffer's ctx_list
152 * and increment ctx's refcnt */
153 must_refcnt = 1;
154 continue;
155 }
156 if (must_refcnt){
157 must_refcnt = 0;
158 if (qeth_eddp_buf_ref_context(buf, ctx)){
159 PRINT_WARN("no memory to create eddp context "
160 "reference\n");
161 goto out_check;
162 }
163 }
164 buffer = buf->buffer;
165 /* fill one skb into buffer */
166 for (i = 0; i < ctx->elements_per_skb; ++i){
167 buffer->element[buf->next_element_to_fill].addr =
168 ctx->elements[element].addr;
169 buffer->element[buf->next_element_to_fill].length =
170 ctx->elements[element].length;
171 buffer->element[buf->next_element_to_fill].flags =
172 ctx->elements[element].flags;
173 buf->next_element_to_fill++;
174 element++;
175 elements--;
176 }
177 }
178out_check:
179 if (!queue->do_pack) {
180 QETH_DBF_TEXT(trace, 6, "fillbfnp");
181 /* set state to PRIMED -> will be flushed */
182 if (buf->next_element_to_fill > 0){
183 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
184 flush_cnt++;
185 }
186 } else {
187#ifdef CONFIG_QETH_PERF_STATS
188 queue->card->perf_stats.skbs_sent_pack++;
189#endif
190 QETH_DBF_TEXT(trace, 6, "fillbfpa");
191 if (buf->next_element_to_fill >=
192 QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
193 /*
194 * packed buffer is full -> set state PRIMED
195 * -> will be flushed
196 */
197 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
198 flush_cnt++;
199 }
200 }
201out:
202 return flush_cnt;
203}
204
205static inline int
206qeth_get_skb_data_len(struct sk_buff *skb)
207{
208 int len = skb->len;
209 int i;
210
211 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i)
212 len -= skb_shinfo(skb)->frags[i].size;
213 return len;
214}
215
216static inline void
217qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
218 struct qeth_eddp_data *eddp)
219{
220 u8 *page;
221 int page_remainder;
222 int page_offset;
223 int hdr_len;
224 struct qeth_eddp_element *element;
225
226 QETH_DBF_TEXT(trace, 5, "eddpcrsh");
227 page = ctx->pages[ctx->offset >> PAGE_SHIFT];
228 page_offset = ctx->offset % PAGE_SIZE;
229 element = &ctx->elements[ctx->num_elements];
230 hdr_len = eddp->nhl + eddp->thl;
231 /* FIXME: layer2 and VLAN !!! */
232 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
233 hdr_len += ETH_HLEN;
234 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
235 hdr_len += VLAN_HLEN;
236 /* does complete header fit in current page ? */
237 page_remainder = PAGE_SIZE - page_offset;
238 if (page_remainder < (sizeof(struct qeth_hdr) + hdr_len)){
239 /* no -> go to start of next page */
240 ctx->offset += page_remainder;
241 page = ctx->pages[ctx->offset >> PAGE_SHIFT];
242 page_offset = 0;
243 }
244 memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
245 element->addr = page + page_offset;
246 element->length = sizeof(struct qeth_hdr);
247 ctx->offset += sizeof(struct qeth_hdr);
248 page_offset += sizeof(struct qeth_hdr);
249 /* add mac header (?) */
250 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
251 memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
252 element->length += ETH_HLEN;
253 ctx->offset += ETH_HLEN;
254 page_offset += ETH_HLEN;
255 }
256 /* add VLAN tag */
257 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)){
258 memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
259 element->length += VLAN_HLEN;
260 ctx->offset += VLAN_HLEN;
261 page_offset += VLAN_HLEN;
262 }
263 /* add network header */
264 memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
265 element->length += eddp->nhl;
266 eddp->nh_in_ctx = page + page_offset;
267 ctx->offset += eddp->nhl;
268 page_offset += eddp->nhl;
269 /* add transport header */
270 memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
271 element->length += eddp->thl;
272 eddp->th_in_ctx = page + page_offset;
273 ctx->offset += eddp->thl;
274}
275
276static inline void
277qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
278 u32 *hcsum)
279{
280 struct skb_frag_struct *frag;
281 int left_in_frag;
282 int copy_len;
283 u8 *src;
284
285 QETH_DBF_TEXT(trace, 5, "eddpcdtc");
286 if (skb_shinfo(eddp->skb)->nr_frags == 0) {
287 memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
288 *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
289 *hcsum);
290 eddp->skb_offset += len;
291 } else {
292 while (len > 0) {
293 if (eddp->frag < 0) {
294 /* we're in skb->data */
295 left_in_frag = qeth_get_skb_data_len(eddp->skb)
296 - eddp->skb_offset;
297 src = eddp->skb->data + eddp->skb_offset;
298 } else {
299 frag = &skb_shinfo(eddp->skb)->
300 frags[eddp->frag];
301 left_in_frag = frag->size - eddp->frag_offset;
302 src = (u8 *)(
303 (page_to_pfn(frag->page) << PAGE_SHIFT)+
304 frag->page_offset + eddp->frag_offset);
305 }
306 if (left_in_frag <= 0) {
307 eddp->frag++;
308 eddp->frag_offset = 0;
309 continue;
310 }
311 copy_len = min(left_in_frag, len);
312 memcpy(dst, src, copy_len);
313 *hcsum = csum_partial(src, copy_len, *hcsum);
314 dst += copy_len;
315 eddp->frag_offset += copy_len;
316 eddp->skb_offset += copy_len;
317 len -= copy_len;
318 }
319 }
320}
321
322static inline void
323qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
324 struct qeth_eddp_data *eddp, int data_len,
325 u32 hcsum)
326{
327 u8 *page;
328 int page_remainder;
329 int page_offset;
330 struct qeth_eddp_element *element;
331 int first_lap = 1;
332
333 QETH_DBF_TEXT(trace, 5, "eddpcsdt");
334 page = ctx->pages[ctx->offset >> PAGE_SHIFT];
335 page_offset = ctx->offset % PAGE_SIZE;
336 element = &ctx->elements[ctx->num_elements];
337 while (data_len){
338 page_remainder = PAGE_SIZE - page_offset;
339 if (page_remainder < data_len){
340 qeth_eddp_copy_data_tcp(page + page_offset, eddp,
341 page_remainder, &hcsum);
342 element->length += page_remainder;
343 if (first_lap)
344 element->flags = SBAL_FLAGS_FIRST_FRAG;
345 else
346 element->flags = SBAL_FLAGS_MIDDLE_FRAG;
347 ctx->num_elements++;
348 element++;
349 data_len -= page_remainder;
350 ctx->offset += page_remainder;
351 page = ctx->pages[ctx->offset >> PAGE_SHIFT];
352 page_offset = 0;
353 element->addr = page + page_offset;
354 } else {
355 qeth_eddp_copy_data_tcp(page + page_offset, eddp,
356 data_len, &hcsum);
357 element->length += data_len;
358 if (!first_lap)
359 element->flags = SBAL_FLAGS_LAST_FRAG;
360 ctx->num_elements++;
361 ctx->offset += data_len;
362 data_len = 0;
363 }
364 first_lap = 0;
365 }
366 ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
367}
368
369static inline u32
370qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
371{
372 u32 phcsum; /* pseudo header checksum */
373
374 QETH_DBF_TEXT(trace, 5, "eddpckt4");
375 eddp->th.tcp.h.check = 0;
376 /* compute pseudo header checksum */
377 phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
378 eddp->thl + data_len, IPPROTO_TCP, 0);
379 /* compute checksum of tcp header */
380 return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
381}
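/*
 * Checksum pipeline sketch: the value returned here is still a partial
 * sum over pseudo header and TCP header. qeth_eddp_copy_data_tcp()
 * folds every copied payload chunk into it via csum_partial(), and
 * qeth_eddp_create_segment_data_tcp() finally stores csum_fold(hcsum)
 * in the TCP header of the segment inside the context.
 */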
382
383static inline u32
384qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
385{
386 u32 proto;
387 u32 phcsum; /* pseudo header checksum */
388
389 QETH_DBF_TEXT(trace, 5, "eddpckt6");
390 eddp->th.tcp.h.check = 0;
391 /* compute pseudo header checksum */
392 phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
393 sizeof(struct in6_addr), 0);
394 phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
395 sizeof(struct in6_addr), phcsum);
396 proto = htonl(IPPROTO_TCP);
397 phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
398 return phcsum;
399}
400
401static inline struct qeth_eddp_data *
402qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
403{
404 struct qeth_eddp_data *eddp;
405
406 QETH_DBF_TEXT(trace, 5, "eddpcrda");
407 eddp = kmalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
408 if (eddp){
409 memset(eddp, 0, sizeof(struct qeth_eddp_data));
410 eddp->nhl = nhl;
411 eddp->thl = thl;
412 memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
413 memcpy(&eddp->nh, nh, nhl);
414 memcpy(&eddp->th, th, thl);
415 eddp->frag = -1; /* initially we're in skb->data */
416 }
417 return eddp;
418}
419
420static inline void
421__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
422 struct qeth_eddp_data *eddp)
423{
424 struct tcphdr *tcph;
425 int data_len;
426 u32 hcsum;
427
428 QETH_DBF_TEXT(trace, 5, "eddpftcp");
429 eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
430 tcph = eddp->skb->h.th;
431 while (eddp->skb_offset < eddp->skb->len) {
432 data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
433 (int)(eddp->skb->len - eddp->skb_offset));
434 /* prepare qdio hdr */
435 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
436 eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
437 eddp->nhl + eddp->thl -
438 sizeof(struct qeth_hdr);
439#ifdef CONFIG_QETH_VLAN
440 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
441 eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
442#endif /* CONFIG_QETH_VLAN */
443 } else
444 eddp->qh.hdr.l3.length = data_len + eddp->nhl +
445 eddp->thl;
446 /* prepare ip hdr */
447 if (eddp->skb->protocol == ETH_P_IP){
448 eddp->nh.ip4.h.tot_len = data_len + eddp->nhl +
449 eddp->thl;
450 eddp->nh.ip4.h.check = 0;
451 eddp->nh.ip4.h.check =
452 ip_fast_csum((u8 *)&eddp->nh.ip4.h,
453 eddp->nh.ip4.h.ihl);
454 } else
455 eddp->nh.ip6.h.payload_len = data_len + eddp->thl;
456 /* prepare tcp hdr */
457 if (data_len == (eddp->skb->len - eddp->skb_offset)){
458 /* last segment -> set FIN and PSH flags */
459 eddp->th.tcp.h.fin = tcph->fin;
460 eddp->th.tcp.h.psh = tcph->psh;
461 }
462 if (eddp->skb->protocol == ETH_P_IP)
463 hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
464 else
465 hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
466 /* fill the next segment into the context */
467 qeth_eddp_create_segment_hdrs(ctx, eddp);
468 qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
469 if (eddp->skb_offset >= eddp->skb->len)
470 break;
471 /* prepare headers for next round */
472 if (eddp->skb->protocol == ETH_P_IP)
473 eddp->nh.ip4.h.id++;
474 eddp->th.tcp.h.seq += data_len;
475 }
476}
477
478static inline int
479qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
480 struct sk_buff *skb, struct qeth_hdr *qhdr)
481{
482 struct qeth_eddp_data *eddp = NULL;
483
484 QETH_DBF_TEXT(trace, 5, "eddpficx");
485 /* create our segmentation headers and copy original headers */
486 if (skb->protocol == ETH_P_IP)
487 eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
488 skb->nh.iph->ihl*4,
489 (u8 *)skb->h.th, skb->h.th->doff*4);
490 else
491 eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h,
492 sizeof(struct ipv6hdr),
493 (u8 *)skb->h.th, skb->h.th->doff*4);
494
495 if (eddp == NULL) {
496 QETH_DBF_TEXT(trace, 2, "eddpfcnm");
497 return -ENOMEM;
498 }
499 if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
500 memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
501#ifdef CONFIG_QETH_VLAN
502 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
503 eddp->vlan[0] = __constant_htons(skb->protocol);
504 eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
505 }
506#endif /* CONFIG_QETH_VLAN */
507 }
508	/* these flags will only be set on the last segment */
509 eddp->th.tcp.h.fin = 0;
510 eddp->th.tcp.h.psh = 0;
511 eddp->skb = skb;
512 /* begin segmentation and fill context */
513 __qeth_eddp_fill_context_tcp(ctx, eddp);
514 kfree(eddp);
515 return 0;
516}
517
518static inline void
519qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
520 int hdr_len)
521{
522 int skbs_per_page;
523
524 QETH_DBF_TEXT(trace, 5, "eddpcanp");
525 /* can we put multiple skbs in one page? */
526 skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
527 if (skbs_per_page > 1){
528 ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
529 skbs_per_page + 1;
530 ctx->elements_per_skb = 1;
531 } else {
532 /* no -> how many elements per skb? */
533 ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
534 PAGE_SIZE) >> PAGE_SHIFT;
535 ctx->num_pages = ctx->elements_per_skb *
536 (skb_shinfo(skb)->tso_segs + 1);
537 }
538 ctx->num_elements = ctx->elements_per_skb *
539 (skb_shinfo(skb)->tso_segs + 1);
540}
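/*
 * Worked example (illustrative numbers, not taken from this commit):
 * with PAGE_SIZE == 4096, an MSS (tso_size) of 1460 and hdr_len == 72
 * (qeth + IPv4 + TCP headers), skbs_per_page = 4096 / 1532 = 2, so for
 * tso_segs == 10 the context gets (10 + 1) / 2 + 1 = 6 pages and one
 * element per segment, i.e. num_elements = 11. If MSS + hdr_len exceeded
 * PAGE_SIZE, each segment would instead be given
 * (tso_size + hdr_len + PAGE_SIZE) >> PAGE_SHIFT elements of its own.
 */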
541
542static inline struct qeth_eddp_context *
543qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
544 int hdr_len)
545{
546 struct qeth_eddp_context *ctx = NULL;
547 u8 *addr;
548 int i;
549
550 QETH_DBF_TEXT(trace, 5, "creddpcg");
551 /* create the context and allocate pages */
552 ctx = kmalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
553 if (ctx == NULL){
554 QETH_DBF_TEXT(trace, 2, "ceddpcn1");
555 return NULL;
556 }
557 memset(ctx, 0, sizeof(struct qeth_eddp_context));
558 ctx->type = QETH_LARGE_SEND_EDDP;
559 qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
560 if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)){
561 QETH_DBF_TEXT(trace, 2, "ceddpcis");
562 kfree(ctx);
563 return NULL;
564 }
565 ctx->pages = kmalloc(ctx->num_pages * sizeof(u8 *), GFP_ATOMIC);
566 if (ctx->pages == NULL){
567 QETH_DBF_TEXT(trace, 2, "ceddpcn2");
568 kfree(ctx);
569 return NULL;
570 }
571 memset(ctx->pages, 0, ctx->num_pages * sizeof(u8 *));
572 for (i = 0; i < ctx->num_pages; ++i){
573 addr = (u8 *)__get_free_page(GFP_ATOMIC);
574 if (addr == NULL){
575 QETH_DBF_TEXT(trace, 2, "ceddpcn3");
576 ctx->num_pages = i;
577 qeth_eddp_free_context(ctx);
578 return NULL;
579 }
580 memset(addr, 0, PAGE_SIZE);
581 ctx->pages[i] = addr;
582 }
583 ctx->elements = kmalloc(ctx->num_elements *
584 sizeof(struct qeth_eddp_element), GFP_ATOMIC);
585 if (ctx->elements == NULL){
586 QETH_DBF_TEXT(trace, 2, "ceddpcn4");
587 qeth_eddp_free_context(ctx);
588 return NULL;
589 }
590 memset(ctx->elements, 0,
591 ctx->num_elements * sizeof(struct qeth_eddp_element));
592 /* reset num_elements; will be incremented again in fill_buffer to
593 * reflect number of actually used elements */
594 ctx->num_elements = 0;
595 return ctx;
596}
597
598static inline struct qeth_eddp_context *
599qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
600 struct qeth_hdr *qhdr)
601{
602 struct qeth_eddp_context *ctx = NULL;
603
604 QETH_DBF_TEXT(trace, 5, "creddpct");
605 if (skb->protocol == ETH_P_IP)
606 ctx = qeth_eddp_create_context_generic(card, skb,
607 sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
608 skb->h.th->doff*4);
609 else if (skb->protocol == ETH_P_IPV6)
610 ctx = qeth_eddp_create_context_generic(card, skb,
611 sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
612 skb->h.th->doff*4);
613 else
614 QETH_DBF_TEXT(trace, 2, "cetcpinv");
615
616 if (ctx == NULL) {
617 QETH_DBF_TEXT(trace, 2, "creddpnl");
618 return NULL;
619 }
620 if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)){
621 QETH_DBF_TEXT(trace, 2, "ceddptfe");
622 qeth_eddp_free_context(ctx);
623 return NULL;
624 }
625 atomic_set(&ctx->refcnt, 1);
626 return ctx;
627}
628
629struct qeth_eddp_context *
630qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
631 struct qeth_hdr *qhdr)
632{
633 QETH_DBF_TEXT(trace, 5, "creddpc");
634 switch (skb->sk->sk_protocol){
635 case IPPROTO_TCP:
636 return qeth_eddp_create_context_tcp(card, skb, qhdr);
637 default:
638 QETH_DBF_TEXT(trace, 2, "eddpinvp");
639 }
640 return NULL;
641}
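/*
 * Illustrative usage sketch (assumption, not code from this commit): a
 * transmit path might create a context for a TSO skb, fill qdio output
 * buffers from it and then drop its reference. qeth_eddp_fill_buffer()
 * and qeth_eddp_put_context() are declared in qeth_eddp.h.
 */
#if 0 /* example only */
static int example_xmit_eddp(struct qeth_card *card, struct sk_buff *skb,
			     struct qeth_hdr *hdr,
			     struct qeth_qdio_out_q *queue, int index)
{
	struct qeth_eddp_context *ctx;

	ctx = qeth_eddp_create_context(card, skb, hdr);
	if (ctx == NULL)
		return -ENOMEM;	/* fall back to non-EDDP transmit */
	/* copy the prepared elements into the output queue's buffers */
	qeth_eddp_fill_buffer(queue, ctx, index);
	qeth_eddp_put_context(ctx);	/* release the creator's reference */
	return 0;
}
#endif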
642
643
diff --git a/drivers/s390/net/qeth_eddp.h b/drivers/s390/net/qeth_eddp.h
new file mode 100644
index 000000000000..e1b51860bc57
--- /dev/null
+++ b/drivers/s390/net/qeth_eddp.h
@@ -0,0 +1,85 @@
1/*
2 * linux/drivers/s390/net/qeth_eddp.h ($Revision: 1.5 $)
3 *
4 * Header file for qeth enhanced device driver packing.
5 *
6 * Copyright 2004 IBM Corporation
7 *
8 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
9 *
10 * $Revision: 1.5 $ $Date: 2005/03/24 09:04:18 $
11 *
12 */
13#ifndef __QETH_EDDP_H__
14#define __QETH_EDDP_H__
15
16struct qeth_eddp_element {
17 u32 flags;
18 u32 length;
19 void *addr;
20};
21
22struct qeth_eddp_context {
23 atomic_t refcnt;
24 enum qeth_large_send_types type;
25 int num_pages; /* # of allocated pages */
26 u8 **pages; /* pointers to pages */
27 int offset; /* offset in ctx during creation */
28 int num_elements; /* # of required 'SBALEs' */
29 struct qeth_eddp_element *elements; /* array of 'SBALEs' */
30	int elements_per_skb; /* # of 'SBALEs' per skb */
31};
32
33struct qeth_eddp_context_reference {
34 struct list_head list;
35 struct qeth_eddp_context *ctx;
36};
37
38extern struct qeth_eddp_context *
39qeth_eddp_create_context(struct qeth_card *,struct sk_buff *,struct qeth_hdr *);
40
41extern void
42qeth_eddp_put_context(struct qeth_eddp_context *);
43
44extern int
45qeth_eddp_fill_buffer(struct qeth_qdio_out_q *,struct qeth_eddp_context *,int);
46
47extern void
48qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *);
49
50extern int
51qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *,
52 struct qeth_eddp_context *);
53/*
54 * Data used for fragmenting an IP packet.
55 */
56struct qeth_eddp_data {
57 struct qeth_hdr qh;
58 struct ethhdr mac;
59 u16 vlan[2];
60 union {
61 struct {
62 struct iphdr h;
63 u8 options[40];
64 } ip4;
65 struct {
66 struct ipv6hdr h;
67 } ip6;
68 } nh;
69 u8 nhl;
70 void *nh_in_ctx; /* address of nh within the ctx */
71 union {
72 struct {
73 struct tcphdr h;
74 u8 options[40];
75 } tcp;
76 } th;
77 u8 thl;
78 void *th_in_ctx; /* address of th within the ctx */
79 struct sk_buff *skb;
80 int skb_offset;
81 int frag;
82 int frag_offset;
83} __attribute__ ((packed));
84
85#endif /* __QETH_EDDP_H__ */
diff --git a/drivers/s390/net/qeth_fs.h b/drivers/s390/net/qeth_fs.h
new file mode 100644
index 000000000000..5c9a51ce91b6
--- /dev/null
+++ b/drivers/s390/net/qeth_fs.h
@@ -0,0 +1,163 @@
1/*
2 * linux/drivers/s390/net/qeth_fs.h
3 *
4 * Linux on zSeries OSA Express and HiperSockets support.
5 *
6 * This header file contains definitions related to sysfs and procfs.
7 *
8 * Copyright 2000,2003 IBM Corporation
9 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
10 *
11 */
12#ifndef __QETH_FS_H__
13#define __QETH_FS_H__
14
15#define VERSION_QETH_FS_H "$Revision: 1.9 $"
16
17extern const char *VERSION_QETH_PROC_C;
18extern const char *VERSION_QETH_SYS_C;
19
20#ifdef CONFIG_PROC_FS
21extern int
22qeth_create_procfs_entries(void);
23
24extern void
25qeth_remove_procfs_entries(void);
26#else
27static inline int
28qeth_create_procfs_entries(void)
29{
30 return 0;
31}
32
33static inline void
34qeth_remove_procfs_entries(void)
35{
36}
37#endif /* CONFIG_PROC_FS */
38
39extern int
40qeth_create_device_attributes(struct device *dev);
41
42extern void
43qeth_remove_device_attributes(struct device *dev);
44
45extern int
46qeth_create_driver_attributes(void);
47
48extern void
49qeth_remove_driver_attributes(void);
50
51/*
52 * utility functions used in qeth_proc.c and qeth_sys.c
53 */
54
55static inline const char *
56qeth_get_checksum_str(struct qeth_card *card)
57{
58 if (card->options.checksum_type == SW_CHECKSUMMING)
59 return "sw";
60 else if (card->options.checksum_type == HW_CHECKSUMMING)
61 return "hw";
62 else
63 return "no";
64}
65
66static inline const char *
67qeth_get_prioq_str(struct qeth_card *card, char *buf)
68{
69 if (card->qdio.do_prio_queueing == QETH_NO_PRIO_QUEUEING)
70 sprintf(buf, "always_q_%i", card->qdio.default_out_queue);
71 else
72 strcpy(buf, (card->qdio.do_prio_queueing ==
73 QETH_PRIO_Q_ING_PREC)?
74 "by_prec." : "by_ToS");
75 return buf;
76}
77
78static inline const char *
79qeth_get_bufsize_str(struct qeth_card *card)
80{
81 if (card->qdio.in_buf_size == 16384)
82 return "16k";
83 else if (card->qdio.in_buf_size == 24576)
84 return "24k";
85 else if (card->qdio.in_buf_size == 32768)
86 return "32k";
87 else if (card->qdio.in_buf_size == 40960)
88 return "40k";
89 else
90 return "64k";
91}
92
93static inline const char *
94qeth_get_cardname(struct qeth_card *card)
95{
96 if (card->info.guestlan) {
97 switch (card->info.type) {
98 case QETH_CARD_TYPE_OSAE:
99 return " Guest LAN QDIO";
100 case QETH_CARD_TYPE_IQD:
101 return " Guest LAN Hiper";
102 default:
103 return " unknown";
104 }
105 } else {
106 switch (card->info.type) {
107 case QETH_CARD_TYPE_OSAE:
108 return " OSD Express";
109 case QETH_CARD_TYPE_IQD:
110 return " HiperSockets";
111 default:
112 return " unknown";
113 }
114 }
115 return " n/a";
116}
117
118/* max length to be returned: 14 */
119static inline const char *
120qeth_get_cardname_short(struct qeth_card *card)
121{
122 if (card->info.guestlan){
123 switch (card->info.type){
124 case QETH_CARD_TYPE_OSAE:
125 return "GuestLAN QDIO";
126 case QETH_CARD_TYPE_IQD:
127 return "GuestLAN Hiper";
128 default:
129 return "unknown";
130 }
131 } else {
132 switch (card->info.type) {
133 case QETH_CARD_TYPE_OSAE:
134 switch (card->info.link_type) {
135 case QETH_LINK_TYPE_FAST_ETH:
136 return "OSD_100";
137 case QETH_LINK_TYPE_HSTR:
138 return "HSTR";
139 case QETH_LINK_TYPE_GBIT_ETH:
140 return "OSD_1000";
141 case QETH_LINK_TYPE_10GBIT_ETH:
142 return "OSD_10GIG";
143 case QETH_LINK_TYPE_LANE_ETH100:
144 return "OSD_FE_LANE";
145 case QETH_LINK_TYPE_LANE_TR:
146 return "OSD_TR_LANE";
147 case QETH_LINK_TYPE_LANE_ETH1000:
148 return "OSD_GbE_LANE";
149 case QETH_LINK_TYPE_LANE:
150 return "OSD_ATM_LANE";
151 default:
152 return "OSD_Express";
153 }
154 case QETH_CARD_TYPE_IQD:
155 return "HiperSockets";
156 default:
157 return "unknown";
158 }
159 }
160 return "n/a";
161}
162
163#endif /* __QETH_FS_H__ */
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
new file mode 100644
index 000000000000..607b92542df6
--- /dev/null
+++ b/drivers/s390/net/qeth_main.c
@@ -0,0 +1,8236 @@
1/*
2 *
3 * linux/drivers/s390/net/qeth_main.c ($Revision: 1.206 $)
4 *
5 * Linux on zSeries OSA Express and HiperSockets support
6 *
7 * Copyright 2000,2003 IBM Corporation
8 *
9 * Author(s): Original Code written by
10 * Utz Bacher (utz.bacher@de.ibm.com)
11 * Rewritten by
12 * Frank Pavlic (pavlic@de.ibm.com) and
13 * Thomas Spatzier <tspat@de.ibm.com>
14 *
15 * $Revision: 1.206 $ $Date: 2005/03/24 09:04:18 $
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
20 * any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 */
31
32/***
33 * eye catcher; just for debugging purposes
34 */
35void volatile
36qeth_eyecatcher(void)
37{
38 return;
39}
40
41#include <linux/config.h>
42#include <linux/module.h>
43#include <linux/moduleparam.h>
44#include <linux/string.h>
45#include <linux/errno.h>
46#include <linux/mm.h>
47#include <linux/ip.h>
48#include <linux/inetdevice.h>
49#include <linux/netdevice.h>
50#include <linux/sched.h>
51#include <linux/workqueue.h>
52#include <linux/kernel.h>
53#include <linux/slab.h>
54#include <linux/interrupt.h>
55#include <linux/tcp.h>
56#include <linux/icmp.h>
57#include <linux/skbuff.h>
58#include <linux/in.h>
59#include <linux/igmp.h>
60#include <linux/init.h>
61#include <linux/reboot.h>
62#include <linux/mii.h>
63#include <linux/rcupdate.h>
64#include <linux/ethtool.h>
65
66#include <net/arp.h>
67#include <net/ip.h>
68#include <net/route.h>
69
70#include <asm/ebcdic.h>
71#include <asm/io.h>
72#include <asm/qeth.h>
73#include <asm/timex.h>
74#include <asm/semaphore.h>
75#include <asm/uaccess.h>
76
77#include "qeth.h"
78#include "qeth_mpc.h"
79#include "qeth_fs.h"
80#include "qeth_eddp.h"
81#include "qeth_tso.h"
82
83#define VERSION_QETH_C "$Revision: 1.206 $"
84static const char *version = "qeth S/390 OSA-Express driver";
85
86/**
87 * Debug Facility Stuff
88 */
89static debug_info_t *qeth_dbf_setup = NULL;
90static debug_info_t *qeth_dbf_data = NULL;
91static debug_info_t *qeth_dbf_misc = NULL;
92static debug_info_t *qeth_dbf_control = NULL;
93debug_info_t *qeth_dbf_trace = NULL;
94static debug_info_t *qeth_dbf_sense = NULL;
95static debug_info_t *qeth_dbf_qerr = NULL;
96
97DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf);
98
99/**
100 * some more definitions and declarations
101 */
102static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;
103
104/* list of our cards */
105struct qeth_card_list_struct qeth_card_list;
106/* list of processes that want to be notified */
107spinlock_t qeth_notify_lock;
108struct list_head qeth_notify_list;
109
110static void qeth_send_control_data_cb(struct qeth_channel *,
111 struct qeth_cmd_buffer *);
112
113/**
114 * here we go with function implementation
115 */
116static void
117qeth_init_qdio_info(struct qeth_card *card);
118
119static int
120qeth_init_qdio_queues(struct qeth_card *card);
121
122static int
123qeth_alloc_qdio_buffers(struct qeth_card *card);
124
125static void
126qeth_free_qdio_buffers(struct qeth_card *);
127
128static void
129qeth_clear_qdio_buffers(struct qeth_card *);
130
131static void
132qeth_clear_ip_list(struct qeth_card *, int, int);
133
134static void
135qeth_clear_ipacmd_list(struct qeth_card *);
136
137static int
138qeth_qdio_clear_card(struct qeth_card *, int);
139
140static void
141qeth_clear_working_pool_list(struct qeth_card *);
142
143static void
144qeth_clear_cmd_buffers(struct qeth_channel *);
145
146static int
147qeth_stop(struct net_device *);
148
149static void
150qeth_clear_ipato_list(struct qeth_card *);
151
152static int
153qeth_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
154
155static void
156qeth_irq_tasklet(unsigned long);
157
158static int
159qeth_set_online(struct ccwgroup_device *);
160
161static struct qeth_ipaddr *
162qeth_get_addr_buffer(enum qeth_prot_versions);
163
164static void
165qeth_set_multicast_list(struct net_device *);
166
167static void
168qeth_notify_processes(void)
169{
170 /*notify all registered processes */
171 struct qeth_notify_list_struct *n_entry;
172
173 QETH_DBF_TEXT(trace,3,"procnoti");
174 spin_lock(&qeth_notify_lock);
175 list_for_each_entry(n_entry, &qeth_notify_list, list) {
176 send_sig(n_entry->signum, n_entry->task, 1);
177 }
178 spin_unlock(&qeth_notify_lock);
179
180}
181int
182qeth_notifier_unregister(struct task_struct *p)
183{
184 struct qeth_notify_list_struct *n_entry, *tmp;
185
186 QETH_DBF_TEXT(trace, 2, "notunreg");
187 spin_lock(&qeth_notify_lock);
188 list_for_each_entry_safe(n_entry, tmp, &qeth_notify_list, list) {
189 if (n_entry->task == p) {
190 list_del(&n_entry->list);
191 kfree(n_entry);
192 goto out;
193 }
194 }
195out:
196 spin_unlock(&qeth_notify_lock);
197 return 0;
198}
199int
200qeth_notifier_register(struct task_struct *p, int signum)
201{
202 struct qeth_notify_list_struct *n_entry;
203
204
205 /*check first if entry already exists*/
206 spin_lock(&qeth_notify_lock);
207 list_for_each_entry(n_entry, &qeth_notify_list, list) {
208 if (n_entry->task == p) {
209 n_entry->signum = signum;
210 spin_unlock(&qeth_notify_lock);
211 return 0;
212 }
213 }
214 spin_unlock(&qeth_notify_lock);
215
216 n_entry = (struct qeth_notify_list_struct *)
217 kmalloc(sizeof(struct qeth_notify_list_struct),GFP_KERNEL);
218 if (!n_entry)
219 return -ENOMEM;
220 n_entry->task = p;
221 n_entry->signum = signum;
222 spin_lock(&qeth_notify_lock);
223 list_add(&n_entry->list,&qeth_notify_list);
224 spin_unlock(&qeth_notify_lock);
225 return 0;
226}
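/*
 * Illustrative usage (assumption, not part of this commit): an ioctl or
 * /proc handler could have the calling process signalled on card state
 * changes and deregister it again when the file is closed.
 */
#if 0 /* example only */
	qeth_notifier_register(current, SIGUSR1);
	/* ... later, e.g. in the release path ... */
	qeth_notifier_unregister(current);
#endif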
227
228
229/**
230 * free channel command buffers
231 */
232static void
233qeth_clean_channel(struct qeth_channel *channel)
234{
235 int cnt;
236
237 QETH_DBF_TEXT(setup, 2, "freech");
238 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
239 kfree(channel->iob[cnt].data);
240}
241
242/**
243 * free card
244 */
245static void
246qeth_free_card(struct qeth_card *card)
247{
248
249 QETH_DBF_TEXT(setup, 2, "freecrd");
250 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
251 qeth_clean_channel(&card->read);
252 qeth_clean_channel(&card->write);
253 if (card->dev)
254 free_netdev(card->dev);
255 qeth_clear_ip_list(card, 0, 0);
256 qeth_clear_ipato_list(card);
257 kfree(card->ip_tbd_list);
258 qeth_free_qdio_buffers(card);
259 kfree(card);
260}
261
262/**
263 * alloc memory for command buffer per channel
264 */
265static int
266qeth_setup_channel(struct qeth_channel *channel)
267{
268 int cnt;
269
270 QETH_DBF_TEXT(setup, 2, "setupch");
271 for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
272 channel->iob[cnt].data = (char *)
273 kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
274 if (channel->iob[cnt].data == NULL)
275 break;
276 channel->iob[cnt].state = BUF_STATE_FREE;
277 channel->iob[cnt].channel = channel;
278 channel->iob[cnt].callback = qeth_send_control_data_cb;
279 channel->iob[cnt].rc = 0;
280 }
281 if (cnt < QETH_CMD_BUFFER_NO) {
282 while (cnt-- > 0)
283 kfree(channel->iob[cnt].data);
284 return -ENOMEM;
285 }
286 channel->buf_no = 0;
287 channel->io_buf_no = 0;
288 atomic_set(&channel->irq_pending, 0);
289 spin_lock_init(&channel->iob_lock);
290
291 init_waitqueue_head(&channel->wait_q);
292 channel->irq_tasklet.data = (unsigned long) channel;
293 channel->irq_tasklet.func = qeth_irq_tasklet;
294 return 0;
295}
296
297/**
298 * alloc memory for card structure
299 */
300static struct qeth_card *
301qeth_alloc_card(void)
302{
303 struct qeth_card *card;
304
305 QETH_DBF_TEXT(setup, 2, "alloccrd");
306 card = (struct qeth_card *) kmalloc(sizeof(struct qeth_card),
307 GFP_DMA|GFP_KERNEL);
308 if (!card)
309 return NULL;
310 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
311 memset(card, 0, sizeof(struct qeth_card));
312 if (qeth_setup_channel(&card->read)) {
313 kfree(card);
314 return NULL;
315 }
316 if (qeth_setup_channel(&card->write)) {
317 qeth_clean_channel(&card->read);
318 kfree(card);
319 return NULL;
320 }
321 return card;
322}
323
324static long
325__qeth_check_irb_error(struct ccw_device *cdev, struct irb *irb)
326{
327 if (!IS_ERR(irb))
328 return 0;
329
330 switch (PTR_ERR(irb)) {
331 case -EIO:
332 PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
333 QETH_DBF_TEXT(trace, 2, "ckirberr");
334 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
335 break;
336 case -ETIMEDOUT:
337 PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
338 QETH_DBF_TEXT(trace, 2, "ckirberr");
339 QETH_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
340 break;
341 default:
342 PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
343 cdev->dev.bus_id);
344 QETH_DBF_TEXT(trace, 2, "ckirberr");
345 QETH_DBF_TEXT(trace, 2, " rc???");
346 }
347 return PTR_ERR(irb);
348}
349
350static int
351qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
352{
353 int dstat,cstat;
354 char *sense;
355
356 sense = (char *) irb->ecw;
357 cstat = irb->scsw.cstat;
358 dstat = irb->scsw.dstat;
359
360 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
361 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
362 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
363 QETH_DBF_TEXT(trace,2, "CGENCHK");
364 PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ",
365 cdev->dev.bus_id, dstat, cstat);
366 HEXDUMP16(WARN, "irb: ", irb);
367 HEXDUMP16(WARN, "irb: ", ((char *) irb) + 32);
368 return 1;
369 }
370
371 if (dstat & DEV_STAT_UNIT_CHECK) {
372 if (sense[SENSE_RESETTING_EVENT_BYTE] &
373 SENSE_RESETTING_EVENT_FLAG) {
374 QETH_DBF_TEXT(trace,2,"REVIND");
375 return 1;
376 }
377 if (sense[SENSE_COMMAND_REJECT_BYTE] &
378 SENSE_COMMAND_REJECT_FLAG) {
379 QETH_DBF_TEXT(trace,2,"CMDREJi");
380 return 0;
381 }
382 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
383 QETH_DBF_TEXT(trace,2,"AFFE");
384 return 1;
385 }
386 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
387 QETH_DBF_TEXT(trace,2,"ZEROSEN");
388 return 0;
389 }
390 QETH_DBF_TEXT(trace,2,"DGENCHK");
391 return 1;
392 }
393 return 0;
394}
395static int qeth_issue_next_read(struct qeth_card *);
396
397/**
398 * interrupt handler
399 */
400static void
401qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
402{
403 int rc;
404 int cstat,dstat;
405 struct qeth_cmd_buffer *buffer;
406 struct qeth_channel *channel;
407 struct qeth_card *card;
408
409 QETH_DBF_TEXT(trace,5,"irq");
410
411 if (__qeth_check_irb_error(cdev, irb))
412 return;
413 cstat = irb->scsw.cstat;
414 dstat = irb->scsw.dstat;
415
416 card = CARD_FROM_CDEV(cdev);
417 if (!card)
418 return;
419
420 if (card->read.ccwdev == cdev){
421 channel = &card->read;
422 QETH_DBF_TEXT(trace,5,"read");
423 } else if (card->write.ccwdev == cdev) {
424 channel = &card->write;
425 QETH_DBF_TEXT(trace,5,"write");
426 } else {
427 channel = &card->data;
428 QETH_DBF_TEXT(trace,5,"data");
429 }
430 atomic_set(&channel->irq_pending, 0);
431
432 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC))
433 channel->state = CH_STATE_STOPPED;
434
435 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC))
436 channel->state = CH_STATE_HALTED;
437
438 /*let's wake up immediately on data channel*/
439 if ((channel == &card->data) && (intparm != 0))
440 goto out;
441
442 if (intparm == QETH_CLEAR_CHANNEL_PARM) {
443 QETH_DBF_TEXT(trace, 6, "clrchpar");
444 /* we don't have to handle this further */
445 intparm = 0;
446 }
447 if (intparm == QETH_HALT_CHANNEL_PARM) {
448 QETH_DBF_TEXT(trace, 6, "hltchpar");
449 /* we don't have to handle this further */
450 intparm = 0;
451 }
452 if ((dstat & DEV_STAT_UNIT_EXCEP) ||
453 (dstat & DEV_STAT_UNIT_CHECK) ||
454 (cstat)) {
455 if (irb->esw.esw0.erw.cons) {
456 /* TODO: we should make this s390dbf */
457 PRINT_WARN("sense data available on channel %s.\n",
458 CHANNEL_ID(channel));
459 PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat);
460 HEXDUMP16(WARN,"irb: ",irb);
461 HEXDUMP16(WARN,"sense data: ",irb->ecw);
462 }
463 rc = qeth_get_problem(cdev,irb);
464 if (rc) {
465 qeth_schedule_recovery(card);
466 goto out;
467 }
468 }
469
470 if (intparm) {
471 buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
472 buffer->state = BUF_STATE_PROCESSED;
473 }
474 if (channel == &card->data)
475 return;
476
477 if (channel == &card->read &&
478 channel->state == CH_STATE_UP)
479 qeth_issue_next_read(card);
480
481 tasklet_schedule(&channel->irq_tasklet);
482 return;
483out:
484 wake_up(&card->wait_q);
485}
486
487/**
488 * tasklet function scheduled from irq handler
489 */
490static void
491qeth_irq_tasklet(unsigned long data)
492{
493 struct qeth_card *card;
494 struct qeth_channel *channel;
495 struct qeth_cmd_buffer *iob;
496 __u8 index;
497
498 QETH_DBF_TEXT(trace,5,"irqtlet");
499 channel = (struct qeth_channel *) data;
500 iob = channel->iob;
501 index = channel->buf_no;
502 card = CARD_FROM_CDEV(channel->ccwdev);
503 while (iob[index].state == BUF_STATE_PROCESSED) {
504 if (iob[index].callback !=NULL) {
505 iob[index].callback(channel,iob + index);
506 }
507 index = (index + 1) % QETH_CMD_BUFFER_NO;
508 }
509 channel->buf_no = index;
510 wake_up(&card->wait_q);
511}
512
513static int qeth_stop_card(struct qeth_card *);
514
515static int
516qeth_set_offline(struct ccwgroup_device *cgdev)
517{
518 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
519 int rc = 0;
520 enum qeth_card_states recover_flag;
521
522 QETH_DBF_TEXT(setup, 3, "setoffl");
523 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
524
525 recover_flag = card->state;
526 if (qeth_stop_card(card) == -ERESTARTSYS){
527 PRINT_WARN("Stopping card %s interrupted by user!\n",
528 CARD_BUS_ID(card));
529 return -ERESTARTSYS;
530 }
531 if ((rc = ccw_device_set_offline(CARD_DDEV(card))) ||
532 (rc = ccw_device_set_offline(CARD_WDEV(card))) ||
533 (rc = ccw_device_set_offline(CARD_RDEV(card)))) {
534 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
535 }
536 if (recover_flag == CARD_STATE_UP)
537 card->state = CARD_STATE_RECOVER;
538 qeth_notify_processes();
539 return 0;
540}
541
542static int
543qeth_wait_for_threads(struct qeth_card *card, unsigned long threads);
544
545
546static void
547qeth_remove_device(struct ccwgroup_device *cgdev)
548{
549 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
550 unsigned long flags;
551
552 QETH_DBF_TEXT(setup, 3, "rmdev");
553 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
554
555 if (!card)
556 return;
557
558 if (qeth_wait_for_threads(card, 0xffffffff))
559 return;
560
561 if (cgdev->state == CCWGROUP_ONLINE){
562 card->use_hard_stop = 1;
563 qeth_set_offline(cgdev);
564 }
565	/* remove from our internal list */
566 write_lock_irqsave(&qeth_card_list.rwlock, flags);
567 list_del(&card->list);
568 write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
569 if (card->dev)
570 unregister_netdev(card->dev);
571 qeth_remove_device_attributes(&cgdev->dev);
572 qeth_free_card(card);
573 cgdev->dev.driver_data = NULL;
574 put_device(&cgdev->dev);
575}
576
577static int
578qeth_register_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
579static int
580qeth_deregister_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
581
582/**
583 * Add/remove address to/from card's ip list, i.e. try to add or remove
584 * reference to/from an IP address that is already registered on the card.
585 * Returns:
586 * 0 address was on card and its reference count has been adjusted,
587 * but is still > 0, so nothing has to be done
588 * also returns 0 if address was not on card and the todo was to delete
589 * the address -> there is also nothing to be done
590 * 1 address was not on card and the todo is to add it to the card's ip
591 * list
592 * -1 address was on card and its reference count has been decremented
593 * to <= 0 by the todo -> address must be removed from card
594 */
595static int
596__qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
597 struct qeth_ipaddr **__addr)
598{
599 struct qeth_ipaddr *addr;
600 int found = 0;
601
602 list_for_each_entry(addr, &card->ip_list, entry) {
603 if ((addr->proto == QETH_PROT_IPV4) &&
604 (todo->proto == QETH_PROT_IPV4) &&
605 (addr->type == todo->type) &&
606 (addr->u.a4.addr == todo->u.a4.addr) &&
607 (addr->u.a4.mask == todo->u.a4.mask) ){
608 found = 1;
609 break;
610 }
611 if ((addr->proto == QETH_PROT_IPV6) &&
612 (todo->proto == QETH_PROT_IPV6) &&
613 (addr->type == todo->type) &&
614 (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
615 (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
616 sizeof(struct in6_addr)) == 0)) {
617 found = 1;
618 break;
619 }
620 }
621 if (found){
622 addr->users += todo->users;
623 if (addr->users <= 0){
624 *__addr = addr;
625 return -1;
626 } else {
627 /* for VIPA and RXIP limit refcount to 1 */
628 if (addr->type != QETH_IP_TYPE_NORMAL)
629 addr->users = 1;
630 return 0;
631 }
632 }
633 if (todo->users > 0){
634 /* for VIPA and RXIP limit refcount to 1 */
635 if (todo->type != QETH_IP_TYPE_NORMAL)
636 todo->users = 1;
637 return 1;
638 } else
639 return 0;
640}
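/*
 * Worked trace (annotation, not original source): an address on the card
 * with users == 1 plus a delete todo carrying users == -1 sums to 0, so
 * *__addr is set and -1 is returned -- the caller must deregister the
 * address. For VIPA/RXIP addresses the reference count is pinned to 1,
 * so repeated adds never register them more than once.
 */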
641
642static inline int
643__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
644 int same_type)
645{
646 struct qeth_ipaddr *tmp;
647
648 list_for_each_entry(tmp, list, entry) {
649 if ((tmp->proto == QETH_PROT_IPV4) &&
650 (addr->proto == QETH_PROT_IPV4) &&
651 ((same_type && (tmp->type == addr->type)) ||
652 (!same_type && (tmp->type != addr->type)) ) &&
653 (tmp->u.a4.addr == addr->u.a4.addr) ){
654 return 1;
655 }
656 if ((tmp->proto == QETH_PROT_IPV6) &&
657 (addr->proto == QETH_PROT_IPV6) &&
658 ((same_type && (tmp->type == addr->type)) ||
659 (!same_type && (tmp->type != addr->type)) ) &&
660 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
661 sizeof(struct in6_addr)) == 0) ) {
662 return 1;
663 }
664 }
665 return 0;
666}
667
668/*
669 * Add an IP address to the todo list. If there is already an "add" todo
670 * for it in this list, we just increment the reference count.
671 * Returns 0 if we just incremented reference count.
672 */
673static int
674__qeth_insert_ip_todo(struct qeth_card *card, struct qeth_ipaddr *addr, int add)
675{
676 struct qeth_ipaddr *tmp, *t;
677 int found = 0;
678
679 list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
680 if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
681 (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
682 return 0;
683 if ((tmp->proto == QETH_PROT_IPV4) &&
684 (addr->proto == QETH_PROT_IPV4) &&
685 (tmp->type == addr->type) &&
686 (tmp->is_multicast == addr->is_multicast) &&
687 (tmp->u.a4.addr == addr->u.a4.addr) &&
688 (tmp->u.a4.mask == addr->u.a4.mask) ){
689 found = 1;
690 break;
691 }
692 if ((tmp->proto == QETH_PROT_IPV6) &&
693 (addr->proto == QETH_PROT_IPV6) &&
694 (tmp->type == addr->type) &&
695 (tmp->is_multicast == addr->is_multicast) &&
696 (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
697 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
698 sizeof(struct in6_addr)) == 0) ){
699 found = 1;
700 break;
701 }
702 }
703 if (found){
704 if (addr->users != 0)
705 tmp->users += addr->users;
706 else
707 tmp->users += add? 1:-1;
708 if (tmp->users == 0){
709 list_del(&tmp->entry);
710 kfree(tmp);
711 }
712 return 0;
713 } else {
714 if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
715 list_add(&addr->entry, card->ip_tbd_list);
716 else {
717 if (addr->users == 0)
718 addr->users += add? 1:-1;
719 if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
720 qeth_is_addr_covered_by_ipato(card, addr)){
721 QETH_DBF_TEXT(trace, 2, "tkovaddr");
722 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
723 }
724 list_add_tail(&addr->entry, card->ip_tbd_list);
725 }
726 return 1;
727 }
728}
729
730/**
731 * Remove IP address from list
732 */
733static int
734qeth_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
735{
736 unsigned long flags;
737 int rc = 0;
738
739 QETH_DBF_TEXT(trace,4,"delip");
740 if (addr->proto == QETH_PROT_IPV4)
741 QETH_DBF_HEX(trace,4,&addr->u.a4.addr,4);
742 else {
743 QETH_DBF_HEX(trace,4,&addr->u.a6.addr,8);
744 QETH_DBF_HEX(trace,4,((char *)&addr->u.a6.addr)+8,8);
745 }
746 spin_lock_irqsave(&card->ip_lock, flags);
747 rc = __qeth_insert_ip_todo(card, addr, 0);
748 spin_unlock_irqrestore(&card->ip_lock, flags);
749 return rc;
750}
751
752static int
753qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
754{
755 unsigned long flags;
756 int rc = 0;
757
758 QETH_DBF_TEXT(trace,4,"addip");
759 if (addr->proto == QETH_PROT_IPV4)
760 QETH_DBF_HEX(trace,4,&addr->u.a4.addr,4);
761 else {
762 QETH_DBF_HEX(trace,4,&addr->u.a6.addr,8);
763 QETH_DBF_HEX(trace,4,((char *)&addr->u.a6.addr)+8,8);
764 }
765 spin_lock_irqsave(&card->ip_lock, flags);
766 rc = __qeth_insert_ip_todo(card, addr, 1);
767 spin_unlock_irqrestore(&card->ip_lock, flags);
768 return rc;
769}
770
771static inline void
772__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags)
773{
774 struct qeth_ipaddr *addr, *tmp;
775 int rc;
776
777 list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
778 if (addr->is_multicast) {
779 spin_unlock_irqrestore(&card->ip_lock, *flags);
780 rc = qeth_deregister_addr_entry(card, addr);
781 spin_lock_irqsave(&card->ip_lock, *flags);
782 if (!rc) {
783 list_del(&addr->entry);
784 kfree(addr);
785 }
786 }
787 }
788}
789
790static void
791qeth_set_ip_addr_list(struct qeth_card *card)
792{
793 struct list_head *tbd_list;
794 struct qeth_ipaddr *todo, *addr;
795 unsigned long flags;
796 int rc;
797
798 QETH_DBF_TEXT(trace, 2, "sdiplist");
799 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
800
801 spin_lock_irqsave(&card->ip_lock, flags);
802 tbd_list = card->ip_tbd_list;
803 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
804 if (!card->ip_tbd_list) {
805 QETH_DBF_TEXT(trace, 0, "silnomem");
806 card->ip_tbd_list = tbd_list;
807 spin_unlock_irqrestore(&card->ip_lock, flags);
808 return;
809 } else
810 INIT_LIST_HEAD(card->ip_tbd_list);
811
812 while (!list_empty(tbd_list)){
813 todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
814 list_del(&todo->entry);
815 if (todo->type == QETH_IP_TYPE_DEL_ALL_MC){
816 __qeth_delete_all_mc(card, &flags);
817 kfree(todo);
818 continue;
819 }
820 rc = __qeth_ref_ip_on_card(card, todo, &addr);
821 if (rc == 0) {
822 /* nothing to be done; only adjusted refcount */
823 kfree(todo);
824 } else if (rc == 1) {
825 /* new entry to be added to on-card list */
826 spin_unlock_irqrestore(&card->ip_lock, flags);
827 rc = qeth_register_addr_entry(card, todo);
828 spin_lock_irqsave(&card->ip_lock, flags);
829 if (!rc)
830 list_add_tail(&todo->entry, &card->ip_list);
831 else
832 kfree(todo);
833 } else if (rc == -1) {
834 /* on-card entry to be removed */
835 list_del_init(&addr->entry);
836 spin_unlock_irqrestore(&card->ip_lock, flags);
837 rc = qeth_deregister_addr_entry(card, addr);
838 spin_lock_irqsave(&card->ip_lock, flags);
839 if (!rc)
840 kfree(addr);
841 else
842 list_add_tail(&addr->entry, &card->ip_list);
843 kfree(todo);
844 }
845 }
846 spin_unlock_irqrestore(&card->ip_lock, flags);
847 kfree(tbd_list);
848}
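/*
 * Design note (annotation, not original source): card->ip_lock is
 * dropped around qeth_register_addr_entry()/qeth_deregister_addr_entry()
 * because both issue IPA commands and may sleep; swapping in a fresh
 * ip_tbd_list beforehand lets new todos accumulate safely while the old
 * list is worked off.
 */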
849
850static void qeth_delete_mc_addresses(struct qeth_card *);
851static void qeth_add_multicast_ipv4(struct qeth_card *);
852#ifdef CONFIG_QETH_IPV6
853static void qeth_add_multicast_ipv6(struct qeth_card *);
854#endif
855
856static inline int
857qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread)
858{
859 unsigned long flags;
860
861 spin_lock_irqsave(&card->thread_mask_lock, flags);
862 if ( !(card->thread_allowed_mask & thread) ||
863 (card->thread_start_mask & thread) ) {
864 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
865 return -EPERM;
866 }
867 card->thread_start_mask |= thread;
868 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
869 return 0;
870}
871
872static void
873qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
874{
875 unsigned long flags;
876
877 spin_lock_irqsave(&card->thread_mask_lock, flags);
878 card->thread_start_mask &= ~thread;
879 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
880 wake_up(&card->wait_q);
881}
882
883static void
884qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
885{
886 unsigned long flags;
887
888 spin_lock_irqsave(&card->thread_mask_lock, flags);
889 card->thread_running_mask &= ~thread;
890 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
891 wake_up(&card->wait_q);
892}
893
894static inline int
895__qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
896{
897 unsigned long flags;
898 int rc = 0;
899
900 spin_lock_irqsave(&card->thread_mask_lock, flags);
901 if (card->thread_start_mask & thread){
902 if ((card->thread_allowed_mask & thread) &&
903 !(card->thread_running_mask & thread)){
904 rc = 1;
905 card->thread_start_mask &= ~thread;
906 card->thread_running_mask |= thread;
907 } else
908 rc = -EPERM;
909 }
910 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
911 return rc;
912}
913
914static int
915qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
916{
917 int rc = 0;
918
919 wait_event(card->wait_q,
920 (rc = __qeth_do_run_thread(card, thread)) >= 0);
921 return rc;
922}
923
924static int
925qeth_register_ip_addresses(void *ptr)
926{
927 struct qeth_card *card;
928
929 card = (struct qeth_card *) ptr;
930 daemonize("qeth_reg_ip");
931 QETH_DBF_TEXT(trace,4,"regipth1");
932 if (!qeth_do_run_thread(card, QETH_SET_IP_THREAD))
933 return 0;
934 QETH_DBF_TEXT(trace,4,"regipth2");
935 qeth_set_ip_addr_list(card);
936 qeth_clear_thread_running_bit(card, QETH_SET_IP_THREAD);
937 return 0;
938}
939
940static int
941qeth_recover(void *ptr)
942{
943 struct qeth_card *card;
944 int rc = 0;
945
946 card = (struct qeth_card *) ptr;
947 daemonize("qeth_recover");
948 QETH_DBF_TEXT(trace,2,"recover1");
949 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
950 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
951 return 0;
952 QETH_DBF_TEXT(trace,2,"recover2");
953 PRINT_WARN("Recovery of device %s started ...\n",
954 CARD_BUS_ID(card));
955 card->use_hard_stop = 1;
956 qeth_set_offline(card->gdev);
957 rc = qeth_set_online(card->gdev);
958 if (!rc)
959 PRINT_INFO("Device %s successfully recovered!\n",
960 CARD_BUS_ID(card));
961 else
962 PRINT_INFO("Device %s could not be recovered!\n",
963 CARD_BUS_ID(card));
964 /* don't run another scheduled recovery */
965 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
966 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
967 return 0;
968}
969
970void
971qeth_schedule_recovery(struct qeth_card *card)
972{
973 QETH_DBF_TEXT(trace,2,"startrec");
974
975 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
976 schedule_work(&card->kernel_thread_starter);
977}
978
979static int
980qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
981{
982 unsigned long flags;
983 int rc = 0;
984
985 spin_lock_irqsave(&card->thread_mask_lock, flags);
986 QETH_DBF_TEXT_(trace, 4, " %02x%02x%02x",
987 (u8) card->thread_start_mask,
988 (u8) card->thread_allowed_mask,
989 (u8) card->thread_running_mask);
990 rc = (card->thread_start_mask & thread);
991 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
992 return rc;
993}
994
995static void
996qeth_start_kernel_thread(struct qeth_card *card)
997{
998	QETH_DBF_TEXT(trace, 2, "strthrd");
999
1000 if (card->read.state != CH_STATE_UP &&
1001 card->write.state != CH_STATE_UP)
1002 return;
1003
1004 if (qeth_do_start_thread(card, QETH_SET_IP_THREAD))
1005 kernel_thread(qeth_register_ip_addresses, (void *)card,SIGCHLD);
1006 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
1007 kernel_thread(qeth_recover, (void *) card, SIGCHLD);
1008}
1009
1010
1011static void
1012qeth_set_initial_options(struct qeth_card *card)
1013{
1014 card->options.route4.type = NO_ROUTER;
1015#ifdef CONFIG_QETH_IPV6
1016 card->options.route6.type = NO_ROUTER;
1017#endif /* QETH_IPV6 */
1018 card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
1019 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
1020 card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
1021 card->options.fake_broadcast = 0;
1022 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1023 card->options.fake_ll = 0;
1024 card->options.layer2 = 0;
1025}
1026
1027/**
1028 * initialize channels, card and all state machines
1029 */
1030static int
1031qeth_setup_card(struct qeth_card *card)
1032{
1033
1034 QETH_DBF_TEXT(setup, 2, "setupcrd");
1035 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
1036
1037 card->read.state = CH_STATE_DOWN;
1038 card->write.state = CH_STATE_DOWN;
1039 card->data.state = CH_STATE_DOWN;
1040 card->state = CARD_STATE_DOWN;
1041 card->lan_online = 0;
1042 card->use_hard_stop = 0;
1043 card->dev = NULL;
1044#ifdef CONFIG_QETH_VLAN
1045 spin_lock_init(&card->vlanlock);
1046 card->vlangrp = NULL;
1047#endif
1048 spin_lock_init(&card->ip_lock);
1049 spin_lock_init(&card->thread_mask_lock);
1050 card->thread_start_mask = 0;
1051 card->thread_allowed_mask = 0;
1052 card->thread_running_mask = 0;
1053 INIT_WORK(&card->kernel_thread_starter,
1054 (void *)qeth_start_kernel_thread,card);
1055 INIT_LIST_HEAD(&card->ip_list);
1056 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1057 if (!card->ip_tbd_list) {
1058 QETH_DBF_TEXT(setup, 0, "iptbdnom");
1059 return -ENOMEM;
1060 }
1061 INIT_LIST_HEAD(card->ip_tbd_list);
1062 INIT_LIST_HEAD(&card->cmd_waiter_list);
1063 init_waitqueue_head(&card->wait_q);
1064	/* initial options */
1065	qeth_set_initial_options(card);
1066 /* IP address takeover */
1067 INIT_LIST_HEAD(&card->ipato.entries);
1068 card->ipato.enabled = 0;
1069 card->ipato.invert4 = 0;
1070 card->ipato.invert6 = 0;
1071 /* init QDIO stuff */
1072 qeth_init_qdio_info(card);
1073 return 0;
1074}
1075
1076static int
1077is_1920_device (struct qeth_card *card)
1078{
1079 int single_queue = 0;
1080 struct ccw_device *ccwdev;
1081 struct channelPath_dsc {
1082 u8 flags;
1083 u8 lsn;
1084 u8 desc;
1085 u8 chpid;
1086 u8 swla;
1087 u8 zeroes;
1088 u8 chla;
1089 u8 chpp;
1090 } *chp_dsc;
1091
1092 QETH_DBF_TEXT(setup, 2, "chk_1920");
1093
1094 ccwdev = card->data.ccwdev;
1095 chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
1096 if (chp_dsc != NULL) {
1097 /* CHPP field bit 6 == 1 -> single queue */
1098 single_queue = ((chp_dsc->chpp & 0x02) == 0x02);
1099 kfree(chp_dsc);
1100 }
1101 QETH_DBF_TEXT_(setup, 2, "rc:%x", single_queue);
1102 return single_queue;
1103}
1104
1105static int
1106qeth_determine_card_type(struct qeth_card *card)
1107{
1108 int i = 0;
1109
1110 QETH_DBF_TEXT(setup, 2, "detcdtyp");
1111
1112 while (known_devices[i][4]) {
1113 if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
1114 (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
1115 card->info.type = known_devices[i][4];
1116 if (is_1920_device(card)) {
1117				PRINT_INFO("Priority Queueing not available "
1118					   "due to hardware limitations!\n");
1119 card->qdio.no_out_queues = 1;
1120 card->qdio.default_out_queue = 0;
1121 } else {
1122 card->qdio.no_out_queues = known_devices[i][8];
1123 }
1124 card->info.is_multicast_different = known_devices[i][9];
1125 return 0;
1126 }
1127 i++;
1128 }
1129 card->info.type = QETH_CARD_TYPE_UNKNOWN;
1130 PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card));
1131 return -ENOENT;
1132}
1133
1134static int
1135qeth_probe_device(struct ccwgroup_device *gdev)
1136{
1137 struct qeth_card *card;
1138 struct device *dev;
1139 unsigned long flags;
1140 int rc;
1141
1142 QETH_DBF_TEXT(setup, 2, "probedev");
1143
1144 dev = &gdev->dev;
1145 if (!get_device(dev))
1146 return -ENODEV;
1147
1148 card = qeth_alloc_card();
1149 if (!card) {
1150 put_device(dev);
1151 QETH_DBF_TEXT_(setup, 2, "1err%d", -ENOMEM);
1152 return -ENOMEM;
1153 }
1154 card->read.ccwdev = gdev->cdev[0];
1155 card->write.ccwdev = gdev->cdev[1];
1156 card->data.ccwdev = gdev->cdev[2];
1157
1158 if ((rc = qeth_setup_card(card))){
1159 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1160 put_device(dev);
1161 qeth_free_card(card);
1162 return rc;
1163 }
1164 gdev->dev.driver_data = card;
1165 card->gdev = gdev;
1166 gdev->cdev[0]->handler = qeth_irq;
1167 gdev->cdev[1]->handler = qeth_irq;
1168 gdev->cdev[2]->handler = qeth_irq;
1169
1170 rc = qeth_create_device_attributes(dev);
1171 if (rc) {
1172 put_device(dev);
1173 qeth_free_card(card);
1174 return rc;
1175 }
1176 if ((rc = qeth_determine_card_type(card))){
1177 PRINT_WARN("%s: not a valid card type\n", __func__);
1178 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
1179 put_device(dev);
1180 qeth_free_card(card);
1181 return rc;
1182 }
1183 /* insert into our internal list */
1184 write_lock_irqsave(&qeth_card_list.rwlock, flags);
1185 list_add_tail(&card->list, &qeth_card_list.list);
1186 write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
1187 return rc;
1188}
1189
1190
1191static int
1192qeth_get_unitaddr(struct qeth_card *card)
1193{
1194 int length;
1195 char *prcd;
1196 int rc;
1197
1198 QETH_DBF_TEXT(setup, 2, "getunit");
1199 rc = read_conf_data(CARD_DDEV(card), (void **) &prcd, &length);
1200 if (rc) {
1201 PRINT_ERR("read_conf_data for device %s returned %i\n",
1202 CARD_DDEV_ID(card), rc);
1203 return rc;
1204 }
1205 card->info.chpid = prcd[30];
1206 card->info.unit_addr2 = prcd[31];
1207 card->info.cula = prcd[63];
1208 card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
1209 (prcd[0x11] == _ascebc['M']));
1210 return 0;
1211}
1212
1213static void
1214qeth_init_tokens(struct qeth_card *card)
1215{
1216 card->token.issuer_rm_w = 0x00010103UL;
1217 card->token.cm_filter_w = 0x00010108UL;
1218 card->token.cm_connection_w = 0x0001010aUL;
1219 card->token.ulp_filter_w = 0x0001010bUL;
1220 card->token.ulp_connection_w = 0x0001010dUL;
1221}
1222
1223static inline __u16
1224raw_devno_from_bus_id(char *id)
1225{
1226 id += (strlen(id) - 4);
1227 return (__u16) simple_strtoul(id, &id, 16);
1228}
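/*
 * Worked example (annotation, not original source): for a bus id like
 * "0.0.4711" the last four characters "4711" are parsed as hex,
 * yielding the raw device number 0x4711.
 */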
1229/**
1230 * setup channel
1231 */
1232static void
1233qeth_setup_ccw(struct qeth_channel *channel,unsigned char *iob, __u32 len)
1234{
1235 struct qeth_card *card;
1236
1237 QETH_DBF_TEXT(trace, 4, "setupccw");
1238 card = CARD_FROM_CDEV(channel->ccwdev);
1239 if (channel == &card->read)
1240 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1241 else
1242 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1243 channel->ccw.count = len;
1244 channel->ccw.cda = (__u32) __pa(iob);
1245}
1246
1247/**
1248 * get free buffer for ccws (IDX activation, lancmds, ipassists...)
1249 */
1250static struct qeth_cmd_buffer *
1251__qeth_get_buffer(struct qeth_channel *channel)
1252{
1253 __u8 index;
1254
1255 QETH_DBF_TEXT(trace, 6, "getbuff");
1256 index = channel->io_buf_no;
1257 do {
1258 if (channel->iob[index].state == BUF_STATE_FREE) {
1259 channel->iob[index].state = BUF_STATE_LOCKED;
1260 channel->io_buf_no = (channel->io_buf_no + 1) %
1261 QETH_CMD_BUFFER_NO;
1262 memset(channel->iob[index].data, 0, QETH_BUFSIZE);
1263 return channel->iob + index;
1264 }
1265 index = (index + 1) % QETH_CMD_BUFFER_NO;
1266 } while(index != channel->io_buf_no);
1267
1268 return NULL;
1269}
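/*
 * Annotation (not in the original source): the search starts at
 * io_buf_no and wraps around the ring exactly once; if all
 * QETH_CMD_BUFFER_NO buffers are locked, NULL is returned and callers
 * such as qeth_wait_for_buffer() sleep on channel->wait_q until a
 * buffer becomes free again.
 */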
1270
1271/**
1272 * release command buffer
1273 */
1274static void
1275qeth_release_buffer(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1276{
1277 unsigned long flags;
1278
1279 QETH_DBF_TEXT(trace, 6, "relbuff");
1280 spin_lock_irqsave(&channel->iob_lock, flags);
1281 memset(iob->data, 0, QETH_BUFSIZE);
1282 iob->state = BUF_STATE_FREE;
1283 iob->callback = qeth_send_control_data_cb;
1284 iob->rc = 0;
1285 spin_unlock_irqrestore(&channel->iob_lock, flags);
1286}
1287
1288static struct qeth_cmd_buffer *
1289qeth_get_buffer(struct qeth_channel *channel)
1290{
1291 struct qeth_cmd_buffer *buffer = NULL;
1292 unsigned long flags;
1293
1294 spin_lock_irqsave(&channel->iob_lock, flags);
1295 buffer = __qeth_get_buffer(channel);
1296 spin_unlock_irqrestore(&channel->iob_lock, flags);
1297 return buffer;
1298}
1299
1300static struct qeth_cmd_buffer *
1301qeth_wait_for_buffer(struct qeth_channel *channel)
1302{
1303 struct qeth_cmd_buffer *buffer;
1304 wait_event(channel->wait_q,
1305 ((buffer = qeth_get_buffer(channel)) != NULL));
1306 return buffer;
1307}
1308
1309static void
1310qeth_clear_cmd_buffers(struct qeth_channel *channel)
1311{
1312 int cnt = 0;
1313
1314 for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1315 qeth_release_buffer(channel,&channel->iob[cnt]);
1316 channel->buf_no = 0;
1317 channel->io_buf_no = 0;
1318}
1319
1320/**
1321 * start IDX for read and write channel
1322 */
1323static int
1324qeth_idx_activate_get_answer(struct qeth_channel *channel,
1325 void (*idx_reply_cb)(struct qeth_channel *,
1326 struct qeth_cmd_buffer *))
1327{
1328 struct qeth_cmd_buffer *iob;
1329 unsigned long flags;
1330 int rc;
1331 struct qeth_card *card;
1332
1333 QETH_DBF_TEXT(setup, 2, "idxanswr");
1334 card = CARD_FROM_CDEV(channel->ccwdev);
1335 iob = qeth_get_buffer(channel);
1336 iob->callback = idx_reply_cb;
1337 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1338 channel->ccw.count = QETH_BUFSIZE;
1339 channel->ccw.cda = (__u32) __pa(iob->data);
1340
1341 wait_event(card->wait_q,
1342 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
1343 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1344 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1345 rc = ccw_device_start(channel->ccwdev,
1346 &channel->ccw,(addr_t) iob, 0, 0);
1347 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1348
1349 if (rc) {
1350 PRINT_ERR("qeth: Error2 in activating channel rc=%d\n",rc);
1351 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1352 atomic_set(&channel->irq_pending, 0);
1353 wake_up(&card->wait_q);
1354 return rc;
1355 }
1356 rc = wait_event_interruptible_timeout(card->wait_q,
1357 channel->state == CH_STATE_UP, QETH_TIMEOUT);
1358 if (rc == -ERESTARTSYS)
1359 return rc;
1360 if (channel->state != CH_STATE_UP){
1361 rc = -ETIME;
1362 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
1363 qeth_clear_cmd_buffers(channel);
1364 } else
1365 rc = 0;
1366 return rc;
1367}
1368
1369static int
1370qeth_idx_activate_channel(struct qeth_channel *channel,
1371 void (*idx_reply_cb)(struct qeth_channel *,
1372 struct qeth_cmd_buffer *))
1373{
1374 struct qeth_card *card;
1375 struct qeth_cmd_buffer *iob;
1376 unsigned long flags;
1377 __u16 temp;
1378 int rc;
1379
1380 card = CARD_FROM_CDEV(channel->ccwdev);
1381
1382 QETH_DBF_TEXT(setup, 2, "idxactch");
1383
1384 iob = qeth_get_buffer(channel);
1385 iob->callback = idx_reply_cb;
1386 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1387 channel->ccw.count = IDX_ACTIVATE_SIZE;
1388 channel->ccw.cda = (__u32) __pa(iob->data);
1389 if (channel == &card->write) {
1390 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
1391 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1392 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1393 card->seqno.trans_hdr++;
1394 } else {
1395 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
1396 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1397 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1398 }
1399 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1400 &card->token.issuer_rm_w,QETH_MPC_TOKEN_LENGTH);
1401 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
1402 &card->info.func_level,sizeof(__u16));
1403 temp = raw_devno_from_bus_id(CARD_DDEV_ID(card));
1404 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2);
1405 temp = (card->info.cula << 8) + card->info.unit_addr2;
1406 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
1407
1408 wait_event(card->wait_q,
1409 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
1410 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1411 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1412 rc = ccw_device_start(channel->ccwdev,
1413 &channel->ccw,(addr_t) iob, 0, 0);
1414 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1415
1416 if (rc) {
1417 PRINT_ERR("qeth: Error1 in activating channel. rc=%d\n",rc);
1418 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
1419 atomic_set(&channel->irq_pending, 0);
1420 wake_up(&card->wait_q);
1421 return rc;
1422 }
1423 rc = wait_event_interruptible_timeout(card->wait_q,
1424 channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
1425 if (rc == -ERESTARTSYS)
1426 return rc;
1427 if (channel->state != CH_STATE_ACTIVATING) {
1428 PRINT_WARN("qeth: IDX activate timed out!\n");
1429 QETH_DBF_TEXT_(setup, 2, "2err%d", -ETIME);
1430 qeth_clear_cmd_buffers(channel);
1431 return -ETIME;
1432 }
1433 return qeth_idx_activate_get_answer(channel,idx_reply_cb);
1434}
1435
1436static int
1437qeth_peer_func_level(int level)
1438{
1439 if ((level & 0xff) == 8)
1440 return (level & 0xff) + 0x400;
1441 if (((level >> 8) & 3) == 1)
1442 return (level & 0xff) + 0x200;
1443 return level;
1444}
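/*
 * Worked examples (annotation, not original source): a level with low
 * byte 8, e.g. 0x4108, maps to 0x0408; one with bits 9-8 == 01, e.g.
 * 0x0101, maps to 0x0201; anything else, e.g. 0x0401, is returned
 * unchanged.
 */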
1445
1446static void
1447qeth_idx_write_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1448{
1449 struct qeth_card *card;
1450 __u16 temp;
1451
1452	QETH_DBF_TEXT(setup, 2, "idxwrcb");
1453
1454 if (channel->state == CH_STATE_DOWN) {
1455 channel->state = CH_STATE_ACTIVATING;
1456 goto out;
1457 }
1458 card = CARD_FROM_CDEV(channel->ccwdev);
1459
1460 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1461 PRINT_ERR("IDX_ACTIVATE on write channel device %s: negative "
1462 "reply\n", CARD_WDEV_ID(card));
1463 goto out;
1464 }
1465 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1466 if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
1467 PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
1468 "function level mismatch "
1469 "(sent: 0x%x, received: 0x%x)\n",
1470 CARD_WDEV_ID(card), card->info.func_level, temp);
1471 goto out;
1472 }
1473 channel->state = CH_STATE_UP;
1474out:
1475 qeth_release_buffer(channel, iob);
1476}
1477
1478static int
1479qeth_check_idx_response(unsigned char *buffer)
1480{
1481 if (!buffer)
1482 return 0;
1483
1484 QETH_DBF_HEX(control, 2, buffer, QETH_DBF_CONTROL_LEN);
1485 if ((buffer[2] & 0xc0) == 0xc0) {
1486 PRINT_WARN("received an IDX TERMINATE "
1487 "with cause code 0x%02x%s\n",
1488 buffer[4],
1489 ((buffer[4] == 0x22) ?
1490 " -- try another portname" : ""));
1491 QETH_DBF_TEXT(trace, 2, "ckidxres");
1492 QETH_DBF_TEXT(trace, 2, " idxterm");
1493 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
1494 return -EIO;
1495 }
1496 return 0;
1497}
1498
1499static void
1500qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1501{
1502 struct qeth_card *card;
1503 __u16 temp;
1504
1505	QETH_DBF_TEXT(setup, 2, "idxrdcb");
1506 if (channel->state == CH_STATE_DOWN) {
1507 channel->state = CH_STATE_ACTIVATING;
1508 goto out;
1509 }
1510
1511 card = CARD_FROM_CDEV(channel->ccwdev);
1512 if (qeth_check_idx_response(iob->data)) {
1513 goto out;
1514 }
1515 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1516 PRINT_ERR("IDX_ACTIVATE on read channel device %s: negative "
1517 "reply\n", CARD_RDEV_ID(card));
1518 goto out;
1519 }
1520
1521/*
1522 * temporary fix for a microcode bug;
1523 * to revert it, replace OR by AND
1524 */
1525 if ( (!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
1526 (card->info.type == QETH_CARD_TYPE_OSAE) )
1527 card->info.portname_required = 1;
1528
1529 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1530 if (temp != qeth_peer_func_level(card->info.func_level)) {
1531 PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
1532 "level mismatch (sent: 0x%x, received: 0x%x)\n",
1533 CARD_RDEV_ID(card), card->info.func_level, temp);
1534 goto out;
1535 }
1536 memcpy(&card->token.issuer_rm_r,
1537 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1538 QETH_MPC_TOKEN_LENGTH);
1539 memcpy(&card->info.mcl_level[0],
1540 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
1541 channel->state = CH_STATE_UP;
1542out:
1543 qeth_release_buffer(channel,iob);
1544}
1545
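/*
 * Post the next read CCW on the read channel so the card can deliver
 * further control data to us.
 */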
1546static int
1547qeth_issue_next_read(struct qeth_card *card)
1548{
1549 int rc;
1550 struct qeth_cmd_buffer *iob;
1551
1552 QETH_DBF_TEXT(trace,5,"issnxrd");
1553 if (card->read.state != CH_STATE_UP)
1554 return -EIO;
1555 iob = qeth_get_buffer(&card->read);
1556 if (!iob) {
1557 PRINT_WARN("issue_next_read failed: no iob available!\n");
1558 return -ENOMEM;
1559 }
1560 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
1561 wait_event(card->wait_q,
1562 atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0);
1563 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1564 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
1565 (addr_t) iob, 0, 0);
1566 if (rc) {
1567 PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc);
1568 atomic_set(&card->read.irq_pending, 0);
1569 qeth_schedule_recovery(card);
1570 wake_up(&card->wait_q);
1571 }
1572 return rc;
1573}
1574
1575static struct qeth_reply *
1576qeth_alloc_reply(struct qeth_card *card)
1577{
1578 struct qeth_reply *reply;
1579
1580 reply = kmalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
1581 if (reply){
1582 memset(reply, 0, sizeof(struct qeth_reply));
1583 atomic_set(&reply->refcnt, 1);
1584 reply->card = card;
1585	}
1586 return reply;
1587}
1588
1589static void
1590qeth_get_reply(struct qeth_reply *reply)
1591{
1592 WARN_ON(atomic_read(&reply->refcnt) <= 0);
1593 atomic_inc(&reply->refcnt);
1594}
1595
1596static void
1597qeth_put_reply(struct qeth_reply *reply)
1598{
1599 WARN_ON(atomic_read(&reply->refcnt) <= 0);
1600 if (atomic_dec_and_test(&reply->refcnt))
1601 kfree(reply);
1602}
1603
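/*
 * Timer callback for a pending control command: if the reply is still
 * on the waiter list, remove it and wake up the issuer with -ETIME.
 */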
1604static void
1605qeth_cmd_timeout(unsigned long data)
1606{
1607 struct qeth_reply *reply, *list_reply, *r;
1608 unsigned long flags;
1609
1610 reply = (struct qeth_reply *) data;
1611 spin_lock_irqsave(&reply->card->lock, flags);
1612 list_for_each_entry_safe(list_reply, r,
1613 &reply->card->cmd_waiter_list, list) {
1614 if (reply == list_reply){
1615 qeth_get_reply(reply);
1616 list_del_init(&reply->list);
1617 spin_unlock_irqrestore(&reply->card->lock, flags);
1618 reply->rc = -ETIME;
1619 reply->received = 1;
1620 wake_up(&reply->wait_q);
1621 qeth_put_reply(reply);
1622 return;
1623 }
1624 }
1625 spin_unlock_irqrestore(&reply->card->lock, flags);
1626}
1627
1628static void
1629qeth_reset_ip_addresses(struct qeth_card *card)
1630{
1631 QETH_DBF_TEXT(trace, 2, "rstipadd");
1632
1633 qeth_clear_ip_list(card, 0, 1);
1634 /* this function will also schedule the SET_IP_THREAD */
1635 qeth_set_multicast_list(card->dev);
1636}
1637
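/*
 * Inspect incoming control data: handle unsolicited STOPLAN/STARTLAN
 * indications in place (returning NULL); otherwise return the IPA
 * command, if any, for reply matching.
 */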
1638static struct qeth_ipa_cmd *
1639qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1640{
1641 struct qeth_ipa_cmd *cmd = NULL;
1642
1643 QETH_DBF_TEXT(trace,5,"chkipad");
1644 if (IS_IPA(iob->data)){
1645 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
1646 if (IS_IPA_REPLY(cmd))
1647 return cmd;
1648 else {
1649 switch (cmd->hdr.command) {
1650 case IPA_CMD_STOPLAN:
1651 PRINT_WARN("Link failure on %s (CHPID 0x%X) - "
1652 "there is a network problem or "
1653 "someone pulled the cable or "
1654 "disabled the port.\n",
1655 QETH_CARD_IFNAME(card),
1656 card->info.chpid);
1657 card->lan_online = 0;
1658 netif_carrier_off(card->dev);
1659 return NULL;
1660 case IPA_CMD_STARTLAN:
1661 PRINT_INFO("Link reestablished on %s "
1662 "(CHPID 0x%X). Scheduling "
1663 "IP address reset.\n",
1664 QETH_CARD_IFNAME(card),
1665 card->info.chpid);
1666 card->lan_online = 1;
1667 netif_carrier_on(card->dev);
1668 qeth_reset_ip_addresses(card);
1669 return NULL;
1670 case IPA_CMD_REGISTER_LOCAL_ADDR:
1671 QETH_DBF_TEXT(trace,3, "irla");
1672 break;
1673 case IPA_CMD_UNREGISTER_LOCAL_ADDR:
1674 QETH_DBF_TEXT(trace,3, "urla");
1675 break;
1676 default:
1677 PRINT_WARN("Received data is IPA "
1678 "but not a reply!\n");
1679 break;
1680 }
1681 }
1682 }
1683 return cmd;
1684}
1685
1686/**
1687 * wake all waiting ipa commands
1688 */
1689static void
1690qeth_clear_ipacmd_list(struct qeth_card *card)
1691{
1692 struct qeth_reply *reply, *r;
1693 unsigned long flags;
1694
1695 QETH_DBF_TEXT(trace, 4, "clipalst");
1696
1697 spin_lock_irqsave(&card->lock, flags);
1698 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1699 qeth_get_reply(reply);
1700 reply->rc = -EIO;
1701 reply->received = 1;
1702 list_del_init(&reply->list);
1703 wake_up(&reply->wait_q);
1704 qeth_put_reply(reply);
1705 }
1706 spin_unlock_irqrestore(&card->lock, flags);
1707}
1708
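/*
 * Channel callback for incoming control data: match the buffer
 * against the waiting replies (by IDX or IPA sequence number), run
 * the reply callback, wake up the issuer and acknowledge the PDU
 * sequence number.
 */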
1709static void
1710qeth_send_control_data_cb(struct qeth_channel *channel,
1711 struct qeth_cmd_buffer *iob)
1712{
1713 struct qeth_card *card;
1714 struct qeth_reply *reply, *r;
1715 struct qeth_ipa_cmd *cmd;
1716 unsigned long flags;
1717 int keep_reply;
1718
1719 QETH_DBF_TEXT(trace,4,"sndctlcb");
1720
1721 card = CARD_FROM_CDEV(channel->ccwdev);
1722 if (qeth_check_idx_response(iob->data)) {
1723 qeth_clear_ipacmd_list(card);
1724 qeth_schedule_recovery(card);
1725 goto out;
1726 }
1727
1728 cmd = qeth_check_ipa_data(card, iob);
1729 if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
1730 goto out;
1731
1732 spin_lock_irqsave(&card->lock, flags);
1733 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1734 if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
1735 ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
1736 qeth_get_reply(reply);
1737 list_del_init(&reply->list);
1738 spin_unlock_irqrestore(&card->lock, flags);
1739 keep_reply = 0;
1740 if (reply->callback != NULL) {
1741 if (cmd) {
1742 reply->offset = (__u16)((char*)cmd -
1743 (char *)iob->data);
1744 keep_reply = reply->callback(card,
1745 reply,
1746 (unsigned long)cmd);
1747 }
1748 else
1749 keep_reply = reply->callback(card,
1750 reply,
1751 (unsigned long)iob);
1752 }
1753 if (cmd)
1754 reply->rc = (u16) cmd->hdr.return_code;
1755 else if (iob->rc)
1756 reply->rc = iob->rc;
1757 if (keep_reply) {
1758 spin_lock_irqsave(&card->lock, flags);
1759 list_add_tail(&reply->list,
1760 &card->cmd_waiter_list);
1761 spin_unlock_irqrestore(&card->lock, flags);
1762 } else {
1763 reply->received = 1;
1764 wake_up(&reply->wait_q);
1765 }
1766 qeth_put_reply(reply);
1767 goto out;
1768 }
1769 }
1770 spin_unlock_irqrestore(&card->lock, flags);
1771out:
1772 memcpy(&card->seqno.pdu_hdr_ack,
1773 QETH_PDU_HEADER_SEQ_NO(iob->data),
1774 QETH_SEQ_NO_LENGTH);
1775 qeth_release_buffer(channel,iob);
1776}
1777
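/*
 * Send a control command and wait for the reply: stamp the transport
 * and PDU sequence numbers into the buffer, queue a qeth_reply on the
 * waiter list, start the write CCW and sleep until the reply arrives
 * or the command timer fires.
 */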
1778static int
1779qeth_send_control_data(struct qeth_card *card, int len,
1780 struct qeth_cmd_buffer *iob,
1781 int (*reply_cb)
1782 (struct qeth_card *, struct qeth_reply*, unsigned long),
1783 void *reply_param)
1785{
1786 int rc;
1787 unsigned long flags;
1788 struct qeth_reply *reply;
1789 struct timer_list timer;
1790
1791 QETH_DBF_TEXT(trace, 2, "sendctl");
1792
1793 qeth_setup_ccw(&card->write,iob->data,len);
1794
1795 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1796 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1797 card->seqno.trans_hdr++;
1798
1799 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
1800 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
1801 card->seqno.pdu_hdr++;
1802 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
1803 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
1804 iob->callback = qeth_release_buffer;
1805
1806 reply = qeth_alloc_reply(card);
1807 if (!reply) {
1808		PRINT_WARN("Could not alloc qeth_reply!\n");
1809 return -ENOMEM;
1810 }
1811 reply->callback = reply_cb;
1812 reply->param = reply_param;
1813 if (card->state == CARD_STATE_DOWN)
1814 reply->seqno = QETH_IDX_COMMAND_SEQNO;
1815 else
1816 reply->seqno = card->seqno.ipa++;
1817 init_timer(&timer);
1818 timer.function = qeth_cmd_timeout;
1819 timer.data = (unsigned long) reply;
1820 if (IS_IPA(iob->data))
1821 timer.expires = jiffies + QETH_IPA_TIMEOUT;
1822 else
1823 timer.expires = jiffies + QETH_TIMEOUT;
1824 init_waitqueue_head(&reply->wait_q);
1825 spin_lock_irqsave(&card->lock, flags);
1826 list_add_tail(&reply->list, &card->cmd_waiter_list);
1827 spin_unlock_irqrestore(&card->lock, flags);
1828 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1829 wait_event(card->wait_q,
1830 atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
1831 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1832 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1833 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1834 (addr_t) iob, 0, 0);
1835 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1836 if (rc){
1837 PRINT_WARN("qeth_send_control_data: "
1838 "ccw_device_start rc = %i\n", rc);
1839 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
1840 spin_lock_irqsave(&card->lock, flags);
1841 list_del_init(&reply->list);
1842 qeth_put_reply(reply);
1843 spin_unlock_irqrestore(&card->lock, flags);
1844 qeth_release_buffer(iob->channel, iob);
1845 atomic_set(&card->write.irq_pending, 0);
1846 wake_up(&card->wait_q);
1847 return rc;
1848 }
1849 add_timer(&timer);
1850 wait_event(reply->wait_q, reply->received);
1851 del_timer_sync(&timer);
1852 rc = reply->rc;
1853 qeth_put_reply(reply);
1854 return rc;
1855}
1856
1857static int
1858qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
1859 int (*reply_cb)
1860 (struct qeth_card *,struct qeth_reply*, unsigned long),
1861 void *reply_param)
1862{
1863 int rc;
1864 char prot_type;
1865
1866 QETH_DBF_TEXT(trace,4,"sendipa");
1867
1868 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
1869
1870 if (card->options.layer2)
1871 prot_type = QETH_PROT_LAYER2;
1872 else
1873 prot_type = QETH_PROT_TCPIP;
1874
1875 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data),&prot_type,1);
1876 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
1877 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
1878
1879 rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob,
1880 reply_cb, reply_param);
1881 return rc;
1882}
1883
1884
1885static int
1886qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1887 unsigned long data)
1888{
1889 struct qeth_cmd_buffer *iob;
1890
1891 QETH_DBF_TEXT(setup, 2, "cmenblcb");
1892
1893 iob = (struct qeth_cmd_buffer *) data;
1894 memcpy(&card->token.cm_filter_r,
1895 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
1896 QETH_MPC_TOKEN_LENGTH);
1897 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
1898 return 0;
1899}
1900
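/*
 * MPC handshake, step 1: CM ENABLE - pass our issuer_rm and cm_filter
 * tokens to the card; the callback stores the returned filter token.
 */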
1901static int
1902qeth_cm_enable(struct qeth_card *card)
1903{
1904 int rc;
1905 struct qeth_cmd_buffer *iob;
1906
1907 QETH_DBF_TEXT(setup,2,"cmenable");
1908
1909 iob = qeth_wait_for_buffer(&card->write);
1910 memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
1911 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
1912 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1913 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
1914 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
1915
1916 rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
1917 qeth_cm_enable_cb, NULL);
1918 return rc;
1919}
1920
1921static int
1922qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
1923 unsigned long data)
1924{
1926 struct qeth_cmd_buffer *iob;
1927
1928 QETH_DBF_TEXT(setup, 2, "cmsetpcb");
1929
1930 iob = (struct qeth_cmd_buffer *) data;
1931 memcpy(&card->token.cm_connection_r,
1932 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
1933 QETH_MPC_TOKEN_LENGTH);
1934 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
1935 return 0;
1936}
1937
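/*
 * MPC handshake, step 2: CM SETUP - establish the control connection;
 * the callback stores the returned cm_connection token.
 */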
1938static int
1939qeth_cm_setup(struct qeth_card *card)
1940{
1941 int rc;
1942 struct qeth_cmd_buffer *iob;
1943
1944 QETH_DBF_TEXT(setup,2,"cmsetup");
1945
1946 iob = qeth_wait_for_buffer(&card->write);
1947 memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
1948 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
1949 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1950 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
1951 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
1952 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
1953 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
1954 rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
1955 qeth_cm_setup_cb, NULL);
1956 return rc;
1957
1958}
1959
1960static int
1961qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1962 unsigned long data)
1963{
1965 __u16 mtu, framesize;
1966 __u16 len;
1967 __u8 link_type;
1968 struct qeth_cmd_buffer *iob;
1969
1970 QETH_DBF_TEXT(setup, 2, "ulpenacb");
1971
1972 iob = (struct qeth_cmd_buffer *) data;
1973 memcpy(&card->token.ulp_filter_r,
1974 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
1975 QETH_MPC_TOKEN_LENGTH);
1976 if (qeth_get_mtu_out_of_mpc(card->info.type)) {
1977 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
1978 mtu = qeth_get_mtu_outof_framesize(framesize);
1979 if (!mtu) {
1980 iob->rc = -EINVAL;
1981 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
1982 return 0;
1983 }
1984 card->info.max_mtu = mtu;
1985 card->info.initial_mtu = mtu;
1986 card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
1987 } else {
1988 card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
1989 card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
1990 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1991 }
1992
1993 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
1994 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
1995 memcpy(&link_type,
1996 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
1997 card->info.link_type = link_type;
1998 } else
1999 card->info.link_type = 0;
2000 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2001 return 0;
2002}
2003
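/*
 * MPC handshake, step 3: ULP ENABLE - announce port number, protocol
 * (layer 2 vs. TCP/IP) and portname; the callback derives MTU and
 * link type from the response.
 */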
2004static int
2005qeth_ulp_enable(struct qeth_card *card)
2006{
2007 int rc;
2008 char prot_type;
2009 struct qeth_cmd_buffer *iob;
2010
2011 /*FIXME: trace view callbacks*/
2012 QETH_DBF_TEXT(setup,2,"ulpenabl");
2013
2014 iob = qeth_wait_for_buffer(&card->write);
2015 memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
2016
2017 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
2018 (__u8) card->info.portno;
2019 if (card->options.layer2)
2020 prot_type = QETH_PROT_LAYER2;
2021 else
2022 prot_type = QETH_PROT_TCPIP;
2023
2024 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data),&prot_type,1);
2025 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2026 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2027 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2028 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2029 memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
2030 card->info.portname, 9);
2031 rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
2032 qeth_ulp_enable_cb, NULL);
2033 return rc;
2035}
2036
2037static inline __u16
2038__raw_devno_from_bus_id(char *id)
2039{
2040 id += (strlen(id) - 4);
2041 return (__u16) simple_strtoul(id, &id, 16);
2042}
2043
2044static int
2045qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2046 unsigned long data)
2047{
2048 struct qeth_cmd_buffer *iob;
2049
2050 QETH_DBF_TEXT(setup, 2, "ulpstpcb");
2051
2052 iob = (struct qeth_cmd_buffer *) data;
2053 memcpy(&card->token.ulp_connection_r,
2054 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2055 QETH_MPC_TOKEN_LENGTH);
2056 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2057 return 0;
2058}
2059
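/*
 * MPC handshake, step 4: ULP SETUP - bind the ULP connection to the
 * data device (CUA and real device address); the callback stores the
 * returned ulp_connection token.
 */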
2060static int
2061qeth_ulp_setup(struct qeth_card *card)
2062{
2063 int rc;
2064 __u16 temp;
2065 struct qeth_cmd_buffer *iob;
2066
2067 QETH_DBF_TEXT(setup,2,"ulpsetup");
2068
2069 iob = qeth_wait_for_buffer(&card->write);
2070 memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
2071
2072 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2073 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2074 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2075 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2076 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2077 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2078
2079 temp = __raw_devno_from_bus_id(CARD_DDEV_ID(card));
2080 memcpy(QETH_ULP_SETUP_CUA(iob->data), &temp, 2);
2081 temp = (card->info.cula << 8) + card->info.unit_addr2;
2082 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2083 rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
2084 qeth_ulp_setup_cb, NULL);
2085 return rc;
2086}
2087
2088static inline int
2089qeth_check_for_inbound_error(struct qeth_qdio_buffer *buf,
2090 unsigned int qdio_error,
2091 unsigned int siga_error)
2092{
2093 int rc = 0;
2094
2095 if (qdio_error || siga_error) {
2096 QETH_DBF_TEXT(trace, 2, "qdinerr");
2097 QETH_DBF_TEXT(qerr, 2, "qdinerr");
2098 QETH_DBF_TEXT_(qerr, 2, " F15=%02X",
2099 buf->buffer->element[15].flags & 0xff);
2100 QETH_DBF_TEXT_(qerr, 2, " F14=%02X",
2101 buf->buffer->element[14].flags & 0xff);
2102 QETH_DBF_TEXT_(qerr, 2, " qerr=%X", qdio_error);
2103 QETH_DBF_TEXT_(qerr, 2, " serr=%X", siga_error);
2104 rc = 1;
2105 }
2106 return rc;
2107}
2108
2109static inline struct sk_buff *
2110qeth_get_skb(unsigned int length)
2111{
2112 struct sk_buff* skb;
2113#ifdef CONFIG_QETH_VLAN
2114 if ((skb = dev_alloc_skb(length + VLAN_HLEN)))
2115 skb_reserve(skb, VLAN_HLEN);
2116#else
2117 skb = dev_alloc_skb(length);
2118#endif
2119 return skb;
2120}
2121
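/*
 * Walk the SBAL elements of an inbound buffer and assemble the next
 * packet into a freshly allocated skb; *__element and *__offset track
 * the current position and are updated for the next call.
 */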
2122static inline struct sk_buff *
2123qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
2124 struct qdio_buffer_element **__element, int *__offset,
2125 struct qeth_hdr **hdr)
2126{
2127 struct qdio_buffer_element *element = *__element;
2128 int offset = *__offset;
2129 struct sk_buff *skb = NULL;
2130 int skb_len;
2131 void *data_ptr;
2132 int data_len;
2133
2134 QETH_DBF_TEXT(trace,6,"nextskb");
2135 /* qeth_hdr must not cross element boundaries */
2136 if (element->length < offset + sizeof(struct qeth_hdr)){
2137 if (qeth_is_last_sbale(element))
2138 return NULL;
2139 element++;
2140 offset = 0;
2141 if (element->length < sizeof(struct qeth_hdr))
2142 return NULL;
2143 }
2144 *hdr = element->addr + offset;
2145
2146 offset += sizeof(struct qeth_hdr);
2147 if (card->options.layer2)
2148 skb_len = (*hdr)->hdr.l2.pkt_length;
2149 else
2150 skb_len = (*hdr)->hdr.l3.length;
2151
2152 if (!skb_len)
2153 return NULL;
2154 if (card->options.fake_ll){
2155 if (!(skb = qeth_get_skb(skb_len + QETH_FAKE_LL_LEN)))
2156 goto no_mem;
2157 skb_pull(skb, QETH_FAKE_LL_LEN);
2158 } else if (!(skb = qeth_get_skb(skb_len)))
2159 goto no_mem;
2160 data_ptr = element->addr + offset;
2161 while (skb_len) {
2162 data_len = min(skb_len, (int)(element->length - offset));
2163 if (data_len)
2164 memcpy(skb_put(skb, data_len), data_ptr, data_len);
2165 skb_len -= data_len;
2166 if (skb_len){
2167 if (qeth_is_last_sbale(element)){
2168 QETH_DBF_TEXT(trace,4,"unexeob");
2169 QETH_DBF_TEXT_(trace,4,"%s",CARD_BUS_ID(card));
2170 QETH_DBF_TEXT(qerr,2,"unexeob");
2171 QETH_DBF_TEXT_(qerr,2,"%s",CARD_BUS_ID(card));
2172 QETH_DBF_HEX(misc,4,buffer,sizeof(*buffer));
2173 dev_kfree_skb_any(skb);
2174 card->stats.rx_errors++;
2175 return NULL;
2176 }
2177 element++;
2178 offset = 0;
2179 data_ptr = element->addr;
2180 } else {
2181 offset += data_len;
2182 }
2183 }
2184 *__element = element;
2185 *__offset = offset;
2186 return skb;
2187no_mem:
2188 if (net_ratelimit()){
2189 PRINT_WARN("No memory for packet received on %s.\n",
2190 QETH_CARD_IFNAME(card));
2191 QETH_DBF_TEXT(trace,2,"noskbmem");
2192 QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2193 }
2194 card->stats.rx_dropped++;
2195 return NULL;
2196}
2197
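/*
 * Variant of eth_type_trans: derive pkt_type and protocol from the
 * (possibly faked) ethernet header; token ring link types are handed
 * to tr_type_trans instead.
 */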
2198static inline unsigned short
2199qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2200{
2201 struct qeth_card *card;
2202 struct ethhdr *eth;
2203
2204 QETH_DBF_TEXT(trace,6,"typtrans");
2205
2206 card = (struct qeth_card *)dev->priv;
2207#ifdef CONFIG_TR
2208 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
2209 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
2210 return tr_type_trans(skb,dev);
2211#endif /* CONFIG_TR */
2212 skb->mac.raw = skb->data;
2213 skb_pull(skb, ETH_HLEN );
2214 eth = eth_hdr(skb);
2215
2216 if (*eth->h_dest & 1) {
2217 if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
2218 skb->pkt_type = PACKET_BROADCAST;
2219 else
2220 skb->pkt_type = PACKET_MULTICAST;
2221 } else if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
2222 skb->pkt_type = PACKET_OTHERHOST;
2223
2224 if (ntohs(eth->h_proto) >= 1536)
2225 return eth->h_proto;
2226 if (*(unsigned short *) (skb->data) == 0xFFFF)
2227 return htons(ETH_P_802_3);
2228 return htons(ETH_P_802_2);
2229}
2230
2231static inline void
2232qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
2233 struct qeth_hdr *hdr)
2234{
2235 struct ethhdr *fake_hdr;
2236 struct iphdr *ip_hdr;
2237
2238 QETH_DBF_TEXT(trace,5,"skbfake");
2239 skb->mac.raw = skb->data - QETH_FAKE_LL_LEN;
2240 /* this is a fake ethernet header */
2241 fake_hdr = (struct ethhdr *) skb->mac.raw;
2242
2243 /* the destination MAC address */
2244 switch (skb->pkt_type){
2245 case PACKET_MULTICAST:
2246 switch (skb->protocol){
2247#ifdef CONFIG_QETH_IPV6
2248 case __constant_htons(ETH_P_IPV6):
2249 ndisc_mc_map((struct in6_addr *)
2250 skb->data + QETH_FAKE_LL_V6_ADDR_POS,
2251 fake_hdr->h_dest, card->dev, 0);
2252 break;
2253#endif /* CONFIG_QETH_IPV6 */
2254 case __constant_htons(ETH_P_IP):
2255 ip_hdr = (struct iphdr *)skb->data;
2256 if (card->dev->type == ARPHRD_IEEE802_TR)
2257 ip_tr_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
2258 else
2259 ip_eth_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
2260 break;
2261 default:
2262 memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
2263 }
2264 break;
2265 case PACKET_BROADCAST:
2266 memset(fake_hdr->h_dest, 0xff, ETH_ALEN);
2267 break;
2268 default:
2269 memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
2270 }
2271 /* the source MAC address */
2272 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
2273 memcpy(fake_hdr->h_source, &hdr->hdr.l3.dest_addr[2], ETH_ALEN);
2274 else
2275 memset(fake_hdr->h_source, 0, ETH_ALEN);
2276 /* the protocol */
2277 fake_hdr->h_proto = skb->protocol;
2278}
2279
2280static inline void
2281qeth_rebuild_skb_vlan(struct qeth_card *card, struct sk_buff *skb,
2282 struct qeth_hdr *hdr)
2283{
2284#ifdef CONFIG_QETH_VLAN
2285 u16 *vlan_tag;
2286
2287 if (hdr->hdr.l3.ext_flags &
2288 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
2289 vlan_tag = (u16 *) skb_push(skb, VLAN_HLEN);
2290 *vlan_tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
2291 hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
2292 *(vlan_tag + 1) = skb->protocol;
2293 skb->protocol = __constant_htons(ETH_P_8021Q);
2294 }
2295#endif /* CONFIG_QETH_VLAN */
2296}
2297
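/*
 * Prepare a received layer-2 skb for the stack: set pkt_type and
 * protocol, apply the checksumming option and strip a VLAN header if
 * present; returns the VLAN id (0 if none).
 */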
2298static inline __u16
2299qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2300 struct qeth_hdr *hdr)
2301{
2302 unsigned short vlan_id = 0;
2303#ifdef CONFIG_QETH_VLAN
2304 struct vlan_hdr *vhdr;
2305#endif
2306
2307 skb->pkt_type = PACKET_HOST;
2308 skb->protocol = qeth_type_trans(skb, skb->dev);
2309 if (card->options.checksum_type == NO_CHECKSUMMING)
2310 skb->ip_summed = CHECKSUM_UNNECESSARY;
2311 else
2312 skb->ip_summed = CHECKSUM_NONE;
2313#ifdef CONFIG_QETH_VLAN
2314 if (hdr->hdr.l2.flags[2] & (QETH_LAYER2_FLAG_VLAN)) {
2315 vhdr = (struct vlan_hdr *) skb->data;
2316 skb->protocol =
2317 __constant_htons(vhdr->h_vlan_encapsulated_proto);
2318 vlan_id = hdr->hdr.l2.vlan_id;
2319 skb_pull(skb, VLAN_HLEN);
2320 }
2321#endif
2322 return vlan_id;
2323}
2324
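/*
 * Prepare a received layer-3 skb for the stack: derive protocol and
 * pkt_type from the qeth header flags, handle VLAN tags and fake_ll,
 * and set ip_summed according to the checksumming option.
 */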
2325static inline void
2326qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2327 struct qeth_hdr *hdr)
2328{
2329#ifdef CONFIG_QETH_IPV6
2330 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
2331 skb->pkt_type = PACKET_HOST;
2332 skb->protocol = qeth_type_trans(skb, card->dev);
2333 return;
2334 }
2335#endif /* CONFIG_QETH_IPV6 */
2336 skb->protocol = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
2337 ETH_P_IP);
2338 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK){
2339 case QETH_CAST_UNICAST:
2340 skb->pkt_type = PACKET_HOST;
2341 break;
2342 case QETH_CAST_MULTICAST:
2343 skb->pkt_type = PACKET_MULTICAST;
2344 card->stats.multicast++;
2345 break;
2346 case QETH_CAST_BROADCAST:
2347 skb->pkt_type = PACKET_BROADCAST;
2348 card->stats.multicast++;
2349 break;
2350 case QETH_CAST_ANYCAST:
2351 case QETH_CAST_NOCAST:
2352 default:
2353 skb->pkt_type = PACKET_HOST;
2354 }
2355 qeth_rebuild_skb_vlan(card, skb, hdr);
2356 if (card->options.fake_ll)
2357 qeth_rebuild_skb_fake_ll(card, skb, hdr);
2358 else
2359 skb->mac.raw = skb->data;
2360 skb->ip_summed = card->options.checksum_type;
2361 if (card->options.checksum_type == HW_CHECKSUMMING){
2362 if ( (hdr->hdr.l3.ext_flags &
2363 (QETH_HDR_EXT_CSUM_HDR_REQ |
2364 QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
2365 (QETH_HDR_EXT_CSUM_HDR_REQ |
2366 QETH_HDR_EXT_CSUM_TRANSP_REQ) )
2367 skb->ip_summed = CHECKSUM_UNNECESSARY;
2368 else
2369 skb->ip_summed = SW_CHECKSUMMING;
2370 }
2371}
2372
2373static inline void
2374qeth_process_inbound_buffer(struct qeth_card *card,
2375 struct qeth_qdio_buffer *buf, int index)
2376{
2377 struct qdio_buffer_element *element;
2378 struct sk_buff *skb;
2379 struct qeth_hdr *hdr;
2380 int offset;
2381 int rxrc;
2382 __u16 vlan_tag = 0;
2383
2384 /* get first element of current buffer */
2385 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
2386 offset = 0;
2387#ifdef CONFIG_QETH_PERF_STATS
2388 card->perf_stats.bufs_rec++;
2389#endif
2390 while((skb = qeth_get_next_skb(card, buf->buffer, &element,
2391 &offset, &hdr))) {
2392 skb->dev = card->dev;
2393 if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
2394 vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr);
2395 else
2396 qeth_rebuild_skb(card, skb, hdr);
2397		/* is device UP? */
2398 if (!(card->dev->flags & IFF_UP)){
2399 dev_kfree_skb_any(skb);
2400 continue;
2401 }
2402#ifdef CONFIG_QETH_VLAN
2403 if (vlan_tag)
2404 vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag);
2405 else
2406#endif
2407 rxrc = netif_rx(skb);
2408 card->dev->last_rx = jiffies;
2409 card->stats.rx_packets++;
2410 card->stats.rx_bytes += skb->len;
2411 }
2412}
2413
2414static inline struct qeth_buffer_pool_entry *
2415qeth_get_buffer_pool_entry(struct qeth_card *card)
2416{
2417 struct qeth_buffer_pool_entry *entry;
2418
2419 QETH_DBF_TEXT(trace, 6, "gtbfplen");
2420 if (!list_empty(&card->qdio.in_buf_pool.entry_list)) {
2421 entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2422 struct qeth_buffer_pool_entry, list);
2423 list_del_init(&entry->list);
2424 return entry;
2425 }
2426 return NULL;
2427}
2428
2429static inline void
2430qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2431{
2432 struct qeth_buffer_pool_entry *pool_entry;
2433 int i;
2434
2435 pool_entry = qeth_get_buffer_pool_entry(card);
2436 /*
2437 * since the buffer is accessed only from the input_tasklet
2438 * there shouldn't be a need to synchronize; also, since we use
2439	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2440 * buffers
2441 */
2442 BUG_ON(!pool_entry);
2443
2444 buf->pool_entry = pool_entry;
2445 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
2446 buf->buffer->element[i].length = PAGE_SIZE;
2447 buf->buffer->element[i].addr = pool_entry->elements[i];
2448 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2449 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
2450 else
2451 buf->buffer->element[i].flags = 0;
2452 }
2453 buf->state = QETH_QDIO_BUF_EMPTY;
2454}
2455
2456static inline void
2457qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2458 struct qeth_qdio_out_buffer *buf)
2459{
2460 int i;
2461 struct sk_buff *skb;
2462
2463 /* is PCI flag set on buffer? */
2464 if (buf->buffer->element[0].flags & 0x40)
2465 atomic_dec(&queue->set_pci_flags_count);
2466
2467 while ((skb = skb_dequeue(&buf->skb_list))){
2468 atomic_dec(&skb->users);
2469 dev_kfree_skb_any(skb);
2470 }
2471 qeth_eddp_buf_release_contexts(buf);
2472 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i){
2473 buf->buffer->element[i].length = 0;
2474 buf->buffer->element[i].addr = NULL;
2475 buf->buffer->element[i].flags = 0;
2476 }
2477 buf->next_element_to_fill = 0;
2478 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
2479}
2480
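/*
 * Give processed input buffers back to the hardware; buffers are only
 * requeued once QETH_IN_BUF_REQUEUE_THRESHOLD of them have
 * accumulated, to save SIGA instructions.
 */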
2481static inline void
2482qeth_queue_input_buffer(struct qeth_card *card, int index)
2483{
2484 struct qeth_qdio_q *queue = card->qdio.in_q;
2485 int count;
2486 int i;
2487 int rc;
2488
2489 QETH_DBF_TEXT(trace,6,"queinbuf");
2490 count = (index < queue->next_buf_to_init)?
2491 card->qdio.in_buf_pool.buf_count -
2492 (queue->next_buf_to_init - index) :
2493 card->qdio.in_buf_pool.buf_count -
2494 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
2495 /* only requeue at a certain threshold to avoid SIGAs */
2496 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)){
2497 for (i = queue->next_buf_to_init;
2498 i < queue->next_buf_to_init + count; ++i)
2499 qeth_init_input_buffer(card,
2500 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]);
2501		/*
2502		 * according to the old code, requeueing all 128 buffers should
2503		 * be avoided in order to benefit from PCI avoidance.
2504		 * this function keeps at least one buffer (the buffer at
2505		 * 'index') un-requeued -> this buffer is the first buffer that
2506		 * will be requeued the next time
2507		 */
2508#ifdef CONFIG_QETH_PERF_STATS
2509 card->perf_stats.inbound_do_qdio_cnt++;
2510 card->perf_stats.inbound_do_qdio_start_time = qeth_get_micros();
2511#endif
2512 rc = do_QDIO(CARD_DDEV(card),
2513 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
2514 0, queue->next_buf_to_init, count, NULL);
2515#ifdef CONFIG_QETH_PERF_STATS
2516 card->perf_stats.inbound_do_qdio_time += qeth_get_micros() -
2517 card->perf_stats.inbound_do_qdio_start_time;
2518#endif
2519 if (rc){
2520 PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
2521			   "returned %i (device %s).\n",
2522 rc, CARD_DDEV_ID(card));
2523 QETH_DBF_TEXT(trace,2,"qinberr");
2524 QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2525 }
2526 queue->next_buf_to_init = (queue->next_buf_to_init + count) %
2527 QDIO_MAX_BUFFERS_PER_Q;
2528 }
2529}
2530
2531static inline void
2532qeth_put_buffer_pool_entry(struct qeth_card *card,
2533 struct qeth_buffer_pool_entry *entry)
2534{
2535 QETH_DBF_TEXT(trace, 6, "ptbfplen");
2536 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
2537}
2538
2539static void
2540qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
2541 unsigned int qdio_err, unsigned int siga_err,
2542 unsigned int queue, int first_element, int count,
2543 unsigned long card_ptr)
2544{
2545 struct net_device *net_dev;
2546 struct qeth_card *card;
2547 struct qeth_qdio_buffer *buffer;
2548 int index;
2549 int i;
2550
2551 QETH_DBF_TEXT(trace, 6, "qdinput");
2552 card = (struct qeth_card *) card_ptr;
2553 net_dev = card->dev;
2554#ifdef CONFIG_QETH_PERF_STATS
2555 card->perf_stats.inbound_cnt++;
2556 card->perf_stats.inbound_start_time = qeth_get_micros();
2557#endif
2558 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
2559 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
2560 QETH_DBF_TEXT(trace, 1,"qdinchk");
2561 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2562 QETH_DBF_TEXT_(trace,1,"%04X%04X",first_element,count);
2563 QETH_DBF_TEXT_(trace,1,"%04X%04X", queue, status);
2564 qeth_schedule_recovery(card);
2565 return;
2566 }
2567 }
2568 for (i = first_element; i < (first_element + count); ++i) {
2569 index = i % QDIO_MAX_BUFFERS_PER_Q;
2570 buffer = &card->qdio.in_q->bufs[index];
2571 if (!((status == QDIO_STATUS_LOOK_FOR_ERROR) &&
2572 qeth_check_for_inbound_error(buffer, qdio_err, siga_err)))
2573 qeth_process_inbound_buffer(card, buffer, index);
2574 /* clear buffer and give back to hardware */
2575 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
2576 qeth_queue_input_buffer(card, index);
2577 }
2578#ifdef CONFIG_QETH_PERF_STATS
2579 card->perf_stats.inbound_time += qeth_get_micros() -
2580 card->perf_stats.inbound_start_time;
2581#endif
2582}
2583
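/*
 * Map the SIGA condition code and qdio error of a sent buffer to one
 * of the QETH_SEND_ERROR_* classes; KICK_IT makes the output handler
 * trigger a recovery.
 */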
2584static inline int
2585qeth_handle_send_error(struct qeth_card *card,
2586 struct qeth_qdio_out_buffer *buffer,
2587 int qdio_err, int siga_err)
2588{
2589 int sbalf15 = buffer->buffer->element[15].flags & 0xff;
2590 int cc = siga_err & 3;
2591
2592 QETH_DBF_TEXT(trace, 6, "hdsnderr");
2593 switch (cc) {
2594 case 0:
2595 if (qdio_err){
2596 QETH_DBF_TEXT(trace, 1,"lnkfail");
2597 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2598 QETH_DBF_TEXT_(trace,1,"%04x %02x",
2599 (u16)qdio_err, (u8)sbalf15);
2600 return QETH_SEND_ERROR_LINK_FAILURE;
2601 }
2602 return QETH_SEND_ERROR_NONE;
2603 case 2:
2604 if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
2605 QETH_DBF_TEXT(trace, 1, "SIGAcc2B");
2606 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2607 return QETH_SEND_ERROR_KICK_IT;
2608 }
2609 if ((sbalf15 >= 15) && (sbalf15 <= 31))
2610 return QETH_SEND_ERROR_RETRY;
2611 return QETH_SEND_ERROR_LINK_FAILURE;
2612 /* look at qdio_error and sbalf 15 */
2613 case 1:
2614 QETH_DBF_TEXT(trace, 1, "SIGAcc1");
2615 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2616 return QETH_SEND_ERROR_LINK_FAILURE;
2617 case 3:
2618 QETH_DBF_TEXT(trace, 1, "SIGAcc3");
2619 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2620 return QETH_SEND_ERROR_KICK_IT;
2621 }
2622 return QETH_SEND_ERROR_LINK_FAILURE;
2623}
2624
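/*
 * Hand 'count' primed buffers starting at 'index' to the hardware via
 * do_QDIO; on non-IQD devices a PCI request flag is set on a buffer
 * when needed, so a later interrupt can flush lingering packed
 * buffers.
 */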
2625void
2626qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2627 int index, int count)
2628{
2629 struct qeth_qdio_out_buffer *buf;
2630 int rc;
2631 int i;
2632
2633 QETH_DBF_TEXT(trace, 6, "flushbuf");
2634
2635 for (i = index; i < index + count; ++i) {
2636 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2637 buf->buffer->element[buf->next_element_to_fill - 1].flags |=
2638 SBAL_FLAGS_LAST_ENTRY;
2639
2640 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
2641 continue;
2642
2643 if (!queue->do_pack){
2644 if ((atomic_read(&queue->used_buffers) >=
2645 (QETH_HIGH_WATERMARK_PACK -
2646 QETH_WATERMARK_PACK_FUZZ)) &&
2647 !atomic_read(&queue->set_pci_flags_count)){
2648 /* it's likely that we'll go to packing
2649 * mode soon */
2650 atomic_inc(&queue->set_pci_flags_count);
2651 buf->buffer->element[0].flags |= 0x40;
2652 }
2653 } else {
2654 if (!atomic_read(&queue->set_pci_flags_count)){
2655 /*
2656				 * there's no outstanding PCI any more, so we
2657				 * have to request a PCI to be sure the PCI
2658				 * will fire at some time in the future; then
2659				 * we can flush packed buffers that might still
2660				 * be hanging around, which can happen if no
2661				 * further send was requested by the stack
2662				 */
2663 atomic_inc(&queue->set_pci_flags_count);
2664 buf->buffer->element[0].flags |= 0x40;
2665 }
2666 }
2667 }
2668
2669 queue->card->dev->trans_start = jiffies;
2670#ifdef CONFIG_QETH_PERF_STATS
2671 queue->card->perf_stats.outbound_do_qdio_cnt++;
2672 queue->card->perf_stats.outbound_do_qdio_start_time = qeth_get_micros();
2673#endif
2674 if (under_int)
2675 rc = do_QDIO(CARD_DDEV(queue->card),
2676 QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_UNDER_INTERRUPT,
2677 queue->queue_no, index, count, NULL);
2678 else
2679 rc = do_QDIO(CARD_DDEV(queue->card), QDIO_FLAG_SYNC_OUTPUT,
2680 queue->queue_no, index, count, NULL);
2681#ifdef CONFIG_QETH_PERF_STATS
2682 queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() -
2683 queue->card->perf_stats.outbound_do_qdio_start_time;
2684#endif
2685 if (rc){
2686 QETH_DBF_SPRINTF(trace, 0, "qeth_flush_buffers: do_QDIO "
2687 "returned error (%i) on device %s.",
2688 rc, CARD_DDEV_ID(queue->card));
2689 QETH_DBF_TEXT(trace, 2, "flushbuf");
2690 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
2691 queue->card->stats.tx_errors += count;
2692 /* this must not happen under normal circumstances. if it
2693 * happens something is really wrong -> recover */
2694 qeth_schedule_recovery(queue->card);
2695 return;
2696 }
2697 atomic_add(count, &queue->used_buffers);
2698#ifdef CONFIG_QETH_PERF_STATS
2699 queue->card->perf_stats.bufs_sent += count;
2700#endif
2701}
2702
2703/*
2704 * Switches to packing state if the number of used buffers on a queue
2705 * reaches a certain limit.
2706 */
2707static inline void
2708qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2709{
2710 if (!queue->do_pack) {
2711 if (atomic_read(&queue->used_buffers)
2712 >= QETH_HIGH_WATERMARK_PACK){
2713 /* switch non-PACKING -> PACKING */
2714 QETH_DBF_TEXT(trace, 6, "np->pack");
2715#ifdef CONFIG_QETH_PERF_STATS
2716 queue->card->perf_stats.sc_dp_p++;
2717#endif
2718 queue->do_pack = 1;
2719 }
2720 }
2721}
2722
2723/*
2724 * Switches from packing to non-packing mode. If there is a packing
2725 * buffer on the queue this buffer will be prepared to be flushed.
2726 * In that case 1 is returned to inform the caller. If no buffer
2727 * has to be flushed, zero is returned.
2728 */
2729static inline int
2730qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2731{
2732 struct qeth_qdio_out_buffer *buffer;
2733 int flush_count = 0;
2734
2735 if (queue->do_pack) {
2736 if (atomic_read(&queue->used_buffers)
2737 <= QETH_LOW_WATERMARK_PACK) {
2738 /* switch PACKING -> non-PACKING */
2739 QETH_DBF_TEXT(trace, 6, "pack->np");
2740#ifdef CONFIG_QETH_PERF_STATS
2741 queue->card->perf_stats.sc_p_dp++;
2742#endif
2743 queue->do_pack = 0;
2744 /* flush packing buffers */
2745 buffer = &queue->bufs[queue->next_buf_to_fill];
2746 if ((atomic_read(&buffer->state) ==
2747 QETH_QDIO_BUF_EMPTY) &&
2748 (buffer->next_element_to_fill > 0)) {
2749 atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
2750 flush_count++;
2751 queue->next_buf_to_fill =
2752 (queue->next_buf_to_fill + 1) %
2753 QDIO_MAX_BUFFERS_PER_Q;
2754 }
2755 }
2756 }
2757 return flush_count;
2758}
2759
2760/*
2761 * Called to flush a packing buffer if no more pci flags are on the queue.
2762 * Checks if there is a packing buffer and prepares it to be flushed.
2763 * In that case returns 1, otherwise zero.
2764 */
2765static inline int
2766qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2767{
2768 struct qeth_qdio_out_buffer *buffer;
2769
2770 buffer = &queue->bufs[queue->next_buf_to_fill];
2771 if((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
2772 (buffer->next_element_to_fill > 0)){
2773 /* it's a packing buffer */
2774 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
2775 queue->next_buf_to_fill =
2776 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
2777 return 1;
2778 }
2779 return 0;
2780}
2781
2782static inline void
2783qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2784{
2785 int index;
2786 int flush_cnt = 0;
2787 int q_was_packing = 0;
2788
2789 /*
2790	 * check if we have to switch to non-packing mode or if
2791 * we have to get a pci flag out on the queue
2792 */
2793 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
2794 !atomic_read(&queue->set_pci_flags_count)){
2795 if (atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
2796 QETH_OUT_Q_UNLOCKED) {
2797 /*
2798 * If we get in here, there was no action in
2799 * do_send_packet. So, we check if there is a
2800 * packing buffer to be flushed here.
2801 */
2802 netif_stop_queue(queue->card->dev);
2803 index = queue->next_buf_to_fill;
2804 q_was_packing = queue->do_pack;
2805 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
2806 if (!flush_cnt &&
2807 !atomic_read(&queue->set_pci_flags_count))
2808 flush_cnt +=
2809 qeth_flush_buffers_on_no_pci(queue);
2810#ifdef CONFIG_QETH_PERF_STATS
2811 if (q_was_packing)
2812 queue->card->perf_stats.bufs_sent_pack +=
2813 flush_cnt;
2814#endif
2815 if (flush_cnt)
2816 qeth_flush_buffers(queue, 1, index, flush_cnt);
2817 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
2818 }
2819 }
2820}
2821
2822static void
2823qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
2824 unsigned int qdio_error, unsigned int siga_error,
2825 unsigned int __queue, int first_element, int count,
2826 unsigned long card_ptr)
2827{
2828 struct qeth_card *card = (struct qeth_card *) card_ptr;
2829 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
2830 struct qeth_qdio_out_buffer *buffer;
2831 int i;
2832
2833 QETH_DBF_TEXT(trace, 6, "qdouhdl");
2834 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
2835 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
2836 QETH_DBF_SPRINTF(trace, 2, "On device %s: "
2837 "received active check "
2838 "condition (0x%08x).",
2839 CARD_BUS_ID(card), status);
2840 QETH_DBF_TEXT(trace, 2, "chkcond");
2841 QETH_DBF_TEXT_(trace, 2, "%08x", status);
2842 netif_stop_queue(card->dev);
2843 qeth_schedule_recovery(card);
2844 return;
2845 }
2846 }
2847#ifdef CONFIG_QETH_PERF_STATS
2848 card->perf_stats.outbound_handler_cnt++;
2849 card->perf_stats.outbound_handler_start_time = qeth_get_micros();
2850#endif
2851 for(i = first_element; i < (first_element + count); ++i){
2852 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2853		/* we only handle the KICK_IT error by doing a recovery */
2854 if (qeth_handle_send_error(card, buffer, qdio_error, siga_error)
2855 == QETH_SEND_ERROR_KICK_IT){
2856 netif_stop_queue(card->dev);
2857 qeth_schedule_recovery(card);
2858 return;
2859 }
2860 qeth_clear_output_buffer(queue, buffer);
2861 }
2862 atomic_sub(count, &queue->used_buffers);
2863 /* check if we need to do something on this outbound queue */
2864 if (card->info.type != QETH_CARD_TYPE_IQD)
2865 qeth_check_outbound_queue(queue);
2866
2867 netif_wake_queue(queue->card->dev);
2868#ifdef CONFIG_QETH_PERF_STATS
2869 card->perf_stats.outbound_handler_time += qeth_get_micros() -
2870 card->perf_stats.outbound_handler_start_time;
2871#endif
2872}
2873
2874static void
2875qeth_create_qib_param_field(struct qeth_card *card, char *param_field)
2876{
2878 param_field[0] = _ascebc['P'];
2879 param_field[1] = _ascebc['C'];
2880 param_field[2] = _ascebc['I'];
2881 param_field[3] = _ascebc['T'];
2882 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2883 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2884 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
2885}
2886
2887static void
2888qeth_create_qib_param_field_blkt(struct qeth_card *card, char *param_field)
2889{
2890 param_field[16] = _ascebc['B'];
2891 param_field[17] = _ascebc['L'];
2892 param_field[18] = _ascebc['K'];
2893 param_field[19] = _ascebc['T'];
2894 *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
2895 *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
2896 *((unsigned int *) (&param_field[28])) = card->info.blkt.inter_packet_jumbo;
2897}
2898
2899static void
2900qeth_initialize_working_pool_list(struct qeth_card *card)
2901{
2902 struct qeth_buffer_pool_entry *entry;
2903
2904 QETH_DBF_TEXT(trace,5,"inwrklst");
2905
2906 list_for_each_entry(entry,
2907 &card->qdio.init_pool.entry_list, init_list) {
2908 qeth_put_buffer_pool_entry(card,entry);
2909 }
2910}
2911
2912static void
2913qeth_clear_working_pool_list(struct qeth_card *card)
2914{
2915 struct qeth_buffer_pool_entry *pool_entry, *tmp;
2916
2917 QETH_DBF_TEXT(trace,5,"clwrklst");
2918 list_for_each_entry_safe(pool_entry, tmp,
2919 &card->qdio.in_buf_pool.entry_list, list){
2920 list_del(&pool_entry->list);
2921 }
2922}
2923
2924static void
2925qeth_free_buffer_pool(struct qeth_card *card)
2926{
2927 struct qeth_buffer_pool_entry *pool_entry, *tmp;
2928	int i = 0;
2929 QETH_DBF_TEXT(trace,5,"freepool");
2930 list_for_each_entry_safe(pool_entry, tmp,
2931 &card->qdio.init_pool.entry_list, init_list){
2932 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
2933 free_page((unsigned long)pool_entry->elements[i]);
2934 list_del(&pool_entry->init_list);
2935 kfree(pool_entry);
2936 }
2937}
2938
2939static int
2940qeth_alloc_buffer_pool(struct qeth_card *card)
2941{
2942 struct qeth_buffer_pool_entry *pool_entry;
2943 void *ptr;
2944 int i, j;
2945
2946 QETH_DBF_TEXT(trace,5,"alocpool");
2947 for (i = 0; i < card->qdio.init_pool.buf_count; ++i){
2948 pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
2949 if (!pool_entry){
2950 qeth_free_buffer_pool(card);
2951 return -ENOMEM;
2952 }
2953 for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
2954 ptr = (void *) __get_free_page(GFP_KERNEL);
2955 if (!ptr) {
2956 while (j > 0)
2957 free_page((unsigned long)
2958 pool_entry->elements[--j]);
2959 kfree(pool_entry);
2960 qeth_free_buffer_pool(card);
2961 return -ENOMEM;
2962 }
2963 pool_entry->elements[j] = ptr;
2964 }
2965 list_add(&pool_entry->init_list,
2966 &card->qdio.init_pool.entry_list);
2967 }
2968 return 0;
2969}
2970
2971int
2972qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
2973{
2974 QETH_DBF_TEXT(trace, 2, "realcbp");
2975
2976 if ((card->state != CARD_STATE_DOWN) &&
2977 (card->state != CARD_STATE_RECOVER))
2978 return -EPERM;
2979
2980	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
2981 qeth_clear_working_pool_list(card);
2982 qeth_free_buffer_pool(card);
2983 card->qdio.in_buf_pool.buf_count = bufcnt;
2984 card->qdio.init_pool.buf_count = bufcnt;
2985 return qeth_alloc_buffer_pool(card);
2986}
2987
2988static int
2989qeth_alloc_qdio_buffers(struct qeth_card *card)
2990{
2991 int i, j;
2992
2993 QETH_DBF_TEXT(setup, 2, "allcqdbf");
2994
2995 if (card->qdio.state == QETH_QDIO_ALLOCATED)
2996 return 0;
2997
2998 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL);
2999 if (!card->qdio.in_q)
3000		return -ENOMEM;
3001 QETH_DBF_TEXT(setup, 2, "inq");
3002 QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *));
3003 memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
3004 /* give inbound qeth_qdio_buffers their qdio_buffers */
3005 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3006 card->qdio.in_q->bufs[i].buffer =
3007 &card->qdio.in_q->qdio_bufs[i];
3008 /* inbound buffer pool */
3009 if (qeth_alloc_buffer_pool(card)){
3010 kfree(card->qdio.in_q);
3011 return -ENOMEM;
3012 }
3013 /* outbound */
3014 card->qdio.out_qs =
3015 kmalloc(card->qdio.no_out_queues *
3016 sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
3017 if (!card->qdio.out_qs){
3018 qeth_free_buffer_pool(card);
3019 return -ENOMEM;
3020 }
3021 for (i = 0; i < card->qdio.no_out_queues; ++i){
3022 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
3023 GFP_KERNEL);
3024 if (!card->qdio.out_qs[i]){
3025 while (i > 0)
3026 kfree(card->qdio.out_qs[--i]);
3027 kfree(card->qdio.out_qs);
3028 return -ENOMEM;
3029 }
3030 QETH_DBF_TEXT_(setup, 2, "outq %i", i);
3031 QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
3032 memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
3033 card->qdio.out_qs[i]->queue_no = i;
3034 /* give outbound qeth_qdio_buffers their qdio_buffers */
3035 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
3036 card->qdio.out_qs[i]->bufs[j].buffer =
3037 &card->qdio.out_qs[i]->qdio_bufs[j];
3038 skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
3039 skb_list);
3040 INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
3041 }
3042 }
3043 card->qdio.state = QETH_QDIO_ALLOCATED;
3044 return 0;
3045}
3046
3047static void
3048qeth_free_qdio_buffers(struct qeth_card *card)
3049{
3050 int i, j;
3051
3052 QETH_DBF_TEXT(trace, 2, "freeqdbf");
3053 if (card->qdio.state == QETH_QDIO_UNINITIALIZED)
3054 return;
3055 kfree(card->qdio.in_q);
3056 /* inbound buffer pool */
3057 qeth_free_buffer_pool(card);
3058 /* free outbound qdio_qs */
3059 for (i = 0; i < card->qdio.no_out_queues; ++i){
3060 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
3061 qeth_clear_output_buffer(card->qdio.out_qs[i],
3062 &card->qdio.out_qs[i]->bufs[j]);
3063 kfree(card->qdio.out_qs[i]);
3064 }
3065 kfree(card->qdio.out_qs);
3066 card->qdio.state = QETH_QDIO_UNINITIALIZED;
3067}
3068
3069static void
3070qeth_clear_qdio_buffers(struct qeth_card *card)
3071{
3072 int i, j;
3073
3074 QETH_DBF_TEXT(trace, 2, "clearqdbf");
3075 /* clear outbound buffers to free skbs */
3076 for (i = 0; i < card->qdio.no_out_queues; ++i)
3077 if (card->qdio.out_qs[i]){
3078 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
3079 qeth_clear_output_buffer(card->qdio.out_qs[i],
3080 &card->qdio.out_qs[i]->bufs[j]);
3081 }
3082}
3083
3084static void
3085qeth_init_qdio_info(struct qeth_card *card)
3086{
3087 QETH_DBF_TEXT(setup, 4, "intqdinf");
3088 card->qdio.state = QETH_QDIO_UNINITIALIZED;
3089 /* inbound */
3090 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
3091 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
3092 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
3093 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
3094 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
3095 /* outbound */
3096 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
3097 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
3098}
3099
3100static int
3101qeth_init_qdio_queues(struct qeth_card *card)
3102{
3103 int i, j;
3104 int rc;
3105
3106 QETH_DBF_TEXT(setup, 2, "initqdqs");
3107
3108 /* inbound queue */
3109 memset(card->qdio.in_q->qdio_bufs, 0,
3110 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
3111 qeth_initialize_working_pool_list(card);
3112	/* give only as many buffers to hardware as we have buffer pool entries */
3113 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
3114 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3115 card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1;
3116 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
3117 card->qdio.in_buf_pool.buf_count - 1, NULL);
3118 if (rc) {
3119 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3120 return rc;
3121 }
3122 rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
3123 if (rc) {
3124 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3125 return rc;
3126 }
3127 /* outbound queue */
3128 for (i = 0; i < card->qdio.no_out_queues; ++i){
3129 memset(card->qdio.out_qs[i]->qdio_bufs, 0,
3130 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
3131 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
3132 qeth_clear_output_buffer(card->qdio.out_qs[i],
3133 &card->qdio.out_qs[i]->bufs[j]);
3134 }
3135 card->qdio.out_qs[i]->card = card;
3136 card->qdio.out_qs[i]->next_buf_to_fill = 0;
3137 card->qdio.out_qs[i]->do_pack = 0;
3138 atomic_set(&card->qdio.out_qs[i]->used_buffers,0);
3139 atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
3140 atomic_set(&card->qdio.out_qs[i]->state,
3141 QETH_OUT_Q_UNLOCKED);
3142 }
3143 return 0;
3144}
3145
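/*
 * Fill a qdio_initialize structure (QIB parameter field, SBAL address
 * arrays, input/output handlers) and establish the QDIO queues for
 * this card.
 */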
3146static int
3147qeth_qdio_establish(struct qeth_card *card)
3148{
3149 struct qdio_initialize init_data;
3150 char *qib_param_field;
3151 struct qdio_buffer **in_sbal_ptrs;
3152 struct qdio_buffer **out_sbal_ptrs;
3153 int i, j, k;
3154 int rc;
3155
3156 QETH_DBF_TEXT(setup, 2, "qdioest");
3157
3158 qib_param_field = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
3159 GFP_KERNEL);
3160 if (!qib_param_field)
3161 return -ENOMEM;
3162
3163 memset(qib_param_field, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(char));
3164
3165 qeth_create_qib_param_field(card, qib_param_field);
3166 qeth_create_qib_param_field_blkt(card, qib_param_field);
3167
3168 in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
3169 GFP_KERNEL);
3170 if (!in_sbal_ptrs) {
3171 kfree(qib_param_field);
3172 return -ENOMEM;
3173 }
3174 for(i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3175 in_sbal_ptrs[i] = (struct qdio_buffer *)
3176 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
3177
3178 out_sbal_ptrs =
3179 kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
3180 sizeof(void *), GFP_KERNEL);
3181 if (!out_sbal_ptrs) {
3182 kfree(in_sbal_ptrs);
3183 kfree(qib_param_field);
3184 return -ENOMEM;
3185 }
3186 for(i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
3187 for(j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k){
3188 out_sbal_ptrs[k] = (struct qdio_buffer *)
3189 virt_to_phys(card->qdio.out_qs[i]->
3190 bufs[j].buffer);
3191 }
3192
3193 memset(&init_data, 0, sizeof(struct qdio_initialize));
3194 init_data.cdev = CARD_DDEV(card);
3195 init_data.q_format = qeth_get_qdio_q_format(card);
3196 init_data.qib_param_field_format = 0;
3197 init_data.qib_param_field = qib_param_field;
3198 init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
3199 init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
3200 init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
3201 init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
3202 init_data.no_input_qs = 1;
3203 init_data.no_output_qs = card->qdio.no_out_queues;
3204 init_data.input_handler = (qdio_handler_t *)
3205 qeth_qdio_input_handler;
3206 init_data.output_handler = (qdio_handler_t *)
3207 qeth_qdio_output_handler;
3208 init_data.int_parm = (unsigned long) card;
3209 init_data.flags = QDIO_INBOUND_0COPY_SBALS |
3210 QDIO_OUTBOUND_0COPY_SBALS |
3211 QDIO_USE_OUTBOUND_PCIS;
3212 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3213 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3214
3215 if (!(rc = qdio_initialize(&init_data)))
3216 card->qdio.state = QETH_QDIO_ESTABLISHED;
3217
3218 kfree(out_sbal_ptrs);
3219 kfree(in_sbal_ptrs);
3220 kfree(qib_param_field);
3221 return rc;
3222}
3223
3224static int
3225qeth_qdio_activate(struct qeth_card *card)
3226{
3227 QETH_DBF_TEXT(setup,3,"qdioact");
3228 return qdio_activate(CARD_DDEV(card), 0);
3229}
3230
3231static int
3232qeth_clear_channel(struct qeth_channel *channel)
3233{
3234 unsigned long flags;
3235 struct qeth_card *card;
3236 int rc;
3237
3238 QETH_DBF_TEXT(trace,3,"clearch");
3239 card = CARD_FROM_CDEV(channel->ccwdev);
3240 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3241 rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
3242 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3243
3244 if (rc)
3245 return rc;
3246 rc = wait_event_interruptible_timeout(card->wait_q,
3247 channel->state==CH_STATE_STOPPED, QETH_TIMEOUT);
3248 if (rc == -ERESTARTSYS)
3249 return rc;
3250 if (channel->state != CH_STATE_STOPPED)
3251 return -ETIME;
3252 channel->state = CH_STATE_DOWN;
3253 return 0;
3254}
3255
3256static int
3257qeth_halt_channel(struct qeth_channel *channel)
3258{
3259 unsigned long flags;
3260 struct qeth_card *card;
3261 int rc;
3262
3263 QETH_DBF_TEXT(trace,3,"haltch");
3264 card = CARD_FROM_CDEV(channel->ccwdev);
3265 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3266 rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
3267 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3268
3269 if (rc)
3270 return rc;
3271 rc = wait_event_interruptible_timeout(card->wait_q,
3272 channel->state==CH_STATE_HALTED, QETH_TIMEOUT);
3273 if (rc == -ERESTARTSYS)
3274 return rc;
3275 if (channel->state != CH_STATE_HALTED)
3276 return -ETIME;
3277 return 0;
3278}
3279
3280static int
3281qeth_halt_channels(struct qeth_card *card)
3282{
3283 int rc = 0;
3284
3285 QETH_DBF_TEXT(trace,3,"haltchs");
3286 if ((rc = qeth_halt_channel(&card->read)))
3287 return rc;
3288 if ((rc = qeth_halt_channel(&card->write)))
3289 return rc;
3290 return qeth_halt_channel(&card->data);
3291}

3292static int
3293qeth_clear_channels(struct qeth_card *card)
3294{
3295 int rc = 0;
3296
3297 QETH_DBF_TEXT(trace,3,"clearchs");
3298 if ((rc = qeth_clear_channel(&card->read)))
3299 return rc;
3300 if ((rc = qeth_clear_channel(&card->write)))
3301 return rc;
3302 return qeth_clear_channel(&card->data);
3303}
3304
3305static int
3306qeth_clear_halt_card(struct qeth_card *card, int halt)
3307{
3308 int rc = 0;
3309
3310 QETH_DBF_TEXT(trace,3,"clhacrd");
3311 QETH_DBF_HEX(trace, 3, &card, sizeof(void *));
3312
3313 if (halt)
3314 rc = qeth_halt_channels(card);
3315 if (rc)
3316 return rc;
3317 return qeth_clear_channels(card);
3318}
3319
3320static int
3321qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
3322{
3323 int rc = 0;
3324
3325 QETH_DBF_TEXT(trace,3,"qdioclr");
3326 if (card->qdio.state == QETH_QDIO_ESTABLISHED){
3327 if ((rc = qdio_cleanup(CARD_DDEV(card),
3328 (card->info.type == QETH_CARD_TYPE_IQD) ?
3329 QDIO_FLAG_CLEANUP_USING_HALT :
3330 QDIO_FLAG_CLEANUP_USING_CLEAR)))
3331 QETH_DBF_TEXT_(trace, 3, "1err%d", rc);
3332 card->qdio.state = QETH_QDIO_ALLOCATED;
3333 }
3334 if ((rc = qeth_clear_halt_card(card, use_halt)))
3335 QETH_DBF_TEXT_(trace, 3, "2err%d", rc);
3336 card->state = CARD_STATE_DOWN;
3337 return rc;
3338}
3339
3340static int
3341qeth_dm_act(struct qeth_card *card)
3342{
3343 int rc;
3344 struct qeth_cmd_buffer *iob;
3345
3346 QETH_DBF_TEXT(setup,2,"dmact");
3347
3348 iob = qeth_wait_for_buffer(&card->write);
3349 memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
3350
3351 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
3352 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
3353 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
3354 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3355 rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
3356 return rc;
3357}
3358
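/*
 * MPC bring-up sequence: start the first control-channel read, run CM
 * enable/setup and ULP enable/setup, allocate and establish the QDIO
 * queues, activate them and finally send DM_ACT. Failures after the
 * QDIO establish step tear the QDIO setup down again via out_qdio.
 */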
3359static int
3360qeth_mpc_initialize(struct qeth_card *card)
3361{
3362 int rc;
3363
3364 QETH_DBF_TEXT(setup,2,"mpcinit");
3365
3366 if ((rc = qeth_issue_next_read(card))){
3367 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3368 return rc;
3369 }
3370 if ((rc = qeth_cm_enable(card))){
3371 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3372 return rc;
3373 }
3374 if ((rc = qeth_cm_setup(card))){
3375 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
3376 return rc;
3377 }
3378 if ((rc = qeth_ulp_enable(card))){
3379 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
3380 return rc;
3381 }
3382 if ((rc = qeth_ulp_setup(card))){
3383 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3384 return rc;
3385 }
3386 if ((rc = qeth_alloc_qdio_buffers(card))){
3387 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3388 return rc;
3389 }
3390 if ((rc = qeth_qdio_establish(card))){
3391 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
3392 qeth_free_qdio_buffers(card);
3393 goto out_qdio;
3394 }
3395 if ((rc = qeth_qdio_activate(card))){
3396 QETH_DBF_TEXT_(setup, 2, "7err%d", rc);
3397 goto out_qdio;
3398 }
3399 if ((rc = qeth_dm_act(card))){
3400 QETH_DBF_TEXT_(setup, 2, "8err%d", rc);
3401 goto out_qdio;
3402 }
3403
3404 return 0;
3405out_qdio:
3406 qeth_qdio_clear_card(card, card->info.type==QETH_CARD_TYPE_OSAE);
3407 return rc;
3408}
3409
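/*
 * Allocate a net_device matching the card and link type: a token ring
 * device for TR/HSTR links (only with CONFIG_TR), an "hsi%d" device for
 * HiperSockets (IQD) cards, and a plain ethernet device otherwise.
 */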
3410static struct net_device *
3411qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype)
3412{
3413 struct net_device *dev = NULL;
3414
3415 switch (type) {
3416 case QETH_CARD_TYPE_OSAE:
3417 switch (linktype) {
3418 case QETH_LINK_TYPE_LANE_TR:
3419 case QETH_LINK_TYPE_HSTR:
3420#ifdef CONFIG_TR
3421 dev = alloc_trdev(0);
3422#endif /* CONFIG_TR */
3423 break;
3424 default:
3425 dev = alloc_etherdev(0);
3426 }
3427 break;
3428 case QETH_CARD_TYPE_IQD:
3429 dev = alloc_netdev(0, "hsi%d", ether_setup);
3430 break;
3431 default:
3432 dev = alloc_etherdev(0);
3433 }
3434 return dev;
3435}
3436
3437/* hard_header fake function; used when fake_ll is set */
3438static int
3439qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
3440 unsigned short type, void *daddr, void *saddr,
3441 unsigned len)
3442{
3443 struct ethhdr *hdr;
3444
3445 hdr = (struct ethhdr *)skb_push(skb, QETH_FAKE_LL_LEN);
3446 memcpy(hdr->h_source, dev->dev_addr, ETH_ALEN);
3447 memcpy(hdr->h_dest, "FAKELL", ETH_ALEN);
3448 if (type != ETH_P_802_3)
3449 hdr->h_proto = htons(type);
3450 else
3451 hdr->h_proto = htons(len);
3452 return QETH_FAKE_LL_LEN;
3453}
3454
3455static inline int
3456qeth_send_packet(struct qeth_card *, struct sk_buff *);
3457
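/*
 * hard_start_xmit hook: packets arriving while the card or LAN is down
 * are counted and dropped. Otherwise the queue is stopped, the packet
 * is handed to qeth_send_packet() and the queue is woken again; only
 * -EBUSY is propagated as NETDEV_TX_BUSY (the stack retries the skb),
 * all other errors are folded into NETDEV_TX_OK after dropping the skb
 * so that ksoftirqd does not spin on an unsendable packet.
 */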
3458static int
3459qeth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3460{
3461 int rc;
3462 struct qeth_card *card;
3463
3464 QETH_DBF_TEXT(trace, 6, "hrdstxmi");
3465 card = (struct qeth_card *)dev->priv;
3466 if (skb==NULL) {
3467 card->stats.tx_dropped++;
3468 card->stats.tx_errors++;
3469 /* return OK; otherwise ksoftirqd goes to 100% */
3470 return NETDEV_TX_OK;
3471 }
3472 if ((card->state != CARD_STATE_UP) || !card->lan_online) {
3473 card->stats.tx_dropped++;
3474 card->stats.tx_errors++;
3475 card->stats.tx_carrier_errors++;
3476 dev_kfree_skb_any(skb);
3477 /* return OK; otherwise ksoftirqd goes to 100% */
3478 return NETDEV_TX_OK;
3479 }
3480#ifdef CONFIG_QETH_PERF_STATS
3481 card->perf_stats.outbound_cnt++;
3482 card->perf_stats.outbound_start_time = qeth_get_micros();
3483#endif
3484 netif_stop_queue(dev);
3485 if ((rc = qeth_send_packet(card, skb))) {
3486 if (rc == -EBUSY) {
3487 return NETDEV_TX_BUSY;
3488 } else {
3489 card->stats.tx_errors++;
3490 card->stats.tx_dropped++;
3491 dev_kfree_skb_any(skb);
3492 /*set to OK; otherwise ksoftirqd goes to 100% */
3493 rc = NETDEV_TX_OK;
3494 }
3495 }
3496 netif_wake_queue(dev);
3497#ifdef CONFIG_QETH_PERF_STATS
3498 card->perf_stats.outbound_time += qeth_get_micros() -
3499 card->perf_stats.outbound_start_time;
3500#endif
3501 return rc;
3502}
3503
3504static int
3505qeth_verify_vlan_dev(struct net_device *dev, struct qeth_card *card)
3506{
3507 int rc = 0;
3508#ifdef CONFIG_QETH_VLAN
3509 struct vlan_group *vg;
3510 int i;
3511
3512 if (!(vg = card->vlangrp))
3513 return rc;
3514
3515 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){
3516 if (vg->vlan_devices[i] == dev){
3517 rc = QETH_VLAN_CARD;
3518 break;
3519 }
3520 }
3521#endif
3522 return rc;
3523}
3524
3525static int
3526qeth_verify_dev(struct net_device *dev)
3527{
3528 struct qeth_card *card;
3529 unsigned long flags;
3530 int rc = 0;
3531
3532 read_lock_irqsave(&qeth_card_list.rwlock, flags);
3533 list_for_each_entry(card, &qeth_card_list.list, list){
3534 if (card->dev == dev){
3535 rc = QETH_REAL_CARD;
3536 break;
3537 }
3538 rc = qeth_verify_vlan_dev(dev, card);
3539 if (rc)
3540 break;
3541 }
3542 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
3543
3544 return rc;
3545}
3546
3547static struct qeth_card *
3548qeth_get_card_from_dev(struct net_device *dev)
3549{
3550 struct qeth_card *card = NULL;
3551 int rc;
3552
3553 rc = qeth_verify_dev(dev);
3554 if (rc == QETH_REAL_CARD)
3555 card = (struct qeth_card *)dev->priv;
3556 else if (rc == QETH_VLAN_CARD)
3557 card = (struct qeth_card *)
3558 VLAN_DEV_INFO(dev)->real_dev->priv;
3559
3560 QETH_DBF_TEXT_(trace, 4, "%d", rc);
3561 return card;
3562}
3563
3564static void
3565qeth_tx_timeout(struct net_device *dev)
3566{
3567 struct qeth_card *card;
3568
3569 card = (struct qeth_card *) dev->priv;
3570 card->stats.tx_errors++;
3571 qeth_schedule_recovery(card);
3572}
3573
3574static int
3575qeth_open(struct net_device *dev)
3576{
3577 struct qeth_card *card;
3578
3579 QETH_DBF_TEXT(trace, 4, "qethopen");
3580
3581 card = (struct qeth_card *) dev->priv;
3582
3583 if (card->state != CARD_STATE_SOFTSETUP)
3584 return -ENODEV;
3585
3586 if ( (card->options.layer2) &&
3587 (!card->info.layer2_mac_registered)) {
3588 QETH_DBF_TEXT(trace,4,"nomacadr");
3589 return -EPERM;
3590 }
3591 card->dev->flags |= IFF_UP;
3592 netif_start_queue(dev);
3593 card->data.state = CH_STATE_UP;
3594 card->state = CARD_STATE_UP;
3595
3596 if (!card->lan_online){
3597 if (netif_carrier_ok(dev))
3598 netif_carrier_off(dev);
3599 }
3600 return 0;
3601}
3602
3603static int
3604qeth_stop(struct net_device *dev)
3605{
3606 struct qeth_card *card;
3607
3608 QETH_DBF_TEXT(trace, 4, "qethstop");
3609
3610 card = (struct qeth_card *) dev->priv;
3611
3612 netif_stop_queue(dev);
3613 card->dev->flags &= ~IFF_UP;
3614 if (card->state == CARD_STATE_UP)
3615 card->state = CARD_STATE_SOFTSETUP;
3616 return 0;
3617}
3618
3619static inline int
3620qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3621{
3622 int cast_type = RTN_UNSPEC;
3623
3624 if (skb->dst && skb->dst->neighbour){
3625 cast_type = skb->dst->neighbour->type;
3626 if ((cast_type == RTN_BROADCAST) ||
3627 (cast_type == RTN_MULTICAST) ||
3628 (cast_type == RTN_ANYCAST))
3629 return cast_type;
3630 else
3631 return RTN_UNSPEC;
3632 }
3633 /* try something else */
3634 if (skb->protocol == htons(ETH_P_IPV6))
3635 return (skb->nh.raw[24] == 0xff) ? RTN_MULTICAST : 0;
3636 else if (skb->protocol == htons(ETH_P_IP))
3637 return ((skb->nh.raw[16] & 0xf0) == 0xe0) ? RTN_MULTICAST : 0;
3638 /* no network-layer information available; classify by the link-level header */
3639 if (!memcmp(skb->data, skb->dev->broadcast, 6))
3640 return RTN_BROADCAST;
3641 else {
3642 u16 hdr_mac;
3643
3644 hdr_mac = *((u16 *)skb->data);
3645 /* tr multicast? */
3646 switch (card->info.link_type) {
3647 case QETH_LINK_TYPE_HSTR:
3648 case QETH_LINK_TYPE_LANE_TR:
3649 if ((hdr_mac == QETH_TR_MAC_NC) ||
3650 (hdr_mac == QETH_TR_MAC_C))
3651 return RTN_MULTICAST;
3652 /* eth or so multicast? */
3653 default:
3654 if ((hdr_mac == QETH_ETH_MAC_V4) ||
3655 (hdr_mac == QETH_ETH_MAC_V6))
3656 return RTN_MULTICAST;
3657 }
3658 }
3659 return cast_type;
3660}
3661
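/*
 * Select the outbound queue for a packet. On 4-queue setups multicast
 * traffic can be pinned via is_multicast_different, and IPv4 traffic
 * can be spread by individual TOS flags (low delay -> queue 0 ... "not
 * important" -> queue 3) or by precedence (queue = 3 - (tos >> 6));
 * everything else uses the card's default_out_queue.
 */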
3662static inline int
3663qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3664 int ipv, int cast_type)
3665{
3666 if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
3667 return card->qdio.default_out_queue;
3668 switch (card->qdio.no_out_queues) {
3669 case 4:
3670 if (cast_type && card->info.is_multicast_different)
3671 return card->info.is_multicast_different &
3672 (card->qdio.no_out_queues - 1);
3673 if (card->qdio.do_prio_queueing && (ipv == 4)) {
3674 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){
3675 if (skb->nh.iph->tos & IP_TOS_NOTIMPORTANT)
3676 return 3;
3677 if (skb->nh.iph->tos & IP_TOS_HIGHRELIABILITY)
3678 return 2;
3679 if (skb->nh.iph->tos & IP_TOS_HIGHTHROUGHPUT)
3680 return 1;
3681 if (skb->nh.iph->tos & IP_TOS_LOWDELAY)
3682 return 0;
3683 }
3684 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC)
3685 return 3 - (skb->nh.iph->tos >> 6);
3686 } else if (card->qdio.do_prio_queueing && (ipv == 6)) {
3687 /* TODO: IPv6!!! */
3688 }
3689 return card->qdio.default_out_queue;
3690 case 1: /* fallthrough for single-out-queue 1920-device */
3691 default:
3692 return card->qdio.default_out_queue;
3693 }
3694}
3695
3696static inline int
3697qeth_get_ip_version(struct sk_buff *skb)
3698{
3699 switch (skb->protocol) {
3700 case __constant_htons(ETH_P_IPV6):
3701 return 6;
3702 case __constant_htons(ETH_P_IP):
3703 return 4;
3704 default:
3705 return 0;
3706 }
3707}
3708
3709static inline int
3710qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
3711 struct qeth_hdr **hdr, int ipv)
3712{
3713 int rc = 0;
3714#ifdef CONFIG_QETH_VLAN
3715 u16 *tag;
3716#endif
3717
3718 QETH_DBF_TEXT(trace, 6, "prepskb");
3719
3720 rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
3721 if (rc)
3722 return rc;
3723#ifdef CONFIG_QETH_VLAN
3724 if (card->vlangrp && vlan_tx_tag_present(*skb) &&
3725 ((ipv == 6) || card->options.layer2) ) {
3726 /*
3727 * Move the mac addresses (6 bytes src, 6 bytes dest)
3728 * to the beginning of the new header. We are using three
3729 * memcpys instead of one memmove to save cycles.
3730 */
3731 skb_push(*skb, VLAN_HLEN);
3732 memcpy((*skb)->data, (*skb)->data + 4, 4);
3733 memcpy((*skb)->data + 4, (*skb)->data + 8, 4);
3734 memcpy((*skb)->data + 8, (*skb)->data + 12, 4);
3735 tag = (u16 *)((*skb)->data + 12);
3736 /*
3737 * first two bytes = ETH_P_8021Q (0x8100)
3738 * second two bytes = VLANID
3739 */
3740 *tag = __constant_htons(ETH_P_8021Q);
3741 *(tag + 1) = htons(vlan_tx_tag_get(*skb));
3742 }
3743#endif
3744 *hdr = (struct qeth_hdr *)
3745 qeth_push_skb(card, skb, sizeof(struct qeth_hdr));
3746 if (*hdr == NULL)
3747 return -EINVAL;
3748 return 0;
3749}
3750
3751static inline u8
3752qeth_get_qeth_hdr_flags4(int cast_type)
3753{
3754 if (cast_type == RTN_MULTICAST)
3755 return QETH_CAST_MULTICAST;
3756 if (cast_type == RTN_BROADCAST)
3757 return QETH_CAST_BROADCAST;
3758 return QETH_CAST_UNICAST;
3759}
3760
3761static inline u8
3762qeth_get_qeth_hdr_flags6(int cast_type)
3763{
3764 u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
3765 if (cast_type == RTN_MULTICAST)
3766 return ct | QETH_CAST_MULTICAST;
3767 if (cast_type == RTN_ANYCAST)
3768 return ct | QETH_CAST_ANYCAST;
3769 if (cast_type == RTN_BROADCAST)
3770 return ct | QETH_CAST_BROADCAST;
3771 return ct | QETH_CAST_UNICAST;
3772}
3773
3774static inline void
3775qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
3776 struct sk_buff *skb)
3777{
3778 __u16 hdr_mac;
3779
3780 if (!memcmp(skb->data+QETH_HEADER_SIZE,
3781 skb->dev->broadcast,6)) { /* broadcast? */
3782 *(__u32 *)hdr->hdr.l2.flags |=
3783 QETH_LAYER2_FLAG_BROADCAST << 8;
3784 return;
3785 }
3786 hdr_mac=*((__u16*)skb->data);
3787 /* tr multicast? */
3788 switch (card->info.link_type) {
3789 case QETH_LINK_TYPE_HSTR:
3790 case QETH_LINK_TYPE_LANE_TR:
3791 if ((hdr_mac == QETH_TR_MAC_NC) ||
3792 (hdr_mac == QETH_TR_MAC_C) )
3793 *(__u32 *)hdr->hdr.l2.flags |=
3794 QETH_LAYER2_FLAG_MULTICAST << 8;
3795 else
3796 *(__u32 *)hdr->hdr.l2.flags |=
3797 QETH_LAYER2_FLAG_UNICAST << 8;
3798 break;
3799 /* eth or so multicast? */
3800 default:
3801 if ( (hdr_mac==QETH_ETH_MAC_V4) ||
3802 (hdr_mac==QETH_ETH_MAC_V6) )
3803 *(__u32 *)hdr->hdr.l2.flags |=
3804 QETH_LAYER2_FLAG_MULTICAST << 8;
3805 else
3806 *(__u32 *)hdr->hdr.l2.flags |=
3807 QETH_LAYER2_FLAG_UNICAST << 8;
3808 }
3809}
3810
3811static inline void
3812qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
3813 struct sk_buff *skb, int cast_type)
3814{
3815 memset(hdr, 0, sizeof(struct qeth_hdr));
3816 hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
3817
3818 /* set byte 0 to "0x02" and byte 3 to casting flags */
3819 if (cast_type==RTN_MULTICAST)
3820 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_MULTICAST << 8;
3821 else if (cast_type==RTN_BROADCAST)
3822 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_BROADCAST << 8;
3823 else
3824 qeth_layer2_get_packet_type(card, hdr, skb);
3825
3826 hdr->hdr.l2.pkt_length = skb->len-QETH_HEADER_SIZE;
3827#ifdef CONFIG_QETH_VLAN
3828 /* VSWITCH relies on the VLAN
3829 * information to be present in
3830 * the QDIO header */
3831 if ((card->vlangrp != NULL) &&
3832 vlan_tx_tag_present(skb)) {
3833 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_VLAN << 8;
3834 hdr->hdr.l2.vlan_id = vlan_tx_tag_get(skb);
3835 }
3836#endif
3837}
3838
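/*
 * Fill the qeth_hdr that precedes the packet data. Layer-2 mode is
 * delegated to qeth_layer2_fill_header(); in layer-3 mode the cast-type
 * flags and destination address are set, preferring the next hop from
 * the neighbour cache and falling back to the IP header's destination.
 * Non-IP frames are marked for passthrough.
 */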
3839void
3840qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
3841 struct sk_buff *skb, int ipv, int cast_type)
3842{
3843 QETH_DBF_TEXT(trace, 6, "fillhdr");
3844
3845 memset(hdr, 0, sizeof(struct qeth_hdr));
3846 if (card->options.layer2) {
3847 qeth_layer2_fill_header(card, hdr, skb, cast_type);
3848 return;
3849 }
3850 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
3851 hdr->hdr.l3.ext_flags = 0;
3852#ifdef CONFIG_QETH_VLAN
3853 /*
3854 * set the VLAN information before this area is overwritten with the
3855 * next hop ip; v6 uses passthrough, v4 sets the tag in the QDIO header.
3856 */
3857 if (card->vlangrp && vlan_tx_tag_present(skb)) {
3858 hdr->hdr.l3.ext_flags = (ipv == 4) ?
3859 QETH_HDR_EXT_VLAN_FRAME :
3860 QETH_HDR_EXT_INCLUDE_VLAN_TAG;
3861 hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
3862 }
3863#endif /* CONFIG_QETH_VLAN */
3864 hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
3865 if (ipv == 4) { /* IPv4 */
3866 hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags4(cast_type);
3867 memset(hdr->hdr.l3.dest_addr, 0, 12);
3868 if ((skb->dst) && (skb->dst->neighbour)) {
3869 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
3870 *((u32 *) skb->dst->neighbour->primary_key);
3871 } else {
3872 /* fill in destination address used in ip header */
3873 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) = skb->nh.iph->daddr;
3874 }
3875 } else if (ipv == 6) { /* IPv6 or passthru */
3876 hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type);
3877 if ((skb->dst) && (skb->dst->neighbour)) {
3878 memcpy(hdr->hdr.l3.dest_addr,
3879 skb->dst->neighbour->primary_key, 16);
3880 } else {
3881 /* fill in destination address used in ip header */
3882 memcpy(hdr->hdr.l3.dest_addr, &skb->nh.ipv6h->daddr, 16);
3883 }
3884 } else { /* passthrough */
3885 if (!memcmp(skb->data + sizeof(struct qeth_hdr),
3886 skb->dev->broadcast, 6)) { /* broadcast? */
3887 hdr->hdr.l3.flags = QETH_CAST_BROADCAST | QETH_HDR_PASSTHRU;
3888 } else {
3889 hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
3890 QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
3891 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
3892 }
3893 }
3894}
3895
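/*
 * Map a nonlinear skb into SBAL elements, filling back to front: the
 * last page fragment is entered first (flagged LAST_FRAG), inner
 * fragments become MIDDLE_FRAG, and the linear skb->data part ends up
 * in the first element as FIRST_FRAG, consuming nr_frags + 1 elements.
 */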
3896static inline void
3897__qeth_fill_buffer_frag(struct sk_buff *skb, struct qdio_buffer *buffer,
3898 int *next_element_to_fill)
3899{
3900 int length = skb->len;
3901 struct skb_frag_struct *frag;
3902 int fragno;
3903 unsigned long addr;
3904 int element;
3905 int first_lap = 1;
3906
3907 fragno = skb_shinfo(skb)->nr_frags; /* start with last frag */
3908 element = *next_element_to_fill + fragno;
3909 while (length > 0) {
3910 if (fragno > 0) {
3911 frag = &skb_shinfo(skb)->frags[fragno - 1];
3912 addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
3913 frag->page_offset;
3914 buffer->element[element].addr = (char *)addr;
3915 buffer->element[element].length = frag->size;
3916 length -= frag->size;
3917 if (first_lap)
3918 buffer->element[element].flags =
3919 SBAL_FLAGS_LAST_FRAG;
3920 else
3921 buffer->element[element].flags =
3922 SBAL_FLAGS_MIDDLE_FRAG;
3923 } else {
3924 buffer->element[element].addr = skb->data;
3925 buffer->element[element].length = length;
3926 length = 0;
3927 buffer->element[element].flags =
3928 SBAL_FLAGS_FIRST_FRAG;
3929 }
3930 element--;
3931 fragno--;
3932 first_lap = 0;
3933 }
3934 *next_element_to_fill += skb_shinfo(skb)->nr_frags + 1;
3935}
3936
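/*
 * Map a linear skb into SBAL elements, splitting the data at page
 * boundaries so that no single element spans a page. The first chunk
 * is flagged FIRST_FRAG and the final one LAST_FRAG (or 0 when the
 * whole skb fits into one element).
 */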
3937static inline void
3938__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
3939 int *next_element_to_fill)
3940{
3941 int length = skb->len;
3942 int length_here;
3943 int element;
3944 char *data;
3945 int first_lap = 1;
3946
3947 element = *next_element_to_fill;
3948 data = skb->data;
3949 while (length > 0) {
3950 /* length_here is the remaining amount of data in this page */
3951 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
3952 if (length < length_here)
3953 length_here = length;
3954 buffer->element[element].addr = data;
3955 buffer->element[element].length = length_here;
3956 length -= length_here;
3957 if (!length){
3958 if (first_lap)
3959 buffer->element[element].flags = 0;
3960 else
3961 buffer->element[element].flags =
3962 SBAL_FLAGS_LAST_FRAG;
3963 } else {
3964 if (first_lap)
3965 buffer->element[element].flags =
3966 SBAL_FLAGS_FIRST_FRAG;
3967 else
3968 buffer->element[element].flags =
3969 SBAL_FLAGS_MIDDLE_FRAG;
3970 }
3971 data += length_here;
3972 element++;
3973 first_lap = 0;
3974 }
3975 *next_element_to_fill = element;
3976}
3977
3978static inline int
3979qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3980 struct qeth_qdio_out_buffer *buf,
3981 struct sk_buff *skb)
3982{
3983 struct qdio_buffer *buffer;
3984 int flush_cnt = 0;
3985
3986 QETH_DBF_TEXT(trace, 6, "qdfillbf");
3987 buffer = buf->buffer;
3988 atomic_inc(&skb->users);
3989 skb_queue_tail(&buf->skb_list, skb);
3990 if (skb_shinfo(skb)->nr_frags == 0)
3991 __qeth_fill_buffer(skb, buffer,
3992 (int *)&buf->next_element_to_fill);
3993 else
3994 __qeth_fill_buffer_frag(skb, buffer,
3995 (int *)&buf->next_element_to_fill);
3996
3997 if (!queue->do_pack) {
3998 QETH_DBF_TEXT(trace, 6, "fillbfnp");
3999 /* set state to PRIMED -> will be flushed */
4000 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
4001 flush_cnt = 1;
4002 } else {
4003 QETH_DBF_TEXT(trace, 6, "fillbfpa");
4004#ifdef CONFIG_QETH_PERF_STATS
4005 queue->card->perf_stats.skbs_sent_pack++;
4006#endif
4007 if (buf->next_element_to_fill >=
4008 QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
4009 /*
4010 * packed buffer is full -> set state PRIMED
4011 * -> will be flushed
4012 */
4013 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
4014 flush_cnt = 1;
4015 }
4016 }
4017 return flush_cnt;
4018}
4019
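/*
 * Non-packing fast path used for HiperSockets (IQD) devices: take the
 * queue lock, claim the next empty buffer (or the run of buffers an
 * EDDP context needs), drop the lock and flush the buffer(s) right
 * away.
 */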
4020static inline int
4021qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4022 struct sk_buff *skb, struct qeth_hdr *hdr,
4023 int elements_needed,
4024 struct qeth_eddp_context *ctx)
4025{
4026 struct qeth_qdio_out_buffer *buffer;
4027 int buffers_needed = 0;
4028 int flush_cnt = 0;
4029 int index;
4030
4031 QETH_DBF_TEXT(trace, 6, "dosndpfa");
4032
4033 /* spin until we get the queue ... */
4034 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
4035 QETH_OUT_Q_LOCKED,
4036 &queue->state));
4037 /* ... now we've got the queue */
4038 index = queue->next_buf_to_fill;
4039 buffer = &queue->bufs[queue->next_buf_to_fill];
4040 /*
4041 * check if buffer is empty to make sure that we do not 'overtake'
4042 * ourselves and try to fill a buffer that is already primed
4043 */
4044 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
4045 card->stats.tx_dropped++;
4046 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4047 return -EBUSY;
4048 }
4049 if (ctx == NULL)
4050 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
4051 QDIO_MAX_BUFFERS_PER_Q;
4052 else {
4053 buffers_needed = qeth_eddp_check_buffers_for_context(queue,ctx);
4054 if (buffers_needed < 0) {
4055 card->stats.tx_dropped++;
4056 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4057 return -EBUSY;
4058 }
4059 queue->next_buf_to_fill =
4060 (queue->next_buf_to_fill + buffers_needed) %
4061 QDIO_MAX_BUFFERS_PER_Q;
4062 }
4063 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4064 if (ctx == NULL) {
4065 qeth_fill_buffer(queue, buffer, skb);
4066 qeth_flush_buffers(queue, 0, index, 1);
4067 } else {
4068 flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
4069 WARN_ON(buffers_needed != flush_cnt);
4070 qeth_flush_buffers(queue, 0, index, flush_cnt);
4071 }
4072 return 0;
4073}
4074
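/*
 * Packing-capable send path used for OSA devices. Under the queue lock
 * the queue may switch into packing mode, and a packing buffer is only
 * flushed once it is full; the atomic_dec_return() loop below the fill
 * re-checks the queue state because the output handler can bump it to
 * request another flush or a switch back to non-packing mode.
 */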
4075static inline int
4076qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4077 struct sk_buff *skb, struct qeth_hdr *hdr,
4078 int elements_needed, struct qeth_eddp_context *ctx)
4079{
4080 struct qeth_qdio_out_buffer *buffer;
4081 int start_index;
4082 int flush_count = 0;
4083 int do_pack = 0;
4084 int tmp;
4085 int rc = 0;
4086
4087 QETH_DBF_TEXT(trace, 6, "dosndpkt");
4088
4089 /* spin until we get the queue ... */
4090 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
4091 QETH_OUT_Q_LOCKED,
4092 &queue->state));
4093 start_index = queue->next_buf_to_fill;
4094 buffer = &queue->bufs[queue->next_buf_to_fill];
4095 /*
4096 * check if buffer is empty to make sure that we do not 'overtake'
4097 * ourselves and try to fill a buffer that is already primed
4098 */
4099 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
4100 card->stats.tx_dropped++;
4101 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4102 return -EBUSY;
4103 }
4104 /* check if we need to switch packing state of this queue */
4105 qeth_switch_to_packing_if_needed(queue);
4106 if (queue->do_pack){
4107 do_pack = 1;
4108 if (ctx == NULL) {
4109 /* does packet fit in current buffer? */
4110 if((QETH_MAX_BUFFER_ELEMENTS(card) -
4111 buffer->next_element_to_fill) < elements_needed){
4112 /* ... no -> set state PRIMED */
4113 atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
4114 flush_count++;
4115 queue->next_buf_to_fill =
4116 (queue->next_buf_to_fill + 1) %
4117 QDIO_MAX_BUFFERS_PER_Q;
4118 buffer = &queue->bufs[queue->next_buf_to_fill];
4119 /* we did a step forward, so check buffer state
4120 * again */
4121 if (atomic_read(&buffer->state) !=
4122 QETH_QDIO_BUF_EMPTY){
4123 card->stats.tx_dropped++;
4124 qeth_flush_buffers(queue, 0, start_index, flush_count);
4125 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4126 return -EBUSY;
4127 }
4128 }
4129 } else {
4130 /* check if we have enough elements (including following
4131 * free buffers) to handle eddp context */
4132 if (qeth_eddp_check_buffers_for_context(queue,ctx) < 0){
4133 PRINT_WARN("eddp tx_dropped 1\n");
4134 card->stats.tx_dropped++;
4135 rc = -EBUSY;
4136 goto out;
4137 }
4138 }
4139 }
4140 if (ctx == NULL)
4141 tmp = qeth_fill_buffer(queue, buffer, skb);
4142 else {
4143 tmp = qeth_eddp_fill_buffer(queue,ctx,queue->next_buf_to_fill);
4144 if (tmp < 0) {
4145 PRINT_WARN("eddp tx_dropped 2\n");
4146 card->stats.tx_dropped++;
4147 rc = -EBUSY;
4148 goto out;
4149 }
4150 }
4151 queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
4152 QDIO_MAX_BUFFERS_PER_Q;
4153 flush_count += tmp;
4154out:
4155 if (flush_count)
4156 qeth_flush_buffers(queue, 0, start_index, flush_count);
4157 /*
4158 * queue->state will go from LOCKED -> UNLOCKED or from
4159 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
4160 * (switch packing state or flush buffer to get another pci flag out).
4161 * In that case we will enter this loop
4162 */
4163 while (atomic_dec_return(&queue->state)){
4164 flush_count = 0;
4165 start_index = queue->next_buf_to_fill;
4166 /* check if we can go back to non-packing state */
4167 flush_count += qeth_switch_to_nonpacking_if_needed(queue);
4168 /*
4169 * check if we need to flush a packing buffer to get a pci
4170 * flag out on the queue
4171 */
4172 if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
4173 flush_count += qeth_flush_buffers_on_no_pci(queue);
4174 if (flush_count)
4175 qeth_flush_buffers(queue, 0, start_index, flush_count);
4176 }
4177 /* at this point the queue is UNLOCKED again */
4178#ifdef CONFIG_QETH_PERF_STATS
4179 if (do_pack)
4180 queue->card->perf_stats.bufs_sent_pack += flush_count;
4181#endif /* CONFIG_QETH_PERF_STATS */
4182
4183 return rc;
4184}
4185
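/*
 * Central transmit routine: strips the fake LL header from IP packets
 * when fake_ll is active, drops broadcasts on cards that are not
 * broadcast capable, selects the priority queue and then sends via
 * TSO, an EDDP context or the plain buffer-fill path, updating the
 * statistics on success.
 */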
4186static inline int
4187qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
4188{
4189 int ipv = 0;
4190 int cast_type;
4191 struct qeth_qdio_out_q *queue;
4192 struct qeth_hdr *hdr;
4193 int elements_needed = 0;
4194 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
4195 struct qeth_eddp_context *ctx = NULL;
4196 int rc;
4197
4198 QETH_DBF_TEXT(trace, 6, "sendpkt");
4199
4200 if (!card->options.layer2) {
4201 ipv = qeth_get_ip_version(skb);
4202 if ((card->dev->hard_header == qeth_fake_header) && ipv) {
4203 if ((skb = qeth_pskb_unshare(skb,GFP_ATOMIC)) == NULL) {
4204 card->stats.tx_dropped++;
4205 dev_kfree_skb_irq(skb);
4206 return 0;
4207 }
4208 skb_pull(skb, QETH_FAKE_LL_LEN);
4209 }
4210 }
4211 cast_type = qeth_get_cast_type(card, skb);
4212 if ((cast_type == RTN_BROADCAST) && (card->info.broadcast_capable == 0)){
4213 card->stats.tx_dropped++;
4214 card->stats.tx_errors++;
4215 dev_kfree_skb_any(skb);
4216 return NETDEV_TX_OK;
4217 }
4218 queue = card->qdio.out_qs
4219 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
4220
4221 if (skb_shinfo(skb)->tso_size)
4222 large_send = card->options.large_send;
4223
4224 if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))){
4225 QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc);
4226 return rc;
4227 }
4228 /* are we able to do TSO? If so, prepare and send it from here */
4229 if ((large_send == QETH_LARGE_SEND_TSO) &&
4230 (cast_type == RTN_UNSPEC)) {
4231 rc = qeth_tso_send_packet(card, skb, queue,
4232 ipv, cast_type);
4233 goto do_statistics;
4234 }
4235
4236 qeth_fill_header(card, hdr, skb, ipv, cast_type);
4237 if (large_send == QETH_LARGE_SEND_EDDP) {
4238 ctx = qeth_eddp_create_context(card, skb, hdr);
4239 if (ctx == NULL) {
4240 PRINT_WARN("could not create eddp context\n");
4241 return -EINVAL;
4242 }
4243 } else {
4244 elements_needed = qeth_get_elements_no(card,(void*) hdr, skb);
4245 if (!elements_needed)
4246 return -EINVAL;
4247 }
4248
4249 if (card->info.type != QETH_CARD_TYPE_IQD)
4250 rc = qeth_do_send_packet(card, queue, skb, hdr,
4251 elements_needed, ctx);
4252 else
4253 rc = qeth_do_send_packet_fast(card, queue, skb, hdr,
4254 elements_needed, ctx);
4255do_statistics:
4256 if (!rc){
4257 card->stats.tx_packets++;
4258 card->stats.tx_bytes += skb->len;
4259#ifdef CONFIG_QETH_PERF_STATS
4260 if (skb_shinfo(skb)->tso_size) {
4261 card->perf_stats.large_send_bytes += skb->len;
4262 card->perf_stats.large_send_cnt++;
4263 }
4264 if (skb_shinfo(skb)->nr_frags > 0){
4265 card->perf_stats.sg_skbs_sent++;
4266 /* nr_frags + skb->data */
4267 card->perf_stats.sg_frags_sent +=
4268 skb_shinfo(skb)->nr_frags + 1;
4269 }
4270#endif /* CONFIG_QETH_PERF_STATS */
4271 }
4272 if (ctx != NULL) {
4273 /* drop creator's reference */
4274 qeth_eddp_put_context(ctx);
4275 /* free skb; it's not referenced by a buffer */
4276 if (rc == 0)
4277 dev_kfree_skb_any(skb);
4278
4279 }
4280 return rc;
4281}
4282
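/*
 * There is no real MII PHY behind a qeth device; qeth_mdio_read()
 * fabricates plausible register contents (full duplex, link up, 100Mb
 * unless the link is Gigabit/10Gb, a PHY id derived from the MAC
 * address) so that the SIOCGMIIPHY/SIOCGMIIREG ioctls have something
 * sensible to report.
 */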
4283static int
4284qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4285{
4286 struct qeth_card *card = (struct qeth_card *) dev->priv;
4287 int rc = 0;
4288
4289 switch(regnum){
4290 case MII_BMCR: /* Basic mode control register */
4291 rc = BMCR_FULLDPLX;
4292 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&&
4293 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
4294 rc |= BMCR_SPEED100;
4295 break;
4296 case MII_BMSR: /* Basic mode status register */
4297 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4298 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4299 BMSR_100BASE4;
4300 break;
4301 case MII_PHYSID1: /* PHYS ID 1 */
4302 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4303 dev->dev_addr[2];
4304 rc = (rc >> 5) & 0xFFFF;
4305 break;
4306 case MII_PHYSID2: /* PHYS ID 2 */
4307 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4308 break;
4309 case MII_ADVERTISE: /* Advertisement control reg */
4310 rc = ADVERTISE_ALL;
4311 break;
4312 case MII_LPA: /* Link partner ability reg */
4313 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4314 LPA_100BASE4 | LPA_LPACK;
4315 break;
4316 case MII_EXPANSION: /* Expansion register */
4317 break;
4318 case MII_DCOUNTER: /* disconnect counter */
4319 break;
4320 case MII_FCSCOUNTER: /* false carrier counter */
4321 break;
4322 case MII_NWAYTEST: /* N-way auto-neg test register */
4323 break;
4324 case MII_RERRCOUNTER: /* rx error counter */
4325 rc = card->stats.rx_errors;
4326 break;
4327 case MII_SREVISION: /* silicon revision */
4328 break;
4329 case MII_RESV1: /* reserved 1 */
4330 break;
4331 case MII_LBRERROR: /* loopback, rx, bypass error */
4332 break;
4333 case MII_PHYADDR: /* physical address */
4334 break;
4335 case MII_RESV2: /* reserved 2 */
4336 break;
4337 case MII_TPISTATUS: /* TPI status for 10mbps */
4338 break;
4339 case MII_NCONFIG: /* network interface config */
4340 break;
4341 default:
4342 rc = 0;
4343 break;
4344 }
4345 return rc;
4346}
4347
4348static void
4349qeth_mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
4350{
4351 switch(regnum){
4352 case MII_BMCR: /* Basic mode control register */
4353 case MII_BMSR: /* Basic mode status register */
4354 case MII_PHYSID1: /* PHYS ID 1 */
4355 case MII_PHYSID2: /* PHYS ID 2 */
4356 case MII_ADVERTISE: /* Advertisement control reg */
4357 case MII_LPA: /* Link partner ability reg */
4358 case MII_EXPANSION: /* Expansion register */
4359 case MII_DCOUNTER: /* disconnect counter */
4360 case MII_FCSCOUNTER: /* false carrier counter */
4361 case MII_NWAYTEST: /* N-way auto-neg test register */
4362 case MII_RERRCOUNTER: /* rx error counter */
4363 case MII_SREVISION: /* silicon revision */
4364 case MII_RESV1: /* reserved 1 */
4365 case MII_LBRERROR: /* loopback, rx, bypass error */
4366 case MII_PHYADDR: /* physical address */
4367 case MII_RESV2: /* reserved 2 */
4368 case MII_TPISTATUS: /* TPI status for 10mbps */
4369 case MII_NCONFIG: /* network interface config */
4370 default:
4371 break;
4372 }
4373}
4374
4375static inline const char *
4376qeth_arp_get_error_cause(int *rc)
4377{
4378 switch (*rc) {
4379 case QETH_IPA_ARP_RC_FAILED:
4380 *rc = -EIO;
4381 return "operation failed";
4382 case QETH_IPA_ARP_RC_NOTSUPP:
4383 *rc = -EOPNOTSUPP;
4384 return "operation not supported";
4385 case QETH_IPA_ARP_RC_OUT_OF_RANGE:
4386 *rc = -EINVAL;
4387 return "argument out of range";
4388 case QETH_IPA_ARP_RC_Q_NOTSUPP:
4389 *rc = -EOPNOTSUPP;
4390 return "query operation not supported";
4391 case QETH_IPA_ARP_RC_Q_NO_DATA:
4392 *rc = -ENOENT;
4393 return "no query data available";
4394 default:
4395 return "unknown error";
4396 }
4397}
4398
4399static int
4400qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs,
4401 __u16, long);
4402
4403static int
4404qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
4405{
4406 int tmp;
4407 int rc;
4408
4409 QETH_DBF_TEXT(trace,3,"arpstnoe");
4410
4411 /* TODO: really not supported by GuestLAN? */
4412 if (card->info.guestlan)
4413 return -EOPNOTSUPP;
4414 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4415 PRINT_WARN("ARP processing not supported "
4416 "on %s!\n", QETH_CARD_IFNAME(card));
4417 return -EOPNOTSUPP;
4418 }
4419 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
4420 IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
4421 no_entries);
4422 if (rc) {
4423 tmp = rc;
4424 PRINT_WARN("Could not set number of ARP entries on %s: "
4425 "%s (0x%x/%d)\n",
4426 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
4427 tmp, tmp);
4428 }
4429 return rc;
4430}
4431
4432static inline void
4433qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
4434 struct qeth_arp_query_data *qdata,
4435 int entry_size, int uentry_size)
4436{
4437 char *entry_ptr;
4438 char *uentry_ptr;
4439 int i;
4440
4441 entry_ptr = (char *)&qdata->data;
4442 uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
4443 for (i = 0; i < qdata->no_entries; ++i){
4444 /* strip off 32 bytes "media specific information" */
4445 memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
4446 entry_ptr += entry_size;
4447 uentry_ptr += uentry_size;
4448 }
4449}
4450
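/*
 * Callback for IPA_CMD_ASS_ARP_QUERY_INFO replies: appends each reply's
 * entries to the user buffer described by qeth_arp_query_info, optionally
 * stripped of the 32-byte media-specific part. Returning 1 keeps the
 * command waiting for further replies; returning 0 completes it, storing
 * the total entry count and the reply_bits mask in the buffer header.
 */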
4451static int
4452qeth_arp_query_cb(struct qeth_card *card, struct qeth_reply *reply,
4453 unsigned long data)
4454{
4455 struct qeth_ipa_cmd *cmd;
4456 struct qeth_arp_query_data *qdata;
4457 struct qeth_arp_query_info *qinfo;
4458 int entry_size;
4459 int uentry_size;
4460 int i;
4461
4462 QETH_DBF_TEXT(trace,4,"arpquecb");
4463
4464 qinfo = (struct qeth_arp_query_info *) reply->param;
4465 cmd = (struct qeth_ipa_cmd *) data;
4466 if (cmd->hdr.return_code) {
4467 QETH_DBF_TEXT_(trace,4,"qaer1%i", cmd->hdr.return_code);
4468 return 0;
4469 }
4470 if (cmd->data.setassparms.hdr.return_code) {
4471 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
4472 QETH_DBF_TEXT_(trace,4,"qaer2%i", cmd->hdr.return_code);
4473 return 0;
4474 }
4475 qdata = &cmd->data.setassparms.data.query_arp;
4476 switch(qdata->reply_bits){
4477 case 5:
4478 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
4479 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4480 uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
4481 break;
4482 case 7:
4483 /* fall through to default */
4484 default:
4485 /* tr is the same as eth -> entry7 */
4486 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
4487 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4488 uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
4489 break;
4490 }
4491 /* check if there is enough room in userspace */
4492 if ((qinfo->udata_len - qinfo->udata_offset) <
4493 qdata->no_entries * uentry_size){
4494 QETH_DBF_TEXT_(trace, 4, "qaer3%i", -ENOMEM);
4495 cmd->hdr.return_code = -ENOMEM;
4496 PRINT_WARN("query ARP user space buffer is too small for "
4497 "the returned number of ARP entries. "
4498 "Aborting query!\n");
4499 goto out_error;
4500 }
4501 QETH_DBF_TEXT_(trace, 4, "anore%i",
4502 cmd->data.setassparms.hdr.number_of_replies);
4503 QETH_DBF_TEXT_(trace, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
4504 QETH_DBF_TEXT_(trace, 4, "anoen%i", qdata->no_entries);
4505
4506 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
4507 /* strip off "media specific information" */
4508 qeth_copy_arp_entries_stripped(qinfo, qdata, entry_size,
4509 uentry_size);
4510 } else
4511 /*copy entries to user buffer*/
4512 memcpy(qinfo->udata + qinfo->udata_offset,
4513 (char *)&qdata->data, qdata->no_entries*uentry_size);
4514
4515 qinfo->no_entries += qdata->no_entries;
4516 qinfo->udata_offset += (qdata->no_entries*uentry_size);
4517 /* check if all replies received ... */
4518 if (cmd->data.setassparms.hdr.seq_no <
4519 cmd->data.setassparms.hdr.number_of_replies)
4520 return 1;
4521 memcpy(qinfo->udata, &qinfo->no_entries, 4);
4522 /* keep STRIP_ENTRIES flag so the user program can distinguish
4523 * stripped entries from normal ones */
4524 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4525 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
4526 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET,&qdata->reply_bits,2);
4527 return 0;
4528out_error:
4529 i = 0;
4530 memcpy(qinfo->udata, &i, 4);
4531 return 0;
4532}
4533
4534static int
4535qeth_send_ipa_arp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
4536 int len, int (*reply_cb)(struct qeth_card *,
4537 struct qeth_reply *,
4538 unsigned long),
4539 void *reply_param)
4540{
4541 QETH_DBF_TEXT(trace,4,"sendarp");
4542
4543 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
4544 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
4545 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
4546 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4547 reply_cb, reply_param);
4548}
4549
4550static int
4551qeth_send_ipa_snmp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
4552 int len, int (*reply_cb)(struct qeth_card *,
4553 struct qeth_reply *,
4554 unsigned long),
4555 void *reply_param)
4556{
4557 u16 s1, s2;
4558
4559 QETH_DBF_TEXT(trace,4,"sendsnmp");
4560
4561 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
4562 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
4563 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
4564 /* adjust PDU length fields in IPA_PDU_HEADER */
4565 s1 = (u16) (IPA_PDU_HEADER_SIZE + len);
4566 s2 = (u16) len;
4567 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
4568 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
4569 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
4570 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
4571 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4572 reply_cb, reply_param);
4573}
4574
4575static struct qeth_cmd_buffer *
4576qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs,
4577 __u16, __u16, enum qeth_prot_versions);
4578static int
4579qeth_arp_query(struct qeth_card *card, char *udata)
4580{
4581 struct qeth_cmd_buffer *iob;
4582 struct qeth_arp_query_info qinfo = {0, };
4583 int tmp;
4584 int rc;
4585
4586 QETH_DBF_TEXT(trace,3,"arpquery");
4587
4588 /*
4589 * currently GuestLAN only delivers all zeros on query arp,
4590 * even though arp processing is supported (according to IPA supp.
4591 * funcs flags); since all zeros carries no valuable information,
4592 * we say EOPNOTSUPP for all ARP functions
4593 */
4594 /*if (card->info.guestlan)
4595 return -EOPNOTSUPP; */
4596 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
4597 IPA_ARP_PROCESSING)) {
4598 PRINT_WARN("ARP processing not supported "
4599 "on %s!\n", QETH_CARD_IFNAME(card));
4600 return -EOPNOTSUPP;
4601 }
4602 /* get size of userspace buffer and mask_bits -> 6 bytes */
4603 if (copy_from_user(&qinfo, udata, 6))
4604 return -EFAULT;
4605 if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL)))
4606 return -ENOMEM;
4607 memset(qinfo.udata, 0, qinfo.udata_len);
4608 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
4609 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4610 IPA_CMD_ASS_ARP_QUERY_INFO,
4611 sizeof(int),QETH_PROT_IPV4);
4612
4613 rc = qeth_send_ipa_arp_cmd(card, iob,
4614 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
4615 qeth_arp_query_cb, (void *)&qinfo);
4616 if (rc) {
4617 tmp = rc;
4618 PRINT_WARN("Error while querying ARP cache on %s: %s "
4619 "(0x%x/%d)\n",
4620 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
4621 tmp, tmp);
4622 if (copy_to_user(udata, qinfo.udata, 4))
4623 rc = -EFAULT;
4624 } else if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4625 rc = -EFAULT;
4626 kfree(qinfo.udata);
4627 return rc;
4628}
4629
4630/**
4631 * SNMP command callback
4632 */
4633static int
4634qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply,
4635 unsigned long sdata)
4636{
4637 struct qeth_ipa_cmd *cmd;
4638 struct qeth_arp_query_info *qinfo;
4639 struct qeth_snmp_cmd *snmp;
4640 unsigned char *data;
4641 __u16 data_len;
4642
4643 QETH_DBF_TEXT(trace,3,"snpcmdcb");
4644
4645 cmd = (struct qeth_ipa_cmd *) sdata;
4646 data = (unsigned char *)((char *)cmd - reply->offset);
4647 qinfo = (struct qeth_arp_query_info *) reply->param;
4648 snmp = &cmd->data.setadapterparms.data.snmp;
4649
4650 if (cmd->hdr.return_code) {
4651 QETH_DBF_TEXT_(trace,4,"scer1%i", cmd->hdr.return_code);
4652 return 0;
4653 }
4654 if (cmd->data.setadapterparms.hdr.return_code) {
4655 cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
4656 QETH_DBF_TEXT_(trace,4,"scer2%i", cmd->hdr.return_code);
4657 return 0;
4658 }
4659 data_len = *((__u16*)QETH_IPA_PDU_LEN_PDU1(data));
4660 if (cmd->data.setadapterparms.hdr.seq_no == 1)
4661 data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
4662 else
4663 data_len -= (__u16)((char*)&snmp->request - (char *)cmd);
4664
4665 /* check if there is enough room in userspace */
4666 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4667 QETH_DBF_TEXT_(trace, 4, "scer3%i", -ENOMEM);
4668 cmd->hdr.return_code = -ENOMEM;
4669 return 0;
4670 }
4671 QETH_DBF_TEXT_(trace, 4, "snore%i",
4672 cmd->data.setadapterparms.hdr.used_total);
4673 QETH_DBF_TEXT_(trace, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no);
4674 /*copy entries to user buffer*/
4675 if (cmd->data.setadapterparms.hdr.seq_no == 1) {
4676 memcpy(qinfo->udata + qinfo->udata_offset,
4677 (char *)snmp,
4678 data_len + offsetof(struct qeth_snmp_cmd,data));
4679 qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
4680 } else {
4681 memcpy(qinfo->udata + qinfo->udata_offset,
4682 (char *)&snmp->request, data_len);
4683 }
4684 qinfo->udata_offset += data_len;
4685 /* check if all replies received ... */
4686 QETH_DBF_TEXT_(trace, 4, "srtot%i",
4687 cmd->data.setadapterparms.hdr.used_total);
4688 QETH_DBF_TEXT_(trace, 4, "srseq%i",
4689 cmd->data.setadapterparms.hdr.seq_no);
4690 if (cmd->data.setadapterparms.hdr.seq_no <
4691 cmd->data.setadapterparms.hdr.used_total)
4692 return 1;
4693 return 0;
4694}
4695
4696static struct qeth_cmd_buffer *
4697qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds,
4698 enum qeth_prot_versions );
4699
4700static struct qeth_cmd_buffer *
4701qeth_get_adapter_cmd(struct qeth_card *card, __u32 command, __u32 cmdlen)
4702{
4703 struct qeth_cmd_buffer *iob;
4704 struct qeth_ipa_cmd *cmd;
4705
4706 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETADAPTERPARMS,
4707 QETH_PROT_IPV4);
4708 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4709 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
4710 cmd->data.setadapterparms.hdr.command_code = command;
4711 cmd->data.setadapterparms.hdr.used_total = 1;
4712 cmd->data.setadapterparms.hdr.seq_no = 1;
4713
4714 return iob;
4715}
4716
4717/**
4718 * function to send SNMP commands to OSA-E card
4719 */
4720static int
4721qeth_snmp_command(struct qeth_card *card, char *udata)
4722{
4723 struct qeth_cmd_buffer *iob;
4724 struct qeth_ipa_cmd *cmd;
4725 struct qeth_snmp_ureq *ureq;
4726 int req_len;
4727 struct qeth_arp_query_info qinfo = {0, };
4728 int rc = 0;
4729
4730 QETH_DBF_TEXT(trace,3,"snmpcmd");
4731
4732 if (card->info.guestlan)
4733 return -EOPNOTSUPP;
4734
4735 if ((!qeth_adp_supported(card,IPA_SETADP_SET_SNMP_CONTROL)) &&
4736 (!card->options.layer2) ) {
4737 PRINT_WARN("SNMP Query MIBS not supported "
4738 "on %s!\n", QETH_CARD_IFNAME(card));
4739 return -EOPNOTSUPP;
4740 }
4741 /* skip 4 bytes (data_len struct member) to get req_len */
4742 if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
4743 return -EFAULT;
 /* reject oversized requests before allocating a buffer for them */
 if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE))
 return -EINVAL;
4744 ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
4745 if (!ureq) {
4746 QETH_DBF_TEXT(trace, 2, "snmpnome");
4747 return -ENOMEM;
4748 }
4749 if (copy_from_user(ureq, udata,
4750 req_len+sizeof(struct qeth_snmp_ureq_hdr))){
4751 kfree(ureq);
4752 return -EFAULT;
4753 }
4754 qinfo.udata_len = ureq->hdr.data_len;
4755 if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL))){
4756 kfree(ureq);
4757 return -ENOMEM;
4758 }
4759 memset(qinfo.udata, 0, qinfo.udata_len);
4760 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4761
4762 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
4763 QETH_SNMP_SETADP_CMDLENGTH + req_len);
4764 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4765 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
4766 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
4767 qeth_snmp_command_cb, (void *)&qinfo);
4768 if (rc)
4769 PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
4770 QETH_CARD_IFNAME(card), rc);
4771 else if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4772 rc = -EFAULT;
4773
4774 kfree(ureq);
4775 kfree(qinfo.udata);
4776 return rc;
4777}
4778
4779static int
4780qeth_default_setassparms_cb(struct qeth_card *, struct qeth_reply *,
4781 unsigned long);
4782
4783static int
4784qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *,
4785 __u16, long,
4786 int (*reply_cb)
4787 (struct qeth_card *, struct qeth_reply *, unsigned long),
4788 void *reply_param);
4789
4790static int
4791qeth_arp_add_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
4792{
4793 struct qeth_cmd_buffer *iob;
4794 char buf[16];
4795 int tmp;
4796 int rc;
4797
4798 QETH_DBF_TEXT(trace,3,"arpadent");
4799
4800 /*
4801 * currently GuestLAN only delivers all zeros on query arp,
4802 * even though arp processing is supported (according to IPA supp.
4803 * funcs flags); since all zeros carries no valuable information,
4804 * we say EOPNOTSUPP for all ARP functions
4805 */
4806 if (card->info.guestlan)
4807 return -EOPNOTSUPP;
4808 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4809 PRINT_WARN("ARP processing not supported "
4810 "on %s!\n", QETH_CARD_IFNAME(card));
4811 return -EOPNOTSUPP;
4812 }
4813
4814 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4815 IPA_CMD_ASS_ARP_ADD_ENTRY,
4816 sizeof(struct qeth_arp_cache_entry),
4817 QETH_PROT_IPV4);
4818 rc = qeth_send_setassparms(card, iob,
4819 sizeof(struct qeth_arp_cache_entry),
4820 (unsigned long) entry,
4821 qeth_default_setassparms_cb, NULL);
4822 if (rc) {
4823 tmp = rc;
4824 qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
4825 PRINT_WARN("Could not add ARP entry for address %s on %s: "
4826 "%s (0x%x/%d)\n",
4827 buf, QETH_CARD_IFNAME(card),
4828 qeth_arp_get_error_cause(&rc), tmp, tmp);
4829 }
4830 return rc;
4831}
4832
4833static int
4834qeth_arp_remove_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
4835{
4836 struct qeth_cmd_buffer *iob;
4837 char buf[16] = {0, };
4838 int tmp;
4839 int rc;
4840
4841 QETH_DBF_TEXT(trace,3,"arprment");
4842
4843 /*
4844 * currently GuestLAN only delivers all zeros on query arp,
4845 * even though arp processing is supported (according to IPA supp.
4846 * funcs flags); since all zeros carries no valuable information,
4847 * we say EOPNOTSUPP for all ARP functions
4848 */
4849 if (card->info.guestlan)
4850 return -EOPNOTSUPP;
4851 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4852 PRINT_WARN("ARP processing not supported "
4853 "on %s!\n", QETH_CARD_IFNAME(card));
4854 return -EOPNOTSUPP;
4855 }
4856 memcpy(buf, entry, 12);
4857 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4858 IPA_CMD_ASS_ARP_REMOVE_ENTRY,
4859 12,
4860 QETH_PROT_IPV4);
4861 rc = qeth_send_setassparms(card, iob,
4862 12, (unsigned long)buf,
4863 qeth_default_setassparms_cb, NULL);
4864 if (rc) {
4865 tmp = rc;
4866 memset(buf, 0, 16);
4867 qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
4868 PRINT_WARN("Could not delete ARP entry for address %s on %s: "
4869 "%s (0x%x/%d)\n",
4870 buf, QETH_CARD_IFNAME(card),
4871 qeth_arp_get_error_cause(&rc), tmp, tmp);
4872 }
4873 return rc;
4874}
4875
4876static int
4877qeth_arp_flush_cache(struct qeth_card *card)
4878{
4879 int rc;
4880 int tmp;
4881
4882 QETH_DBF_TEXT(trace,3,"arpflush");
4883
4884 /*
4885 * currently GuestLAN only delivers all zeros on query arp,
4886 * even though arp processing is supported (according to IPA supp.
4887 * funcs flags); since all zeros carries no valuable information,
4888 * we say EOPNOTSUPP for all ARP functions
4889 */
4890 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
4891 return -EOPNOTSUPP;
4892 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4893 PRINT_WARN("ARP processing not supported "
4894 "on %s!\n", QETH_CARD_IFNAME(card));
4895 return -EOPNOTSUPP;
4896 }
4897 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
4898 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
4899 if (rc){
4900 tmp = rc;
4901 PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n",
4902 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
4903 tmp, tmp);
4904 }
4905 return rc;
4906}
4907
4908static int
4909qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4910{
4911 struct qeth_card *card = (struct qeth_card *)dev->priv;
4912 struct qeth_arp_cache_entry arp_entry;
4913 struct mii_ioctl_data *mii_data;
4914 int rc = 0;
4915
4916 if (!card)
4917 return -ENODEV;
4918
4919 if ((card->state != CARD_STATE_UP) &&
4920 (card->state != CARD_STATE_SOFTSETUP))
4921 return -ENODEV;
4922
4923 switch (cmd){
4924 case SIOC_QETH_ARP_SET_NO_ENTRIES:
4925 if ( !capable(CAP_NET_ADMIN) ||
4926 (card->options.layer2) ) {
4927 rc = -EPERM;
4928 break;
4929 }
4930 rc = qeth_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
4931 break;
4932 case SIOC_QETH_ARP_QUERY_INFO:
4933 if ( !capable(CAP_NET_ADMIN) ||
4934 (card->options.layer2) ) {
4935 rc = -EPERM;
4936 break;
4937 }
4938 rc = qeth_arp_query(card, rq->ifr_ifru.ifru_data);
4939 break;
4940 case SIOC_QETH_ARP_ADD_ENTRY:
4941 if ( !capable(CAP_NET_ADMIN) ||
4942 (card->options.layer2) ) {
4943 rc = -EPERM;
4944 break;
4945 }
4946 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
4947 sizeof(struct qeth_arp_cache_entry)))
4948 rc = -EFAULT;
4949 else
4950 rc = qeth_arp_add_entry(card, &arp_entry);
4951 break;
4952 case SIOC_QETH_ARP_REMOVE_ENTRY:
4953 if ( !capable(CAP_NET_ADMIN) ||
4954 (card->options.layer2) ) {
4955 rc = -EPERM;
4956 break;
4957 }
4958 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
4959 sizeof(struct qeth_arp_cache_entry)))
4960 rc = -EFAULT;
4961 else
4962 rc = qeth_arp_remove_entry(card, &arp_entry);
4963 break;
4964 case SIOC_QETH_ARP_FLUSH_CACHE:
4965 if ( !capable(CAP_NET_ADMIN) ||
4966 (card->options.layer2) ) {
4967 rc = -EPERM;
4968 break;
4969 }
4970 rc = qeth_arp_flush_cache(card);
4971 break;
4972 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
4973 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
4974 break;
4975 case SIOC_QETH_GET_CARD_TYPE:
4976 if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
4977 !card->info.guestlan)
4978 return 1;
4979 return 0;
4981 case SIOCGMIIPHY:
4982 mii_data = if_mii(rq);
4983 mii_data->phy_id = 0;
4984 break;
4985 case SIOCGMIIREG:
4986 mii_data = if_mii(rq);
4987 if (mii_data->phy_id != 0)
4988 rc = -EINVAL;
4989 else
4990 mii_data->val_out = qeth_mdio_read(dev,mii_data->phy_id,
4991 mii_data->reg_num);
4992 break;
4993 case SIOCSMIIREG:
4994 rc = -EOPNOTSUPP;
4995 break;
4996 /* TODO: remove the break above once qeth_mdio_write does something */
4997 if (!capable(CAP_NET_ADMIN)){
4998 rc = -EPERM;
4999 break;
5000 }
5001 mii_data = if_mii(rq);
5002 if (mii_data->phy_id != 0)
5003 rc = -EINVAL;
5004 else
5005 qeth_mdio_write(dev, mii_data->phy_id, mii_data->reg_num,
5006 mii_data->val_in);
5007 break;
5008 default:
5009 rc = -EOPNOTSUPP;
5010 }
5011 if (rc)
5012 QETH_DBF_TEXT_(trace, 2, "ioce%d", rc);
5013 return rc;
5014}
5015
5016static struct net_device_stats *
5017qeth_get_stats(struct net_device *dev)
5018{
5019 struct qeth_card *card;
5020
5021 card = (struct qeth_card *) (dev->priv);
5022
5023 QETH_DBF_TEXT(trace,5,"getstat");
5024
5025 return &card->stats;
5026}
5027
5028static int
5029qeth_change_mtu(struct net_device *dev, int new_mtu)
5030{
5031 struct qeth_card *card;
5032 char dbf_text[15];
5033
5034 card = (struct qeth_card *) (dev->priv);
5035
5036 QETH_DBF_TEXT(trace,4,"chgmtu");
5037 sprintf(dbf_text, "%8x", new_mtu);
5038 QETH_DBF_TEXT(trace,4,dbf_text);
5039
5040 if (new_mtu < 64)
5041 return -EINVAL;
5042 if (new_mtu > 65535)
5043 return -EINVAL;
5044 if ((!qeth_is_supported(card,IPA_IP_FRAGMENTATION)) &&
5045 (!qeth_mtu_is_valid(card, new_mtu)))
5046 return -EINVAL;
5047 dev->mtu = new_mtu;
5048 return 0;
5049}
5050
5051#ifdef CONFIG_QETH_VLAN
5052static void
5053qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5054{
5055 struct qeth_card *card;
5056 unsigned long flags;
5057
5058 QETH_DBF_TEXT(trace,4,"vlanreg");
5059
5060 card = (struct qeth_card *) dev->priv;
5061 spin_lock_irqsave(&card->vlanlock, flags);
5062 card->vlangrp = grp;
5063 spin_unlock_irqrestore(&card->vlanlock, flags);
5064}
5065
5066static inline void
5067qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf,
5068 unsigned short vid)
5069{
5071 struct sk_buff *skb;
5072 struct sk_buff_head tmp_list;
5073
5074 skb_queue_head_init(&tmp_list);
5075 /* a single pass drains skb_list; skbs of other VLANs are re-queued below */
5076 while ((skb = skb_dequeue(&buf->skb_list))){
5077 if (vlan_tx_tag_present(skb) &&
5078 (vlan_tx_tag_get(skb) == vid)) {
5079 atomic_dec(&skb->users);
5080 dev_kfree_skb(skb);
5081 } else
5082 skb_queue_tail(&tmp_list, skb);
5083 }
5085 while ((skb = skb_dequeue(&tmp_list)))
5086 skb_queue_tail(&buf->skb_list, skb);
5087}
5088
5089static void
5090qeth_free_vlan_skbs(struct qeth_card *card, unsigned short vid)
5091{
5092 int i, j;
5093
5094 QETH_DBF_TEXT(trace, 4, "frvlskbs");
5095 for (i = 0; i < card->qdio.no_out_queues; ++i){
5096 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
5097 qeth_free_vlan_buffer(card, &card->qdio.
5098 out_qs[i]->bufs[j], vid);
5099 }
5100}
5101
5102static void
5103qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
5104{
5105 struct in_device *in_dev;
5106 struct in_ifaddr *ifa;
5107 struct qeth_ipaddr *addr;
5108
5109 QETH_DBF_TEXT(trace, 4, "frvaddr4");
5110 if (!card->vlangrp)
5111 return;
5112 rcu_read_lock();
5113 in_dev = __in_dev_get(card->vlangrp->vlan_devices[vid]);
5114 if (!in_dev)
5115 goto out;
5116 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
5117 addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
5118 if (addr){
5119 addr->u.a4.addr = ifa->ifa_address;
5120 addr->u.a4.mask = ifa->ifa_mask;
5121 addr->type = QETH_IP_TYPE_NORMAL;
5122 if (!qeth_delete_ip(card, addr))
5123 kfree(addr);
5124 }
5125 }
5126out:
5127 rcu_read_unlock();
5128}
5129
5130static void
5131qeth_free_vlan_addresses6(struct qeth_card *card, unsigned short vid)
5132{
5133#ifdef CONFIG_QETH_IPV6
5134 struct inet6_dev *in6_dev;
5135 struct inet6_ifaddr *ifa;
5136 struct qeth_ipaddr *addr;
5137
5138 QETH_DBF_TEXT(trace, 4, "frvaddr6");
5139 if (!card->vlangrp)
5140 return;
5141 in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]);
5142 if (!in6_dev)
5143 return;
5144 for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){
5145 addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
5146 if (addr){
5147 memcpy(&addr->u.a6.addr, &ifa->addr,
5148 sizeof(struct in6_addr));
5149 addr->u.a6.pfxlen = ifa->prefix_len;
5150 addr->type = QETH_IP_TYPE_NORMAL;
5151 if (!qeth_delete_ip(card, addr))
5152 kfree(addr);
5153 }
5154 }
5155 in6_dev_put(in6_dev);
5156#endif /* CONFIG_QETH_IPV6 */
5157}
5158
5159static void
5160qeth_layer2_send_setdelvlan(struct qeth_card *card, __u16 i,
5161 enum qeth_ipa_cmds ipacmd)
5162{
5163 int rc;
5164 struct qeth_ipa_cmd *cmd;
5165 struct qeth_cmd_buffer *iob;
5166
5167 QETH_DBF_TEXT_(trace, 4, "L2sdv%x",ipacmd);
5168 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
5169 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5170 cmd->data.setdelvlan.vlan_id = i;
5171
5172 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
5173 if (rc) {
5174 PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. "
5175 "Continuing\n",i, QETH_CARD_IFNAME(card), rc);
5176 QETH_DBF_TEXT_(trace, 2, "L2VL%4x", ipacmd);
5177 QETH_DBF_TEXT_(trace, 2, "L2%s", CARD_BUS_ID(card));
5178 QETH_DBF_TEXT_(trace, 2, "err%d", rc);
5179 }
5180}
5181
5182static void
5183qeth_layer2_process_vlans(struct qeth_card *card, int clear)
5184{
5185 unsigned short i;
5186
5187 QETH_DBF_TEXT(trace, 3, "L2prcvln");
5188
5189 if (!card->vlangrp)
5190 return;
5191 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5192 if (card->vlangrp->vlan_devices[i] == NULL)
5193 continue;
5194 if (clear)
5195 qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN);
5196 else
5197 qeth_layer2_send_setdelvlan(card, i, IPA_CMD_SETVLAN);
5198 }
5199}
5200
5201/* add_vid is used in layer 2 mode only .... */
5202static void
5203qeth_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
5204{
5205 struct qeth_card *card;
5206
5207 QETH_DBF_TEXT_(trace, 4, "aid:%d", vid);
5208
5209 card = (struct qeth_card *) dev->priv;
5210 if (!card->options.layer2)
5211 return;
5212 qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
5213}
5214
5215/* ... kill_vid is used for both modes */
5216static void
5217qeth_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
5218{
5219 struct qeth_card *card;
5220 unsigned long flags;
5221
5222 QETH_DBF_TEXT_(trace, 4, "kid:%d", vid);
5223
5224 card = (struct qeth_card *) dev->priv;
5225 /* free all skbs for the vlan device */
5226 qeth_free_vlan_skbs(card, vid);
5227 spin_lock_irqsave(&card->vlanlock, flags);
5228 /* unregister IP addresses of vlan device */
5229 qeth_free_vlan_addresses4(card, vid);
5230 qeth_free_vlan_addresses6(card, vid);
5231 if (card->vlangrp)
5232 card->vlangrp->vlan_devices[vid] = NULL;
5233 spin_unlock_irqrestore(&card->vlanlock, flags);
5234 if (card->options.layer2)
5235 qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
5236 qeth_set_multicast_list(card->dev);
5237}
5238#endif
5239
5240/**
5241 * set multicast address on card
5242 */
5243static void
5244qeth_set_multicast_list(struct net_device *dev)
5245{
5246 struct qeth_card *card = (struct qeth_card *) dev->priv;
5247
5248 QETH_DBF_TEXT(trace,3,"setmulti");
5249 qeth_delete_mc_addresses(card);
5250 qeth_add_multicast_ipv4(card);
5251#ifdef CONFIG_QETH_IPV6
5252 qeth_add_multicast_ipv6(card);
5253#endif
5254 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
5255 schedule_work(&card->kernel_thread_starter);
5256}
5257
5258static int
5259qeth_neigh_setup(struct net_device *dev, struct neigh_parms *np)
5260{
5261 return 0;
5262}
5263
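/*
 * Map an IPv4 multicast address to the corresponding multicast MAC,
 * using the token ring mapping where appropriate.  With
 * ip_eth_mc_map() the low 23 bits of the group address end up in the
 * MAC, e.g. 224.1.2.3 -> 01:00:5e:01:02:03.
 */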
5264static void
5265qeth_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev)
5266{
5267 if (dev->type == ARPHRD_IEEE802_TR)
5268 ip_tr_mc_map(ipm, mac);
5269 else
5270 ip_eth_mc_map(ipm, mac);
5271}
5272
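/*
 * Allocate a zeroed qeth_ipaddr for the given protocol.  GFP_ATOMIC
 * is used since callers may run under locks or in non-sleeping
 * context (e.g. the RCU-protected VLAN walk above).
 */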
5273static struct qeth_ipaddr *
5274qeth_get_addr_buffer(enum qeth_prot_versions prot)
5275{
5276 struct qeth_ipaddr *addr;
5277
5278 addr = kmalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
5279 if (addr == NULL) {
5280 PRINT_WARN("Not enough memory to add address\n");
5281 return NULL;
5282 }
5283 memset(addr,0,sizeof(struct qeth_ipaddr));
5284 addr->type = QETH_IP_TYPE_NORMAL;
5285 addr->proto = prot;
5286 return addr;
5287}
5288
5289static void
5290qeth_delete_mc_addresses(struct qeth_card *card)
5291{
5292 struct qeth_ipaddr *iptodo;
5293 unsigned long flags;
5294
5295 QETH_DBF_TEXT(trace,4,"delmc");
5296 iptodo = qeth_get_addr_buffer(QETH_PROT_IPV4);
5297 if (!iptodo) {
5298 QETH_DBF_TEXT(trace, 2, "dmcnomem");
5299 return;
5300 }
5301 iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
5302 spin_lock_irqsave(&card->ip_lock, flags);
5303 if (!__qeth_insert_ip_todo(card, iptodo, 0))
5304 kfree(iptodo);
5305 spin_unlock_irqrestore(&card->ip_lock, flags);
5306}
5307
5308static inline void
5309qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev)
5310{
5311 struct qeth_ipaddr *ipm;
5312 struct ip_mc_list *im4;
5313 char buf[MAX_ADDR_LEN];
5314
5315 QETH_DBF_TEXT(trace,4,"addmc");
5316 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
5317 qeth_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
5318 ipm = qeth_get_addr_buffer(QETH_PROT_IPV4);
5319 if (!ipm)
5320 continue;
5321 ipm->u.a4.addr = im4->multiaddr;
5322 memcpy(ipm->mac,buf,OSA_ADDR_LEN);
5323 ipm->is_multicast = 1;
5324 if (!qeth_add_ip(card,ipm))
5325 kfree(ipm);
5326 }
5327}
5328
5329static inline void
5330qeth_add_vlan_mc(struct qeth_card *card)
5331{
5332#ifdef CONFIG_QETH_VLAN
5333 struct in_device *in_dev;
5334 struct vlan_group *vg;
5335 int i;
5336
5337 QETH_DBF_TEXT(trace,4,"addmcvl");
5338	if (((card->options.layer2 == 0) &&
5339	     (!qeth_is_supported(card, IPA_FULL_VLAN))) ||
5340	    (card->vlangrp == NULL))
5341		return;
5342
5343 vg = card->vlangrp;
5344 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5345 if (vg->vlan_devices[i] == NULL ||
5346 !(vg->vlan_devices[i]->flags & IFF_UP))
5347 continue;
5348 in_dev = in_dev_get(vg->vlan_devices[i]);
5349 if (!in_dev)
5350 continue;
5351 read_lock(&in_dev->mc_list_lock);
5352 qeth_add_mc(card,in_dev);
5353 read_unlock(&in_dev->mc_list_lock);
5354 in_dev_put(in_dev);
5355 }
5356#endif
5357}
5358
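/*
 * Collect the IPv4 multicast groups joined on the base device and,
 * via qeth_add_vlan_mc(), on all active VLAN devices, each walk done
 * under the respective mc_list_lock.
 */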
5359static void
5360qeth_add_multicast_ipv4(struct qeth_card *card)
5361{
5362 struct in_device *in4_dev;
5363
5364 QETH_DBF_TEXT(trace,4,"chkmcv4");
5365 in4_dev = in_dev_get(card->dev);
5366 if (in4_dev == NULL)
5367 return;
5368 read_lock(&in4_dev->mc_list_lock);
5369 qeth_add_mc(card, in4_dev);
5370 qeth_add_vlan_mc(card);
5371 read_unlock(&in4_dev->mc_list_lock);
5372 in_dev_put(in4_dev);
5373}
5374
5375#ifdef CONFIG_QETH_IPV6
5376static inline void
5377qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
5378{
5379 struct qeth_ipaddr *ipm;
5380 struct ifmcaddr6 *im6;
5381 char buf[MAX_ADDR_LEN];
5382
5383 QETH_DBF_TEXT(trace,4,"addmc6");
5384 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
5385 ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
5386 ipm = qeth_get_addr_buffer(QETH_PROT_IPV6);
5387 if (!ipm)
5388 continue;
5389 ipm->is_multicast = 1;
5390 memcpy(ipm->mac,buf,OSA_ADDR_LEN);
5391 memcpy(&ipm->u.a6.addr,&im6->mca_addr.s6_addr,
5392 sizeof(struct in6_addr));
5393 if (!qeth_add_ip(card,ipm))
5394 kfree(ipm);
5395 }
5396}
5397
5398static inline void
5399qeth_add_vlan_mc6(struct qeth_card *card)
5400{
5401#ifdef CONFIG_QETH_VLAN
5402 struct inet6_dev *in_dev;
5403 struct vlan_group *vg;
5404 int i;
5405
5406 QETH_DBF_TEXT(trace,4,"admc6vl");
5407	if (((card->options.layer2 == 0) &&
5408	     (!qeth_is_supported(card, IPA_FULL_VLAN))) ||
5409	    (card->vlangrp == NULL))
5410		return;
5411
5412 vg = card->vlangrp;
5413 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5414 if (vg->vlan_devices[i] == NULL ||
5415 !(vg->vlan_devices[i]->flags & IFF_UP))
5416 continue;
5417 in_dev = in6_dev_get(vg->vlan_devices[i]);
5418 if (!in_dev)
5419 continue;
5420 read_lock(&in_dev->lock);
5421 qeth_add_mc6(card,in_dev);
5422 read_unlock(&in_dev->lock);
5423 in6_dev_put(in_dev);
5424 }
5425#endif /* CONFIG_QETH_VLAN */
5426}
5427
5428static void
5429qeth_add_multicast_ipv6(struct qeth_card *card)
5430{
5431 struct inet6_dev *in6_dev;
5432
5433 QETH_DBF_TEXT(trace,4,"chkmcv6");
5434	if ((card->options.layer2 == 0) &&
5435	    (!qeth_is_supported(card, IPA_IPV6)))
5436		return;
5437
5438 in6_dev = in6_dev_get(card->dev);
5439 if (in6_dev == NULL)
5440 return;
5441 read_lock(&in6_dev->lock);
5442 qeth_add_mc6(card, in6_dev);
5443 qeth_add_vlan_mc6(card);
5444 read_unlock(&in6_dev->lock);
5445 in6_dev_put(in6_dev);
5446}
5447#endif /* CONFIG_QETH_IPV6 */
5448
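/*
 * Build and send a layer 2 set/delete MAC command (SETVMAC, DELVMAC,
 * SETGMAC or DELGMAC); the caller passes the reply callback that
 * matches the command.
 */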
5449static int
5450qeth_layer2_send_setdelmac(struct qeth_card *card, __u8 *mac,
5451 enum qeth_ipa_cmds ipacmd,
5452 int (*reply_cb) (struct qeth_card *,
5453 struct qeth_reply*,
5454 unsigned long))
5455{
5456 struct qeth_ipa_cmd *cmd;
5457 struct qeth_cmd_buffer *iob;
5458
5459 QETH_DBF_TEXT(trace, 2, "L2sdmac");
5460 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
5461 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5462 cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
5463 memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
5464 return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
5465}
5466
5467static int
5468qeth_layer2_send_setgroupmac_cb(struct qeth_card *card,
5469 struct qeth_reply *reply,
5470 unsigned long data)
5471{
5472 struct qeth_ipa_cmd *cmd;
5473 __u8 *mac;
5474
5475 QETH_DBF_TEXT(trace, 2, "L2Sgmacb");
5476 cmd = (struct qeth_ipa_cmd *) data;
5477 mac = &cmd->data.setdelmac.mac[0];
5478 /* MAC already registered, needed in couple/uncouple case */
5479 if (cmd->hdr.return_code == 0x2005) {
5480 PRINT_WARN("Group MAC %02x:%02x:%02x:%02x:%02x:%02x " \
5481			   "already exists on %s\n",
5482 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
5483 QETH_CARD_IFNAME(card));
5484 cmd->hdr.return_code = 0;
5485 }
5486 if (cmd->hdr.return_code)
5487 PRINT_ERR("Could not set group MAC " \
5488 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
5489 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
5490 QETH_CARD_IFNAME(card),cmd->hdr.return_code);
5491 return 0;
5492}
5493
5494static int
5495qeth_layer2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
5496{
5497 QETH_DBF_TEXT(trace, 2, "L2Sgmac");
5498 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
5499 qeth_layer2_send_setgroupmac_cb);
5500}
5501
5502static int
5503qeth_layer2_send_delgroupmac_cb(struct qeth_card *card,
5504 struct qeth_reply *reply,
5505 unsigned long data)
5506{
5507 struct qeth_ipa_cmd *cmd;
5508 __u8 *mac;
5509
5510 QETH_DBF_TEXT(trace, 2, "L2Dgmacb");
5511 cmd = (struct qeth_ipa_cmd *) data;
5512 mac = &cmd->data.setdelmac.mac[0];
5513 if (cmd->hdr.return_code)
5514 PRINT_ERR("Could not delete group MAC " \
5515 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
5516 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
5517 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
5518 return 0;
5519}
5520
5521static int
5522qeth_layer2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
5523{
5524 QETH_DBF_TEXT(trace, 2, "L2Dgmac");
5525 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
5526 qeth_layer2_send_delgroupmac_cb);
5527}
5528
5529static int
5530qeth_layer2_send_setmac_cb(struct qeth_card *card,
5531 struct qeth_reply *reply,
5532 unsigned long data)
5533{
5534 struct qeth_ipa_cmd *cmd;
5535
5536 QETH_DBF_TEXT(trace, 2, "L2Smaccb");
5537 cmd = (struct qeth_ipa_cmd *) data;
5538 if (cmd->hdr.return_code) {
5539 QETH_DBF_TEXT_(trace, 2, "L2er%x", cmd->hdr.return_code);
5540 PRINT_WARN("Error in registering MAC address on " \
5541 "device %s: x%x\n", CARD_BUS_ID(card),
5542 cmd->hdr.return_code);
5543 card->info.layer2_mac_registered = 0;
5544 cmd->hdr.return_code = -EIO;
5545 } else {
5546 card->info.layer2_mac_registered = 1;
5547 memcpy(card->dev->dev_addr,cmd->data.setdelmac.mac,
5548 OSA_ADDR_LEN);
5549 PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
5550 "successfully registered on device %s\n",
5551 card->dev->dev_addr[0], card->dev->dev_addr[1],
5552 card->dev->dev_addr[2], card->dev->dev_addr[3],
5553 card->dev->dev_addr[4], card->dev->dev_addr[5],
5554 card->dev->name);
5555 }
5556 return 0;
5557}
5558
5559static int
5560qeth_layer2_send_setmac(struct qeth_card *card, __u8 *mac)
5561{
5562 QETH_DBF_TEXT(trace, 2, "L2Setmac");
5563 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
5564 qeth_layer2_send_setmac_cb);
5565}
5566
5567static int
5568qeth_layer2_send_delmac_cb(struct qeth_card *card,
5569 struct qeth_reply *reply,
5570 unsigned long data)
5571{
5572 struct qeth_ipa_cmd *cmd;
5573
5574 QETH_DBF_TEXT(trace, 2, "L2Dmaccb");
5575 cmd = (struct qeth_ipa_cmd *) data;
5576 if (cmd->hdr.return_code) {
5577 PRINT_WARN("Error in deregistering MAC address on " \
5578 "device %s: x%x\n", CARD_BUS_ID(card),
5579 cmd->hdr.return_code);
5580 QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code);
5581 cmd->hdr.return_code = -EIO;
5582 return 0;
5583 }
5584 card->info.layer2_mac_registered = 0;
5585
5586 return 0;
5587}

5588static int
5589qeth_layer2_send_delmac(struct qeth_card *card, __u8 *mac)
5590{
5591 QETH_DBF_TEXT(trace, 2, "L2Delmac");
5592 if (!card->info.layer2_mac_registered)
5593 return 0;
5594 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
5595 qeth_layer2_send_delmac_cb);
5596}
5597
5598static int
5599qeth_layer2_set_mac_address(struct net_device *dev, void *p)
5600{
5601 struct sockaddr *addr = p;
5602 struct qeth_card *card;
5603 int rc = 0;
5604
5605 QETH_DBF_TEXT(trace, 3, "setmac");
5606
5607 if (qeth_verify_dev(dev) != QETH_REAL_CARD) {
5608 QETH_DBF_TEXT(trace, 3, "setmcINV");
5609 return -EOPNOTSUPP;
5610 }
5611 card = (struct qeth_card *) dev->priv;
5612
5613 if (!card->options.layer2) {
5614		PRINT_WARN("Setting MAC address on %s is not supported "
5615			   "in Layer 3 mode.\n", dev->name);
5616 QETH_DBF_TEXT(trace, 3, "setmcLY3");
5617 return -EOPNOTSUPP;
5618 }
5619 QETH_DBF_TEXT_(trace, 3, "%s", CARD_BUS_ID(card));
5620 QETH_DBF_HEX(trace, 3, addr->sa_data, OSA_ADDR_LEN);
5621 rc = qeth_layer2_send_delmac(card, &card->dev->dev_addr[0]);
5622 if (!rc)
5623 rc = qeth_layer2_send_setmac(card, addr->sa_data);
5624 return rc;
5625}
5626
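/*
 * Fill the common IPA command header.  prim_version_no distinguishes
 * the disciplines: 2 for layer 2 commands, 1 for layer 3.
 */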
5627static void
5628qeth_fill_ipacmd_header(struct qeth_card *card, struct qeth_ipa_cmd *cmd,
5629 __u8 command, enum qeth_prot_versions prot)
5630{
5631 memset(cmd, 0, sizeof (struct qeth_ipa_cmd));
5632 cmd->hdr.command = command;
5633 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
5634 cmd->hdr.seqno = card->seqno.ipa;
5635 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
5636 cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
5637 if (card->options.layer2)
5638 cmd->hdr.prim_version_no = 2;
5639 else
5640 cmd->hdr.prim_version_no = 1;
5641 cmd->hdr.param_count = 1;
5642 cmd->hdr.prot_version = prot;
5643 cmd->hdr.ipa_supported = 0;
5644 cmd->hdr.ipa_enabled = 0;
5645}
5646
5647static struct qeth_cmd_buffer *
5648qeth_get_ipacmd_buffer(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
5649 enum qeth_prot_versions prot)
5650{
5651 struct qeth_cmd_buffer *iob;
5652 struct qeth_ipa_cmd *cmd;
5653
5654 iob = qeth_wait_for_buffer(&card->write);
5655 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5656 qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
5657
5658 return iob;
5659}
5660
5661static int
5662qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd)
5663{
5664 int rc;
5665 struct qeth_cmd_buffer *iob;
5666 struct qeth_ipa_cmd *cmd;
5667
5668 QETH_DBF_TEXT(trace,4,"setdelmc");
5669
5670 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
5671 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5672 memcpy(&cmd->data.setdelipm.mac,addr->mac, OSA_ADDR_LEN);
5673 if (addr->proto == QETH_PROT_IPV6)
5674 memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
5675 sizeof(struct in6_addr));
5676 else
5677 memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr,4);
5678
5679 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
5680
5681 return rc;
5682}
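
/*
 * Expand a prefix length into a 16 byte (IPv6-sized) netmask.  For
 * example len = 20 gives ff:ff:f0:00:...:00; the partial byte comes
 * from (u8)(0xFF00 >> 4) == 0xf0.
 */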
5683static inline void
5684qeth_fill_netmask(u8 *netmask, unsigned int len)
5685{
5686 int i,j;
5687 for (i=0;i<16;i++) {
5688 j=(len)-(i*8);
5689 if (j >= 8)
5690 netmask[i] = 0xff;
5691 else if (j > 0)
5692 netmask[i] = (u8)(0xFF00>>j);
5693 else
5694 netmask[i] = 0;
5695 }
5696}
5697
5698static int
5699qeth_send_setdelip(struct qeth_card *card, struct qeth_ipaddr *addr,
5700 int ipacmd, unsigned int flags)
5701{
5702 int rc;
5703 struct qeth_cmd_buffer *iob;
5704 struct qeth_ipa_cmd *cmd;
5705 __u8 netmask[16];
5706
5707 QETH_DBF_TEXT(trace,4,"setdelip");
5708 QETH_DBF_TEXT_(trace,4,"flags%02X", flags);
5709
5710 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
5711 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5712 if (addr->proto == QETH_PROT_IPV6) {
5713 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
5714 sizeof(struct in6_addr));
5715 qeth_fill_netmask(netmask,addr->u.a6.pfxlen);
5716 memcpy(cmd->data.setdelip6.mask, netmask,
5717 sizeof(struct in6_addr));
5718 cmd->data.setdelip6.flags = flags;
5719 } else {
5720 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
5721 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
5722 cmd->data.setdelip4.flags = flags;
5723 }
5724
5725 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
5726
5727 return rc;
5728}
5729
5730static int
5731qeth_layer2_register_addr_entry(struct qeth_card *card,
5732 struct qeth_ipaddr *addr)
5733{
5734 if (!addr->is_multicast)
5735 return 0;
5736 QETH_DBF_TEXT(trace, 2, "setgmac");
5737 QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
5738 return qeth_layer2_send_setgroupmac(card, &addr->mac[0]);
5739}
5740
5741static int
5742qeth_layer2_deregister_addr_entry(struct qeth_card *card,
5743 struct qeth_ipaddr *addr)
5744{
5745 if (!addr->is_multicast)
5746 return 0;
5747 QETH_DBF_TEXT(trace, 2, "delgmac");
5748 QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
5749 return qeth_layer2_send_delgroupmac(card, &addr->mac[0]);
5750}
5751
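/*
 * Register a single layer 3 address with the card, retrying the IPA
 * command up to three times.  Multicast entries use SETIPM, all
 * others SETIP with the address' set_flags.
 */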
5752static int
5753qeth_layer3_register_addr_entry(struct qeth_card *card,
5754 struct qeth_ipaddr *addr)
5755{
5756 char buf[50];
5757 int rc;
5758 int cnt = 3;
5759
5760 if (addr->proto == QETH_PROT_IPV4) {
5761 QETH_DBF_TEXT(trace, 2,"setaddr4");
5762 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
5763 } else if (addr->proto == QETH_PROT_IPV6) {
5764 QETH_DBF_TEXT(trace, 2, "setaddr6");
5765 QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
5766 QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
5767 } else {
5768 QETH_DBF_TEXT(trace, 2, "setaddr?");
5769 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
5770 }
5771 do {
5772 if (addr->is_multicast)
5773 rc = qeth_send_setdelmc(card, addr, IPA_CMD_SETIPM);
5774 else
5775 rc = qeth_send_setdelip(card, addr, IPA_CMD_SETIP,
5776 addr->set_flags);
5777 if (rc)
5778 QETH_DBF_TEXT(trace, 2, "failed");
5779 } while ((--cnt > 0) && rc);
5780 if (rc){
5781 QETH_DBF_TEXT(trace, 2, "FAILED");
5782 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
5783 PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n",
5784 buf, rc, rc);
5785 }
5786 return rc;
5787}
5788
5789static int
5790qeth_layer3_deregister_addr_entry(struct qeth_card *card,
5791 struct qeth_ipaddr *addr)
5792{
5793 //char buf[50];
5794 int rc;
5795
5796 if (addr->proto == QETH_PROT_IPV4) {
5797 QETH_DBF_TEXT(trace, 2,"deladdr4");
5798 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
5799 } else if (addr->proto == QETH_PROT_IPV6) {
5800 QETH_DBF_TEXT(trace, 2, "deladdr6");
5801 QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
5802 QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
5803 } else {
5804 QETH_DBF_TEXT(trace, 2, "deladdr?");
5805 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
5806 }
5807 if (addr->is_multicast)
5808 rc = qeth_send_setdelmc(card, addr, IPA_CMD_DELIPM);
5809 else
5810 rc = qeth_send_setdelip(card, addr, IPA_CMD_DELIP,
5811 addr->del_flags);
5812 if (rc) {
5813 QETH_DBF_TEXT(trace, 2, "failed");
5814		/* TODO: re-activate this warning as soon as we have
5815		 * clean microcode
5816 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
5817 PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
5818 buf, rc);
5819 */
5820 }
5821 return rc;
5822}
5823
5824static int
5825qeth_register_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
5826{
5827 if (card->options.layer2)
5828 return qeth_layer2_register_addr_entry(card, addr);
5829
5830 return qeth_layer3_register_addr_entry(card, addr);
5831}
5832
5833static int
5834qeth_deregister_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
5835{
5836 if (card->options.layer2)
5837 return qeth_layer2_deregister_addr_entry(card, addr);
5838
5839 return qeth_layer3_deregister_addr_entry(card, addr);
5840}
5841
5842static u32
5843qeth_ethtool_get_tx_csum(struct net_device *dev)
5844{
5845	/* We may need to claim tx csum offload support if we do
5846	 * EDDP or TSO. There are ongoing discussions about
5847	 * enforcing rules in the stack and in ethtool that would
5848	 * make SG and TSO depend on HW_CSUM. At the moment there
5849	 * are no such rules.
5850	 * If we said yes here, we would have to checksum all
5851	 * outbound packets ourselves. */
5852 return 0;
5853}
5854
5855static int
5856qeth_ethtool_set_tx_csum(struct net_device *dev, u32 data)
5857{
5858 return -EINVAL;
5859}
5860
5861static u32
5862qeth_ethtool_get_rx_csum(struct net_device *dev)
5863{
5864 struct qeth_card *card = (struct qeth_card *)dev->priv;
5865
5866 return (card->options.checksum_type == HW_CHECKSUMMING);
5867}
5868
5869static int
5870qeth_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5871{
5872 struct qeth_card *card = (struct qeth_card *)dev->priv;
5873
5874 if ((card->state != CARD_STATE_DOWN) &&
5875 (card->state != CARD_STATE_RECOVER))
5876 return -EPERM;
5877 if (data)
5878 card->options.checksum_type = HW_CHECKSUMMING;
5879 else
5880 card->options.checksum_type = SW_CHECKSUMMING;
5881 return 0;
5882}
5883
5884static u32
5885qeth_ethtool_get_sg(struct net_device *dev)
5886{
5887 struct qeth_card *card = (struct qeth_card *)dev->priv;
5888
5889 return ((card->options.large_send != QETH_LARGE_SEND_NO) &&
5890 (dev->features & NETIF_F_SG));
5891}
5892
5893static int
5894qeth_ethtool_set_sg(struct net_device *dev, u32 data)
5895{
5896 struct qeth_card *card = (struct qeth_card *)dev->priv;
5897
5898 if (data) {
5899 if (card->options.large_send != QETH_LARGE_SEND_NO)
5900 dev->features |= NETIF_F_SG;
5901 else {
5902 dev->features &= ~NETIF_F_SG;
5903 return -EINVAL;
5904 }
5905 } else
5906 dev->features &= ~NETIF_F_SG;
5907 return 0;
5908}
5909
5910static u32
5911qeth_ethtool_get_tso(struct net_device *dev)
5912{
5913 struct qeth_card *card = (struct qeth_card *)dev->priv;
5914
5915 return ((card->options.large_send != QETH_LARGE_SEND_NO) &&
5916 (dev->features & NETIF_F_TSO));
5917}
5918
5919static int
5920qeth_ethtool_set_tso(struct net_device *dev, u32 data)
5921{
5922 struct qeth_card *card = (struct qeth_card *)dev->priv;
5923
5924 if (data) {
5925 if (card->options.large_send != QETH_LARGE_SEND_NO)
5926 dev->features |= NETIF_F_TSO;
5927 else {
5928 dev->features &= ~NETIF_F_TSO;
5929 return -EINVAL;
5930 }
5931 } else
5932 dev->features &= ~NETIF_F_TSO;
5933 return 0;
5934}
5935
5936static struct ethtool_ops qeth_ethtool_ops = {
5937 .get_tx_csum = qeth_ethtool_get_tx_csum,
5938 .set_tx_csum = qeth_ethtool_set_tx_csum,
5939 .get_rx_csum = qeth_ethtool_get_rx_csum,
5940 .set_rx_csum = qeth_ethtool_set_rx_csum,
5941 .get_sg = qeth_ethtool_get_sg,
5942 .set_sg = qeth_ethtool_set_sg,
5943 .get_tso = qeth_ethtool_get_tso,
5944 .set_tso = qeth_ethtool_set_tso,
5945};
5946
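/*
 * net_device init callback: wire up the qeth entry points and derive
 * flags, hard header length and MTU from the card info.  For
 * IFF_NOARP setups the link level header callbacks are cleared, or
 * redirected to qeth_fake_header when fake_ll is enabled.
 */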
5947static int
5948qeth_netdev_init(struct net_device *dev)
5949{
5950 struct qeth_card *card;
5951
5952 card = (struct qeth_card *) dev->priv;
5953
5954 QETH_DBF_TEXT(trace,3,"initdev");
5955
5956 dev->tx_timeout = &qeth_tx_timeout;
5957 dev->watchdog_timeo = QETH_TX_TIMEOUT;
5958 dev->open = qeth_open;
5959 dev->stop = qeth_stop;
5960 dev->hard_start_xmit = qeth_hard_start_xmit;
5961 dev->do_ioctl = qeth_do_ioctl;
5962 dev->get_stats = qeth_get_stats;
5963 dev->change_mtu = qeth_change_mtu;
5964 dev->neigh_setup = qeth_neigh_setup;
5965 dev->set_multicast_list = qeth_set_multicast_list;
5966#ifdef CONFIG_QETH_VLAN
5967 dev->vlan_rx_register = qeth_vlan_rx_register;
5968 dev->vlan_rx_kill_vid = qeth_vlan_rx_kill_vid;
5969 dev->vlan_rx_add_vid = qeth_vlan_rx_add_vid;
5970#endif
5971 dev->hard_header = card->orig_hard_header;
5972 if (qeth_get_netdev_flags(card) & IFF_NOARP) {
5973 dev->rebuild_header = NULL;
5974 dev->hard_header = NULL;
5975 if (card->options.fake_ll)
5976 dev->hard_header = qeth_fake_header;
5977 dev->header_cache_update = NULL;
5978 dev->hard_header_cache = NULL;
5979 }
5980#ifdef CONFIG_QETH_IPV6
5981	/* IPv6 address autoconfiguration */
5982 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
5983 card->dev->dev_id = card->info.unique_id & 0xffff;
5984#endif
5985 dev->hard_header_parse = NULL;
5986 dev->set_mac_address = qeth_layer2_set_mac_address;
5987 dev->flags |= qeth_get_netdev_flags(card);
5988 if ((card->options.fake_broadcast) ||
5989 (card->info.broadcast_capable))
5990 dev->flags |= IFF_BROADCAST;
5991 dev->hard_header_len =
5992 qeth_get_hlen(card->info.link_type) + card->options.add_hhlen;
5993 dev->addr_len = OSA_ADDR_LEN;
5994 dev->mtu = card->info.initial_mtu;
5995
5996 SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops);
5997
5998 SET_MODULE_OWNER(dev);
5999 return 0;
6000}
6001
6002static void
6003qeth_init_func_level(struct qeth_card *card)
6004{
6005 if (card->ipato.enabled) {
6006 if (card->info.type == QETH_CARD_TYPE_IQD)
6007 card->info.func_level =
6008 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
6009 else
6010 card->info.func_level =
6011 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
6012 } else {
6013 if (card->info.type == QETH_CARD_TYPE_IQD)
6014 card->info.func_level =
6015 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
6016 else
6017 card->info.func_level =
6018 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
6019 }
6020}
6021
6022/**
6023 * hardsetup card, initialize MPC and QDIO stuff
6024 */
6025static int
6026qeth_hardsetup_card(struct qeth_card *card)
6027{
6028 int retries = 3;
6029 int rc;
6030
6031 QETH_DBF_TEXT(setup, 2, "hrdsetup");
6032
6033retry:
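	/* a retry means a full channel reset: take the data, write and
	 * read subchannels offline and online again before the next
	 * IDX activation attempt */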
6034 if (retries < 3){
6035		PRINT_WARN("Retrying IDX activation.\n");
6036 ccw_device_set_offline(CARD_DDEV(card));
6037 ccw_device_set_offline(CARD_WDEV(card));
6038 ccw_device_set_offline(CARD_RDEV(card));
6039 ccw_device_set_online(CARD_RDEV(card));
6040 ccw_device_set_online(CARD_WDEV(card));
6041 ccw_device_set_online(CARD_DDEV(card));
6042 }
6043 rc = qeth_qdio_clear_card(card,card->info.type==QETH_CARD_TYPE_OSAE);
6044 if (rc == -ERESTARTSYS) {
6045 QETH_DBF_TEXT(setup, 2, "break1");
6046 return rc;
6047 } else if (rc) {
6048 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6049 if (--retries < 0)
6050 goto out;
6051 else
6052 goto retry;
6053 }
6054 if ((rc = qeth_get_unitaddr(card))){
6055 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
6056 return rc;
6057 }
6058 qeth_init_tokens(card);
6059 qeth_init_func_level(card);
6060 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
6061 if (rc == -ERESTARTSYS) {
6062 QETH_DBF_TEXT(setup, 2, "break2");
6063 return rc;
6064 } else if (rc) {
6065 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
6066 if (--retries < 0)
6067 goto out;
6068 else
6069 goto retry;
6070 }
6071 rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
6072 if (rc == -ERESTARTSYS) {
6073 QETH_DBF_TEXT(setup, 2, "break3");
6074 return rc;
6075 } else if (rc) {
6076 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
6077 if (--retries < 0)
6078 goto out;
6079 else
6080 goto retry;
6081 }
6082 if ((rc = qeth_mpc_initialize(card))){
6083 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
6084 goto out;
6085 }
6086	/* the network device will be recovered */
6087 if (card->dev) {
6088 card->dev->hard_header = card->orig_hard_header;
6089 return 0;
6090 }
6091	/* on the first set_online, allocate the netdev */
6092 card->dev = qeth_get_netdevice(card->info.type,
6093 card->info.link_type);
6094 if (!card->dev){
6095 qeth_qdio_clear_card(card, card->info.type ==
6096 QETH_CARD_TYPE_OSAE);
6097 rc = -ENODEV;
6098 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
6099 goto out;
6100 }
6101 card->dev->priv = card;
6102 card->orig_hard_header = card->dev->hard_header;
6103 card->dev->type = qeth_get_arphdr_type(card->info.type,
6104 card->info.link_type);
6105 card->dev->init = qeth_netdev_init;
6106 return 0;
6107out:
6108 PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc);
6109 return rc;
6110}
6111
6112static int
6113qeth_default_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply,
6114 unsigned long data)
6115{
6116 struct qeth_ipa_cmd *cmd;
6117
6118 QETH_DBF_TEXT(trace,4,"defadpcb");
6119
6120 cmd = (struct qeth_ipa_cmd *) data;
6121 if (cmd->hdr.return_code == 0){
6122 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
6123 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
6124 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
6125#ifdef CONFIG_QETH_IPV6
6126 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
6127 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
6128#endif
6129 }
6130 if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
6131 cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
6132 card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
6133 QETH_DBF_TEXT_(trace, 3, "csum:%d", card->info.csum_mask);
6134 }
6135 return 0;
6136}
6137
6138static int
6139qeth_default_setadapterparms_cb(struct qeth_card *card,
6140 struct qeth_reply *reply,
6141 unsigned long data)
6142{
6143 struct qeth_ipa_cmd *cmd;
6144
6145 QETH_DBF_TEXT(trace,4,"defadpcb");
6146
6147 cmd = (struct qeth_ipa_cmd *) data;
6148 if (cmd->hdr.return_code == 0)
6149 cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
6150 return 0;
6151}
6152
6153static int
6154qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply,
6155 unsigned long data)
6156{
6157 struct qeth_ipa_cmd *cmd;
6158
6159 QETH_DBF_TEXT(trace,3,"quyadpcb");
6160
6161 cmd = (struct qeth_ipa_cmd *) data;
6162 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f)
6163 card->info.link_type =
6164 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
6165 card->options.adp.supported_funcs =
6166 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
6167 return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
6168}
6169
6170static int
6171qeth_query_setadapterparms(struct qeth_card *card)
6172{
6173 int rc;
6174 struct qeth_cmd_buffer *iob;
6175
6176 QETH_DBF_TEXT(trace,3,"queryadp");
6177 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
6178 sizeof(struct qeth_ipacmd_setadpparms));
6179 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
6180 return rc;
6181}
6182
6183static int
6184qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
6185 struct qeth_reply *reply,
6186 unsigned long data)
6187{
6188 struct qeth_ipa_cmd *cmd;
6189
6190 QETH_DBF_TEXT(trace,4,"chgmaccb");
6191
6192 cmd = (struct qeth_ipa_cmd *) data;
6193 memcpy(card->dev->dev_addr,
6194 &cmd->data.setadapterparms.data.change_addr.addr,OSA_ADDR_LEN);
6195 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
6196 return 0;
6197}
6198
6199static int
6200qeth_setadpparms_change_macaddr(struct qeth_card *card)
6201{
6202 int rc;
6203 struct qeth_cmd_buffer *iob;
6204 struct qeth_ipa_cmd *cmd;
6205
6206 QETH_DBF_TEXT(trace,4,"chgmac");
6207
6208 iob = qeth_get_adapter_cmd(card,IPA_SETADP_ALTER_MAC_ADDRESS,
6209 sizeof(struct qeth_ipacmd_setadpparms));
6210 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6211 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
6212 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
6213 memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
6214 card->dev->dev_addr, OSA_ADDR_LEN);
6215 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
6216 NULL);
6217 return rc;
6218}
6219
6220static int
6221qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode)
6222{
6223 int rc;
6224 struct qeth_cmd_buffer *iob;
6225 struct qeth_ipa_cmd *cmd;
6226
6227 QETH_DBF_TEXT(trace,4,"adpmode");
6228
6229 iob = qeth_get_adapter_cmd(card, command,
6230 sizeof(struct qeth_ipacmd_setadpparms));
6231 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6232 cmd->data.setadapterparms.data.mode = mode;
6233 rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
6234 NULL);
6235 return rc;
6236}
6237
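/*
 * Token ring (HSTR) handling: if SET_BROADCAST_MODE is available,
 * push the configured broadcast and MAC address modes to the
 * adapter; otherwise just warn about the defaults that remain in
 * effect.
 */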
6238static inline int
6239qeth_setadapter_hstr(struct qeth_card *card)
6240{
6241 int rc;
6242
6243 QETH_DBF_TEXT(trace,4,"adphstr");
6244
6245 if (qeth_adp_supported(card,IPA_SETADP_SET_BROADCAST_MODE)) {
6246 rc = qeth_send_setadp_mode(card, IPA_SETADP_SET_BROADCAST_MODE,
6247 card->options.broadcast_mode);
6248 if (rc)
6249 PRINT_WARN("couldn't set broadcast mode on "
6250 "device %s: x%x\n",
6251 CARD_BUS_ID(card), rc);
6252 rc = qeth_send_setadp_mode(card, IPA_SETADP_ALTER_MAC_ADDRESS,
6253 card->options.macaddr_mode);
6254 if (rc)
6255 PRINT_WARN("couldn't set macaddr mode on "
6256 "device %s: x%x\n", CARD_BUS_ID(card), rc);
6257 return rc;
6258 }
6259 if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
6260 PRINT_WARN("set adapter parameters not available "
6261 "to set broadcast mode, using ALLRINGS "
6262			   "on device %s.\n", CARD_BUS_ID(card));
6263 if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
6264 PRINT_WARN("set adapter parameters not available "
6265 "to set macaddr mode, using NONCANONICAL "
6266			   "on device %s.\n", CARD_BUS_ID(card));
6267 return 0;
6268}
6269
6270static int
6271qeth_setadapter_parms(struct qeth_card *card)
6272{
6273 int rc;
6274
6275 QETH_DBF_TEXT(setup, 2, "setadprm");
6276
6277 if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)){
6278 PRINT_WARN("set adapter parameters not supported "
6279 "on device %s.\n",
6280 CARD_BUS_ID(card));
6281 QETH_DBF_TEXT(setup, 2, " notsupp");
6282 return 0;
6283 }
6284 rc = qeth_query_setadapterparms(card);
6285 if (rc) {
6286 PRINT_WARN("couldn't set adapter parameters on device %s: "
6287 "x%x\n", CARD_BUS_ID(card), rc);
6288 return rc;
6289 }
6290 if (qeth_adp_supported(card,IPA_SETADP_ALTER_MAC_ADDRESS)) {
6291 rc = qeth_setadpparms_change_macaddr(card);
6292 if (rc)
6293 PRINT_WARN("couldn't get MAC address on "
6294 "device %s: x%x\n",
6295 CARD_BUS_ID(card), rc);
6296 }
6297
6298 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
6299 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
6300 rc = qeth_setadapter_hstr(card);
6301
6302 return rc;
6303}
6304
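/*
 * Layer 2 bring-up: read the adapter MAC via ALTER_MAC_ADDRESS and
 * try to register it with SETVMAC.  Note that a setmac failure is
 * only logged here and bring-up still returns success.
 */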
6305static int
6306qeth_layer2_initialize(struct qeth_card *card)
6307{
6308 int rc = 0;
6309
6310
6311 QETH_DBF_TEXT(setup, 2, "doL2init");
6312 QETH_DBF_TEXT_(setup, 2, "doL2%s", CARD_BUS_ID(card));
6313
6314 rc = qeth_setadpparms_change_macaddr(card);
6315 if (rc) {
6316 PRINT_WARN("couldn't get MAC address on "
6317 "device %s: x%x\n",
6318 CARD_BUS_ID(card), rc);
6319 QETH_DBF_TEXT_(setup, 2,"1err%d",rc);
6320 return rc;
6321 }
6322 QETH_DBF_HEX(setup,2, card->dev->dev_addr, OSA_ADDR_LEN);
6323
6324 rc = qeth_layer2_send_setmac(card, &card->dev->dev_addr[0]);
6325 if (rc)
6326 QETH_DBF_TEXT_(setup, 2,"2err%d",rc);
6327 return 0;
6328}
6329
6330
6331static int
6332qeth_send_startstoplan(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
6333 enum qeth_prot_versions prot)
6334{
6335 int rc;
6336 struct qeth_cmd_buffer *iob;
6337
6338 iob = qeth_get_ipacmd_buffer(card,ipacmd,prot);
6339 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6340
6341 return rc;
6342}
6343
6344static int
6345qeth_send_startlan(struct qeth_card *card, enum qeth_prot_versions prot)
6346{
6347 int rc;
6348
6349 QETH_DBF_TEXT_(setup, 2, "strtlan%i", prot);
6350
6351 rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, prot);
6352 return rc;
6353}
6354
6355static int
6356qeth_send_stoplan(struct qeth_card *card)
6357{
6358 int rc = 0;
6359
6360	/*
6361	 * TODO: according to the IPA format document, page 14,
6362	 * TCP/IP (i.e. we) should never issue a STOPLAN.
6363	 * Is this right?
6364	 */
6365 QETH_DBF_TEXT(trace, 2, "stoplan");
6366
6367 rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, QETH_PROT_IPV4);
6368 return rc;
6369}
6370
6371static int
6372qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply,
6373 unsigned long data)
6374{
6375 struct qeth_ipa_cmd *cmd;
6376
6377 QETH_DBF_TEXT(setup, 2, "qipasscb");
6378
6379 cmd = (struct qeth_ipa_cmd *) data;
6380 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
6381 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
6382 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
6383 } else {
6384#ifdef CONFIG_QETH_IPV6
6385 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
6386 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
6387#endif
6388 }
6389 QETH_DBF_TEXT(setup, 2, "suppenbl");
6390 QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_supported);
6391 QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_enabled);
6392 return 0;
6393}
6394
6395static int
6396qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
6397{
6398 int rc;
6399 struct qeth_cmd_buffer *iob;
6400
6401 QETH_DBF_TEXT_(setup, 2, "qipassi%i", prot);
6402 if (card->options.layer2) {
6403 QETH_DBF_TEXT(setup, 2, "noprmly2");
6404 return -EPERM;
6405 }
6406
6407 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_QIPASSIST,prot);
6408 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
6409 return rc;
6410}
6411
6412static struct qeth_cmd_buffer *
6413qeth_get_setassparms_cmd(struct qeth_card *card, enum qeth_ipa_funcs ipa_func,
6414 __u16 cmd_code, __u16 len,
6415 enum qeth_prot_versions prot)
6416{
6417 struct qeth_cmd_buffer *iob;
6418 struct qeth_ipa_cmd *cmd;
6419
6420 QETH_DBF_TEXT(trace,4,"getasscm");
6421 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETASSPARMS,prot);
6422
6423 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6424 cmd->data.setassparms.hdr.assist_no = ipa_func;
6425 cmd->data.setassparms.hdr.length = 8 + len;
6426 cmd->data.setassparms.hdr.command_code = cmd_code;
6427 cmd->data.setassparms.hdr.return_code = 0;
6428 cmd->data.setassparms.hdr.seq_no = 0;
6429
6430 return iob;
6431}
6432
6433static int
6434qeth_send_setassparms(struct qeth_card *card, struct qeth_cmd_buffer *iob,
6435 __u16 len, long data,
6436 int (*reply_cb)
6437 (struct qeth_card *,struct qeth_reply *,unsigned long),
6438 void *reply_param)
6439{
6440 int rc;
6441 struct qeth_ipa_cmd *cmd;
6442
6443 QETH_DBF_TEXT(trace,4,"sendassp");
6444
6445 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6446 if (len <= sizeof(__u32))
6447 cmd->data.setassparms.data.flags_32bit = (__u32) data;
6448 else if (len > sizeof(__u32))
6449 memcpy(&cmd->data.setassparms.data, (void *) data, len);
6450
6451 rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
6452 return rc;
6453}
6454
6455#ifdef CONFIG_QETH_IPV6
6456static int
6457qeth_send_simple_setassparms_ipv6(struct qeth_card *card,
6458 enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
6459
6460{
6461 int rc;
6462 struct qeth_cmd_buffer *iob;
6463
6464 QETH_DBF_TEXT(trace,4,"simassp6");
6465 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
6466 0, QETH_PROT_IPV6);
6467 rc = qeth_send_setassparms(card, iob, 0, 0,
6468 qeth_default_setassparms_cb, NULL);
6469 return rc;
6470}
6471#endif
6472
6473static int
6474qeth_send_simple_setassparms(struct qeth_card *card,
6475 enum qeth_ipa_funcs ipa_func,
6476 __u16 cmd_code, long data)
6477{
6478 int rc;
6479 int length = 0;
6480 struct qeth_cmd_buffer *iob;
6481
6482 QETH_DBF_TEXT(trace,4,"simassp4");
6483 if (data)
6484 length = sizeof(__u32);
6485 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
6486 length, QETH_PROT_IPV4);
6487 rc = qeth_send_setassparms(card, iob, length, data,
6488 qeth_default_setassparms_cb, NULL);
6489 return rc;
6490}
6491
6492static inline int
6493qeth_start_ipa_arp_processing(struct qeth_card *card)
6494{
6495 int rc;
6496
6497 QETH_DBF_TEXT(trace,3,"ipaarp");
6498
6499 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
6500 PRINT_WARN("ARP processing not supported "
6501 "on %s!\n", QETH_CARD_IFNAME(card));
6502 return 0;
6503 }
6504 rc = qeth_send_simple_setassparms(card,IPA_ARP_PROCESSING,
6505 IPA_CMD_ASS_START, 0);
6506 if (rc) {
6507 PRINT_WARN("Could not start ARP processing "
6508 "assist on %s: 0x%x\n",
6509 QETH_CARD_IFNAME(card), rc);
6510 }
6511 return rc;
6512}
6513
6514static int
6515qeth_start_ipa_ip_fragmentation(struct qeth_card *card)
6516{
6517 int rc;
6518
6519 QETH_DBF_TEXT(trace,3,"ipaipfrg");
6520
6521 if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
6522 PRINT_INFO("Hardware IP fragmentation not supported on %s\n",
6523 QETH_CARD_IFNAME(card));
6524 return -EOPNOTSUPP;
6525 }
6526
6527 rc = qeth_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
6528 IPA_CMD_ASS_START, 0);
6529 if (rc) {
6530 PRINT_WARN("Could not start Hardware IP fragmentation "
6531 "assist on %s: 0x%x\n",
6532 QETH_CARD_IFNAME(card), rc);
6533 } else
6534		PRINT_INFO("Hardware IP fragmentation enabled\n");
6535 return rc;
6536}
6537
6538static int
6539qeth_start_ipa_source_mac(struct qeth_card *card)
6540{
6541 int rc;
6542
6543 QETH_DBF_TEXT(trace,3,"stsrcmac");
6544
6545 if (!card->options.fake_ll)
6546 return -EOPNOTSUPP;
6547
6548 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
6549 PRINT_INFO("Inbound source address not "
6550 "supported on %s\n", QETH_CARD_IFNAME(card));
6551 return -EOPNOTSUPP;
6552 }
6553
6554 rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
6555 IPA_CMD_ASS_START, 0);
6556 if (rc)
6557 PRINT_WARN("Could not start inbound source "
6558 "assist on %s: 0x%x\n",
6559 QETH_CARD_IFNAME(card), rc);
6560 return rc;
6561}
6562
6563static int
6564qeth_start_ipa_vlan(struct qeth_card *card)
6565{
6566 int rc = 0;
6567
6568 QETH_DBF_TEXT(trace,3,"strtvlan");
6569
6570#ifdef CONFIG_QETH_VLAN
6571 if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
6572 PRINT_WARN("VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
6573 return -EOPNOTSUPP;
6574 }
6575
6576 rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
6577 IPA_CMD_ASS_START,0);
6578 if (rc) {
6579 PRINT_WARN("Could not start vlan "
6580 "assist on %s: 0x%x\n",
6581 QETH_CARD_IFNAME(card), rc);
6582 } else {
6583		PRINT_INFO("VLAN enabled\n");
6584 card->dev->features |=
6585 NETIF_F_HW_VLAN_FILTER |
6586 NETIF_F_HW_VLAN_TX |
6587 NETIF_F_HW_VLAN_RX;
6588 }
6589#endif /* QETH_VLAN */
6590 return rc;
6591}
6592
6593static int
6594qeth_start_ipa_multicast(struct qeth_card *card)
6595{
6596 int rc;
6597
6598 QETH_DBF_TEXT(trace,3,"stmcast");
6599
6600 if (!qeth_is_supported(card, IPA_MULTICASTING)) {
6601 PRINT_WARN("Multicast not supported on %s\n",
6602 QETH_CARD_IFNAME(card));
6603 return -EOPNOTSUPP;
6604 }
6605
6606 rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
6607 IPA_CMD_ASS_START,0);
6608 if (rc) {
6609 PRINT_WARN("Could not start multicast "
6610 "assist on %s: rc=%i\n",
6611 QETH_CARD_IFNAME(card), rc);
6612 } else {
6613 PRINT_INFO("Multicast enabled\n");
6614 card->dev->flags |= IFF_MULTICAST;
6615 }
6616 return rc;
6617}
6618
6619#ifdef CONFIG_QETH_IPV6
6620static int
6621qeth_softsetup_ipv6(struct qeth_card *card)
6622{
6623 int rc;
6624
6625 QETH_DBF_TEXT(trace,3,"softipv6");
6626
6627 netif_stop_queue(card->dev);
6628 rc = qeth_send_startlan(card, QETH_PROT_IPV6);
6629 if (rc) {
6630 PRINT_ERR("IPv6 startlan failed on %s\n",
6631 QETH_CARD_IFNAME(card));
6632 return rc;
6633 }
6634 netif_wake_queue(card->dev);
6635 rc = qeth_query_ipassists(card,QETH_PROT_IPV6);
6636 if (rc) {
6637 PRINT_ERR("IPv6 query ipassist failed on %s\n",
6638 QETH_CARD_IFNAME(card));
6639 return rc;
6640 }
6641 rc = qeth_send_simple_setassparms(card, IPA_IPV6,
6642 IPA_CMD_ASS_START, 3);
6643 if (rc) {
6644 PRINT_WARN("IPv6 start assist (version 4) failed "
6645 "on %s: 0x%x\n",
6646 QETH_CARD_IFNAME(card), rc);
6647 return rc;
6648 }
6649 rc = qeth_send_simple_setassparms_ipv6(card, IPA_IPV6,
6650 IPA_CMD_ASS_START);
6651 if (rc) {
6652		PRINT_WARN("IPv6 start assist (version 6) failed "
6653 "on %s: 0x%x\n",
6654 QETH_CARD_IFNAME(card), rc);
6655 return rc;
6656 }
6657 rc = qeth_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
6658 IPA_CMD_ASS_START);
6659 if (rc) {
6660 PRINT_WARN("Could not enable passthrough "
6661 "on %s: 0x%x\n",
6662 QETH_CARD_IFNAME(card), rc);
6663 return rc;
6664 }
6665	PRINT_INFO("IPv6 enabled\n");
6666 return 0;
6667}
6668
6669#endif
6670
6671static int
6672qeth_start_ipa_ipv6(struct qeth_card *card)
6673{
6674 int rc = 0;
6675#ifdef CONFIG_QETH_IPV6
6676 QETH_DBF_TEXT(trace,3,"strtipv6");
6677
6678 if (!qeth_is_supported(card, IPA_IPV6)) {
6679 PRINT_WARN("IPv6 not supported on %s\n",
6680 QETH_CARD_IFNAME(card));
6681 return 0;
6682 }
6683 rc = qeth_softsetup_ipv6(card);
6684#endif
6685	return rc;
6686}
6687
6688static int
6689qeth_start_ipa_broadcast(struct qeth_card *card)
6690{
6691 int rc;
6692
6693 QETH_DBF_TEXT(trace,3,"stbrdcst");
6694 card->info.broadcast_capable = 0;
6695 if (!qeth_is_supported(card, IPA_FILTERING)) {
6696 PRINT_WARN("Broadcast not supported on %s\n",
6697 QETH_CARD_IFNAME(card));
6698 rc = -EOPNOTSUPP;
6699 goto out;
6700 }
6701 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
6702 IPA_CMD_ASS_START, 0);
6703 if (rc) {
6704 PRINT_WARN("Could not enable broadcasting filtering "
6705 "on %s: 0x%x\n",
6706 QETH_CARD_IFNAME(card), rc);
6707 goto out;
6708 }
6709
6710 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
6711 IPA_CMD_ASS_CONFIGURE, 1);
6712 if (rc) {
6713 PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n",
6714 QETH_CARD_IFNAME(card), rc);
6715 goto out;
6716 }
6717 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
6718	PRINT_INFO("Broadcast enabled\n");
6719 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
6720 IPA_CMD_ASS_ENABLE, 1);
6721 if (rc) {
6722 PRINT_WARN("Could not set up broadcast echo filtering on "
6723 "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc);
6724 goto out;
6725 }
6726 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
6727out:
6728 if (card->info.broadcast_capable)
6729 card->dev->flags |= IFF_BROADCAST;
6730 else
6731 card->dev->flags &= ~IFF_BROADCAST;
6732 return rc;
6733}
6734
6735static int
6736qeth_send_checksum_command(struct qeth_card *card)
6737{
6738 int rc;
6739
6740 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
6741 IPA_CMD_ASS_START, 0);
6742 if (rc) {
6743 PRINT_WARN("Starting Inbound HW Checksumming failed on %s: "
6744 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
6745 QETH_CARD_IFNAME(card), rc);
6746 return rc;
6747 }
6748 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
6749 IPA_CMD_ASS_ENABLE,
6750 card->info.csum_mask);
6751 if (rc) {
6752 PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: "
6753 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
6754 QETH_CARD_IFNAME(card), rc);
6755 return rc;
6756 }
6757 return 0;
6758}
6759
6760static int
6761qeth_start_ipa_checksum(struct qeth_card *card)
6762{
6763 int rc = 0;
6764
6765 QETH_DBF_TEXT(trace,3,"strtcsum");
6766
6767 if (card->options.checksum_type == NO_CHECKSUMMING) {
6768 PRINT_WARN("Using no checksumming on %s.\n",
6769 QETH_CARD_IFNAME(card));
6770 return 0;
6771 }
6772 if (card->options.checksum_type == SW_CHECKSUMMING) {
6773 PRINT_WARN("Using SW checksumming on %s.\n",
6774 QETH_CARD_IFNAME(card));
6775 return 0;
6776 }
6777 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
6778 PRINT_WARN("Inbound HW Checksumming not "
6779 "supported on %s,\ncontinuing "
6780 "using Inbound SW Checksumming\n",
6781 QETH_CARD_IFNAME(card));
6782 card->options.checksum_type = SW_CHECKSUMMING;
6783 return 0;
6784 }
6785 rc = qeth_send_checksum_command(card);
6786 if (!rc) {
6787		PRINT_INFO("HW Checksumming (inbound) enabled\n");
6788 }
6789 return rc;
6790}
6791
6792static int
6793qeth_start_ipa_tso(struct qeth_card *card)
6794{
6795 int rc;
6796
6797 QETH_DBF_TEXT(trace,3,"sttso");
6798
6799 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
6800 PRINT_WARN("Outbound TSO not supported on %s\n",
6801 QETH_CARD_IFNAME(card));
6802 rc = -EOPNOTSUPP;
6803 } else {
6804 rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
6805 IPA_CMD_ASS_START,0);
6806 if (rc)
6807 PRINT_WARN("Could not start outbound TSO "
6808 "assist on %s: rc=%i\n",
6809 QETH_CARD_IFNAME(card), rc);
6810 else
6811 PRINT_INFO("Outbound TSO enabled\n");
6812 }
6813 if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)){
6814 card->options.large_send = QETH_LARGE_SEND_NO;
6815 card->dev->features &= ~ (NETIF_F_TSO | NETIF_F_SG);
6816 }
6817 return rc;
6818}
6819
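/*
 * Start all IPA assists back to back.  Each helper logs its own
 * failure and we deliberately carry on (hence the "go on" markers):
 * a missing assist degrades functionality but does not prevent
 * operation.
 */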
6820static int
6821qeth_start_ipassists(struct qeth_card *card)
6822{
6823 QETH_DBF_TEXT(trace,3,"strtipas");
6824 qeth_start_ipa_arp_processing(card); /* go on*/
6825 qeth_start_ipa_ip_fragmentation(card); /* go on*/
6826 qeth_start_ipa_source_mac(card); /* go on*/
6827 qeth_start_ipa_vlan(card); /* go on*/
6828 qeth_start_ipa_multicast(card); /* go on*/
6829 qeth_start_ipa_ipv6(card); /* go on*/
6830 qeth_start_ipa_broadcast(card); /* go on*/
6831 qeth_start_ipa_checksum(card); /* go on*/
6832 qeth_start_ipa_tso(card); /* go on*/
6833 return 0;
6834}
6835
6836static int
6837qeth_send_setrouting(struct qeth_card *card, enum qeth_routing_types type,
6838 enum qeth_prot_versions prot)
6839{
6840 int rc;
6841 struct qeth_ipa_cmd *cmd;
6842 struct qeth_cmd_buffer *iob;
6843
6844 QETH_DBF_TEXT(trace,4,"setroutg");
6845 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
6846 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6847 cmd->data.setrtg.type = (type);
6848 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6849
6850 return rc;
6851
6852}
6853
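/*
 * Validate the configured routing type against the card type:
 * HiperSockets (IQD) devices accept the connector roles and
 * MULTICAST_ROUTER, OSA devices accept the router roles, with
 * MULTICAST_ROUTER requiring the IPA_OSA_MC_ROUTER function.
 * Anything else falls back to NO_ROUTER with a warning.
 */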
6854static void
6855qeth_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type,
6856 enum qeth_prot_versions prot)
6857{
6858 if (card->info.type == QETH_CARD_TYPE_IQD) {
6859 switch (*type) {
6860 case NO_ROUTER:
6861 case PRIMARY_CONNECTOR:
6862 case SECONDARY_CONNECTOR:
6863 case MULTICAST_ROUTER:
6864 return;
6865 default:
6866 goto out_inval;
6867 }
6868 } else {
6869 switch (*type) {
6870 case NO_ROUTER:
6871 case PRIMARY_ROUTER:
6872 case SECONDARY_ROUTER:
6873 return;
6874 case MULTICAST_ROUTER:
6875 if (qeth_is_ipafunc_supported(card, prot,
6876 IPA_OSA_MC_ROUTER))
6877 return;
6878 default:
6879 goto out_inval;
6880 }
6881 }
6882out_inval:
6883 PRINT_WARN("Routing type '%s' not supported for interface %s.\n"
6884 "Router status set to 'no router'.\n",
6885 ((*type == PRIMARY_ROUTER)? "primary router" :
6886 (*type == SECONDARY_ROUTER)? "secondary router" :
6887 (*type == PRIMARY_CONNECTOR)? "primary connector" :
6888 (*type == SECONDARY_CONNECTOR)? "secondary connector" :
6889 (*type == MULTICAST_ROUTER)? "multicast router" :
6890 "unknown"),
6891 card->dev->name);
6892 *type = NO_ROUTER;
6893}
6894
6895int
6896qeth_setrouting_v4(struct qeth_card *card)
6897{
6898 int rc;
6899
6900 QETH_DBF_TEXT(trace,3,"setrtg4");
6901
6902 qeth_correct_routing_type(card, &card->options.route4.type,
6903 QETH_PROT_IPV4);
6904
6905 rc = qeth_send_setrouting(card, card->options.route4.type,
6906 QETH_PROT_IPV4);
6907 if (rc) {
6908 card->options.route4.type = NO_ROUTER;
6909 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
6910 "Type set to 'no router'.\n",
6911 rc, QETH_CARD_IFNAME(card));
6912 }
6913 return rc;
6914}
6915
6916int
6917qeth_setrouting_v6(struct qeth_card *card)
6918{
6919 int rc = 0;
6920
6921 QETH_DBF_TEXT(trace,3,"setrtg6");
6922#ifdef CONFIG_QETH_IPV6
6923
6924 qeth_correct_routing_type(card, &card->options.route6.type,
6925 QETH_PROT_IPV6);
6926
6927 if ((card->options.route6.type == NO_ROUTER) ||
6928 ((card->info.type == QETH_CARD_TYPE_OSAE) &&
6929 (card->options.route6.type == MULTICAST_ROUTER) &&
6930 !qeth_is_supported6(card,IPA_OSA_MC_ROUTER)))
6931 return 0;
6932 rc = qeth_send_setrouting(card, card->options.route6.type,
6933 QETH_PROT_IPV6);
6934 if (rc) {
6935 card->options.route6.type = NO_ROUTER;
6936 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
6937 "Type set to 'no router'.\n",
6938 rc, QETH_CARD_IFNAME(card));
6939 }
6940#endif
6941 return rc;
6942}
6943
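/*
 * Apply the large_send option to the netdev feature flags.  The TX
 * queue is stopped while NETIF_F_TSO/NETIF_F_SG are toggled,
 * presumably so no frame is built while the flags change.
 */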
6944int
6945qeth_set_large_send(struct qeth_card *card)
6946{
6947 int rc = 0;
6948
6949 if (card->dev == NULL)
6950 return 0;
6951
6952 netif_stop_queue(card->dev);
6953 switch (card->options.large_send) {
6954 case QETH_LARGE_SEND_EDDP:
6955 card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
6956 break;
6957 case QETH_LARGE_SEND_TSO:
6958 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)){
6959 card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
6960 } else {
6961 PRINT_WARN("TSO not supported on %s. "
6962 "large_send set to 'no'.\n",
6963 card->dev->name);
6964 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
6965 card->options.large_send = QETH_LARGE_SEND_NO;
6966 rc = -EOPNOTSUPP;
6967 }
6968 break;
6969 default: /* includes QETH_LARGE_SEND_NO */
6970 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
6971 break;
6972 }
6973
6974 netif_wake_queue(card->dev);
6975 return rc;
6976}
6977
6978/*
6979 * softsetup card: init IPA stuff
6980 */
6981static int
6982qeth_softsetup_card(struct qeth_card *card)
6983{
6984 int rc;
6985
6986 QETH_DBF_TEXT(setup, 2, "softsetp");
6987
6988 if ((rc = qeth_send_startlan(card, QETH_PROT_IPV4))){
6989 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6990 if (rc == 0xe080){
6991			PRINT_WARN("LAN on card %s is offline! "
6992 "Continuing softsetup.\n",
6993 CARD_BUS_ID(card));
6994 card->lan_online = 0;
6995 } else
6996 return rc;
6997 } else
6998 card->lan_online = 1;
6999 if (card->options.layer2) {
7000 card->dev->features |=
7001 NETIF_F_HW_VLAN_FILTER |
7002 NETIF_F_HW_VLAN_TX |
7003 NETIF_F_HW_VLAN_RX;
7004 card->dev->flags|=IFF_MULTICAST|IFF_BROADCAST;
7005 card->info.broadcast_capable=1;
7006 if ((rc = qeth_layer2_initialize(card))) {
7007 QETH_DBF_TEXT_(setup, 2, "L2err%d", rc);
7008 return rc;
7009 }
7010#ifdef CONFIG_QETH_VLAN
7011 qeth_layer2_process_vlans(card, 0);
7012#endif
7013 goto out;
7014 }
7015 if ((card->options.large_send == QETH_LARGE_SEND_EDDP) ||
7016 (card->options.large_send == QETH_LARGE_SEND_TSO))
7017 card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
7018 else
7019 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
7020
7021 if ((rc = qeth_setadapter_parms(card)))
7022 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7023 if ((rc = qeth_start_ipassists(card)))
7024 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
7025 if ((rc = qeth_setrouting_v4(card)))
7026 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
7027 if ((rc = qeth_setrouting_v6(card)))
7028 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
7029out:
7030 netif_stop_queue(card->dev);
7031 return 0;
7032}
7033
7034#ifdef CONFIG_QETH_IPV6
7035static int
7036qeth_get_unique_id_cb(struct qeth_card *card, struct qeth_reply *reply,
7037 unsigned long data)
7038{
7039 struct qeth_ipa_cmd *cmd;
7040
7041 cmd = (struct qeth_ipa_cmd *) data;
7042 if (cmd->hdr.return_code == 0)
7043 card->info.unique_id = *((__u16 *)
7044 &cmd->data.create_destroy_addr.unique_id[6]);
7045 else {
7046 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7047 UNIQUE_ID_NOT_BY_CARD;
7048			PRINT_WARN("Couldn't get a unique ID from the card on device "
7049				   "%s (result=x%x), using default ID. IPv6 "
7050				   "autoconfig on other LPARs may lead to duplicate "
7051				   "IP addresses. Please use manually "
7052				   "configured ones.\n",
7053 CARD_BUS_ID(card), cmd->hdr.return_code);
7054 }
7055 return 0;
7056}
7057#endif
7058
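/*
 * Hand the IPv6 unique id back to the card via DESTROY_ADDR, but only
 * if the id was actually assigned by the card (UNIQUE_ID_NOT_BY_CARD
 * not set).
 */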
7059static int
7060qeth_put_unique_id(struct qeth_card *card)
7061{
7062
7063 int rc = 0;
7064#ifdef CONFIG_QETH_IPV6
7065 struct qeth_cmd_buffer *iob;
7066 struct qeth_ipa_cmd *cmd;
7067
7068 QETH_DBF_TEXT(trace,2,"puniqeid");
7069
7070 if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
7071 UNIQUE_ID_NOT_BY_CARD)
7072 return -1;
7073 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
7074 QETH_PROT_IPV6);
7075 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7076 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
7077 card->info.unique_id;
7078 memcpy(&cmd->data.create_destroy_addr.unique_id[0],
7079 card->dev->dev_addr, OSA_ADDR_LEN);
7080 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
7081#else
7082 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7083 UNIQUE_ID_NOT_BY_CARD;
7084#endif
7085 return rc;
7086}
7087
7088/**
7089 * Clear IP List
7090 */
7091static void
7092qeth_clear_ip_list(struct qeth_card *card, int clean, int recover)
7093{
7094 struct qeth_ipaddr *addr, *tmp;
7095 unsigned long flags;
7096
7097 QETH_DBF_TEXT(trace,4,"clearip");
7098 spin_lock_irqsave(&card->ip_lock, flags);
7099 /* clear todo list */
7100 list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry){
7101 list_del(&addr->entry);
7102 kfree(addr);
7103 }
7104
7105 while (!list_empty(&card->ip_list)) {
7106 addr = list_entry(card->ip_list.next,
7107 struct qeth_ipaddr, entry);
7108 list_del_init(&addr->entry);
7109 if (clean) {
7110 spin_unlock_irqrestore(&card->ip_lock, flags);
7111 qeth_deregister_addr_entry(card, addr);
7112 spin_lock_irqsave(&card->ip_lock, flags);
7113 }
7114 if (!recover || addr->is_multicast) {
7115 kfree(addr);
7116 continue;
7117 }
7118 list_add_tail(&addr->entry, card->ip_tbd_list);
7119 }
7120 spin_unlock_irqrestore(&card->ip_lock, flags);
7121}
7122
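/*
 * Restrict which card threads may (re)start.  Clearing bits in
 * thread_start_mask also cancels threads that were requested but
 * have not yet run.
 */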
7123static void
7124qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
7125 int clear_start_mask)
7126{
7127 unsigned long flags;
7128
7129 spin_lock_irqsave(&card->thread_mask_lock, flags);
7130 card->thread_allowed_mask = threads;
7131 if (clear_start_mask)
7132 card->thread_start_mask &= threads;
7133 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
7134 wake_up(&card->wait_q);
7135}
7136
7137static inline int
7138qeth_threads_running(struct qeth_card *card, unsigned long threads)
7139{
7140 unsigned long flags;
7141 int rc = 0;
7142
7143 spin_lock_irqsave(&card->thread_mask_lock, flags);
7144 rc = (card->thread_running_mask & threads);
7145 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
7146 return rc;
7147}
7148
7149static int
7150qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
7151{
7152 return wait_event_interruptible(card->wait_q,
7153 qeth_threads_running(card, threads) == 0);
7154}
7155
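/*
 * Tear the card down along its state machine: UP -> SOFTSETUP (close
 * the netdev and, unless this is a hard stop, deregister the MAC and
 * stop the LAN), SOFTSETUP -> HARDSETUP (clear VLANs, IP lists and
 * pending IPA commands) and HARDSETUP -> DOWN (clear QDIO, then the
 * command buffers).
 */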
7156static int
7157qeth_stop_card(struct qeth_card *card)
7158{
7159 int rc = 0;
7160
7161	QETH_DBF_TEXT(setup, 2, "stopcard");
7162 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
7163
7164 qeth_set_allowed_threads(card, 0, 1);
7165 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
7166 return -ERESTARTSYS;
7167 if (card->read.state == CH_STATE_UP &&
7168 card->write.state == CH_STATE_UP &&
7169 (card->state == CARD_STATE_UP)) {
7170 rtnl_lock();
7171 dev_close(card->dev);
7172 rtnl_unlock();
7173 if (!card->use_hard_stop) {
7174 __u8 *mac = &card->dev->dev_addr[0];
7175 rc = qeth_layer2_send_delmac(card, mac);
7176 QETH_DBF_TEXT_(setup, 2, "Lerr%d", rc);
7177 if ((rc = qeth_send_stoplan(card)))
7178 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
7179 }
7180 card->state = CARD_STATE_SOFTSETUP;
7181 }
7182 if (card->state == CARD_STATE_SOFTSETUP) {
7183#ifdef CONFIG_QETH_VLAN
7184 if (card->options.layer2)
7185 qeth_layer2_process_vlans(card, 1);
7186#endif
7187 qeth_clear_ip_list(card, !card->use_hard_stop, 1);
7188 qeth_clear_ipacmd_list(card);
7189 card->state = CARD_STATE_HARDSETUP;
7190 }
7191 if (card->state == CARD_STATE_HARDSETUP) {
7192 if ((!card->use_hard_stop) &&
7193 (!card->options.layer2))
7194 if ((rc = qeth_put_unique_id(card)))
7195 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7196 qeth_qdio_clear_card(card, 0);
7197 qeth_clear_qdio_buffers(card);
7198 qeth_clear_working_pool_list(card);
7199 card->state = CARD_STATE_DOWN;
7200 }
7201 if (card->state == CARD_STATE_DOWN) {
7202 qeth_clear_cmd_buffers(&card->read);
7203 qeth_clear_cmd_buffers(&card->write);
7204 }
7205 card->use_hard_stop = 0;
7206 return rc;
7207}
7208
7209
7210static int
7211qeth_get_unique_id(struct qeth_card *card)
7212{
7213 int rc = 0;
7214#ifdef CONFIG_QETH_IPV6
7215 struct qeth_cmd_buffer *iob;
7216 struct qeth_ipa_cmd *cmd;
7217
7218 QETH_DBF_TEXT(setup, 2, "guniqeid");
7219
7220 if (!qeth_is_supported(card,IPA_IPV6)) {
7221 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7222 UNIQUE_ID_NOT_BY_CARD;
7223 return 0;
7224 }
7225
7226 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
7227 QETH_PROT_IPV6);
7228 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7229 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
7230 card->info.unique_id;
7231
7232 rc = qeth_send_ipa_cmd(card, iob, qeth_get_unique_id_cb, NULL);
7233#else
7234 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7235 UNIQUE_ID_NOT_BY_CARD;
7236#endif
7237 return rc;
7238}
7239static void
7240qeth_print_status_with_portname(struct qeth_card *card)
7241{
7242 char dbf_text[15];
7243 int i;
7244
7245	memcpy(dbf_text, card->info.portname + 1, 8);
7246 for (i = 0; i < 8; i++)
7247 dbf_text[i] =
7248 (char) _ebcasc[(__u8) dbf_text[i]];
7249 dbf_text[8] = 0;
7250	printk(KERN_INFO "qeth: Device %s/%s/%s is a%s card%s%s%s\n"
7251 "with link type %s (portname: %s)\n",
7252 CARD_RDEV_ID(card),
7253 CARD_WDEV_ID(card),
7254 CARD_DDEV_ID(card),
7255 qeth_get_cardname(card),
7256 (card->info.mcl_level[0]) ? " (level: " : "",
7257 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7258 (card->info.mcl_level[0]) ? ")" : "",
7259 qeth_get_cardname_short(card),
7260 dbf_text);
7261
7262}
7263
7264static void
7265qeth_print_status_no_portname(struct qeth_card *card)
7266{
7267 if (card->info.portname[0])
7268		printk(KERN_INFO "qeth: Device %s/%s/%s is a%s "
7269 "card%s%s%s\nwith link type %s "
7270 "(no portname needed by interface).\n",
7271 CARD_RDEV_ID(card),
7272 CARD_WDEV_ID(card),
7273 CARD_DDEV_ID(card),
7274 qeth_get_cardname(card),
7275 (card->info.mcl_level[0]) ? " (level: " : "",
7276 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7277 (card->info.mcl_level[0]) ? ")" : "",
7278 qeth_get_cardname_short(card));
7279 else
7280		printk(KERN_INFO "qeth: Device %s/%s/%s is a%s "
7281 "card%s%s%s\nwith link type %s.\n",
7282 CARD_RDEV_ID(card),
7283 CARD_WDEV_ID(card),
7284 CARD_DDEV_ID(card),
7285 qeth_get_cardname(card),
7286 (card->info.mcl_level[0]) ? " (level: " : "",
7287 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7288 (card->info.mcl_level[0]) ? ")" : "",
7289 qeth_get_cardname_short(card));
7290}
7291
7292static void
7293qeth_print_status_message(struct qeth_card *card)
7294{
7295 switch (card->info.type) {
7296 case QETH_CARD_TYPE_OSAE:
7297		/* VM will use a non-zero first character
7298		 * to indicate a HiperSockets-like reporting
7299		 * of the level; OSA sets the first character to zero.
7300		 */
7301 if (!card->info.mcl_level[0]) {
7302 sprintf(card->info.mcl_level,"%02x%02x",
7303 card->info.mcl_level[2],
7304 card->info.mcl_level[3]);
7305
7306 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
7307 break;
7308 }
7309 /* fallthrough */
7310 case QETH_CARD_TYPE_IQD:
7311 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
7312 card->info.mcl_level[0]];
7313 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
7314 card->info.mcl_level[1]];
7315 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
7316 card->info.mcl_level[2]];
7317 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
7318 card->info.mcl_level[3]];
7319 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
7320 break;
7321 default:
7322 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
7323 }
7324 if (card->info.portname_required)
7325 qeth_print_status_with_portname(card);
7326 else
7327 qeth_print_status_no_portname(card);
7328}
7329
7330static int
7331qeth_register_netdev(struct qeth_card *card)
7332{
7333 QETH_DBF_TEXT(setup, 3, "regnetd");
7334 if (card->dev->reg_state != NETREG_UNINITIALIZED) {
7335 qeth_netdev_init(card->dev);
7336 return 0;
7337 }
7338 /* sysfs magic */
7339 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
7340 return register_netdev(card->dev);
7341}
7342
7343static void
7344qeth_start_again(struct qeth_card *card)
7345{
7346 QETH_DBF_TEXT(setup ,2, "startag");
7347
7348 rtnl_lock();
7349 dev_open(card->dev);
7350 rtnl_unlock();
7351 /* this also sets saved unicast addresses */
7352 qeth_set_multicast_list(card->dev);
7353}
7354
7355
7356/* Layer 2 specific stuff */
7357#define IGNORE_PARAM_EQ(option,value,reset_value,msg) \
7358	if (card->options.option == value) { \
7359		PRINT_ERR("%s not supported with layer 2 " \
7360			  "functionality, ignoring option on read " \
7361			  "channel device %s.\n", msg, CARD_RDEV_ID(card)); \
7362		card->options.option = reset_value; \
7363	}
7364#define IGNORE_PARAM_NEQ(option,value,reset_value,msg) \
7365	if (card->options.option != value) { \
7366		PRINT_ERR("%s not supported with layer 2 " \
7367			  "functionality, ignoring option on read " \
7368			  "channel device %s.\n", msg, CARD_RDEV_ID(card)); \
7369		card->options.option = reset_value; \
7370	}
7371
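/*
 * For illustration only: the first IGNORE_PARAM_NEQ() invocation below
 * expands (modulo line breaks) to roughly
 *
 *	if (card->options.route4.type != NO_ROUTER) {
 *		PRINT_ERR("%s not supported with layer 2 "
 *			  "functionality, ignoring option on read "
 *			  "channel device %s.\n",
 *			  "Routing options are", CARD_RDEV_ID(card));
 *		card->options.route4.type = NO_ROUTER;
 *	}
 */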
7372
7373static void qeth_make_parameters_consistent(struct qeth_card *card)
7374{
7375
7376 if (card->options.layer2) {
7377 if (card->info.type == QETH_CARD_TYPE_IQD) {
7378 PRINT_ERR("Device %s does not support " \
7379 "layer 2 functionality. " \
7380 "Ignoring layer2 option.\n",CARD_BUS_ID(card));
7381 }
7382 IGNORE_PARAM_NEQ(route4.type, NO_ROUTER, NO_ROUTER,
7383 "Routing options are");
7384#ifdef CONFIG_QETH_IPV6
7385 IGNORE_PARAM_NEQ(route6.type, NO_ROUTER, NO_ROUTER,
7386 "Routing options are");
7387#endif
7388 IGNORE_PARAM_EQ(checksum_type, HW_CHECKSUMMING,
7389 QETH_CHECKSUM_DEFAULT,
7390 "Checksumming options are");
7391 IGNORE_PARAM_NEQ(broadcast_mode, QETH_TR_BROADCAST_ALLRINGS,
7392 QETH_TR_BROADCAST_ALLRINGS,
7393 "Broadcast mode options are");
7394 IGNORE_PARAM_NEQ(macaddr_mode, QETH_TR_MACADDR_NONCANONICAL,
7395 QETH_TR_MACADDR_NONCANONICAL,
7396 "Canonical MAC addr options are");
7397 IGNORE_PARAM_NEQ(fake_broadcast, 0, 0,
7398 "Broadcast faking options are");
7399 IGNORE_PARAM_NEQ(add_hhlen, DEFAULT_ADD_HHLEN,
7400 DEFAULT_ADD_HHLEN,"Option add_hhlen is");
7401 IGNORE_PARAM_NEQ(fake_ll, 0, 0,"Option fake_ll is");
7402 }
7403}
7404
7405
7406static int
7407qeth_set_online(struct ccwgroup_device *gdev)
7408{
7409 struct qeth_card *card = gdev->dev.driver_data;
7410 int rc = 0;
7411 enum qeth_card_states recover_flag;
7412
7413 BUG_ON(!card);
7414 QETH_DBF_TEXT(setup ,2, "setonlin");
7415 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
7416
7417 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
7418 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)){
7419 PRINT_WARN("set_online of card %s interrupted by user!\n",
7420 CARD_BUS_ID(card));
7421 return -ERESTARTSYS;
7422 }
7423
7424 recover_flag = card->state;
7425 if ((rc = ccw_device_set_online(CARD_RDEV(card))) ||
7426 (rc = ccw_device_set_online(CARD_WDEV(card))) ||
7427 (rc = ccw_device_set_online(CARD_DDEV(card)))){
7428 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
7429 return -EIO;
7430 }
7431
7432 if (card->options.layer2)
7433 qeth_make_parameters_consistent(card);
7434
7435 if ((rc = qeth_hardsetup_card(card))){
7436 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7437 goto out_remove;
7438 }
7439 card->state = CARD_STATE_HARDSETUP;
7440
7441 if (!(rc = qeth_query_ipassists(card,QETH_PROT_IPV4)))
7442 rc = qeth_get_unique_id(card);
7443
7444 if (rc && card->options.layer2 == 0) {
7445 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
7446 goto out_remove;
7447 }
7448 qeth_print_status_message(card);
7449 if ((rc = qeth_register_netdev(card))){
7450 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
7451 goto out_remove;
7452 }
7453 if ((rc = qeth_softsetup_card(card))){
7454 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
7455 goto out_remove;
7456 }
7457 card->state = CARD_STATE_SOFTSETUP;
7458
7459 if ((rc = qeth_init_qdio_queues(card))){
7460 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
7461 goto out_remove;
7462 }
7463	/* maybe it was set offline without ifconfig down;
7464	 * we can also use this state for recovery purposes */
7465 qeth_set_allowed_threads(card, 0xffffffff, 0);
7466 if (recover_flag == CARD_STATE_RECOVER)
7467 qeth_start_again(card);
7468 qeth_notify_processes();
7469 return 0;
7470out_remove:
7471 card->use_hard_stop = 1;
7472 qeth_stop_card(card);
7473 ccw_device_set_offline(CARD_DDEV(card));
7474 ccw_device_set_offline(CARD_WDEV(card));
7475 ccw_device_set_offline(CARD_RDEV(card));
7476 if (recover_flag == CARD_STATE_RECOVER)
7477 card->state = CARD_STATE_RECOVER;
7478 else
7479 card->state = CARD_STATE_DOWN;
7480 return -ENODEV;
7481}
7482
7483static struct ccw_device_id qeth_ids[] = {
7484	{CCW_DEVICE(0x1731, 0x01), .driver_info = QETH_CARD_TYPE_OSAE},
7485	{CCW_DEVICE(0x1731, 0x05), .driver_info = QETH_CARD_TYPE_IQD},
7486 {},
7487};
7488MODULE_DEVICE_TABLE(ccw, qeth_ids);
7489
7490struct device *qeth_root_dev = NULL;
7491
7492struct ccwgroup_driver qeth_ccwgroup_driver = {
7493 .owner = THIS_MODULE,
7494 .name = "qeth",
7495 .driver_id = 0xD8C5E3C8,
7496 .probe = qeth_probe_device,
7497 .remove = qeth_remove_device,
7498 .set_online = qeth_set_online,
7499 .set_offline = qeth_set_offline,
7500};
7501
7502struct ccw_driver qeth_ccw_driver = {
7503 .name = "qeth",
7504 .ids = qeth_ids,
7505 .probe = ccwgroup_probe_ccwdev,
7506 .remove = ccwgroup_remove_ccwdev,
7507};
7508
7509
7510static void
7511qeth_unregister_dbf_views(void)
7512{
7513 if (qeth_dbf_setup)
7514 debug_unregister(qeth_dbf_setup);
7515 if (qeth_dbf_qerr)
7516 debug_unregister(qeth_dbf_qerr);
7517 if (qeth_dbf_sense)
7518 debug_unregister(qeth_dbf_sense);
7519 if (qeth_dbf_misc)
7520 debug_unregister(qeth_dbf_misc);
7521 if (qeth_dbf_data)
7522 debug_unregister(qeth_dbf_data);
7523 if (qeth_dbf_control)
7524 debug_unregister(qeth_dbf_control);
7525 if (qeth_dbf_trace)
7526 debug_unregister(qeth_dbf_trace);
7527}
7528static int
7529qeth_register_dbf_views(void)
7530{
7531 qeth_dbf_setup = debug_register(QETH_DBF_SETUP_NAME,
7532 QETH_DBF_SETUP_INDEX,
7533 QETH_DBF_SETUP_NR_AREAS,
7534 QETH_DBF_SETUP_LEN);
7535 qeth_dbf_misc = debug_register(QETH_DBF_MISC_NAME,
7536 QETH_DBF_MISC_INDEX,
7537 QETH_DBF_MISC_NR_AREAS,
7538 QETH_DBF_MISC_LEN);
7539 qeth_dbf_data = debug_register(QETH_DBF_DATA_NAME,
7540 QETH_DBF_DATA_INDEX,
7541 QETH_DBF_DATA_NR_AREAS,
7542 QETH_DBF_DATA_LEN);
7543 qeth_dbf_control = debug_register(QETH_DBF_CONTROL_NAME,
7544 QETH_DBF_CONTROL_INDEX,
7545 QETH_DBF_CONTROL_NR_AREAS,
7546 QETH_DBF_CONTROL_LEN);
7547 qeth_dbf_sense = debug_register(QETH_DBF_SENSE_NAME,
7548 QETH_DBF_SENSE_INDEX,
7549 QETH_DBF_SENSE_NR_AREAS,
7550 QETH_DBF_SENSE_LEN);
7551 qeth_dbf_qerr = debug_register(QETH_DBF_QERR_NAME,
7552 QETH_DBF_QERR_INDEX,
7553 QETH_DBF_QERR_NR_AREAS,
7554 QETH_DBF_QERR_LEN);
7555 qeth_dbf_trace = debug_register(QETH_DBF_TRACE_NAME,
7556 QETH_DBF_TRACE_INDEX,
7557 QETH_DBF_TRACE_NR_AREAS,
7558 QETH_DBF_TRACE_LEN);
7559
7560 if ((qeth_dbf_setup == NULL) || (qeth_dbf_misc == NULL) ||
7561 (qeth_dbf_data == NULL) || (qeth_dbf_control == NULL) ||
7562 (qeth_dbf_sense == NULL) || (qeth_dbf_qerr == NULL) ||
7563 (qeth_dbf_trace == NULL)) {
7564 qeth_unregister_dbf_views();
7565 return -ENOMEM;
7566 }
7567 debug_register_view(qeth_dbf_setup, &debug_hex_ascii_view);
7568 debug_set_level(qeth_dbf_setup, QETH_DBF_SETUP_LEVEL);
7569
7570 debug_register_view(qeth_dbf_misc, &debug_hex_ascii_view);
7571 debug_set_level(qeth_dbf_misc, QETH_DBF_MISC_LEVEL);
7572
7573 debug_register_view(qeth_dbf_data, &debug_hex_ascii_view);
7574 debug_set_level(qeth_dbf_data, QETH_DBF_DATA_LEVEL);
7575
7576 debug_register_view(qeth_dbf_control, &debug_hex_ascii_view);
7577 debug_set_level(qeth_dbf_control, QETH_DBF_CONTROL_LEVEL);
7578
7579 debug_register_view(qeth_dbf_sense, &debug_hex_ascii_view);
7580 debug_set_level(qeth_dbf_sense, QETH_DBF_SENSE_LEVEL);
7581
7582 debug_register_view(qeth_dbf_qerr, &debug_hex_ascii_view);
7583 debug_set_level(qeth_dbf_qerr, QETH_DBF_QERR_LEVEL);
7584
7585 debug_register_view(qeth_dbf_trace, &debug_hex_ascii_view);
7586 debug_set_level(qeth_dbf_trace, QETH_DBF_TRACE_LEVEL);
7587
7588 return 0;
7589}
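/*
 * All seven debug areas above follow the same register/view/level
 * pattern. A minimal sketch for one additional, purely hypothetical
 * area (handle, area name and sizes are made up; the s390 debug calls
 * are the same ones used above):
 *
 *	static debug_info_t *qeth_dbf_foo;
 *
 *	qeth_dbf_foo = debug_register("qeth_foo", 2, 1, 8);
 *	if (qeth_dbf_foo == NULL)
 *		return -ENOMEM;
 *	debug_register_view(qeth_dbf_foo, &debug_hex_ascii_view);
 *	debug_set_level(qeth_dbf_foo, 2);
 */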
7590
7591#ifdef CONFIG_QETH_IPV6
7592extern struct neigh_table arp_tbl;
7593static struct neigh_ops *arp_direct_ops;
7594static int (*qeth_old_arp_constructor) (struct neighbour *);
7595
7596static struct neigh_ops arp_direct_ops_template = {
7597 .family = AF_INET,
7598 .destructor = NULL,
7599 .solicit = NULL,
7600 .error_report = NULL,
7601 .output = dev_queue_xmit,
7602 .connected_output = dev_queue_xmit,
7603 .hh_output = dev_queue_xmit,
7604 .queue_xmit = dev_queue_xmit
7605};
7606
7607static int
7608qeth_arp_constructor(struct neighbour *neigh)
7609{
7610 struct net_device *dev = neigh->dev;
7611 struct in_device *in_dev;
7612 struct neigh_parms *parms;
7613 struct qeth_card *card;
7614
7615 card = qeth_get_card_from_dev(dev);
7616 if (card == NULL)
7617 goto out;
7618 if((card->options.layer2) ||
7619 (card->dev->hard_header == qeth_fake_header))
7620 goto out;
7621
7622 rcu_read_lock();
7623 in_dev = rcu_dereference(__in_dev_get(dev));
7624 if (in_dev == NULL) {
7625 rcu_read_unlock();
7626 return -EINVAL;
7627 }
7628
7629 parms = in_dev->arp_parms;
7630 __neigh_parms_put(neigh->parms);
7631 neigh->parms = neigh_parms_clone(parms);
7632 rcu_read_unlock();
7633
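	/* The adapter resolves addresses itself for these devices, so
	 * mark the neighbour as not needing ARP and route all output
	 * straight to dev_queue_xmit() via arp_direct_ops. */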
7634 neigh->type = inet_addr_type(*(u32 *) neigh->primary_key);
7635 neigh->nud_state = NUD_NOARP;
7636 neigh->ops = arp_direct_ops;
7637 neigh->output = neigh->ops->queue_xmit;
7638 return 0;
7639out:
7640 return qeth_old_arp_constructor(neigh);
7641}
7642#endif /*CONFIG_QETH_IPV6*/
7643
7644/*
7645 * IP address takeover related functions
7646 */
7647static void
7648qeth_clear_ipato_list(struct qeth_card *card)
7649{
7650 struct qeth_ipato_entry *ipatoe, *tmp;
7651 unsigned long flags;
7652
7653 spin_lock_irqsave(&card->ip_lock, flags);
7654 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
7655 list_del(&ipatoe->entry);
7656 kfree(ipatoe);
7657 }
7658 spin_unlock_irqrestore(&card->ip_lock, flags);
7659}
7660
7661int
7662qeth_add_ipato_entry(struct qeth_card *card, struct qeth_ipato_entry *new)
7663{
7664 struct qeth_ipato_entry *ipatoe;
7665 unsigned long flags;
7666 int rc = 0;
7667
7668 QETH_DBF_TEXT(trace, 2, "addipato");
7669 spin_lock_irqsave(&card->ip_lock, flags);
7670 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
7671 if (ipatoe->proto != new->proto)
7672 continue;
7673 if (!memcmp(ipatoe->addr, new->addr,
7674 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
7675 (ipatoe->mask_bits == new->mask_bits)){
7676 PRINT_WARN("ipato entry already exists!\n");
7677 rc = -EEXIST;
7678 break;
7679 }
7680 }
7681 if (!rc) {
7682 list_add_tail(&new->entry, &card->ipato.entries);
7683 }
7684 spin_unlock_irqrestore(&card->ip_lock, flags);
7685 return rc;
7686}
7687
7688void
7689qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto,
7690 u8 *addr, int mask_bits)
7691{
7692 struct qeth_ipato_entry *ipatoe, *tmp;
7693 unsigned long flags;
7694
7695 QETH_DBF_TEXT(trace, 2, "delipato");
7696 spin_lock_irqsave(&card->ip_lock, flags);
7697 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry){
7698 if (ipatoe->proto != proto)
7699 continue;
7700 if (!memcmp(ipatoe->addr, addr,
7701 (proto == QETH_PROT_IPV4)? 4:16) &&
7702 (ipatoe->mask_bits == mask_bits)){
7703 list_del(&ipatoe->entry);
7704 kfree(ipatoe);
7705 }
7706 }
7707 spin_unlock_irqrestore(&card->ip_lock, flags);
7708}
7709
7710static inline void
7711qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
7712{
7713 int i, j;
7714 u8 octet;
7715
7716 for (i = 0; i < len; ++i){
7717 octet = addr[i];
7718 for (j = 7; j >= 0; --j){
7719 bits[i*8 + j] = octet & 1;
7720 octet >>= 1;
7721 }
7722 }
7723}
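/*
 * A quick worked example for the helper above: j counts down from 7, so
 * the most significant bit of each octet lands at the lowest index.
 *
 *	u8 bits[8];
 *	u8 octet = 0xc0;	   (192, i.e. binary 1100 0000)
 *
 *	qeth_convert_addr_to_bits(&octet, bits, 1);
 *
 * bits[] is now { 1, 1, 0, 0, 0, 0, 0, 0 }.
 */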
7724
7725static int
7726qeth_is_addr_covered_by_ipato(struct qeth_card *card, struct qeth_ipaddr *addr)
7727{
7728 struct qeth_ipato_entry *ipatoe;
7729 u8 addr_bits[128] = {0, };
7730 u8 ipatoe_bits[128] = {0, };
7731 int rc = 0;
7732
7733 if (!card->ipato.enabled)
7734 return 0;
7735
7736 qeth_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
7737 (addr->proto == QETH_PROT_IPV4)? 4:16);
7738 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
7739 if (addr->proto != ipatoe->proto)
7740 continue;
7741 qeth_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
7742 (ipatoe->proto==QETH_PROT_IPV4) ?
7743 4:16);
7744 if (addr->proto == QETH_PROT_IPV4)
7745 rc = !memcmp(addr_bits, ipatoe_bits,
7746 min(32, ipatoe->mask_bits));
7747 else
7748 rc = !memcmp(addr_bits, ipatoe_bits,
7749 min(128, ipatoe->mask_bits));
7750 if (rc)
7751 break;
7752 }
7753 /* invert? */
7754 if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
7755 rc = !rc;
7756 else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
7757 rc = !rc;
7758
7759 return rc;
7760}
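/*
 * Worked example for the rules above: with card->ipato.enabled set and
 * a single IPv4 takeover entry for 192.168.0.0 with mask_bits = 16, the
 * address 192.168.17.5 is covered (only the first min(32, 16) = 16
 * expanded bits are compared, and they match), while 10.1.2.3 is not.
 * Setting card->ipato.invert4 reverses both answers.
 */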
7761
7762/*
7763 * VIPA related functions
7764 */
7765int
7766qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
7767 const u8 *addr)
7768{
7769 struct qeth_ipaddr *ipaddr;
7770 unsigned long flags;
7771 int rc = 0;
7772
7773 ipaddr = qeth_get_addr_buffer(proto);
7774 if (ipaddr){
7775 if (proto == QETH_PROT_IPV4){
7776 QETH_DBF_TEXT(trace, 2, "addvipa4");
7777 memcpy(&ipaddr->u.a4.addr, addr, 4);
7778 ipaddr->u.a4.mask = 0;
7779#ifdef CONFIG_QETH_IPV6
7780 } else if (proto == QETH_PROT_IPV6){
7781 QETH_DBF_TEXT(trace, 2, "addvipa6");
7782 memcpy(&ipaddr->u.a6.addr, addr, 16);
7783 ipaddr->u.a6.pfxlen = 0;
7784#endif
7785 }
7786 ipaddr->type = QETH_IP_TYPE_VIPA;
7787 ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
7788 ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
7789 } else
7790 return -ENOMEM;
7791 spin_lock_irqsave(&card->ip_lock, flags);
7792 if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
7793 __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
7794 rc = -EEXIST;
7795 spin_unlock_irqrestore(&card->ip_lock, flags);
7796 if (rc){
7797 PRINT_WARN("Cannot add VIPA. Address already exists!\n");
7798 return rc;
7799 }
7800 if (!qeth_add_ip(card, ipaddr))
7801 kfree(ipaddr);
7802 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
7803 schedule_work(&card->kernel_thread_starter);
7804 return rc;
7805}
7806
7807void
7808qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
7809 const u8 *addr)
7810{
7811 struct qeth_ipaddr *ipaddr;
7812
7813 ipaddr = qeth_get_addr_buffer(proto);
7814 if (ipaddr){
7815 if (proto == QETH_PROT_IPV4){
7816 QETH_DBF_TEXT(trace, 2, "delvipa4");
7817 memcpy(&ipaddr->u.a4.addr, addr, 4);
7818 ipaddr->u.a4.mask = 0;
7819#ifdef CONFIG_QETH_IPV6
7820 } else if (proto == QETH_PROT_IPV6){
7821 QETH_DBF_TEXT(trace, 2, "delvipa6");
7822 memcpy(&ipaddr->u.a6.addr, addr, 16);
7823 ipaddr->u.a6.pfxlen = 0;
7824#endif
7825 }
7826 ipaddr->type = QETH_IP_TYPE_VIPA;
7827 } else
7828 return;
7829 if (!qeth_delete_ip(card, ipaddr))
7830 kfree(ipaddr);
7831 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
7832 schedule_work(&card->kernel_thread_starter);
7833}
7834
7835/*
7836 * proxy ARP related functions
7837 */
7838int
7839qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
7840 const u8 *addr)
7841{
7842 struct qeth_ipaddr *ipaddr;
7843 unsigned long flags;
7844 int rc = 0;
7845
7846 ipaddr = qeth_get_addr_buffer(proto);
7847 if (ipaddr){
7848 if (proto == QETH_PROT_IPV4){
7849 QETH_DBF_TEXT(trace, 2, "addrxip4");
7850 memcpy(&ipaddr->u.a4.addr, addr, 4);
7851 ipaddr->u.a4.mask = 0;
7852#ifdef CONFIG_QETH_IPV6
7853 } else if (proto == QETH_PROT_IPV6){
7854 QETH_DBF_TEXT(trace, 2, "addrxip6");
7855 memcpy(&ipaddr->u.a6.addr, addr, 16);
7856 ipaddr->u.a6.pfxlen = 0;
7857#endif
7858 }
7859 ipaddr->type = QETH_IP_TYPE_RXIP;
7860 ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
7861 ipaddr->del_flags = 0;
7862 } else
7863 return -ENOMEM;
7864 spin_lock_irqsave(&card->ip_lock, flags);
7865 if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
7866 __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
7867 rc = -EEXIST;
7868 spin_unlock_irqrestore(&card->ip_lock, flags);
7869 if (rc){
7870 PRINT_WARN("Cannot add RXIP. Address already exists!\n");
7871 return rc;
7872 }
7873 if (!qeth_add_ip(card, ipaddr))
7874 kfree(ipaddr);
7875 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
7876 schedule_work(&card->kernel_thread_starter);
7877	return rc;
7878}
7879
7880void
7881qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
7882 const u8 *addr)
7883{
7884 struct qeth_ipaddr *ipaddr;
7885
7886 ipaddr = qeth_get_addr_buffer(proto);
7887 if (ipaddr){
7888 if (proto == QETH_PROT_IPV4){
7889			QETH_DBF_TEXT(trace, 2, "delrxip4");
7890 memcpy(&ipaddr->u.a4.addr, addr, 4);
7891 ipaddr->u.a4.mask = 0;
7892#ifdef CONFIG_QETH_IPV6
7893 } else if (proto == QETH_PROT_IPV6){
7894			QETH_DBF_TEXT(trace, 2, "delrxip6");
7895 memcpy(&ipaddr->u.a6.addr, addr, 16);
7896 ipaddr->u.a6.pfxlen = 0;
7897#endif
7898 }
7899 ipaddr->type = QETH_IP_TYPE_RXIP;
7900 } else
7901 return;
7902 if (!qeth_delete_ip(card, ipaddr))
7903 kfree(ipaddr);
7904 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
7905 schedule_work(&card->kernel_thread_starter);
7906}
7907
7908/**
7909 * IP event handler
7910 */
7911static int
7912qeth_ip_event(struct notifier_block *this,
7913 unsigned long event,void *ptr)
7914{
7915 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
7916	struct net_device *dev = (struct net_device *) ifa->ifa_dev->dev;
7917 struct qeth_ipaddr *addr;
7918 struct qeth_card *card;
7919
7920 QETH_DBF_TEXT(trace,3,"ipevent");
7921 card = qeth_get_card_from_dev(dev);
7922 if (!card)
7923 return NOTIFY_DONE;
7924 if (card->options.layer2)
7925 return NOTIFY_DONE;
7926
7927 addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
7928 if (addr != NULL) {
7929 addr->u.a4.addr = ifa->ifa_address;
7930 addr->u.a4.mask = ifa->ifa_mask;
7931 addr->type = QETH_IP_TYPE_NORMAL;
7932 } else
7933 goto out;
7934
7935 switch(event) {
7936 case NETDEV_UP:
7937 if (!qeth_add_ip(card, addr))
7938 kfree(addr);
7939 break;
7940 case NETDEV_DOWN:
7941 if (!qeth_delete_ip(card, addr))
7942 kfree(addr);
7943 break;
7944 default:
7945 break;
7946 }
7947 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
7948 schedule_work(&card->kernel_thread_starter);
7949out:
7950 return NOTIFY_DONE;
7951}
7952
7953static struct notifier_block qeth_ip_notifier = {
7954	.notifier_call = qeth_ip_event,
7956};
7957
7958#ifdef CONFIG_QETH_IPV6
7959/**
7960 * IPv6 event handler
7961 */
7962static int
7963qeth_ip6_event(struct notifier_block *this,
7964 unsigned long event,void *ptr)
7965{
7966
7967 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
7968 struct net_device *dev = (struct net_device *)ifa->idev->dev;
7969 struct qeth_ipaddr *addr;
7970 struct qeth_card *card;
7971
7972 QETH_DBF_TEXT(trace,3,"ip6event");
7973
7974 card = qeth_get_card_from_dev(dev);
7975 if (!card)
7976 return NOTIFY_DONE;
7977 if (!qeth_is_supported(card, IPA_IPV6))
7978 return NOTIFY_DONE;
7979
7980 addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
7981 if (addr != NULL) {
7982 memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
7983 addr->u.a6.pfxlen = ifa->prefix_len;
7984 addr->type = QETH_IP_TYPE_NORMAL;
7985 } else
7986 goto out;
7987
7988 switch(event) {
7989 case NETDEV_UP:
7990 if (!qeth_add_ip(card, addr))
7991 kfree(addr);
7992 break;
7993 case NETDEV_DOWN:
7994 if (!qeth_delete_ip(card, addr))
7995 kfree(addr);
7996 break;
7997 default:
7998 break;
7999 }
8000 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
8001 schedule_work(&card->kernel_thread_starter);
8002out:
8003 return NOTIFY_DONE;
8004}
8005
8006static struct notifier_block qeth_ip6_notifier = {
8007	.notifier_call = qeth_ip6_event,
8009};
8010#endif
8011
8012static int
8013qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
8014{
8015
8016 struct device *entry;
8017 struct qeth_card *card;
8018
8019 down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
8020 list_for_each_entry(entry, &qeth_ccwgroup_driver.driver.devices,
8021 driver_list) {
8022 card = (struct qeth_card *) entry->driver_data;
8023 qeth_clear_ip_list(card, 0, 0);
8024 qeth_qdio_clear_card(card, 0);
8025 }
8026 up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
8027 return NOTIFY_DONE;
8028}
8029
8030
8031static struct notifier_block qeth_reboot_notifier = {
8032	.notifier_call = qeth_reboot_event,
8034};
8035
8036static int
8037qeth_register_notifiers(void)
8038{
8039 int r;
8040
8041 QETH_DBF_TEXT(trace,5,"regnotif");
8042 if ((r = register_reboot_notifier(&qeth_reboot_notifier)))
8043 return r;
8044 if ((r = register_inetaddr_notifier(&qeth_ip_notifier)))
8045 goto out_reboot;
8046#ifdef CONFIG_QETH_IPV6
8047 if ((r = register_inet6addr_notifier(&qeth_ip6_notifier)))
8048 goto out_ipv4;
8049#endif
8050 return 0;
8051
8052#ifdef CONFIG_QETH_IPV6
8053out_ipv4:
8054 unregister_inetaddr_notifier(&qeth_ip_notifier);
8055#endif
8056out_reboot:
8057 unregister_reboot_notifier(&qeth_reboot_notifier);
8058 return r;
8059}
8060
8061/**
8062 * unregister all event notifiers
8063 */
8064static void
8065qeth_unregister_notifiers(void)
8066{
8067
8068 QETH_DBF_TEXT(trace,5,"unregnot");
8069 BUG_ON(unregister_reboot_notifier(&qeth_reboot_notifier));
8070 BUG_ON(unregister_inetaddr_notifier(&qeth_ip_notifier));
8071#ifdef CONFIG_QETH_IPV6
8072 BUG_ON(unregister_inet6addr_notifier(&qeth_ip6_notifier));
8073#endif /* QETH_IPV6 */
8074
8075}
8076
8077#ifdef CONFIG_QETH_IPV6
8078static int
8079qeth_ipv6_init(void)
8080{
8081 qeth_old_arp_constructor = arp_tbl.constructor;
8082 write_lock(&arp_tbl.lock);
8083 arp_tbl.constructor = qeth_arp_constructor;
8084 write_unlock(&arp_tbl.lock);
8085
8086	arp_direct_ops = kmalloc(sizeof(struct neigh_ops), GFP_KERNEL);
8088 if (!arp_direct_ops)
8089 return -ENOMEM;
8090
8091 memcpy(arp_direct_ops, &arp_direct_ops_template,
8092 sizeof(struct neigh_ops));
8093
8094 return 0;
8095}
8096
8097static void
8098qeth_ipv6_uninit(void)
8099{
8100 write_lock(&arp_tbl.lock);
8101 arp_tbl.constructor = qeth_old_arp_constructor;
8102 write_unlock(&arp_tbl.lock);
8103 kfree(arp_direct_ops);
8104}
8105#endif /* CONFIG_QETH_IPV6 */
8106
8107static void
8108qeth_sysfs_unregister(void)
8109{
8110 qeth_remove_driver_attributes();
8111 ccw_driver_unregister(&qeth_ccw_driver);
8112 ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
8113 s390_root_dev_unregister(qeth_root_dev);
8114}
8115/**
8116 * register qeth at sysfs
8117 */
8118static int
8119qeth_sysfs_register(void)
8120{
8121 int rc=0;
8122
8123 rc = ccwgroup_driver_register(&qeth_ccwgroup_driver);
8124 if (rc)
8125 return rc;
8126 rc = ccw_driver_register(&qeth_ccw_driver);
8127 if (rc)
8128 return rc;
8129 rc = qeth_create_driver_attributes();
8130 if (rc)
8131 return rc;
8132 qeth_root_dev = s390_root_dev_register("qeth");
8133 if (IS_ERR(qeth_root_dev)) {
8134 rc = PTR_ERR(qeth_root_dev);
8135 return rc;
8136 }
8137 return 0;
8138}
8139
8140/***
8141 * init function
8142 */
8143static int __init
8144qeth_init(void)
8145{
8146 int rc=0;
8147
8148 qeth_eyecatcher();
8149 PRINT_INFO("loading %s (%s/%s/%s/%s/%s/%s/%s %s %s)\n",
8150 version, VERSION_QETH_C, VERSION_QETH_H,
8151 VERSION_QETH_MPC_H, VERSION_QETH_MPC_C,
8152 VERSION_QETH_FS_H, VERSION_QETH_PROC_C,
8153 VERSION_QETH_SYS_C, QETH_VERSION_IPV6,
8154 QETH_VERSION_VLAN);
8155
8156 INIT_LIST_HEAD(&qeth_card_list.list);
8157 INIT_LIST_HEAD(&qeth_notify_list);
8158 spin_lock_init(&qeth_notify_lock);
8159 rwlock_init(&qeth_card_list.rwlock);
8160
8161	if ((rc = qeth_register_dbf_views()))
8162		goto out_err;
8163	if ((rc = qeth_sysfs_register()))
8164		goto out_sysfs;
8165
8166#ifdef CONFIG_QETH_IPV6
8167	if ((rc = qeth_ipv6_init())) {
8168		PRINT_ERR("Out of memory during ipv6 init.\n");
8169		goto out_sysfs;
8170	}
8171#endif /* QETH_IPV6 */
8172	if ((rc = qeth_register_notifiers()))
8173		goto out_ipv6;
8174	if ((rc = qeth_create_procfs_entries()))
8175		goto out_notifiers;
8176
8177 return rc;
8178
8179out_notifiers:
8180 qeth_unregister_notifiers();
8181out_ipv6:
8182#ifdef CONFIG_QETH_IPV6
8183 qeth_ipv6_uninit();
8184#endif /* QETH_IPV6 */
8185out_sysfs:
8186 qeth_sysfs_unregister();
8187 qeth_unregister_dbf_views();
8188out_err:
8189	PRINT_ERR("Initialization failed.\n");
8190 return rc;
8191}
8192
8193static void
8194__exit qeth_exit(void)
8195{
8196 struct qeth_card *card, *tmp;
8197 unsigned long flags;
8198
8199 QETH_DBF_TEXT(trace,1, "cleanup.");
8200
8201 /*
8202	 * We would not need to clean up our devices here, because the
8203 * common device layer calls qeth_remove_device for each device
8204 * as soon as we unregister our driver (done in qeth_sysfs_unregister).
8205 * But we do cleanup here so we can do a "soft" shutdown of our cards.
8206 * qeth_remove_device called by the common device layer would otherwise
8207 * do a "hard" shutdown (card->use_hard_stop is set to one in
8208 * qeth_remove_device).
8209 */
8210again:
8211 read_lock_irqsave(&qeth_card_list.rwlock, flags);
8212 list_for_each_entry_safe(card, tmp, &qeth_card_list.list, list){
8213 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
8214 qeth_set_offline(card->gdev);
8215 qeth_remove_device(card->gdev);
8216 goto again;
8217 }
8218 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
8219#ifdef CONFIG_QETH_IPV6
8220 qeth_ipv6_uninit();
8221#endif
8222 qeth_unregister_notifiers();
8223 qeth_remove_procfs_entries();
8224 qeth_sysfs_unregister();
8225 qeth_unregister_dbf_views();
8226	printk(KERN_INFO "qeth: removed\n");
8227}
8228
8229EXPORT_SYMBOL(qeth_eyecatcher);
8230module_init(qeth_init);
8231module_exit(qeth_exit);
8232MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>");
8233MODULE_DESCRIPTION("Linux on zSeries OSA Express and HiperSockets support\n" \
8234 "Copyright 2000,2003 IBM Corporation\n");
8235
8236MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_mpc.c b/drivers/s390/net/qeth_mpc.c
new file mode 100644
index 000000000000..f685ecc7da99
--- /dev/null
+++ b/drivers/s390/net/qeth_mpc.c
@@ -0,0 +1,168 @@
1/*
2 * linux/drivers/s390/net/qeth_mpc.c
3 *
4 * Linux on zSeries OSA Express and HiperSockets support
5 *
6 * Copyright 2000,2003 IBM Corporation
7 * Author(s): Frank Pavlic <pavlic@de.ibm.com>
8 * Thomas Spatzier <tspat@de.ibm.com>
9 *
10 */
11#include <asm/cio.h>
12#include "qeth_mpc.h"
13
14const char *VERSION_QETH_MPC_C = "$Revision: 1.11 $";
15
16unsigned char IDX_ACTIVATE_READ[]={
17 0x00,0x00,0x80,0x00, 0x00,0x00,0x00,0x00,
18 0x19,0x01,0x01,0x80, 0x00,0x00,0x00,0x00,
19 0x00,0x00,0x00,0x00, 0x00,0x00,0xc8,0xc1,
20 0xd3,0xd3,0xd6,0xd3, 0xc5,0x40,0x00,0x00,
21 0x00,0x00
22};
23
24unsigned char IDX_ACTIVATE_WRITE[]={
25 0x00,0x00,0x80,0x00, 0x00,0x00,0x00,0x00,
26 0x15,0x01,0x01,0x80, 0x00,0x00,0x00,0x00,
27 0xff,0xff,0x00,0x00, 0x00,0x00,0xc8,0xc1,
28 0xd3,0xd3,0xd6,0xd3, 0xc5,0x40,0x00,0x00,
29 0x00,0x00
30};
31
32unsigned char CM_ENABLE[]={
33 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x01,
34 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x63,
35 0x10,0x00,0x00,0x01,
36 0x00,0x00,0x00,0x00,
37 0x81,0x7e,0x00,0x01, 0x00,0x00,0x00,0x00,
38 0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x23,
39 0x00,0x00,0x23,0x05, 0x00,0x00,0x00,0x00,
40 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
41 0x01,0x00,0x00,0x23, 0x00,0x00,0x00,0x40,
42 0x00,0x0c,0x41,0x02, 0x00,0x17,0x00,0x00,
43 0x00,0x00,0x00,0x00,
44 0x00,0x0b,0x04,0x01,
45 0x7e,0x04,0x05,0x00, 0x01,0x01,0x0f,
46 0x00,
47 0x0c,0x04,0x02,0xff, 0xff,0xff,0xff,0xff,
48 0xff,0xff,0xff
49};
50
51unsigned char CM_SETUP[]={
52 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x02,
53 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x64,
54 0x10,0x00,0x00,0x01,
55 0x00,0x00,0x00,0x00,
56 0x81,0x7e,0x00,0x01, 0x00,0x00,0x00,0x00,
57 0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x24,
58 0x00,0x00,0x24,0x05, 0x00,0x00,0x00,0x00,
59 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
60 0x01,0x00,0x00,0x24, 0x00,0x00,0x00,0x40,
61 0x00,0x0c,0x41,0x04, 0x00,0x18,0x00,0x00,
62 0x00,0x00,0x00,0x00,
63 0x00,0x09,0x04,0x04,
64 0x05,0x00,0x01,0x01, 0x11,
65 0x00,0x09,0x04,
66 0x05,0x05,0x00,0x00, 0x00,0x00,
67 0x00,0x06,
68 0x04,0x06,0xc8,0x00
69};
70
71unsigned char ULP_ENABLE[]={
72 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x03,
73 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x6b,
74 0x10,0x00,0x00,0x01,
75 0x00,0x00,0x00,0x00,
76 0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x01,
77 0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x2b,
78 0x00,0x00,0x2b,0x05, 0x20,0x01,0x00,0x00,
79 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
80 0x01,0x00,0x00,0x2b, 0x00,0x00,0x00,0x40,
81 0x00,0x0c,0x41,0x02, 0x00,0x1f,0x00,0x00,
82 0x00,0x00,0x00,0x00,
83 0x00,0x0b,0x04,0x01,
84 0x03,0x04,0x05,0x00, 0x01,0x01,0x12,
85 0x00,
86 0x14,0x04,0x0a,0x00, 0x20,0x00,0x00,0xff,
87 0xff,0x00,0x08,0xc8, 0xe8,0xc4,0xf1,0xc7,
88 0xf1,0x00,0x00
89};
90
91unsigned char ULP_SETUP[]={
92 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x04,
93 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x6c,
94 0x10,0x00,0x00,0x01,
95 0x00,0x00,0x00,0x00,
96 0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x02,
97 0x00,0x00,0x00,0x01, 0x00,0x24,0x00,0x2c,
98 0x00,0x00,0x2c,0x05, 0x20,0x01,0x00,0x00,
99 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
100 0x01,0x00,0x00,0x2c, 0x00,0x00,0x00,0x40,
101 0x00,0x0c,0x41,0x04, 0x00,0x20,0x00,0x00,
102 0x00,0x00,0x00,0x00,
103 0x00,0x09,0x04,0x04,
104 0x05,0x00,0x01,0x01, 0x14,
105 0x00,0x09,0x04,
106 0x05,0x05,0x30,0x01, 0x00,0x00,
107 0x00,0x06,
108 0x04,0x06,0x40,0x00,
109 0x00,0x08,0x04,0x0b,
110 0x00,0x00,0x00,0x00
111};
112
113unsigned char DM_ACT[]={
114 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x05,
115 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x55,
116 0x10,0x00,0x00,0x01,
117 0x00,0x00,0x00,0x00,
118 0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x03,
119 0x00,0x00,0x00,0x02, 0x00,0x24,0x00,0x15,
120 0x00,0x00,0x2c,0x05, 0x20,0x01,0x00,0x00,
121 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
122 0x01,0x00,0x00,0x15, 0x00,0x00,0x00,0x40,
123 0x00,0x0c,0x43,0x60, 0x00,0x09,0x00,0x00,
124 0x00,0x00,0x00,0x00,
125 0x00,0x09,0x04,0x04,
126 0x05,0x40,0x01,0x01, 0x00
127};
128
129unsigned char IPA_PDU_HEADER[]={
130 0x00,0xe0,0x00,0x00, 0x77,0x77,0x77,0x77,
131 0x00,0x00,0x00,0x14, 0x00,0x00,
132 (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd))/256,
133 (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd))%256,
134 0x10,0x00,0x00,0x01, 0x00,0x00,0x00,0x00,
135 0xc1,0x03,0x00,0x01, 0x00,0x00,0x00,0x00,
136 0x00,0x00,0x00,0x00, 0x00,0x24,
137 sizeof(struct qeth_ipa_cmd)/256,
138 sizeof(struct qeth_ipa_cmd)%256,
139 0x00,
140 sizeof(struct qeth_ipa_cmd)/256,
141	sizeof(struct qeth_ipa_cmd)%256,0x05, 0x77,0x77,0x77,0x77,
142 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
143 0x01,0x00,
144 sizeof(struct qeth_ipa_cmd)/256,
145 sizeof(struct qeth_ipa_cmd)%256,
146 0x00,0x00,0x00,0x40,
147};
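/*
 * The /256 and %256 pairs above just split a 16-bit big-endian length
 * into two initializer bytes at compile time. Assuming, purely for
 * illustration, sizeof(struct qeth_ipa_cmd) were 0xb0, the total-length
 * field would come out as
 *
 *	(0x40 + 0xb0) / 256,	high byte: 0x00
 *	(0x40 + 0xb0) % 256,	low byte:  0xf0
 */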
148
149unsigned char WRITE_CCW[]={
150 0x01,CCW_FLAG_SLI,0,0,
151 0,0,0,0
152};
153
154unsigned char READ_CCW[]={
155 0x02,CCW_FLAG_SLI,0,0,
156 0,0,0,0
157};
158
159
160
161
162
163
164
165
166
167
168
diff --git a/drivers/s390/net/qeth_mpc.h b/drivers/s390/net/qeth_mpc.h
new file mode 100644
index 000000000000..3d916b5c5d09
--- /dev/null
+++ b/drivers/s390/net/qeth_mpc.h
@@ -0,0 +1,538 @@
1/*
2 * linux/drivers/s390/net/qeth_mpc.h
3 *
4 * Linux on zSeries OSA Express and HiperSockets support
5 *
6 * Copyright 2000,2003 IBM Corporation
7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8 * Thomas Spatzier <tspat@de.ibm.com>
9 * Frank Pavlic <pavlic@de.ibm.com>
10 *
11 */
12#ifndef __QETH_MPC_H__
13#define __QETH_MPC_H__
14
15#include <asm/qeth.h>
16
17#define VERSION_QETH_MPC_H "$Revision: 1.43 $"
18
19extern const char *VERSION_QETH_MPC_C;
20
21#define IPA_PDU_HEADER_SIZE 0x40
22#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer+0x0e)
23#define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer+0x26)
24#define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer+0x2a)
25#define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer+0x3a)
26
27extern unsigned char IPA_PDU_HEADER[];
28#define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer+0x2c)
29
30#define IPA_CMD_LENGTH (IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd))
31
32#define QETH_SEQ_NO_LENGTH 4
33#define QETH_MPC_TOKEN_LENGTH 4
34#define QETH_MCL_LENGTH 4
35#define OSA_ADDR_LEN 6
36
37#define QETH_TIMEOUT (10 * HZ)
38#define QETH_IPA_TIMEOUT (45 * HZ)
39#define QETH_IDX_COMMAND_SEQNO 0xffff0000
40#define SR_INFO_LEN 16
41
42#define QETH_CLEAR_CHANNEL_PARM -10
43#define QETH_HALT_CHANNEL_PARM -11
44
45/*****************************************************************************/
46/* IP Assist related definitions */
47/*****************************************************************************/
48#define IPA_CMD_INITIATOR_HOST 0x00
49#define IPA_CMD_INITIATOR_HYDRA 0x01
50#define IPA_CMD_PRIM_VERSION_NO 0x01
51
52enum qeth_card_types {
53 QETH_CARD_TYPE_UNKNOWN = 0,
54 QETH_CARD_TYPE_OSAE = 10,
55 QETH_CARD_TYPE_IQD = 1234,
56};
57
58#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
59/* only the first two bytes are looked at in qeth_get_cardname_short */
60enum qeth_link_types {
61 QETH_LINK_TYPE_FAST_ETH = 0x01,
62 QETH_LINK_TYPE_HSTR = 0x02,
63 QETH_LINK_TYPE_GBIT_ETH = 0x03,
64 QETH_LINK_TYPE_10GBIT_ETH = 0x10,
65 QETH_LINK_TYPE_LANE_ETH100 = 0x81,
66 QETH_LINK_TYPE_LANE_TR = 0x82,
67 QETH_LINK_TYPE_LANE_ETH1000 = 0x83,
68 QETH_LINK_TYPE_LANE = 0x88,
69 QETH_LINK_TYPE_ATM_NATIVE = 0x90,
70};
71
72enum qeth_tr_macaddr_modes {
73 QETH_TR_MACADDR_NONCANONICAL = 0,
74 QETH_TR_MACADDR_CANONICAL = 1,
75};
76
77enum qeth_tr_broadcast_modes {
78 QETH_TR_BROADCAST_ALLRINGS = 0,
79 QETH_TR_BROADCAST_LOCAL = 1,
80};
81
82/* these values match CHECKSUM_* in include/linux/skbuff.h */
83enum qeth_checksum_types {
84 SW_CHECKSUMMING = 0, /* TODO: set to bit flag used in IPA Command */
85 HW_CHECKSUMMING = 1,
86 NO_CHECKSUMMING = 2,
87};
88#define QETH_CHECKSUM_DEFAULT SW_CHECKSUMMING
89
90/*
91 * Routing stuff
92 */
93#define RESET_ROUTING_FLAG 0x10 /* indicate that routing type shall be set */
94enum qeth_routing_types {
95 NO_ROUTER = 0, /* TODO: set to bit flag used in IPA Command */
96 PRIMARY_ROUTER = 1,
97 SECONDARY_ROUTER = 2,
98 MULTICAST_ROUTER = 3,
99 PRIMARY_CONNECTOR = 4,
100 SECONDARY_CONNECTOR = 5,
101};
102
103
104/* IPA Commands */
105enum qeth_ipa_cmds {
106 IPA_CMD_STARTLAN = 0x01,
107 IPA_CMD_STOPLAN = 0x02,
108 IPA_CMD_SETVMAC = 0x21,
109 IPA_CMD_DELVMAC = 0x22,
110 IPA_CMD_SETGMAC = 0x23,
111 IPA_CMD_DELGMAC = 0x24,
112 IPA_CMD_SETVLAN = 0x25,
113 IPA_CMD_DELVLAN = 0x26,
114 IPA_CMD_SETIP = 0xb1,
115 IPA_CMD_DELIP = 0xb7,
116 IPA_CMD_QIPASSIST = 0xb2,
117 IPA_CMD_SETASSPARMS = 0xb3,
118 IPA_CMD_SETIPM = 0xb4,
119 IPA_CMD_DELIPM = 0xb5,
120 IPA_CMD_SETRTG = 0xb6,
121 IPA_CMD_SETADAPTERPARMS = 0xb8,
122 IPA_CMD_IPFRAME = 0xb9,
123 IPA_CMD_ADD_ADDR_ENTRY = 0xc1,
124 IPA_CMD_DELETE_ADDR_ENTRY = 0xc2,
125 IPA_CMD_CREATE_ADDR = 0xc3,
126 IPA_CMD_DESTROY_ADDR = 0xc4,
127 IPA_CMD_REGISTER_LOCAL_ADDR = 0xd1,
128 IPA_CMD_UNREGISTER_LOCAL_ADDR = 0xd2,
129};
130
131enum qeth_ip_ass_cmds {
132 IPA_CMD_ASS_START = 0x0001,
133 IPA_CMD_ASS_STOP = 0x0002,
134 IPA_CMD_ASS_CONFIGURE = 0x0003,
135 IPA_CMD_ASS_ENABLE = 0x0004,
136};
137
138enum qeth_arp_process_subcmds {
139 IPA_CMD_ASS_ARP_SET_NO_ENTRIES = 0x0003,
140 IPA_CMD_ASS_ARP_QUERY_CACHE = 0x0004,
141 IPA_CMD_ASS_ARP_ADD_ENTRY = 0x0005,
142 IPA_CMD_ASS_ARP_REMOVE_ENTRY = 0x0006,
143 IPA_CMD_ASS_ARP_FLUSH_CACHE = 0x0007,
144 IPA_CMD_ASS_ARP_QUERY_INFO = 0x0104,
145 IPA_CMD_ASS_ARP_QUERY_STATS = 0x0204,
146};
147
148/* Return Codes for IPA Commands */
149enum qeth_ipa_return_codes {
150 IPA_RC_SUCCESS = 0x0000,
151 IPA_RC_NOTSUPP = 0x0001,
152 IPA_RC_NO_ACCESS = 0x0002,
153 IPA_RC_FAILED = 0x0003,
154 IPA_RC_DATA_MISMATCH = 0xe001,
155 IPA_RC_INVALID_LAN_TYPE = 0xe003,
156 IPA_RC_INVALID_LAN_NO = 0xe004,
157 IPA_RC_IPADDR_ALREADY_REG = 0xe005,
158 IPA_RC_IPADDR_TABLE_FULL = 0xe006,
159 IPA_RC_IPADDR_ALREADY_USED = 0xe00a,
160 IPA_RC_ASSNO_NOT_SUPP = 0xe00d,
161 IPA_RC_ASSCMD_START_FAILED = 0xe00e,
162 IPA_RC_ASSCMD_PART_SUCCESS = 0xe00f,
163 IPA_RC_IPADDR_NOT_DEFINED = 0xe010,
164 IPA_RC_LAN_OFFLINE = 0xe080,
165};
166
167/* IPA function flags; each flag marks availability of respective function */
168enum qeth_ipa_funcs {
169 IPA_ARP_PROCESSING = 0x00000001L,
170 IPA_INBOUND_CHECKSUM = 0x00000002L,
171 IPA_OUTBOUND_CHECKSUM = 0x00000004L,
172 IPA_IP_FRAGMENTATION = 0x00000008L,
173 IPA_FILTERING = 0x00000010L,
174 IPA_IPV6 = 0x00000020L,
175 IPA_MULTICASTING = 0x00000040L,
176 IPA_IP_REASSEMBLY = 0x00000080L,
177 IPA_QUERY_ARP_COUNTERS = 0x00000100L,
178 IPA_QUERY_ARP_ADDR_INFO = 0x00000200L,
179 IPA_SETADAPTERPARMS = 0x00000400L,
180 IPA_VLAN_PRIO = 0x00000800L,
181 IPA_PASSTHRU = 0x00001000L,
182 IPA_FULL_VLAN = 0x00004000L,
183 IPA_SOURCE_MAC = 0x00010000L,
184 IPA_OSA_MC_ROUTER = 0x00020000L,
185 IPA_QUERY_ARP_ASSIST = 0x00040000L,
186 IPA_INBOUND_TSO = 0x00080000L,
187 IPA_OUTBOUND_TSO = 0x00100000L,
188};
189
190/* SETIP/DELIP IPA Command: ***************************************************/
191enum qeth_ipa_setdelip_flags {
192 QETH_IPA_SETDELIP_DEFAULT = 0x00L, /* default */
193 QETH_IPA_SETIP_VIPA_FLAG = 0x01L, /* no grat. ARP */
194 QETH_IPA_SETIP_TAKEOVER_FLAG = 0x02L, /* nofail on grat. ARP */
195 QETH_IPA_DELIP_ADDR_2_B_TAKEN_OVER = 0x20L,
196 QETH_IPA_DELIP_VIPA_FLAG = 0x40L,
197 QETH_IPA_DELIP_ADDR_NEEDS_SETIP = 0x80L,
198};
199
200/* SETADAPTER IPA Command: ****************************************************/
201enum qeth_ipa_setadp_cmd {
202 IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x01,
203 IPA_SETADP_ALTER_MAC_ADDRESS = 0x02,
204 IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x04,
205 IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x08,
206 IPA_SETADP_SET_ADDRESSING_MODE = 0x10,
207 IPA_SETADP_SET_CONFIG_PARMS = 0x20,
208 IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x40,
209 IPA_SETADP_SET_BROADCAST_MODE = 0x80,
210 IPA_SETADP_SEND_OSA_MESSAGE = 0x0100,
211 IPA_SETADP_SET_SNMP_CONTROL = 0x0200,
212 IPA_SETADP_READ_SNMP_PARMS = 0x0400,
213 IPA_SETADP_WRITE_SNMP_PARMS = 0x0800,
214 IPA_SETADP_QUERY_CARD_INFO = 0x1000,
215};
216enum qeth_ipa_mac_ops {
217 CHANGE_ADDR_READ_MAC = 0,
218 CHANGE_ADDR_REPLACE_MAC = 1,
219 CHANGE_ADDR_ADD_MAC = 2,
220 CHANGE_ADDR_DEL_MAC = 4,
221 CHANGE_ADDR_RESET_MAC = 8,
222};
223enum qeth_ipa_addr_ops {
224 CHANGE_ADDR_READ_ADDR = 0,
225 CHANGE_ADDR_ADD_ADDR = 1,
226 CHANGE_ADDR_DEL_ADDR = 2,
227 CHANGE_ADDR_FLUSH_ADDR_TABLE = 4,
228
229
230};
231/* (SET)DELIP(M) IPA stuff ***************************************************/
232struct qeth_ipacmd_setdelip4 {
233 __u8 ip_addr[4];
234 __u8 mask[4];
235 __u32 flags;
236} __attribute__ ((packed));
237
238struct qeth_ipacmd_setdelip6 {
239 __u8 ip_addr[16];
240 __u8 mask[16];
241 __u32 flags;
242} __attribute__ ((packed));
243
244struct qeth_ipacmd_setdelipm {
245 __u8 mac[6];
246 __u8 padding[2];
247 __u8 ip6[12];
248 __u8 ip4[4];
249} __attribute__ ((packed));
250
251struct qeth_ipacmd_layer2setdelmac {
252 __u32 mac_length;
253 __u8 mac[6];
254} __attribute__ ((packed));
255
256struct qeth_ipacmd_layer2setdelvlan {
257 __u16 vlan_id;
258} __attribute__ ((packed));
259
260
261struct qeth_ipacmd_setassparms_hdr {
262 __u32 assist_no;
263 __u16 length;
264 __u16 command_code;
265 __u16 return_code;
266 __u8 number_of_replies;
267 __u8 seq_no;
268} __attribute__((packed));
269
270struct qeth_arp_query_data {
271 __u16 request_bits;
272 __u16 reply_bits;
273 __u32 no_entries;
274 char data;
275} __attribute__((packed));
276
277/* used as parameter for arp_query reply */
278struct qeth_arp_query_info {
279 __u32 udata_len;
280 __u16 mask_bits;
281 __u32 udata_offset;
282 __u32 no_entries;
283 char *udata;
284};
285
286/* SETASSPARMS IPA Command: */
287struct qeth_ipacmd_setassparms {
288 struct qeth_ipacmd_setassparms_hdr hdr;
289 union {
290 __u32 flags_32bit;
291 struct qeth_arp_cache_entry add_arp_entry;
292 struct qeth_arp_query_data query_arp;
293 __u8 ip[16];
294 } data;
295} __attribute__ ((packed));
296
297
298/* SETRTG IPA Command: ****************************************************/
299struct qeth_set_routing {
300 __u8 type;
301};
302
303/* SETADAPTERPARMS IPA Command: *******************************************/
304struct qeth_query_cmds_supp {
305 __u32 no_lantypes_supp;
306 __u8 lan_type;
307 __u8 reserved1[3];
308 __u32 supported_cmds;
309 __u8 reserved2[8];
310} __attribute__ ((packed));
311
312struct qeth_change_addr {
313 __u32 cmd;
314 __u32 addr_size;
315 __u32 no_macs;
316 __u8 addr[OSA_ADDR_LEN];
317} __attribute__ ((packed));
318
319
320struct qeth_snmp_cmd {
321 __u8 token[16];
322 __u32 request;
323 __u32 interface;
324 __u32 returncode;
325 __u32 firmwarelevel;
326 __u32 seqno;
327 __u8 data;
328} __attribute__ ((packed));
329
330struct qeth_snmp_ureq_hdr {
331 __u32 data_len;
332 __u32 req_len;
333 __u32 reserved1;
334 __u32 reserved2;
335} __attribute__ ((packed));
336
337struct qeth_snmp_ureq {
338 struct qeth_snmp_ureq_hdr hdr;
339 struct qeth_snmp_cmd cmd;
340} __attribute__((packed));
341
342struct qeth_ipacmd_setadpparms_hdr {
343 __u32 supp_hw_cmds;
344 __u32 reserved1;
345 __u16 cmdlength;
346 __u16 reserved2;
347 __u32 command_code;
348 __u16 return_code;
349 __u8 used_total;
350 __u8 seq_no;
351 __u32 reserved3;
352} __attribute__ ((packed));
353
354struct qeth_ipacmd_setadpparms {
355 struct qeth_ipacmd_setadpparms_hdr hdr;
356 union {
357 struct qeth_query_cmds_supp query_cmds_supp;
358 struct qeth_change_addr change_addr;
359 struct qeth_snmp_cmd snmp;
360 __u32 mode;
361 } data;
362} __attribute__ ((packed));
363
364/* IPFRAME IPA Command: ***************************************************/
365/* TODO: define in analogy to commands define above */
366
367/* ADD_ADDR_ENTRY IPA Command: ********************************************/
368/* TODO: define in analogy to commands define above */
369
370/* DELETE_ADDR_ENTRY IPA Command: *****************************************/
371/* TODO: define in analogy to commands define above */
372
373/* CREATE_ADDR IPA Command: ***********************************************/
374struct qeth_create_destroy_address {
375 __u8 unique_id[8];
376} __attribute__ ((packed));
377
378/* REGISTER_LOCAL_ADDR IPA Command: ***************************************/
379/* TODO: define in analogy to commands define above */
380
381/* UNREGISTER_LOCAL_ADDR IPA Command: *************************************/
382/* TODO: define in analogy to commands define above */
383
384/* Header for each IPA command */
385struct qeth_ipacmd_hdr {
386 __u8 command;
387 __u8 initiator;
388 __u16 seqno;
389 __u16 return_code;
390 __u8 adapter_type;
391 __u8 rel_adapter_no;
392 __u8 prim_version_no;
393 __u8 param_count;
394 __u16 prot_version;
395 __u32 ipa_supported;
396 __u32 ipa_enabled;
397} __attribute__ ((packed));
398
399/* The IPA command itself */
400struct qeth_ipa_cmd {
401 struct qeth_ipacmd_hdr hdr;
402 union {
403 struct qeth_ipacmd_setdelip4 setdelip4;
404 struct qeth_ipacmd_setdelip6 setdelip6;
405 struct qeth_ipacmd_setdelipm setdelipm;
406 struct qeth_ipacmd_setassparms setassparms;
407 struct qeth_ipacmd_layer2setdelmac setdelmac;
408 struct qeth_ipacmd_layer2setdelvlan setdelvlan;
409 struct qeth_create_destroy_address create_destroy_addr;
410 struct qeth_ipacmd_setadpparms setadapterparms;
411 struct qeth_set_routing setrtg;
412 } data;
413} __attribute__ ((packed));
414
415/*
416 * Special command for ARP processing.
417 * It is not folded into the setassparms command above, because it
418 * would otherwise inflate the size of struct qeth_ipacmd_setassparms.
419 */
420enum qeth_ipa_arp_return_codes {
421 QETH_IPA_ARP_RC_SUCCESS = 0x0000,
422 QETH_IPA_ARP_RC_FAILED = 0x0001,
423 QETH_IPA_ARP_RC_NOTSUPP = 0x0002,
424 QETH_IPA_ARP_RC_OUT_OF_RANGE = 0x0003,
425 QETH_IPA_ARP_RC_Q_NOTSUPP = 0x0004,
426 QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
427};
428
429#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
430 sizeof(struct qeth_ipacmd_setassparms_hdr))
431#define QETH_IPA_ARP_DATA_POS(buffer) (buffer + IPA_PDU_HEADER_SIZE + \
432 QETH_SETASS_BASE_LEN)
433#define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
434 sizeof(struct qeth_ipacmd_setadpparms_hdr))
435#define QETH_SNMP_SETADP_CMDLENGTH 16
436
437#define QETH_ARP_DATA_SIZE 3968
438#define QETH_ARP_CMD_LEN (QETH_ARP_DATA_SIZE + 8)
439/* Helper functions */
440#define IS_IPA_REPLY(cmd) (cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST)
441
442/*****************************************************************************/
443/* END OF IP Assist related definitions */
444/*****************************************************************************/
445
446
447extern unsigned char WRITE_CCW[];
448extern unsigned char READ_CCW[];
449
450extern unsigned char CM_ENABLE[];
451#define CM_ENABLE_SIZE 0x63
452#define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer+0x2c)
453#define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer+0x53)
454#define QETH_CM_ENABLE_USER_DATA(buffer) (buffer+0x5b)
455
456#define QETH_CM_ENABLE_RESP_FILTER_TOKEN(buffer) \
457 (PDU_ENCAPSULATION(buffer)+ 0x13)
458
459
460extern unsigned char CM_SETUP[];
461#define CM_SETUP_SIZE 0x64
462#define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer+0x2c)
463#define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer+0x51)
464#define QETH_CM_SETUP_FILTER_TOKEN(buffer) (buffer+0x5a)
465
466#define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \
467 (PDU_ENCAPSULATION(buffer) + 0x1a)
468
469extern unsigned char ULP_ENABLE[];
470#define ULP_ENABLE_SIZE 0x6b
471#define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer+0x61)
472#define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer+0x2c)
473#define QETH_ULP_ENABLE_FILTER_TOKEN(buffer) (buffer+0x53)
474#define QETH_ULP_ENABLE_PORTNAME_AND_LL(buffer) (buffer+0x62)
475#define QETH_ULP_ENABLE_RESP_FILTER_TOKEN(buffer) \
476 (PDU_ENCAPSULATION(buffer) + 0x13)
477#define QETH_ULP_ENABLE_RESP_MAX_MTU(buffer) \
478 (PDU_ENCAPSULATION(buffer)+ 0x1f)
479#define QETH_ULP_ENABLE_RESP_DIFINFO_LEN(buffer) \
480 (PDU_ENCAPSULATION(buffer) + 0x17)
481#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \
482 (PDU_ENCAPSULATION(buffer)+ 0x2b)
483/* Layer 2 definitions */
484#define QETH_PROT_LAYER2 0x08
485#define QETH_PROT_TCPIP 0x03
486#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer+0x50)
487#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer+0x19)
488
489extern unsigned char ULP_SETUP[];
490#define ULP_SETUP_SIZE 0x6c
491#define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer+0x2c)
492#define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer+0x51)
493#define QETH_ULP_SETUP_FILTER_TOKEN(buffer) (buffer+0x5a)
494#define QETH_ULP_SETUP_CUA(buffer) (buffer+0x68)
495#define QETH_ULP_SETUP_REAL_DEVADDR(buffer) (buffer+0x6a)
496
497#define QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(buffer) \
498 (PDU_ENCAPSULATION(buffer)+0x1a)
499
500
501extern unsigned char DM_ACT[];
502#define DM_ACT_SIZE 0x55
503#define QETH_DM_ACT_DEST_ADDR(buffer) (buffer+0x2c)
504#define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer+0x51)
505
506
507
508#define QETH_TRANSPORT_HEADER_SEQ_NO(buffer) (buffer+4)
509#define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer+0x1c)
510#define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer+0x20)
511
512extern unsigned char IDX_ACTIVATE_READ[];
513extern unsigned char IDX_ACTIVATE_WRITE[];
514
515#define IDX_ACTIVATE_SIZE 0x22
516#define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer+0x0c)
517#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b]&0x80)
518#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer+0x10)
519#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer+0x16)
520#define QETH_IDX_ACT_QDIO_DEV_CUA(buffer) (buffer+0x1e)
521#define QETH_IDX_ACT_QDIO_DEV_REALADDR(buffer) (buffer+0x20)
522#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08]&3)==2)
523#define QETH_IDX_REPLY_LEVEL(buffer) (buffer+0x12)
524
525#define PDU_ENCAPSULATION(buffer) \
526 (buffer + *(buffer + (*(buffer+0x0b)) + \
527 *(buffer + *(buffer+0x0b)+0x11) +0x07))
528
529#define IS_IPA(buffer) \
530 ((buffer) && \
531 ( *(buffer + ((*(buffer+0x0b))+4) )==0xc1) )
532
533#define ADDR_FRAME_TYPE_DIX 1
534#define ADDR_FRAME_TYPE_802_3 2
535#define ADDR_FRAME_TYPE_TR_WITHOUT_SR 0x10
536#define ADDR_FRAME_TYPE_TR_WITH_SR 0x20
537
538#endif
diff --git a/drivers/s390/net/qeth_proc.c b/drivers/s390/net/qeth_proc.c
new file mode 100644
index 000000000000..04719196fd20
--- /dev/null
+++ b/drivers/s390/net/qeth_proc.c
@@ -0,0 +1,495 @@
1/*
2 *
3 * linux/drivers/s390/net/qeth_proc.c ($Revision: 1.13 $)
4 *
5 * Linux on zSeries OSA Express and HiperSockets support
6 * This file contains code related to procfs.
7 *
8 * Copyright 2000,2003 IBM Corporation
9 *
10 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
11 *
12 */
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/proc_fs.h>
16#include <linux/seq_file.h>
17#include <linux/list.h>
18#include <linux/rwsem.h>
19
20#include "qeth.h"
21#include "qeth_mpc.h"
22#include "qeth_fs.h"
23
24const char *VERSION_QETH_PROC_C = "$Revision: 1.13 $";
25
26/***** /proc/qeth *****/
27#define QETH_PROCFILE_NAME "qeth"
28static struct proc_dir_entry *qeth_procfile;
29
30static void *
31qeth_procfile_seq_start(struct seq_file *s, loff_t *offset)
32{
33 struct list_head *next_card = NULL;
34 int i = 0;
35
36 down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
37
38 if (*offset == 0)
39 return SEQ_START_TOKEN;
40
41 /* get card at pos *offset */
42 list_for_each(next_card, &qeth_ccwgroup_driver.driver.devices)
43 if (++i == *offset)
44 return next_card;
45
46 return NULL;
47}
48
49static void
50qeth_procfile_seq_stop(struct seq_file *s, void* it)
51{
52 up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
53}
54
55static void *
56qeth_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset)
57{
58 struct list_head *next_card = NULL;
59 struct list_head *current_card;
60
61 if (it == SEQ_START_TOKEN) {
62 next_card = qeth_ccwgroup_driver.driver.devices.next;
63 if (next_card->next == next_card) /* list empty */
64 return NULL;
65 (*offset)++;
66 } else {
67 current_card = (struct list_head *)it;
68 if (current_card->next == &qeth_ccwgroup_driver.driver.devices)
69 return NULL; /* end of list reached */
70 next_card = current_card->next;
71 (*offset)++;
72 }
73
74 return next_card;
75}
76
77static inline const char *
78qeth_get_router_str(struct qeth_card *card, int ipv)
79{
80 int routing_type = 0;
81
82 if (ipv == 4){
83 routing_type = card->options.route4.type;
84 } else {
85#ifdef CONFIG_QETH_IPV6
86 routing_type = card->options.route6.type;
87#else
88 return "n/a";
89#endif /* CONFIG_QETH_IPV6 */
90 }
91
92 if (routing_type == PRIMARY_ROUTER)
93 return "pri";
94 else if (routing_type == SECONDARY_ROUTER)
95 return "sec";
96 else if (routing_type == MULTICAST_ROUTER) {
97 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
98 return "mc+";
99 return "mc";
100 } else if (routing_type == PRIMARY_CONNECTOR) {
101 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
102 return "p+c";
103 return "p.c";
104 } else if (routing_type == SECONDARY_CONNECTOR) {
105 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
106 return "s+c";
107 return "s.c";
108 } else if (routing_type == NO_ROUTER)
109 return "no";
110 else
111 return "unk";
112}
113
114static int
115qeth_procfile_seq_show(struct seq_file *s, void *it)
116{
117 struct device *device;
118 struct qeth_card *card;
119 char tmp[12]; /* for qeth_get_prioq_str */
120
121 if (it == SEQ_START_TOKEN){
122 seq_printf(s, "devices CHPID interface "
123 "cardtype port chksum prio-q'ing rtr4 "
124 "rtr6 fsz cnt\n");
125 seq_printf(s, "-------------------------- ----- ---------- "
126 "-------------- ---- ------ ---------- ---- "
127 "---- ----- -----\n");
128 } else {
129 device = list_entry(it, struct device, driver_list);
130 card = device->driver_data;
131 seq_printf(s, "%s/%s/%s x%02X %-10s %-14s %-4i ",
132 CARD_RDEV_ID(card),
133 CARD_WDEV_ID(card),
134 CARD_DDEV_ID(card),
135 card->info.chpid,
136 QETH_CARD_IFNAME(card),
137 qeth_get_cardname_short(card),
138 card->info.portno);
139 if (card->lan_online)
140 seq_printf(s, "%-6s %-10s %-4s %-4s %-5s %-5i\n",
141 qeth_get_checksum_str(card),
142 qeth_get_prioq_str(card, tmp),
143 qeth_get_router_str(card, 4),
144 qeth_get_router_str(card, 6),
145 qeth_get_bufsize_str(card),
146 card->qdio.in_buf_pool.buf_count);
147 else
148 seq_printf(s, " +++ LAN OFFLINE +++\n");
149 }
150 return 0;
151}
152
153static struct seq_operations qeth_procfile_seq_ops = {
154 .start = qeth_procfile_seq_start,
155 .stop = qeth_procfile_seq_stop,
156 .next = qeth_procfile_seq_next,
157 .show = qeth_procfile_seq_show,
158};
159
160static int
161qeth_procfile_open(struct inode *inode, struct file *file)
162{
163 return seq_open(file, &qeth_procfile_seq_ops);
164}
165
166static struct file_operations qeth_procfile_fops = {
167 .owner = THIS_MODULE,
168 .open = qeth_procfile_open,
169 .read = seq_read,
170 .llseek = seq_lseek,
171 .release = seq_release,
172};
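
With these hooks in place, reading /proc/qeth drives the seq_file iterator: start() takes the bus rwsem and positions the cursor, show() prints one card per line, next() advances, and stop() drops the lock. A minimal userspace sketch, assuming the qeth module is loaded so the proc file exists:

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/qeth", "r");

		if (!f) {
			perror("fopen /proc/qeth");
			return 1;
		}
		/* each read cycles the kernel's start/show/next/stop hooks */
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}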
173
174/***** /proc/qeth_perf *****/
175#define QETH_PERF_PROCFILE_NAME "qeth_perf"
176static struct proc_dir_entry *qeth_perf_procfile;
177
178#ifdef CONFIG_QETH_PERF_STATS
179
180static void *
181qeth_perf_procfile_seq_start(struct seq_file *s, loff_t *offset)
182{
183 struct list_head *next_card = NULL;
184 int i = 0;
185
186 down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
187 /* get card at pos *offset */
188 list_for_each(next_card, &qeth_ccwgroup_driver.driver.devices){
189 if (i == *offset)
190 return next_card;
191 i++;
192 }
193 return NULL;
194}
195
196static void
197qeth_perf_procfile_seq_stop(struct seq_file *s, void* it)
198{
199 up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
200}
201
202static void *
203qeth_perf_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset)
204{
205 struct list_head *current_card = (struct list_head *)it;
206
207 if (current_card->next == &qeth_ccwgroup_driver.driver.devices)
208 return NULL; /* end of list reached */
209 (*offset)++;
210 return current_card->next;
211}
212
213static int
214qeth_perf_procfile_seq_show(struct seq_file *s, void *it)
215{
216 struct device *device;
217 struct qeth_card *card;
218
219 device = list_entry(it, struct device, driver_list);
220 card = device->driver_data;
221 seq_printf(s, "For card with devnos %s/%s/%s (%s):\n",
222 CARD_RDEV_ID(card),
223 CARD_WDEV_ID(card),
224 CARD_DDEV_ID(card),
225 QETH_CARD_IFNAME(card)
226 );
227 seq_printf(s, " Skb's/buffers received : %li/%i\n"
228 " Skb's/buffers sent : %li/%i\n\n",
229 card->stats.rx_packets, card->perf_stats.bufs_rec,
230 card->stats.tx_packets, card->perf_stats.bufs_sent
231 );
232 seq_printf(s, " Skb's/buffers sent without packing : %li/%i\n"
233 " Skb's/buffers sent with packing : %i/%i\n\n",
234 card->stats.tx_packets - card->perf_stats.skbs_sent_pack,
235 card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack,
236 card->perf_stats.skbs_sent_pack,
237 card->perf_stats.bufs_sent_pack
238 );
239 seq_printf(s, " Skbs sent in SG mode : %i\n"
240 " Skb fragments sent in SG mode : %i\n\n",
241 card->perf_stats.sg_skbs_sent,
242 card->perf_stats.sg_frags_sent);
243 seq_printf(s, " large_send tx (in Kbytes) : %i\n"
244 " large_send count : %i\n\n",
245 card->perf_stats.large_send_bytes >> 10,
246 card->perf_stats.large_send_cnt);
247 seq_printf(s, " Packing state changes no pkg.->packing : %i/%i\n"
248 " Watermarks L/H : %i/%i\n"
249 " Current buffer usage (outbound q's) : "
250 "%i/%i/%i/%i\n\n",
251 card->perf_stats.sc_dp_p, card->perf_stats.sc_p_dp,
252 QETH_LOW_WATERMARK_PACK, QETH_HIGH_WATERMARK_PACK,
253 atomic_read(&card->qdio.out_qs[0]->used_buffers),
254 (card->qdio.no_out_queues > 1)?
255 atomic_read(&card->qdio.out_qs[1]->used_buffers)
256 : 0,
257 (card->qdio.no_out_queues > 2)?
258 atomic_read(&card->qdio.out_qs[2]->used_buffers)
259 : 0,
260 (card->qdio.no_out_queues > 3)?
261 atomic_read(&card->qdio.out_qs[3]->used_buffers)
262 : 0
263 );
264 seq_printf(s, " Inbound handler time (in us) : %i\n"
265 " Inbound handler count : %i\n"
266 " Inbound do_QDIO time (in us) : %i\n"
267 " Inbound do_QDIO count : %i\n\n"
268 " Outbound handler time (in us) : %i\n"
269 " Outbound handler count : %i\n\n"
270 " Outbound time (in us, incl QDIO) : %i\n"
271 " Outbound count : %i\n"
272 " Outbound do_QDIO time (in us) : %i\n"
273 " Outbound do_QDIO count : %i\n\n",
274 card->perf_stats.inbound_time,
275 card->perf_stats.inbound_cnt,
276 card->perf_stats.inbound_do_qdio_time,
277 card->perf_stats.inbound_do_qdio_cnt,
278 card->perf_stats.outbound_handler_time,
279 card->perf_stats.outbound_handler_cnt,
280 card->perf_stats.outbound_time,
281 card->perf_stats.outbound_cnt,
282 card->perf_stats.outbound_do_qdio_time,
283 card->perf_stats.outbound_do_qdio_cnt
284 );
285 return 0;
286}
287
288static struct seq_operations qeth_perf_procfile_seq_ops = {
289 .start = qeth_perf_procfile_seq_start,
290 .stop = qeth_perf_procfile_seq_stop,
291 .next = qeth_perf_procfile_seq_next,
292 .show = qeth_perf_procfile_seq_show,
293};
294
295static int
296qeth_perf_procfile_open(struct inode *inode, struct file *file)
297{
298 return seq_open(file, &qeth_perf_procfile_seq_ops);
299}
300
301static struct file_operations qeth_perf_procfile_fops = {
302 .owner = THIS_MODULE,
303 .open = qeth_perf_procfile_open,
304 .read = seq_read,
305 .llseek = seq_lseek,
306 .release = seq_release,
307};
308
309#define qeth_perf_procfile_created qeth_perf_procfile
310#else
311#define qeth_perf_procfile_created 1
312#endif /* CONFIG_QETH_PERF_STATS */
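
This macro pair lets the success check in qeth_create_procfs_entries() below stay unconditional: with CONFIG_QETH_PERF_STATS it tests the real proc entry pointer, without it the constant 1 always passes. The same pattern in isolation, with made-up names:

	#ifdef CONFIG_FEATURE_X
	static struct proc_dir_entry *feature_x_entry;
	#define feature_x_created feature_x_entry	/* real pointer */
	#else
	#define feature_x_created 1	/* compiled out: counts as created */
	#endif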
313
314/***** /proc/qeth_ipa_takeover *****/
315#define QETH_IPATO_PROCFILE_NAME "qeth_ipa_takeover"
316static struct proc_dir_entry *qeth_ipato_procfile;
317
318static void *
319qeth_ipato_procfile_seq_start(struct seq_file *s, loff_t *offset)
320{
321 struct list_head *next_card = NULL;
322 int i = 0;
323
324 down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
325 /* TODO: finish this */
326 /*
 327 * maybe SEQ_START_TOKEN can be returned for offset 0,
 328 * then output the driver settings;
 329 * else output the settings for the respective card
330 */
331 /* get card at pos *offset */
332 list_for_each(next_card, &qeth_ccwgroup_driver.driver.devices){
333 if (i == *offset)
334 return next_card;
335 i++;
336 }
337 return NULL;
338}
339
340static void
341qeth_ipato_procfile_seq_stop(struct seq_file *s, void* it)
342{
343 up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
344}
345
346static void *
347qeth_ipato_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset)
348{
349 struct list_head *current_card = (struct list_head *)it;
350
351 /* TODO: finish this */
352 /*
 353 * maybe SEQ_START_TOKEN can be returned for offset 0,
 354 * then output the driver settings;
 355 * else output the settings for the respective card
356 */
357 if (current_card->next == &qeth_ccwgroup_driver.driver.devices)
358 return NULL; /* end of list reached */
359 (*offset)++;
360 return current_card->next;
361}
362
363static int
364qeth_ipato_procfile_seq_show(struct seq_file *s, void *it)
365{
366 struct device *device;
367 struct qeth_card *card;
368
369 /* TODO: finish this */
370 /*
 371 * maybe SEQ_START_TOKEN can be returned for offset 0,
 372 * then output the driver settings;
 373 * else output the settings for the respective card
374 */
375 device = list_entry(it, struct device, driver_list);
376 card = device->driver_data;
377
378 return 0;
379}
380
381static struct seq_operations qeth_ipato_procfile_seq_ops = {
382 .start = qeth_ipato_procfile_seq_start,
383 .stop = qeth_ipato_procfile_seq_stop,
384 .next = qeth_ipato_procfile_seq_next,
385 .show = qeth_ipato_procfile_seq_show,
386};
387
388static int
389qeth_ipato_procfile_open(struct inode *inode, struct file *file)
390{
391 return seq_open(file, &qeth_ipato_procfile_seq_ops);
392}
393
394static struct file_operations qeth_ipato_procfile_fops = {
395 .owner = THIS_MODULE,
396 .open = qeth_ipato_procfile_open,
397 .read = seq_read,
398 .llseek = seq_lseek,
399 .release = seq_release,
400};
401
402int __init
403qeth_create_procfs_entries(void)
404{
405 qeth_procfile = create_proc_entry(QETH_PROCFILE_NAME,
406 S_IFREG | 0444, NULL);
407 if (qeth_procfile)
408 qeth_procfile->proc_fops = &qeth_procfile_fops;
409
410#ifdef CONFIG_QETH_PERF_STATS
411 qeth_perf_procfile = create_proc_entry(QETH_PERF_PROCFILE_NAME,
412 S_IFREG | 0444, NULL);
413 if (qeth_perf_procfile)
414 qeth_perf_procfile->proc_fops = &qeth_perf_procfile_fops;
415#endif /* CONFIG_QETH_PERF_STATS */
416
417 qeth_ipato_procfile = create_proc_entry(QETH_IPATO_PROCFILE_NAME,
418 S_IFREG | 0444, NULL);
419 if (qeth_ipato_procfile)
420 qeth_ipato_procfile->proc_fops = &qeth_ipato_procfile_fops;
421
422 if (qeth_procfile &&
423 qeth_ipato_procfile &&
424 qeth_perf_procfile_created)
425 return 0;
426 else
427 return -ENOMEM;
428}
429
430void __exit
431qeth_remove_procfs_entries(void)
432{
433 if (qeth_procfile)
434 remove_proc_entry(QETH_PROCFILE_NAME, NULL);
435 if (qeth_perf_procfile)
436 remove_proc_entry(QETH_PERF_PROCFILE_NAME, NULL);
437 if (qeth_ipato_procfile)
438 remove_proc_entry(QETH_IPATO_PROCFILE_NAME, NULL);
439}
440
441
 442/* ONLY FOR DEVELOPMENT! -> build it as a module */
443/*
444static void
445qeth_create_sysfs_entries(void)
446{
447 struct device *dev;
448
449 down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
450
451 list_for_each_entry(dev, &qeth_ccwgroup_driver.driver.devices,
452 driver_list)
453 qeth_create_device_attributes(dev);
454
455 up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
456}
457
458static void
459qeth_remove_sysfs_entries(void)
460{
461 struct device *dev;
462
463 down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
464
465 list_for_each_entry(dev, &qeth_ccwgroup_driver.driver.devices,
466 driver_list)
467 qeth_remove_device_attributes(dev);
468
469 up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
470}
471
472static int __init
473qeth_fs_init(void)
474{
475 printk(KERN_INFO "qeth_fs_init\n");
476 qeth_create_procfs_entries();
477 qeth_create_sysfs_entries();
478
479 return 0;
480}
481
482static void __exit
483qeth_fs_exit(void)
484{
485 printk(KERN_INFO "qeth_fs_exit\n");
486 qeth_remove_procfs_entries();
487 qeth_remove_sysfs_entries();
488}
489
490
491module_init(qeth_fs_init);
492module_exit(qeth_fs_exit);
493
494MODULE_LICENSE("GPL");
495*/
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
new file mode 100644
index 000000000000..240348398211
--- /dev/null
+++ b/drivers/s390/net/qeth_sys.c
@@ -0,0 +1,1788 @@
1/*
2 *
3 * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.51 $)
4 *
5 * Linux on zSeries OSA Express and HiperSockets support
6 * This file contains code related to sysfs.
7 *
8 * Copyright 2000,2003 IBM Corporation
9 *
10 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
11 * Frank Pavlic <pavlic@de.ibm.com>
12 *
13 */
14#include <linux/list.h>
15#include <linux/rwsem.h>
16
17#include <asm/ebcdic.h>
18
19#include "qeth.h"
20#include "qeth_mpc.h"
21#include "qeth_fs.h"
22
23const char *VERSION_QETH_SYS_C = "$Revision: 1.51 $";
24
25/*****************************************************************************/
26/* */
27/* /sys-fs stuff UNDER DEVELOPMENT !!! */
28/* */
29/*****************************************************************************/
30//low/high watermark
31
32static ssize_t
33qeth_dev_state_show(struct device *dev, char *buf)
34{
35 struct qeth_card *card = dev->driver_data;
36 if (!card)
37 return -EINVAL;
38
39 switch (card->state) {
40 case CARD_STATE_DOWN:
41 return sprintf(buf, "DOWN\n");
42 case CARD_STATE_HARDSETUP:
43 return sprintf(buf, "HARDSETUP\n");
44 case CARD_STATE_SOFTSETUP:
45 return sprintf(buf, "SOFTSETUP\n");
46 case CARD_STATE_UP:
47 if (card->lan_online)
48 return sprintf(buf, "UP (LAN ONLINE)\n");
49 else
50 return sprintf(buf, "UP (LAN OFFLINE)\n");
51 case CARD_STATE_RECOVER:
52 return sprintf(buf, "RECOVER\n");
53 default:
54 return sprintf(buf, "UNKNOWN\n");
55 }
56}
57
58static DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL);
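
For readers new to the macro: DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL) expands to roughly the following (a sketch of the 2.6-era macro; details may differ):

	struct device_attribute dev_attr_state = {
		.attr	= { .name = "state", .mode = 0444, .owner = THIS_MODULE },
		.show	= qeth_dev_state_show,
		.store	= NULL,
	};

The resulting dev_attr_state is collected into the attribute groups at the end of this file.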
59
60static ssize_t
61qeth_dev_chpid_show(struct device *dev, char *buf)
62{
63 struct qeth_card *card = dev->driver_data;
64 if (!card)
65 return -EINVAL;
66
67 return sprintf(buf, "%02X\n", card->info.chpid);
68}
69
70static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL);
71
72static ssize_t
73qeth_dev_if_name_show(struct device *dev, char *buf)
74{
75 struct qeth_card *card = dev->driver_data;
76 if (!card)
77 return -EINVAL;
78 return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
79}
80
81static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
82
83static ssize_t
84qeth_dev_card_type_show(struct device *dev, char *buf)
85{
86 struct qeth_card *card = dev->driver_data;
87 if (!card)
88 return -EINVAL;
89
90 return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
91}
92
93static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL);
94
95static ssize_t
96qeth_dev_portno_show(struct device *dev, char *buf)
97{
98 struct qeth_card *card = dev->driver_data;
99 if (!card)
100 return -EINVAL;
101
102 return sprintf(buf, "%i\n", card->info.portno);
103}
104
105static ssize_t
106qeth_dev_portno_store(struct device *dev, const char *buf, size_t count)
107{
108 struct qeth_card *card = dev->driver_data;
109 char *tmp;
110 unsigned int portno;
111
112 if (!card)
113 return -EINVAL;
114
115 if ((card->state != CARD_STATE_DOWN) &&
116 (card->state != CARD_STATE_RECOVER))
117 return -EPERM;
118
119 portno = simple_strtoul(buf, &tmp, 16);
 120 	if (portno > MAX_PORTNO){ /* portno is unsigned, no lower-bound check needed */
121 PRINT_WARN("portno 0x%X is out of range\n", portno);
122 return -EINVAL;
123 }
124
125 card->info.portno = portno;
126 return count;
127}
128
129static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
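
Note that the store routine parses its input with base 16. A hypothetical userspace sketch that sets the port number through sysfs (device path and value are illustrative only):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/bus/ccwgroup/devices/0.0.f500/portno", "w");

		if (!f)
			return 1;
		fprintf(f, "0\n");	/* parsed as hex by qeth_dev_portno_store() */
		return fclose(f) ? 1 : 0;
	}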
130
131static ssize_t
132qeth_dev_portname_show(struct device *dev, char *buf)
133{
134 struct qeth_card *card = dev->driver_data;
135 char portname[9] = {0, };
136
137 if (!card)
138 return -EINVAL;
139
140 if (card->info.portname_required) {
141 memcpy(portname, card->info.portname + 1, 8);
142 EBCASC(portname, 8);
143 return sprintf(buf, "%s\n", portname);
144 } else
145 return sprintf(buf, "no portname required\n");
146}
147
148static ssize_t
149qeth_dev_portname_store(struct device *dev, const char *buf, size_t count)
150{
151 struct qeth_card *card = dev->driver_data;
152 char *tmp;
153 int i;
154
155 if (!card)
156 return -EINVAL;
157
158 if ((card->state != CARD_STATE_DOWN) &&
159 (card->state != CARD_STATE_RECOVER))
160 return -EPERM;
161
162 tmp = strsep((char **) &buf, "\n");
163 if ((strlen(tmp) > 8) || (strlen(tmp) < 2))
164 return -EINVAL;
165
166 card->info.portname[0] = strlen(tmp);
 167 	/* pad the remaining bytes with blanks */
168 for (i = 1; i < 9; i++)
169 card->info.portname[i] = ' ';
170 strcpy(card->info.portname + 1, tmp);
171 ASCEBC(card->info.portname + 1, 8);
172
173 return count;
174}
175
176static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show,
177 qeth_dev_portname_store);
178
179static ssize_t
180qeth_dev_checksum_show(struct device *dev, char *buf)
181{
182 struct qeth_card *card = dev->driver_data;
183
184 if (!card)
185 return -EINVAL;
186
187 return sprintf(buf, "%s checksumming\n", qeth_get_checksum_str(card));
188}
189
190static ssize_t
191qeth_dev_checksum_store(struct device *dev, const char *buf, size_t count)
192{
193 struct qeth_card *card = dev->driver_data;
194 char *tmp;
195
196 if (!card)
197 return -EINVAL;
198
199 if ((card->state != CARD_STATE_DOWN) &&
200 (card->state != CARD_STATE_RECOVER))
201 return -EPERM;
202
203 tmp = strsep((char **) &buf, "\n");
204 if (!strcmp(tmp, "sw_checksumming"))
205 card->options.checksum_type = SW_CHECKSUMMING;
206 else if (!strcmp(tmp, "hw_checksumming"))
207 card->options.checksum_type = HW_CHECKSUMMING;
208 else if (!strcmp(tmp, "no_checksumming"))
209 card->options.checksum_type = NO_CHECKSUMMING;
210 else {
211 PRINT_WARN("Unknown checksumming type '%s'\n", tmp);
212 return -EINVAL;
213 }
214 return count;
215}
216
217static DEVICE_ATTR(checksumming, 0644, qeth_dev_checksum_show,
218 qeth_dev_checksum_store);
219
220static ssize_t
221qeth_dev_prioqing_show(struct device *dev, char *buf)
222{
223 struct qeth_card *card = dev->driver_data;
224
225 if (!card)
226 return -EINVAL;
227
228 switch (card->qdio.do_prio_queueing) {
229 case QETH_PRIO_Q_ING_PREC:
230 return sprintf(buf, "%s\n", "by precedence");
231 case QETH_PRIO_Q_ING_TOS:
232 return sprintf(buf, "%s\n", "by type of service");
233 default:
234 return sprintf(buf, "always queue %i\n",
235 card->qdio.default_out_queue);
236 }
237}
238
239static ssize_t
240qeth_dev_prioqing_store(struct device *dev, const char *buf, size_t count)
241{
242 struct qeth_card *card = dev->driver_data;
243 char *tmp;
244
245 if (!card)
246 return -EINVAL;
247
248 if ((card->state != CARD_STATE_DOWN) &&
249 (card->state != CARD_STATE_RECOVER))
250 return -EPERM;
251
 252 	/* cards with only one outbound queue (e.g. 1920 devices)
 253 	 * do not support priority queueing
 254 	 */
255 if (card->qdio.no_out_queues == 1) {
256 PRINT_WARN("Priority queueing disabled due "
257 "to hardware limitations!\n");
258 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
259 return -EPERM;
260 }
261
262 tmp = strsep((char **) &buf, "\n");
263 if (!strcmp(tmp, "prio_queueing_prec"))
264 card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
265 else if (!strcmp(tmp, "prio_queueing_tos"))
266 card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
267 else if (!strcmp(tmp, "no_prio_queueing:0")) {
268 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
269 card->qdio.default_out_queue = 0;
270 } else if (!strcmp(tmp, "no_prio_queueing:1")) {
271 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
272 card->qdio.default_out_queue = 1;
273 } else if (!strcmp(tmp, "no_prio_queueing:2")) {
274 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
275 card->qdio.default_out_queue = 2;
276 } else if (!strcmp(tmp, "no_prio_queueing:3")) {
277 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
278 card->qdio.default_out_queue = 3;
279 } else if (!strcmp(tmp, "no_prio_queueing")) {
280 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
281 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
282 } else {
283 PRINT_WARN("Unknown queueing type '%s'\n", tmp);
284 return -EINVAL;
285 }
286 return count;
287}
288
289static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show,
290 qeth_dev_prioqing_store);
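
Collected from the strcmp() chain above, the store routine accepts exactly these tokens:

	/*
	 * prio_queueing_prec  - queue by IP precedence bits
	 * prio_queueing_tos   - queue by type of service
	 * no_prio_queueing:N  - always use outbound queue N (N = 0..3)
	 * no_prio_queueing    - always use the driver default queue
	 */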
291
292static ssize_t
293qeth_dev_bufcnt_show(struct device *dev, char *buf)
294{
295 struct qeth_card *card = dev->driver_data;
296
297 if (!card)
298 return -EINVAL;
299
300 return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
301}
302
303static ssize_t
304qeth_dev_bufcnt_store(struct device *dev, const char *buf, size_t count)
305{
306 struct qeth_card *card = dev->driver_data;
307 char *tmp;
308 int cnt, old_cnt;
309 int rc;
310
311 if (!card)
312 return -EINVAL;
313
314 if ((card->state != CARD_STATE_DOWN) &&
315 (card->state != CARD_STATE_RECOVER))
316 return -EPERM;
317
318 old_cnt = card->qdio.in_buf_pool.buf_count;
319 cnt = simple_strtoul(buf, &tmp, 10);
320 cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
321 ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
322 if (old_cnt != cnt) {
323 if ((rc = qeth_realloc_buffer_pool(card, cnt)))
324 PRINT_WARN("Error (%d) while setting "
325 "buffer count.\n", rc);
326 }
327 return count;
328}
329
330static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
331 qeth_dev_bufcnt_store);
332
333static inline ssize_t
334qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route,
335 char *buf)
336{
337 switch (route->type) {
338 case PRIMARY_ROUTER:
339 return sprintf(buf, "%s\n", "primary router");
340 case SECONDARY_ROUTER:
341 return sprintf(buf, "%s\n", "secondary router");
342 case MULTICAST_ROUTER:
343 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
344 return sprintf(buf, "%s\n", "multicast router+");
345 else
346 return sprintf(buf, "%s\n", "multicast router");
347 case PRIMARY_CONNECTOR:
348 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
349 return sprintf(buf, "%s\n", "primary connector+");
350 else
351 return sprintf(buf, "%s\n", "primary connector");
352 case SECONDARY_CONNECTOR:
353 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
354 return sprintf(buf, "%s\n", "secondary connector+");
355 else
356 return sprintf(buf, "%s\n", "secondary connector");
357 default:
358 return sprintf(buf, "%s\n", "no");
359 }
360}
361
362static ssize_t
363qeth_dev_route4_show(struct device *dev, char *buf)
364{
365 struct qeth_card *card = dev->driver_data;
366
367 if (!card)
368 return -EINVAL;
369
370 return qeth_dev_route_show(card, &card->options.route4, buf);
371}
372
373static inline ssize_t
374qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route,
375 enum qeth_prot_versions prot, const char *buf, size_t count)
376{
377 enum qeth_routing_types old_route_type = route->type;
378 char *tmp;
379 int rc;
380
381 tmp = strsep((char **) &buf, "\n");
382
383 if (!strcmp(tmp, "no_router")){
384 route->type = NO_ROUTER;
385 } else if (!strcmp(tmp, "primary_connector")) {
386 route->type = PRIMARY_CONNECTOR;
387 } else if (!strcmp(tmp, "secondary_connector")) {
388 route->type = SECONDARY_CONNECTOR;
389 } else if (!strcmp(tmp, "multicast_router")) {
390 route->type = MULTICAST_ROUTER;
391 } else if (!strcmp(tmp, "primary_router")) {
392 route->type = PRIMARY_ROUTER;
393 } else if (!strcmp(tmp, "secondary_router")) {
394 route->type = SECONDARY_ROUTER;
395 } else if (!strcmp(tmp, "multicast_router")) {
396 route->type = MULTICAST_ROUTER;
397 } else {
398 PRINT_WARN("Invalid routing type '%s'.\n", tmp);
399 return -EINVAL;
400 }
401 if (((card->state == CARD_STATE_SOFTSETUP) ||
402 (card->state == CARD_STATE_UP)) &&
403 (old_route_type != route->type)){
404 if (prot == QETH_PROT_IPV4)
405 rc = qeth_setrouting_v4(card);
406 else if (prot == QETH_PROT_IPV6)
407 rc = qeth_setrouting_v6(card);
408 }
409 return count;
410}
411
412static ssize_t
413qeth_dev_route4_store(struct device *dev, const char *buf, size_t count)
414{
415 struct qeth_card *card = dev->driver_data;
416
417 if (!card)
418 return -EINVAL;
419
420 return qeth_dev_route_store(card, &card->options.route4,
421 QETH_PROT_IPV4, buf, count);
422}
423
424static DEVICE_ATTR(route4, 0644, qeth_dev_route4_show, qeth_dev_route4_store);
425
426#ifdef CONFIG_QETH_IPV6
427static ssize_t
428qeth_dev_route6_show(struct device *dev, char *buf)
429{
430 struct qeth_card *card = dev->driver_data;
431
432 if (!card)
433 return -EINVAL;
434
435 if (!qeth_is_supported(card, IPA_IPV6))
436 return sprintf(buf, "%s\n", "n/a");
437
438 return qeth_dev_route_show(card, &card->options.route6, buf);
439}
440
441static ssize_t
442qeth_dev_route6_store(struct device *dev, const char *buf, size_t count)
443{
444 struct qeth_card *card = dev->driver_data;
445
446 if (!card)
447 return -EINVAL;
448
449 if (!qeth_is_supported(card, IPA_IPV6)){
450 PRINT_WARN("IPv6 not supported for interface %s.\n"
451 "Routing status no changed.\n",
452 QETH_CARD_IFNAME(card));
453 return -ENOTSUPP;
454 }
455
456 return qeth_dev_route_store(card, &card->options.route6,
457 QETH_PROT_IPV6, buf, count);
458}
459
460static DEVICE_ATTR(route6, 0644, qeth_dev_route6_show, qeth_dev_route6_store);
461#endif
462
463static ssize_t
464qeth_dev_add_hhlen_show(struct device *dev, char *buf)
465{
466 struct qeth_card *card = dev->driver_data;
467
468 if (!card)
469 return -EINVAL;
470
471 return sprintf(buf, "%i\n", card->options.add_hhlen);
472}
473
474static ssize_t
475qeth_dev_add_hhlen_store(struct device *dev, const char *buf, size_t count)
476{
477 struct qeth_card *card = dev->driver_data;
478 char *tmp;
479 int i;
480
481 if (!card)
482 return -EINVAL;
483
484 if ((card->state != CARD_STATE_DOWN) &&
485 (card->state != CARD_STATE_RECOVER))
486 return -EPERM;
487
488 i = simple_strtoul(buf, &tmp, 10);
489 if ((i < 0) || (i > MAX_ADD_HHLEN)) {
490 PRINT_WARN("add_hhlen out of range\n");
491 return -EINVAL;
492 }
493 card->options.add_hhlen = i;
494
495 return count;
496}
497
498static DEVICE_ATTR(add_hhlen, 0644, qeth_dev_add_hhlen_show,
499 qeth_dev_add_hhlen_store);
500
501static ssize_t
502qeth_dev_fake_ll_show(struct device *dev, char *buf)
503{
504 struct qeth_card *card = dev->driver_data;
505
506 if (!card)
507 return -EINVAL;
508
509 return sprintf(buf, "%i\n", card->options.fake_ll? 1:0);
510}
511
512static ssize_t
513qeth_dev_fake_ll_store(struct device *dev, const char *buf, size_t count)
514{
515 struct qeth_card *card = dev->driver_data;
516 char *tmp;
517 int i;
518
519 if (!card)
520 return -EINVAL;
521
522 if ((card->state != CARD_STATE_DOWN) &&
523 (card->state != CARD_STATE_RECOVER))
524 return -EPERM;
525
526 i = simple_strtoul(buf, &tmp, 16);
527 if ((i != 0) && (i != 1)) {
528 PRINT_WARN("fake_ll: write 0 or 1 to this file!\n");
529 return -EINVAL;
530 }
531 card->options.fake_ll = i;
532 return count;
533}
534
535static DEVICE_ATTR(fake_ll, 0644, qeth_dev_fake_ll_show,
536 qeth_dev_fake_ll_store);
537
538static ssize_t
539qeth_dev_fake_broadcast_show(struct device *dev, char *buf)
540{
541 struct qeth_card *card = dev->driver_data;
542
543 if (!card)
544 return -EINVAL;
545
546 return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0);
547}
548
549static ssize_t
550qeth_dev_fake_broadcast_store(struct device *dev, const char *buf, size_t count)
551{
552 struct qeth_card *card = dev->driver_data;
553 char *tmp;
554 int i;
555
556 if (!card)
557 return -EINVAL;
558
559 if ((card->state != CARD_STATE_DOWN) &&
560 (card->state != CARD_STATE_RECOVER))
561 return -EPERM;
562
563 i = simple_strtoul(buf, &tmp, 16);
564 if ((i == 0) || (i == 1))
565 card->options.fake_broadcast = i;
566 else {
567 PRINT_WARN("fake_broadcast: write 0 or 1 to this file!\n");
568 return -EINVAL;
569 }
570 return count;
571}
572
573static DEVICE_ATTR(fake_broadcast, 0644, qeth_dev_fake_broadcast_show,
574 qeth_dev_fake_broadcast_store);
575
576static ssize_t
577qeth_dev_recover_store(struct device *dev, const char *buf, size_t count)
578{
579 struct qeth_card *card = dev->driver_data;
580 char *tmp;
581 int i;
582
583 if (!card)
584 return -EINVAL;
585
586 if (card->state != CARD_STATE_UP)
587 return -EPERM;
588
589 i = simple_strtoul(buf, &tmp, 16);
590 if (i == 1)
591 qeth_schedule_recovery(card);
592
593 return count;
594}
595
596static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
597
598static ssize_t
599qeth_dev_broadcast_mode_show(struct device *dev, char *buf)
600{
601 struct qeth_card *card = dev->driver_data;
602
603 if (!card)
604 return -EINVAL;
605
606 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
607 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
608 return sprintf(buf, "n/a\n");
609
610 return sprintf(buf, "%s\n", (card->options.broadcast_mode ==
611 QETH_TR_BROADCAST_ALLRINGS)?
612 "all rings":"local");
613}
614
615static ssize_t
616qeth_dev_broadcast_mode_store(struct device *dev, const char *buf, size_t count)
617{
618 struct qeth_card *card = dev->driver_data;
619 char *tmp;
620
621 if (!card)
622 return -EINVAL;
623
624 if ((card->state != CARD_STATE_DOWN) &&
625 (card->state != CARD_STATE_RECOVER))
626 return -EPERM;
627
628 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
629 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))){
630 PRINT_WARN("Device is not a tokenring device!\n");
631 return -EINVAL;
632 }
633
634 tmp = strsep((char **) &buf, "\n");
635
636 if (!strcmp(tmp, "local")){
637 card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL;
638 return count;
639 } else if (!strcmp(tmp, "all_rings")) {
640 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
641 return count;
642 } else {
643 PRINT_WARN("broadcast_mode: invalid mode %s!\n",
644 tmp);
645 return -EINVAL;
646 }
647 return count;
648}
649
650static DEVICE_ATTR(broadcast_mode, 0644, qeth_dev_broadcast_mode_show,
651 qeth_dev_broadcast_mode_store);
652
653static ssize_t
654qeth_dev_canonical_macaddr_show(struct device *dev, char *buf)
655{
656 struct qeth_card *card = dev->driver_data;
657
658 if (!card)
659 return -EINVAL;
660
661 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
662 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
663 return sprintf(buf, "n/a\n");
664
665 return sprintf(buf, "%i\n", (card->options.macaddr_mode ==
666 QETH_TR_MACADDR_CANONICAL)? 1:0);
667}
668
669static ssize_t
670qeth_dev_canonical_macaddr_store(struct device *dev, const char *buf,
671 size_t count)
672{
673 struct qeth_card *card = dev->driver_data;
674 char *tmp;
675 int i;
676
677 if (!card)
678 return -EINVAL;
679
680 if ((card->state != CARD_STATE_DOWN) &&
681 (card->state != CARD_STATE_RECOVER))
682 return -EPERM;
683
684 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
685 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))){
686 PRINT_WARN("Device is not a tokenring device!\n");
687 return -EINVAL;
688 }
689
690 i = simple_strtoul(buf, &tmp, 16);
691 if ((i == 0) || (i == 1))
692 card->options.macaddr_mode = i?
693 QETH_TR_MACADDR_CANONICAL :
694 QETH_TR_MACADDR_NONCANONICAL;
695 else {
696 PRINT_WARN("canonical_macaddr: write 0 or 1 to this file!\n");
697 return -EINVAL;
698 }
699 return count;
700}
701
702static DEVICE_ATTR(canonical_macaddr, 0644, qeth_dev_canonical_macaddr_show,
703 qeth_dev_canonical_macaddr_store);
704
705static ssize_t
706qeth_dev_layer2_show(struct device *dev, char *buf)
707{
708 struct qeth_card *card = dev->driver_data;
709
710 if (!card)
711 return -EINVAL;
712
713 return sprintf(buf, "%i\n", card->options.layer2 ? 1:0);
714}
715
716static ssize_t
717qeth_dev_layer2_store(struct device *dev, const char *buf, size_t count)
718{
719 struct qeth_card *card = dev->driver_data;
720 char *tmp;
721 int i;
722
723 if (!card)
724 return -EINVAL;
725
726 if (((card->state != CARD_STATE_DOWN) &&
727 (card->state != CARD_STATE_RECOVER)) ||
728 (card->info.type != QETH_CARD_TYPE_OSAE))
729 return -EPERM;
730
731 i = simple_strtoul(buf, &tmp, 16);
732 if ((i == 0) || (i == 1))
733 card->options.layer2 = i;
734 else {
735 PRINT_WARN("layer2: write 0 or 1 to this file!\n");
736 return -EINVAL;
737 }
738 return count;
739}
740
741static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
742 qeth_dev_layer2_store);
743
744static ssize_t
745qeth_dev_large_send_show(struct device *dev, char *buf)
746{
747 struct qeth_card *card = dev->driver_data;
748
749 if (!card)
750 return -EINVAL;
751
752 switch (card->options.large_send) {
753 case QETH_LARGE_SEND_NO:
754 return sprintf(buf, "%s\n", "no");
755 case QETH_LARGE_SEND_EDDP:
756 return sprintf(buf, "%s\n", "EDDP");
757 case QETH_LARGE_SEND_TSO:
758 return sprintf(buf, "%s\n", "TSO");
759 default:
760 return sprintf(buf, "%s\n", "N/A");
761 }
762}
763
764static ssize_t
765qeth_dev_large_send_store(struct device *dev, const char *buf, size_t count)
766{
767 struct qeth_card *card = dev->driver_data;
768 enum qeth_large_send_types type;
769 int rc = 0;
770 char *tmp;
771
772 if (!card)
773 return -EINVAL;
774
775 tmp = strsep((char **) &buf, "\n");
776
777 if (!strcmp(tmp, "no")){
778 type = QETH_LARGE_SEND_NO;
779 } else if (!strcmp(tmp, "EDDP")) {
780 type = QETH_LARGE_SEND_EDDP;
781 } else if (!strcmp(tmp, "TSO")) {
782 type = QETH_LARGE_SEND_TSO;
783 } else {
784 PRINT_WARN("large_send: invalid mode %s!\n", tmp);
785 return -EINVAL;
786 }
787 if (card->options.large_send == type)
788 return count;
789 card->options.large_send = type;
790 if ((rc = qeth_set_large_send(card)))
791 return rc;
792
793 return count;
794}
795
796static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show,
797 qeth_dev_large_send_store);
798
799static ssize_t
800qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value )
801{
802
803 if (!card)
804 return -EINVAL;
805
806 return sprintf(buf, "%i\n", value);
807}
808
809static ssize_t
810qeth_dev_blkt_store(struct qeth_card *card, const char *buf, size_t count,
811 int *value, int max_value)
812{
813 char *tmp;
814 int i;
815
816 if (!card)
817 return -EINVAL;
818
819 if ((card->state != CARD_STATE_DOWN) &&
820 (card->state != CARD_STATE_RECOVER))
821 return -EPERM;
822
823 i = simple_strtoul(buf, &tmp, 10);
824 if (i <= max_value) {
825 *value = i;
826 } else {
827 PRINT_WARN("blkt total time: write values between"
828 " 0 and %d to this file!\n", max_value);
829 return -EINVAL;
830 }
831 return count;
832}
833
834static ssize_t
835qeth_dev_blkt_total_show(struct device *dev, char *buf)
836{
837 struct qeth_card *card = dev->driver_data;
838
839 return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total);
840}
841
842
843static ssize_t
844qeth_dev_blkt_total_store(struct device *dev, const char *buf, size_t count)
845{
846 struct qeth_card *card = dev->driver_data;
847
848 return qeth_dev_blkt_store(card, buf, count,
849 &card->info.blkt.time_total,1000);
850}
851
852
853
854static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show,
855 qeth_dev_blkt_total_store);
856
857static ssize_t
858qeth_dev_blkt_inter_show(struct device *dev, char *buf)
859{
860 struct qeth_card *card = dev->driver_data;
861
862 return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet);
863}
864
865
866static ssize_t
867qeth_dev_blkt_inter_store(struct device *dev, const char *buf, size_t count)
868{
869 struct qeth_card *card = dev->driver_data;
870
871 return qeth_dev_blkt_store(card, buf, count,
872 &card->info.blkt.inter_packet,100);
873}
874
875static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
876 qeth_dev_blkt_inter_store);
877
878static ssize_t
879qeth_dev_blkt_inter_jumbo_show(struct device *dev, char *buf)
880{
881 struct qeth_card *card = dev->driver_data;
882
883 return qeth_dev_blkt_show(buf, card,
884 card->info.blkt.inter_packet_jumbo);
885}
886
887
888static ssize_t
889qeth_dev_blkt_inter_jumbo_store(struct device *dev, const char *buf, size_t count)
890{
891 struct qeth_card *card = dev->driver_data;
892
893 return qeth_dev_blkt_store(card, buf, count,
894 &card->info.blkt.inter_packet_jumbo,100);
895}
896
897static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
898 qeth_dev_blkt_inter_jumbo_store);
899
900static struct device_attribute * qeth_blkt_device_attrs[] = {
901 &dev_attr_total,
902 &dev_attr_inter,
903 &dev_attr_inter_jumbo,
904 NULL,
905};
906
907static struct attribute_group qeth_device_blkt_group = {
908 .name = "blkt",
909 .attrs = (struct attribute **)qeth_blkt_device_attrs,
910};
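
Because the group carries .name = "blkt", sysfs places these three files in a blkt/ subdirectory of the device directory, e.g. (bus id illustrative):

	/* /sys/bus/ccwgroup/devices/0.0.f500/blkt/{total,inter,inter_jumbo} */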
911
912static struct device_attribute * qeth_device_attrs[] = {
913 &dev_attr_state,
914 &dev_attr_chpid,
915 &dev_attr_if_name,
916 &dev_attr_card_type,
917 &dev_attr_portno,
918 &dev_attr_portname,
919 &dev_attr_checksumming,
920 &dev_attr_priority_queueing,
921 &dev_attr_buffer_count,
922 &dev_attr_route4,
923#ifdef CONFIG_QETH_IPV6
924 &dev_attr_route6,
925#endif
926 &dev_attr_add_hhlen,
927 &dev_attr_fake_ll,
928 &dev_attr_fake_broadcast,
929 &dev_attr_recover,
930 &dev_attr_broadcast_mode,
931 &dev_attr_canonical_macaddr,
932 &dev_attr_layer2,
933 &dev_attr_large_send,
934 NULL,
935};
936
937static struct attribute_group qeth_device_attr_group = {
938 .attrs = (struct attribute **)qeth_device_attrs,
939};
940
941
942#define QETH_DEVICE_ATTR(_id,_name,_mode,_show,_store) \
943struct device_attribute dev_attr_##_id = { \
944 .attr = {.name=__stringify(_name), .mode=_mode, .owner=THIS_MODULE },\
945 .show = _show, \
946 .store = _store, \
947};
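
Unlike plain DEVICE_ATTR, this macro decouples the C identifier (_id) from the sysfs file name (_name), so several attribute groups can each expose a file named e.g. "add4" without clashing symbols. The first use below expands to roughly:

	struct device_attribute dev_attr_ipato_enable = {
		.attr	= { .name = "enable", .mode = 0644, .owner = THIS_MODULE },
		.show	= qeth_dev_ipato_enable_show,
		.store	= qeth_dev_ipato_enable_store,
	};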
948
949int
950qeth_check_layer2(struct qeth_card *card)
951{
952 if (card->options.layer2)
953 return -EPERM;
954 return 0;
955}
956
957
958static ssize_t
959qeth_dev_ipato_enable_show(struct device *dev, char *buf)
960{
961 struct qeth_card *card = dev->driver_data;
962
963 if (!card)
964 return -EINVAL;
965
966 if (qeth_check_layer2(card))
967 return -EPERM;
968 return sprintf(buf, "%i\n", card->ipato.enabled? 1:0);
969}
970
971static ssize_t
972qeth_dev_ipato_enable_store(struct device *dev, const char *buf, size_t count)
973{
974 struct qeth_card *card = dev->driver_data;
975 char *tmp;
976
977 if (!card)
978 return -EINVAL;
979
980 if ((card->state != CARD_STATE_DOWN) &&
981 (card->state != CARD_STATE_RECOVER))
982 return -EPERM;
983
984 if (qeth_check_layer2(card))
985 return -EPERM;
986
987 tmp = strsep((char **) &buf, "\n");
988 if (!strcmp(tmp, "toggle")){
989 card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
990 } else if (!strcmp(tmp, "1")){
991 card->ipato.enabled = 1;
992 } else if (!strcmp(tmp, "0")){
993 card->ipato.enabled = 0;
994 } else {
995 PRINT_WARN("ipato_enable: write 0, 1 or 'toggle' to "
996 "this file\n");
997 return -EINVAL;
998 }
999 return count;
1000}
1001
1002static QETH_DEVICE_ATTR(ipato_enable, enable, 0644,
1003 qeth_dev_ipato_enable_show,
1004 qeth_dev_ipato_enable_store);
1005
1006static ssize_t
1007qeth_dev_ipato_invert4_show(struct device *dev, char *buf)
1008{
1009 struct qeth_card *card = dev->driver_data;
1010
1011 if (!card)
1012 return -EINVAL;
1013
1014 if (qeth_check_layer2(card))
1015 return -EPERM;
1016
1017 return sprintf(buf, "%i\n", card->ipato.invert4? 1:0);
1018}
1019
1020static ssize_t
1021qeth_dev_ipato_invert4_store(struct device *dev, const char *buf, size_t count)
1022{
1023 struct qeth_card *card = dev->driver_data;
1024 char *tmp;
1025
1026 if (!card)
1027 return -EINVAL;
1028
1029 if (qeth_check_layer2(card))
1030 return -EPERM;
1031
1032 tmp = strsep((char **) &buf, "\n");
1033 if (!strcmp(tmp, "toggle")){
1034 card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
1035 } else if (!strcmp(tmp, "1")){
1036 card->ipato.invert4 = 1;
1037 } else if (!strcmp(tmp, "0")){
1038 card->ipato.invert4 = 0;
1039 } else {
1040 PRINT_WARN("ipato_invert4: write 0, 1 or 'toggle' to "
1041 "this file\n");
1042 return -EINVAL;
1043 }
1044 return count;
1045}
1046
1047static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
1048 qeth_dev_ipato_invert4_show,
1049 qeth_dev_ipato_invert4_store);
1050
1051static inline ssize_t
1052qeth_dev_ipato_add_show(char *buf, struct qeth_card *card,
1053 enum qeth_prot_versions proto)
1054{
1055 struct qeth_ipato_entry *ipatoe;
1056 unsigned long flags;
1057 char addr_str[40];
1058 int entry_len; /* length of 1 entry string, differs between v4 and v6 */
1059 int i = 0;
1060
1061 if (qeth_check_layer2(card))
1062 return -EPERM;
1063
1064 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
1065 /* add strlen for "/<mask>\n" */
1066 entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
1067 spin_lock_irqsave(&card->ip_lock, flags);
1068 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
1069 if (ipatoe->proto != proto)
1070 continue;
1071 /* String must not be longer than PAGE_SIZE. So we check if
 1072 	 * string length gets near PAGE_SIZE. Then we can safely display
1073 * the next IPv6 address (worst case, compared to IPv4) */
1074 if ((PAGE_SIZE - i) <= entry_len)
1075 break;
1076 qeth_ipaddr_to_string(proto, ipatoe->addr, addr_str);
1077 i += snprintf(buf + i, PAGE_SIZE - i,
1078 "%s/%i\n", addr_str, ipatoe->mask_bits);
1079 }
1080 spin_unlock_irqrestore(&card->ip_lock, flags);
1081 i += snprintf(buf + i, PAGE_SIZE - i, "\n");
1082
1083 return i;
1084}
1085
1086static ssize_t
1087qeth_dev_ipato_add4_show(struct device *dev, char *buf)
1088{
1089 struct qeth_card *card = dev->driver_data;
1090
1091 if (!card)
1092 return -EINVAL;
1093
1094 return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
1095}
1096
1097static inline int
1098qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto,
1099 u8 *addr, int *mask_bits)
1100{
1101 const char *start, *end;
1102 char *tmp;
1103 char buffer[49] = {0, };
1104
1105 start = buf;
1106 /* get address string */
1107 end = strchr(start, '/');
1108 if (!end){
1109 PRINT_WARN("Invalid format for ipato_addx/delx. "
1110 "Use <ip addr>/<mask bits>\n");
1111 return -EINVAL;
1112 }
1113 strncpy(buffer, start, end - start);
1114 if (qeth_string_to_ipaddr(buffer, proto, addr)){
1115 PRINT_WARN("Invalid IP address format!\n");
1116 return -EINVAL;
1117 }
1118 start = end + 1;
1119 *mask_bits = simple_strtoul(start, &tmp, 10);
1120
1121 return 0;
1122}
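
The parser expects one "<ip addr>/<mask bits>" token per write; for example (values illustrative):

	/* "192.168.0.0/16" -> addr = {192,168,0,0}, mask_bits = 16 */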
1123
1124static inline ssize_t
1125qeth_dev_ipato_add_store(const char *buf, size_t count,
1126 struct qeth_card *card, enum qeth_prot_versions proto)
1127{
1128 struct qeth_ipato_entry *ipatoe;
1129 u8 addr[16];
1130 int mask_bits;
1131 int rc;
1132
1133 if (qeth_check_layer2(card))
1134 return -EPERM;
1135 if ((rc = qeth_parse_ipatoe(buf, proto, addr, &mask_bits)))
1136 return rc;
1137
1138 if (!(ipatoe = kmalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL))){
1139 PRINT_WARN("No memory to allocate ipato entry\n");
1140 return -ENOMEM;
1141 }
1142 memset(ipatoe, 0, sizeof(struct qeth_ipato_entry));
1143 ipatoe->proto = proto;
1144 memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
1145 ipatoe->mask_bits = mask_bits;
1146
1147 if ((rc = qeth_add_ipato_entry(card, ipatoe))){
1148 kfree(ipatoe);
1149 return rc;
1150 }
1151
1152 return count;
1153}
1154
1155static ssize_t
1156qeth_dev_ipato_add4_store(struct device *dev, const char *buf, size_t count)
1157{
1158 struct qeth_card *card = dev->driver_data;
1159
1160 if (!card)
1161 return -EINVAL;
1162
1163 return qeth_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4);
1164}
1165
1166static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
1167 qeth_dev_ipato_add4_show,
1168 qeth_dev_ipato_add4_store);
1169
1170static inline ssize_t
1171qeth_dev_ipato_del_store(const char *buf, size_t count,
1172 struct qeth_card *card, enum qeth_prot_versions proto)
1173{
1174 u8 addr[16];
1175 int mask_bits;
1176 int rc;
1177
1178 if (qeth_check_layer2(card))
1179 return -EPERM;
1180 if ((rc = qeth_parse_ipatoe(buf, proto, addr, &mask_bits)))
1181 return rc;
1182
1183 qeth_del_ipato_entry(card, proto, addr, mask_bits);
1184
1185 return count;
1186}
1187
1188static ssize_t
1189qeth_dev_ipato_del4_store(struct device *dev, const char *buf, size_t count)
1190{
1191 struct qeth_card *card = dev->driver_data;
1192
1193 if (!card)
1194 return -EINVAL;
1195
1196 return qeth_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4);
1197}
1198
1199static QETH_DEVICE_ATTR(ipato_del4, del4, 0200, NULL,
1200 qeth_dev_ipato_del4_store);
1201
1202#ifdef CONFIG_QETH_IPV6
1203static ssize_t
1204qeth_dev_ipato_invert6_show(struct device *dev, char *buf)
1205{
1206 struct qeth_card *card = dev->driver_data;
1207
1208 if (!card)
1209 return -EINVAL;
1210
1211 if (qeth_check_layer2(card))
1212 return -EPERM;
1213
1214 return sprintf(buf, "%i\n", card->ipato.invert6? 1:0);
1215}
1216
1217static ssize_t
1218qeth_dev_ipato_invert6_store(struct device *dev, const char *buf, size_t count)
1219{
1220 struct qeth_card *card = dev->driver_data;
1221 char *tmp;
1222
1223 if (!card)
1224 return -EINVAL;
1225
1226 if (qeth_check_layer2(card))
1227 return -EPERM;
1228
1229 tmp = strsep((char **) &buf, "\n");
1230 if (!strcmp(tmp, "toggle")){
1231 card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
1232 } else if (!strcmp(tmp, "1")){
1233 card->ipato.invert6 = 1;
1234 } else if (!strcmp(tmp, "0")){
1235 card->ipato.invert6 = 0;
1236 } else {
1237 PRINT_WARN("ipato_invert6: write 0, 1 or 'toggle' to "
1238 "this file\n");
1239 return -EINVAL;
1240 }
1241 return count;
1242}
1243
1244static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644,
1245 qeth_dev_ipato_invert6_show,
1246 qeth_dev_ipato_invert6_store);
1247
1248
1249static ssize_t
1250qeth_dev_ipato_add6_show(struct device *dev, char *buf)
1251{
1252 struct qeth_card *card = dev->driver_data;
1253
1254 if (!card)
1255 return -EINVAL;
1256
1257 return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV6);
1258}
1259
1260static ssize_t
1261qeth_dev_ipato_add6_store(struct device *dev, const char *buf, size_t count)
1262{
1263 struct qeth_card *card = dev->driver_data;
1264
1265 if (!card)
1266 return -EINVAL;
1267
1268 return qeth_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6);
1269}
1270
1271static QETH_DEVICE_ATTR(ipato_add6, add6, 0644,
1272 qeth_dev_ipato_add6_show,
1273 qeth_dev_ipato_add6_store);
1274
1275static ssize_t
1276qeth_dev_ipato_del6_store(struct device *dev, const char *buf, size_t count)
1277{
1278 struct qeth_card *card = dev->driver_data;
1279
1280 if (!card)
1281 return -EINVAL;
1282
1283 return qeth_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6);
1284}
1285
1286static QETH_DEVICE_ATTR(ipato_del6, del6, 0200, NULL,
1287 qeth_dev_ipato_del6_store);
1288#endif /* CONFIG_QETH_IPV6 */
1289
1290static struct device_attribute * qeth_ipato_device_attrs[] = {
1291 &dev_attr_ipato_enable,
1292 &dev_attr_ipato_invert4,
1293 &dev_attr_ipato_add4,
1294 &dev_attr_ipato_del4,
1295#ifdef CONFIG_QETH_IPV6
1296 &dev_attr_ipato_invert6,
1297 &dev_attr_ipato_add6,
1298 &dev_attr_ipato_del6,
1299#endif
1300 NULL,
1301};
1302
1303static struct attribute_group qeth_device_ipato_group = {
1304 .name = "ipa_takeover",
1305 .attrs = (struct attribute **)qeth_ipato_device_attrs,
1306};
1307
1308static inline ssize_t
1309qeth_dev_vipa_add_show(char *buf, struct qeth_card *card,
1310 enum qeth_prot_versions proto)
1311{
1312 struct qeth_ipaddr *ipaddr;
1313 char addr_str[40];
1314 int entry_len; /* length of 1 entry string, differs between v4 and v6 */
1315 unsigned long flags;
1316 int i = 0;
1317
1318 if (qeth_check_layer2(card))
1319 return -EPERM;
1320
1321 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
1322 entry_len += 2; /* \n + terminator */
1323 spin_lock_irqsave(&card->ip_lock, flags);
1324 list_for_each_entry(ipaddr, &card->ip_list, entry){
1325 if (ipaddr->proto != proto)
1326 continue;
1327 if (ipaddr->type != QETH_IP_TYPE_VIPA)
1328 continue;
1329 /* String must not be longer than PAGE_SIZE. So we check if
 1330 	 * string length gets near PAGE_SIZE. Then we can safely display
1331 * the next IPv6 address (worst case, compared to IPv4) */
1332 if ((PAGE_SIZE - i) <= entry_len)
1333 break;
1334 qeth_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str);
1335 i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
1336 }
1337 spin_unlock_irqrestore(&card->ip_lock, flags);
1338 i += snprintf(buf + i, PAGE_SIZE - i, "\n");
1339
1340 return i;
1341}
1342
1343static ssize_t
1344qeth_dev_vipa_add4_show(struct device *dev, char *buf)
1345{
1346 struct qeth_card *card = dev->driver_data;
1347
1348 if (!card)
1349 return -EINVAL;
1350
1351 return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
1352}
1353
1354static inline int
1355qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto,
1356 u8 *addr)
1357{
1358 if (qeth_string_to_ipaddr(buf, proto, addr)){
1359 PRINT_WARN("Invalid IP address format!\n");
1360 return -EINVAL;
1361 }
1362 return 0;
1363}
1364
1365static inline ssize_t
1366qeth_dev_vipa_add_store(const char *buf, size_t count,
1367 struct qeth_card *card, enum qeth_prot_versions proto)
1368{
1369 u8 addr[16] = {0, };
1370 int rc;
1371
1372 if (qeth_check_layer2(card))
1373 return -EPERM;
1374 if ((rc = qeth_parse_vipae(buf, proto, addr)))
1375 return rc;
1376
1377 if ((rc = qeth_add_vipa(card, proto, addr)))
1378 return rc;
1379
1380 return count;
1381}
1382
1383static ssize_t
1384qeth_dev_vipa_add4_store(struct device *dev, const char *buf, size_t count)
1385{
1386 struct qeth_card *card = dev->driver_data;
1387
1388 if (!card)
1389 return -EINVAL;
1390
1391 return qeth_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4);
1392}
1393
1394static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
1395 qeth_dev_vipa_add4_show,
1396 qeth_dev_vipa_add4_store);
1397
1398static inline ssize_t
1399qeth_dev_vipa_del_store(const char *buf, size_t count,
1400 struct qeth_card *card, enum qeth_prot_versions proto)
1401{
1402 u8 addr[16];
1403 int rc;
1404
1405 if (qeth_check_layer2(card))
1406 return -EPERM;
1407 if ((rc = qeth_parse_vipae(buf, proto, addr)))
1408 return rc;
1409
1410 qeth_del_vipa(card, proto, addr);
1411
1412 return count;
1413}
1414
1415static ssize_t
1416qeth_dev_vipa_del4_store(struct device *dev, const char *buf, size_t count)
1417{
1418 struct qeth_card *card = dev->driver_data;
1419
1420 if (!card)
1421 return -EINVAL;
1422
1423 return qeth_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
1424}
1425
1426static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
1427 qeth_dev_vipa_del4_store);
1428
1429#ifdef CONFIG_QETH_IPV6
1430static ssize_t
1431qeth_dev_vipa_add6_show(struct device *dev, char *buf)
1432{
1433 struct qeth_card *card = dev->driver_data;
1434
1435 if (!card)
1436 return -EINVAL;
1437
1438 return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV6);
1439}
1440
1441static ssize_t
1442qeth_dev_vipa_add6_store(struct device *dev, const char *buf, size_t count)
1443{
1444 struct qeth_card *card = dev->driver_data;
1445
1446 if (!card)
1447 return -EINVAL;
1448
1449 return qeth_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
1450}
1451
1452static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
1453 qeth_dev_vipa_add6_show,
1454 qeth_dev_vipa_add6_store);
1455
1456static ssize_t
1457qeth_dev_vipa_del6_store(struct device *dev, const char *buf, size_t count)
1458{
1459 struct qeth_card *card = dev->driver_data;
1460
1461 if (!card)
1462 return -EINVAL;
1463
1464 if (qeth_check_layer2(card))
1465 return -EPERM;
1466
1467 return qeth_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
1468}
1469
1470static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL,
1471 qeth_dev_vipa_del6_store);
1472#endif /* CONFIG_QETH_IPV6 */
1473
1474static struct device_attribute * qeth_vipa_device_attrs[] = {
1475 &dev_attr_vipa_add4,
1476 &dev_attr_vipa_del4,
1477#ifdef CONFIG_QETH_IPV6
1478 &dev_attr_vipa_add6,
1479 &dev_attr_vipa_del6,
1480#endif
1481 NULL,
1482};
1483
1484static struct attribute_group qeth_device_vipa_group = {
1485 .name = "vipa",
1486 .attrs = (struct attribute **)qeth_vipa_device_attrs,
1487};
1488
1489static inline ssize_t
1490qeth_dev_rxip_add_show(char *buf, struct qeth_card *card,
1491 enum qeth_prot_versions proto)
1492{
1493 struct qeth_ipaddr *ipaddr;
1494 char addr_str[40];
1495 int entry_len; /* length of 1 entry string, differs between v4 and v6 */
1496 unsigned long flags;
1497 int i = 0;
1498
1499 if (qeth_check_layer2(card))
1500 return -EPERM;
1501
1502 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
1503 entry_len += 2; /* \n + terminator */
1504 spin_lock_irqsave(&card->ip_lock, flags);
1505 list_for_each_entry(ipaddr, &card->ip_list, entry){
1506 if (ipaddr->proto != proto)
1507 continue;
1508 if (ipaddr->type != QETH_IP_TYPE_RXIP)
1509 continue;
1510 /* String must not be longer than PAGE_SIZE. So we check if
 1511 	 * string length gets near PAGE_SIZE. Then we can safely display
1512 * the next IPv6 address (worst case, compared to IPv4) */
1513 if ((PAGE_SIZE - i) <= entry_len)
1514 break;
1515 qeth_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str);
1516 i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
1517 }
1518 spin_unlock_irqrestore(&card->ip_lock, flags);
1519 i += snprintf(buf + i, PAGE_SIZE - i, "\n");
1520
1521 return i;
1522}
1523
1524static ssize_t
1525qeth_dev_rxip_add4_show(struct device *dev, char *buf)
1526{
1527 struct qeth_card *card = dev->driver_data;
1528
1529 if (!card)
1530 return -EINVAL;
1531
1532 return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
1533}
1534
1535static inline int
1536qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto,
1537 u8 *addr)
1538{
1539 if (qeth_string_to_ipaddr(buf, proto, addr)){
1540 PRINT_WARN("Invalid IP address format!\n");
1541 return -EINVAL;
1542 }
1543 return 0;
1544}
1545
1546static inline ssize_t
1547qeth_dev_rxip_add_store(const char *buf, size_t count,
1548 struct qeth_card *card, enum qeth_prot_versions proto)
1549{
1550 u8 addr[16] = {0, };
1551 int rc;
1552
1553 if (qeth_check_layer2(card))
1554 return -EPERM;
1555 if ((rc = qeth_parse_rxipe(buf, proto, addr)))
1556 return rc;
1557
1558 if ((rc = qeth_add_rxip(card, proto, addr)))
1559 return rc;
1560
1561 return count;
1562}
1563
1564static ssize_t
1565qeth_dev_rxip_add4_store(struct device *dev, const char *buf, size_t count)
1566{
1567 struct qeth_card *card = dev->driver_data;
1568
1569 if (!card)
1570 return -EINVAL;
1571
1572 return qeth_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
1573}
1574
1575static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
1576 qeth_dev_rxip_add4_show,
1577 qeth_dev_rxip_add4_store);
1578
1579static inline ssize_t
1580qeth_dev_rxip_del_store(const char *buf, size_t count,
1581 struct qeth_card *card, enum qeth_prot_versions proto)
1582{
1583 u8 addr[16];
1584 int rc;
1585
1586 if (qeth_check_layer2(card))
1587 return -EPERM;
1588 if ((rc = qeth_parse_rxipe(buf, proto, addr)))
1589 return rc;
1590
1591 qeth_del_rxip(card, proto, addr);
1592
1593 return count;
1594}
1595
1596static ssize_t
1597qeth_dev_rxip_del4_store(struct device *dev, const char *buf, size_t count)
1598{
1599 struct qeth_card *card = dev->driver_data;
1600
1601 if (!card)
1602 return -EINVAL;
1603
1604 return qeth_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4);
1605}
1606
1607static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
1608 qeth_dev_rxip_del4_store);
1609
1610#ifdef CONFIG_QETH_IPV6
1611static ssize_t
1612qeth_dev_rxip_add6_show(struct device *dev, char *buf)
1613{
1614 struct qeth_card *card = dev->driver_data;
1615
1616 if (!card)
1617 return -EINVAL;
1618
1619 return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV6);
1620}
1621
1622static ssize_t
1623qeth_dev_rxip_add6_store(struct device *dev, const char *buf, size_t count)
1624{
1625 struct qeth_card *card = dev->driver_data;
1626
1627 if (!card)
1628 return -EINVAL;
1629
1630 return qeth_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6);
1631}
1632
1633static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
1634 qeth_dev_rxip_add6_show,
1635 qeth_dev_rxip_add6_store);
1636
1637static ssize_t
1638qeth_dev_rxip_del6_store(struct device *dev, const char *buf, size_t count)
1639{
1640 struct qeth_card *card = dev->driver_data;
1641
1642 if (!card)
1643 return -EINVAL;
1644
1645 return qeth_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6);
1646}
1647
1648static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL,
1649 qeth_dev_rxip_del6_store);
1650#endif /* CONFIG_QETH_IPV6 */
1651
1652static struct device_attribute * qeth_rxip_device_attrs[] = {
1653 &dev_attr_rxip_add4,
1654 &dev_attr_rxip_del4,
1655#ifdef CONFIG_QETH_IPV6
1656 &dev_attr_rxip_add6,
1657 &dev_attr_rxip_del6,
1658#endif
1659 NULL,
1660};
1661
1662static struct attribute_group qeth_device_rxip_group = {
1663 .name = "rxip",
1664 .attrs = (struct attribute **)qeth_rxip_device_attrs,
1665};
1666
1667int
1668qeth_create_device_attributes(struct device *dev)
1669{
1670 int ret;
1671
1672 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group)))
1673 return ret;
1674 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group))){
1675 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
1676 return ret;
1677 }
1678 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_vipa_group))){
1679 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
1680 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1681 return ret;
1682 }
1683 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_rxip_group))){
1684 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
1685 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1686 sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
1687 }
1688 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group)))
1689 return ret;
1690
1691 return ret;
1692}
1693
1694void
1695qeth_remove_device_attributes(struct device *dev)
1696{
1697 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
1698 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1699 sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
1700 sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
1701 sysfs_remove_group(&dev->kobj, &qeth_device_blkt_group);
1702}
1703
1704/**********************/
1705/* DRIVER ATTRIBUTES */
1706/**********************/
1707static ssize_t
1708qeth_driver_group_store(struct device_driver *ddrv, const char *buf,
1709 size_t count)
1710{
1711 const char *start, *end;
1712 char bus_ids[3][BUS_ID_SIZE], *argv[3];
1713 int i;
1714 int err;
1715
1716 start = buf;
1717 for (i = 0; i < 3; i++) {
1718 static const char delim[] = { ',', ',', '\n' };
1719 int len;
1720
1721 if (!(end = strchr(start, delim[i])))
1722 return -EINVAL;
1723 len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start);
1724 strncpy(bus_ids[i], start, len);
1725 bus_ids[i][len] = '\0';
1726 start = end + 1;
1727 argv[i] = bus_ids[i];
1728 }
1729 err = ccwgroup_create(qeth_root_dev, qeth_ccwgroup_driver.driver_id,
1730 &qeth_ccw_driver, 3, argv);
1731 if (err)
1732 return err;
1733 else
1734 return count;
1735}
1736
1737
1738static DRIVER_ATTR(group, 0200, 0, qeth_driver_group_store);
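
Writing three comma-separated bus ids terminated by a newline to this driver attribute groups the read, write and data subchannels into one qeth device. A hypothetical userspace sketch (path and bus ids illustrative only):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/bus/ccwgroup/drivers/qeth/group", "w");

		if (!f)
			return 1;
		/* must match the expected format exactly: "id,id,id\n" */
		fprintf(f, "0.0.f500,0.0.f501,0.0.f502\n");
		return fclose(f) ? 1 : 0;
	}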
1739
1740static ssize_t
1741qeth_driver_notifier_register_store(struct device_driver *ddrv, const char *buf,
1742 size_t count)
1743{
1744 int rc;
1745 int signum;
1746 char *tmp, *tmp2;
1747
1748 tmp = strsep((char **) &buf, "\n");
1749 if (!strncmp(tmp, "unregister", 10)){
1750 if ((rc = qeth_notifier_unregister(current)))
1751 return rc;
1752 return count;
1753 }
1754
1755 signum = simple_strtoul(tmp, &tmp2, 10);
1756 if ((signum < 0) || (signum > 32)){
1757 PRINT_WARN("Signal number %d is out of range\n", signum);
1758 return -EINVAL;
1759 }
1760 if ((rc = qeth_notifier_register(current, signum)))
1761 return rc;
1762
1763 return count;
1764}
1765
1766static DRIVER_ATTR(notifier_register, 0200, NULL,
1767 qeth_driver_notifier_register_store);
1768
1769int
1770qeth_create_driver_attributes(void)
1771{
1772 int rc;
1773
1774 if ((rc = driver_create_file(&qeth_ccwgroup_driver.driver,
1775 &driver_attr_group)))
1776 return rc;
1777 return driver_create_file(&qeth_ccwgroup_driver.driver,
1778 &driver_attr_notifier_register);
1779}
1780
1781void
1782qeth_remove_driver_attributes(void)
1783{
1784 driver_remove_file(&qeth_ccwgroup_driver.driver,
1785 &driver_attr_group);
1786 driver_remove_file(&qeth_ccwgroup_driver.driver,
1787 &driver_attr_notifier_register);
1788}
diff --git a/drivers/s390/net/qeth_tso.c b/drivers/s390/net/qeth_tso.c
new file mode 100644
index 000000000000..c91976274e7b
--- /dev/null
+++ b/drivers/s390/net/qeth_tso.c
@@ -0,0 +1,285 @@
1/*
2 * linux/drivers/s390/net/qeth_tso.c ($Revision: 1.6 $)
3 *
4 * qeth TCP Segmentation Offload support.
5 *
6 * Copyright 2004 IBM Corporation
7 *
8 * Author(s): Frank Pavlic <pavlic@de.ibm.com>
9 *
10 * $Revision: 1.6 $ $Date: 2005/03/24 09:04:18 $
11 *
12 */
13
14#include <linux/skbuff.h>
15#include <linux/tcp.h>
16#include <linux/ip.h>
17#include <linux/ipv6.h>
18#include <net/ip6_checksum.h>
19#include "qeth.h"
20#include "qeth_mpc.h"
21#include "qeth_tso.h"
22
23/*
24 * The skb is already partially prepared:
25 * the classic qdio header is in skb->data.
26 */
27static inline struct qeth_hdr_tso *
28qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
29{
30 int rc = 0;
31
32 QETH_DBF_TEXT(trace, 5, "tsoprsk");
33	rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr_ext_tso));
34 if (rc)
35 return NULL;
36
37 return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_ext_tso));
38}
39
40/**
41 * fill header for a TSO packet
42 */
43static inline void
44qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
45{
46 struct qeth_hdr_tso *hdr;
47 struct tcphdr *tcph;
48 struct iphdr *iph;
49
50 QETH_DBF_TEXT(trace, 5, "tsofhdr");
51
52 hdr = (struct qeth_hdr_tso *) skb->data;
53 iph = skb->nh.iph;
54 tcph = skb->h.th;
55	/* fix header to TSO values ... */
56	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
57	/* set values which are fixed for the first approach ... */
58 hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
59 hdr->ext.imb_hdr_no = 1;
60 hdr->ext.hdr_type = 1;
61 hdr->ext.hdr_version = 1;
62 hdr->ext.hdr_len = 28;
63 /*insert non-fix values */
64 hdr->ext.mss = skb_shinfo(skb)->tso_size;
65 hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
66 hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
67 sizeof(struct qeth_hdr_tso));
68}
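
The length fields set above can be checked by hand. A small worked example, assuming a 64-byte combined header (a 32-byte qeth_hdr plus the 32-byte qeth_hdr_ext_tso defined in qeth_tso.h):

#include <stdio.h>

/* Worked example of the length arithmetic in qeth_tso_fill_header().
 * The 64-byte combined header size is an assumption, not taken from
 * this source. */
int main(void)
{
	unsigned ihl = 5, doff = 5;		/* 20-byte IPv4 and TCP headers */
	unsigned hdr_tso = 64;			/* assumed sizeof(struct qeth_hdr_tso) */
	unsigned payload = 3 * 1460;		/* three full MSS segments */
	unsigned skb_len = hdr_tso + ihl * 4 + doff * 4 + payload;
	unsigned dg_hdr_len, payload_len;

	dg_hdr_len = ihl * 4 + doff * 4;	/* IP header + TCP header */
	payload_len = skb_len - dg_hdr_len - hdr_tso;
	printf("dg_hdr_len=%u payload_len=%u\n", dg_hdr_len, payload_len);
	return 0;
}
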
69
70/**
71 * change some header values as requested by hardware
72 */
73static inline void
74qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
75{
76 struct iphdr *iph;
77 struct ipv6hdr *ip6h;
78 struct tcphdr *tcph;
79
80 iph = skb->nh.iph;
81 ip6h = skb->nh.ipv6h;
82 tcph = skb->h.th;
83
84 tcph->check = 0;
85 if (skb->protocol == ETH_P_IPV6) {
86 ip6h->payload_len = 0;
87 tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
88 0, IPPROTO_TCP, 0);
89 return;
90 }
91	/* OSA wants us to set these values ... */
92 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
93 0, IPPROTO_TCP, 0);
94 iph->tot_len = 0;
95 iph->check = 0;
96}
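
Seeding tcph->check with the pseudo-header checksum computed over a zero length field lets the per-segment length and payload sums be folded in later. A portable sketch of the IPv4 case, using a hand-rolled fold in place of csum_tcpudp_magic(); the sample addresses are invented:

#include <stdio.h>
#include <stdint.h>

/* Fold a 32-bit ones-complement sum into 16 bits. */
static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* Pseudo-header checksum seed with a zero length field, the analogue of
 * ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0) used above. */
static uint16_t tso_check_seed(uint32_t saddr, uint32_t daddr, uint8_t proto)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;			/* the length word is deliberately 0 */
	return (uint16_t)~csum_fold(sum);
}

int main(void)
{
	/* 192.0.2.1 -> 198.51.100.7, protocol 6 (TCP); RFC 5737 test addresses */
	printf("tcp check seed = 0x%04x\n",
	       tso_check_seed(0xC0000201u, 0xC6336407u, 6));
	return 0;
}
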
97
98static inline struct qeth_hdr_tso *
99qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
100 int ipv, int cast_type)
101{
102 struct qeth_hdr_tso *hdr;
103 int rc = 0;
104
105 QETH_DBF_TEXT(trace, 5, "tsoprep");
106
107 /*get headroom for tso qdio header */
108 hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
109 if (hdr == NULL) {
110 QETH_DBF_TEXT_(trace, 4, "2err%d", rc);
111 return NULL;
112 }
113 memset(hdr, 0, sizeof(struct qeth_hdr_tso));
114	/* fill the first 32 bytes of the qdio header as used
115	 * FIXME: TSO has two struct members
116	 * with different names but the same size
117	 */
118 qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
119 qeth_tso_fill_header(card, skb);
120 qeth_tso_set_tcpip_header(card, skb);
121 return hdr;
122}
123
124static inline int
125qeth_tso_get_queue_buffer(struct qeth_qdio_out_q *queue)
126{
127 struct qeth_qdio_out_buffer *buffer;
128 int flush_cnt = 0;
129
130 QETH_DBF_TEXT(trace, 5, "tsobuf");
131
132 /* force to non-packing*/
133 if (queue->do_pack)
134 queue->do_pack = 0;
135 buffer = &queue->bufs[queue->next_buf_to_fill];
136 /* get a new buffer if current is already in use*/
137 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
138 (buffer->next_element_to_fill > 0)) {
139 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
140 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
141 QDIO_MAX_BUFFERS_PER_Q;
142 flush_cnt++;
143 }
144 return flush_cnt;
145}
146
147static inline void
148__qeth_tso_fill_buffer_frag(struct qeth_qdio_out_buffer *buf,
149 struct sk_buff *skb)
150{
151 struct skb_frag_struct *frag;
152 struct qdio_buffer *buffer;
153 int fragno, cnt, element;
154 unsigned long addr;
155
156 QETH_DBF_TEXT(trace, 6, "tsfilfrg");
157
158 /*initialize variables ...*/
159 fragno = skb_shinfo(skb)->nr_frags;
160 buffer = buf->buffer;
161 element = buf->next_element_to_fill;
162 /*fill buffer elements .....*/
163 for (cnt = 0; cnt < fragno; cnt++) {
164 frag = &skb_shinfo(skb)->frags[cnt];
165 addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
166 frag->page_offset;
167 buffer->element[element].addr = (char *)addr;
168 buffer->element[element].length = frag->size;
169 if (cnt < (fragno - 1))
170 buffer->element[element].flags =
171 SBAL_FLAGS_MIDDLE_FRAG;
172 else
173 buffer->element[element].flags =
174 SBAL_FLAGS_LAST_FRAG;
175 element++;
176 }
177 buf->next_element_to_fill = element;
178}
179
180static inline int
181qeth_tso_fill_buffer(struct qeth_qdio_out_buffer *buf,
182 struct sk_buff *skb)
183{
184 int length, length_here, element;
185 int hdr_len;
186 struct qdio_buffer *buffer;
187 struct qeth_hdr_tso *hdr;
188 char *data;
189
190 QETH_DBF_TEXT(trace, 3, "tsfilbuf");
191
192 /*increment user count and queue skb ...*/
193 atomic_inc(&skb->users);
194 skb_queue_tail(&buf->skb_list, skb);
195
196 /*initialize all variables...*/
197 buffer = buf->buffer;
198 hdr = (struct qeth_hdr_tso *)skb->data;
199 hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
200 data = skb->data + hdr_len;
201 length = skb->len - hdr_len;
202 element = buf->next_element_to_fill;
203 /*fill first buffer entry only with header information */
204 buffer->element[element].addr = skb->data;
205 buffer->element[element].length = hdr_len;
206 buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
207 buf->next_element_to_fill++;
208
209 if (skb_shinfo(skb)->nr_frags > 0) {
210 __qeth_tso_fill_buffer_frag(buf, skb);
211 goto out;
212 }
213
214 /*start filling buffer entries ...*/
215 element++;
216 while (length > 0) {
217 /* length_here is the remaining amount of data in this page */
218 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
219 if (length < length_here)
220 length_here = length;
221 buffer->element[element].addr = data;
222 buffer->element[element].length = length_here;
223 length -= length_here;
224 if (!length)
225 buffer->element[element].flags =
226 SBAL_FLAGS_LAST_FRAG;
227 else
228 buffer->element[element].flags =
229 SBAL_FLAGS_MIDDLE_FRAG;
230 data += length_here;
231 element++;
232 }
233 /*set the buffer to primed ...*/
234 buf->next_element_to_fill = element;
235out:
236 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
237 return 1;
238}
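
The while loop above slices the linear payload at page boundaries, tagging every element but the last as a middle fragment. A standalone sketch of the same chunking, assuming 4 KiB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL	/* assumed page size, for illustration */

/* Enumerate the page-bounded chunks the fill loop above would emit
 * for a linear buffer at 'addr' spanning 'length' bytes. */
static void show_chunks(uintptr_t addr, unsigned long length)
{
	unsigned long here;

	while (length > 0) {
		here = PAGE_SIZE - (addr % PAGE_SIZE);	/* room left in this page */
		if (length < here)
			here = length;
		printf("element: addr=0x%lx len=%lu flags=%s\n",
		       (unsigned long)addr, here,
		       length == here ? "LAST_FRAG" : "MIDDLE_FRAG");
		addr += here;
		length -= here;
	}
}

int main(void)
{
	show_chunks(0x1f80, 9000);	/* starts mid-page, crosses several pages */
	return 0;
}
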
239
240int
241qeth_tso_send_packet(struct qeth_card *card, struct sk_buff *skb,
242 struct qeth_qdio_out_q *queue, int ipv, int cast_type)
243{
244 int flush_cnt = 0;
245 struct qeth_hdr_tso *hdr;
246 struct qeth_qdio_out_buffer *buffer;
247 int start_index;
248
249 QETH_DBF_TEXT(trace, 3, "tsosend");
250
251 if (!(hdr = qeth_tso_prepare_packet(card, skb, ipv, cast_type)))
252 return -ENOMEM;
253 /*check if skb fits in one SBAL ...*/
254 if (!(qeth_get_elements_no(card, (void*)hdr, skb)))
255 return -EINVAL;
256 /*lock queue, force switching to non-packing and send it ...*/
257 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
258 QETH_OUT_Q_LOCKED,
259 &queue->state));
260 start_index = queue->next_buf_to_fill;
261 buffer = &queue->bufs[queue->next_buf_to_fill];
262 /*check if card is too busy ...*/
263 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
264 card->stats.tx_dropped++;
265 goto out;
266 }
267 /*let's force to non-packing and get a new SBAL*/
268 flush_cnt += qeth_tso_get_queue_buffer(queue);
269 buffer = &queue->bufs[queue->next_buf_to_fill];
270 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
271 card->stats.tx_dropped++;
272 goto out;
273 }
274 flush_cnt += qeth_tso_fill_buffer(buffer, skb);
275 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
276 QDIO_MAX_BUFFERS_PER_Q;
277out:
278 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
279 if (flush_cnt)
280 qeth_flush_buffers(queue, 0, start_index, flush_cnt);
281 /*do some statistics */
282 card->stats.tx_packets++;
283 card->stats.tx_bytes += skb->len;
284 return 0;
285}
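
The queue lock above is a compare-and-swap spin. The same idea expressed with C11 atomics, as a minimal sketch rather than the kernel primitive:

#include <stdatomic.h>
#include <stdio.h>

enum { Q_UNLOCKED, Q_LOCKED };

static atomic_int qstate = Q_UNLOCKED;

/* Spin until the state flips from UNLOCKED to LOCKED, mirroring the
 * atomic_compare_and_swap() loop in qeth_tso_send_packet(). */
static void queue_lock(void)
{
	int expected = Q_UNLOCKED;

	while (!atomic_compare_exchange_weak(&qstate, &expected, Q_LOCKED))
		expected = Q_UNLOCKED;	/* CAS failed: reset and retry */
}

static void queue_unlock(void)
{
	atomic_store(&qstate, Q_UNLOCKED);
}

int main(void)
{
	queue_lock();
	puts("queue locked, buffers can be filled");
	queue_unlock();
	return 0;
}
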
diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h
new file mode 100644
index 000000000000..83504dee3f57
--- /dev/null
+++ b/drivers/s390/net/qeth_tso.h
@@ -0,0 +1,58 @@
1/*
2 * linux/drivers/s390/net/qeth_tso.h ($Revision: 1.4 $)
3 *
4 * Header file for qeth TCP Segmentation Offload support.
5 *
6 * Copyright 2004 IBM Corporation
7 *
8 * Author(s): Frank Pavlic <pavlic@de.ibm.com>
9 *
10 * $Revision: 1.4 $ $Date: 2005/03/24 09:04:18 $
11 *
12 */
13#ifndef __QETH_TSO_H__
14#define __QETH_TSO_H__
15
16
17extern int
18qeth_tso_send_packet(struct qeth_card *, struct sk_buff *,
19	     struct qeth_qdio_out_q *, int, int);
20
21struct qeth_hdr_ext_tso {
22 __u16 hdr_tot_len;
23 __u8 imb_hdr_no;
24 __u8 reserved;
25 __u8 hdr_type;
26 __u8 hdr_version;
27 __u16 hdr_len;
28 __u32 payload_len;
29 __u16 mss;
30 __u16 dg_hdr_len;
31 __u8 padding[16];
32} __attribute__ ((packed));
33
34struct qeth_hdr_tso {
35 struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/
36 struct qeth_hdr_ext_tso ext;
37} __attribute__ ((packed));
38
39/*some helper functions*/
40
41static inline int
42qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb)
43{
44 int elements_needed = 0;
45
46 if (skb_shinfo(skb)->nr_frags > 0)
47 elements_needed = (skb_shinfo(skb)->nr_frags + 1);
48	if (elements_needed == 0)
49 elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
50 + skb->len) >> PAGE_SHIFT);
51	if (elements_needed > QETH_MAX_BUFFER_ELEMENTS(card)) {
52		PRINT_ERR("qeth_do_send_packet: invalid size of "
53			"IP packet. Discarded.\n");
54 return 0;
55 }
56 return elements_needed;
57}
58#endif /* __QETH_TSO_H__ */
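
The element estimate above combines the header's offset within its page with the skb length, so every crossed page boundary costs one more element. A small sketch with assumed page constants:

#include <stdio.h>

#define PAGE_SIZE  4096UL	/* assumed values, for illustration */
#define PAGE_SHIFT 12

/* Buffer elements for a linear skb, as computed above: one element,
 * plus one more for every page boundary the header + data crosses. */
static int elements_for(unsigned long hdr_addr, unsigned long skb_len)
{
	return 1 + (int)(((hdr_addr % PAGE_SIZE) + skb_len) >> PAGE_SHIFT);
}

int main(void)
{
	/* header starts 0xf00 bytes into a page, skb is 9000 bytes long */
	printf("elements needed: %d\n", elements_for(0x1f00, 9000));
	return 0;
}
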
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
new file mode 100644
index 000000000000..a3d285859564
--- /dev/null
+++ b/drivers/s390/net/smsgiucv.c
@@ -0,0 +1,180 @@
1/*
2 * IUCV special message driver
3 *
4 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/errno.h>
25#include <linux/device.h>
26#include <asm/cpcmd.h>
27#include <asm/ebcdic.h>
28
29#include "iucv.h"
30
31struct smsg_callback {
32 struct list_head list;
33 char *prefix;
34 int len;
35 void (*callback)(char *str);
36};
37
38MODULE_AUTHOR
39 ("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)");
40MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver");
41
42static iucv_handle_t smsg_handle;
43static unsigned short smsg_pathid;
44static DEFINE_SPINLOCK(smsg_list_lock);
45static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list);
46
47static void
48smsg_connection_complete(iucv_ConnectionComplete *eib, void *pgm_data)
49{
50}
51
52
53static void
54smsg_message_pending(iucv_MessagePending *eib, void *pgm_data)
55{
56 struct smsg_callback *cb;
57 unsigned char *msg;
58 unsigned short len;
59 int rc;
60
61 len = eib->ln1msg2.ipbfln1f;
62 msg = kmalloc(len + 1, GFP_ATOMIC|GFP_DMA);
63 if (!msg) {
64 iucv_reject(eib->ippathid, eib->ipmsgid, eib->iptrgcls);
65 return;
66 }
67 rc = iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls,
68 msg, len, 0, 0, 0);
69 if (rc == 0) {
70 msg[len] = 0;
71 EBCASC(msg, len);
72 spin_lock(&smsg_list_lock);
73 list_for_each_entry(cb, &smsg_list, list)
74 if (strncmp(msg + 8, cb->prefix, cb->len) == 0) {
75 cb->callback(msg + 8);
76 break;
77 }
78 spin_unlock(&smsg_list_lock);
79 }
80 kfree(msg);
81}
82
83static iucv_interrupt_ops_t smsg_ops = {
84 .ConnectionComplete = smsg_connection_complete,
85 .MessagePending = smsg_message_pending,
86};
87
88static struct device_driver smsg_driver = {
89 .name = "SMSGIUCV",
90 .bus = &iucv_bus,
91};
92
93int
94smsg_register_callback(char *prefix, void (*callback)(char *str))
95{
96 struct smsg_callback *cb;
97
98 cb = kmalloc(sizeof(struct smsg_callback), GFP_KERNEL);
99 if (!cb)
100 return -ENOMEM;
101 cb->prefix = prefix;
102 cb->len = strlen(prefix);
103 cb->callback = callback;
104 spin_lock(&smsg_list_lock);
105 list_add_tail(&cb->list, &smsg_list);
106 spin_unlock(&smsg_list_lock);
107 return 0;
108}
109
110void
111smsg_unregister_callback(char *prefix, void (*callback)(char *str))
112{
113 struct smsg_callback *cb, *tmp;
114
115 spin_lock(&smsg_list_lock);
116	cb = NULL;
117 list_for_each_entry(tmp, &smsg_list, list)
118 if (tmp->callback == callback &&
119 strcmp(tmp->prefix, prefix) == 0) {
120 cb = tmp;
121 list_del(&cb->list);
122 break;
123 }
124 spin_unlock(&smsg_list_lock);
125 kfree(cb);
126}
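
The registry above dispatches on message prefixes under a spinlock. Stripped of locking and dynamic registration, the matching logic looks like this sketch; the prefixes and handlers are invented:

#include <stdio.h>
#include <string.h>

/* Minimal sketch of the prefix-dispatch scheme above: the first entry
 * whose prefix matches the decoded message text gets the callback. */
struct callback {
	const char *prefix;
	void (*fn)(const char *msg);
};

static void on_ping(const char *msg) { printf("ping handler: %s\n", msg); }
static void on_log(const char *msg)  { printf("log handler: %s\n", msg); }

static const struct callback table[] = {
	{ "PING", on_ping },
	{ "LOG ", on_log },
};

static void dispatch(const char *msg)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (strncmp(msg, table[i].prefix, strlen(table[i].prefix)) == 0) {
			table[i].fn(msg);
			return;
		}
}

int main(void)
{
	dispatch("PING 42");
	return 0;
}
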
127
128static void __exit
129smsg_exit(void)
130{
131 if (smsg_handle > 0) {
132 cpcmd("SET SMSG OFF", 0, 0);
133 iucv_sever(smsg_pathid, 0);
134 iucv_unregister_program(smsg_handle);
135 driver_unregister(&smsg_driver);
136 }
137 return;
138}
139
140static int __init
141smsg_init(void)
142{
143 static unsigned char pgmmask[24] = {
144 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
145 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
146 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
147 };
148 int rc;
149
150 rc = driver_register(&smsg_driver);
151 if (rc != 0) {
152 printk(KERN_ERR "SMSGIUCV: failed to register driver.\n");
153 return rc;
154 }
155 smsg_handle = iucv_register_program("SMSGIUCV ", "*MSG ",
156 pgmmask, &smsg_ops, 0);
157 if (!smsg_handle) {
158		printk(KERN_ERR "SMSGIUCV: failed to register with iucv\n");
159 driver_unregister(&smsg_driver);
160 return -EIO; /* better errno ? */
161 }
162 rc = iucv_connect (&smsg_pathid, 1, 0, "*MSG ", 0, 0, 0, 0,
163 smsg_handle, 0);
164 if (rc) {
165		printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG\n");
166 iucv_unregister_program(smsg_handle);
167 driver_unregister(&smsg_driver);
168 smsg_handle = 0;
169 return -EIO;
170 }
171 cpcmd("SET SMSG IUCV", 0, 0);
172 return 0;
173}
174
175module_init(smsg_init);
176module_exit(smsg_exit);
177MODULE_LICENSE("GPL");
178
179EXPORT_SYMBOL(smsg_register_callback);
180EXPORT_SYMBOL(smsg_unregister_callback);
diff --git a/drivers/s390/net/smsgiucv.h b/drivers/s390/net/smsgiucv.h
new file mode 100644
index 000000000000..04cd87152964
--- /dev/null
+++ b/drivers/s390/net/smsgiucv.h
@@ -0,0 +1,10 @@
1/*
2 * IUCV special message driver
3 *
4 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
6 */
7
8int smsg_register_callback(char *, void (*)(char *));
9void smsg_unregister_callback(char *, void (*)(char *));
10
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
new file mode 100644
index 000000000000..ffa996c8a908
--- /dev/null
+++ b/drivers/s390/s390mach.c
@@ -0,0 +1,219 @@
1/*
2 * drivers/s390/s390mach.c
3 * S/390 machine check handler
4 *
5 * S390 version
6 * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Ingo Adlung (adlung@de.ibm.com)
8 * Martin Schwidefsky (schwidefsky@de.ibm.com)
9 */
10
11#include <linux/config.h>
12#include <linux/init.h>
13#include <linux/sched.h>
14#include <linux/errno.h>
15#include <linux/workqueue.h>
16
17#include <asm/lowcore.h>
18
19#include "s390mach.h"
20
21#define DBG printk
22/* #define DBG(args,...) do {} while (0) */
23
24static struct semaphore m_sem;
25
26extern int css_process_crw(int);
27extern int chsc_process_crw(void);
28extern int chp_process_crw(int, int);
29extern void css_reiterate_subchannels(void);
30
31extern struct workqueue_struct *slow_path_wq;
32extern struct work_struct slow_path_work;
33
34static void
35s390_handle_damage(char *msg)
36{
37 printk(KERN_EMERG "%s\n", msg);
38#ifdef CONFIG_SMP
39 smp_send_stop();
40#endif
41 disabled_wait((unsigned long) __builtin_return_address(0));
42}
43
44/*
45 * Retrieve CRWs and call function to handle event.
46 *
47 * Note : we currently process CRWs for io and chsc subchannels only
48 */
49static int
50s390_collect_crw_info(void *param)
51{
52 struct crw crw;
53 int ccode, ret, slow;
54 struct semaphore *sem;
55
56 sem = (struct semaphore *)param;
57 /* Set a nice name. */
58 daemonize("kmcheck");
59repeat:
60 down_interruptible(sem);
61 slow = 0;
62 while (1) {
63 ccode = stcrw(&crw);
64 if (ccode != 0)
65 break;
66 DBG(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
67 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
68 crw.slct, crw.oflw, crw.chn, crw.rsc, crw.anc,
69 crw.erc, crw.rsid);
70 /* Check for overflows. */
71 if (crw.oflw) {
72 pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
73 css_reiterate_subchannels();
74 slow = 1;
75 continue;
76 }
77 switch (crw.rsc) {
78 case CRW_RSC_SCH:
79 pr_debug("source is subchannel %04X\n", crw.rsid);
80 ret = css_process_crw (crw.rsid);
81 if (ret == -EAGAIN)
82 slow = 1;
83 break;
84 case CRW_RSC_MONITOR:
85 pr_debug("source is monitoring facility\n");
86 break;
87 case CRW_RSC_CPATH:
88 pr_debug("source is channel path %02X\n", crw.rsid);
89 switch (crw.erc) {
90 case CRW_ERC_IPARM: /* Path has come. */
91 ret = chp_process_crw(crw.rsid, 1);
92 break;
93 case CRW_ERC_PERRI: /* Path has gone. */
94 case CRW_ERC_PERRN:
95 ret = chp_process_crw(crw.rsid, 0);
96 break;
97 default:
98 pr_debug("Don't know how to handle erc=%x\n",
99 crw.erc);
100 ret = 0;
101 }
102 if (ret == -EAGAIN)
103 slow = 1;
104 break;
105 case CRW_RSC_CONFIG:
106 pr_debug("source is configuration-alert facility\n");
107 break;
108 case CRW_RSC_CSS:
109 pr_debug("source is channel subsystem\n");
110 ret = chsc_process_crw();
111 if (ret == -EAGAIN)
112 slow = 1;
113 break;
114 default:
115 pr_debug("unknown source\n");
116 break;
117 }
118 }
119 if (slow)
120 queue_work(slow_path_wq, &slow_path_work);
121 goto repeat;
122 return 0;
123}
124
125/*
126 * machine check handler.
127 */
128void
129s390_do_machine_check(void)
130{
131 struct mci *mci;
132
133 mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
134
135 if (mci->sd) /* system damage */
136 s390_handle_damage("received system damage machine check\n");
137
138 if (mci->pd) /* instruction processing damage */
139 s390_handle_damage("received instruction processing "
140 "damage machine check\n");
141
142 if (mci->se) /* storage error uncorrected */
143 s390_handle_damage("received storage error uncorrected "
144 "machine check\n");
145
146 if (mci->sc) /* storage error corrected */
147 printk(KERN_WARNING
148 "received storage error corrected machine check\n");
149
150 if (mci->ke) /* storage key-error uncorrected */
151 s390_handle_damage("received storage key-error uncorrected "
152 "machine check\n");
153
154 if (mci->ds && mci->fa) /* storage degradation */
155 s390_handle_damage("received storage degradation machine "
156 "check\n");
157
158 if (mci->cp) /* channel report word pending */
159 up(&m_sem);
160
161#ifdef CONFIG_MACHCHK_WARNING
162/*
163 * The warning may remain for a prolonged period on the bare iron.
164 * (actually until the machine is powered off, or until the problem is gone)
165 * So we just stop listening for the WARNING MCH and prevent continuously
166 * being interrupted. One caveat, however, is that we must do this per
167 * processor and cannot use the smp version of ctl_clear_bit().
168 * On VM we only get one interrupt per virtually presented machine check.
169 * Though one suffices, we may get one interrupt per (virtual) processor.
170 */
171 if (mci->w) { /* WARNING pending ? */
172 static int mchchk_wng_posted = 0;
173 /*
174 * Use single machine clear, as we cannot handle smp right now
175 */
176 __ctl_clear_bit(14, 24); /* Disable WARNING MCH */
177 if (xchg(&mchchk_wng_posted, 1) == 0)
178 kill_proc(1, SIGPWR, 1);
179 }
180#endif
181}
182
183/*
184 * s390_init_machine_check
185 *
186 * initialize machine check handling
187 */
188static int
189machine_check_init(void)
190{
191 init_MUTEX_LOCKED(&m_sem);
192 ctl_clear_bit(14, 25); /* disable damage MCH */
193 ctl_set_bit(14, 26); /* enable degradation MCH */
194 ctl_set_bit(14, 27); /* enable system recovery MCH */
195#ifdef CONFIG_MACHCHK_WARNING
196 ctl_set_bit(14, 24); /* enable warning MCH */
197#endif
198 return 0;
199}
200
201/*
202 * Initialize the machine check handler really early to be able to
203 * catch all machine checks that happen during boot
204 */
205arch_initcall(machine_check_init);
206
207/*
208 * Machine checks for the channel subsystem must be enabled
209 * after the channel subsystem is initialized
210 */
211static int __init
212machine_check_crw_init (void)
213{
214 kernel_thread(s390_collect_crw_info, &m_sem, CLONE_FS|CLONE_FILES);
215 ctl_set_bit(14, 28); /* enable channel report MCH */
216 return 0;
217}
218
219device_initcall (machine_check_crw_init);
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h
new file mode 100644
index 000000000000..7e26f0f1b0dc
--- /dev/null
+++ b/drivers/s390/s390mach.h
@@ -0,0 +1,79 @@
1/*
2 * drivers/s390/s390mach.h
3 * S/390 data definitions for machine check processing
4 *
5 * S390 version
6 * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Ingo Adlung (adlung@de.ibm.com)
8 */
9
10#ifndef __s390mach_h
11#define __s390mach_h
12
13#include <asm/types.h>
14
15struct mci {
16 __u32 sd : 1; /* 00 system damage */
17 __u32 pd : 1; /* 01 instruction-processing damage */
18 __u32 sr : 1; /* 02 system recovery */
19 __u32 to_be_defined_1 : 4; /* 03-06 */
20 __u32 dg : 1; /* 07 degradation */
21 __u32 w : 1; /* 08 warning pending */
22 __u32 cp : 1; /* 09 channel-report pending */
23 __u32 to_be_defined_2 : 6; /* 10-15 */
24 __u32 se : 1; /* 16 storage error uncorrected */
25 __u32 sc : 1; /* 17 storage error corrected */
26 __u32 ke : 1; /* 18 storage-key error uncorrected */
27 __u32 ds : 1; /* 19 storage degradation */
28 __u32 to_be_defined_3 : 4; /* 20-23 */
29 __u32 fa : 1; /* 24 failing storage address validity */
30 __u32 to_be_defined_4 : 7; /* 25-31 */
31 __u32 ie : 1; /* 32 indirect storage error */
32 __u32 to_be_defined_5 : 31; /* 33-63 */
33};
34
35/*
36 * Channel Report Word
37 */
38struct crw {
39 __u32 res1 : 1; /* reserved zero */
40 __u32 slct : 1; /* solicited */
41 __u32 oflw : 1; /* overflow */
42 __u32 chn : 1; /* chained */
43 __u32 rsc : 4; /* reporting source code */
44 __u32 anc : 1; /* ancillary report */
45 __u32 res2 : 1; /* reserved zero */
46 __u32 erc : 6; /* error-recovery code */
47 __u32 rsid : 16; /* reporting-source ID */
48} __attribute__ ((packed));
49
50#define CRW_RSC_MONITOR 0x2 /* monitoring facility */
51#define CRW_RSC_SCH 0x3 /* subchannel */
52#define CRW_RSC_CPATH 0x4 /* channel path */
53#define CRW_RSC_CONFIG 0x9 /* configuration-alert facility */
54#define CRW_RSC_CSS 0xB /* channel subsystem */
55
56#define CRW_ERC_EVENT 0x00 /* event information pending */
57#define CRW_ERC_AVAIL 0x01 /* available */
58#define CRW_ERC_INIT 0x02 /* initialized */
59#define CRW_ERC_TERROR 0x03 /* temporary error */
60#define CRW_ERC_IPARM 0x04 /* installed parm initialized */
61#define CRW_ERC_TERM 0x05 /* terminal */
62#define CRW_ERC_PERRN 0x06 /* perm. error, fac. not init */
63#define CRW_ERC_PERRI 0x07 /* perm. error, facility init */
64#define CRW_ERC_PMOD 0x08 /* installed parameters modified */
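
The CRW bitfields above map onto a 32-bit word with the first-declared field in the most significant bit, as on big-endian s390. A sketch decoding a raw word with explicit shifts and masks; the sample value is made up:

#include <stdio.h>
#include <stdint.h>

/* Decode a raw channel report word, following the big-endian bitfield
 * layout of struct crw above (illustrative only, not kernel code). */
struct crw_fields {
	unsigned slct, oflw, chn, rsc, anc, erc, rsid;
};

static struct crw_fields crw_decode(uint32_t w)
{
	struct crw_fields d;

	d.slct = (w >> 30) & 0x1;	/* solicited */
	d.oflw = (w >> 29) & 0x1;	/* overflow */
	d.chn  = (w >> 28) & 0x1;	/* chained */
	d.rsc  = (w >> 24) & 0xf;	/* reporting source code */
	d.anc  = (w >> 23) & 0x1;	/* ancillary report */
	d.erc  = (w >> 16) & 0x3f;	/* error-recovery code */
	d.rsid = w & 0xffff;		/* reporting-source ID */
	return d;
}

int main(void)
{
	/* rsc=3 (subchannel), erc=1 (available), rsid=0x0aff */
	struct crw_fields d = crw_decode(0x03010affu);

	printf("rsc=%X erc=%X rsid=%04X\n", d.rsc, d.erc, d.rsid);
	return 0;
}
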
65
66extern __inline__ int stcrw(struct crw *pcrw )
67{
68 int ccode;
69
70 __asm__ __volatile__(
71 "STCRW 0(%1)\n\t"
72 "IPM %0\n\t"
73 "SRL %0,28\n\t"
74 : "=d" (ccode) : "a" (pcrw)
75 : "cc", "1" );
76 return ccode;
77}
78
79#endif /* __s390mach */
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
new file mode 100644
index 000000000000..fc145307a7d4
--- /dev/null
+++ b/drivers/s390/scsi/Makefile
@@ -0,0 +1,9 @@
1#
2# Makefile for the S/390 specific device drivers
3#
4
5zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \
6 zfcp_fsf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \
7 zfcp_sysfs_unit.o zfcp_sysfs_driver.o
8
9obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
new file mode 100644
index 000000000000..6a43322ccb0a
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -0,0 +1,1977 @@
1/*
2 *
3 * linux/drivers/s390/scsi/zfcp_aux.c
4 *
5 * FCP adapter driver for IBM eServer zSeries
6 *
7 * (C) Copyright IBM Corp. 2002, 2004
8 *
9 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
10 * Raimund Schroeder <raimund.schroeder@de.ibm.com>
11 * Aron Zeh
12 * Wolfgang Taphorn
13 * Stefan Bader <stefan.bader@de.ibm.com>
14 * Heiko Carstens <heiko.carstens@de.ibm.com>
15 * Andreas Herrmann <aherrman@de.ibm.com>
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
20 * any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 */
31
32#define ZFCP_AUX_REVISION "$Revision: 1.145 $"
33
34#include "zfcp_ext.h"
35
36/* accumulated log level (module parameter) */
37static u32 loglevel = ZFCP_LOG_LEVEL_DEFAULTS;
38static char *device;
39/*********************** FUNCTION PROTOTYPES *********************************/
40
41/* written against the module interface */
42static int __init zfcp_module_init(void);
43
44/* FCP related */
45static void zfcp_ns_gid_pn_handler(unsigned long);
46
47/* miscellaneous */
48static inline int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t);
49static inline void zfcp_sg_list_free(struct zfcp_sg_list *);
50static inline int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *,
51 void __user *, size_t);
52static inline int zfcp_sg_list_copy_to_user(void __user *,
53 struct zfcp_sg_list *, size_t);
54
55static int zfcp_cfdc_dev_ioctl(struct inode *, struct file *,
56 unsigned int, unsigned long);
57
58#define ZFCP_CFDC_IOC_MAGIC 0xDD
59#define ZFCP_CFDC_IOC \
60 _IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_sense_data)
61
62#ifdef CONFIG_COMPAT
63static struct ioctl_trans zfcp_ioctl_trans = {ZFCP_CFDC_IOC, (void*) sys_ioctl};
64#endif
65
66static struct file_operations zfcp_cfdc_fops = {
67 .ioctl = zfcp_cfdc_dev_ioctl
68};
69
70static struct miscdevice zfcp_cfdc_misc = {
71 .minor = ZFCP_CFDC_DEV_MINOR,
72 .name = ZFCP_CFDC_DEV_NAME,
73 .fops = &zfcp_cfdc_fops
74};
75
76/*********************** KERNEL/MODULE PARAMETERS ***************************/
77
78/* declare driver module init/cleanup functions */
79module_init(zfcp_module_init);
80
81MODULE_AUTHOR("Heiko Carstens <heiko.carstens@de.ibm.com>, "
82 "Andreas Herrman <aherrman@de.ibm.com>, "
83 "Martin Peschke <mpeschke@de.ibm.com>, "
84 "Raimund Schroeder <raimund.schroeder@de.ibm.com>, "
85 "Wolfgang Taphorn <taphorn@de.ibm.com>, "
86 "Aron Zeh <arzeh@de.ibm.com>, "
87 "IBM Deutschland Entwicklung GmbH");
88MODULE_DESCRIPTION
89 ("FCP (SCSI over Fibre Channel) HBA driver for IBM eServer zSeries");
90MODULE_LICENSE("GPL");
91
92module_param(device, charp, 0);
93MODULE_PARM_DESC(device, "specify initial device");
94
95module_param(loglevel, uint, 0);
96MODULE_PARM_DESC(loglevel,
97 "log levels, 8 nibbles: "
98 "FC ERP QDIO CIO Config FSF SCSI Other, "
99 "levels: 0=none 1=normal 2=devel 3=trace");
100
101#ifdef ZFCP_PRINT_FLAGS
102u32 flags_dump = 0;
103module_param(flags_dump, uint, 0);
104#endif
105
106/****************************************************************/
107/************** Functions without logging ***********************/
108/****************************************************************/
109
110void
111_zfcp_hex_dump(char *addr, int count)
112{
113 int i;
114 for (i = 0; i < count; i++) {
115 printk("%02x", addr[i]);
116 if ((i % 4) == 3)
117 printk(" ");
118 if ((i % 32) == 31)
119 printk("\n");
120 }
121 if (((i-1) % 32) != 31)
122 printk("\n");
123}
124
125/****************************************************************/
126/************** Uncategorised Functions *************************/
127/****************************************************************/
128
129#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
130
131static inline int
132zfcp_fsf_req_is_scsi_cmnd(struct zfcp_fsf_req *fsf_req)
133{
134 return ((fsf_req->fsf_command == FSF_QTCB_FCP_CMND) &&
135 !(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT));
136}
137
138void
139zfcp_cmd_dbf_event_fsf(const char *text, struct zfcp_fsf_req *fsf_req,
140 void *add_data, int add_length)
141{
142 struct zfcp_adapter *adapter = fsf_req->adapter;
143 struct scsi_cmnd *scsi_cmnd;
144 int level = 3;
145 int i;
146 unsigned long flags;
147
148 spin_lock_irqsave(&adapter->dbf_lock, flags);
149 if (zfcp_fsf_req_is_scsi_cmnd(fsf_req)) {
150 scsi_cmnd = fsf_req->data.send_fcp_command_task.scsi_cmnd;
151 debug_text_event(adapter->cmd_dbf, level, "fsferror");
152 debug_text_event(adapter->cmd_dbf, level, text);
153 debug_event(adapter->cmd_dbf, level, &fsf_req,
154 sizeof (unsigned long));
155 debug_event(adapter->cmd_dbf, level, &fsf_req->seq_no,
156 sizeof (u32));
157 debug_event(adapter->cmd_dbf, level, &scsi_cmnd,
158 sizeof (unsigned long));
159 debug_event(adapter->cmd_dbf, level, &scsi_cmnd->cmnd,
160 min(ZFCP_CMD_DBF_LENGTH, (int)scsi_cmnd->cmd_len));
161 for (i = 0; i < add_length; i += ZFCP_CMD_DBF_LENGTH)
162 debug_event(adapter->cmd_dbf,
163 level,
164 (char *) add_data + i,
165 min(ZFCP_CMD_DBF_LENGTH, add_length - i));
166 }
167 spin_unlock_irqrestore(&adapter->dbf_lock, flags);
168}
169
170/* XXX additionally log unit if available */
171/* ---> introduce new parameter for unit, see 2.4 code */
172void
173zfcp_cmd_dbf_event_scsi(const char *text, struct scsi_cmnd *scsi_cmnd)
174{
175 struct zfcp_adapter *adapter;
176 union zfcp_req_data *req_data;
177 struct zfcp_fsf_req *fsf_req;
178 int level = ((host_byte(scsi_cmnd->result) != 0) ? 1 : 5);
179 unsigned long flags;
180
181 adapter = (struct zfcp_adapter *) scsi_cmnd->device->host->hostdata[0];
182 req_data = (union zfcp_req_data *) scsi_cmnd->host_scribble;
183 fsf_req = (req_data ? req_data->send_fcp_command_task.fsf_req : NULL);
184 spin_lock_irqsave(&adapter->dbf_lock, flags);
185 debug_text_event(adapter->cmd_dbf, level, "hostbyte");
186 debug_text_event(adapter->cmd_dbf, level, text);
187 debug_event(adapter->cmd_dbf, level, &scsi_cmnd->result, sizeof (u32));
188 debug_event(adapter->cmd_dbf, level, &scsi_cmnd,
189 sizeof (unsigned long));
190 debug_event(adapter->cmd_dbf, level, &scsi_cmnd->cmnd,
191 min(ZFCP_CMD_DBF_LENGTH, (int)scsi_cmnd->cmd_len));
192 if (likely(fsf_req)) {
193 debug_event(adapter->cmd_dbf, level, &fsf_req,
194 sizeof (unsigned long));
195 debug_event(adapter->cmd_dbf, level, &fsf_req->seq_no,
196 sizeof (u32));
197 } else {
198 debug_text_event(adapter->cmd_dbf, level, "");
199 debug_text_event(adapter->cmd_dbf, level, "");
200 }
201 spin_unlock_irqrestore(&adapter->dbf_lock, flags);
202}
203
204void
205zfcp_in_els_dbf_event(struct zfcp_adapter *adapter, const char *text,
206 struct fsf_status_read_buffer *status_buffer, int length)
207{
208 int level = 1;
209 int i;
210
211 debug_text_event(adapter->in_els_dbf, level, text);
212 debug_event(adapter->in_els_dbf, level, &status_buffer->d_id, 8);
213 for (i = 0; i < length; i += ZFCP_IN_ELS_DBF_LENGTH)
214 debug_event(adapter->in_els_dbf,
215 level,
216 (char *) status_buffer->payload + i,
217 min(ZFCP_IN_ELS_DBF_LENGTH, length - i));
218}
219
220/**
221 * zfcp_device_setup - setup function
222 * @str: pointer to parameter string
223 *
224 * Parse "device=..." parameter string.
225 */
226static int __init
227zfcp_device_setup(char *str)
228{
229 char *tmp;
230
231 if (!str)
232 return 0;
233
234 tmp = strchr(str, ',');
235 if (!tmp)
236 goto err_out;
237 *tmp++ = '\0';
238 strncpy(zfcp_data.init_busid, str, BUS_ID_SIZE);
239 zfcp_data.init_busid[BUS_ID_SIZE-1] = '\0';
240
241 zfcp_data.init_wwpn = simple_strtoull(tmp, &tmp, 0);
242 if (*tmp++ != ',')
243 goto err_out;
244 if (*tmp == '\0')
245 goto err_out;
246
247 zfcp_data.init_fcp_lun = simple_strtoull(tmp, &tmp, 0);
248 if (*tmp != '\0')
249 goto err_out;
250 return 1;
251
252 err_out:
253 ZFCP_LOG_NORMAL("Parse error for device parameter string %s\n", str);
254 return 0;
255}
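
The "busid,wwpn,lun" parsing above can be mirrored in user space. A sketch using strtoull in place of the kernel's simple_strtoull; the sample bus ID, WWPN, and LUN are invented:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUS_ID_SIZE 20	/* assumed value of the historical kernel constant */

/* Parse "busid,wwpn,lun" as zfcp_device_setup() does (sketch only). */
static int parse_device(char *str, char *busid,
			unsigned long long *wwpn, unsigned long long *lun)
{
	char *tmp = strchr(str, ',');

	if (!tmp)
		return -1;
	*tmp++ = '\0';
	strncpy(busid, str, BUS_ID_SIZE - 1);
	busid[BUS_ID_SIZE - 1] = '\0';

	*wwpn = strtoull(tmp, &tmp, 0);
	if (*tmp++ != ',' || *tmp == '\0')
		return -1;
	*lun = strtoull(tmp, &tmp, 0);
	return *tmp == '\0' ? 0 : -1;
}

int main(void)
{
	char busid[BUS_ID_SIZE];
	unsigned long long wwpn, lun;
	char arg[] = "0.0.4000,0x5005076300c213e9,0x0";

	if (parse_device(arg, busid, &wwpn, &lun) == 0)
		printf("%s wwpn=0x%llx lun=0x%llx\n", busid, wwpn, lun);
	return 0;
}
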
256
257static void __init
258zfcp_init_device_configure(void)
259{
260 struct zfcp_adapter *adapter;
261 struct zfcp_port *port;
262 struct zfcp_unit *unit;
263
264 down(&zfcp_data.config_sema);
265 read_lock_irq(&zfcp_data.config_lock);
266 adapter = zfcp_get_adapter_by_busid(zfcp_data.init_busid);
267 if (adapter)
268 zfcp_adapter_get(adapter);
269 read_unlock_irq(&zfcp_data.config_lock);
270
271 if (adapter == NULL)
272 goto out_adapter;
273 port = zfcp_port_enqueue(adapter, zfcp_data.init_wwpn, 0, 0);
274 if (!port)
275 goto out_port;
276 unit = zfcp_unit_enqueue(port, zfcp_data.init_fcp_lun);
277 if (!unit)
278 goto out_unit;
279 up(&zfcp_data.config_sema);
280 ccw_device_set_online(adapter->ccw_device);
281 zfcp_erp_wait(adapter);
282 down(&zfcp_data.config_sema);
283 zfcp_unit_put(unit);
284 out_unit:
285 zfcp_port_put(port);
286 out_port:
287 zfcp_adapter_put(adapter);
288 out_adapter:
289 up(&zfcp_data.config_sema);
290 return;
291}
292
293static int __init
294zfcp_module_init(void)
295{
296
297 int retval = 0;
298
299 atomic_set(&zfcp_data.loglevel, loglevel);
300
301 /* initialize adapter list */
302 INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
303
304 /* initialize adapters to be removed list head */
305 INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh);
306
307 zfcp_transport_template = fc_attach_transport(&zfcp_transport_functions);
308 if (!zfcp_transport_template)
309 return -ENODEV;
310
311 retval = register_ioctl32_conversion(zfcp_ioctl_trans.cmd,
312 zfcp_ioctl_trans.handler);
313 if (retval != 0) {
314 ZFCP_LOG_INFO("registration of ioctl32 conversion failed\n");
315 goto out;
316 }
317
318 retval = misc_register(&zfcp_cfdc_misc);
319 if (retval != 0) {
320 ZFCP_LOG_INFO("registration of misc device "
321 "zfcp_cfdc failed\n");
322 goto out_misc_register;
323 } else {
324 ZFCP_LOG_TRACE("major/minor for zfcp_cfdc: %d/%d\n",
325 ZFCP_CFDC_DEV_MAJOR, zfcp_cfdc_misc.minor);
326 }
327
328 /* Initialise proc semaphores */
329 sema_init(&zfcp_data.config_sema, 1);
330
331 /* initialise configuration rw lock */
332 rwlock_init(&zfcp_data.config_lock);
333
334 /* save address of data structure managing the driver module */
335 zfcp_data.scsi_host_template.module = THIS_MODULE;
336
337 /* setup dynamic I/O */
338 retval = zfcp_ccw_register();
339 if (retval) {
340 ZFCP_LOG_NORMAL("registration with common I/O layer failed\n");
341 goto out_ccw_register;
342 }
343
344 if (zfcp_device_setup(device))
345 zfcp_init_device_configure();
346
347 goto out;
348
349 out_ccw_register:
350 misc_deregister(&zfcp_cfdc_misc);
351 out_misc_register:
352 unregister_ioctl32_conversion(zfcp_ioctl_trans.cmd);
353 out:
354 return retval;
355}
356
357/*
358 * function: zfcp_cfdc_dev_ioctl
359 *
360 * purpose: Handle control file upload/download transaction via IOCTL
361 * interface
362 *
363 * returns:     0           - Operation completed successfully
364 * -ENOTTY - Unknown IOCTL command
365 * -EINVAL - Invalid sense data record
366 * -ENXIO - The FCP adapter is not available
367 * -EOPNOTSUPP - The FCP adapter does not have CFDC support
368 * -ENOMEM - Insufficient memory
369 * -EFAULT - User space memory I/O operation fault
370 * -EPERM - Cannot create or queue FSF request or create SBALs
371 * -ERESTARTSYS- Received signal (is mapped to EAGAIN by VFS)
372 */
373static int
374zfcp_cfdc_dev_ioctl(struct inode *inode, struct file *file,
375 unsigned int command, unsigned long buffer)
376{
377 struct zfcp_cfdc_sense_data *sense_data, __user *sense_data_user;
378 struct zfcp_adapter *adapter = NULL;
379 struct zfcp_fsf_req *fsf_req = NULL;
380 struct zfcp_sg_list *sg_list = NULL;
381 u32 fsf_command, option;
382 char *bus_id = NULL;
383 int retval = 0;
384
385 sense_data = kmalloc(sizeof(struct zfcp_cfdc_sense_data), GFP_KERNEL);
386 if (sense_data == NULL) {
387 retval = -ENOMEM;
388 goto out;
389 }
390
391 sg_list = kmalloc(sizeof(struct zfcp_sg_list), GFP_KERNEL);
392 if (sg_list == NULL) {
393 retval = -ENOMEM;
394 goto out;
395 }
396 memset(sg_list, 0, sizeof(*sg_list));
397
398 if (command != ZFCP_CFDC_IOC) {
399 ZFCP_LOG_INFO("IOC request code 0x%x invalid\n", command);
400 retval = -ENOTTY;
401 goto out;
402 }
403
404 if ((sense_data_user = (void __user *) buffer) == NULL) {
405 ZFCP_LOG_INFO("sense data record is required\n");
406 retval = -EINVAL;
407 goto out;
408 }
409
410 retval = copy_from_user(sense_data, sense_data_user,
411 sizeof(struct zfcp_cfdc_sense_data));
412 if (retval) {
413 retval = -EFAULT;
414 goto out;
415 }
416
417 if (sense_data->signature != ZFCP_CFDC_SIGNATURE) {
418 ZFCP_LOG_INFO("invalid sense data request signature 0x%08x\n",
419			      sense_data->signature);
420 retval = -EINVAL;
421 goto out;
422 }
423
424 switch (sense_data->command) {
425
426 case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL:
427 fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
428 option = FSF_CFDC_OPTION_NORMAL_MODE;
429 break;
430
431 case ZFCP_CFDC_CMND_DOWNLOAD_FORCE:
432 fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
433 option = FSF_CFDC_OPTION_FORCE;
434 break;
435
436 case ZFCP_CFDC_CMND_FULL_ACCESS:
437 fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
438 option = FSF_CFDC_OPTION_FULL_ACCESS;
439 break;
440
441 case ZFCP_CFDC_CMND_RESTRICTED_ACCESS:
442 fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
443 option = FSF_CFDC_OPTION_RESTRICTED_ACCESS;
444 break;
445
446 case ZFCP_CFDC_CMND_UPLOAD:
447 fsf_command = FSF_QTCB_UPLOAD_CONTROL_FILE;
448 option = 0;
449 break;
450
451 default:
452 ZFCP_LOG_INFO("invalid command code 0x%08x\n",
453 sense_data->command);
454 retval = -EINVAL;
455 goto out;
456 }
457
458 bus_id = kmalloc(BUS_ID_SIZE, GFP_KERNEL);
459 if (bus_id == NULL) {
460 retval = -ENOMEM;
461 goto out;
462 }
463 snprintf(bus_id, BUS_ID_SIZE, "%d.%d.%04x",
464 (sense_data->devno >> 24),
465 (sense_data->devno >> 16) & 0xFF,
466 (sense_data->devno & 0xFFFF));
467
468 read_lock_irq(&zfcp_data.config_lock);
469 adapter = zfcp_get_adapter_by_busid(bus_id);
470 if (adapter)
471 zfcp_adapter_get(adapter);
472 read_unlock_irq(&zfcp_data.config_lock);
473
474 kfree(bus_id);
475
476 if (adapter == NULL) {
477 ZFCP_LOG_INFO("invalid adapter\n");
478 retval = -ENXIO;
479 goto out;
480 }
481
482 if (sense_data->command & ZFCP_CFDC_WITH_CONTROL_FILE) {
483 retval = zfcp_sg_list_alloc(sg_list,
484 ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
485 if (retval) {
486 retval = -ENOMEM;
487 goto out;
488 }
489 }
490
491 if ((sense_data->command & ZFCP_CFDC_DOWNLOAD) &&
492 (sense_data->command & ZFCP_CFDC_WITH_CONTROL_FILE)) {
493 retval = zfcp_sg_list_copy_from_user(
494 sg_list, &sense_data_user->control_file,
495 ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
496 if (retval) {
497 retval = -EFAULT;
498 goto out;
499 }
500 }
501
502 retval = zfcp_fsf_control_file(adapter, &fsf_req, fsf_command,
503 option, sg_list);
504 if (retval)
505 goto out;
506
507 if ((fsf_req->qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
508 (fsf_req->qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
509 retval = -ENXIO;
510 goto out;
511 }
512
513 sense_data->fsf_status = fsf_req->qtcb->header.fsf_status;
514 memcpy(&sense_data->fsf_status_qual,
515 &fsf_req->qtcb->header.fsf_status_qual,
516 sizeof(union fsf_status_qual));
517 memcpy(&sense_data->payloads, &fsf_req->qtcb->bottom.support.els, 256);
518
519 retval = copy_to_user(sense_data_user, sense_data,
520 sizeof(struct zfcp_cfdc_sense_data));
521 if (retval) {
522 retval = -EFAULT;
523 goto out;
524 }
525
526 if (sense_data->command & ZFCP_CFDC_UPLOAD) {
527 retval = zfcp_sg_list_copy_to_user(
528 &sense_data_user->control_file, sg_list,
529 ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
530 if (retval) {
531 retval = -EFAULT;
532 goto out;
533 }
534 }
535
536 out:
537 if (fsf_req != NULL)
538 zfcp_fsf_req_cleanup(fsf_req);
539
540 if ((adapter != NULL) && (retval != -ENXIO))
541 zfcp_adapter_put(adapter);
542
543 if (sg_list != NULL) {
544 zfcp_sg_list_free(sg_list);
545 kfree(sg_list);
546 }
547
548 if (sense_data != NULL)
549 kfree(sense_data);
550
551 return retval;
552}
553
554
555/**
556 * zfcp_sg_list_alloc - create a scatter-gather list of the specified size
557 * @sg_list: structure describing a scatter gather list
558 * @size: size of scatter-gather list
559 * Return: 0 on success, else -ENOMEM
560 *
561 * In sg_list->sg a pointer to the created scatter-gather list is returned,
562 * or NULL if we run out of memory. sg_list->count specifies the number of
563 * elements of the scatter-gather list. The maximum size of a single element
564 * in the scatter-gather list is PAGE_SIZE.
565 */
566static inline int
567zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size)
568{
569 struct scatterlist *sg;
570 unsigned int i;
571 int retval = 0;
572 void *address;
573
574 BUG_ON(sg_list == NULL);
575
576 sg_list->count = size >> PAGE_SHIFT;
577 if (size & ~PAGE_MASK)
578 sg_list->count++;
579 sg_list->sg = kmalloc(sg_list->count * sizeof(struct scatterlist),
580 GFP_KERNEL);
581 if (sg_list->sg == NULL) {
582 sg_list->count = 0;
583 retval = -ENOMEM;
584 goto out;
585 }
586 memset(sg_list->sg, 0, sg_list->count * sizeof(struct scatterlist));
587
588 for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++) {
589 sg->length = min(size, PAGE_SIZE);
590 sg->offset = 0;
591 address = (void *) get_zeroed_page(GFP_KERNEL);
592 if (address == NULL) {
593 sg_list->count = i;
594 zfcp_sg_list_free(sg_list);
595 retval = -ENOMEM;
596 goto out;
597 }
598 zfcp_address_to_sg(address, sg);
599 size -= sg->length;
600 }
601
602 out:
603 return retval;
604}
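
The element count above is a ceiling division by the page size, written with a shift plus a remainder test. A standalone check with assumed constants:

#include <stdio.h>

#define PAGE_SIZE  4096UL		/* assumed values, for illustration */
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_SHIFT 12

/* Element count as computed above: full pages, plus one for a remainder. */
static unsigned long sg_count(unsigned long size)
{
	unsigned long count = size >> PAGE_SHIFT;

	if (size & ~PAGE_MASK)		/* partial trailing page */
		count++;
	return count;
}

int main(void)
{
	printf("%lu %lu %lu\n", sg_count(4096), sg_count(4097), sg_count(0));
	return 0;
}
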
605
606
607/**
608 * zfcp_sg_list_free - free memory of a scatter-gather list
609 * @sg_list: structure describing a scatter-gather list
610 *
611 * Memory for each element in the scatter-gather list is freed.
612 * Finally sg_list->sg is freed itself and sg_list->count is reset.
613 */
614static inline void
615zfcp_sg_list_free(struct zfcp_sg_list *sg_list)
616{
617 struct scatterlist *sg;
618 unsigned int i;
619
620 BUG_ON(sg_list == NULL);
621
622 for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++)
623 free_page((unsigned long) zfcp_sg_to_address(sg));
624
625 sg_list->count = 0;
626 kfree(sg_list->sg);
627}
628
629/**
630 * zfcp_sg_size - determine size of a scatter-gather list
631 * @sg: array of (struct scatterlist)
632 * @sg_count: elements in array
633 * Return: size of entire scatter-gather list
634 */
635size_t
636zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count)
637{
638 unsigned int i;
639 struct scatterlist *p;
640 size_t size;
641
642 size = 0;
643 for (i = 0, p = sg; i < sg_count; i++, p++) {
644 BUG_ON(p == NULL);
645 size += p->length;
646 }
647
648 return size;
649}
650
651
652/**
653 * zfcp_sg_list_copy_from_user - copy data from user space to a scatter-gather list
654 * @sg_list: structure describing a scatter-gather list
655 * @user_buffer: pointer to buffer in user space
656 * @size: number of bytes to be copied
657 * Return: 0 on success, -EFAULT if copy_from_user fails.
658 */
659static inline int
660zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list,
661 void __user *user_buffer,
662 size_t size)
663{
664 struct scatterlist *sg;
665 unsigned int length;
666 void *zfcp_buffer;
667 int retval = 0;
668
669 BUG_ON(sg_list == NULL);
670
671 if (zfcp_sg_size(sg_list->sg, sg_list->count) < size)
672 return -EFAULT;
673
674 for (sg = sg_list->sg; size > 0; sg++) {
675 length = min((unsigned int)size, sg->length);
676 zfcp_buffer = zfcp_sg_to_address(sg);
677 if (copy_from_user(zfcp_buffer, user_buffer, length)) {
678 retval = -EFAULT;
679 goto out;
680 }
681 user_buffer += length;
682 size -= length;
683 }
684
685 out:
686 return retval;
687}
688
689
690/**
691 * zfcp_sg_list_copy_to_user - copy data from scatter-gather list to user space
692 * @user_buffer: pointer to buffer in user space
693 * @sg_list: structure describing a scatter-gather list
694 * @size: number of bytes to be copied
695 * Return: 0 on success, -EFAULT if copy_to_user fails
696 */
697static inline int
698zfcp_sg_list_copy_to_user(void __user *user_buffer,
699 struct zfcp_sg_list *sg_list,
700 size_t size)
701{
702 struct scatterlist *sg;
703 unsigned int length;
704 void *zfcp_buffer;
705 int retval = 0;
706
707 BUG_ON(sg_list == NULL);
708
709 if (zfcp_sg_size(sg_list->sg, sg_list->count) < size)
710 return -EFAULT;
711
712 for (sg = sg_list->sg; size > 0; sg++) {
713 length = min((unsigned int) size, sg->length);
714 zfcp_buffer = zfcp_sg_to_address(sg);
715 if (copy_to_user(user_buffer, zfcp_buffer, length)) {
716 retval = -EFAULT;
717 goto out;
718 }
719 user_buffer += length;
720 size -= length;
721 }
722
723 out:
724 return retval;
725}
726
727
728#undef ZFCP_LOG_AREA
729
730/****************************************************************/
731/****** Functions for configuration/set-up of structures ********/
732/****************************************************************/
733
734#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
735
736/**
737 * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN
738 * @port: pointer to port to search for unit
739 * @fcp_lun: FCP LUN to search for
740 * Traverse list of all units of a port and return pointer to a unit
741 * with the given FCP LUN.
742 */
743struct zfcp_unit *
744zfcp_get_unit_by_lun(struct zfcp_port *port, fcp_lun_t fcp_lun)
745{
746 struct zfcp_unit *unit;
747 int found = 0;
748
749 list_for_each_entry(unit, &port->unit_list_head, list) {
750 if ((unit->fcp_lun == fcp_lun) &&
751 !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status))
752 {
753 found = 1;
754 break;
755 }
756 }
757 return found ? unit : NULL;
758}
759
760/**
761 * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
762 * @adapter: pointer to adapter to search for port
763 * @wwpn: wwpn to search for
764 * Traverse list of all ports of an adapter and return pointer to a port
765 * with the given wwpn.
766 */
767struct zfcp_port *
768zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, wwn_t wwpn)
769{
770 struct zfcp_port *port;
771 int found = 0;
772
773 list_for_each_entry(port, &adapter->port_list_head, list) {
774 if ((port->wwpn == wwpn) &&
775 !(atomic_read(&port->status) &
776 (ZFCP_STATUS_PORT_NO_WWPN | ZFCP_STATUS_COMMON_REMOVE))) {
777 found = 1;
778 break;
779 }
780 }
781 return found ? port : NULL;
782}
783
784/**
785 * zfcp_get_port_by_did - find port in port list of adapter by d_id
786 * @adapter: pointer to adapter to search for port
787 * @d_id: d_id to search for
788 * Traverse list of all ports of an adapter and return pointer to a port
789 * with the given d_id.
790 */
791struct zfcp_port *
792zfcp_get_port_by_did(struct zfcp_adapter *adapter, u32 d_id)
793{
794 struct zfcp_port *port;
795 int found = 0;
796
797 list_for_each_entry(port, &adapter->port_list_head, list) {
798 if ((port->d_id == d_id) &&
799 !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status))
800 {
801 found = 1;
802 break;
803 }
804 }
805 return found ? port : NULL;
806}
807
808/**
809 * zfcp_get_adapter_by_busid - find adapter in adapter list by bus_id
810 * @bus_id: bus_id to search for
811 * Traverse list of all adapters and return pointer to an adapter
812 * with the given bus_id.
813 */
814struct zfcp_adapter *
815zfcp_get_adapter_by_busid(char *bus_id)
816{
817 struct zfcp_adapter *adapter;
818 int found = 0;
819
820 list_for_each_entry(adapter, &zfcp_data.adapter_list_head, list) {
821 if ((strncmp(bus_id, zfcp_get_busid_by_adapter(adapter),
822 BUS_ID_SIZE) == 0) &&
823 !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE,
824 &adapter->status)){
825 found = 1;
826 break;
827 }
828 }
829 return found ? adapter : NULL;
830}
831
832/**
833 * zfcp_unit_enqueue - enqueue unit to unit list of a port.
834 * @port: pointer to port where unit is added
835 * @fcp_lun: FCP LUN of unit to be enqueued
836 * Return: pointer to enqueued unit on success, NULL on error
837 * Locks: config_sema must be held to serialize changes to the unit list
838 *
839 * Sets up some unit internal structures and creates sysfs entry.
840 */
841struct zfcp_unit *
842zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
843{
844 struct zfcp_unit *unit, *tmp_unit;
845 scsi_lun_t scsi_lun;
846 int found;
847
848	/*
849	 * Check that there is no unit with this FCP_LUN already in the
850	 * list, then enqueue it.
851	 * Note: unlike for the adapter and the port, a duplicate here is an error.
852	 */
853 read_lock_irq(&zfcp_data.config_lock);
854 unit = zfcp_get_unit_by_lun(port, fcp_lun);
855 read_unlock_irq(&zfcp_data.config_lock);
856 if (unit)
857 return NULL;
858
859 unit = kmalloc(sizeof (struct zfcp_unit), GFP_KERNEL);
860 if (!unit)
861 return NULL;
862 memset(unit, 0, sizeof (struct zfcp_unit));
863
864 /* initialise reference count stuff */
865 atomic_set(&unit->refcount, 0);
866 init_waitqueue_head(&unit->remove_wq);
867
868 unit->port = port;
869 unit->fcp_lun = fcp_lun;
870
871 /* setup for sysfs registration */
872 snprintf(unit->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx", fcp_lun);
873 unit->sysfs_device.parent = &port->sysfs_device;
874 unit->sysfs_device.release = zfcp_sysfs_unit_release;
875 dev_set_drvdata(&unit->sysfs_device, unit);
876
877 /* mark unit unusable as long as sysfs registration is not complete */
878 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
879
880 if (device_register(&unit->sysfs_device)) {
881 kfree(unit);
882 return NULL;
883 }
884
885 if (zfcp_sysfs_unit_create_files(&unit->sysfs_device)) {
886 device_unregister(&unit->sysfs_device);
887 return NULL;
888 }
889
890 zfcp_unit_get(unit);
891
892 scsi_lun = 0;
893 found = 0;
894 write_lock_irq(&zfcp_data.config_lock);
895 list_for_each_entry(tmp_unit, &port->unit_list_head, list) {
896 if (tmp_unit->scsi_lun != scsi_lun) {
897 found = 1;
898 break;
899 }
900 scsi_lun++;
901 }
902 unit->scsi_lun = scsi_lun;
903 if (found)
904 list_add_tail(&unit->list, &tmp_unit->list);
905 else
906 list_add_tail(&unit->list, &port->unit_list_head);
907 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
908 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
909 write_unlock_irq(&zfcp_data.config_lock);
910
911 port->units++;
912 zfcp_port_get(port);
913
914 return unit;
915}
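
The enqueue scan above assigns the lowest SCSI LUN not yet taken, relying on the unit list being kept sorted by LUN. The core of that scan, as a sketch over a plain array:

#include <stdio.h>

/* Find the lowest unused SCSI LUN given LUNs kept sorted ascending,
 * mirroring the insertion scan in zfcp_unit_enqueue() (sketch only). */
static unsigned first_free_lun(const unsigned *luns, unsigned n)
{
	unsigned expect = 0, i;

	for (i = 0; i < n; i++) {
		if (luns[i] != expect)	/* gap found: reuse this LUN */
			break;
		expect++;
	}
	return expect;
}

int main(void)
{
	unsigned luns[] = { 0, 1, 3, 4 };	/* LUN 2 was removed earlier */

	printf("next LUN: %u\n", first_free_lun(luns, 4));
	return 0;
}
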
916
917void
918zfcp_unit_dequeue(struct zfcp_unit *unit)
919{
920 zfcp_unit_wait(unit);
921 write_lock_irq(&zfcp_data.config_lock);
922 list_del(&unit->list);
923 write_unlock_irq(&zfcp_data.config_lock);
924 unit->port->units--;
925 zfcp_port_put(unit->port);
926 zfcp_sysfs_unit_remove_files(&unit->sysfs_device);
927 device_unregister(&unit->sysfs_device);
928}
929
930static void *
931zfcp_mempool_alloc(unsigned int __nocast gfp_mask, void *size)
932{
933 return kmalloc((size_t) size, gfp_mask);
934}
935
936static void
937zfcp_mempool_free(void *element, void *size)
938{
939 kfree(element);
940}
941
942/*
943 * Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI
944 * commands.
945 * It also generates fcp-nameserver request/response buffers and unsolicited
946 * status read fsf_req buffers.
947 *
948 * locks: must only be called with zfcp_data.config_sema taken
949 */
950static int
951zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
952{
953 adapter->pool.fsf_req_erp =
954 mempool_create(ZFCP_POOL_FSF_REQ_ERP_NR,
955 zfcp_mempool_alloc, zfcp_mempool_free, (void *)
956 sizeof(struct zfcp_fsf_req_pool_element));
957
958 if (NULL == adapter->pool.fsf_req_erp)
959 return -ENOMEM;
960
961 adapter->pool.fsf_req_scsi =
962 mempool_create(ZFCP_POOL_FSF_REQ_SCSI_NR,
963 zfcp_mempool_alloc, zfcp_mempool_free, (void *)
964 sizeof(struct zfcp_fsf_req_pool_element));
965
966 if (NULL == adapter->pool.fsf_req_scsi)
967 return -ENOMEM;
968
969 adapter->pool.fsf_req_abort =
970 mempool_create(ZFCP_POOL_FSF_REQ_ABORT_NR,
971 zfcp_mempool_alloc, zfcp_mempool_free, (void *)
972 sizeof(struct zfcp_fsf_req_pool_element));
973
974 if (NULL == adapter->pool.fsf_req_abort)
975 return -ENOMEM;
976
977 adapter->pool.fsf_req_status_read =
978 mempool_create(ZFCP_POOL_STATUS_READ_NR,
979 zfcp_mempool_alloc, zfcp_mempool_free,
980 (void *) sizeof(struct zfcp_fsf_req));
981
982 if (NULL == adapter->pool.fsf_req_status_read)
983 return -ENOMEM;
984
985 adapter->pool.data_status_read =
986 mempool_create(ZFCP_POOL_STATUS_READ_NR,
987 zfcp_mempool_alloc, zfcp_mempool_free,
988 (void *) sizeof(struct fsf_status_read_buffer));
989
990 if (NULL == adapter->pool.data_status_read)
991 return -ENOMEM;
992
993 adapter->pool.data_gid_pn =
994 mempool_create(ZFCP_POOL_DATA_GID_PN_NR,
995 zfcp_mempool_alloc, zfcp_mempool_free, (void *)
996 sizeof(struct zfcp_gid_pn_data));
997
998 if (NULL == adapter->pool.data_gid_pn)
999 return -ENOMEM;
1000
1001 return 0;
1002}
1003
1004/**
1005 * zfcp_free_low_mem_buffers - free memory pools of an adapter
1006 * @adapter: pointer to zfcp_adapter for which memory pools should be freed
1007 * locking: zfcp_data.config_sema must be held
1008 */
1009static void
1010zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
1011{
1012 if (adapter->pool.fsf_req_erp)
1013 mempool_destroy(adapter->pool.fsf_req_erp);
1014 if (adapter->pool.fsf_req_scsi)
1015 mempool_destroy(adapter->pool.fsf_req_scsi);
1016 if (adapter->pool.fsf_req_abort)
1017 mempool_destroy(adapter->pool.fsf_req_abort);
1018 if (adapter->pool.fsf_req_status_read)
1019 mempool_destroy(adapter->pool.fsf_req_status_read);
1020 if (adapter->pool.data_status_read)
1021 mempool_destroy(adapter->pool.data_status_read);
1022 if (adapter->pool.data_gid_pn)
1023 mempool_destroy(adapter->pool.data_gid_pn);
1024}
1025
1026/**
1027 * zfcp_adapter_debug_register - registers debug feature for an adapter
1028 * @adapter: pointer to adapter for which debug features should be registered
1029 * return: -ENOMEM on error, 0 otherwise
1030 */
1031int
1032zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
1033{
1034 char dbf_name[20];
1035
1036 /* debug feature area which records SCSI command failures (hostbyte) */
1037 spin_lock_init(&adapter->dbf_lock);
1038
1039 sprintf(dbf_name, ZFCP_CMD_DBF_NAME "%s",
1040 zfcp_get_busid_by_adapter(adapter));
1041 adapter->cmd_dbf = debug_register(dbf_name, ZFCP_CMD_DBF_INDEX,
1042 ZFCP_CMD_DBF_AREAS,
1043 ZFCP_CMD_DBF_LENGTH);
1044 debug_register_view(adapter->cmd_dbf, &debug_hex_ascii_view);
1045 debug_set_level(adapter->cmd_dbf, ZFCP_CMD_DBF_LEVEL);
1046
1047 /* debug feature area which records SCSI command aborts */
1048 sprintf(dbf_name, ZFCP_ABORT_DBF_NAME "%s",
1049 zfcp_get_busid_by_adapter(adapter));
1050 adapter->abort_dbf = debug_register(dbf_name, ZFCP_ABORT_DBF_INDEX,
1051 ZFCP_ABORT_DBF_AREAS,
1052 ZFCP_ABORT_DBF_LENGTH);
1053 debug_register_view(adapter->abort_dbf, &debug_hex_ascii_view);
1054 debug_set_level(adapter->abort_dbf, ZFCP_ABORT_DBF_LEVEL);
1055
1056 /* debug feature area which records incoming ELS commands */
1057 sprintf(dbf_name, ZFCP_IN_ELS_DBF_NAME "%s",
1058 zfcp_get_busid_by_adapter(adapter));
1059 adapter->in_els_dbf = debug_register(dbf_name, ZFCP_IN_ELS_DBF_INDEX,
1060 ZFCP_IN_ELS_DBF_AREAS,
1061 ZFCP_IN_ELS_DBF_LENGTH);
1062 debug_register_view(adapter->in_els_dbf, &debug_hex_ascii_view);
1063 debug_set_level(adapter->in_els_dbf, ZFCP_IN_ELS_DBF_LEVEL);
1064
1065 /* debug feature area which records erp events */
1066 sprintf(dbf_name, ZFCP_ERP_DBF_NAME "%s",
1067 zfcp_get_busid_by_adapter(adapter));
1068 adapter->erp_dbf = debug_register(dbf_name, ZFCP_ERP_DBF_INDEX,
1069 ZFCP_ERP_DBF_AREAS,
1070 ZFCP_ERP_DBF_LENGTH);
1071 debug_register_view(adapter->erp_dbf, &debug_hex_ascii_view);
1072 debug_set_level(adapter->erp_dbf, ZFCP_ERP_DBF_LEVEL);
1073
1074 if (!(adapter->cmd_dbf && adapter->abort_dbf &&
1075 adapter->in_els_dbf && adapter->erp_dbf)) {
1076 zfcp_adapter_debug_unregister(adapter);
1077 return -ENOMEM;
1078 }
1079
1080 return 0;
1081
1082}
1083
1084/**
1085 * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
1086 * @adapter: pointer to adapter for which debug features should be unregistered
1087 */
1088void
1089zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
1090{
1091 debug_unregister(adapter->abort_dbf);
1092 debug_unregister(adapter->cmd_dbf);
1093 debug_unregister(adapter->erp_dbf);
1094 debug_unregister(adapter->in_els_dbf);
1095 adapter->abort_dbf = NULL;
1096 adapter->cmd_dbf = NULL;
1097 adapter->erp_dbf = NULL;
1098 adapter->in_els_dbf = NULL;
1099}
1100
1101void
1102zfcp_dummy_release(struct device *dev)
1103{
1104 return;
1105}
1106
1107/*
1108 * Enqueues an adapter at the end of the adapter list in the driver data.
1109 * All adapter internal structures are set up and the sysfs entries are
1110 * created as well.
1111 *
1112 * returns: pointer to the newly enqueued zfcp_adapter on success,
1113 *          NULL if allocation or setup of the adapter failed
1114 *
1115 * locks: config_sema must be held to serialise changes to the adapter list
1116 */
1117struct zfcp_adapter *
1118zfcp_adapter_enqueue(struct ccw_device *ccw_device)
1119{
1120 int retval = 0;
1121 struct zfcp_adapter *adapter;
1122
1123 /*
1124 * Note: It is safe to release the list_lock, as any list changes
1125 * are protected by the config_sema, which must be held to get here
1126 */
1127
1128 /* try to allocate new adapter data structure (zeroed) */
1129 adapter = kmalloc(sizeof (struct zfcp_adapter), GFP_KERNEL);
1130 if (!adapter) {
1131 ZFCP_LOG_INFO("error: allocation of base adapter "
1132 "structure failed\n");
1133 goto out;
1134 }
1135 memset(adapter, 0, sizeof (struct zfcp_adapter));
1136
1137 ccw_device->handler = NULL;
1138
1139 /* save ccw_device pointer */
1140 adapter->ccw_device = ccw_device;
1141
1142 retval = zfcp_qdio_allocate_queues(adapter);
1143 if (retval)
1144 goto queues_alloc_failed;
1145
1146 retval = zfcp_qdio_allocate(adapter);
1147 if (retval)
1148 goto qdio_allocate_failed;
1149
1150 retval = zfcp_allocate_low_mem_buffers(adapter);
1151 if (retval) {
1152 ZFCP_LOG_INFO("error: pool allocation failed\n");
1153 goto failed_low_mem_buffers;
1154 }
1155
1156 /* initialise reference count stuff */
1157 atomic_set(&adapter->refcount, 0);
1158 init_waitqueue_head(&adapter->remove_wq);
1159
1160 /* initialise list of ports */
1161 INIT_LIST_HEAD(&adapter->port_list_head);
1162
1163 /* initialise list of ports to be removed */
1164 INIT_LIST_HEAD(&adapter->port_remove_lh);
1165
1166 /* initialize list of fsf requests */
1167 rwlock_init(&adapter->fsf_req_list_lock);
1168 INIT_LIST_HEAD(&adapter->fsf_req_list_head);
1169
1170 /* initialize abort lock */
1171 rwlock_init(&adapter->abort_lock);
1172
1173 /* initialise some erp stuff */
1174 init_waitqueue_head(&adapter->erp_thread_wqh);
1175 init_waitqueue_head(&adapter->erp_done_wqh);
1176
1177 /* initialize lock of associated request queue */
1178 rwlock_init(&adapter->request_queue.queue_lock);
1179
1180	/* initialise SCSI ER timer */
1181 init_timer(&adapter->scsi_er_timer);
1182
1183 /* set FC service class used per default */
1184 adapter->fc_service_class = ZFCP_FC_SERVICE_CLASS_DEFAULT;
1185
1186 sprintf(adapter->name, "%s", zfcp_get_busid_by_adapter(adapter));
1187 ASCEBC(adapter->name, strlen(adapter->name));
1188
1189 /* mark adapter unusable as long as sysfs registration is not complete */
1190 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
1191
1192 adapter->ccw_device = ccw_device;
1193 dev_set_drvdata(&ccw_device->dev, adapter);
1194
1195 if (zfcp_sysfs_adapter_create_files(&ccw_device->dev))
1196 goto sysfs_failed;
1197
1198 adapter->generic_services.parent = &adapter->ccw_device->dev;
1199 adapter->generic_services.release = zfcp_dummy_release;
1200 snprintf(adapter->generic_services.bus_id, BUS_ID_SIZE,
1201 "generic_services");
1202
1203 if (device_register(&adapter->generic_services))
1204 goto generic_services_failed;
1205
1206 /* put allocated adapter at list tail */
1207 write_lock_irq(&zfcp_data.config_lock);
1208 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
1209 list_add_tail(&adapter->list, &zfcp_data.adapter_list_head);
1210 write_unlock_irq(&zfcp_data.config_lock);
1211
1212 zfcp_data.adapters++;
1213
1214 goto out;
1215
1216 generic_services_failed:
1217 zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
1218 sysfs_failed:
1219 dev_set_drvdata(&ccw_device->dev, NULL);
1220 failed_low_mem_buffers:
1221 zfcp_free_low_mem_buffers(adapter);
1222 if (qdio_free(ccw_device) != 0)
1223 ZFCP_LOG_NORMAL("bug: qdio_free for adapter %s failed\n",
1224 zfcp_get_busid_by_adapter(adapter));
1225 qdio_allocate_failed:
1226 zfcp_qdio_free_queues(adapter);
1227 queues_alloc_failed:
1228 kfree(adapter);
1229 adapter = NULL;
1230 out:
1231 return adapter;
1232}
1233
1234/*
1235 * Dequeues the specified adapter: removes it from the adapter list and
1236 * frees its data structures. If FSF requests are still pending for the
1237 * adapter, it is left in place and the function returns early.
1238 * locks: config_sema must be held by the caller;
1239 *        adapter->fsf_req_list_lock is taken and released within this
1240 *        function and must not be held on entry
1241 */
1242void
1243zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
1244{
1245 int retval = 0;
1246 unsigned long flags;
1247
1248 device_unregister(&adapter->generic_services);
1249 zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
1250 dev_set_drvdata(&adapter->ccw_device->dev, NULL);
1251 /* sanity check: no pending FSF requests */
1252 read_lock_irqsave(&adapter->fsf_req_list_lock, flags);
1253 retval = !list_empty(&adapter->fsf_req_list_head);
1254 read_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
1255 if (retval) {
1256 ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, "
1257 "%i requests outstanding\n",
1258 zfcp_get_busid_by_adapter(adapter), adapter,
1259 atomic_read(&adapter->fsf_reqs_active));
1260 retval = -EBUSY;
1261 goto out;
1262 }
1263
1264 /* remove specified adapter data structure from list */
1265 write_lock_irq(&zfcp_data.config_lock);
1266 list_del(&adapter->list);
1267 write_unlock_irq(&zfcp_data.config_lock);
1268
1269 /* decrease number of adapters in list */
1270 zfcp_data.adapters--;
1271
1272 ZFCP_LOG_TRACE("adapter %s (%p) removed from list, "
1273 "%i adapters still in list\n",
1274 zfcp_get_busid_by_adapter(adapter),
1275 adapter, zfcp_data.adapters);
1276
1277 retval = qdio_free(adapter->ccw_device);
1278 if (retval)
1279 ZFCP_LOG_NORMAL("bug: qdio_free for adapter %s failed\n",
1280 zfcp_get_busid_by_adapter(adapter));
1281
1282 zfcp_free_low_mem_buffers(adapter);
1283 /* free memory of adapter data structure and queues */
1284 zfcp_qdio_free_queues(adapter);
1285 ZFCP_LOG_TRACE("freeing adapter structure\n");
1286 kfree(adapter);
1287 out:
1288 return;
1289}
1290
1291/**
1292 * zfcp_port_enqueue - enqueue port to port list of adapter
1293 * @adapter: adapter where remote port is added
1294 * @wwpn: WWPN of the remote port to be enqueued
1295 * @status: initial status for the port
1296 * @d_id: destination id of the remote port to be enqueued
1297 * Return: pointer to enqueued port on success, NULL on error
1298 * Locks: config_sema must be held to serialize changes to the port list
1299 *
1300 * All port internal structures are set up and the sysfs entry is generated.
1301 * d_id is used to enqueue ports with a well known address like the Directory
1302 * Service for nameserver lookup.
1303 */
1304struct zfcp_port *
1305zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn, u32 status,
1306 u32 d_id)
1307{
1308 struct zfcp_port *port, *tmp_port;
1309 int check_wwpn;
1310 scsi_id_t scsi_id;
1311 int found;
1312
1313 check_wwpn = !(status & ZFCP_STATUS_PORT_NO_WWPN);
1314
1315 /*
1316 * check that there is no port with this WWPN already in list
1317 */
1318 if (check_wwpn) {
1319 read_lock_irq(&zfcp_data.config_lock);
1320 port = zfcp_get_port_by_wwpn(adapter, wwpn);
1321 read_unlock_irq(&zfcp_data.config_lock);
1322 if (port)
1323 return NULL;
1324 }
1325
1326 port = kmalloc(sizeof (struct zfcp_port), GFP_KERNEL);
1327 if (!port)
1328 return NULL;
1329 memset(port, 0, sizeof (struct zfcp_port));
1330
1331 /* initialise reference count stuff */
1332 atomic_set(&port->refcount, 0);
1333 init_waitqueue_head(&port->remove_wq);
1334
1335 INIT_LIST_HEAD(&port->unit_list_head);
1336 INIT_LIST_HEAD(&port->unit_remove_lh);
1337
1338 port->adapter = adapter;
1339
1340 if (check_wwpn)
1341 port->wwpn = wwpn;
1342
1343 atomic_set_mask(status, &port->status);
1344
1345 /* setup for sysfs registration */
1346 if (status & ZFCP_STATUS_PORT_WKA) {
1347 switch (d_id) {
1348 case ZFCP_DID_DIRECTORY_SERVICE:
1349 snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
1350 "directory");
1351 break;
1352 case ZFCP_DID_MANAGEMENT_SERVICE:
1353 snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
1354 "management");
1355 break;
1356 case ZFCP_DID_KEY_DISTRIBUTION_SERVICE:
1357 snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
1358 "key_distribution");
1359 break;
1360 case ZFCP_DID_ALIAS_SERVICE:
1361 snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
1362 "alias");
1363 break;
1364 case ZFCP_DID_TIME_SERVICE:
1365 snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
1366 "time");
1367 break;
1368 default:
1369 kfree(port);
1370 return NULL;
1371 }
1372 port->d_id = d_id;
1373 port->sysfs_device.parent = &adapter->generic_services;
1374 } else {
1375 snprintf(port->sysfs_device.bus_id,
1376 BUS_ID_SIZE, "0x%016llx", wwpn);
1377 port->sysfs_device.parent = &adapter->ccw_device->dev;
1378 }
1379 port->sysfs_device.release = zfcp_sysfs_port_release;
1380 dev_set_drvdata(&port->sysfs_device, port);
1381
1382 /* mark port unusable as long as sysfs registration is not complete */
1383 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
1384
1385 if (device_register(&port->sysfs_device)) {
1386 kfree(port);
1387 return NULL;
1388 }
1389
1390 if (zfcp_sysfs_port_create_files(&port->sysfs_device, status)) {
1391 device_unregister(&port->sysfs_device);
1392 return NULL;
1393 }
1394
1395 zfcp_port_get(port);
1396
1397 scsi_id = 1;
1398 found = 0;
1399 write_lock_irq(&zfcp_data.config_lock);
1400 list_for_each_entry(tmp_port, &adapter->port_list_head, list) {
1401 if (atomic_test_mask(ZFCP_STATUS_PORT_NO_SCSI_ID,
1402 &tmp_port->status))
1403 continue;
1404 if (tmp_port->scsi_id != scsi_id) {
1405 found = 1;
1406 break;
1407 }
1408 scsi_id++;
1409 }
1410 port->scsi_id = scsi_id;
1411 if (found)
1412 list_add_tail(&port->list, &tmp_port->list);
1413 else
1414 list_add_tail(&port->list, &adapter->port_list_head);
1415 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
1416 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
1417 if (d_id == ZFCP_DID_DIRECTORY_SERVICE)
1418 if (!adapter->nameserver_port)
1419 adapter->nameserver_port = port;
1420 adapter->ports++;
1421 write_unlock_irq(&zfcp_data.config_lock);
1422
1423 zfcp_adapter_get(adapter);
1424
1425 return port;
1426}
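A concrete walk of the SCSI-ID assignment above: the port list is kept sorted by scsi_id, and the loop hands out the lowest unused ID starting at 1. If the listed ports hold IDs 1, 2 and 4, the loop matches 1 and 2, then finds the port with ID 4 where it expected 3 and breaks with found set, so the new port gets scsi_id 3 and is inserted in front of that port. If the IDs are gapless (1, 2, 3), the loop runs off the end and the new port is appended at the tail with ID 4.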
1427
1428void
1429zfcp_port_dequeue(struct zfcp_port *port)
1430{
1431 zfcp_port_wait(port);
1432 write_lock_irq(&zfcp_data.config_lock);
1433 list_del(&port->list);
1434 port->adapter->ports--;
1435 write_unlock_irq(&zfcp_data.config_lock);
1436 zfcp_adapter_put(port->adapter);
1437 zfcp_sysfs_port_remove_files(&port->sysfs_device,
1438 atomic_read(&port->status));
1439 device_unregister(&port->sysfs_device);
1440}
1441
1442/* Enqueues a nameserver port */
1443int
1444zfcp_nameserver_enqueue(struct zfcp_adapter *adapter)
1445{
1446 struct zfcp_port *port;
1447
1448 port = zfcp_port_enqueue(adapter, 0, ZFCP_STATUS_PORT_WKA,
1449 ZFCP_DID_DIRECTORY_SERVICE);
1450 if (!port) {
1451 ZFCP_LOG_INFO("error: enqueue of nameserver port for "
1452 "adapter %s failed\n",
1453 zfcp_get_busid_by_adapter(adapter));
1454 return -ENXIO;
1455 }
1456 zfcp_port_put(port);
1457
1458 return 0;
1459}
1460
1461#undef ZFCP_LOG_AREA
1462
1463/****************************************************************/
1464/******* Fibre Channel Standard related Functions **************/
1465/****************************************************************/
1466
1467#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FC
1468
1469void
1470zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
1471 struct fsf_status_read_buffer *status_buffer)
1472{
1473 struct fcp_rscn_head *fcp_rscn_head;
1474 struct fcp_rscn_element *fcp_rscn_element;
1475 struct zfcp_port *port;
1476 u16 i;
1477 u16 no_entries;
1478 u32 range_mask;
1479 unsigned long flags;
1480
1481 fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload;
1482 fcp_rscn_element = (struct fcp_rscn_element *) status_buffer->payload;
1483
1484 /* see FC-FS */
1485 no_entries = (fcp_rscn_head->payload_len / 4);
1486
1487 zfcp_in_els_dbf_event(adapter, "##rscn", status_buffer,
1488 fcp_rscn_head->payload_len);
1489
1490 debug_text_event(adapter->erp_dbf, 1, "unsol_els_rscn:");
1491 for (i = 1; i < no_entries; i++) {
1492 /* skip head and start with 1st element */
1493 fcp_rscn_element++;
1494 switch (fcp_rscn_element->addr_format) {
1495 case ZFCP_PORT_ADDRESS:
1496 ZFCP_LOG_FLAGS(1, "ZFCP_PORT_ADDRESS\n");
1497 range_mask = ZFCP_PORTS_RANGE_PORT;
1498 break;
1499 case ZFCP_AREA_ADDRESS:
1500 ZFCP_LOG_FLAGS(1, "ZFCP_AREA_ADDRESS\n");
1501 range_mask = ZFCP_PORTS_RANGE_AREA;
1502 break;
1503 case ZFCP_DOMAIN_ADDRESS:
1504 ZFCP_LOG_FLAGS(1, "ZFCP_DOMAIN_ADDRESS\n");
1505 range_mask = ZFCP_PORTS_RANGE_DOMAIN;
1506 break;
1507 case ZFCP_FABRIC_ADDRESS:
1508 ZFCP_LOG_FLAGS(1, "ZFCP_FABRIC_ADDRESS\n");
1509 range_mask = ZFCP_PORTS_RANGE_FABRIC;
1510 break;
1511 default:
1512 ZFCP_LOG_INFO("incoming RSCN with unknown "
1513 "address format\n");
1514 continue;
1515 }
1516 read_lock_irqsave(&zfcp_data.config_lock, flags);
1517 list_for_each_entry(port, &adapter->port_list_head, list) {
1518 if (atomic_test_mask
1519 (ZFCP_STATUS_PORT_WKA, &port->status))
1520 continue;
1521 /* Do we know this port? If not skip it. */
1522 if (!atomic_test_mask
1523 (ZFCP_STATUS_PORT_DID_DID, &port->status)) {
1524 ZFCP_LOG_INFO("incoming RSCN, trying to open "
1525 "port 0x%016Lx\n", port->wwpn);
1526 debug_text_event(adapter->erp_dbf, 1,
1527 "unsol_els_rscnu:");
1528 zfcp_erp_port_reopen(port,
1529 ZFCP_STATUS_COMMON_ERP_FAILED);
1530 continue;
1531 }
1532
1533 /*
1534			 * FIXME: race: d_id might be invalidated
1535 * (...DID_DID reset)
1536 */
1537 if ((port->d_id & range_mask)
1538 == (fcp_rscn_element->nport_did & range_mask)) {
1539 ZFCP_LOG_TRACE("reopen did 0x%08x\n",
1540 fcp_rscn_element->nport_did);
1541 /*
1542 * Unfortunately, an RSCN does not specify the
1543 * type of change a target underwent. We assume
1544 * that it makes sense to reopen the link.
1545 * FIXME: Shall we try to find out more about
1546 * the target and link state before closing it?
1547 * How to accomplish this? (nameserver?)
1548 * Where would such code be put in?
1549 * (inside or outside erp)
1550 */
1551 ZFCP_LOG_INFO("incoming RSCN, trying to open "
1552 "port 0x%016Lx\n", port->wwpn);
1553 debug_text_event(adapter->erp_dbf, 1,
1554 "unsol_els_rscnk:");
1555 zfcp_test_link(port);
1556 }
1557 }
1558 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1559 }
1560}
1561
1562static void
1563zfcp_fsf_incoming_els_plogi(struct zfcp_adapter *adapter,
1564 struct fsf_status_read_buffer *status_buffer)
1565{
1566 logi *els_logi = (logi *) status_buffer->payload;
1567 struct zfcp_port *port;
1568 unsigned long flags;
1569
1570 zfcp_in_els_dbf_event(adapter, "##plogi", status_buffer, 28);
1571
1572 read_lock_irqsave(&zfcp_data.config_lock, flags);
1573 list_for_each_entry(port, &adapter->port_list_head, list) {
1574 if (port->wwpn == (*(wwn_t *) & els_logi->nport_wwn))
1575 break;
1576 }
1577 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1578
1579 if (!port || (port->wwpn != (*(wwn_t *) & els_logi->nport_wwn))) {
1580		ZFCP_LOG_DEBUG("ignored incoming PLOGI for nonexistent port "
1581 "with d_id 0x%08x on adapter %s\n",
1582 status_buffer->d_id,
1583 zfcp_get_busid_by_adapter(adapter));
1584 } else {
1585 debug_text_event(adapter->erp_dbf, 1, "unsol_els_plogi:");
1586 debug_event(adapter->erp_dbf, 1, &els_logi->nport_wwn, 8);
1587 zfcp_erp_port_forced_reopen(port, 0);
1588 }
1589}
1590
1591static void
1592zfcp_fsf_incoming_els_logo(struct zfcp_adapter *adapter,
1593 struct fsf_status_read_buffer *status_buffer)
1594{
1595 struct fcp_logo *els_logo = (struct fcp_logo *) status_buffer->payload;
1596 struct zfcp_port *port;
1597 unsigned long flags;
1598
1599 zfcp_in_els_dbf_event(adapter, "##logo", status_buffer, 16);
1600
1601 read_lock_irqsave(&zfcp_data.config_lock, flags);
1602 list_for_each_entry(port, &adapter->port_list_head, list) {
1603 if (port->wwpn == els_logo->nport_wwpn)
1604 break;
1605 }
1606 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1607
1608 if (!port || (port->wwpn != els_logo->nport_wwpn)) {
1609		ZFCP_LOG_DEBUG("ignored incoming LOGO for nonexistent port "
1610 "with d_id 0x%08x on adapter %s\n",
1611 status_buffer->d_id,
1612 zfcp_get_busid_by_adapter(adapter));
1613 } else {
1614 debug_text_event(adapter->erp_dbf, 1, "unsol_els_logo:");
1615 debug_event(adapter->erp_dbf, 1, &els_logo->nport_wwpn, 8);
1616 zfcp_erp_port_forced_reopen(port, 0);
1617 }
1618}
1619
1620static void
1621zfcp_fsf_incoming_els_unknown(struct zfcp_adapter *adapter,
1622 struct fsf_status_read_buffer *status_buffer)
1623{
1624 zfcp_in_els_dbf_event(adapter, "##undef", status_buffer, 24);
1625 ZFCP_LOG_NORMAL("warning: unknown incoming ELS 0x%08x "
1626 "for adapter %s\n", *(u32 *) (status_buffer->payload),
1627 zfcp_get_busid_by_adapter(adapter));
1628
1629}
1630
1631void
1632zfcp_fsf_incoming_els(struct zfcp_fsf_req *fsf_req)
1633{
1634 struct fsf_status_read_buffer *status_buffer;
1635 u32 els_type;
1636 struct zfcp_adapter *adapter;
1637
1638 status_buffer = fsf_req->data.status_read.buffer;
1639 els_type = *(u32 *) (status_buffer->payload);
1640 adapter = fsf_req->adapter;
1641
1642 if (els_type == LS_PLOGI)
1643 zfcp_fsf_incoming_els_plogi(adapter, status_buffer);
1644 else if (els_type == LS_LOGO)
1645 zfcp_fsf_incoming_els_logo(adapter, status_buffer);
1646 else if ((els_type & 0xffff0000) == LS_RSCN)
1647		/* mask keeps command and page length, drops the 16-bit payload length */
1648 zfcp_fsf_incoming_els_rscn(adapter, status_buffer);
1649 else
1650 zfcp_fsf_incoming_els_unknown(adapter, status_buffer);
1651}
1652
1653
1654/**
1655 * zfcp_gid_pn_buffers_alloc - allocate buffers for GID_PN nameserver request
1656 * @gid_pn: pointer to return pointer to struct zfcp_gid_pn_data
1657 * @pool: pointer to mempool_t if non-null memory pool is used for allocation
1658 */
1659static int
1660zfcp_gid_pn_buffers_alloc(struct zfcp_gid_pn_data **gid_pn, mempool_t *pool)
1661{
1662 struct zfcp_gid_pn_data *data;
1663
1664 if (pool != NULL) {
1665 data = mempool_alloc(pool, GFP_ATOMIC);
1666 if (likely(data != NULL)) {
1667 data->ct.pool = pool;
1668 }
1669 } else {
1670 data = kmalloc(sizeof(struct zfcp_gid_pn_data), GFP_ATOMIC);
1671 }
1672
1673 if (NULL == data)
1674 return -ENOMEM;
1675
1676 memset(data, 0, sizeof(*data));
1677 data->ct.req = &data->req;
1678 data->ct.resp = &data->resp;
1679 data->ct.req_count = data->ct.resp_count = 1;
1680 zfcp_address_to_sg(&data->ct_iu_req, &data->req);
1681 zfcp_address_to_sg(&data->ct_iu_resp, &data->resp);
1682 data->req.length = sizeof(struct ct_iu_gid_pn_req);
1683 data->resp.length = sizeof(struct ct_iu_gid_pn_resp);
1684
1685 *gid_pn = data;
1686 return 0;
1687}
1688
1689/**
1690 * zfcp_gid_pn_buffers_free - free buffers for GID_PN nameserver request
1691 * @gid_pn: pointer to struct zfcp_gid_pn_data which has to be freed
1692 */
1693static void
1694zfcp_gid_pn_buffers_free(struct zfcp_gid_pn_data *gid_pn)
1695{
1696	if (gid_pn->ct.pool)
1697 mempool_free(gid_pn, gid_pn->ct.pool);
1698 else
1699 kfree(gid_pn);
1700
1701 return;
1702}
1703
1704/**
1705 * zfcp_ns_gid_pn_request - initiate GID_PN nameserver request
1706 * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
1707 */
1708int
1709zfcp_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
1710{
1711 int ret;
1712 struct ct_iu_gid_pn_req *ct_iu_req;
1713 struct zfcp_gid_pn_data *gid_pn;
1714 struct zfcp_adapter *adapter = erp_action->adapter;
1715
1716 ret = zfcp_gid_pn_buffers_alloc(&gid_pn, adapter->pool.data_gid_pn);
1717 if (ret < 0) {
1718 ZFCP_LOG_INFO("error: buffer allocation for gid_pn nameserver "
1719 "request failed for adapter %s\n",
1720 zfcp_get_busid_by_adapter(adapter));
1721 goto out;
1722 }
1723
1724 /* setup nameserver request */
1725 ct_iu_req = zfcp_sg_to_address(gid_pn->ct.req);
1726 ct_iu_req->header.revision = ZFCP_CT_REVISION;
1727 ct_iu_req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
1728 ct_iu_req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
1729 ct_iu_req->header.options = ZFCP_CT_SYNCHRONOUS;
1730 ct_iu_req->header.cmd_rsp_code = ZFCP_CT_GID_PN;
1731 ct_iu_req->header.max_res_size = ZFCP_CT_MAX_SIZE;
1732 ct_iu_req->wwpn = erp_action->port->wwpn;
1733
1734 /* setup parameters for send generic command */
1735 gid_pn->ct.port = adapter->nameserver_port;
1736 gid_pn->ct.handler = zfcp_ns_gid_pn_handler;
1737 gid_pn->ct.handler_data = (unsigned long) gid_pn;
1738 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
1739 gid_pn->ct.timer = &erp_action->timer;
1740 gid_pn->port = erp_action->port;
1741
1742 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
1743 erp_action);
1744 if (ret) {
1745 ZFCP_LOG_INFO("error: initiation of gid_pn nameserver request "
1746 "failed for adapter %s\n",
1747 zfcp_get_busid_by_adapter(adapter));
1748
1749 zfcp_gid_pn_buffers_free(gid_pn);
1750 }
1751
1752 out:
1753 return ret;
1754}
1755
1756/**
1757 * zfcp_ns_gid_pn_handler - handler for GID_PN nameserver request
1758 * @data: unsigned long, contains pointer to struct zfcp_gid_pn_data
1759 */
1760static void zfcp_ns_gid_pn_handler(unsigned long data)
1761{
1762 struct zfcp_port *port;
1763 struct zfcp_send_ct *ct;
1764 struct ct_iu_gid_pn_req *ct_iu_req;
1765 struct ct_iu_gid_pn_resp *ct_iu_resp;
1766 struct zfcp_gid_pn_data *gid_pn;
1767
1768
1769 gid_pn = (struct zfcp_gid_pn_data *) data;
1770 port = gid_pn->port;
1771 ct = &gid_pn->ct;
1772 ct_iu_req = zfcp_sg_to_address(ct->req);
1773 ct_iu_resp = zfcp_sg_to_address(ct->resp);
1774
1775 if ((ct->status != 0) || zfcp_check_ct_response(&ct_iu_resp->header)) {
1776 /* FIXME: do we need some specific erp entry points */
1777 atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status);
1778 goto failed;
1779 }
1780 /* paranoia */
1781 if (ct_iu_req->wwpn != port->wwpn) {
1782 ZFCP_LOG_NORMAL("bug: wwpn 0x%016Lx returned by nameserver "
1783 "lookup does not match expected wwpn 0x%016Lx "
1784 "for adapter %s\n", ct_iu_req->wwpn, port->wwpn,
1785 zfcp_get_busid_by_port(port));
1786 goto mismatch;
1787 }
1788
1789 /* looks like a valid d_id */
1790 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
1791 atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
1792 ZFCP_LOG_DEBUG("adapter %s: wwpn=0x%016Lx ---> d_id=0x%08x\n",
1793 zfcp_get_busid_by_port(port), port->wwpn, port->d_id);
1794 goto out;
1795
1796 mismatch:
1797 ZFCP_LOG_DEBUG("CT IUs do not match:\n");
1798 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, (char *) ct_iu_req,
1799 sizeof(struct ct_iu_gid_pn_req));
1800 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, (char *) ct_iu_resp,
1801 sizeof(struct ct_iu_gid_pn_resp));
1802
1803 failed:
1804 ZFCP_LOG_NORMAL("warning: failed gid_pn nameserver request for wwpn "
1805 "0x%016Lx for adapter %s\n",
1806 port->wwpn, zfcp_get_busid_by_port(port));
1807 out:
1808 zfcp_gid_pn_buffers_free(gid_pn);
1809 return;
1810}
1811
1812/* reject CT_IU reason codes acc. to FC-GS-4 */
1813static const struct zfcp_rc_entry zfcp_ct_rc[] = {
1814 {0x01, "invalid command code"},
1815 {0x02, "invalid version level"},
1816 {0x03, "logical error"},
1817 {0x04, "invalid CT_IU size"},
1818 {0x05, "logical busy"},
1819 {0x07, "protocol error"},
1820 {0x09, "unable to perform command request"},
1821 {0x0b, "command not supported"},
1822 {0x0d, "server not available"},
1823 {0x0e, "session could not be established"},
1824 {0xff, "vendor specific error"},
1825 {0, NULL},
1826};
1827
1828/* LS_RJT reason codes acc. to FC-FS */
1829static const struct zfcp_rc_entry zfcp_ls_rjt_rc[] = {
1830 {0x01, "invalid LS_Command code"},
1831 {0x03, "logical error"},
1832 {0x05, "logical busy"},
1833 {0x07, "protocol error"},
1834 {0x09, "unable to perform command request"},
1835 {0x0b, "command not supported"},
1836 {0x0e, "command already in progress"},
1837 {0xff, "vendor specific error"},
1838 {0, NULL},
1839};
1840
1841/* reject reason codes according to FC-PH/FC-FS */
1842static const struct zfcp_rc_entry zfcp_p_rjt_rc[] = {
1843 {0x01, "invalid D_ID"},
1844 {0x02, "invalid S_ID"},
1845 {0x03, "Nx_Port not available, temporary"},
1846	{0x04, "Nx_Port not available, permanent"},
1847 {0x05, "class not supported"},
1848 {0x06, "delimiter usage error"},
1849 {0x07, "TYPE not supported"},
1850 {0x08, "invalid Link_Control"},
1851 {0x09, "invalid R_CTL field"},
1852 {0x0a, "invalid F_CTL field"},
1853 {0x0b, "invalid OX_ID"},
1854 {0x0c, "invalid RX_ID"},
1855 {0x0d, "invalid SEQ_ID"},
1856 {0x0e, "invalid DF_CTL"},
1857 {0x0f, "invalid SEQ_CNT"},
1858 {0x10, "invalid parameter field"},
1859 {0x11, "exchange error"},
1860 {0x12, "protocol error"},
1861 {0x13, "incorrect length"},
1862 {0x14, "unsupported ACK"},
1863 {0x15, "class of service not supported by entity at FFFFFE"},
1864 {0x16, "login required"},
1865 {0x17, "excessive sequences attempted"},
1866 {0x18, "unable to establish exchange"},
1867 {0x1a, "fabric path not available"},
1868 {0x1b, "invalid VC_ID (class 4)"},
1869 {0x1c, "invalid CS_CTL field"},
1870 {0x1d, "insufficient resources for VC (class 4)"},
1871 {0x1f, "invalid class of service"},
1872 {0x20, "preemption request rejected"},
1873 {0x21, "preemption not enabled"},
1874 {0x22, "multicast error"},
1875 {0x23, "multicast error terminate"},
1876 {0x24, "process login required"},
1877 {0xff, "vendor specific reject"},
1878 {0, NULL},
1879};
1880
1881/**
1882 * zfcp_rc_description - return description for given reason code
1883 * @code: reason code
1884 * @rc_table: table of reason codes and descriptions
1885 */
1886static inline const char *
1887zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table)
1888{
1889 const char *descr = "unknown reason code";
1890
1891 do {
1892 if (code == rc_table->code) {
1893 descr = rc_table->description;
1894 break;
1895 }
1896 rc_table++;
1897 } while (rc_table->code && rc_table->description);
1898
1899 return descr;
1900}
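For instance, looked up against the zfcp_ct_rc table above (a usage sketch):

	/* matches the 0x09 entry: "unable to perform command request" */
	const char *descr = zfcp_rc_description(0x09, zfcp_ct_rc);

	/* a code missing from the table yields the default text */
	descr = zfcp_rc_description(0x42, zfcp_ct_rc); /* "unknown reason code" */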
1901
1902/**
1903 * zfcp_check_ct_response - evaluate reason code for CT_IU
1904 * @rjt: response payload to a CT_IU request
1905 * Return: 0 for accept CT_IU, 1 for reject CT_IU or invalid response code
1906 */
1907int
1908zfcp_check_ct_response(struct ct_hdr *rjt)
1909{
1910 if (rjt->cmd_rsp_code == ZFCP_CT_ACCEPT)
1911 return 0;
1912
1913 if (rjt->cmd_rsp_code != ZFCP_CT_REJECT) {
1914 ZFCP_LOG_NORMAL("error: invalid Generic Service command/"
1915 "response code (0x%04hx)\n",
1916 rjt->cmd_rsp_code);
1917 return 1;
1918 }
1919
1920 ZFCP_LOG_INFO("Generic Service command rejected\n");
1921 ZFCP_LOG_INFO("%s (0x%02x, 0x%02x, 0x%02x)\n",
1922 zfcp_rc_description(rjt->reason_code, zfcp_ct_rc),
1923 (u32) rjt->reason_code, (u32) rjt->reason_code_expl,
1924 (u32) rjt->vendor_unique);
1925
1926 return 1;
1927}
1928
1929/**
1930 * zfcp_print_els_rjt - print reject parameter and description for ELS reject
1931 * @rjt_par: reject parameter acc. to FC-PH/FC-FS
1932 * @rc_table: table of reason codes and descriptions
1933 */
1934static inline void
1935zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par,
1936 const struct zfcp_rc_entry *rc_table)
1937{
1938 ZFCP_LOG_INFO("%s (%02x %02x %02x %02x)\n",
1939 zfcp_rc_description(rjt_par->reason_code, rc_table),
1940 (u32) rjt_par->action, (u32) rjt_par->reason_code,
1941 (u32) rjt_par->reason_expl, (u32) rjt_par->vendor_unique);
1942}
1943
1944/**
1945 * zfcp_handle_els_rjt - evaluate status qualifier/reason code on ELS reject
1946 * @sq: status qualifier word
1947 * @rjt_par: reject parameter as described in FC-PH and FC-FS
1948 * Return: -EREMOTEIO for LS_RJT, -EREMCHG for invalid D_ID, -EIO else
1949 */
1950int
1951zfcp_handle_els_rjt(u32 sq, struct zfcp_ls_rjt_par *rjt_par)
1952{
1953 int ret = -EIO;
1954
1955 if (sq == FSF_IOSTAT_NPORT_RJT) {
1956 ZFCP_LOG_INFO("ELS rejected (P_RJT)\n");
1957 zfcp_print_els_rjt(rjt_par, zfcp_p_rjt_rc);
1958 /* invalid d_id */
1959 if (rjt_par->reason_code == 0x01)
1960 ret = -EREMCHG;
1961 } else if (sq == FSF_IOSTAT_FABRIC_RJT) {
1962 ZFCP_LOG_INFO("ELS rejected (F_RJT)\n");
1963 zfcp_print_els_rjt(rjt_par, zfcp_p_rjt_rc);
1964 /* invalid d_id */
1965 if (rjt_par->reason_code == 0x01)
1966 ret = -EREMCHG;
1967 } else if (sq == FSF_IOSTAT_LS_RJT) {
1968 ZFCP_LOG_INFO("ELS rejected (LS_RJT)\n");
1969 zfcp_print_els_rjt(rjt_par, zfcp_ls_rjt_rc);
1970 ret = -EREMOTEIO;
1971 } else
1972 ZFCP_LOG_INFO("unexpected SQ: 0x%02x\n", sq);
1973
1974 return ret;
1975}
1976
1977#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
new file mode 100644
index 000000000000..0fc46381fc22
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -0,0 +1,312 @@
1/*
2 * linux/drivers/s390/scsi/zfcp_ccw.c
3 *
4 * FCP adapter driver for IBM eServer zSeries
5 *
6 * CCW driver related routines
7 *
8 * (C) Copyright IBM Corp. 2003, 2004
9 *
10 * Authors:
11 * Martin Peschke <mpeschke@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 * Andreas Herrmann <aherrman@de.ibm.com>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2, or (at your option)
18 * any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 */
29
30#define ZFCP_CCW_C_REVISION "$Revision: 1.58 $"
31
32#include "zfcp_ext.h"
33
34#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
35
36static int zfcp_ccw_probe(struct ccw_device *);
37static void zfcp_ccw_remove(struct ccw_device *);
38static int zfcp_ccw_set_online(struct ccw_device *);
39static int zfcp_ccw_set_offline(struct ccw_device *);
40static int zfcp_ccw_notify(struct ccw_device *, int);
41static void zfcp_ccw_shutdown(struct device *);
42
43static struct ccw_device_id zfcp_ccw_device_id[] = {
44 {CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
45 ZFCP_CONTROL_UNIT_MODEL,
46 ZFCP_DEVICE_TYPE,
47 ZFCP_DEVICE_MODEL)},
48 {CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
49 ZFCP_CONTROL_UNIT_MODEL,
50 ZFCP_DEVICE_TYPE,
51 ZFCP_DEVICE_MODEL_PRIV)},
52 {},
53};
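Concretely, with the identification parameters defined in zfcp_def.h (control unit type/model 0x1731/0x03, device type 0x1732, device models 0x03 and 0x04), this table claims FCP subchannels that report a 0x1732/0x03 or a 0x1732/0x04 device behind a 0x1731/0x03 control unit; any other type/model combination is left to other drivers.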
54
55static struct ccw_driver zfcp_ccw_driver = {
56 .owner = THIS_MODULE,
57 .name = ZFCP_NAME,
58 .ids = zfcp_ccw_device_id,
59 .probe = zfcp_ccw_probe,
60 .remove = zfcp_ccw_remove,
61 .set_online = zfcp_ccw_set_online,
62 .set_offline = zfcp_ccw_set_offline,
63 .notify = zfcp_ccw_notify,
64 .driver = {
65 .shutdown = zfcp_ccw_shutdown,
66 },
67};
68
69MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
70
71/**
72 * zfcp_ccw_probe - probe function of zfcp driver
73 * @ccw_device: pointer to belonging ccw device
74 *
75 * This function gets called by the common i/o layer and sets up the initial
76 * data structures for each FCP adapter detected by the system. It also
77 * creates the sysfs files for the adapter. In addition, the nameserver port
78 * is added to the adapter's list of ports and its sysfs representation is
79 * created as well.
80 */
81static int
82zfcp_ccw_probe(struct ccw_device *ccw_device)
83{
84 struct zfcp_adapter *adapter;
85 int retval = 0;
86
87 down(&zfcp_data.config_sema);
88 adapter = zfcp_adapter_enqueue(ccw_device);
89 if (!adapter)
90 retval = -EINVAL;
91 else
92 ZFCP_LOG_DEBUG("Probed adapter %s\n",
93 zfcp_get_busid_by_adapter(adapter));
94 up(&zfcp_data.config_sema);
95 return retval;
96}
97
98/**
99 * zfcp_ccw_remove - remove function of zfcp driver
100 * @ccw_device: pointer to belonging ccw device
101 *
102 * This function gets called by the common i/o layer and removes an adapter
103 * from the system. Its task is to dismantle all units and ports that
104 * belong to this adapter. In addition, all resources of the adapter are
105 * freed as well.
106 */
107static void
108zfcp_ccw_remove(struct ccw_device *ccw_device)
109{
110 struct zfcp_adapter *adapter;
111 struct zfcp_port *port, *p;
112 struct zfcp_unit *unit, *u;
113
114 ccw_device_set_offline(ccw_device);
115 down(&zfcp_data.config_sema);
116 adapter = dev_get_drvdata(&ccw_device->dev);
117
118 ZFCP_LOG_DEBUG("Removing adapter %s\n",
119 zfcp_get_busid_by_adapter(adapter));
120 write_lock_irq(&zfcp_data.config_lock);
121 list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
122 list_for_each_entry_safe(unit, u, &port->unit_list_head, list) {
123 list_move(&unit->list, &port->unit_remove_lh);
124 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
125 &unit->status);
126 }
127 list_move(&port->list, &adapter->port_remove_lh);
128 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
129 }
130 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
131 write_unlock_irq(&zfcp_data.config_lock);
132
133 list_for_each_entry_safe(port, p, &adapter->port_remove_lh, list) {
134 list_for_each_entry_safe(unit, u, &port->unit_remove_lh, list) {
135 zfcp_unit_dequeue(unit);
136 }
137 zfcp_port_dequeue(port);
138 }
139 zfcp_adapter_wait(adapter);
140 zfcp_adapter_dequeue(adapter);
141
142 up(&zfcp_data.config_sema);
143}
144
145/**
146 * zfcp_ccw_set_online - set_online function of zfcp driver
147 * @ccw_device: pointer to belonging ccw device
148 *
149 * This function gets called by the common i/o layer and sets an adapter
150 * into state online. Setting an fcp device online means that it will be
151 * registered with the SCSI stack, that the QDIO queues will be set up
152 * and that the adapter will be opened (asynchronously).
153 */
154static int
155zfcp_ccw_set_online(struct ccw_device *ccw_device)
156{
157 struct zfcp_adapter *adapter;
158 int retval;
159
160 down(&zfcp_data.config_sema);
161 adapter = dev_get_drvdata(&ccw_device->dev);
162
163 retval = zfcp_adapter_debug_register(adapter);
164 if (retval)
165 goto out;
166 retval = zfcp_erp_thread_setup(adapter);
167 if (retval) {
168 ZFCP_LOG_INFO("error: start of error recovery thread for "
169 "adapter %s failed\n",
170 zfcp_get_busid_by_adapter(adapter));
171 goto out_erp_thread;
172 }
173
174 retval = zfcp_adapter_scsi_register(adapter);
175 if (retval)
176 goto out_scsi_register;
177 zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING,
178 ZFCP_SET);
179 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
180 zfcp_erp_wait(adapter);
181 goto out;
182
183 out_scsi_register:
184 zfcp_erp_thread_kill(adapter);
185 out_erp_thread:
186 zfcp_adapter_debug_unregister(adapter);
187 out:
188 up(&zfcp_data.config_sema);
189 return retval;
190}
191
192/**
193 * zfcp_ccw_set_offline - set_offline function of zfcp driver
194 * @ccw_device: pointer to belonging ccw device
195 *
196 * This function gets called by the common i/o layer and sets an adapter
197 * into state offline. Setting an fcp device offline means that it will be
198 * unregistered from the SCSI stack and that the adapter will be shut down
199 * asynchronously.
200 */
201static int
202zfcp_ccw_set_offline(struct ccw_device *ccw_device)
203{
204 struct zfcp_adapter *adapter;
205
206 down(&zfcp_data.config_sema);
207 adapter = dev_get_drvdata(&ccw_device->dev);
208 zfcp_erp_adapter_shutdown(adapter, 0);
209 zfcp_erp_wait(adapter);
210 zfcp_adapter_scsi_unregister(adapter);
211 zfcp_erp_thread_kill(adapter);
212 zfcp_adapter_debug_unregister(adapter);
213 up(&zfcp_data.config_sema);
214 return 0;
215}
216
217/**
218 * zfcp_ccw_notify
219 * @ccw_device: pointer to belonging ccw device
220 * @event: indicates if adapter was detached or attached
221 *
222 * This function gets called by the common i/o layer if an adapter has gone
223 * or reappeared.
224 */
225static int
226zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
227{
228 struct zfcp_adapter *adapter;
229
230 down(&zfcp_data.config_sema);
231 adapter = dev_get_drvdata(&ccw_device->dev);
232 switch (event) {
233 case CIO_GONE:
234 ZFCP_LOG_NORMAL("adapter %s: device gone\n",
235 zfcp_get_busid_by_adapter(adapter));
236		debug_text_event(adapter->erp_dbf, 1, "dev_gone");
237 zfcp_erp_adapter_shutdown(adapter, 0);
238 break;
239 case CIO_NO_PATH:
240 ZFCP_LOG_NORMAL("adapter %s: no path\n",
241 zfcp_get_busid_by_adapter(adapter));
242		debug_text_event(adapter->erp_dbf, 1, "no_path");
243 zfcp_erp_adapter_shutdown(adapter, 0);
244 break;
245 case CIO_OPER:
246 ZFCP_LOG_NORMAL("adapter %s: operational again\n",
247 zfcp_get_busid_by_adapter(adapter));
248		debug_text_event(adapter->erp_dbf, 1, "dev_oper");
249 zfcp_erp_modify_adapter_status(adapter,
250 ZFCP_STATUS_COMMON_RUNNING,
251 ZFCP_SET);
252 zfcp_erp_adapter_reopen(adapter,
253 ZFCP_STATUS_COMMON_ERP_FAILED);
254 break;
255 }
256 zfcp_erp_wait(adapter);
257 up(&zfcp_data.config_sema);
258 return 1;
259}
260
261/**
262 * zfcp_ccw_register - ccw register function
263 *
264 * Registers the driver with the common i/o layer. This function will be
265 * called at module load time/system start.
266 */
267int __init
268zfcp_ccw_register(void)
269{
270 int retval;
271
272 retval = ccw_driver_register(&zfcp_ccw_driver);
273 if (retval)
274 goto out;
275 retval = zfcp_sysfs_driver_create_files(&zfcp_ccw_driver.driver);
276 if (retval)
277 ccw_driver_unregister(&zfcp_ccw_driver);
278 out:
279 return retval;
280}
281
282/**
283 * zfcp_ccw_unregister - ccw unregister function
284 *
285 * Unregisters the driver from the common i/o layer. This function will be
286 * called at module unload/system shutdown.
287 */
288void __exit
289zfcp_ccw_unregister(void)
290{
291 zfcp_sysfs_driver_remove_files(&zfcp_ccw_driver.driver);
292 ccw_driver_unregister(&zfcp_ccw_driver);
293}
294
295/**
296 * zfcp_ccw_shutdown - gets called on reboot/shutdown
297 *
298 * Makes sure that QDIO queues are down when the system gets stopped.
299 */
300static void
301zfcp_ccw_shutdown(struct device *dev)
302{
303 struct zfcp_adapter *adapter;
304
305 down(&zfcp_data.config_sema);
306 adapter = dev_get_drvdata(dev);
307 zfcp_erp_adapter_shutdown(adapter, 0);
308 zfcp_erp_wait(adapter);
309 up(&zfcp_data.config_sema);
310}
311
312#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
new file mode 100644
index 000000000000..53fcccbb424c
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -0,0 +1,1121 @@
1/*
2 *
3 * linux/drivers/s390/scsi/zfcp_def.h
4 *
5 * FCP adapter driver for IBM eServer zSeries
6 *
7 * (C) Copyright IBM Corp. 2002, 2004
8 *
9 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
10 * Raimund Schroeder <raimund.schroeder@de.ibm.com>
11 * Aron Zeh
12 * Wolfgang Taphorn
13 * Stefan Bader <stefan.bader@de.ibm.com>
14 * Heiko Carstens <heiko.carstens@de.ibm.com>
15 * Andreas Herrmann <aherrman@de.ibm.com>
16 * Volker Sameske <sameske@de.ibm.com>
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 */
32
33
34#ifndef ZFCP_DEF_H
35#define ZFCP_DEF_H
36
37#define ZFCP_DEF_REVISION "$Revision: 1.111 $"
38
39/*************************** INCLUDES *****************************************/
40
41#include <linux/init.h>
42#include <linux/moduleparam.h>
43#include <linux/miscdevice.h>
44#include <linux/major.h>
45#include <linux/blkdev.h>
46#include <linux/delay.h>
47#include <linux/timer.h>
48#include <scsi/scsi.h>
49#include <scsi/scsi_tcq.h>
50#include <scsi/scsi_cmnd.h>
51#include <scsi/scsi_device.h>
52#include <scsi/scsi_host.h>
53#include <scsi/scsi_transport.h>
54#include <scsi/scsi_transport_fc.h>
55#include "../../fc4/fc.h"
56#include "zfcp_fsf.h"
57#include <asm/ccwdev.h>
58#include <asm/qdio.h>
59#include <asm/debug.h>
60#include <asm/ebcdic.h>
61#include <linux/mempool.h>
62#include <linux/syscalls.h>
63#include <linux/ioctl.h>
64#include <linux/ioctl32.h>
65
66/************************ DEBUG FLAGS *****************************************/
67
68#define ZFCP_PRINT_FLAGS
69
70/********************* GENERAL DEFINES *********************************/
71
72/* zfcp version number, it consists of major, minor, and patch-level number */
73#define ZFCP_VERSION "4.2.0"
74
75/**
76 * zfcp_sg_to_address - determine kernel address from struct scatterlist
77 * @list: struct scatterlist
78 * Return: kernel address
79 */
80static inline void *
81zfcp_sg_to_address(struct scatterlist *list)
82{
83 return (void *) (page_address(list->page) + list->offset);
84}
85
86/**
87 * zfcp_address_to_sg - set up struct scatterlist from kernel address
88 * @address: kernel address
89 * @list: struct scatterlist
90 */
91static inline void
92zfcp_address_to_sg(void *address, struct scatterlist *list)
93{
94 list->page = virt_to_page(address);
95 list->offset = ((unsigned long) address) & (PAGE_SIZE - 1);
96}
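The two helpers above are inverses for any buffer that does not cross a page boundary; a brief usage sketch (the 512-byte buffer and the missing error handling are simplifications for illustration):

	struct scatterlist sg;
	void *buf = kmalloc(512, GFP_KERNEL);

	zfcp_address_to_sg(buf, &sg);	/* fills sg.page and sg.offset */
	sg.length = 512;		/* length must still be set by the caller */

	/* page_address(sg.page) + sg.offset recovers the original pointer */
	BUG_ON(zfcp_sg_to_address(&sg) != buf);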
97
98/********************* SCSI SPECIFIC DEFINES *********************************/
99
100/* 32 bit for SCSI ID and LUN as long as the SCSI stack uses this type */
101typedef u32 scsi_id_t;
102typedef u32 scsi_lun_t;
103
104#define ZFCP_ERP_SCSI_LOW_MEM_TIMEOUT (100*HZ)
105#define ZFCP_SCSI_ER_TIMEOUT (100*HZ)
106
107/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
108
109/* Adapter Identification Parameters */
110#define ZFCP_CONTROL_UNIT_TYPE 0x1731
111#define ZFCP_CONTROL_UNIT_MODEL 0x03
112#define ZFCP_DEVICE_TYPE 0x1732
113#define ZFCP_DEVICE_MODEL 0x03
114#define ZFCP_DEVICE_MODEL_PRIV 0x04
115
116/* allow as many chained SBALs as are supported by hardware */
117#define ZFCP_MAX_SBALS_PER_REQ FSF_MAX_SBALS_PER_REQ
118#define ZFCP_MAX_SBALS_PER_CT_REQ FSF_MAX_SBALS_PER_REQ
119#define ZFCP_MAX_SBALS_PER_ELS_REQ FSF_MAX_SBALS_PER_ELS_REQ
120
121/* DMQ bug workaround: don't use last SBALE */
122#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
123
124/* index of last SBALE (with respect to DMQ bug workaround) */
125#define ZFCP_LAST_SBALE_PER_SBAL (ZFCP_MAX_SBALES_PER_SBAL - 1)
126
127/* max. number of (data buffer) SBALEs in largest SBAL chain */
128#define ZFCP_MAX_SBALES_PER_REQ \
129 (ZFCP_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2)
130	/* "- 2": SBALEs 0 and 1 of the first SBAL in the chain hold the request ID and the QTCB */
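To make the arithmetic concrete: s390 QDIO buffers carry 16 elements, so the DMQ workaround above leaves ZFCP_MAX_SBALES_PER_SBAL = 15. If, purely for illustration, FSF_MAX_SBALS_PER_REQ were 36, the macro would evaluate to 36 * 15 - 2 = 538 data SBALEs, the two subtracted entries being the request-ID and QTCB slots of the first SBAL.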
131
132/* FIXME(tune): free space should be one max. SBAL chain plus what? */
133#define ZFCP_QDIO_PCI_INTERVAL (QDIO_MAX_BUFFERS_PER_Q \
134 - (ZFCP_MAX_SBALS_PER_REQ + 4))
135
136#define ZFCP_SBAL_TIMEOUT (5*HZ)
137
138#define ZFCP_TYPE2_RECOVERY_TIME (8*HZ)
139
140/* queue polling (values in microseconds) */
141#define ZFCP_MAX_INPUT_THRESHOLD 5000 /* FIXME: tune */
142#define ZFCP_MAX_OUTPUT_THRESHOLD 1000 /* FIXME: tune */
143#define ZFCP_MIN_INPUT_THRESHOLD 1 /* ignored by QDIO layer */
144#define ZFCP_MIN_OUTPUT_THRESHOLD 1 /* ignored by QDIO layer */
145
146#define QDIO_SCSI_QFMT 1 /* 1 for FSF */
147
148/********************* FSF SPECIFIC DEFINES *********************************/
149
150#define ZFCP_ULP_INFO_VERSION 26
151#define ZFCP_QTCB_VERSION FSF_QTCB_CURRENT_VERSION
152/* ATTENTION: value must not be used by hardware */
153#define FSF_QTCB_UNSOLICITED_STATUS 0x6305
154#define ZFCP_STATUS_READ_FAILED_THRESHOLD 3
155#define ZFCP_STATUS_READS_RECOM FSF_STATUS_READS_RECOM
156#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 6
157#define ZFCP_EXCHANGE_CONFIG_DATA_SLEEP 50
158
159/* timeout value for "default timer" for fsf requests */
160#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
161
162/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
163
164typedef unsigned long long wwn_t;
165typedef unsigned int fc_id_t;
166typedef unsigned long long fcp_lun_t;
167/* data length field may be at variable position in FCP-2 FCP_CMND IU */
168typedef unsigned int fcp_dl_t;
169
170#define ZFCP_FC_SERVICE_CLASS_DEFAULT FSF_CLASS_3
171
172/* timeout for name-server lookup (in seconds) */
173#define ZFCP_NS_GID_PN_TIMEOUT 10
174
175/* largest SCSI command we can process */
176/* FCP-2 (FCP_CMND IU) allows up to (255-3+16) */
177#define ZFCP_MAX_SCSI_CMND_LENGTH 255
178/* maximum number of commands in LUN queue (tagged queueing) */
179#define ZFCP_CMND_PER_LUN 32
180
181/* task attribute values in FCP-2 FCP_CMND IU */
182#define SIMPLE_Q 0
183#define HEAD_OF_Q 1
184#define ORDERED_Q 2
185#define ACA_Q 4
186#define UNTAGGED 5
187
188/* task management flags in FCP-2 FCP_CMND IU */
189#define FCP_CLEAR_ACA 0x40
190#define FCP_TARGET_RESET 0x20
191#define FCP_LOGICAL_UNIT_RESET 0x10
192#define FCP_CLEAR_TASK_SET 0x04
193#define FCP_ABORT_TASK_SET 0x02
194
195#define FCP_CDB_LENGTH 16
196
197#define ZFCP_DID_MASK 0x00FFFFFF
198
199/* FCP(-2) FCP_CMND IU */
200struct fcp_cmnd_iu {
201 fcp_lun_t fcp_lun; /* FCP logical unit number */
202 u8 crn; /* command reference number */
203 u8 reserved0:5; /* reserved */
204 u8 task_attribute:3; /* task attribute */
205 u8 task_management_flags; /* task management flags */
206 u8 add_fcp_cdb_length:6; /* additional FCP_CDB length */
207 u8 rddata:1; /* read data */
208 u8 wddata:1; /* write data */
209 u8 fcp_cdb[FCP_CDB_LENGTH];
210} __attribute__((packed));
211
212/* FCP(-2) FCP_RSP IU */
213struct fcp_rsp_iu {
214 u8 reserved0[10];
215 union {
216 struct {
217 u8 reserved1:3;
218 u8 fcp_conf_req:1;
219 u8 fcp_resid_under:1;
220 u8 fcp_resid_over:1;
221 u8 fcp_sns_len_valid:1;
222 u8 fcp_rsp_len_valid:1;
223 } bits;
224 u8 value;
225 } validity;
226 u8 scsi_status;
227 u32 fcp_resid;
228 u32 fcp_sns_len;
229 u32 fcp_rsp_len;
230} __attribute__((packed));
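As a hedged sketch of how the validity bits above are meant to be consumed (the helper name is invented for illustration):

static inline int
fcp_rsp_has_sense(struct fcp_rsp_iu *rsp)
{
	/* SCSI sense data follows the IU only if its length field is valid */
	return rsp->validity.bits.fcp_sns_len_valid && rsp->fcp_sns_len > 0;
}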
231
232
233#define RSP_CODE_GOOD 0
234#define RSP_CODE_LENGTH_MISMATCH 1
235#define RSP_CODE_FIELD_INVALID 2
236#define RSP_CODE_RO_MISMATCH 3
237#define RSP_CODE_TASKMAN_UNSUPP 4
238#define RSP_CODE_TASKMAN_FAILED 5
239
240/* see fc-fs */
241#define LS_FAN 0x60000000
242#define LS_RSCN 0x61040000
243
244struct fcp_rscn_head {
245 u8 command;
246 u8 page_length; /* always 0x04 */
247 u16 payload_len;
248} __attribute__((packed));
249
250struct fcp_rscn_element {
251 u8 reserved:2;
252 u8 event_qual:4;
253 u8 addr_format:2;
254 u32 nport_did:24;
255} __attribute__((packed));
256
257#define ZFCP_PORT_ADDRESS 0x0
258#define ZFCP_AREA_ADDRESS 0x1
259#define ZFCP_DOMAIN_ADDRESS 0x2
260#define ZFCP_FABRIC_ADDRESS 0x3
261
262#define ZFCP_PORTS_RANGE_PORT 0xFFFFFF
263#define ZFCP_PORTS_RANGE_AREA 0xFFFF00
264#define ZFCP_PORTS_RANGE_DOMAIN 0xFF0000
265#define ZFCP_PORTS_RANGE_FABRIC 0x000000
266
267#define ZFCP_NO_PORTS_PER_AREA 0x100
268#define ZFCP_NO_PORTS_PER_DOMAIN 0x10000
269#define ZFCP_NO_PORTS_PER_FABRIC 0x1000000
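These masks drive the RSCN matching in zfcp_fsf_incoming_els_rscn() above: the known port's d_id and the RSCN element's N_Port ID are both masked with the range selected by the address format, so a wider affected range compares fewer bits. A small sketch with made-up IDs:

	u32 rscn_did = 0x612345;	/* N_Port ID from an RSCN element (example) */
	u32 port_did = 0x6123ef;	/* d_id of a known port (example) */
	int affected;

	/* area format: both mask down to 0x612300, so this port is affected */
	affected = (port_did & ZFCP_PORTS_RANGE_AREA) ==
		   (rscn_did & ZFCP_PORTS_RANGE_AREA);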
270
271struct fcp_fan {
272 u32 command;
273 u32 fport_did;
274 wwn_t fport_wwpn;
275 wwn_t fport_wwname;
276} __attribute__((packed));
277
278/* see fc-ph */
279struct fcp_logo {
280 u32 command;
281 u32 nport_did;
282 wwn_t nport_wwpn;
283} __attribute__((packed));
284
285/*
286 * FC-FS stuff
287 */
288#define R_A_TOV 10 /* seconds */
289#define ZFCP_ELS_TIMEOUT (2 * R_A_TOV)
290
291#define ZFCP_LS_RLS 0x0f
292#define ZFCP_LS_ADISC 0x52
293#define ZFCP_LS_RPS 0x56
294#define ZFCP_LS_RSCN 0x61
295#define ZFCP_LS_RNID 0x78
296
297struct zfcp_ls_rjt_par {
298 u8 action;
299 u8 reason_code;
300 u8 reason_expl;
301 u8 vendor_unique;
302} __attribute__ ((packed));
303
304struct zfcp_ls_adisc {
305 u8 code;
306 u8 field[3];
307 u32 hard_nport_id;
308 u64 wwpn;
309 u64 wwnn;
310 u32 nport_id;
311} __attribute__ ((packed));
312
313struct zfcp_ls_adisc_acc {
314 u8 code;
315 u8 field[3];
316 u32 hard_nport_id;
317 u64 wwpn;
318 u64 wwnn;
319 u32 nport_id;
320} __attribute__ ((packed));
321
322struct zfcp_rc_entry {
323 u8 code;
324 const char *description;
325};
326
327/*
328 * FC-GS-2 stuff
329 */
330#define ZFCP_CT_REVISION 0x01
331#define ZFCP_CT_DIRECTORY_SERVICE 0xFC
332#define ZFCP_CT_NAME_SERVER 0x02
333#define ZFCP_CT_SYNCHRONOUS 0x00
334#define ZFCP_CT_GID_PN 0x0121
335#define ZFCP_CT_MAX_SIZE 0x1020
336#define ZFCP_CT_ACCEPT 0x8002
337#define ZFCP_CT_REJECT 0x8001
338
339/*
340 * FC-GS-4 stuff
341 */
342#define ZFCP_CT_TIMEOUT (3 * R_A_TOV)
343
344
345/***************** S390 DEBUG FEATURE SPECIFIC DEFINES ***********************/
346
347/* debug feature entries per adapter */
348#define ZFCP_ERP_DBF_INDEX 1
349#define ZFCP_ERP_DBF_AREAS 2
350#define ZFCP_ERP_DBF_LENGTH 16
351#define ZFCP_ERP_DBF_LEVEL 3
352#define ZFCP_ERP_DBF_NAME "zfcperp"
353
354#define ZFCP_CMD_DBF_INDEX 2
355#define ZFCP_CMD_DBF_AREAS 1
356#define ZFCP_CMD_DBF_LENGTH 8
357#define ZFCP_CMD_DBF_LEVEL 3
358#define ZFCP_CMD_DBF_NAME "zfcpcmd"
359
360#define ZFCP_ABORT_DBF_INDEX 2
361#define ZFCP_ABORT_DBF_AREAS 1
362#define ZFCP_ABORT_DBF_LENGTH 8
363#define ZFCP_ABORT_DBF_LEVEL 6
364#define ZFCP_ABORT_DBF_NAME "zfcpabt"
365
366#define ZFCP_IN_ELS_DBF_INDEX 2
367#define ZFCP_IN_ELS_DBF_AREAS 1
368#define ZFCP_IN_ELS_DBF_LENGTH 8
369#define ZFCP_IN_ELS_DBF_LEVEL 6
370#define ZFCP_IN_ELS_DBF_NAME "zfcpels"
371
372/******************** LOGGING MACROS AND DEFINES *****************************/
373
374/*
375 * Logging may be applied on certain kinds of driver operations
376 * independently. Additionally, different log-levels are supported for
377 * each of these areas.
378 */
379
380#define ZFCP_NAME "zfcp"
381
382/* read-only LUN sharing switch initial value */
383#define ZFCP_RO_LUN_SHARING_DEFAULTS 0
384
385/* independent log areas */
386#define ZFCP_LOG_AREA_OTHER 0
387#define ZFCP_LOG_AREA_SCSI 1
388#define ZFCP_LOG_AREA_FSF 2
389#define ZFCP_LOG_AREA_CONFIG 3
390#define ZFCP_LOG_AREA_CIO 4
391#define ZFCP_LOG_AREA_QDIO 5
392#define ZFCP_LOG_AREA_ERP 6
393#define ZFCP_LOG_AREA_FC 7
394
395/* log level values */
396#define ZFCP_LOG_LEVEL_NORMAL 0
397#define ZFCP_LOG_LEVEL_INFO 1
398#define ZFCP_LOG_LEVEL_DEBUG 2
399#define ZFCP_LOG_LEVEL_TRACE 3
400
401/*
402 * this allows removal of logging code by the preprocessor
403 * (the most detailed log level still to be compiled in is specified,
404 * higher log levels are removed)
405 */
406#define ZFCP_LOG_LEVEL_LIMIT ZFCP_LOG_LEVEL_TRACE
407
408/* get "loglevel" nibble assignment */
409#define ZFCP_GET_LOG_VALUE(zfcp_lognibble) \
410 ((atomic_read(&zfcp_data.loglevel) >> (zfcp_lognibble<<2)) & 0xF)
411
412/* set "loglevel" nibble */
413#define ZFCP_SET_LOG_NIBBLE(value, zfcp_lognibble) \
414 (value << (zfcp_lognibble << 2))
415
416/* all log-level defaults are combined to generate initial log-level */
417#define ZFCP_LOG_LEVEL_DEFAULTS \
418 (ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_OTHER) | \
419 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_SCSI) | \
420 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_FSF) | \
421 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_CONFIG) | \
422 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_CIO) | \
423 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_QDIO) | \
424 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_ERP) | \
425 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_FC))
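Worked through for one area: ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_DEBUG, ZFCP_LOG_AREA_FSF) shifts the value 2 left by ZFCP_LOG_AREA_FSF << 2 = 8 bits and so contributes 0x200 to the loglevel word; ZFCP_GET_LOG_VALUE(ZFCP_LOG_AREA_FSF) undoes this by shifting the word right by the same 8 bits and masking with 0xF, yielding 2 again.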
426
427/* check whether we have the right level for logging */
428#define ZFCP_LOG_CHECK(level) \
429 ((ZFCP_GET_LOG_VALUE(ZFCP_LOG_AREA)) >= level)
430
431/* logging routine for zfcp */
432#define _ZFCP_LOG(fmt, args...) \
433 printk(KERN_ERR ZFCP_NAME": %s(%d): " fmt, __FUNCTION__, \
434 __LINE__ , ##args)
435
436#define ZFCP_LOG(level, fmt, args...) \
437do { \
438 if (ZFCP_LOG_CHECK(level)) \
439 _ZFCP_LOG(fmt, ##args); \
440} while (0)
441
442#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_NORMAL
443# define ZFCP_LOG_NORMAL(fmt, args...)
444#else
445# define ZFCP_LOG_NORMAL(fmt, args...) \
446do { \
447 if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_NORMAL)) \
448 printk(KERN_ERR ZFCP_NAME": " fmt, ##args); \
449} while (0)
450#endif
451
452#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_INFO
453# define ZFCP_LOG_INFO(fmt, args...)
454#else
455# define ZFCP_LOG_INFO(fmt, args...) \
456do { \
457 if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_INFO)) \
458 printk(KERN_ERR ZFCP_NAME": " fmt, ##args); \
459} while (0)
460#endif
461
462#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_DEBUG
463# define ZFCP_LOG_DEBUG(fmt, args...)
464#else
465# define ZFCP_LOG_DEBUG(fmt, args...) \
466 ZFCP_LOG(ZFCP_LOG_LEVEL_DEBUG, fmt , ##args)
467#endif
468
469#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_TRACE
470# define ZFCP_LOG_TRACE(fmt, args...)
471#else
472# define ZFCP_LOG_TRACE(fmt, args...) \
473 ZFCP_LOG(ZFCP_LOG_LEVEL_TRACE, fmt , ##args)
474#endif
475
476#ifndef ZFCP_PRINT_FLAGS
477# define ZFCP_LOG_FLAGS(level, fmt, args...)
478#else
479extern u32 flags_dump;
480# define ZFCP_LOG_FLAGS(level, fmt, args...) \
481do { \
482 if (level <= flags_dump) \
483 _ZFCP_LOG(fmt, ##args); \
484} while (0)
485#endif
486
487/*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
488
489/*
490 * Note, the leftmost status byte is common among adapter, port
491 * and unit
492 */
493#define ZFCP_COMMON_FLAGS 0xfff00000
494#define ZFCP_SPECIFIC_FLAGS 0x000fffff
495
496/* common status bits */
497#define ZFCP_STATUS_COMMON_REMOVE 0x80000000
498#define ZFCP_STATUS_COMMON_RUNNING 0x40000000
499#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000
500#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000
501#define ZFCP_STATUS_COMMON_OPENING 0x08000000
502#define ZFCP_STATUS_COMMON_OPEN 0x04000000
503#define ZFCP_STATUS_COMMON_CLOSING 0x02000000
504#define ZFCP_STATUS_COMMON_ERP_INUSE 0x01000000
505#define ZFCP_STATUS_COMMON_ACCESS_DENIED 0x00800000
506
507/* adapter status */
508#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
509#define ZFCP_STATUS_ADAPTER_REGISTERED 0x00000004
510#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
511#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
512#define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP 0x00000020
513#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080
514#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
515#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
516
517#define ZFCP_STATUS_ADAPTER_SCSI_UP \
518 (ZFCP_STATUS_COMMON_UNBLOCKED | \
519 ZFCP_STATUS_ADAPTER_REGISTERED)
520
521
522/* FC-PH/FC-GS well-known address identifiers for generic services */
523#define ZFCP_DID_MANAGEMENT_SERVICE 0xFFFFFA
524#define ZFCP_DID_TIME_SERVICE 0xFFFFFB
525#define ZFCP_DID_DIRECTORY_SERVICE 0xFFFFFC
526#define ZFCP_DID_ALIAS_SERVICE 0xFFFFF8
527#define ZFCP_DID_KEY_DISTRIBUTION_SERVICE 0xFFFFF7
528
529/* remote port status */
530#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
531#define ZFCP_STATUS_PORT_DID_DID 0x00000002
532#define ZFCP_STATUS_PORT_PHYS_CLOSING 0x00000004
533#define ZFCP_STATUS_PORT_NO_WWPN 0x00000008
534#define ZFCP_STATUS_PORT_NO_SCSI_ID 0x00000010
535#define ZFCP_STATUS_PORT_INVALID_WWPN 0x00000020
536#define ZFCP_STATUS_PORT_ACCESS_DENIED 0x00000040
537
538/* for ports with well known addresses */
539#define ZFCP_STATUS_PORT_WKA \
540 (ZFCP_STATUS_PORT_NO_WWPN | \
541 ZFCP_STATUS_PORT_NO_SCSI_ID)
542
543/* logical unit status */
544#define ZFCP_STATUS_UNIT_NOTSUPPUNITRESET 0x00000001
545#define ZFCP_STATUS_UNIT_TEMPORARY 0x00000002
546#define ZFCP_STATUS_UNIT_SHARED 0x00000004
547#define ZFCP_STATUS_UNIT_READONLY 0x00000008
548
549/* FSF request status (this does not have a common part) */
550#define ZFCP_STATUS_FSFREQ_NOT_INIT 0x00000000
551#define ZFCP_STATUS_FSFREQ_POOL 0x00000001
552#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
553#define ZFCP_STATUS_FSFREQ_COMPLETED 0x00000004
554#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008
555#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
556#define ZFCP_STATUS_FSFREQ_ABORTING 0x00000020
557#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
558#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
559#define ZFCP_STATUS_FSFREQ_ABORTED 0x00000100
560#define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200
561#define ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP 0x00000400
562#define ZFCP_STATUS_FSFREQ_RETRY 0x00000800
563#define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000
564
565/*********************** ERROR RECOVERY PROCEDURE DEFINES ********************/
566
567#define ZFCP_MAX_ERPS 3
568
569#define ZFCP_ERP_FSFREQ_TIMEOUT (30 * HZ)
570#define ZFCP_ERP_MEMWAIT_TIMEOUT HZ
571
572#define ZFCP_STATUS_ERP_TIMEDOUT 0x10000000
573#define ZFCP_STATUS_ERP_CLOSE_ONLY 0x01000000
574#define ZFCP_STATUS_ERP_DISMISSING 0x00100000
575#define ZFCP_STATUS_ERP_DISMISSED 0x00200000
576#define ZFCP_STATUS_ERP_LOWMEM 0x00400000
577
578#define ZFCP_ERP_STEP_UNINITIALIZED 0x00000000
579#define ZFCP_ERP_STEP_FSF_XCONFIG 0x00000001
580#define ZFCP_ERP_STEP_PHYS_PORT_CLOSING 0x00000010
581#define ZFCP_ERP_STEP_PORT_CLOSING 0x00000100
582#define ZFCP_ERP_STEP_NAMESERVER_OPEN 0x00000200
583#define ZFCP_ERP_STEP_NAMESERVER_LOOKUP 0x00000400
584#define ZFCP_ERP_STEP_PORT_OPENING 0x00000800
585#define ZFCP_ERP_STEP_UNIT_CLOSING 0x00001000
586#define ZFCP_ERP_STEP_UNIT_OPENING 0x00002000
587
588/* Ordered by escalation level (necessary for proper erp-code operation) */
589#define ZFCP_ERP_ACTION_REOPEN_ADAPTER 0x4
590#define ZFCP_ERP_ACTION_REOPEN_PORT_FORCED 0x3
591#define ZFCP_ERP_ACTION_REOPEN_PORT 0x2
592#define ZFCP_ERP_ACTION_REOPEN_UNIT 0x1
593
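/*
 * Sketch of how this ordering can be used (assumed semantics, not a
 * quote from the erp code): since the codes grow with escalation
 * level, a plain numeric comparison decides whether a requested
 * recovery is already covered by a stronger action:
 *
 *	if (running_action >= wanted_action)
 *		; // stronger running action supersedes the request
 */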
594#define ZFCP_ERP_ACTION_RUNNING 0x1
595#define ZFCP_ERP_ACTION_READY 0x2
596
597#define ZFCP_ERP_SUCCEEDED 0x0
598#define ZFCP_ERP_FAILED 0x1
599#define ZFCP_ERP_CONTINUES 0x2
600#define ZFCP_ERP_EXIT 0x3
601#define ZFCP_ERP_DISMISSED 0x4
602#define ZFCP_ERP_NOMEM 0x5
603
604
605/******************** CFDC SPECIFIC STUFF *****************************/
606
607/* Firewall data channel sense data record */
608struct zfcp_cfdc_sense_data {
609 u32 signature; /* Request signature */
610 u32 devno; /* FCP adapter device number */
611 u32 command; /* Command code */
612 u32 fsf_status; /* FSF request status and status qualifier */
613 u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
614 u8 payloads[256]; /* Access conflicts list */
615 u8 control_file[0]; /* Access control table */
616};
617
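/*
 * Allocation sketch (hypothetical; GFP flag and size are illustrative):
 * control_file[] is a trailing variable-length array, so the record and
 * the access control table it carries travel in a single allocation:
 *
 *	struct zfcp_cfdc_sense_data *sense;
 *	size_t table_size = ZFCP_CFDC_MAX_CONTROL_FILE_SIZE;
 *
 *	sense = kmalloc(sizeof(*sense) + table_size, GFP_KERNEL);
 *	if (sense)
 *		sense->signature = ZFCP_CFDC_SIGNATURE;
 */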
618#define ZFCP_CFDC_SIGNATURE 0xCFDCACDF
619
620#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL 0x00010001
621#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE 0x00010101
622#define ZFCP_CFDC_CMND_FULL_ACCESS 0x00000201
623#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS 0x00000401
624#define ZFCP_CFDC_CMND_UPLOAD 0x00010002
625
626#define ZFCP_CFDC_DOWNLOAD 0x00000001
627#define ZFCP_CFDC_UPLOAD 0x00000002
628#define ZFCP_CFDC_WITH_CONTROL_FILE 0x00010000
629
630#define ZFCP_CFDC_DEV_NAME "zfcp_cfdc"
631#define ZFCP_CFDC_DEV_MAJOR MISC_MAJOR
632#define ZFCP_CFDC_DEV_MINOR MISC_DYNAMIC_MINOR
633
634#define ZFCP_CFDC_MAX_CONTROL_FILE_SIZE	(127 * 1024)
635
636/************************* STRUCTURE DEFINITIONS *****************************/
637
638struct zfcp_fsf_req;
639
640/* holds various memory pools of an adapter */
641struct zfcp_adapter_mempool {
642 mempool_t *fsf_req_erp;
643 mempool_t *fsf_req_scsi;
644 mempool_t *fsf_req_abort;
645 mempool_t *fsf_req_status_read;
646 mempool_t *data_status_read;
647 mempool_t *data_gid_pn;
648};
649
650struct zfcp_exchange_config_data {
651};
652
653struct zfcp_open_port {
654 struct zfcp_port *port;
655};
656
657struct zfcp_close_port {
658 struct zfcp_port *port;
659};
660
661struct zfcp_open_unit {
662 struct zfcp_unit *unit;
663};
664
665struct zfcp_close_unit {
666 struct zfcp_unit *unit;
667};
668
669struct zfcp_close_physical_port {
670 struct zfcp_port *port;
671};
672
673struct zfcp_send_fcp_command_task {
674 struct zfcp_fsf_req *fsf_req;
675 struct zfcp_unit *unit;
676 struct scsi_cmnd *scsi_cmnd;
677 unsigned long start_jiffies;
678};
679
680struct zfcp_send_fcp_command_task_management {
681 struct zfcp_unit *unit;
682};
683
684struct zfcp_abort_fcp_command {
685 struct zfcp_fsf_req *fsf_req;
686 struct zfcp_unit *unit;
687};
688
689/*
690 * header for CT_IU
691 */
692struct ct_hdr {
693 u8 revision; // 0x01
694 u8 in_id[3]; // 0x00
695 u8 gs_type; // 0xFC Directory Service
696 u8 gs_subtype; // 0x02 Name Server
697 u8 options; // 0x00 single bidirectional exchange
698 u8 reserved0;
699 u16 cmd_rsp_code; // 0x0121 GID_PN, or 0x0100 GA_NXT
700 u16 max_res_size; // <= (4096 - 16) / 4
701 u8 reserved1;
702 u8 reason_code;
703 u8 reason_code_expl;
704 u8 vendor_unique;
705} __attribute__ ((packed));
706
707/* nameserver request CT_IU -- for requests where
708 * a port name is required */
709struct ct_iu_gid_pn_req {
710 struct ct_hdr header;
711 wwn_t wwpn;
712} __attribute__ ((packed));
713
714/* FS_ACC IU and data unit for GID_PN nameserver request */
715struct ct_iu_gid_pn_resp {
716 struct ct_hdr header;
717 fc_id_t d_id;
718} __attribute__ ((packed));
719
720typedef void (*zfcp_send_ct_handler_t)(unsigned long);
721
722/**
723 * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct
724 * @port: port where the request is sent to
725 * @req: scatter-gather list for request
726 * @resp: scatter-gather list for response
727 * @req_count: number of elements in request scatter-gather list
728 * @resp_count: number of elements in response scatter-gather list
729 * @handler: handler function (called for response to the request)
730 * @handler_data: data passed to handler function
731 * @pool: pointer to memory pool for ct request structure
732 * @timeout: FSF timeout for this request
733 * @timer: timer (e.g. for request initiated by erp)
734 * @completion: completion for synchronization purposes
735 * @status: used to pass error status to calling function
736 */
737struct zfcp_send_ct {
738 struct zfcp_port *port;
739 struct scatterlist *req;
740 struct scatterlist *resp;
741 unsigned int req_count;
742 unsigned int resp_count;
743 zfcp_send_ct_handler_t handler;
744 unsigned long handler_data;
745 mempool_t *pool;
746 int timeout;
747 struct timer_list *timer;
748 struct completion *completion;
749 int status;
750};
751
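/*
 * Filling sketch (hypothetical caller; sg lists, handler and timeout
 * are illustrative, and the call signature of zfcp_fsf_send_ct() is
 * assumed to be analogous to that of zfcp_fsf_send_els()):
 *
 *	struct zfcp_send_ct ct = {
 *		.port		= adapter->nameserver_port,
 *		.req		= &req_sg,
 *		.resp		= &resp_sg,
 *		.req_count	= 1,
 *		.resp_count	= 1,
 *		.handler	= my_ct_handler,
 *		.handler_data	= (unsigned long) my_data,
 *		.timeout	= 10,
 *	};
 *
 *	retval = zfcp_fsf_send_ct(&ct);
 */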
752/* used for name server requests in error recovery */
753struct zfcp_gid_pn_data {
754 struct zfcp_send_ct ct;
755 struct scatterlist req;
756 struct scatterlist resp;
757 struct ct_iu_gid_pn_req ct_iu_req;
758 struct ct_iu_gid_pn_resp ct_iu_resp;
759 struct zfcp_port *port;
760};
761
762typedef void (*zfcp_send_els_handler_t)(unsigned long);
763
764/**
765 * struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els
766 * @adapter: adapter where request is sent from
767 * @d_id: destination id of port where request is sent to
768 * @req: scatter-gather list for request
769 * @resp: scatter-gather list for response
770 * @req_count: number of elements in request scatter-gather list
771 * @resp_count: number of elements in response scatter-gather list
772 * @handler: handler function (called for response to the request)
773 * @handler_data: data passed to handler function
774 * @timer: timer (e.g. for request initiated by erp)
775 * @completion: completion for synchronization purposes
776 * @ls_code: hex code of ELS command
777 * @status: used to pass error status to calling function
778 */
779struct zfcp_send_els {
780 struct zfcp_adapter *adapter;
781 fc_id_t d_id;
782 struct scatterlist *req;
783 struct scatterlist *resp;
784 unsigned int req_count;
785 unsigned int resp_count;
786 zfcp_send_els_handler_t handler;
787 unsigned long handler_data;
788 struct timer_list *timer;
789 struct completion *completion;
790 int ls_code;
791 int status;
792};
793
794struct zfcp_status_read {
795 struct fsf_status_read_buffer *buffer;
796};
797
798struct zfcp_fsf_done {
799 struct completion *complete;
800 int status;
801};
802
803/* request specific data */
804union zfcp_req_data {
805 struct zfcp_exchange_config_data exchange_config_data;
806 struct zfcp_open_port open_port;
807 struct zfcp_close_port close_port;
808 struct zfcp_open_unit open_unit;
809 struct zfcp_close_unit close_unit;
810 struct zfcp_close_physical_port close_physical_port;
811 struct zfcp_send_fcp_command_task send_fcp_command_task;
812 struct zfcp_send_fcp_command_task_management
813 send_fcp_command_task_management;
814 struct zfcp_abort_fcp_command abort_fcp_command;
815 struct zfcp_send_ct *send_ct;
816 struct zfcp_send_els *send_els;
817 struct zfcp_status_read status_read;
818 struct fsf_qtcb_bottom_port *port_data;
819};
820
821struct zfcp_qdio_queue {
822 struct qdio_buffer *buffer[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
823 u8 free_index; /* index of next free bfr
824 in queue (free_count>0) */
825 atomic_t free_count; /* number of free buffers
826 in queue */
827 rwlock_t queue_lock; /* lock for operations on queue */
828 int distance_from_int; /* SBALs used since PCI indication
829 was last set */
830};
831
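/*
 * Ring arithmetic sketch (assumes free_count > 0 and queue_lock held):
 * the SBAL array is used as a circular buffer of QDIO_MAX_BUFFERS_PER_Q
 * entries, so consuming one buffer means
 *
 *	index = queue->free_index;
 *	queue->free_index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
 *	atomic_dec(&queue->free_count);
 */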
832struct zfcp_erp_action {
833 struct list_head list;
834 int action; /* requested action code */
835 struct zfcp_adapter *adapter; /* device which should be recovered */
836 struct zfcp_port *port;
837 struct zfcp_unit *unit;
838 volatile u32 status; /* recovery status */
839 u32 step; /* active step of this erp action */
840 struct zfcp_fsf_req *fsf_req; /* fsf request currently pending
841 for this action */
842 struct timer_list timer;
843};
844
845
846struct zfcp_adapter {
847 struct list_head list; /* list of adapters */
848 atomic_t refcount; /* reference count */
849 wait_queue_head_t remove_wq; /* can be used to wait for
850 refcount drop to zero */
851 wwn_t wwnn; /* WWNN */
852 wwn_t wwpn; /* WWPN */
853 fc_id_t s_id; /* N_Port ID */
854 struct ccw_device *ccw_device; /* S/390 ccw device */
855 u8 fc_service_class;
856 u32 fc_topology; /* FC topology */
857 u32 fc_link_speed; /* FC interface speed */
858 u32 hydra_version; /* Hydra version */
859 u32 fsf_lic_version;
860 u32 supported_features;/* of FCP channel */
861 u32 hardware_version; /* of FCP channel */
862 u8 serial_number[32]; /* of hardware */
863 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
864 unsigned short scsi_host_no; /* Assigned host number */
865 unsigned char name[9];
866 struct list_head port_list_head; /* remote port list */
867 struct list_head port_remove_lh; /* head of ports to be
868 removed */
869 u32 ports; /* number of remote ports */
870 struct timer_list scsi_er_timer; /* SCSI err recovery watch */
871 struct list_head fsf_req_list_head; /* head of FSF req list */
872 rwlock_t fsf_req_list_lock; /* lock for ops on list of
873 FSF requests */
874 atomic_t fsf_reqs_active; /* # active FSF reqs */
875 struct zfcp_qdio_queue request_queue; /* request queue */
876 u32 fsf_req_seq_no; /* FSF cmnd seq number */
877 wait_queue_head_t request_wq; /* can be used to wait for
878 more available SBALs */
879 struct zfcp_qdio_queue response_queue; /* response queue */
880 rwlock_t abort_lock; /* Protects against SCSI
881 stack abort/command
882 completion races */
883 u16 status_read_failed; /* # failed status reads */
884 atomic_t status; /* status of this adapter */
885 struct list_head erp_ready_head; /* error recovery for this
886 adapter/devices */
887 struct list_head erp_running_head;
888 rwlock_t erp_lock;
889 struct semaphore erp_ready_sem;
890 wait_queue_head_t erp_thread_wqh;
891 wait_queue_head_t erp_done_wqh;
892 struct zfcp_erp_action erp_action; /* pending error recovery */
893 atomic_t erp_counter;
894 u32 erp_total_count; /* total nr of enqueued erp
895 actions */
896 u32 erp_low_mem_count; /* nr of erp actions waiting
897 for memory */
898 struct zfcp_port *nameserver_port; /* adapter's nameserver */
899 debug_info_t *erp_dbf; /* S/390 debug features */
900 debug_info_t *abort_dbf;
901 debug_info_t *in_els_dbf;
902 debug_info_t *cmd_dbf;
903 spinlock_t dbf_lock;
904 struct zfcp_adapter_mempool pool; /* Adapter memory pools */
905 struct qdio_initialize qdio_init_data; /* for qdio_establish */
906 struct device generic_services; /* directory for WKA ports */
907};
908
909/*
910 * the struct device sysfs_device must be at the beginning of this structure.
911 * pointer to struct device is used to free port structure in release function
912 * of the device. don't change!
913 */
914struct zfcp_port {
915 struct device sysfs_device; /* sysfs device */
916 struct list_head list; /* list of remote ports */
917 atomic_t refcount; /* reference count */
918 wait_queue_head_t remove_wq; /* can be used to wait for
919 refcount drop to zero */
920 struct zfcp_adapter *adapter; /* adapter used to access port */
921 struct list_head unit_list_head; /* head of logical unit list */
922 struct list_head unit_remove_lh; /* head of luns to be removed
923 list */
924 u32 units; /* # of logical units in list */
925 atomic_t status; /* status of this remote port */
926 scsi_id_t scsi_id; /* own SCSI ID */
927 wwn_t wwnn; /* WWNN if known */
928 wwn_t wwpn; /* WWPN */
929 fc_id_t d_id; /* D_ID */
930 u32 handle; /* handle assigned by FSF */
931 struct zfcp_erp_action erp_action; /* pending error recovery */
932 atomic_t erp_counter;
933};
934
935/* the struct device sysfs_device must be at the beginning of this structure.
936 * pointer to struct device is used to free unit structure in release function
937 * of the device. don't change!
938 */
939struct zfcp_unit {
940 struct device sysfs_device; /* sysfs device */
941 struct list_head list; /* list of logical units */
942 atomic_t refcount; /* reference count */
943 wait_queue_head_t remove_wq; /* can be used to wait for
944 refcount drop to zero */
945 struct zfcp_port *port; /* remote port of unit */
946 atomic_t status; /* status of this logical unit */
947 scsi_lun_t scsi_lun; /* own SCSI LUN */
948 fcp_lun_t fcp_lun; /* own FCP_LUN */
949 u32 handle; /* handle assigned by FSF */
950 struct scsi_device *device; /* scsi device struct pointer */
951 struct zfcp_erp_action erp_action; /* pending error recovery */
952 atomic_t erp_counter;
953};
954
955/* FSF request */
956struct zfcp_fsf_req {
957 struct list_head list; /* list of FSF requests */
958 struct zfcp_adapter *adapter; /* adapter request belongs to */
959 u8 sbal_number; /* nr of SBALs free for use */
960 u8 sbal_first; /* first SBAL for this request */
961 u8 sbal_last; /* last possible SBAL for
962 this request */
963 u8 sbal_curr; /* current SBAL during creation
964 of request */
965 u8 sbale_curr; /* current SBALE during creation
966 of request */
967 wait_queue_head_t completion_wq; /* can be used by a routine
968 to wait for completion */
969 volatile u32 status; /* status of this request */
970 u32 fsf_command; /* FSF Command copy */
971 struct fsf_qtcb *qtcb; /* address of associated QTCB */
972 u32 seq_no; /* Sequence number of request */
973 union zfcp_req_data data; /* Info fields of request */
974 struct zfcp_erp_action *erp_action; /* used if this request is
975 issued on behalf of erp */
976 mempool_t *pool; /* used if request was allocated
977 from emergency pool */
978};
979
980typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*);
981
982/* driver data */
983struct zfcp_data {
984 struct scsi_host_template scsi_host_template;
985 atomic_t status; /* Module status flags */
986 struct list_head adapter_list_head; /* head of adapter list */
987 struct list_head adapter_remove_lh; /* head of adapters to be
988 removed */
989 rwlock_t status_read_lock; /* for status read thread */
990 struct list_head status_read_receive_head;
991 struct list_head status_read_send_head;
992 struct semaphore status_read_sema;
993 wait_queue_head_t status_read_thread_wqh;
994 u32 adapters; /* # of adapters in list */
995 rwlock_t config_lock; /* serialises changes
996 to adapter/port/unit
997 lists */
998 struct semaphore config_sema; /* serialises configuration
999 changes */
1000 atomic_t loglevel; /* current loglevel */
1001 char init_busid[BUS_ID_SIZE];
1002 wwn_t init_wwpn;
1003 fcp_lun_t init_fcp_lun;
1004 char *driver_version;
1005};
1006
1007/**
1008 * struct zfcp_sg_list - struct describing a scatter-gather list
1009 * @sg: pointer to array of (struct scatterlist)
1010 * @count: number of elements in scatter-gather list
1011 */
1012struct zfcp_sg_list {
1013 struct scatterlist *sg;
1014 unsigned int count;
1015};
1016
1017/* number of elements for various memory pools */
1018#define ZFCP_POOL_FSF_REQ_ERP_NR 1
1019#define ZFCP_POOL_FSF_REQ_SCSI_NR 1
1020#define ZFCP_POOL_FSF_REQ_ABORT_NR 1
1021#define ZFCP_POOL_STATUS_READ_NR ZFCP_STATUS_READS_RECOM
1022#define ZFCP_POOL_DATA_GID_PN_NR 1
1023
1024/* struct used by memory pools for fsf_requests */
1025struct zfcp_fsf_req_pool_element {
1026 struct zfcp_fsf_req fsf_req;
1027 struct fsf_qtcb qtcb;
1028};
1029
1030/********************** ZFCP SPECIFIC DEFINES ********************************/
1031
1032#define ZFCP_FSFREQ_CLEANUP_TIMEOUT	(HZ / 10)
1033
1034#define ZFCP_KNOWN 0x00000001
1035#define ZFCP_REQ_AUTO_CLEANUP 0x00000002
1036#define ZFCP_WAIT_FOR_SBAL 0x00000004
1037#define ZFCP_REQ_NO_QTCB 0x00000008
1038
1039#define ZFCP_SET 0x00000100
1040#define ZFCP_CLEAR 0x00000200
1041
1042#define ZFCP_INTERRUPTIBLE 1
1043#define ZFCP_UNINTERRUPTIBLE 0
1044
1045#ifndef atomic_test_mask
1046#define atomic_test_mask(mask, target) \
1047 ((atomic_read(target) & mask) == mask)
1048#endif
1049
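/*
 * Test sketch (hypothetical helper): the status words defined earlier
 * are checked with such mask operations; e.g. a unit is usable for I/O
 * only when it is both unblocked and open:
 *
 *	static inline int example_unit_usable(struct zfcp_unit *unit)
 *	{
 *		return atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED |
 *					ZFCP_STATUS_COMMON_OPEN,
 *					&unit->status);
 *	}
 */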
1050extern void _zfcp_hex_dump(char *, int);
1051#define ZFCP_HEX_DUMP(level, addr, count) \
1052do { \
1053	if (ZFCP_LOG_CHECK(level)) \
1054		_zfcp_hex_dump(addr, count); } while (0)
1055
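/*
 * Note on the do/while(0) form of ZFCP_HEX_DUMP: a bare if-block macro
 * breaks as the body of an if/else, because the caller's semicolon
 * would terminate the outer if and orphan the else:
 *
 *	if (rc)
 *		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, buf, len);
 *	else
 *		...
 */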
1056#define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id)
1057#define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter(port->adapter))
1058#define zfcp_get_busid_by_unit(unit) (zfcp_get_busid_by_port(unit->port))
1059
1060/*
1061 * functions needed for reference/usage counting
1062 */
1063
1064static inline void
1065zfcp_unit_get(struct zfcp_unit *unit)
1066{
1067 atomic_inc(&unit->refcount);
1068}
1069
1070static inline void
1071zfcp_unit_put(struct zfcp_unit *unit)
1072{
1073 if (atomic_dec_return(&unit->refcount) == 0)
1074 wake_up(&unit->remove_wq);
1075}
1076
1077static inline void
1078zfcp_unit_wait(struct zfcp_unit *unit)
1079{
1080 wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
1081}
1082
1083static inline void
1084zfcp_port_get(struct zfcp_port *port)
1085{
1086 atomic_inc(&port->refcount);
1087}
1088
1089static inline void
1090zfcp_port_put(struct zfcp_port *port)
1091{
1092 if (atomic_dec_return(&port->refcount) == 0)
1093 wake_up(&port->remove_wq);
1094}
1095
1096static inline void
1097zfcp_port_wait(struct zfcp_port *port)
1098{
1099 wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
1100}
1101
1102static inline void
1103zfcp_adapter_get(struct zfcp_adapter *adapter)
1104{
1105 atomic_inc(&adapter->refcount);
1106}
1107
1108static inline void
1109zfcp_adapter_put(struct zfcp_adapter *adapter)
1110{
1111 if (atomic_dec_return(&adapter->refcount) == 0)
1112 wake_up(&adapter->remove_wq);
1113}
1114
1115static inline void
1116zfcp_adapter_wait(struct zfcp_adapter *adapter)
1117{
1118 wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
1119}
1120
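/*
 * Lifetime sketch (hypothetical call sites): users pin an object while
 * working with it; whoever removes the object takes it off all lists
 * first and then waits for the references to drain:
 *
 *	zfcp_unit_get(unit);		// user: pin the unit
 *	... work with unit ...
 *	zfcp_unit_put(unit);		// user: drop the reference
 *
 *	// remover, after unlisting the unit:
 *	zfcp_unit_wait(unit);		// sleeps until refcount == 0
 */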
1121#endif /* ZFCP_DEF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
new file mode 100644
index 000000000000..cfc0d8c588df
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -0,0 +1,3585 @@
1/*
2 *
3 * linux/drivers/s390/scsi/zfcp_erp.c
4 *
5 * FCP adapter driver for IBM eServer zSeries
6 *
7 * (C) Copyright IBM Corp. 2002, 2004
8 *
9 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
10 * Raimund Schroeder <raimund.schroeder@de.ibm.com>
11 * Aron Zeh
12 * Wolfgang Taphorn
13 * Stefan Bader <stefan.bader@de.ibm.com>
14 * Heiko Carstens <heiko.carstens@de.ibm.com>
15 * Andreas Herrmann <aherrman@de.ibm.com>
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
20 * any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 */
31
32#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
33
34#define ZFCP_ERP_REVISION "$Revision: 1.86 $"
35
36#include "zfcp_ext.h"
37
38static int zfcp_erp_adisc(struct zfcp_adapter *, fc_id_t);
39static void zfcp_erp_adisc_handler(unsigned long);
40
41static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *, int);
42static int zfcp_erp_port_forced_reopen_internal(struct zfcp_port *, int);
43static int zfcp_erp_port_reopen_internal(struct zfcp_port *, int);
44static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *, int);
45
46static int zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *, int);
47static int zfcp_erp_unit_reopen_all_internal(struct zfcp_port *, int);
48
49static void zfcp_erp_adapter_block(struct zfcp_adapter *, int);
50static void zfcp_erp_adapter_unblock(struct zfcp_adapter *);
51static void zfcp_erp_port_block(struct zfcp_port *, int);
52static void zfcp_erp_port_unblock(struct zfcp_port *);
53static void zfcp_erp_unit_block(struct zfcp_unit *, int);
54static void zfcp_erp_unit_unblock(struct zfcp_unit *);
55
56static int zfcp_erp_thread(void *);
57
58static int zfcp_erp_strategy(struct zfcp_erp_action *);
59
60static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *);
61static int zfcp_erp_strategy_memwait(struct zfcp_erp_action *);
62static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *, int);
63static int zfcp_erp_strategy_check_unit(struct zfcp_unit *, int);
64static int zfcp_erp_strategy_check_port(struct zfcp_port *, int);
65static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *, int);
66static int zfcp_erp_strategy_statechange(int, u32, struct zfcp_adapter *,
67 struct zfcp_port *,
68 struct zfcp_unit *, int);
69static inline int zfcp_erp_strategy_statechange_detected(atomic_t *, u32);
70static int zfcp_erp_strategy_followup_actions(int, struct zfcp_adapter *,
71 struct zfcp_port *,
72 struct zfcp_unit *, int);
73static int zfcp_erp_strategy_check_queues(struct zfcp_adapter *);
74static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int);
75
76static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *);
77static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int);
78static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *);
79static int zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *);
80static int zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *);
81static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
82static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
83static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
84static int zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *);
85static int zfcp_erp_adapter_strategy_open_fsf_statusread(
86 struct zfcp_erp_action *);
87
88static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *);
89static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *);
90
91static int zfcp_erp_port_strategy(struct zfcp_erp_action *);
92static int zfcp_erp_port_strategy_clearstati(struct zfcp_port *);
93static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *);
94static int zfcp_erp_port_strategy_open(struct zfcp_erp_action *);
95static int zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *);
96static int zfcp_erp_port_strategy_open_nameserver_wakeup(
97 struct zfcp_erp_action *);
98static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *);
99static int zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *);
100static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *);
101
102static int zfcp_erp_unit_strategy(struct zfcp_erp_action *);
103static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *);
104static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *);
105static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *);
106
107static int zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
108static int zfcp_erp_action_dismiss_port(struct zfcp_port *);
109static int zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
110static int zfcp_erp_action_dismiss(struct zfcp_erp_action *);
111
112static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *,
113 struct zfcp_port *, struct zfcp_unit *);
114static int zfcp_erp_action_dequeue(struct zfcp_erp_action *);
115static void zfcp_erp_action_cleanup(int, struct zfcp_adapter *,
116 struct zfcp_port *, struct zfcp_unit *,
117 int);
118
119static void zfcp_erp_action_ready(struct zfcp_erp_action *);
120static int zfcp_erp_action_exists(struct zfcp_erp_action *);
121
122static inline void zfcp_erp_action_to_ready(struct zfcp_erp_action *);
123static inline void zfcp_erp_action_to_running(struct zfcp_erp_action *);
124
125static void zfcp_erp_memwait_handler(unsigned long);
126static void zfcp_erp_timeout_handler(unsigned long);
127static inline void zfcp_erp_timeout_init(struct zfcp_erp_action *);
128
129/**
130 * zfcp_fsf_request_timeout_handler - called if a request timed out
131 * @data: pointer to adapter for handler function
132 *
133 * This function needs to be called if requests (ELS, Generic Service,
134 * or SCSI commands) exceed a certain time limit. The assumption is
135 * that after the time limit the adapter is stuck. So we trigger a reopen of
136 * the adapter. This should not be used for error recovery, SCSI abort
137 * commands or SCSI requests from the SCSI mid-layer.
138 */
139void
140zfcp_fsf_request_timeout_handler(unsigned long data)
141{
142 struct zfcp_adapter *adapter;
143
144 adapter = (struct zfcp_adapter *) data;
145
146 zfcp_erp_adapter_reopen(adapter, 0);
147}
148
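/*
 * Arming sketch (hypothetical call site; the 30s expiry is illustrative
 * -- the driver itself allocates such timers dynamically, cf.
 * zfcp_erp_adisc() below):
 *
 *	struct timer_list timer;
 *
 *	init_timer(&timer);
 *	timer.function = zfcp_fsf_request_timeout_handler;
 *	timer.data = (unsigned long) adapter;
 *	timer.expires = jiffies + 30 * HZ;
 *	add_timer(&timer);
 */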
149/*
150 * function: zfcp_fsf_scsi_er_timeout_handler
151 *
152 * purpose: This function needs to be called whenever a SCSI error recovery
153 * action (abort/reset) does not return.
154 * Re-opening the adapter means that the command can be returned
155 * by zfcp (it is guaranteed that it does not return via the
156 * adapter anymore). The buffer can then be used again.
157 *
158 * returns: nothing
159 */
160void
161zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
162{
163 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
164
165 ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. "
166 "Restarting all operations on the adapter %s\n",
167 zfcp_get_busid_by_adapter(adapter));
168 debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout");
169 zfcp_erp_adapter_reopen(adapter, 0);
170
171 return;
172}
173
174/*
175 * function:
176 *
177 * purpose: called if an adapter failed,
178 * initiates adapter recovery which is done
179 * asynchronously
180 *
181 * returns: 0 - initiated action successfully
182 * <0 - failed to initiate action
183 */
184int
185zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask)
186{
187 int retval;
188
189 debug_text_event(adapter->erp_dbf, 5, "a_ro");
190 ZFCP_LOG_DEBUG("reopen adapter %s\n",
191 zfcp_get_busid_by_adapter(adapter));
192
193 zfcp_erp_adapter_block(adapter, clear_mask);
194
195 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) {
196 ZFCP_LOG_DEBUG("skipped reopen of failed adapter %s\n",
197 zfcp_get_busid_by_adapter(adapter));
198 debug_text_event(adapter->erp_dbf, 5, "a_ro_f");
199 /* ensure propagation of failed status to new devices */
200 zfcp_erp_adapter_failed(adapter);
201 retval = -EIO;
202 goto out;
203 }
204 retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
205 adapter, NULL, NULL);
206
207 out:
208 return retval;
209}
210
211/*
212 * function:
213 *
214 * purpose: Wrapper for zfcp_erp_adapter_reopen_internal
215 * used to ensure the correct locking
216 *
217 * returns: 0 - initiated action successfully
218 * <0 - failed to initiate action
219 */
220int
221zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear_mask)
222{
223 int retval;
224 unsigned long flags;
225
226 read_lock_irqsave(&zfcp_data.config_lock, flags);
227 write_lock(&adapter->erp_lock);
228 retval = zfcp_erp_adapter_reopen_internal(adapter, clear_mask);
229 write_unlock(&adapter->erp_lock);
230 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
231
232 return retval;
233}
234
235/*
236 * function: zfcp_erp_adapter_shutdown
237 *
238 * purpose: sets an adapter offline (reopen with RUNNING and ERP_FAILED cleared)
239 *
240 * returns: 0 - initiated action successfully, <0 - failed to initiate action
241 */
242int
243zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear_mask)
244{
245 int retval;
246
247 retval = zfcp_erp_adapter_reopen(adapter,
248 ZFCP_STATUS_COMMON_RUNNING |
249 ZFCP_STATUS_COMMON_ERP_FAILED |
250 clear_mask);
251
252 return retval;
253}
254
255/*
256 * function: zfcp_erp_port_shutdown
257 *
258 * purpose: sets a port offline (reopen with RUNNING and ERP_FAILED cleared)
259 *
260 * returns: 0 - initiated action successfully, <0 - failed to initiate action
261 */
262int
263zfcp_erp_port_shutdown(struct zfcp_port *port, int clear_mask)
264{
265 int retval;
266
267 retval = zfcp_erp_port_reopen(port,
268 ZFCP_STATUS_COMMON_RUNNING |
269 ZFCP_STATUS_COMMON_ERP_FAILED |
270 clear_mask);
271
272 return retval;
273}
274
275/*
276 * function: zfcp_erp_unit_shutdown
277 *
278 * purpose: sets a unit offline (reopen with RUNNING and ERP_FAILED cleared)
279 *
280 * returns: 0 - initiated action successfully, <0 - failed to initiate action
281 */
282int
283zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear_mask)
284{
285 int retval;
286
287 retval = zfcp_erp_unit_reopen(unit,
288 ZFCP_STATUS_COMMON_RUNNING |
289 ZFCP_STATUS_COMMON_ERP_FAILED |
290 clear_mask);
291
292 return retval;
293}
294
295
296/**
297 * zfcp_erp_adisc - send ADISC ELS command
298 * @adapter: adapter structure
299 * @d_id: d_id of port where ADISC is sent to
300 */
301int
302zfcp_erp_adisc(struct zfcp_adapter *adapter, fc_id_t d_id)
303{
304 struct zfcp_send_els *send_els;
305 struct zfcp_ls_adisc *adisc;
306 void *address = NULL;
307 int retval = 0;
308 struct timer_list *timer;
309
310 send_els = kmalloc(sizeof(struct zfcp_send_els), GFP_ATOMIC);
311 if (send_els == NULL)
312 goto nomem;
313 memset(send_els, 0, sizeof(*send_els));
314
315 send_els->req = kmalloc(sizeof(struct scatterlist), GFP_ATOMIC);
316 if (send_els->req == NULL)
317 goto nomem;
318 memset(send_els->req, 0, sizeof(*send_els->req));
319
320 send_els->resp = kmalloc(sizeof(struct scatterlist), GFP_ATOMIC);
321 if (send_els->resp == NULL)
322 goto nomem;
323 memset(send_els->resp, 0, sizeof(*send_els->resp));
324
325 address = (void *) get_zeroed_page(GFP_ATOMIC);
326 if (address == NULL)
327 goto nomem;
328
329 zfcp_address_to_sg(address, send_els->req);
330 address += PAGE_SIZE >> 1;
331 zfcp_address_to_sg(address, send_els->resp);
332 send_els->req_count = send_els->resp_count = 1;
333
334 send_els->adapter = adapter;
335 send_els->d_id = d_id;
336 send_els->handler = zfcp_erp_adisc_handler;
337 send_els->handler_data = (unsigned long) send_els;
338
339 adisc = zfcp_sg_to_address(send_els->req);
340 send_els->ls_code = adisc->code = ZFCP_LS_ADISC;
341
342 send_els->req->length = sizeof(struct zfcp_ls_adisc);
343 send_els->resp->length = sizeof(struct zfcp_ls_adisc_acc);
344
345	/* according to FC-FS, hard_nport_id in ADISC should not be set for ports
346 without FC-AL-2 capability, so we don't set it */
347 adisc->wwpn = adapter->wwpn;
348 adisc->wwnn = adapter->wwnn;
349 adisc->nport_id = adapter->s_id;
350 ZFCP_LOG_INFO("ADISC request from s_id 0x%08x to d_id 0x%08x "
351 "(wwpn=0x%016Lx, wwnn=0x%016Lx, "
352 "hard_nport_id=0x%08x, nport_id=0x%08x)\n",
353 adapter->s_id, d_id, (wwn_t) adisc->wwpn,
354 (wwn_t) adisc->wwnn, adisc->hard_nport_id,
355 adisc->nport_id);
356
357 timer = kmalloc(sizeof(struct timer_list), GFP_ATOMIC);
358 if (!timer)
359 goto nomem;
360
361 init_timer(timer);
362 timer->function = zfcp_fsf_request_timeout_handler;
363 timer->data = (unsigned long) adapter;
364 timer->expires = ZFCP_FSF_REQUEST_TIMEOUT;
365 send_els->timer = timer;
366
367 retval = zfcp_fsf_send_els(send_els);
368 if (retval != 0) {
369 ZFCP_LOG_NORMAL("error: initiation of Send ELS failed for port "
370 "0x%08x on adapter %s\n", d_id,
371 zfcp_get_busid_by_adapter(adapter));
372 del_timer(send_els->timer);
373 goto freemem;
374 }
375
376 goto out;
377
378 nomem:
379 retval = -ENOMEM;
380 freemem:
381 if (address != NULL)
382 __free_pages(send_els->req->page, 0);
383 if (send_els != NULL) {
384 kfree(send_els->timer);
385 kfree(send_els->req);
386 kfree(send_els->resp);
387 kfree(send_els);
388 }
389 out:
390 return retval;
391}
392
393
394/**
395 * zfcp_erp_adisc_handler - handler for ADISC ELS command
396 * @data: pointer to struct zfcp_send_els
397 *
398 * If the ADISC failed (LS_RJT or timed out), a forced reopen of the port is triggered.
399 */
400void
401zfcp_erp_adisc_handler(unsigned long data)
402{
403 struct zfcp_send_els *send_els;
404 struct zfcp_port *port;
405 struct zfcp_adapter *adapter;
406 fc_id_t d_id;
407 struct zfcp_ls_adisc_acc *adisc;
408
409 send_els = (struct zfcp_send_els *) data;
410
411 del_timer(send_els->timer);
412
413 adapter = send_els->adapter;
414 d_id = send_els->d_id;
415
416 read_lock(&zfcp_data.config_lock);
417 port = zfcp_get_port_by_did(send_els->adapter, send_els->d_id);
418 read_unlock(&zfcp_data.config_lock);
419
420 BUG_ON(port == NULL);
421
422 /* request rejected or timed out */
423 if (send_els->status != 0) {
424 ZFCP_LOG_NORMAL("ELS request rejected/timed out, "
425 "force physical port reopen "
426 "(adapter %s, port d_id=0x%08x)\n",
427 zfcp_get_busid_by_adapter(adapter), d_id);
428 debug_text_event(adapter->erp_dbf, 3, "forcreop");
429 if (zfcp_erp_port_forced_reopen(port, 0))
430 ZFCP_LOG_NORMAL("failed reopen of port "
431 "(adapter %s, wwpn=0x%016Lx)\n",
432 zfcp_get_busid_by_port(port),
433 port->wwpn);
434 goto out;
435 }
436
437 adisc = zfcp_sg_to_address(send_els->resp);
438
439 ZFCP_LOG_INFO("ADISC response from d_id 0x%08x to s_id "
440 "0x%08x (wwpn=0x%016Lx, wwnn=0x%016Lx, "
441 "hard_nport_id=0x%08x, nport_id=0x%08x)\n",
442 d_id, adapter->s_id, (wwn_t) adisc->wwpn,
443 (wwn_t) adisc->wwnn, adisc->hard_nport_id,
444 adisc->nport_id);
445
446 /* set wwnn for port */
447 if (port->wwnn == 0)
448 port->wwnn = adisc->wwnn;
449
450 if (port->wwpn != adisc->wwpn) {
451 ZFCP_LOG_NORMAL("d_id assignment changed, reopening "
452 "port (adapter %s, wwpn=0x%016Lx, "
453 "adisc_resp_wwpn=0x%016Lx)\n",
454 zfcp_get_busid_by_port(port),
455 port->wwpn, (wwn_t) adisc->wwpn);
456 if (zfcp_erp_port_reopen(port, 0))
457 ZFCP_LOG_NORMAL("failed reopen of port "
458 "(adapter %s, wwpn=0x%016Lx)\n",
459 zfcp_get_busid_by_port(port),
460 port->wwpn);
461 }
462
463 out:
464 zfcp_port_put(port);
465 __free_pages(send_els->req->page, 0);
466 kfree(send_els->timer);
467 kfree(send_els->req);
468 kfree(send_els->resp);
469 kfree(send_els);
470}
471
472
473/**
474 * zfcp_test_link - lightweight link test procedure
475 * @port: port to be tested
476 *
477 * Test status of a link to a remote port using the ELS command ADISC.
478 */
479int
480zfcp_test_link(struct zfcp_port *port)
481{
482 int retval;
483
484 zfcp_port_get(port);
485 retval = zfcp_erp_adisc(port->adapter, port->d_id);
486 if (retval != 0) {
487 zfcp_port_put(port);
488 ZFCP_LOG_NORMAL("reopen needed for port 0x%016Lx "
489 "on adapter %s\n", port->wwpn,
490 zfcp_get_busid_by_port(port));
491 retval = zfcp_erp_port_forced_reopen(port, 0);
492 if (retval != 0) {
493 ZFCP_LOG_NORMAL("reopen of remote port 0x%016Lx "
494 "on adapter %s failed\n", port->wwpn,
495 zfcp_get_busid_by_port(port));
496 retval = -EPERM;
497 }
498 }
499
500 return retval;
501}
502
503
504/*
505 * function:
506 *
507 * purpose: called if a port failed to be opened normally
508 * initiates Forced Reopen recovery which is done
509 * asynchronously
510 *
511 * returns: 0 - initiated action successfully
512 * <0 - failed to initiate action
513 */
514static int
515zfcp_erp_port_forced_reopen_internal(struct zfcp_port *port, int clear_mask)
516{
517 int retval;
518 struct zfcp_adapter *adapter = port->adapter;
519
520 debug_text_event(adapter->erp_dbf, 5, "pf_ro");
521 debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
522
523 ZFCP_LOG_DEBUG("forced reopen of port 0x%016Lx on adapter %s\n",
524 port->wwpn, zfcp_get_busid_by_port(port));
525
526 zfcp_erp_port_block(port, clear_mask);
527
528 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
529 ZFCP_LOG_DEBUG("skipped forced reopen of failed port 0x%016Lx "
530 "on adapter %s\n", port->wwpn,
531 zfcp_get_busid_by_port(port));
532 debug_text_event(adapter->erp_dbf, 5, "pf_ro_f");
533 debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
534 retval = -EIO;
535 goto out;
536 }
537
538 retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
539 port->adapter, port, NULL);
540
541 out:
542 return retval;
543}
544
545/*
546 * function:
547 *
548 * purpose: Wrapper for zfcp_erp_port_forced_reopen_internal
549 * used to ensure the correct locking
550 *
551 * returns: 0 - initiated action successfully
552 * <0 - failed to initiate action
553 */
554int
555zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear_mask)
556{
557 int retval;
558 unsigned long flags;
559 struct zfcp_adapter *adapter;
560
561 adapter = port->adapter;
562 read_lock_irqsave(&zfcp_data.config_lock, flags);
563 write_lock(&adapter->erp_lock);
564 retval = zfcp_erp_port_forced_reopen_internal(port, clear_mask);
565 write_unlock(&adapter->erp_lock);
566 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
567
568 return retval;
569}
570
571/*
572 * function:
573 *
574 * purpose: called if a port is to be opened
575 * initiates Reopen recovery which is done
576 * asynchronously
577 *
578 * returns: 0 - initiated action successfully
579 * <0 - failed to initiate action
580 */
581static int
582zfcp_erp_port_reopen_internal(struct zfcp_port *port, int clear_mask)
583{
584 int retval;
585 struct zfcp_adapter *adapter = port->adapter;
586
587 debug_text_event(adapter->erp_dbf, 5, "p_ro");
588 debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
589
590 ZFCP_LOG_DEBUG("reopen of port 0x%016Lx on adapter %s\n",
591 port->wwpn, zfcp_get_busid_by_port(port));
592
593 zfcp_erp_port_block(port, clear_mask);
594
595 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
596 ZFCP_LOG_DEBUG("skipped reopen of failed port 0x%016Lx "
597 "on adapter %s\n", port->wwpn,
598 zfcp_get_busid_by_port(port));
599 debug_text_event(adapter->erp_dbf, 5, "p_ro_f");
600 debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
601 /* ensure propagation of failed status to new devices */
602 zfcp_erp_port_failed(port);
603 retval = -EIO;
604 goto out;
605 }
606
607 retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
608 port->adapter, port, NULL);
609
610 out:
611 return retval;
612}
613
614/**
615 * zfcp_erp_port_reopen - initiate reopen of a remote port
616 * @port: port to be reopened
617 * @clear_mask: specifies flags in port status to be cleared
618 * Return: 0 on success, < 0 on error
619 *
620 * This is a wrapper function for zfcp_erp_port_reopen_internal. It ensures
621 * correct locking. An error recovery task is initiated to do the reopen.
622 * To wait for the completion of the reopen zfcp_erp_wait should be used.
623 */
624int
625zfcp_erp_port_reopen(struct zfcp_port *port, int clear_mask)
626{
627 int retval;
628 unsigned long flags;
629 struct zfcp_adapter *adapter = port->adapter;
630
631 read_lock_irqsave(&zfcp_data.config_lock, flags);
632 write_lock(&adapter->erp_lock);
633 retval = zfcp_erp_port_reopen_internal(port, clear_mask);
634 write_unlock(&adapter->erp_lock);
635 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
636
637 return retval;
638}
639
640/*
641 * function:
642 *
643 * purpose: called if a unit is to be opened
644 * initiates Reopen recovery which is done
645 * asynchronously
646 *
647 * returns: 0 - initiated action successfully
648 * <0 - failed to initiate action
649 */
650static int
651zfcp_erp_unit_reopen_internal(struct zfcp_unit *unit, int clear_mask)
652{
653 int retval;
654 struct zfcp_adapter *adapter = unit->port->adapter;
655
656 debug_text_event(adapter->erp_dbf, 5, "u_ro");
657 debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t));
658 ZFCP_LOG_DEBUG("reopen of unit 0x%016Lx on port 0x%016Lx "
659 "on adapter %s\n", unit->fcp_lun,
660 unit->port->wwpn, zfcp_get_busid_by_unit(unit));
661
662 zfcp_erp_unit_block(unit, clear_mask);
663
664 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status)) {
665 ZFCP_LOG_DEBUG("skipped reopen of failed unit 0x%016Lx "
666 "on port 0x%016Lx on adapter %s\n",
667 unit->fcp_lun, unit->port->wwpn,
668 zfcp_get_busid_by_unit(unit));
669 debug_text_event(adapter->erp_dbf, 5, "u_ro_f");
670 debug_event(adapter->erp_dbf, 5, &unit->fcp_lun,
671 sizeof (fcp_lun_t));
672 retval = -EIO;
673 goto out;
674 }
675
676 retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT,
677 unit->port->adapter, unit->port, unit);
678 out:
679 return retval;
680}
681
682/**
683 * zfcp_erp_unit_reopen - initiate reopen of a unit
684 * @unit: unit to be reopened
685 * @clear_mask: specifies flags in unit status to be cleared
686 * Return: 0 on success, < 0 on error
687 *
688 * This is a wrapper for zfcp_erp_unit_reopen_internal. It ensures correct
689 * locking. An error recovery task is initiated to do the reopen.
690 * To wait for the completion of the reopen zfcp_erp_wait should be used.
691 */
692int
693zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask)
694{
695 int retval;
696 unsigned long flags;
697 struct zfcp_adapter *adapter;
698 struct zfcp_port *port;
699
700 port = unit->port;
701 adapter = port->adapter;
702
703 read_lock_irqsave(&zfcp_data.config_lock, flags);
704 write_lock(&adapter->erp_lock);
705 retval = zfcp_erp_unit_reopen_internal(unit, clear_mask);
706 write_unlock(&adapter->erp_lock);
707 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
708
709 return retval;
710}
711
712/*
713 * function:
714 *
715 * purpose: disable I/O,
716 * return any open requests and clean them up,
717 * aim: no pending and incoming I/O
718 *
719 * returns:
720 */
721static void
722zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
723{
724 debug_text_event(adapter->erp_dbf, 6, "a_bl");
725 zfcp_erp_modify_adapter_status(adapter,
726 ZFCP_STATUS_COMMON_UNBLOCKED |
727 clear_mask, ZFCP_CLEAR);
728}
729
730/*
731 * function:
732 *
733 * purpose: enable I/O
734 *
735 * returns:
736 */
737static void
738zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
739{
740 debug_text_event(adapter->erp_dbf, 6, "a_ubl");
741 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
742}
743
744/*
745 * function:
746 *
747 * purpose: disable I/O,
748 * return any open requests and clean them up,
749 * aim: no pending and incoming I/O
750 *
751 * returns:
752 */
753static void
754zfcp_erp_port_block(struct zfcp_port *port, int clear_mask)
755{
756 struct zfcp_adapter *adapter = port->adapter;
757
758 debug_text_event(adapter->erp_dbf, 6, "p_bl");
759 debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
760 zfcp_erp_modify_port_status(port,
761 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
762 ZFCP_CLEAR);
763}
764
765/*
766 * function:
767 *
768 * purpose: enable I/O
769 *
770 * returns:
771 */
772static void
773zfcp_erp_port_unblock(struct zfcp_port *port)
774{
775 struct zfcp_adapter *adapter = port->adapter;
776
777 debug_text_event(adapter->erp_dbf, 6, "p_ubl");
778 debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
779 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
780}
781
782/*
783 * function:
784 *
785 * purpose: disable I/O,
786 * return any open requests and clean them up,
787 * aim: no pending and incoming I/O
788 *
789 * returns:
790 */
791static void
792zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask)
793{
794 struct zfcp_adapter *adapter = unit->port->adapter;
795
796 debug_text_event(adapter->erp_dbf, 6, "u_bl");
797 debug_event(adapter->erp_dbf, 6, &unit->fcp_lun, sizeof (fcp_lun_t));
798 zfcp_erp_modify_unit_status(unit,
799 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
800 ZFCP_CLEAR);
801}
802
803/*
804 * function:
805 *
806 * purpose: enable I/O
807 *
808 * returns:
809 */
810static void
811zfcp_erp_unit_unblock(struct zfcp_unit *unit)
812{
813 struct zfcp_adapter *adapter = unit->port->adapter;
814
815 debug_text_event(adapter->erp_dbf, 6, "u_ubl");
816 debug_event(adapter->erp_dbf, 6, &unit->fcp_lun, sizeof (fcp_lun_t));
817 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status);
818}
819
820/*
821 * function: zfcp_erp_action_ready
822 *
823 * purpose: moves erp_action to the ready queue and wakes up the erp thread
824 *
825 * returns: nothing
826 */
827static void
828zfcp_erp_action_ready(struct zfcp_erp_action *erp_action)
829{
830 struct zfcp_adapter *adapter = erp_action->adapter;
831
832 debug_text_event(adapter->erp_dbf, 4, "a_ar");
833 debug_event(adapter->erp_dbf, 4, &erp_action->action, sizeof (int));
834
835 zfcp_erp_action_to_ready(erp_action);
836 up(&adapter->erp_ready_sem);
837}
838
839/*
840 * function:
841 *
842 * purpose:
843 *
844 * returns: <0 erp_action not found in any list
845 * ZFCP_ERP_ACTION_READY erp_action is in ready list
846 * ZFCP_ERP_ACTION_RUNNING erp_action is in running list
847 *
848 * locks: erp_lock must be held
849 */
850static int
851zfcp_erp_action_exists(struct zfcp_erp_action *erp_action)
852{
853 int retval = -EINVAL;
854 struct list_head *entry;
855 struct zfcp_erp_action *entry_erp_action;
856 struct zfcp_adapter *adapter = erp_action->adapter;
857
858 /* search in running list */
859 list_for_each(entry, &adapter->erp_running_head) {
860 entry_erp_action =
861 list_entry(entry, struct zfcp_erp_action, list);
862 if (entry_erp_action == erp_action) {
863 retval = ZFCP_ERP_ACTION_RUNNING;
864 goto out;
865 }
866 }
867 /* search in ready list */
868 list_for_each(entry, &adapter->erp_ready_head) {
869 entry_erp_action =
870 list_entry(entry, struct zfcp_erp_action, list);
871 if (entry_erp_action == erp_action) {
872 retval = ZFCP_ERP_ACTION_READY;
873 goto out;
874 }
875 }
876
877 out:
878 return retval;
879}
880
881/*
882 * purpose: checks current status of action (timed out, dismissed, ...)
883 * and does appropriate preparations (dismiss fsf request, ...)
884 *
885 * locks: called under erp_lock (disabled interrupts)
886 *
887 * returns: 0
888 */
889static int
890zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
891{
892 int retval = 0;
893 struct zfcp_fsf_req *fsf_req;
894 struct zfcp_adapter *adapter = erp_action->adapter;
895
896 if (erp_action->fsf_req) {
897 /* take lock to ensure that request is not being deleted meanwhile */
898 write_lock(&adapter->fsf_req_list_lock);
899 /* check whether fsf req does still exist */
900 list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list)
901 if (fsf_req == erp_action->fsf_req)
902 break;
903 if (fsf_req == erp_action->fsf_req) {
904 /* fsf_req still exists */
905 debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
906 debug_event(adapter->erp_dbf, 3, &fsf_req,
907 sizeof (unsigned long));
908 /* dismiss fsf_req of timed out or dismissed erp_action */
909 if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED |
910 ZFCP_STATUS_ERP_TIMEDOUT)) {
911 debug_text_event(adapter->erp_dbf, 3,
912 "a_ca_disreq");
913 fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
914 }
915 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
916 ZFCP_LOG_NORMAL("error: erp step timed out "
917 "(action=%d, fsf_req=%p)\n",
918 erp_action->action,
919 erp_action->fsf_req);
920 }
921 /*
922 * If fsf_req is neither dismissed nor completed
923 * then keep it running asynchronously and don't mess
924 * with the association of erp_action and fsf_req.
925 */
926 if (fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED |
927 ZFCP_STATUS_FSFREQ_DISMISSED)) {
928 /* forget about association between fsf_req
929 and erp_action */
930 fsf_req->erp_action = NULL;
931 erp_action->fsf_req = NULL;
932 }
933 } else {
934 debug_text_event(adapter->erp_dbf, 3, "a_ca_gonereq");
935 /*
936 * even if this fsf_req has gone, forget about
937 * association between erp_action and fsf_req
938 */
939 erp_action->fsf_req = NULL;
940 }
941 write_unlock(&adapter->fsf_req_list_lock);
942 } else
943 debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq");
944
945 return retval;
946}
947
948/*
949 * purpose: generic handler for asynchronous events related to an erp_action
950 * (normal completion, time-out, dismissing, retry after
951 * low memory condition)
952 *
953 * note: deletion of timer is not required (e.g. in case of a time-out),
954 * but a second try does no harm,
955 * we leave it in here to allow for greater simplification
956 *
957 * returns: 0 - there was an action to handle
958 * !0 - otherwise
959 */
960static int
961zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
962 unsigned long set_mask)
963{
964 int retval;
965 struct zfcp_adapter *adapter = erp_action->adapter;
966
967 if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
968 debug_text_event(adapter->erp_dbf, 2, "a_asyh_ex");
969 debug_event(adapter->erp_dbf, 2, &erp_action->action,
970 sizeof (int));
971 if (!(set_mask & ZFCP_STATUS_ERP_TIMEDOUT))
972 del_timer(&erp_action->timer);
973 erp_action->status |= set_mask;
974 zfcp_erp_action_ready(erp_action);
975 retval = 0;
976 } else {
977 /* action is ready or gone - nothing to do */
978 debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone");
979 debug_event(adapter->erp_dbf, 3, &erp_action->action,
980 sizeof (int));
981 retval = 1;
982 }
983
984 return retval;
985}
986
987/*
988 * purpose: generic handler for asynchronous events related to an erp_action
989 * (normal completion, time-out, dismissing, retry after
990 * low memory condition)
991 *
992 * note: deletion of timer is not required (e.g. in case of a time-out),
993 * but a second try does no harm,
994 * we leave it in here to allow for greater simplification
995 *
996 * returns: 0 - there was an action to handle
997 * !0 - otherwise
998 */
999int
1000zfcp_erp_async_handler(struct zfcp_erp_action *erp_action,
1001 unsigned long set_mask)
1002{
1003 struct zfcp_adapter *adapter = erp_action->adapter;
1004 unsigned long flags;
1005 int retval;
1006
1007 write_lock_irqsave(&adapter->erp_lock, flags);
1008 retval = zfcp_erp_async_handler_nolock(erp_action, set_mask);
1009 write_unlock_irqrestore(&adapter->erp_lock, flags);
1010
1011 return retval;
1012}
1013
1014/*
1015 * purpose: is called for an erp_action which was put to sleep waiting
1016 * for memory to become available,
1017 * triggers continuation of this action
1018 */
1019static void
1020zfcp_erp_memwait_handler(unsigned long data)
1021{
1022 struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
1023 struct zfcp_adapter *adapter = erp_action->adapter;
1024
1025 debug_text_event(adapter->erp_dbf, 2, "a_mwh");
1026 debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
1027
1028 zfcp_erp_async_handler(erp_action, 0);
1029}
1030
1031/*
1032 * purpose: is called if an asynchronous erp step timed out,
1033 * action gets an appropriate flag and will be processed
1034 * accordingly
1035 */
1036static void
1037zfcp_erp_timeout_handler(unsigned long data)
1038{
1039 struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
1040 struct zfcp_adapter *adapter = erp_action->adapter;
1041
1042 debug_text_event(adapter->erp_dbf, 2, "a_th");
1043 debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
1044
1045 zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT);
1046}
1047
1048/*
1049 * purpose: is called for an erp_action which needs to be ended
1050 * even though it is not done yet,
1051 * this is usually required if a higher-level action is generated,
1052 * the action gets an appropriate flag and will be processed
1053 * accordingly
1054 *
1055 * locks: erp_lock held (thus we need to call another handler variant)
1056 */
1057static int
1058zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
1059{
1060 struct zfcp_adapter *adapter = erp_action->adapter;
1061
1062 debug_text_event(adapter->erp_dbf, 2, "a_adis");
1063 debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
1064
1065 zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED);
1066
1067 return 0;
1068}
1069
1070int
1071zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
1072{
1073 int retval = 0;
1074
1075 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
1076
1077 rwlock_init(&adapter->erp_lock);
1078 INIT_LIST_HEAD(&adapter->erp_ready_head);
1079 INIT_LIST_HEAD(&adapter->erp_running_head);
1080 sema_init(&adapter->erp_ready_sem, 0);
1081
1082 retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
1083 if (retval < 0) {
1084 ZFCP_LOG_NORMAL("error: creation of erp thread failed for "
1085 "adapter %s\n",
1086 zfcp_get_busid_by_adapter(adapter));
1087 debug_text_event(adapter->erp_dbf, 5, "a_thset_fail");
1088 } else {
1089 wait_event(adapter->erp_thread_wqh,
1090 atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
1091 &adapter->status));
1092 debug_text_event(adapter->erp_dbf, 5, "a_thset_ok");
1093 }
1094
1095 return (retval < 0);
1096}
1097
1098/*
1099 * function: zfcp_erp_thread_kill
1100 *
1101 * purpose: tells the erp thread to die and waits until it is gone
1102 *
1103 * returns: 0
1104 *
1105 * context: process (i.e. proc-fs or rmmod/insmod)
1106 *
1107 * note: The caller of this routine ensures that the specified
1108 * adapter has been shut down and that this operation
1109 * has been completed. Thus, there are no pending erp_actions
1110 * which would need to be handled here.
1111 */
1112int
1113zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
1114{
1115 int retval = 0;
1116
1117 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
1118 up(&adapter->erp_ready_sem);
1119
1120 wait_event(adapter->erp_thread_wqh,
1121 !atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
1122 &adapter->status));
1123
1124 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
1125 &adapter->status);
1126
1127 debug_text_event(adapter->erp_dbf, 5, "a_thki_ok");
1128
1129 return retval;
1130}
1131
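/*
 * Pairing sketch (hypothetical call sites): one erp thread per adapter,
 * set up at adapter registration and killed only after the adapter has
 * been shut down and recovery has drained (cf. zfcp_erp_wait()):
 *
 *	if (zfcp_erp_thread_setup(adapter))
 *		return -EIO;
 *	...
 *	zfcp_erp_adapter_shutdown(adapter, 0);
 *	zfcp_erp_wait(adapter);
 *	zfcp_erp_thread_kill(adapter);
 */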
1132/*
1133 * purpose: is run as a kernel thread,
1134 * goes through list of error recovery actions of associated adapter
1135 * and delegates single action to execution
1136 *
1137 * returns: 0
1138 */
1139static int
1140zfcp_erp_thread(void *data)
1141{
1142 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
1143 struct list_head *next;
1144 struct zfcp_erp_action *erp_action;
1145 unsigned long flags;
1146
1147 daemonize("zfcperp%s", zfcp_get_busid_by_adapter(adapter));
1148 /* Block all signals */
1149 siginitsetinv(&current->blocked, 0);
1150 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
1151 debug_text_event(adapter->erp_dbf, 5, "a_th_run");
1152 wake_up(&adapter->erp_thread_wqh);
1153
1154 while (!atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
1155 &adapter->status)) {
1156
1157 write_lock_irqsave(&adapter->erp_lock, flags);
1158 next = adapter->erp_ready_head.prev;
1159 write_unlock_irqrestore(&adapter->erp_lock, flags);
1160
1161 if (next != &adapter->erp_ready_head) {
1162 erp_action =
1163 list_entry(next, struct zfcp_erp_action, list);
1164 /*
1165 * process action (incl. [re]moving it
1166 * from 'ready' queue)
1167 */
1168 zfcp_erp_strategy(erp_action);
1169 }
1170
1171 /*
1172 * sleep as long as there is nothing to do, i.e.
1173 * no action in 'ready' queue to be processed and
1174 * thread is not to be killed
1175 */
1176 down_interruptible(&adapter->erp_ready_sem);
1177 debug_text_event(adapter->erp_dbf, 5, "a_th_woken");
1178 }
1179
1180 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
1181 debug_text_event(adapter->erp_dbf, 5, "a_th_stop");
1182 wake_up(&adapter->erp_thread_wqh);
1183
1184 return 0;
1185}
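/*
 * The loop above sleeps on erp_ready_sem and is woken whenever an
 * action is put on the 'ready' queue. A sketch of the producer side,
 * assuming zfcp_erp_action_ready() behaves as its call sites suggest;
 * the body shown here is an assumption, not the driver's code:
 */
#if 0
static void sketch_erp_action_ready(struct zfcp_erp_action *erp_action)
{
	struct zfcp_adapter *adapter = erp_action->adapter;

	/* move the action from 'running' to 'ready' (erp_lock held) */
	list_move(&erp_action->list, &adapter->erp_ready_head);
	/* wake the erp thread sleeping in down_interruptible() above */
	up(&adapter->erp_ready_sem);
}
#endif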
1186
1187/*
1188 * function:
1189 *
1190 * purpose: drives a single error recovery action and schedules higher and
1191 * subordinate actions, if necessary
1192 *
1193 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
1194 * ZFCP_ERP_SUCCEEDED - action finished successfully (dequeued)
1195 * ZFCP_ERP_FAILED - action finished unsuccessfully (dequeued)
1196 * ZFCP_ERP_EXIT - action finished (dequeued), offline
1197 * ZFCP_ERP_DISMISSED - action canceled (dequeued)
1198 */
1199static int
1200zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1201{
1202 int retval = 0;
1203 struct zfcp_adapter *adapter = erp_action->adapter;
1204 struct zfcp_port *port = erp_action->port;
1205 struct zfcp_unit *unit = erp_action->unit;
1206 int action = erp_action->action;
1207 u32 status = erp_action->status;
1208 unsigned long flags;
1209
1210 /* serialise dismissing, timing out, moving, enqueueing */
1211 read_lock_irqsave(&zfcp_data.config_lock, flags);
1212 write_lock(&adapter->erp_lock);
1213
1214 /* dequeue dismissed action and leave, if required */
1215 retval = zfcp_erp_strategy_check_action(erp_action, retval);
1216 if (retval == ZFCP_ERP_DISMISSED) {
1217 debug_text_event(adapter->erp_dbf, 4, "a_st_dis1");
1218 goto unlock;
1219 }
1220
1221 /*
1222 * move action to 'running' queue before processing it
1223 * (to avoid a race condition regarding moving the
1224 * action to the 'running' queue and back)
1225 */
1226 zfcp_erp_action_to_running(erp_action);
1227
1228 /*
1229 * try to process action as far as possible,
1230 * no lock to allow for blocking operations (kmalloc, qdio, ...),
1231 * afterwards the lock is required again for the following reasons:
1232 * - dequeueing of finished action and enqueueing of
1233 * follow-up actions must be atomic so that any other
1234 * reopen-routine does not believe there is nothing to do
1235 * and that it is safe to enqueue something else,
1236 * - we want to force any control thread which is dismissing
1237 * actions to finish this before we decide about
1238 * necessary steps to be taken here further
1239 */
1240 write_unlock(&adapter->erp_lock);
1241 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1242 retval = zfcp_erp_strategy_do_action(erp_action);
1243 read_lock_irqsave(&zfcp_data.config_lock, flags);
1244 write_lock(&adapter->erp_lock);
1245
1246 /*
1247 * check for dismissed status again to avoid follow-up actions,
1248 * failing of targets and so on for dismissed actions
1249 */
1250 retval = zfcp_erp_strategy_check_action(erp_action, retval);
1251
1252 switch (retval) {
1253 case ZFCP_ERP_DISMISSED:
1254 /* leave, since this action has been dismissed by a superior action */
1255 debug_text_event(adapter->erp_dbf, 6, "a_st_dis2");
1256 goto unlock;
1257 case ZFCP_ERP_NOMEM:
1258 /* no memory to continue immediately, let it sleep */
1259 if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
1260 ++adapter->erp_low_mem_count;
1261 erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
1262 }
1263 /* This condition is true if there is no memory available
1264 for any erp_action on this adapter. This implies that there
1265 are no elements in the memory pool(s) left for erp_actions.
1266 This might happen if an erp_action that used a memory pool
1267 element was timed out.
1268 */
1269 if (adapter->erp_total_count == adapter->erp_low_mem_count) {
1270 debug_text_event(adapter->erp_dbf, 3, "a_st_lowmem");
1271 ZFCP_LOG_NORMAL("error: no mempool elements available, "
1272 "restarting I/O on adapter %s "
1273 "to free mempool\n",
1274 zfcp_get_busid_by_adapter(adapter));
1275 zfcp_erp_adapter_reopen_internal(adapter, 0);
1276 } else {
1277 debug_text_event(adapter->erp_dbf, 2, "a_st_memw");
1278 retval = zfcp_erp_strategy_memwait(erp_action);
1279 }
1280 goto unlock;
1281 case ZFCP_ERP_CONTINUES:
1282 /* leave since this action runs asynchronously */
1283 debug_text_event(adapter->erp_dbf, 6, "a_st_cont");
1284 if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
1285 --adapter->erp_low_mem_count;
1286 erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
1287 }
1288 goto unlock;
1289 }
1290 /* ok, finished action (whatever its result is) */
1291
1292 /* check for unrecoverable targets */
1293 retval = zfcp_erp_strategy_check_target(erp_action, retval);
1294
1295 /* action must be dequeued (here to allow for further ones) */
1296 zfcp_erp_action_dequeue(erp_action);
1297
1298 /*
1299 * put this target through the erp mill again if someone has
1300 * requested to change the status of a target being online
1301 * to offline or the other way around
1302 * (old retval is preserved if nothing has to be done here)
1303 */
1304 retval = zfcp_erp_strategy_statechange(action, status, adapter,
1305 port, unit, retval);
1306
1307 /*
1308 * leave if target is in permanent error state or if
1309 * action is repeated in order to process state change
1310 */
1311 if (retval == ZFCP_ERP_EXIT) {
1312 debug_text_event(adapter->erp_dbf, 2, "a_st_exit");
1313 goto unlock;
1314 }
1315
1316 /* trigger follow up actions */
1317 zfcp_erp_strategy_followup_actions(action, adapter, port, unit, retval);
1318
1319 unlock:
1320 write_unlock(&adapter->erp_lock);
1321 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1322
1323 if (retval != ZFCP_ERP_CONTINUES)
1324 zfcp_erp_action_cleanup(action, adapter, port, unit, retval);
1325
1326 /*
1327 * a few tasks remain when the erp queues are empty
1328 * (don't do that if the last action evaluated was dismissed,
1329 * since this clearly indicates that there is more to come):
1330 * - close the name server port if it is still open
1331 * (enqueues another [probably] final action)
1332 * - otherwise, wake up whoever wants to be woken when we are
1333 * done with erp
1334 */
1335 if (retval != ZFCP_ERP_DISMISSED)
1336 zfcp_erp_strategy_check_queues(adapter);
1337
1338 debug_text_event(adapter->erp_dbf, 6, "a_st_done");
1339
1340 return retval;
1341}
1342
1343/*
1344 * function: zfcp_erp_strategy_check_action
1345 *
1346 * purpose: dequeues the action if it has been dismissed meanwhile
1347 *
1348 * returns: ZFCP_ERP_DISMISSED - if action has been dismissed
1349 * retval - otherwise
1350 */
1351static int
1352zfcp_erp_strategy_check_action(struct zfcp_erp_action *erp_action, int retval)
1353{
1354 struct zfcp_adapter *adapter = erp_action->adapter;
1355
1356 zfcp_erp_strategy_check_fsfreq(erp_action);
1357
1358 debug_event(adapter->erp_dbf, 5, &erp_action->action, sizeof (int));
1359 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
1360 debug_text_event(adapter->erp_dbf, 3, "a_stcd_dis");
1361 zfcp_erp_action_dequeue(erp_action);
1362 retval = ZFCP_ERP_DISMISSED;
1363 } else
1364 debug_text_event(adapter->erp_dbf, 5, "a_stcd_nodis");
1365
1366 return retval;
1367}
1368
1369/*
1370 * function: zfcp_erp_strategy_do_action
1371 *
1372 * purpose: dispatches the action to the strategy routine matching its type
1373 *
1374 * returns: return value of the selected strategy routine
1375 */
1376static int
1377zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
1378{
1379 int retval = ZFCP_ERP_FAILED;
1380 struct zfcp_adapter *adapter = erp_action->adapter;
1381
1382 /*
1383 * try to execute/continue action as far as possible,
1384 * note: no lock in subsequent strategy routines
1385 * (this allows these routines to call schedule, e.g.
1386 * kmalloc with such flags or qdio_initialize & friends)
1387 * Note: in case of timeout, the separate strategies will fail
1388 * anyhow. No need for a special action. Even worse, a nameserver
1389 * failure would not wake up waiting ports without the call.
1390 */
1391 switch (erp_action->action) {
1392
1393 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1394 retval = zfcp_erp_adapter_strategy(erp_action);
1395 break;
1396
1397 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1398 retval = zfcp_erp_port_forced_strategy(erp_action);
1399 break;
1400
1401 case ZFCP_ERP_ACTION_REOPEN_PORT:
1402 retval = zfcp_erp_port_strategy(erp_action);
1403 break;
1404
1405 case ZFCP_ERP_ACTION_REOPEN_UNIT:
1406 retval = zfcp_erp_unit_strategy(erp_action);
1407 break;
1408
1409 default:
1410 debug_text_exception(adapter->erp_dbf, 1, "a_stda_bug");
1411 debug_event(adapter->erp_dbf, 1, &erp_action->action,
1412 sizeof (int));
1413 ZFCP_LOG_NORMAL("bug: unknown erp action requested on "
1414 "adapter %s (action=%d)\n",
1415 zfcp_get_busid_by_adapter(erp_action->adapter),
1416 erp_action->action);
1417 }
1418
1419 return retval;
1420}
1421
1422/*
1423 * function:
1424 *
1425 * purpose: triggers retry of this action after a certain amount of time
1426 * by means of timer provided by erp_action
1427 *
1428 * returns: ZFCP_ERP_CONTINUES - erp_action sleeps in erp running queue
1429 */
1430static int
1431zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
1432{
1433 int retval = ZFCP_ERP_CONTINUES;
1434 struct zfcp_adapter *adapter = erp_action->adapter;
1435
1436 debug_text_event(adapter->erp_dbf, 6, "a_mwinit");
1437 debug_event(adapter->erp_dbf, 6, &erp_action->action, sizeof (int));
1438 init_timer(&erp_action->timer);
1439 erp_action->timer.function = zfcp_erp_memwait_handler;
1440 erp_action->timer.data = (unsigned long) erp_action;
1441 erp_action->timer.expires = jiffies + ZFCP_ERP_MEMWAIT_TIMEOUT;
1442 add_timer(&erp_action->timer);
1443
1444 return retval;
1445}
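/*
 * The matching timer handler is not part of this hunk. A sketch under
 * the assumption that zfcp_erp_memwait_handler() simply re-activates
 * the sleeping action through a locked zfcp_erp_async_handler()
 * wrapper (around the _nolock variant seen earlier), with no extra
 * status bits:
 */
#if 0
static void sketch_erp_memwait_handler(unsigned long data)
{
	struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;

	/* put the action back on the 'ready' queue and wake the thread */
	zfcp_erp_async_handler(erp_action, 0);
}
#endif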
1446
1447/*
1448 * function: zfcp_erp_adapter_failed
1449 *
1450 * purpose: sets the adapter and all underlying devices to ERP_FAILED
1451 *
1452 */
1453void
1454zfcp_erp_adapter_failed(struct zfcp_adapter *adapter)
1455{
1456 zfcp_erp_modify_adapter_status(adapter,
1457 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1458 ZFCP_LOG_NORMAL("adapter erp failed on adapter %s\n",
1459 zfcp_get_busid_by_adapter(adapter));
1460 debug_text_event(adapter->erp_dbf, 2, "a_afail");
1461}
1462
1463/*
1464 * function: zfcp_erp_port_failed
1465 *
1466 * purpose: sets the port and all underlying devices to ERP_FAILED
1467 *
1468 */
1469void
1470zfcp_erp_port_failed(struct zfcp_port *port)
1471{
1472 zfcp_erp_modify_port_status(port,
1473 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1474
1475 if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
1476 ZFCP_LOG_NORMAL("port erp failed (adapter %s, "
1477 "port d_id=0x%08x)\n",
1478 zfcp_get_busid_by_port(port), port->d_id);
1479 else
1480 ZFCP_LOG_NORMAL("port erp failed (adapter %s, wwpn=0x%016Lx)\n",
1481 zfcp_get_busid_by_port(port), port->wwpn);
1482
1483 debug_text_event(port->adapter->erp_dbf, 2, "p_pfail");
1484 debug_event(port->adapter->erp_dbf, 2, &port->wwpn, sizeof (wwn_t));
1485}
1486
1487/*
1488 * function: zfcp_erp_unit_failed
1489 *
1490 * purpose: sets the unit to ERP_FAILED
1491 *
1492 */
1493void
1494zfcp_erp_unit_failed(struct zfcp_unit *unit)
1495{
1496 zfcp_erp_modify_unit_status(unit,
1497 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1498
1499 ZFCP_LOG_NORMAL("unit erp failed on unit 0x%016Lx on port 0x%016Lx "
1500 " on adapter %s\n", unit->fcp_lun,
1501 unit->port->wwpn, zfcp_get_busid_by_unit(unit));
1502 debug_text_event(unit->port->adapter->erp_dbf, 2, "u_ufail");
1503 debug_event(unit->port->adapter->erp_dbf, 2,
1504 &unit->fcp_lun, sizeof (fcp_lun_t));
1505}
1506
1507/*
1508 * function: zfcp_erp_strategy_check_target
1509 *
1510 * purpose: increments the erp action count on the device currently in
1511 * recovery if the action failed or resets the count in case of
1512 * success. If a maximum count is exceeded the device is marked
1513 * as ERP_FAILED.
1514 * The 'blocked' state of a target which has been recovered
1515 * successfully is reset.
1516 *
1517 * returns: ZFCP_ERP_CONTINUES - action continues (not considered)
1518 * ZFCP_ERP_SUCCEEDED - action finished successfully
1519 * ZFCP_ERP_EXIT - action failed and will not continue
1520 */
1521static int
1522zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action, int result)
1523{
1524 struct zfcp_adapter *adapter = erp_action->adapter;
1525 struct zfcp_port *port = erp_action->port;
1526 struct zfcp_unit *unit = erp_action->unit;
1527
1528 debug_text_event(adapter->erp_dbf, 5, "a_stct_norm");
1529 debug_event(adapter->erp_dbf, 5, &erp_action->action, sizeof (int));
1530 debug_event(adapter->erp_dbf, 5, &result, sizeof (int));
1531
1532 switch (erp_action->action) {
1533
1534 case ZFCP_ERP_ACTION_REOPEN_UNIT:
1535 result = zfcp_erp_strategy_check_unit(unit, result);
1536 break;
1537
1538 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1539 case ZFCP_ERP_ACTION_REOPEN_PORT:
1540 result = zfcp_erp_strategy_check_port(port, result);
1541 break;
1542
1543 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1544 result = zfcp_erp_strategy_check_adapter(adapter, result);
1545 break;
1546 }
1547
1548 return result;
1549}
1550
1551/*
1552 * function: zfcp_erp_strategy_statechange
1553 *
1554 * purpose: re-triggers recovery if a state change (online <-> offline)
1555 * was requested for the target while the action was running
1556 * returns: ZFCP_ERP_EXIT if recovery was re-triggered, retval otherwise
1557 */
1558static int
1559zfcp_erp_strategy_statechange(int action,
1560 u32 status,
1561 struct zfcp_adapter *adapter,
1562 struct zfcp_port *port,
1563 struct zfcp_unit *unit, int retval)
1564{
1565 debug_text_event(adapter->erp_dbf, 3, "a_stsc");
1566 debug_event(adapter->erp_dbf, 3, &action, sizeof (int));
1567
1568 switch (action) {
1569
1570 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1571 if (zfcp_erp_strategy_statechange_detected(&adapter->status,
1572 status)) {
1573 zfcp_erp_adapter_reopen_internal(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
1574 retval = ZFCP_ERP_EXIT;
1575 }
1576 break;
1577
1578 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1579 case ZFCP_ERP_ACTION_REOPEN_PORT:
1580 if (zfcp_erp_strategy_statechange_detected(&port->status,
1581 status)) {
1582 zfcp_erp_port_reopen_internal(port, ZFCP_STATUS_COMMON_ERP_FAILED);
1583 retval = ZFCP_ERP_EXIT;
1584 }
1585 break;
1586
1587 case ZFCP_ERP_ACTION_REOPEN_UNIT:
1588 if (zfcp_erp_strategy_statechange_detected(&unit->status,
1589 status)) {
1590 zfcp_erp_unit_reopen_internal(unit, ZFCP_STATUS_COMMON_ERP_FAILED);
1591 retval = ZFCP_ERP_EXIT;
1592 }
1593 break;
1594 }
1595
1596 return retval;
1597}
1598
1599/*
1600 * function: zfcp_erp_strategy_statechange_detected
1601 *
1602 * purpose: checks whether a state change (online <-> offline) has been
1603 * requested for the target while recovery was running
1604 * returns: !0 if a state change was detected, 0 otherwise
1605 */
1606static inline int
1607zfcp_erp_strategy_statechange_detected(atomic_t * target_status, u32 erp_status)
1608{
1609 return
1610 /* take it online */
1611 (atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, target_status) &&
1612 (ZFCP_STATUS_ERP_CLOSE_ONLY & erp_status)) ||
1613 /* take it offline */
1614 (!atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, target_status) &&
1615 !(ZFCP_STATUS_ERP_CLOSE_ONLY & erp_status));
1616}
1617
1618/*
1619 * function: zfcp_erp_strategy_check_unit
1620 *
1621 * purpose: resets or increments the unit erp counter depending on the
1622 * result and marks the unit as failed once the limit is exceeded
1623 * returns: result, possibly changed to ZFCP_ERP_EXIT
1624 */
1625static int
1626zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
1627{
1628 debug_text_event(unit->port->adapter->erp_dbf, 5, "u_stct");
1629 debug_event(unit->port->adapter->erp_dbf, 5, &unit->fcp_lun,
1630 sizeof (fcp_lun_t));
1631
1632 switch (result) {
1633 case ZFCP_ERP_SUCCEEDED :
1634 atomic_set(&unit->erp_counter, 0);
1635 zfcp_erp_unit_unblock(unit);
1636 break;
1637 case ZFCP_ERP_FAILED :
1638 atomic_inc(&unit->erp_counter);
1639 if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS)
1640 zfcp_erp_unit_failed(unit);
1641 break;
1642 case ZFCP_ERP_EXIT :
1643 /* nothing */
1644 break;
1645 }
1646
1647 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status)) {
1648 zfcp_erp_unit_block(unit, 0); /* for ZFCP_ERP_SUCCEEDED */
1649 result = ZFCP_ERP_EXIT;
1650 }
1651
1652 return result;
1653}
1654
1655/*
1656 * function: zfcp_erp_strategy_check_port
1657 *
1658 * purpose: resets or increments the port erp counter depending on the
1659 * result and marks the port as failed once the limit is exceeded
1660 * returns: result, possibly changed to ZFCP_ERP_EXIT
1661 */
1662static int
1663zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
1664{
1665 debug_text_event(port->adapter->erp_dbf, 5, "p_stct");
1666 debug_event(port->adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
1667
1668 switch (result) {
1669 case ZFCP_ERP_SUCCEEDED :
1670 atomic_set(&port->erp_counter, 0);
1671 zfcp_erp_port_unblock(port);
1672 break;
1673 case ZFCP_ERP_FAILED :
1674 atomic_inc(&port->erp_counter);
1675 if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS)
1676 zfcp_erp_port_failed(port);
1677 break;
1678 case ZFCP_ERP_EXIT :
1679 /* nothing */
1680 break;
1681 }
1682
1683 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
1684 zfcp_erp_port_block(port, 0); /* for ZFCP_ERP_SUCCEEDED */
1685 result = ZFCP_ERP_EXIT;
1686 }
1687
1688 return result;
1689}
1690
1691/*
1692 * function: zfcp_erp_strategy_check_adapter
1693 *
1694 * purpose: resets or increments the adapter erp counter depending on the
1695 * result and marks the adapter as failed once the limit is exceeded
1696 * returns: result, possibly changed to ZFCP_ERP_EXIT
1697 */
1698static int
1699zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter, int result)
1700{
1701 debug_text_event(adapter->erp_dbf, 5, "a_stct");
1702
1703 switch (result) {
1704 case ZFCP_ERP_SUCCEEDED :
1705 atomic_set(&adapter->erp_counter, 0);
1706 zfcp_erp_adapter_unblock(adapter);
1707 break;
1708 case ZFCP_ERP_FAILED :
1709 atomic_inc(&adapter->erp_counter);
1710 if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS)
1711 zfcp_erp_adapter_failed(adapter);
1712 break;
1713 case ZFCP_ERP_EXIT :
1714 /* nothing */
1715 break;
1716 }
1717
1718 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) {
1719 zfcp_erp_adapter_block(adapter, 0); /* for ZFCP_ERP_SUCCEEDED */
1720 result = ZFCP_ERP_EXIT;
1721 }
1722
1723 return result;
1724}
1725
1726/*
1727 * function: zfcp_erp_strategy_followup_actions
1728 *
1729 * purpose: triggers the next subordinate actions in good cases,
1730 * escalates to the superior action in bad cases
1731 *
1732 * returns: 0
1733 */
1734static int
1735zfcp_erp_strategy_followup_actions(int action,
1736 struct zfcp_adapter *adapter,
1737 struct zfcp_port *port,
1738 struct zfcp_unit *unit, int status)
1739{
1740 debug_text_event(adapter->erp_dbf, 5, "a_stfol");
1741 debug_event(adapter->erp_dbf, 5, &action, sizeof (int));
1742
1743 /* initiate follow-up actions depending on success of finished action */
1744 switch (action) {
1745
1746 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1747 if (status == ZFCP_ERP_SUCCEEDED)
1748 zfcp_erp_port_reopen_all_internal(adapter, 0);
1749 else
1750 zfcp_erp_adapter_reopen_internal(adapter, 0);
1751 break;
1752
1753 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1754 if (status == ZFCP_ERP_SUCCEEDED)
1755 zfcp_erp_port_reopen_internal(port, 0);
1756 else
1757 zfcp_erp_adapter_reopen_internal(adapter, 0);
1758 break;
1759
1760 case ZFCP_ERP_ACTION_REOPEN_PORT:
1761 if (status == ZFCP_ERP_SUCCEEDED)
1762 zfcp_erp_unit_reopen_all_internal(port, 0);
1763 else
1764 zfcp_erp_port_forced_reopen_internal(port, 0);
1765 break;
1766
1767 case ZFCP_ERP_ACTION_REOPEN_UNIT:
1768 if (status != ZFCP_ERP_SUCCEEDED) /* else: no further action */
1769 zfcp_erp_port_reopen_internal(unit->port, 0);
1771 break;
1772 }
1773
1774 return 0;
1775}
1776
1777/*
1778 * function: zfcp_erp_strategy_check_queues
1779 *
1780 * purpose: clears the erp-pending flag and wakes up waiters once both
1781 * erp queues (ready and running) of the adapter are empty
1782 * returns: 0
1783 */
1784static int
1785zfcp_erp_strategy_check_queues(struct zfcp_adapter *adapter)
1786{
1787 unsigned long flags;
1788
1789 read_lock_irqsave(&zfcp_data.config_lock, flags);
1790 read_lock(&adapter->erp_lock);
1791 if (list_empty(&adapter->erp_ready_head) &&
1792 list_empty(&adapter->erp_running_head)) {
1793 debug_text_event(adapter->erp_dbf, 4, "a_cq_wake");
1794 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
1795 &adapter->status);
1796 wake_up(&adapter->erp_done_wqh);
1797 } else
1798 debug_text_event(adapter->erp_dbf, 5, "a_cq_notempty");
1799 read_unlock(&adapter->erp_lock);
1800 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1801
1802 return 0;
1803}
1804
1805/**
1806 * zfcp_erp_wait - wait for completion of error recovery on an adapter
1807 * @adapter: adapter for which to wait for completion of its error recovery
1808 * Return: 0
1809 */
1810int
1811zfcp_erp_wait(struct zfcp_adapter *adapter)
1812{
1813 int retval = 0;
1814
1815 wait_event(adapter->erp_done_wqh,
1816 !atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
1817 &adapter->status));
1818
1819 return retval;
1820}
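/*
 * Typical usage of zfcp_erp_wait(), e.g. from an ioctl or sysfs path:
 * trigger recovery, then block until the erp queues drain. This sketch
 * assumes a public zfcp_erp_adapter_reopen() wrapper around the
 * _internal variant used elsewhere in this file:
 */
#if 0
static int sketch_force_adapter_recovery(struct zfcp_adapter *adapter)
{
	zfcp_erp_adapter_reopen(adapter, 0);	/* enqueue the action */
	return zfcp_erp_wait(adapter);		/* wait until erp is idle */
}
#endif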
1821
1822/*
1823 * function: zfcp_erp_modify_adapter_status
1824 *
1825 * purpose: sets or clears status bits of the adapter and, for common
1826 * flags, of all its ports (and, via those, their units)
1827 */
1828void
1829zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter,
1830 u32 mask, int set_or_clear)
1831{
1832 struct zfcp_port *port;
1833 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1834
1835 if (set_or_clear == ZFCP_SET) {
1836 atomic_set_mask(mask, &adapter->status);
1837 debug_text_event(adapter->erp_dbf, 3, "a_mod_as_s");
1838 } else {
1839 atomic_clear_mask(mask, &adapter->status);
1840 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1841 atomic_set(&adapter->erp_counter, 0);
1842 debug_text_event(adapter->erp_dbf, 3, "a_mod_as_c");
1843 }
1844 debug_event(adapter->erp_dbf, 3, &mask, sizeof (u32));
1845
1846 /* Deal with all underlying devices, only pass common_mask */
1847 if (common_mask)
1848 list_for_each_entry(port, &adapter->port_list_head, list)
1849 zfcp_erp_modify_port_status(port, common_mask,
1850 set_or_clear);
1851}
1852
1853/*
1854 * function: zfcp_erp_modify_port_status
1855 *
1856 * purpose: sets or clears status bits of the port and, for common flags, of all its units
1857 *
1858 */
1859void
1860zfcp_erp_modify_port_status(struct zfcp_port *port, u32 mask, int set_or_clear)
1861{
1862 struct zfcp_unit *unit;
1863 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1864
1865 if (set_or_clear == ZFCP_SET) {
1866 atomic_set_mask(mask, &port->status);
1867 debug_text_event(port->adapter->erp_dbf, 3, "p_mod_ps_s");
1868 } else {
1869 atomic_clear_mask(mask, &port->status);
1870 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1871 atomic_set(&port->erp_counter, 0);
1872 debug_text_event(port->adapter->erp_dbf, 3, "p_mod_ps_c");
1873 }
1874 debug_event(port->adapter->erp_dbf, 3, &port->wwpn, sizeof (wwn_t));
1875 debug_event(port->adapter->erp_dbf, 3, &mask, sizeof (u32));
1876
1877 /* Modify status of all underlying devices, only pass common mask */
1878 if (common_mask)
1879 list_for_each_entry(unit, &port->unit_list_head, list)
1880 zfcp_erp_modify_unit_status(unit, common_mask,
1881 set_or_clear);
1882}
1883
1884/*
1885 * function: zfcp_erp_modify_unit_status
1886 *
1887 * purpose: sets or clears status bits of the unit
1888 *
1889 */
1890void
1891zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u32 mask, int set_or_clear)
1892{
1893 if (set_or_clear == ZFCP_SET) {
1894 atomic_set_mask(mask, &unit->status);
1895 debug_text_event(unit->port->adapter->erp_dbf, 3, "u_mod_us_s");
1896 } else {
1897 atomic_clear_mask(mask, &unit->status);
1898 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
1899 atomic_set(&unit->erp_counter, 0);
1900 }
1901 debug_text_event(unit->port->adapter->erp_dbf, 3, "u_mod_us_c");
1902 }
1903 debug_event(unit->port->adapter->erp_dbf, 3, &unit->fcp_lun,
1904 sizeof (fcp_lun_t));
1905 debug_event(unit->port->adapter->erp_dbf, 3, &mask, sizeof (u32));
1906}
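/*
 * Example use of the modify_*_status helpers above: clearing
 * ZFCP_STATUS_COMMON_ERP_FAILED also resets the erp counter (see the
 * ZFCP_CLEAR branches), which is what re-enables recovery for a target
 * that had previously been given up on. A minimal sketch:
 */
#if 0
static void sketch_unit_make_recoverable(struct zfcp_unit *unit)
{
	/* clear 'failed' state; erp_counter is reset as a side effect */
	zfcp_erp_modify_unit_status(unit, ZFCP_STATUS_COMMON_ERP_FAILED,
				    ZFCP_CLEAR);
}
#endif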
1907
1908/*
1909 * function:
1910 *
1911 * purpose: wrapper for zfcp_erp_port_reopen_all_internal,
1912 * used to ensure the correct locking
1913 *
1914 * returns: 0 - initiated action successfully
1915 * <0 - failed to initiate action
1916 */
1917int
1918zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, int clear_mask)
1919{
1920 int retval;
1921 unsigned long flags;
1922
1923 read_lock_irqsave(&zfcp_data.config_lock, flags);
1924 write_lock(&adapter->erp_lock);
1925 retval = zfcp_erp_port_reopen_all_internal(adapter, clear_mask);
1926 write_unlock(&adapter->erp_lock);
1927 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1928
1929 return retval;
1930}
1931
1932/*
1933 * function: zfcp_erp_port_reopen_all_internal
1934 *
1935 * purpose: initiates a reopen for all non-WKA ports of the adapter
1936 *
1937 * returns: 0
1938 */
1939static int
1940zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *adapter, int clear_mask)
1941{
1942 int retval = 0;
1943 struct zfcp_port *port;
1944
1945 list_for_each_entry(port, &adapter->port_list_head, list)
1946 if (!atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
1947 zfcp_erp_port_reopen_internal(port, clear_mask);
1948
1949 return retval;
1950}
1951
1952/*
1953 * function: zfcp_erp_unit_reopen_all_internal
1954 *
1955 * purpose: initiates a reopen for all units of the port
1956 *
1957 * returns: 0
1958 */
1959static int
1960zfcp_erp_unit_reopen_all_internal(struct zfcp_port *port, int clear_mask)
1961{
1962 int retval = 0;
1963 struct zfcp_unit *unit;
1964
1965 list_for_each_entry(unit, &port->unit_list_head, list)
1966 zfcp_erp_unit_reopen_internal(unit, clear_mask);
1967
1968 return retval;
1969}
1970
1971/*
1972 * function:
1973 *
1974 * purpose: this routine executes the 'Reopen Adapter' action
1975 * (the entire action is processed synchronously, since
1976 * there are no actions which might be run concurrently
1977 * per definition)
1978 *
1979 * returns: ZFCP_ERP_SUCCEEDED - action finished successfully
1980 * ZFCP_ERP_FAILED - action finished unsuccessfully
1981 */
1982static int
1983zfcp_erp_adapter_strategy(struct zfcp_erp_action *erp_action)
1984{
1985 int retval;
1986 struct zfcp_adapter *adapter = erp_action->adapter;
1987
1988 retval = zfcp_erp_adapter_strategy_close(erp_action);
1989 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
1990 retval = ZFCP_ERP_EXIT;
1991 else
1992 retval = zfcp_erp_adapter_strategy_open(erp_action);
1993
1994 debug_text_event(adapter->erp_dbf, 3, "a_ast/ret");
1995 debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
1996 debug_event(adapter->erp_dbf, 3, &retval, sizeof (int));
1997
1998 if (retval == ZFCP_ERP_FAILED) {
1999 ZFCP_LOG_INFO("Waiting to allow the adapter %s "
2000 "to recover itself\n",
2001 zfcp_get_busid_by_adapter(adapter));
2002 msleep(jiffies_to_msecs(ZFCP_TYPE2_RECOVERY_TIME));
2003 }
2004
2005 return retval;
2006}
2007
2008/*
2009 * function: zfcp_erp_adapter_strategy_close
2010 *
2011 * purpose: runs the close part of the generic adapter strategy
2012 *
2013 * returns: ZFCP_ERP_SUCCEEDED - action finished successfully
2014 * ZFCP_ERP_FAILED - action finished unsuccessfully
2015 */
2016static int
2017zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *erp_action)
2018{
2019 int retval;
2020
2021 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING,
2022 &erp_action->adapter->status);
2023 retval = zfcp_erp_adapter_strategy_generic(erp_action, 1);
2024 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING,
2025 &erp_action->adapter->status);
2026
2027 return retval;
2028}
2029
2030/*
2031 * function: zfcp_erp_adapter_strategy_open
2032 *
2033 * purpose: runs the open part of the generic adapter strategy
2034 *
2035 * returns: ZFCP_ERP_SUCCEEDED - action finished successfully
2036 * ZFCP_ERP_FAILED - action finished unsuccessfully
2037 */
2038static int
2039zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *erp_action)
2040{
2041 int retval;
2042
2043 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING,
2044 &erp_action->adapter->status);
2045 retval = zfcp_erp_adapter_strategy_generic(erp_action, 0);
2046 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING,
2047 &erp_action->adapter->status);
2048
2049 return retval;
2050}
2051
2052/*
2053 * function: zfcp_erp_adapter_strategy_generic
2054 *
2055 * purpose: closes the adapter, or opens it by setting up QDIO
2056 * and FSF operation
2057 *
2058 * returns: ZFCP_ERP_SUCCEEDED / ZFCP_ERP_FAILED
2059 */
2060static int
2061zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *erp_action, int close)
2062{
2063 int retval = ZFCP_ERP_SUCCEEDED;
2064
2065 if (close)
2066 goto close_only;
2067
2068 retval = zfcp_erp_adapter_strategy_open_qdio(erp_action);
2069 if (retval != ZFCP_ERP_SUCCEEDED)
2070 goto failed_qdio;
2071
2072 retval = zfcp_erp_adapter_strategy_open_fsf(erp_action);
2073 if (retval != ZFCP_ERP_SUCCEEDED)
2074 goto failed_openfcp;
2075
2076 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &erp_action->adapter->status);
2077 goto out;
2078
2079 close_only:
2080 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
2081 &erp_action->adapter->status);
2082
2083 failed_openfcp:
2084 zfcp_erp_adapter_strategy_close_qdio(erp_action);
2085 zfcp_erp_adapter_strategy_close_fsf(erp_action);
2086 failed_qdio:
2087 out:
2088 return retval;
2089}
2090
2091/*
2092 * function: zfcp_erp_adapter_strategy_open_qdio
2093 *
2094 * purpose: sets up QDIO operation for the specified adapter
2095 *
2096 * returns: ZFCP_ERP_SUCCEEDED - successful setup
2097 * ZFCP_ERP_FAILED - failed setup
2098 */
2099int
2100zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
2101{
2102 int retval;
2103 int i;
2104 volatile struct qdio_buffer_element *sbale;
2105 struct zfcp_adapter *adapter = erp_action->adapter;
2106
2107 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
2108 ZFCP_LOG_NORMAL("bug: second attempt to set up QDIO on "
2109 "adapter %s\n",
2110 zfcp_get_busid_by_adapter(adapter));
2111 goto failed_sanity;
2112 }
2113
2114 if (qdio_establish(&adapter->qdio_init_data) != 0) {
2115 ZFCP_LOG_INFO("error: establishment of QDIO queues failed "
2116 "on adapter %s\n",
2117 zfcp_get_busid_by_adapter(adapter));
2118 goto failed_qdio_establish;
2119 }
2120 debug_text_event(adapter->erp_dbf, 3, "qdio_est");
2121
2122 if (qdio_activate(adapter->ccw_device, 0) != 0) {
2123 ZFCP_LOG_INFO("error: activation of QDIO queues failed "
2124 "on adapter %s\n",
2125 zfcp_get_busid_by_adapter(adapter));
2126 goto failed_qdio_activate;
2127 }
2128 debug_text_event(adapter->erp_dbf, 3, "qdio_act");
2129
2130 /*
2131 * put buffers into the response queue
2132 */
2133 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
2134 sbale = &(adapter->response_queue.buffer[i]->element[0]);
2135 sbale->length = 0;
2136 sbale->flags = SBAL_FLAGS_LAST_ENTRY;
2137 sbale->addr = 0;
2138 }
2139
2140 ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, "
2141 "queue_no=%i, index_in_queue=%i, count=%i)\n",
2142 zfcp_get_busid_by_adapter(adapter),
2143 QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q);
2144
2145 retval = do_QDIO(adapter->ccw_device,
2146 QDIO_FLAG_SYNC_INPUT,
2147 0, 0, QDIO_MAX_BUFFERS_PER_Q, NULL);
2148
2149 if (retval) {
2150 ZFCP_LOG_NORMAL("bug: setup of QDIO failed (retval=%d)\n",
2151 retval);
2152 goto failed_do_qdio;
2153 } else {
2154 adapter->response_queue.free_index = 0;
2155 atomic_set(&adapter->response_queue.free_count, 0);
2156 ZFCP_LOG_DEBUG("%i buffers successfully enqueued to "
2157 "response queue\n", QDIO_MAX_BUFFERS_PER_Q);
2158 }
2159 /* set index of first available SBAL / number of available SBALs */
2160 adapter->request_queue.free_index = 0;
2161 atomic_set(&adapter->request_queue.free_count, QDIO_MAX_BUFFERS_PER_Q);
2162 adapter->request_queue.distance_from_int = 0;
2163
2164 /* initialize waitqueue used to wait for free SBALs in requests queue */
2165 init_waitqueue_head(&adapter->request_wq);
2166
2167 /* ok, we did it - skip all cleanups for different failures */
2168 atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
2169 retval = ZFCP_ERP_SUCCEEDED;
2170 goto out;
2171
2172 failed_do_qdio:
2173 /* NOP */
2174
2175 failed_qdio_activate:
2176 debug_text_event(adapter->erp_dbf, 3, "qdio_down1a");
2177 while (qdio_shutdown(adapter->ccw_device,
2178 QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
2179 msleep(1000);
2180 debug_text_event(adapter->erp_dbf, 3, "qdio_down1b");
2181
2182 failed_qdio_establish:
2183 failed_sanity:
2184 retval = ZFCP_ERP_FAILED;
2185
2186 out:
2187 return retval;
2188}
2189
2190/*
2191 * function: zfcp_erp_adapter_strategy_close_qdio
2192 *
2193 * purpose: cleans up QDIO operation for the specified adapter
2194 *
2195 * returns: ZFCP_ERP_SUCCEEDED - successful cleanup
2196 * ZFCP_ERP_FAILED - failed cleanup
2197 */
2198int
2199zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2200{
2201 int retval = ZFCP_ERP_SUCCEEDED;
2202 int first_used;
2203 int used_count;
2204 struct zfcp_adapter *adapter = erp_action->adapter;
2205
2206 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
2207 ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO "
2208 "queues on adapter %s\n",
2209 zfcp_get_busid_by_adapter(adapter));
2210 retval = ZFCP_ERP_FAILED;
2211 goto out;
2212 }
2213
2214 /*
2215 * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that
2216 * do_QDIO won't be called while qdio_shutdown is in progress.
2217 */
2218
2219 write_lock_irq(&adapter->request_queue.queue_lock);
2220 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
2221 write_unlock_irq(&adapter->request_queue.queue_lock);
2222
2223 debug_text_event(adapter->erp_dbf, 3, "qdio_down2a");
2224 while (qdio_shutdown(adapter->ccw_device,
2225 QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
2226 msleep(1000);
2227 debug_text_event(adapter->erp_dbf, 3, "qdio_down2b");
2228
2229 /*
2230 * First we had to stop QDIO operation.
2231 * Now it is safe to take the following actions.
2232 */
2233
2234 /* Cleanup only necessary when there are unacknowledged buffers */
2235 if (atomic_read(&adapter->request_queue.free_count)
2236 < QDIO_MAX_BUFFERS_PER_Q) {
2237 first_used = (adapter->request_queue.free_index +
2238 atomic_read(&adapter->request_queue.free_count))
2239 % QDIO_MAX_BUFFERS_PER_Q;
2240 used_count = QDIO_MAX_BUFFERS_PER_Q -
2241 atomic_read(&adapter->request_queue.free_count);
2242 zfcp_qdio_zero_sbals(adapter->request_queue.buffer,
2243 first_used, used_count);
2244 }
2245 adapter->response_queue.free_index = 0;
2246 atomic_set(&adapter->response_queue.free_count, 0);
2247 adapter->request_queue.free_index = 0;
2248 atomic_set(&adapter->request_queue.free_count, 0);
2249 adapter->request_queue.distance_from_int = 0;
2250 out:
2251 return retval;
2252}
2253
2254/*
2255 * function: zfcp_erp_adapter_strategy_open_fsf
2256 *
2257 * purpose: initializes FSF operation for the specified adapter
2258 *
2259 * returns: ZFCP_ERP_SUCCEEDED - successful initialization of FSF operation
2260 * ZFCP_ERP_FAILED - failed to initialize FSF operation
2261 */
2262static int
2263zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
2264{
2265 int retval;
2266
2267 /* do 'exchange configuration data' */
2268 retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action);
2269 if (retval == ZFCP_ERP_FAILED)
2270 return retval;
2271
2272 /* start the desired number of Status Reads */
2273 retval = zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
2274 return retval;
2275}
2276
2277/*
2278 * function: zfcp_erp_adapter_strategy_open_fsf_xconfig
2279 *
2280 * purpose: performs 'exchange configuration data', retrying while the
2281 * host connection is still initialising
2282 * returns: ZFCP_ERP_SUCCEEDED / ZFCP_ERP_FAILED
2283 */
2284static int
2285zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
2286{
2287 int retval = ZFCP_ERP_SUCCEEDED;
2288 int retries;
2289 struct zfcp_adapter *adapter = erp_action->adapter;
2290
2291 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
2292 retries = ZFCP_EXCHANGE_CONFIG_DATA_RETRIES;
2293
2294 do {
2295 atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
2296 &adapter->status);
2297 ZFCP_LOG_DEBUG("Doing exchange config data\n");
2298 zfcp_erp_action_to_running(erp_action);
2299 zfcp_erp_timeout_init(erp_action);
2300 if (zfcp_fsf_exchange_config_data(erp_action)) {
2301 retval = ZFCP_ERP_FAILED;
2302 debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf");
2303 ZFCP_LOG_INFO("error: initiation of exchange of "
2304 "configuration data failed for "
2305 "adapter %s\n",
2306 zfcp_get_busid_by_adapter(adapter));
2307 break;
2308 }
2309 debug_text_event(adapter->erp_dbf, 6, "a_fstx_xok");
2310 ZFCP_LOG_DEBUG("Xchange underway\n");
2311
2312 /*
2313 * Why this works:
2314 * Both the normal completion handler as well as the timeout
2315 * handler will do an 'up' when the 'exchange config data'
2316 * request completes or times out. Thus, the signal to go on
2317 * won't be lost utilizing this semaphore.
2318 * Furthermore, this 'adapter_reopen' action is
2319 * guaranteed to be the only action being there (highest action
2320 * which prevents other actions from being created).
2321 * Resulting from that, the wake signal recognized here
2322 * _must_ be the one belonging to the 'exchange config
2323 * data' request.
2324 */
2325 down(&adapter->erp_ready_sem);
2326 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
2327 ZFCP_LOG_INFO("error: exchange of configuration data "
2328 "for adapter %s timed out\n",
2329 zfcp_get_busid_by_adapter(adapter));
2330 break;
2331 }
2332 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
2333 &adapter->status)) {
2334 ZFCP_LOG_DEBUG("host connection still initialising... "
2335 "waiting and retrying...\n");
2336 /* sleep a little bit before retry */
2337 msleep(jiffies_to_msecs(ZFCP_EXCHANGE_CONFIG_DATA_SLEEP));
2338 }
2339 } while ((retries--) &&
2340 atomic_test_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
2341 &adapter->status));
2342
2343 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
2344 &adapter->status)) {
2345 ZFCP_LOG_INFO("error: exchange of configuration data for "
2346 "adapter %s failed\n",
2347 zfcp_get_busid_by_adapter(adapter));
2348 retval = ZFCP_ERP_FAILED;
2349 }
2350
2351 return retval;
2352}
2353
2354/*
2355 * function: zfcp_erp_adapter_strategy_open_fsf_statusread
2356 *
2357 * purpose: sets up the recommended number of unsolicited status reads
2358 *
2359 * returns: ZFCP_ERP_SUCCEEDED / ZFCP_ERP_FAILED
2360 */
2361static int
2362zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action
2363 *erp_action)
2364{
2365 int retval = ZFCP_ERP_SUCCEEDED;
2366 int temp_ret;
2367 struct zfcp_adapter *adapter = erp_action->adapter;
2368 int i;
2369
2370 adapter->status_read_failed = 0;
2371 for (i = 0; i < ZFCP_STATUS_READS_RECOM; i++) {
2372 temp_ret = zfcp_fsf_status_read(adapter, ZFCP_WAIT_FOR_SBAL);
2373 if (temp_ret < 0) {
2374 ZFCP_LOG_INFO("error: set-up of unsolicited status "
2375 "notification failed on adapter %s\n",
2376 zfcp_get_busid_by_adapter(adapter));
2377 retval = ZFCP_ERP_FAILED;
2379 break;
2380 }
2381 }
2382
2383 return retval;
2384}
2385
2386/*
2387 * function: zfcp_erp_adapter_strategy_close_fsf
2388 *
2389 * purpose: cleans up FSF operation for the specified adapter
2390 *
2391 * returns: ZFCP_ERP_SUCCEEDED - FSF operation successfully cleaned up
2392 * ZFCP_ERP_FAILED - failed to clean up FSF operation
2393 */
2394static int
2395zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
2396{
2397 int retval = ZFCP_ERP_SUCCEEDED;
2398 struct zfcp_adapter *adapter = erp_action->adapter;
2399
2400 /*
2401 * wake waiting initiators of requests,
2402 * return SCSI commands (with error status),
2403 * clean up all requests (synchronously)
2404 */
2405 zfcp_fsf_req_dismiss_all(adapter);
2406 /* reset FSF request sequence number */
2407 adapter->fsf_req_seq_no = 0;
2408 /* all ports and units are closed */
2409 zfcp_erp_modify_adapter_status(adapter,
2410 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
2411
2412 return retval;
2413}
2414
2415/*
2416 * function:
2417 *
2418 * purpose: this routine executes the 'Reopen Physical Port' action
2419 *
2420 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2421 * ZFCP_ERP_SUCCEEDED - action finished successfully
2422 * ZFCP_ERP_FAILED - action finished unsuccessfully
2423 */
2424static int
2425zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
2426{
2427 int retval = ZFCP_ERP_FAILED;
2428 struct zfcp_port *port = erp_action->port;
2429 struct zfcp_adapter *adapter = erp_action->adapter;
2430
2431 switch (erp_action->step) {
2432
2433 /*
2434 * FIXME:
2435 * the ULP spec. begs for waiting for outstanding commands
2436 */
2437 case ZFCP_ERP_STEP_UNINITIALIZED:
2438 zfcp_erp_port_strategy_clearstati(port);
2439 /*
2440 * it would be sufficient to test only the normal open flag
2441 * since the phys. open flag cannot be set if the normal
2442 * open flag is unset - however, this is for readability ...
2443 */
2444 if (atomic_test_mask((ZFCP_STATUS_PORT_PHYS_OPEN |
2445 ZFCP_STATUS_COMMON_OPEN),
2446 &port->status)) {
2447 ZFCP_LOG_DEBUG("port 0x%016Lx is open -> trying "
2448 "close physical\n", port->wwpn);
2449 retval =
2450 zfcp_erp_port_forced_strategy_close(erp_action);
2451 } else
2452 retval = ZFCP_ERP_FAILED;
2453 break;
2454
2455 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
2456 if (atomic_test_mask(ZFCP_STATUS_PORT_PHYS_OPEN,
2457 &port->status)) {
2458 ZFCP_LOG_DEBUG("close physical failed for port "
2459 "0x%016Lx\n", port->wwpn);
2460 retval = ZFCP_ERP_FAILED;
2461 } else
2462 retval = ZFCP_ERP_SUCCEEDED;
2463 break;
2464 }
2465
2466 debug_text_event(adapter->erp_dbf, 3, "p_pfst/ret");
2467 debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof (wwn_t));
2468 debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
2469 debug_event(adapter->erp_dbf, 3, &retval, sizeof (int));
2470
2471 return retval;
2472}
2473
2474/*
2475 * function:
2476 *
2477 * purpose: this routine executes the 'Reopen Port' action
2478 *
2479 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2480 * ZFCP_ERP_SUCCEEDED - action finished successfully
2481 * ZFCP_ERP_FAILED - action finished unsuccessfully
2482 */
2483static int
2484zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
2485{
2486 int retval = ZFCP_ERP_FAILED;
2487 struct zfcp_port *port = erp_action->port;
2488 struct zfcp_adapter *adapter = erp_action->adapter;
2489
2490 switch (erp_action->step) {
2491
2492 /*
2493 * FIXME:
2494 * the ULP spec. begs for waiting for outstanding commands
2495 */
2496 case ZFCP_ERP_STEP_UNINITIALIZED:
2497 zfcp_erp_port_strategy_clearstati(port);
2498 if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) {
2499 ZFCP_LOG_DEBUG("port 0x%016Lx is open -> trying "
2500 "close\n", port->wwpn);
2501 retval = zfcp_erp_port_strategy_close(erp_action);
2502 goto out;
2503 } /* else it's already closed, open it */
2504 break;
2505
2506 case ZFCP_ERP_STEP_PORT_CLOSING:
2507 if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) {
2508 ZFCP_LOG_DEBUG("close failed for port 0x%016Lx\n",
2509 port->wwpn);
2510 retval = ZFCP_ERP_FAILED;
2511 goto out;
2512 } /* else it's closed now, open it */
2513 break;
2514 }
2515 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
2516 retval = ZFCP_ERP_EXIT;
2517 else
2518 retval = zfcp_erp_port_strategy_open(erp_action);
2519
2520 out:
2521 debug_text_event(adapter->erp_dbf, 3, "p_pst/ret");
2522 debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof (wwn_t));
2523 debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
2524 debug_event(adapter->erp_dbf, 3, &retval, sizeof (int));
2525
2526 return retval;
2527}
2528
2529/*
2530 * function: zfcp_erp_port_strategy_open
2531 *
2532 * purpose: dispatches to the WKA (nameserver) or common port
2533 * open strategy
2534 * returns: retval of the selected strategy
2535 */
2536static int
2537zfcp_erp_port_strategy_open(struct zfcp_erp_action *erp_action)
2538{
2539 int retval;
2540
2541 if (atomic_test_mask(ZFCP_STATUS_PORT_WKA,
2542 &erp_action->port->status))
2543 retval = zfcp_erp_port_strategy_open_nameserver(erp_action);
2544 else
2545 retval = zfcp_erp_port_strategy_open_common(erp_action);
2546
2547 return retval;
2548}
2549
2550/*
2551 * function: zfcp_erp_port_strategy_open_common
2552 *
2553 * purpose: opens a port step by step (open nameserver port,
2554 * nameserver look-up of the d_id, open port)
2555 * returns: ZFCP_ERP_CONTINUES / ZFCP_ERP_SUCCEEDED / ZFCP_ERP_FAILED / ZFCP_ERP_EXIT
2556 *
2557 * FIXME(design): currently only prepared for fabric (nameserver!)
2558 */
2559static int
2560zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *erp_action)
2561{
2562 int retval = 0;
2563 struct zfcp_adapter *adapter = erp_action->adapter;
2564 struct zfcp_port *port = erp_action->port;
2565
2566 switch (erp_action->step) {
2567
2568 case ZFCP_ERP_STEP_UNINITIALIZED:
2569 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
2570 case ZFCP_ERP_STEP_PORT_CLOSING:
2571 if (!(adapter->nameserver_port)) {
2572 retval = zfcp_nameserver_enqueue(adapter);
2573 if (retval != 0) {
2574 ZFCP_LOG_NORMAL("error: nameserver port "
2575 "unavailable for adapter %s\n",
2576 zfcp_get_busid_by_adapter(adapter));
2577 retval = ZFCP_ERP_FAILED;
2578 break;
2579 }
2580 }
2581 if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
2582 &adapter->nameserver_port->status)) {
2583 ZFCP_LOG_DEBUG("nameserver port is not open -> open "
2584 "nameserver port\n");
2585 /* nameserver port may live again */
2586 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING,
2587 &adapter->nameserver_port->status);
2588 if (zfcp_erp_port_reopen(adapter->nameserver_port, 0)
2589 >= 0) {
2590 erp_action->step =
2591 ZFCP_ERP_STEP_NAMESERVER_OPEN;
2592 retval = ZFCP_ERP_CONTINUES;
2593 } else
2594 retval = ZFCP_ERP_FAILED;
2595 break;
2596 }
2597 /* else nameserver port is already open, fall through */
2598 case ZFCP_ERP_STEP_NAMESERVER_OPEN:
2599 if (!atomic_test_mask(ZFCP_STATUS_COMMON_OPEN,
2600 &adapter->nameserver_port->status)) {
2601 ZFCP_LOG_DEBUG("open failed for nameserver port\n");
2602 retval = ZFCP_ERP_FAILED;
2603 } else {
2604 ZFCP_LOG_DEBUG("nameserver port is open -> "
2605 "nameserver look-up for port 0x%016Lx\n",
2606 port->wwpn);
2607 retval = zfcp_erp_port_strategy_open_common_lookup
2608 (erp_action);
2609 }
2610 break;
2611
2612 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
2613 if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status)) {
2614 if (atomic_test_mask
2615 (ZFCP_STATUS_PORT_INVALID_WWPN, &port->status)) {
2616 ZFCP_LOG_DEBUG("nameserver look-up failed "
2617 "for port 0x%016Lx "
2618 "(misconfigured WWPN?)\n",
2619 port->wwpn);
2620 zfcp_erp_port_failed(port);
2621 retval = ZFCP_ERP_EXIT;
2622 } else {
2623 ZFCP_LOG_DEBUG("nameserver look-up failed for "
2624 "port 0x%016Lx\n", port->wwpn);
2625 retval = ZFCP_ERP_FAILED;
2626 }
2627 } else {
2628 ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%08x -> "
2629 "trying open\n", port->wwpn, port->d_id);
2630 retval = zfcp_erp_port_strategy_open_port(erp_action);
2631 }
2632 break;
2633
2634 case ZFCP_ERP_STEP_PORT_OPENING:
2635 /* D_ID might have changed during open */
2636 if (atomic_test_mask((ZFCP_STATUS_COMMON_OPEN |
2637 ZFCP_STATUS_PORT_DID_DID),
2638 &port->status)) {
2639 ZFCP_LOG_DEBUG("port 0x%016Lx is open\n", port->wwpn);
2640 retval = ZFCP_ERP_SUCCEEDED;
2641 } else {
2642 ZFCP_LOG_DEBUG("open failed for port 0x%016Lx\n",
2643 port->wwpn);
2644 retval = ZFCP_ERP_FAILED;
2645 }
2646 break;
2647
2648 default:
2649 ZFCP_LOG_NORMAL("bug: unknown erp step 0x%08x\n",
2650 erp_action->step);
2651 retval = ZFCP_ERP_FAILED;
2652 }
2653
2654 return retval;
2655}
2656
2657/*
2658 * function: zfcp_erp_port_strategy_open_nameserver
2659 *
2660 * purpose: opens the well-known-address (WKA) nameserver port
2661 *
2662 * returns: ZFCP_ERP_CONTINUES / ZFCP_ERP_SUCCEEDED / ZFCP_ERP_FAILED
2663 */
2664static int
2665zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *erp_action)
2666{
2667 int retval;
2668 struct zfcp_port *port = erp_action->port;
2669
2670 switch (erp_action->step) {
2671
2672 case ZFCP_ERP_STEP_UNINITIALIZED:
2673 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
2674 case ZFCP_ERP_STEP_PORT_CLOSING:
2675 ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%08x -> trying open\n",
2676 port->wwpn, port->d_id);
2677 retval = zfcp_erp_port_strategy_open_port(erp_action);
2678 break;
2679
2680 case ZFCP_ERP_STEP_PORT_OPENING:
2681 if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) {
2682 ZFCP_LOG_DEBUG("WKA port is open\n");
2683 retval = ZFCP_ERP_SUCCEEDED;
2684 } else {
2685 ZFCP_LOG_DEBUG("open failed for WKA port\n");
2686 retval = ZFCP_ERP_FAILED;
2687 }
2688 /* this is needed anyway (don't care about the retval of wakeup) */
2689 ZFCP_LOG_DEBUG("continue other open port operations\n");
2690 zfcp_erp_port_strategy_open_nameserver_wakeup(erp_action);
2691 break;
2692
2693 default:
2694 ZFCP_LOG_NORMAL("bug: unknown erp step 0x%08x\n",
2695 erp_action->step);
2696 retval = ZFCP_ERP_FAILED;
2697 }
2698
2699 return retval;
2700}
2701
2702/*
2703 * function:
2704 *
2705 * purpose: makes the erp thread continue with reopen (physical) port
2706 * actions which have been paused until the name server port
2707 * is opened (or failed)
2708 *
2709 * returns: 0 (a kind of void retval, it's not used)
2710 */
2711static int
2712zfcp_erp_port_strategy_open_nameserver_wakeup(struct zfcp_erp_action
2713 *ns_erp_action)
2714{
2715 int retval = 0;
2716 unsigned long flags;
2717 struct zfcp_adapter *adapter = ns_erp_action->adapter;
2718 struct zfcp_erp_action *erp_action, *tmp;
2719
2720 read_lock_irqsave(&adapter->erp_lock, flags);
2721 list_for_each_entry_safe(erp_action, tmp, &adapter->erp_running_head,
2722 list) {
2723 debug_text_event(adapter->erp_dbf, 4, "p_pstnsw_n");
2724 debug_event(adapter->erp_dbf, 4, &erp_action->port->wwpn,
2725 sizeof (wwn_t));
2726 if (erp_action->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) {
2727 debug_text_event(adapter->erp_dbf, 3, "p_pstnsw_w");
2728 debug_event(adapter->erp_dbf, 3,
2729 &erp_action->port->wwpn, sizeof (wwn_t));
2730 if (atomic_test_mask(
2731 ZFCP_STATUS_COMMON_ERP_FAILED,
2732 &adapter->nameserver_port->status))
2733 zfcp_erp_port_failed(erp_action->port);
2734 zfcp_erp_action_ready(erp_action);
2735 }
2736 }
2737 read_unlock_irqrestore(&adapter->erp_lock, flags);
2738
2739 return retval;
2740}
2741
2742/*
2743 * function: zfcp_erp_port_forced_strategy_close
2744 *
2745 * purpose: sends the 'close physical port' request
2746 *
2747 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2748 * ZFCP_ERP_FAILED - action finished unsuccessfully
2749 */
2750static int
2751zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *erp_action)
2752{
2753 int retval;
2754 struct zfcp_adapter *adapter = erp_action->adapter;
2755 struct zfcp_port *port = erp_action->port;
2756
2757 zfcp_erp_timeout_init(erp_action);
2758 retval = zfcp_fsf_close_physical_port(erp_action);
2759 if (retval == -ENOMEM) {
2760 debug_text_event(adapter->erp_dbf, 5, "o_pfstc_nomem");
2761 debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
2762 retval = ZFCP_ERP_NOMEM;
2763 goto out;
2764 }
2765 erp_action->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING;
2766 if (retval != 0) {
2767 debug_text_event(adapter->erp_dbf, 5, "o_pfstc_cpf");
2768 debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
2769 /* could not send 'close', fail */
2770 retval = ZFCP_ERP_FAILED;
2771 goto out;
2772 }
2773 debug_text_event(adapter->erp_dbf, 6, "o_pfstc_cpok");
2774 debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
2775 retval = ZFCP_ERP_CONTINUES;
2776 out:
2777 return retval;
2778}
2779
2780/*
2781 * function: zfcp_erp_port_strategy_clearstati
2782 *
2783 * purpose: resets port status bits before a reopen attempt
2784 *
2785 * returns: 0
2786 */
2787static int
2788zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
2789{
2790 int retval = 0;
2791 struct zfcp_adapter *adapter = port->adapter;
2792
2793 debug_text_event(adapter->erp_dbf, 5, "p_pstclst");
2794 debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
2795
2796 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
2797 ZFCP_STATUS_COMMON_CLOSING |
2798 ZFCP_STATUS_COMMON_ACCESS_DENIED |
2799 ZFCP_STATUS_PORT_DID_DID |
2800 ZFCP_STATUS_PORT_PHYS_CLOSING |
2801 ZFCP_STATUS_PORT_INVALID_WWPN,
2802 &port->status);
2803 return retval;
2804}
2805
2806/*
2807 * function: zfcp_erp_port_strategy_close
2808 *
2809 * purpose: sends the 'close port' request
2810 *
2811 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2812 * ZFCP_ERP_FAILED - action finished unsuccessfully
2813 */
2814static int
2815zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
2816{
2817 int retval;
2818 struct zfcp_adapter *adapter = erp_action->adapter;
2819 struct zfcp_port *port = erp_action->port;
2820
2821 zfcp_erp_timeout_init(erp_action);
2822 retval = zfcp_fsf_close_port(erp_action);
2823 if (retval == -ENOMEM) {
2824 debug_text_event(adapter->erp_dbf, 5, "p_pstc_nomem");
2825 debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
2826 retval = ZFCP_ERP_NOMEM;
2827 goto out;
2828 }
2829 erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING;
2830 if (retval != 0) {
2831 debug_text_event(adapter->erp_dbf, 5, "p_pstc_cpf");
2832 debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
2833 /* could not send 'close', fail */
2834 retval = ZFCP_ERP_FAILED;
2835 goto out;
2836 }
2837 debug_text_event(adapter->erp_dbf, 6, "p_pstc_cpok");
2838 debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
2839 retval = ZFCP_ERP_CONTINUES;
2840 out:
2841 return retval;
2842}
2843
2844/*
2845 * function: zfcp_erp_port_strategy_open_port
2846 *
2847 * purpose: sends the 'open port' request
2848 *
2849 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2850 * ZFCP_ERP_FAILED - action finished unsuccessfully
2851 */
2852static int
2853zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
2854{
2855 int retval;
2856 struct zfcp_adapter *adapter = erp_action->adapter;
2857 struct zfcp_port *port = erp_action->port;
2858
2859 zfcp_erp_timeout_init(erp_action);
2860 retval = zfcp_fsf_open_port(erp_action);
2861 if (retval == -ENOMEM) {
2862 debug_text_event(adapter->erp_dbf, 5, "p_psto_nomem");
2863 debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
2864 retval = ZFCP_ERP_NOMEM;
2865 goto out;
2866 }
2867 erp_action->step = ZFCP_ERP_STEP_PORT_OPENING;
2868 if (retval != 0) {
2869 debug_text_event(adapter->erp_dbf, 5, "p_psto_opf");
2870 debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
2871 /* could not send 'open', fail */
2872 retval = ZFCP_ERP_FAILED;
2873 goto out;
2874 }
2875 debug_text_event(adapter->erp_dbf, 6, "p_psto_opok");
2876 debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
2877 retval = ZFCP_ERP_CONTINUES;
2878 out:
2879 return retval;
2880}
2881
2882/*
2883 * function: zfcp_erp_port_strategy_open_common_lookup
2884 *
2885 * purpose: sends the nameserver GID_PN look-up request for the port
2886 *
2887 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2888 * ZFCP_ERP_FAILED - action finished unsuccessfully
2889 */
2890static int
2891zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *erp_action)
2892{
2893 int retval;
2894 struct zfcp_adapter *adapter = erp_action->adapter;
2895 struct zfcp_port *port = erp_action->port;
2896
2897 zfcp_erp_timeout_init(erp_action);
2898 retval = zfcp_ns_gid_pn_request(erp_action);
2899 if (retval == -ENOMEM) {
2900 debug_text_event(adapter->erp_dbf, 5, "p_pstn_nomem");
2901 debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
2902 retval = ZFCP_ERP_NOMEM;
2903 goto out;
2904 }
2905 erp_action->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
2906 if (retval != 0) {
2907 debug_text_event(adapter->erp_dbf, 5, "p_pstn_ref");
2908 debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
2909 /* could not send nameserver request, fail */
2910 retval = ZFCP_ERP_FAILED;
2911 goto out;
2912 }
2913 debug_text_event(adapter->erp_dbf, 6, "p_pstn_reok");
2914 debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t));
2915 retval = ZFCP_ERP_CONTINUES;
2916 out:
2917 return retval;
2918}
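/*
 * The four request-sending helpers above (close physical port, close
 * port, open port, nameserver look-up) share one submission pattern.
 * A condensed, behavior-equivalent sketch of that pattern; the helper
 * name and function-pointer form are illustrative, not driver code:
 */
#if 0
static int sketch_erp_submit_step(struct zfcp_erp_action *erp_action,
				  int (*send)(struct zfcp_erp_action *),
				  int next_step)
{
	int retval;

	zfcp_erp_timeout_init(erp_action);
	retval = send(erp_action);
	if (retval == -ENOMEM)
		return ZFCP_ERP_NOMEM;	/* let the strategy sleep and retry */
	erp_action->step = next_step;	/* completion re-enters at this step */
	if (retval != 0)
		return ZFCP_ERP_FAILED;	/* could not send the request */
	return ZFCP_ERP_CONTINUES;	/* wait for asynchronous completion */
}
#endif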
2919
2920/*
2921 * function:
2922 *
2923 * purpose: this routine executes the 'Reopen Unit' action
2924 * currently no retries
2925 *
2926 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2927 * ZFCP_ERP_SUCCEEDED - action finished successfully
2928 * ZFCP_ERP_FAILED - action finished unsuccessfully
2929 */
2930static int
2931zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action)
2932{
2933 int retval = ZFCP_ERP_FAILED;
2934 struct zfcp_unit *unit = erp_action->unit;
2935 struct zfcp_adapter *adapter = erp_action->adapter;
2936
2937 switch (erp_action->step) {
2938
2939 /*
2940 * FIXME:
2941 * the ULP spec. begs for waiting for outstanding commands
2942 */
2943 case ZFCP_ERP_STEP_UNINITIALIZED:
2944 zfcp_erp_unit_strategy_clearstati(unit);
2945 if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) {
2946 ZFCP_LOG_DEBUG("unit 0x%016Lx is open -> "
2947 "trying close\n", unit->fcp_lun);
2948 retval = zfcp_erp_unit_strategy_close(erp_action);
2949 break;
2950 }
2951 /* else it's already closed, fall through */
2952 case ZFCP_ERP_STEP_UNIT_CLOSING:
2953 if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) {
2954 ZFCP_LOG_DEBUG("close failed for unit 0x%016Lx\n",
2955 unit->fcp_lun);
2956 retval = ZFCP_ERP_FAILED;
2957 } else {
2958 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
2959 retval = ZFCP_ERP_EXIT;
2960 else {
2961 ZFCP_LOG_DEBUG("unit 0x%016Lx is not open -> "
2962 "trying open\n", unit->fcp_lun);
2963 retval =
2964 zfcp_erp_unit_strategy_open(erp_action);
2965 }
2966 }
2967 break;
2968
2969 case ZFCP_ERP_STEP_UNIT_OPENING:
2970 if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) {
2971 ZFCP_LOG_DEBUG("unit 0x%016Lx is open\n",
2972 unit->fcp_lun);
2973 retval = ZFCP_ERP_SUCCEEDED;
2974 } else {
2975 ZFCP_LOG_DEBUG("open failed for unit 0x%016Lx\n",
2976 unit->fcp_lun);
2977 retval = ZFCP_ERP_FAILED;
2978 }
2979 break;
2980 }
2981
2982 debug_text_event(adapter->erp_dbf, 3, "u_ust/ret");
2983 debug_event(adapter->erp_dbf, 3, &unit->fcp_lun, sizeof (fcp_lun_t));
2984 debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
2985 debug_event(adapter->erp_dbf, 3, &retval, sizeof (int));
2986 return retval;
2987}
2988
2989/*
2990 * function: zfcp_erp_unit_strategy_clearstati
2991 *
2992 * purpose: resets unit status bits before a reopen attempt
2993 *
2994 * returns: 0
2995 */
2996static int
2997zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit)
2998{
2999 int retval = 0;
3000 struct zfcp_adapter *adapter = unit->port->adapter;
3001
3002 debug_text_event(adapter->erp_dbf, 5, "u_ustclst");
3003 debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t));
3004
3005 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
3006 ZFCP_STATUS_COMMON_CLOSING |
3007 ZFCP_STATUS_COMMON_ACCESS_DENIED |
3008 ZFCP_STATUS_UNIT_SHARED |
3009 ZFCP_STATUS_UNIT_READONLY,
3010 &unit->status);
3011
3012 return retval;
3013}
3014
3015/*
3016 * function: zfcp_erp_unit_strategy_close
3017 *
3018 * purpose: sends the 'close unit' request
3019 *
3020 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
3021 * ZFCP_ERP_FAILED - action finished unsuccessfully
3022 */
3023static int
3024zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action)
3025{
3026 int retval;
3027 struct zfcp_adapter *adapter = erp_action->adapter;
3028 struct zfcp_unit *unit = erp_action->unit;
3029
3030 zfcp_erp_timeout_init(erp_action);
3031 retval = zfcp_fsf_close_unit(erp_action);
3032 if (retval == -ENOMEM) {
3033 debug_text_event(adapter->erp_dbf, 5, "u_ustc_nomem");
3034 debug_event(adapter->erp_dbf, 5, &unit->fcp_lun,
3035 sizeof (fcp_lun_t));
3036 retval = ZFCP_ERP_NOMEM;
3037 goto out;
3038 }
3039 erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING;
3040 if (retval != 0) {
3041 debug_text_event(adapter->erp_dbf, 5, "u_ustc_cuf");
3042 debug_event(adapter->erp_dbf, 5, &unit->fcp_lun,
3043 sizeof (fcp_lun_t));
3044 /* could not send 'close', fail */
3045 retval = ZFCP_ERP_FAILED;
3046 goto out;
3047 }
3048 debug_text_event(adapter->erp_dbf, 6, "u_ustc_cuok");
3049 debug_event(adapter->erp_dbf, 6, &unit->fcp_lun, sizeof (fcp_lun_t));
3050 retval = ZFCP_ERP_CONTINUES;
3051
3052 out:
3053 return retval;
3054}
3055
3056/*
3057 * function:	zfcp_erp_unit_strategy_open
3058 *
3059 * purpose:	submits an FSF 'open unit' request for the unit in recovery
3060 *
3061 * returns:	ZFCP_ERP_CONTINUES	- action continues (asynchronously)
3062 *		ZFCP_ERP_NOMEM - out of memory / ZFCP_ERP_FAILED - failure
3063 */
3064static int
3065zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action)
3066{
3067 int retval;
3068 struct zfcp_adapter *adapter = erp_action->adapter;
3069 struct zfcp_unit *unit = erp_action->unit;
3070
3071 zfcp_erp_timeout_init(erp_action);
3072 retval = zfcp_fsf_open_unit(erp_action);
3073 if (retval == -ENOMEM) {
3074 debug_text_event(adapter->erp_dbf, 5, "u_usto_nomem");
3075 debug_event(adapter->erp_dbf, 5, &unit->fcp_lun,
3076 sizeof (fcp_lun_t));
3077 retval = ZFCP_ERP_NOMEM;
3078 goto out;
3079 }
3080 erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING;
3081 if (retval != 0) {
3082 debug_text_event(adapter->erp_dbf, 5, "u_usto_ouf");
3083 debug_event(adapter->erp_dbf, 5, &unit->fcp_lun,
3084 sizeof (fcp_lun_t));
3085 /* could not send 'open', fail */
3086 retval = ZFCP_ERP_FAILED;
3087 goto out;
3088 }
3089 debug_text_event(adapter->erp_dbf, 6, "u_usto_ouok");
3090 debug_event(adapter->erp_dbf, 6, &unit->fcp_lun, sizeof (fcp_lun_t));
3091 retval = ZFCP_ERP_CONTINUES;
3092 out:
3093 return retval;
3094}
3095
3096/*
3097 * function:	zfcp_erp_timeout_init
3098 *
3099 * purpose:	sets up the timer limiting the lifetime of the FSF
3100 *		request associated with an erp action
3101 * returns:	nothing
3102 */
3103static inline void
3104zfcp_erp_timeout_init(struct zfcp_erp_action *erp_action)
3105{
3106 init_timer(&erp_action->timer);
3107 erp_action->timer.function = zfcp_erp_timeout_handler;
3108 erp_action->timer.data = (unsigned long) erp_action;
3109 /* jiffies will be added in zfcp_fsf_req_send */
3110 erp_action->timer.expires = ZFCP_ERP_FSFREQ_TIMEOUT;
3111}
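
/*
 * Illustrative sketch (not driver code): the timer above is armed with
 * a relative expiry; per the comment, zfcp_fsf_req_send is expected to
 * make it absolute before starting it, roughly like this (the exact
 * code inside zfcp_fsf_req_send is an assumption here):
 *
 *	timer->expires += jiffies;	// make relative expiry absolute
 *	add_timer(timer);		// arm the erp timeout
 */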
3112
3113/*
3114 * function:	zfcp_erp_action_enqueue
3115 *
3116 * purpose:	enqueue the specified error recovery action, if needed
3117 *
3118 * returns:	0 - enqueued, 1 - dropped as unnecessary, -EIO - no erp thread
3119 */
3120static int
3121zfcp_erp_action_enqueue(int action,
3122 struct zfcp_adapter *adapter,
3123 struct zfcp_port *port, struct zfcp_unit *unit)
3124{
3125 int retval = 1;
3126 struct zfcp_erp_action *erp_action = NULL;
3127 int stronger_action = 0;
3128 u32 status = 0;
3129
3130 /*
3131 * We need some rules here which check whether we really need
3132 * this action or whether we should just drop it.
3133	 * E.g. if there is an unfinished 'Reopen Port' request then we drop a
3134 * 'Reopen Unit' request for an associated unit since we can't
3135 * satisfy this request now. A 'Reopen Port' action will trigger
3136 * 'Reopen Unit' actions when it completes.
3137 * Thus, there are only actions in the queue which can immediately be
3138 * executed. This makes the processing of the action queue more
3139 * efficient.
3140 */
3141
3142 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
3143 &adapter->status))
3144 return -EIO;
3145
3146 debug_event(adapter->erp_dbf, 4, &action, sizeof (int));
3147 /* check whether we really need this */
3148 switch (action) {
3149 case ZFCP_ERP_ACTION_REOPEN_UNIT:
3150 if (atomic_test_mask
3151 (ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) {
3152 debug_text_event(adapter->erp_dbf, 4, "u_actenq_drp");
3153 debug_event(adapter->erp_dbf, 4, &port->wwpn,
3154 sizeof (wwn_t));
3155 debug_event(adapter->erp_dbf, 4, &unit->fcp_lun,
3156 sizeof (fcp_lun_t));
3157 goto out;
3158 }
3159 if (!atomic_test_mask
3160 (ZFCP_STATUS_COMMON_RUNNING, &port->status) ||
3161 atomic_test_mask
3162 (ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
3163 goto out;
3164 }
3165 if (!atomic_test_mask
3166 (ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) {
3167 stronger_action = ZFCP_ERP_ACTION_REOPEN_PORT;
3168 unit = NULL;
3169 }
3170 /* fall through !!! */
3171
3172 case ZFCP_ERP_ACTION_REOPEN_PORT:
3173 if (atomic_test_mask
3174 (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)) {
3175 debug_text_event(adapter->erp_dbf, 4, "p_actenq_drp");
3176 debug_event(adapter->erp_dbf, 4, &port->wwpn,
3177 sizeof (wwn_t));
3178 goto out;
3179 }
3180 /* fall through !!! */
3181
3182 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
3183 if (atomic_test_mask
3184 (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)
3185 && port->erp_action.action ==
3186 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) {
3187 debug_text_event(adapter->erp_dbf, 4, "pf_actenq_drp");
3188 debug_event(adapter->erp_dbf, 4, &port->wwpn,
3189 sizeof (wwn_t));
3190 goto out;
3191 }
3192 if (!atomic_test_mask
3193 (ZFCP_STATUS_COMMON_RUNNING, &adapter->status) ||
3194 atomic_test_mask
3195 (ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) {
3196 goto out;
3197 }
3198 if (!atomic_test_mask
3199 (ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) {
3200 stronger_action = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
3201 port = NULL;
3202 }
3203 /* fall through !!! */
3204
3205 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
3206 if (atomic_test_mask
3207 (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)) {
3208 debug_text_event(adapter->erp_dbf, 4, "a_actenq_drp");
3209 goto out;
3210 }
3211 break;
3212
3213 default:
3214 debug_text_exception(adapter->erp_dbf, 1, "a_actenq_bug");
3215 debug_event(adapter->erp_dbf, 1, &action, sizeof (int));
3216 ZFCP_LOG_NORMAL("bug: unknown erp action requested "
3217 "on adapter %s (action=%d)\n",
3218 zfcp_get_busid_by_adapter(adapter), action);
3219 goto out;
3220 }
3221
3222 /* check whether we need something stronger first */
3223 if (stronger_action) {
3224 debug_text_event(adapter->erp_dbf, 4, "a_actenq_str");
3225 debug_event(adapter->erp_dbf, 4, &stronger_action,
3226 sizeof (int));
3227 ZFCP_LOG_DEBUG("stronger erp action %d needed before "
3228 "erp action %d on adapter %s\n",
3229 stronger_action, action,
3230 zfcp_get_busid_by_adapter(adapter));
3231 action = stronger_action;
3232 }
3233
3234 /* mark adapter to have some error recovery pending */
3235 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
3236
3237 /* setup error recovery action */
3238 switch (action) {
3239
3240 case ZFCP_ERP_ACTION_REOPEN_UNIT:
3241 zfcp_unit_get(unit);
3242 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
3243 erp_action = &unit->erp_action;
3244 if (!atomic_test_mask
3245 (ZFCP_STATUS_COMMON_RUNNING, &unit->status))
3246 status = ZFCP_STATUS_ERP_CLOSE_ONLY;
3247 break;
3248
3249 case ZFCP_ERP_ACTION_REOPEN_PORT:
3250 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
3251 zfcp_port_get(port);
3252 zfcp_erp_action_dismiss_port(port);
3253 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
3254 erp_action = &port->erp_action;
3255 if (!atomic_test_mask
3256 (ZFCP_STATUS_COMMON_RUNNING, &port->status))
3257 status = ZFCP_STATUS_ERP_CLOSE_ONLY;
3258 break;
3259
3260 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
3261 zfcp_adapter_get(adapter);
3262 zfcp_erp_action_dismiss_adapter(adapter);
3263 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
3264 erp_action = &adapter->erp_action;
3265 if (!atomic_test_mask
3266 (ZFCP_STATUS_COMMON_RUNNING, &adapter->status))
3267 status = ZFCP_STATUS_ERP_CLOSE_ONLY;
3268 break;
3269 }
3270
3271 debug_text_event(adapter->erp_dbf, 4, "a_actenq");
3272
3273 memset(erp_action, 0, sizeof (struct zfcp_erp_action));
3274 erp_action->adapter = adapter;
3275 erp_action->port = port;
3276 erp_action->unit = unit;
3277 erp_action->action = action;
3278 erp_action->status = status;
3279
3280 ++adapter->erp_total_count;
3281
3282 /* finally put it into 'ready' queue and kick erp thread */
3283 list_add(&erp_action->list, &adapter->erp_ready_head);
3284 up(&adapter->erp_ready_sem);
3285 retval = 0;
3286 out:
3287 return retval;
3288}
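
/*
 * Usage sketch (illustrative only): a unit recovery request would be
 * queued roughly as follows; taking adapter->erp_lock around the call
 * is an assumption based on how the reopen helpers in this file drive
 * the enqueue path:
 *
 *	unsigned long flags;
 *
 *	write_lock_irqsave(&adapter->erp_lock, flags);
 *	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT,
 *				adapter, unit->port, unit);
 *	write_unlock_irqrestore(&adapter->erp_lock, flags);
 *
 * If the owning port is still blocked, the request is escalated to a
 * 'Reopen Port' action as described in the comment above.
 */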
3289
3290/*
3291 * function:	zfcp_erp_action_dequeue
3292 *
3293 * purpose:	removes an erp action from its queue, updates the counters
3294 *		and clears the 'erp in use' indication of its object
3295 * returns:	0
3296 */
3297static int
3298zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
3299{
3300 int retval = 0;
3301 struct zfcp_adapter *adapter = erp_action->adapter;
3302
3303 --adapter->erp_total_count;
3304 if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
3305 --adapter->erp_low_mem_count;
3306 erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
3307 }
3308
3309 debug_text_event(adapter->erp_dbf, 4, "a_actdeq");
3310 debug_event(adapter->erp_dbf, 4, &erp_action->action, sizeof (int));
3311 list_del(&erp_action->list);
3312 switch (erp_action->action) {
3313 case ZFCP_ERP_ACTION_REOPEN_UNIT:
3314 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
3315 &erp_action->unit->status);
3316 break;
3317 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
3318 case ZFCP_ERP_ACTION_REOPEN_PORT:
3319 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
3320 &erp_action->port->status);
3321 break;
3322 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
3323 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
3324 &erp_action->adapter->status);
3325 break;
3326 default:
3327 /* bug */
3328 break;
3329 }
3330 return retval;
3331}
3332
3333/**
3334 * zfcp_erp_action_cleanup
3335 *
3336 * Register unit with scsi stack if appropriate and fix reference counts.
3337 * Note: Temporary units are not registered with scsi stack.
3338 */
3339static void
3340zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3341 struct zfcp_port *port, struct zfcp_unit *unit,
3342 int result)
3343{
3344 switch (action) {
3345 case ZFCP_ERP_ACTION_REOPEN_UNIT:
3346 if ((result == ZFCP_ERP_SUCCEEDED)
3347 && (!atomic_test_mask(ZFCP_STATUS_UNIT_TEMPORARY,
3348 &unit->status))
3349 && (!unit->device))
3350 scsi_add_device(unit->port->adapter->scsi_host, 0,
3351 unit->port->scsi_id, unit->scsi_lun);
3352 zfcp_unit_put(unit);
3353 break;
3354 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
3355 case ZFCP_ERP_ACTION_REOPEN_PORT:
3356 zfcp_port_put(port);
3357 break;
3358 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
3359 zfcp_adapter_put(adapter);
3360 break;
3361 default:
3362 break;
3363 }
3364}
3365
3366
3367/*
3368 * function:	zfcp_erp_action_dismiss_adapter
3369 *
3370 * purpose:	dismisses the erp action owned by the adapter or, if there
3371 *		is none, the actions of all its ports
3372 * returns:	0
3373 */
3374static int
3375zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
3376{
3377 int retval = 0;
3378 struct zfcp_port *port;
3379
3380 debug_text_event(adapter->erp_dbf, 5, "a_actab");
3381 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status))
3382 zfcp_erp_action_dismiss(&adapter->erp_action);
3383 else
3384 list_for_each_entry(port, &adapter->port_list_head, list)
3385 zfcp_erp_action_dismiss_port(port);
3386
3387 return retval;
3388}
3389
3390/*
3391 * function:	zfcp_erp_action_dismiss_port
3392 *
3393 * purpose:	dismisses the erp action owned by the port or, if there
3394 *		is none, the actions of all its units
3395 * returns:	0
3396 */
3397static int
3398zfcp_erp_action_dismiss_port(struct zfcp_port *port)
3399{
3400 int retval = 0;
3401 struct zfcp_unit *unit;
3402 struct zfcp_adapter *adapter = port->adapter;
3403
3404 debug_text_event(adapter->erp_dbf, 5, "p_actab");
3405 debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t));
3406 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status))
3407 zfcp_erp_action_dismiss(&port->erp_action);
3408 else
3409 list_for_each_entry(unit, &port->unit_list_head, list)
3410 zfcp_erp_action_dismiss_unit(unit);
3411
3412 return retval;
3413}
3414
3415/*
3416 * function:	zfcp_erp_action_dismiss_unit
3417 *
3418 * purpose:	dismisses the erp action owned by the unit, if any
3419 *
3420 * returns:	0
3421 */
3422static int
3423zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
3424{
3425 int retval = 0;
3426 struct zfcp_adapter *adapter = unit->port->adapter;
3427
3428 debug_text_event(adapter->erp_dbf, 5, "u_actab");
3429 debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t));
3430 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status))
3431 zfcp_erp_action_dismiss(&unit->erp_action);
3432
3433 return retval;
3434}
3435
3436/*
3437 * function:	zfcp_erp_action_to_running
3438 *
3439 * purpose: moves erp_action to 'erp running list'
3440 *
3441 * returns:
3442 */
3443static inline void
3444zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
3445{
3446 struct zfcp_adapter *adapter = erp_action->adapter;
3447
3448 debug_text_event(adapter->erp_dbf, 6, "a_toru");
3449 debug_event(adapter->erp_dbf, 6, &erp_action->action, sizeof (int));
3450 list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
3451}
3452
3453/*
3454 * function:	zfcp_erp_action_to_ready
3455 *
3456 * purpose: moves erp_action to 'erp ready list'
3457 *
3458 * returns:
3459 */
3460static inline void
3461zfcp_erp_action_to_ready(struct zfcp_erp_action *erp_action)
3462{
3463 struct zfcp_adapter *adapter = erp_action->adapter;
3464
3465 debug_text_event(adapter->erp_dbf, 6, "a_tore");
3466 debug_event(adapter->erp_dbf, 6, &erp_action->action, sizeof (int));
3467 list_move(&erp_action->list, &erp_action->adapter->erp_ready_head);
3468}
3469
3470/*
3471 * function: zfcp_erp_port_access_denied
3472 *
3473 * purpose:	marks the port as erp failed and access denied
3474 */
3475void
3476zfcp_erp_port_access_denied(struct zfcp_port *port)
3477{
3478 struct zfcp_adapter *adapter = port->adapter;
3479 unsigned long flags;
3480
3481 debug_text_event(adapter->erp_dbf, 3, "p_access_block");
3482 debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof(wwn_t));
3483 read_lock_irqsave(&zfcp_data.config_lock, flags);
3484 zfcp_erp_modify_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED |
3485 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
3486 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
3487}
3488
3489/*
3490 * function: zfcp_erp_unit_access_denied
3491 *
3492 * purpose:	marks the unit as erp failed and access denied
3493 */
3494void
3495zfcp_erp_unit_access_denied(struct zfcp_unit *unit)
3496{
3497 struct zfcp_adapter *adapter = unit->port->adapter;
3498
3499 debug_text_event(adapter->erp_dbf, 3, "u_access_block");
3500 debug_event(adapter->erp_dbf, 3, &unit->fcp_lun, sizeof(fcp_lun_t));
3501 zfcp_erp_modify_unit_status(unit, ZFCP_STATUS_COMMON_ERP_FAILED |
3502 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
3503}
3504
3505/*
3506 * function: zfcp_erp_adapter_access_changed
3507 *
3508 * purpose:	re-checks access permissions of all ports after an ACT update
3509 */
3510void
3511zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter)
3512{
3513 struct zfcp_port *port;
3514 unsigned long flags;
3515
3516 debug_text_event(adapter->erp_dbf, 3, "a_access_unblock");
3517 debug_event(adapter->erp_dbf, 3, &adapter->name, 8);
3518
3519 zfcp_erp_port_access_changed(adapter->nameserver_port);
3520 read_lock_irqsave(&zfcp_data.config_lock, flags);
3521 list_for_each_entry(port, &adapter->port_list_head, list)
3522 if (port != adapter->nameserver_port)
3523 zfcp_erp_port_access_changed(port);
3524 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
3525}
3526
3527/*
3528 * function: zfcp_erp_port_access_changed
3529 *
3530 * purpose:	reopens a port whose access had been denied; otherwise
 *		re-checks the access permissions of its units
3531 */
3532void
3533zfcp_erp_port_access_changed(struct zfcp_port *port)
3534{
3535 struct zfcp_adapter *adapter = port->adapter;
3536 struct zfcp_unit *unit;
3537
3538 debug_text_event(adapter->erp_dbf, 3, "p_access_unblock");
3539 debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof(wwn_t));
3540
3541 if (!atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED,
3542 &port->status)) {
3543 if (!atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
3544 list_for_each_entry(unit, &port->unit_list_head, list)
3545 zfcp_erp_unit_access_changed(unit);
3546 return;
3547 }
3548
3549 ZFCP_LOG_NORMAL("reopen of port 0x%016Lx on adapter %s "
3550 "(due to ACT update)\n",
3551 port->wwpn, zfcp_get_busid_by_adapter(adapter));
3552 if (zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
3553		ZFCP_LOG_NORMAL("failed reopen of port "
3554 "(adapter %s, wwpn=0x%016Lx)\n",
3555 zfcp_get_busid_by_adapter(adapter), port->wwpn);
3556}
3557
3558/*
3559 * function: zfcp_erp_unit_access_changed
3560 *
3561 * purpose:	reopens a unit whose access had been denied (ACT update)
3562 */
3563void
3564zfcp_erp_unit_access_changed(struct zfcp_unit *unit)
3565{
3566 struct zfcp_adapter *adapter = unit->port->adapter;
3567
3568 debug_text_event(adapter->erp_dbf, 3, "u_access_unblock");
3569 debug_event(adapter->erp_dbf, 3, &unit->fcp_lun, sizeof(fcp_lun_t));
3570
3571 if (!atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, &unit->status))
3572 return;
3573
3574 ZFCP_LOG_NORMAL("reopen of unit 0x%016Lx on port 0x%016Lx "
3575			"on adapter %s (due to ACT update)\n",
3576 unit->fcp_lun, unit->port->wwpn,
3577 zfcp_get_busid_by_adapter(adapter));
3578 if (zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
3579 ZFCP_LOG_NORMAL("failed reopen of unit (adapter %s, "
3580 "wwpn=0x%016Lx, fcp_lun=0x%016Lx)\n",
3581 zfcp_get_busid_by_adapter(adapter),
3582 unit->port->wwpn, unit->fcp_lun);
3583}
3584
3585#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
new file mode 100644
index 000000000000..d5fd43352071
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -0,0 +1,186 @@
1/*
2 *
3 * linux/drivers/s390/scsi/zfcp_ext.h
4 *
5 * FCP adapter driver for IBM eServer zSeries
6 *
7 * (C) Copyright IBM Corp. 2002, 2004
8 *
9 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
10 * Raimund Schroeder <raimund.schroeder@de.ibm.com>
11 * Aron Zeh
12 * Wolfgang Taphorn
13 * Stefan Bader <stefan.bader@de.ibm.com>
14 * Heiko Carstens <heiko.carstens@de.ibm.com>
15 * Andreas Herrmann <aherrman@de.ibm.com>
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
20 * any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 */
31
32#ifndef ZFCP_EXT_H
33#define ZFCP_EXT_H
34
35#define ZFCP_EXT_REVISION "$Revision: 1.62 $"
36
37#include "zfcp_def.h"
38
39extern struct zfcp_data zfcp_data;
40
41/******************************** SYSFS *************************************/
42extern int zfcp_sysfs_driver_create_files(struct device_driver *);
43extern void zfcp_sysfs_driver_remove_files(struct device_driver *);
44extern int zfcp_sysfs_adapter_create_files(struct device *);
45extern void zfcp_sysfs_adapter_remove_files(struct device *);
46extern int zfcp_sysfs_port_create_files(struct device *, u32);
47extern void zfcp_sysfs_port_remove_files(struct device *, u32);
48extern int zfcp_sysfs_unit_create_files(struct device *);
49extern void zfcp_sysfs_unit_remove_files(struct device *);
50extern void zfcp_sysfs_port_release(struct device *);
51extern void zfcp_sysfs_unit_release(struct device *);
52
53/**************************** CONFIGURATION *********************************/
54extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, fcp_lun_t);
55extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, wwn_t);
56extern struct zfcp_port *zfcp_get_port_by_did(struct zfcp_adapter *, u32);
57struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
58extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
59extern int zfcp_adapter_debug_register(struct zfcp_adapter *);
60extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
61extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *);
62extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, wwn_t,
63 u32, u32);
64extern void zfcp_port_dequeue(struct zfcp_port *);
65extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, fcp_lun_t);
66extern void zfcp_unit_dequeue(struct zfcp_unit *);
67
68/******************************* S/390 IO ************************************/
69extern int zfcp_ccw_register(void);
70extern void zfcp_ccw_unregister(void);
71
72extern void zfcp_qdio_zero_sbals(struct qdio_buffer **, int, int);
73extern int zfcp_qdio_allocate(struct zfcp_adapter *);
74extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *);
75extern void zfcp_qdio_free_queues(struct zfcp_adapter *);
76extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *,
77 struct zfcp_fsf_req *);
78extern int zfcp_qdio_reqid_check(struct zfcp_adapter *, void *);
79
80extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req
81 (struct zfcp_fsf_req *, int, int);
82extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr
83 (struct zfcp_fsf_req *);
84extern int zfcp_qdio_sbals_from_sg
85 (struct zfcp_fsf_req *, unsigned long, struct scatterlist *, int, int);
86extern int zfcp_qdio_sbals_from_scsicmnd
87 (struct zfcp_fsf_req *, unsigned long, struct scsi_cmnd *);
88
89
90/******************************** FSF ****************************************/
91extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
92extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
93extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
94
95extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
96extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
97
98extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
99extern int zfcp_fsf_exchange_port_data(struct zfcp_adapter *,
100 struct fsf_qtcb_bottom_port *);
101extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **,
102 u32, u32, struct zfcp_sg_list *);
103extern void zfcp_fsf_request_timeout_handler(unsigned long);
104extern void zfcp_fsf_scsi_er_timeout_handler(unsigned long);
105extern int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
106extern int zfcp_fsf_status_read(struct zfcp_adapter *, int);
107extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *,
108 unsigned long *, struct zfcp_fsf_req **);
109extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
110 struct zfcp_erp_action *);
111extern int zfcp_fsf_send_els(struct zfcp_send_els *);
112extern int zfcp_fsf_req_wait_and_cleanup(struct zfcp_fsf_req *, int, u32 *);
113extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
114 struct zfcp_unit *,
115 struct scsi_cmnd *,
116 struct timer_list*, int);
117extern int zfcp_fsf_req_complete(struct zfcp_fsf_req *);
118extern void zfcp_fsf_incoming_els(struct zfcp_fsf_req *);
119extern void zfcp_fsf_req_cleanup(struct zfcp_fsf_req *);
120extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_command_task_management(
121 struct zfcp_adapter *, struct zfcp_unit *, u8, int);
122extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(
123 unsigned long, struct zfcp_adapter *, struct zfcp_unit *, int);
124
125/******************************* FC/FCP **************************************/
126extern int zfcp_nameserver_enqueue(struct zfcp_adapter *);
127extern int zfcp_ns_gid_pn_request(struct zfcp_erp_action *);
128extern int zfcp_check_ct_response(struct ct_hdr *);
129extern int zfcp_handle_els_rjt(u32, struct zfcp_ls_rjt_par *);
130
131/******************************* SCSI ****************************************/
132extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
133extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
134extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
135extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *);
136extern void set_host_byte(u32 *, char);
137extern void set_driver_byte(u32 *, char);
138extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
139extern void zfcp_fsf_start_scsi_er_timer(struct zfcp_adapter *);
140extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *);
141
142extern int zfcp_scsi_command_async(struct zfcp_adapter *,struct zfcp_unit *,
143 struct scsi_cmnd *, struct timer_list *);
144extern int zfcp_scsi_command_sync(struct zfcp_unit *, struct scsi_cmnd *,
145 struct timer_list *);
146extern struct scsi_transport_template *zfcp_transport_template;
147extern struct fc_function_template zfcp_transport_functions;
148
149/******************************** ERP ****************************************/
150extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int);
151extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int);
152extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int);
153extern void zfcp_erp_adapter_failed(struct zfcp_adapter *);
154
155extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int);
156extern int zfcp_erp_port_reopen(struct zfcp_port *, int);
157extern int zfcp_erp_port_shutdown(struct zfcp_port *, int);
158extern int zfcp_erp_port_forced_reopen(struct zfcp_port *, int);
159extern void zfcp_erp_port_failed(struct zfcp_port *);
160extern int zfcp_erp_port_reopen_all(struct zfcp_adapter *, int);
161
162extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u32, int);
163extern int zfcp_erp_unit_reopen(struct zfcp_unit *, int);
164extern int zfcp_erp_unit_shutdown(struct zfcp_unit *, int);
165extern void zfcp_erp_unit_failed(struct zfcp_unit *);
166
167extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
168extern int zfcp_erp_thread_kill(struct zfcp_adapter *);
169extern int zfcp_erp_wait(struct zfcp_adapter *);
170extern int zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long);
171
172extern int zfcp_test_link(struct zfcp_port *);
173
174extern void zfcp_erp_port_access_denied(struct zfcp_port *);
175extern void zfcp_erp_unit_access_denied(struct zfcp_unit *);
176extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *);
177extern void zfcp_erp_port_access_changed(struct zfcp_port *);
178extern void zfcp_erp_unit_access_changed(struct zfcp_unit *);
179
180/******************************** AUX ****************************************/
181extern void zfcp_cmd_dbf_event_fsf(const char *, struct zfcp_fsf_req *,
182 void *, int);
183extern void zfcp_cmd_dbf_event_scsi(const char *, struct scsi_cmnd *);
184extern void zfcp_in_els_dbf_event(struct zfcp_adapter *, const char *,
185 struct fsf_status_read_buffer *, int);
186#endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
new file mode 100644
index 000000000000..578b9fbe5206
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -0,0 +1,5087 @@
1/*
2 *
3 * linux/drivers/s390/scsi/zfcp_fsf.c
4 *
5 * FCP adapter driver for IBM eServer zSeries
6 *
7 * (C) Copyright IBM Corp. 2002, 2004
8 *
9 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
10 * Raimund Schroeder <raimund.schroeder@de.ibm.com>
11 * Aron Zeh
12 * Wolfgang Taphorn
13 * Stefan Bader <stefan.bader@de.ibm.com>
14 * Heiko Carstens <heiko.carstens@de.ibm.com>
15 * Andreas Herrmann <aherrman@de.ibm.com>
16 * Volker Sameske <sameske@de.ibm.com>
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 */
32
33#define ZFCP_FSF_C_REVISION "$Revision: 1.92 $"
34
35#include "zfcp_ext.h"
36
37static int zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *);
38static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *);
39static int zfcp_fsf_open_port_handler(struct zfcp_fsf_req *);
40static int zfcp_fsf_close_port_handler(struct zfcp_fsf_req *);
41static int zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *);
42static int zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *);
43static int zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *);
44static int zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *);
45static int zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *);
46static int zfcp_fsf_send_fcp_command_task_management_handler(
47 struct zfcp_fsf_req *);
48static int zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *);
49static int zfcp_fsf_status_read_handler(struct zfcp_fsf_req *);
50static int zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *);
51static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *);
52static int zfcp_fsf_control_file_handler(struct zfcp_fsf_req *);
53static inline int zfcp_fsf_req_sbal_check(
54 unsigned long *, struct zfcp_qdio_queue *, int);
55static inline int zfcp_use_one_sbal(
56 struct scatterlist *, int, struct scatterlist *, int);
57static struct zfcp_fsf_req *zfcp_fsf_req_alloc(mempool_t *, int);
58static int zfcp_fsf_req_send(struct zfcp_fsf_req *, struct timer_list *);
59static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *);
60static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *);
61static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
62static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *);
63static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *);
64static void zfcp_fsf_req_free(struct zfcp_fsf_req *);
65
66/* association between FSF command and FSF QTCB type */
67static u32 fsf_qtcb_type[] = {
68 [FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
69 [FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
70 [FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
71 [FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
72 [FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
73 [FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
74 [FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
75 [FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
76 [FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
77 [FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
78 [FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
79 [FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
80 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
81};
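
/*
 * Illustrative sketch (not a quote from this file): when a
 * QTCB-carrying request is built, the FSF command code presumably
 * indexes the table above to fill in the matching QTCB type:
 *
 *	qtcb->header.fsf_command = fsf_cmd;
 *	qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
 */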
82
83static const char zfcp_act_subtable_type[5][8] = {
84 "unknown", "OS", "WWPN", "DID", "LUN"
85};
86
87/****************************************************************/
88/*************** FSF related Functions *************************/
89/****************************************************************/
90
91#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
92
93/*
94 * function: zfcp_fsf_req_alloc
95 *
96 * purpose: Obtains an fsf_req and potentially a qtcb (for all but
97 *		unsolicited requests) via helper functions.
98 * Does some initial fsf request set-up.
99 *
100 * returns:	pointer to allocated fsf_req if successful
101 * NULL otherwise
102 *
103 * locks: none
104 *
105 */
106static struct zfcp_fsf_req *
107zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
108{
109 size_t size;
110 void *ptr;
111 struct zfcp_fsf_req *fsf_req = NULL;
112
113 if (req_flags & ZFCP_REQ_NO_QTCB)
114 size = sizeof(struct zfcp_fsf_req);
115 else
116 size = sizeof(struct zfcp_fsf_req_pool_element);
117
118 if (likely(pool != NULL))
119 ptr = mempool_alloc(pool, GFP_ATOMIC);
120 else
121 ptr = kmalloc(size, GFP_ATOMIC);
122
123 if (unlikely(NULL == ptr))
124 goto out;
125
126 memset(ptr, 0, size);
127
128 if (req_flags & ZFCP_REQ_NO_QTCB) {
129 fsf_req = (struct zfcp_fsf_req *) ptr;
130 } else {
131 fsf_req = &((struct zfcp_fsf_req_pool_element *) ptr)->fsf_req;
132 fsf_req->qtcb =
133 &((struct zfcp_fsf_req_pool_element *) ptr)->qtcb;
134 }
135
136 fsf_req->pool = pool;
137
138 out:
139 return fsf_req;
140}
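
/*
 * Usage sketch (illustrative only): allocation and release pair up via
 * the 'pool' member recorded above; 'adapter->pool.fsf_req_erp' is an
 * assumed pool name for the example:
 *
 *	struct zfcp_fsf_req *req;
 *
 *	req = zfcp_fsf_req_alloc(adapter->pool.fsf_req_erp, 0);
 *	if (req != NULL) {
 *		// ... fill in and use the request ...
 *		zfcp_fsf_req_free(req);	// returns it to the same pool
 *	}
 */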
141
142/*
143 * function: zfcp_fsf_req_free
144 *
145 * purpose: Frees the memory of an fsf_req (and potentially a qtcb) or
146 * returns it into the pool via helper functions.
147 *
148 * returns:	nothing
149 *
150 * locks: none
151 */
152static void
153zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
154{
155 if (likely(fsf_req->pool != NULL))
156 mempool_free(fsf_req, fsf_req->pool);
157 else
158 kfree(fsf_req);
159}
160
161/*
162 * function:	zfcp_fsf_req_dismiss_all
163 *
164 * purpose:	dismisses all pending fsf requests of an adapter and
165 *		waits until the list of outstanding requests is empty
166 * returns:	0
167 *
168 * note: qdio queues shall be down (no ongoing inbound processing)
169 */
170int
171zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
172{
173 int retval = 0;
174 struct zfcp_fsf_req *fsf_req, *tmp;
175
176 list_for_each_entry_safe(fsf_req, tmp, &adapter->fsf_req_list_head,
177 list)
178 zfcp_fsf_req_dismiss(fsf_req);
179 /* wait_event_timeout? */
180 while (!list_empty(&adapter->fsf_req_list_head)) {
181 ZFCP_LOG_DEBUG("fsf req list of adapter %s not yet empty\n",
182 zfcp_get_busid_by_adapter(adapter));
183		/* wait for woken initiators to clean up their requests */
184 msleep(jiffies_to_msecs(ZFCP_FSFREQ_CLEANUP_TIMEOUT));
185 }
186
187 /* consistency check */
188 if (atomic_read(&adapter->fsf_reqs_active)) {
189 ZFCP_LOG_NORMAL("bug: There are still %d FSF requests pending "
190 "on adapter %s after cleanup.\n",
191 atomic_read(&adapter->fsf_reqs_active),
192 zfcp_get_busid_by_adapter(adapter));
193 atomic_set(&adapter->fsf_reqs_active, 0);
194 }
195
196 return retval;
197}
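
/*
 * The 'wait_event_timeout?' note above hints at replacing the polling
 * loop; a minimal sketch of that alternative, assuming a waitqueue
 * (hypothetical name 'remove_wq') were added to struct zfcp_adapter
 * and woken whenever a request leaves the list:
 *
 *	wait_event_timeout(adapter->remove_wq,
 *			   list_empty(&adapter->fsf_req_list_head),
 *			   ZFCP_FSFREQ_CLEANUP_TIMEOUT);
 */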
198
199/*
200 * function:	zfcp_fsf_req_dismiss
201 *
202 * purpose:	marks an fsf request as dismissed and completes it
203 *
204 * returns:	nothing
205 */
206static void
207zfcp_fsf_req_dismiss(struct zfcp_fsf_req *fsf_req)
208{
209 fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
210 zfcp_fsf_req_complete(fsf_req);
211}
212
213/*
214 * function: zfcp_fsf_req_complete
215 *
216 * purpose: Updates active counts and timers for openfcp-reqs
217 * May cleanup request after req_eval returns
218 *
219 * returns: 0 - success
220 * !0 - failure
221 *
222 * context:
223 */
224int
225zfcp_fsf_req_complete(struct zfcp_fsf_req *fsf_req)
226{
227 int retval = 0;
228 int cleanup;
229 struct zfcp_adapter *adapter = fsf_req->adapter;
230
231 /* do some statistics */
232 atomic_dec(&adapter->fsf_reqs_active);
233
234 if (unlikely(fsf_req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
235 ZFCP_LOG_DEBUG("Status read response received\n");
236 /*
237		 * Note: all cleanup handling is done in the function
238		 * call-chain below.
239 */
240 zfcp_fsf_status_read_handler(fsf_req);
241 goto out;
242 } else
243 zfcp_fsf_protstatus_eval(fsf_req);
244
245 /*
246 * fsf_req may be deleted due to waking up functions, so
247 * cleanup is saved here and used later
248 */
249 if (likely(fsf_req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
250 cleanup = 1;
251 else
252 cleanup = 0;
253
254 fsf_req->status |= ZFCP_STATUS_FSFREQ_COMPLETED;
255
256 /* cleanup request if requested by initiator */
257 if (likely(cleanup)) {
258 ZFCP_LOG_TRACE("removing FSF request %p\n", fsf_req);
259 /*
260 * lock must not be held here since it will be
261		 * grabbed by the called routine, too
262 */
263 zfcp_fsf_req_cleanup(fsf_req);
264 } else {
265 /* notify initiator waiting for the requests completion */
266 ZFCP_LOG_TRACE("waking initiator of FSF request %p\n",fsf_req);
267 /*
268 * FIXME: Race! We must not access fsf_req here as it might have been
269 * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED
270 * flag. It's an improbable case. But, we have the same paranoia for
271		 * flag. It's an improbable case, but we have the same paranoia for
272 * Might better be handled using complete()?
273 * (setting the flag and doing wakeup ought to be atomic
274 * with regard to checking the flag as long as waitqueue is
275 * part of the to be released structure)
276 */
277 wake_up(&fsf_req->completion_wq);
278 }
279
280 out:
281 return retval;
282}
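
/*
 * The FIXME above suggests complete() as the race-free alternative to
 * the bare wake_up(); a minimal sketch, assuming completion_wq were
 * replaced by a struct completion member (hypothetical name 'done'):
 *
 *	complete(&fsf_req->done);	// instead of wake_up() above
 *
 * with the initiator side waiting via:
 *
 *	wait_for_completion(&fsf_req->done);
 *	zfcp_fsf_req_cleanup(fsf_req);
 */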
283
284/*
285 * function: zfcp_fsf_protstatus_eval
286 *
287 * purpose: evaluates the QTCB of the finished FSF request
288 * and initiates appropriate actions
289 * (usually calling FSF command specific handlers)
290 *
291 * returns:
292 *
293 * context:
294 *
295 * locks:
296 */
297static int
298zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
299{
300 int retval = 0;
301 struct zfcp_adapter *adapter = fsf_req->adapter;
302
303 ZFCP_LOG_DEBUG("QTCB is at %p\n", fsf_req->qtcb);
304
305 if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
306 ZFCP_LOG_DEBUG("fsf_req 0x%lx has been dismissed\n",
307 (unsigned long) fsf_req);
308 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
309 ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
310 zfcp_cmd_dbf_event_fsf("dismiss", fsf_req, NULL, 0);
311 goto skip_protstatus;
312 }
313
314 /* log additional information provided by FSF (if any) */
315 if (unlikely(fsf_req->qtcb->header.log_length)) {
316 /* do not trust them ;-) */
317 if (fsf_req->qtcb->header.log_start > sizeof(struct fsf_qtcb)) {
318 ZFCP_LOG_NORMAL
319 ("bug: ULP (FSF logging) log data starts "
320 "beyond end of packet header. Ignored. "
321 "(start=%i, size=%li)\n",
322 fsf_req->qtcb->header.log_start,
323 sizeof(struct fsf_qtcb));
324 goto forget_log;
325 }
326 if ((size_t) (fsf_req->qtcb->header.log_start +
327 fsf_req->qtcb->header.log_length)
328 > sizeof(struct fsf_qtcb)) {
329 ZFCP_LOG_NORMAL("bug: ULP (FSF logging) log data ends "
330 "beyond end of packet header. Ignored. "
331 "(start=%i, length=%i, size=%li)\n",
332 fsf_req->qtcb->header.log_start,
333 fsf_req->qtcb->header.log_length,
334 sizeof(struct fsf_qtcb));
335 goto forget_log;
336 }
337 ZFCP_LOG_TRACE("ULP log data: \n");
338 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
339 (char *) fsf_req->qtcb +
340 fsf_req->qtcb->header.log_start,
341 fsf_req->qtcb->header.log_length);
342 }
343 forget_log:
344
345 /* evaluate FSF Protocol Status */
346 switch (fsf_req->qtcb->prefix.prot_status) {
347
348 case FSF_PROT_GOOD:
349 ZFCP_LOG_TRACE("FSF_PROT_GOOD\n");
350 break;
351
352 case FSF_PROT_FSF_STATUS_PRESENTED:
353 ZFCP_LOG_TRACE("FSF_PROT_FSF_STATUS_PRESENTED\n");
354 break;
355
356 case FSF_PROT_QTCB_VERSION_ERROR:
357 ZFCP_LOG_FLAGS(0, "FSF_PROT_QTCB_VERSION_ERROR\n");
358 ZFCP_LOG_NORMAL("error: The adapter %s contains "
359 "microcode of version 0x%x, the device driver "
360 "only supports 0x%x. Aborting.\n",
361 zfcp_get_busid_by_adapter(adapter),
362 fsf_req->qtcb->prefix.prot_status_qual.
363 version_error.fsf_version, ZFCP_QTCB_VERSION);
364 /* stop operation for this adapter */
365 debug_text_exception(adapter->erp_dbf, 0, "prot_ver_err");
366 zfcp_erp_adapter_shutdown(adapter, 0);
367 zfcp_cmd_dbf_event_fsf("qverserr", fsf_req,
368 &fsf_req->qtcb->prefix.prot_status_qual,
369 sizeof (union fsf_prot_status_qual));
370 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
371 break;
372
373 case FSF_PROT_SEQ_NUMB_ERROR:
374 ZFCP_LOG_FLAGS(0, "FSF_PROT_SEQ_NUMB_ERROR\n");
375 ZFCP_LOG_NORMAL("bug: Sequence number mismatch between "
376 "driver (0x%x) and adapter %s (0x%x). "
377 "Restarting all operations on this adapter.\n",
378 fsf_req->qtcb->prefix.req_seq_no,
379 zfcp_get_busid_by_adapter(adapter),
380 fsf_req->qtcb->prefix.prot_status_qual.
381 sequence_error.exp_req_seq_no);
382 debug_text_exception(adapter->erp_dbf, 0, "prot_seq_err");
383 /* restart operation on this adapter */
384 zfcp_erp_adapter_reopen(adapter, 0);
385 zfcp_cmd_dbf_event_fsf("seqnoerr", fsf_req,
386 &fsf_req->qtcb->prefix.prot_status_qual,
387 sizeof (union fsf_prot_status_qual));
388 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
389 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
390 break;
391
392 case FSF_PROT_UNSUPP_QTCB_TYPE:
393 ZFCP_LOG_FLAGS(0, "FSF_PROT_UNSUP_QTCB_TYPE\n");
394 ZFCP_LOG_NORMAL("error: Packet header type used by the "
395 "device driver is incompatible with "
396 "that used on adapter %s. "
397 "Stopping all operations on this adapter.\n",
398 zfcp_get_busid_by_adapter(adapter));
399 debug_text_exception(adapter->erp_dbf, 0, "prot_unsup_qtcb");
400 zfcp_erp_adapter_shutdown(adapter, 0);
401 zfcp_cmd_dbf_event_fsf("unsqtcbt", fsf_req,
402 &fsf_req->qtcb->prefix.prot_status_qual,
403 sizeof (union fsf_prot_status_qual));
404 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
405 break;
406
407 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
408 ZFCP_LOG_FLAGS(1, "FSF_PROT_HOST_CONNECTION_INITIALIZING\n");
409 zfcp_cmd_dbf_event_fsf("hconinit", fsf_req,
410 &fsf_req->qtcb->prefix.prot_status_qual,
411 sizeof (union fsf_prot_status_qual));
412 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
413 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
414 &(adapter->status));
415 debug_text_event(adapter->erp_dbf, 3, "prot_con_init");
416 break;
417
418 case FSF_PROT_DUPLICATE_REQUEST_ID:
419 ZFCP_LOG_FLAGS(0, "FSF_PROT_DUPLICATE_REQUEST_IDS\n");
420 if (fsf_req->qtcb) {
421 ZFCP_LOG_NORMAL("bug: The request identifier 0x%Lx "
422 "to the adapter %s is ambiguous. "
423 "Stopping all operations on this "
424 "adapter.\n",
425 *(unsigned long long *)
426 (&fsf_req->qtcb->bottom.support.
427 req_handle),
428 zfcp_get_busid_by_adapter(adapter));
429 } else {
430 ZFCP_LOG_NORMAL("bug: The request identifier %p "
431 "to the adapter %s is ambiguous. "
432 "Stopping all operations on this "
433 "adapter. "
434 "(bug: got this for an unsolicited "
435 "status read request)\n",
436 fsf_req,
437 zfcp_get_busid_by_adapter(adapter));
438 }
439 debug_text_exception(adapter->erp_dbf, 0, "prot_dup_id");
440 zfcp_erp_adapter_shutdown(adapter, 0);
441 zfcp_cmd_dbf_event_fsf("dupreqid", fsf_req,
442 &fsf_req->qtcb->prefix.prot_status_qual,
443 sizeof (union fsf_prot_status_qual));
444 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
445 break;
446
447 case FSF_PROT_LINK_DOWN:
448 ZFCP_LOG_FLAGS(1, "FSF_PROT_LINK_DOWN\n");
449 /*
450 * 'test and set' is not atomic here -
451 * it's ok as long as calls to our response queue handler
452 * (and thus execution of this code here) are serialized
453 * by the qdio module
454 */
455 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
456 &adapter->status)) {
457 switch (fsf_req->qtcb->prefix.prot_status_qual.
458 locallink_error.code) {
459 case FSF_PSQ_LINK_NOLIGHT:
460 ZFCP_LOG_INFO("The local link to adapter %s "
461 "is down (no light detected).\n",
462 zfcp_get_busid_by_adapter(
463 adapter));
464 break;
465 case FSF_PSQ_LINK_WRAPPLUG:
466 ZFCP_LOG_INFO("The local link to adapter %s "
467 "is down (wrap plug detected).\n",
468 zfcp_get_busid_by_adapter(
469 adapter));
470 break;
471 case FSF_PSQ_LINK_NOFCP:
472 ZFCP_LOG_INFO("The local link to adapter %s "
473 "is down (adjacent node on "
474 "link does not support FCP).\n",
475 zfcp_get_busid_by_adapter(
476 adapter));
477 break;
478 default:
479 ZFCP_LOG_INFO("The local link to adapter %s "
480 "is down "
481 "(warning: unknown reason "
482 "code).\n",
483 zfcp_get_busid_by_adapter(
484 adapter));
485 break;
486
487 }
488 /*
489 * Due to the 'erp failed' flag the adapter won't
490 * be recovered but will be just set to 'blocked'
491			 * state. All subordinate devices will have state
492 * 'blocked' and 'erp failed', too.
493 * Thus the adapter is still able to provide
494 * 'link up' status without being flooded with
495 * requests.
496 * (note: even 'close port' is not permitted)
497 */
498 ZFCP_LOG_INFO("Stopping all operations for adapter "
499 "%s.\n",
500 zfcp_get_busid_by_adapter(adapter));
501 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
502 ZFCP_STATUS_COMMON_ERP_FAILED,
503 &adapter->status);
504 zfcp_erp_adapter_reopen(adapter, 0);
505 debug_text_event(adapter->erp_dbf, 1, "prot_link_down");
506 }
507 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
508 break;
509
510 case FSF_PROT_REEST_QUEUE:
511 ZFCP_LOG_FLAGS(1, "FSF_PROT_REEST_QUEUE\n");
512 debug_text_event(adapter->erp_dbf, 1, "prot_reest_queue");
513		ZFCP_LOG_INFO("The local link to adapter "
514 "%s was re-plugged. "
515 "Re-starting operations on this adapter.\n",
516 zfcp_get_busid_by_adapter(adapter));
517 /* All ports should be marked as ready to run again */
518 zfcp_erp_modify_adapter_status(adapter,
519 ZFCP_STATUS_COMMON_RUNNING,
520 ZFCP_SET);
521 zfcp_erp_adapter_reopen(adapter,
522 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
523 | ZFCP_STATUS_COMMON_ERP_FAILED);
524 zfcp_cmd_dbf_event_fsf("reestque", fsf_req,
525 &fsf_req->qtcb->prefix.prot_status_qual,
526 sizeof (union fsf_prot_status_qual));
527 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
528 break;
529
530 case FSF_PROT_ERROR_STATE:
531 ZFCP_LOG_FLAGS(0, "FSF_PROT_ERROR_STATE\n");
532 ZFCP_LOG_NORMAL("error: The adapter %s "
533 "has entered the error state. "
534 "Restarting all operations on this "
535 "adapter.\n",
536 zfcp_get_busid_by_adapter(adapter));
537 debug_text_event(adapter->erp_dbf, 0, "prot_err_sta");
538 /* restart operation on this adapter */
539 zfcp_erp_adapter_reopen(adapter, 0);
540 zfcp_cmd_dbf_event_fsf("proterrs", fsf_req,
541 &fsf_req->qtcb->prefix.prot_status_qual,
542 sizeof (union fsf_prot_status_qual));
543 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
544 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
545 break;
546
547 default:
548 ZFCP_LOG_NORMAL("bug: Transfer protocol status information "
549 "provided by the adapter %s "
550 "is not compatible with the device driver. "
551 "Stopping all operations on this adapter. "
552 "(debug info 0x%x).\n",
553 zfcp_get_busid_by_adapter(adapter),
554 fsf_req->qtcb->prefix.prot_status);
555 debug_text_event(adapter->erp_dbf, 0, "prot_inval:");
556 debug_exception(adapter->erp_dbf, 0,
557 &fsf_req->qtcb->prefix.prot_status,
558 sizeof (u32));
559 zfcp_erp_adapter_shutdown(adapter, 0);
560 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
561 }
562
563 skip_protstatus:
564 /*
565 * always call specific handlers to give them a chance to do
566 * something meaningful even in error cases
567 */
568 zfcp_fsf_fsfstatus_eval(fsf_req);
569 return retval;
570}
571
572/*
573 * function: zfcp_fsf_fsfstatus_eval
574 *
575 * purpose: evaluates FSF status of completed FSF request
576 * and acts accordingly
577 *
578 * returns:
579 */
580static int
581zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *fsf_req)
582{
583 int retval = 0;
584
585 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
586 goto skip_fsfstatus;
587 }
588
589 /* evaluate FSF Status */
590 switch (fsf_req->qtcb->header.fsf_status) {
591 case FSF_UNKNOWN_COMMAND:
592 ZFCP_LOG_FLAGS(0, "FSF_UNKNOWN_COMMAND\n");
593 ZFCP_LOG_NORMAL("bug: Command issued by the device driver is "
594				"not known by the adapter %s. "
595 "Stopping all operations on this adapter. "
596 "(debug info 0x%x).\n",
597 zfcp_get_busid_by_adapter(fsf_req->adapter),
598 fsf_req->qtcb->header.fsf_command);
599 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
600 "fsf_s_unknown");
601 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
602 zfcp_cmd_dbf_event_fsf("unknownc", fsf_req,
603 &fsf_req->qtcb->header.fsf_status_qual,
604 sizeof (union fsf_status_qual));
605 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
606 break;
607
608 case FSF_FCP_RSP_AVAILABLE:
609 ZFCP_LOG_FLAGS(2, "FSF_FCP_RSP_AVAILABLE\n");
610 ZFCP_LOG_DEBUG("FCP Sense data will be presented to the "
611 "SCSI stack.\n");
612 debug_text_event(fsf_req->adapter->erp_dbf, 3, "fsf_s_rsp");
613 break;
614
615 case FSF_ADAPTER_STATUS_AVAILABLE:
616 ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
617 debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_s_astatus");
618 zfcp_fsf_fsfstatus_qual_eval(fsf_req);
619 break;
620
621 default:
622 break;
623 }
624
625 skip_fsfstatus:
626 /*
627 * always call specific handlers to give them a chance to do
628 * something meaningful even in error cases
629 */
630 zfcp_fsf_req_dispatch(fsf_req);
631
632 return retval;
633}
634
635/*
636 * function: zfcp_fsf_fsfstatus_qual_eval
637 *
638 * purpose: evaluates FSF status-qualifier of completed FSF request
639 * and acts accordingly
640 *
641 * returns:
642 */
643static int
644zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
645{
646 int retval = 0;
647
648 switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) {
649 case FSF_SQ_FCP_RSP_AVAILABLE:
650 ZFCP_LOG_FLAGS(2, "FSF_SQ_FCP_RSP_AVAILABLE\n");
651 debug_text_event(fsf_req->adapter->erp_dbf, 4, "fsf_sq_rsp");
652 break;
653 case FSF_SQ_RETRY_IF_POSSIBLE:
654 ZFCP_LOG_FLAGS(2, "FSF_SQ_RETRY_IF_POSSIBLE\n");
655 /* The SCSI-stack may now issue retries or escalate */
656 debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_sq_retry");
657 zfcp_cmd_dbf_event_fsf("sqretry", fsf_req,
658 &fsf_req->qtcb->header.fsf_status_qual,
659 sizeof (union fsf_status_qual));
660 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
661 break;
662 case FSF_SQ_COMMAND_ABORTED:
663 ZFCP_LOG_FLAGS(2, "FSF_SQ_COMMAND_ABORTED\n");
664 /* Carry the aborted state on to upper layer */
665 debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_sq_abort");
666 zfcp_cmd_dbf_event_fsf("sqabort", fsf_req,
667 &fsf_req->qtcb->header.fsf_status_qual,
668 sizeof (union fsf_status_qual));
669 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
670 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
671 break;
672 case FSF_SQ_NO_RECOM:
673 ZFCP_LOG_FLAGS(0, "FSF_SQ_NO_RECOM\n");
674 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
675 "fsf_sq_no_rec");
676		ZFCP_LOG_NORMAL("bug: No recommendation could be given for a "
677				"problem on the adapter %s. "
678				"Stopping all operations on this adapter.\n",
679 zfcp_get_busid_by_adapter(fsf_req->adapter));
680 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
681 zfcp_cmd_dbf_event_fsf("sqnrecom", fsf_req,
682 &fsf_req->qtcb->header.fsf_status_qual,
683 sizeof (union fsf_status_qual));
684 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
685 break;
686 case FSF_SQ_ULP_PROGRAMMING_ERROR:
687 ZFCP_LOG_FLAGS(0, "FSF_SQ_ULP_PROGRAMMING_ERROR\n");
688 ZFCP_LOG_NORMAL("error: not enough SBALs for data transfer "
689 "(adapter %s)\n",
690 zfcp_get_busid_by_adapter(fsf_req->adapter));
691 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
692 "fsf_sq_ulp_err");
693 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
694 break;
695 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
696 case FSF_SQ_NO_RETRY_POSSIBLE:
697 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
698 /* dealt with in the respective functions */
699 break;
700 default:
701 ZFCP_LOG_NORMAL("bug: Additional status info could "
702 "not be interpreted properly.\n");
703 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
704 (char *) &fsf_req->qtcb->header.fsf_status_qual,
705 sizeof (union fsf_status_qual));
706 debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_sq_inval:");
707 debug_exception(fsf_req->adapter->erp_dbf, 0,
708 &fsf_req->qtcb->header.fsf_status_qual.word[0],
709 sizeof (u32));
710 zfcp_cmd_dbf_event_fsf("squndef", fsf_req,
711 &fsf_req->qtcb->header.fsf_status_qual,
712 sizeof (union fsf_status_qual));
713 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
714 break;
715 }
716
717 return retval;
718}
719
720/*
721 * function: zfcp_fsf_req_dispatch
722 *
723 * purpose: calls the appropriate command specific handler
724 *
725 * returns:
726 */
727static int
728zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
729{
730 struct zfcp_erp_action *erp_action = fsf_req->erp_action;
731 struct zfcp_adapter *adapter = fsf_req->adapter;
732 int retval = 0;
733
734 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
735 ZFCP_LOG_TRACE("fsf_req=%p, QTCB=%p\n", fsf_req, fsf_req->qtcb);
736 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
737 (char *) fsf_req->qtcb, sizeof(struct fsf_qtcb));
738 }
739
740 switch (fsf_req->fsf_command) {
741
742 case FSF_QTCB_FCP_CMND:
743 ZFCP_LOG_FLAGS(3, "FSF_QTCB_FCP_CMND\n");
744 zfcp_fsf_send_fcp_command_handler(fsf_req);
745 break;
746
747 case FSF_QTCB_ABORT_FCP_CMND:
748 ZFCP_LOG_FLAGS(2, "FSF_QTCB_ABORT_FCP_CMND\n");
749 zfcp_fsf_abort_fcp_command_handler(fsf_req);
750 break;
751
752 case FSF_QTCB_SEND_GENERIC:
753 ZFCP_LOG_FLAGS(2, "FSF_QTCB_SEND_GENERIC\n");
754 zfcp_fsf_send_ct_handler(fsf_req);
755 break;
756
757 case FSF_QTCB_OPEN_PORT_WITH_DID:
758 ZFCP_LOG_FLAGS(2, "FSF_QTCB_OPEN_PORT_WITH_DID\n");
759 zfcp_fsf_open_port_handler(fsf_req);
760 break;
761
762 case FSF_QTCB_OPEN_LUN:
763 ZFCP_LOG_FLAGS(2, "FSF_QTCB_OPEN_LUN\n");
764 zfcp_fsf_open_unit_handler(fsf_req);
765 break;
766
767 case FSF_QTCB_CLOSE_LUN:
768 ZFCP_LOG_FLAGS(2, "FSF_QTCB_CLOSE_LUN\n");
769 zfcp_fsf_close_unit_handler(fsf_req);
770 break;
771
772 case FSF_QTCB_CLOSE_PORT:
773 ZFCP_LOG_FLAGS(2, "FSF_QTCB_CLOSE_PORT\n");
774 zfcp_fsf_close_port_handler(fsf_req);
775 break;
776
777 case FSF_QTCB_CLOSE_PHYSICAL_PORT:
778 ZFCP_LOG_FLAGS(2, "FSF_QTCB_CLOSE_PHYSICAL_PORT\n");
779 zfcp_fsf_close_physical_port_handler(fsf_req);
780 break;
781
782 case FSF_QTCB_EXCHANGE_CONFIG_DATA:
783 ZFCP_LOG_FLAGS(2, "FSF_QTCB_EXCHANGE_CONFIG_DATA\n");
784 zfcp_fsf_exchange_config_data_handler(fsf_req);
785 break;
786
787 case FSF_QTCB_EXCHANGE_PORT_DATA:
788 ZFCP_LOG_FLAGS(2, "FSF_QTCB_EXCHANGE_PORT_DATA\n");
789 zfcp_fsf_exchange_port_data_handler(fsf_req);
790 break;
791
792 case FSF_QTCB_SEND_ELS:
793 ZFCP_LOG_FLAGS(2, "FSF_QTCB_SEND_ELS\n");
794 zfcp_fsf_send_els_handler(fsf_req);
795 break;
796
797 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
798 ZFCP_LOG_FLAGS(2, "FSF_QTCB_DOWNLOAD_CONTROL_FILE\n");
799 zfcp_fsf_control_file_handler(fsf_req);
800 break;
801
802 case FSF_QTCB_UPLOAD_CONTROL_FILE:
803 ZFCP_LOG_FLAGS(2, "FSF_QTCB_UPLOAD_CONTROL_FILE\n");
804 zfcp_fsf_control_file_handler(fsf_req);
805 break;
806
807 default:
808 ZFCP_LOG_FLAGS(2, "FSF_QTCB_UNKNOWN\n");
809 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
810 ZFCP_LOG_NORMAL("bug: Command issued by the device driver is "
811 "not supported by the adapter %s\n",
812 zfcp_get_busid_by_adapter(fsf_req->adapter));
813 if (fsf_req->fsf_command != fsf_req->qtcb->header.fsf_command)
814 ZFCP_LOG_NORMAL
815 ("bug: Command issued by the device driver differs "
816 "from the command returned by the adapter %s "
817 "(debug info 0x%x, 0x%x).\n",
818 zfcp_get_busid_by_adapter(fsf_req->adapter),
819 fsf_req->fsf_command,
820 fsf_req->qtcb->header.fsf_command);
821 }
822
823 if (!erp_action)
824 return retval;
825
826 debug_text_event(adapter->erp_dbf, 3, "a_frh");
827 debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
828 zfcp_erp_async_handler(erp_action, 0);
829
830 return retval;
831}
832
833/*
834 * function: zfcp_fsf_status_read
835 *
836 * purpose: initiates a Status Read command at the specified adapter
837 *
838 * returns:
839 */
840int
841zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
842{
843 struct zfcp_fsf_req *fsf_req;
844 struct fsf_status_read_buffer *status_buffer;
845 unsigned long lock_flags;
846 volatile struct qdio_buffer_element *sbale;
847 int retval = 0;
848
849 /* setup new FSF request */
850 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS,
851 req_flags | ZFCP_REQ_NO_QTCB,
852 adapter->pool.fsf_req_status_read,
853 &lock_flags, &fsf_req);
854 if (retval < 0) {
855 ZFCP_LOG_INFO("error: Could not create unsolicited status "
856 "buffer for adapter %s.\n",
857 zfcp_get_busid_by_adapter(adapter));
858 goto failed_req_create;
859 }
860
861 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
862 sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS;
863 sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
864 fsf_req->sbale_curr = 2;
865
866 status_buffer =
867 mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC);
868 if (!status_buffer) {
869		ZFCP_LOG_NORMAL("bug: could not allocate status read buffer\n");
870 goto failed_buf;
871 }
872 memset(status_buffer, 0, sizeof (struct fsf_status_read_buffer));
873 fsf_req->data.status_read.buffer = status_buffer;
874
875 /* insert pointer to respective buffer */
876 sbale = zfcp_qdio_sbale_curr(fsf_req);
877 sbale->addr = (void *) status_buffer;
878 sbale->length = sizeof(struct fsf_status_read_buffer);
879
880 /* start QDIO request for this FSF request */
881 retval = zfcp_fsf_req_send(fsf_req, NULL);
882 if (retval) {
883		ZFCP_LOG_DEBUG("error: Could not set up unsolicited status "
884 "environment.\n");
885 goto failed_req_send;
886 }
887
888	ZFCP_LOG_TRACE("Status Read request initiated (adapter %s)\n",
889 zfcp_get_busid_by_adapter(adapter));
890 goto out;
891
892 failed_req_send:
893 mempool_free(status_buffer, adapter->pool.data_status_read);
894
895 failed_buf:
896 zfcp_fsf_req_free(fsf_req);
897 failed_req_create:
898 out:
899 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
900 return retval;
901}
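
/*
 * Illustrative note (a sketch, not a quote from this file): several of
 * these unsolicited status requests are kept outstanding, so whoever
 * consumes one is expected to replenish it; ZFCP_WAIT_FOR_SBAL is
 * assumed here to be the request flag that lets the caller sleep until
 * queue space is available:
 *
 *	if (zfcp_fsf_status_read(adapter, ZFCP_WAIT_FOR_SBAL))
 *		ZFCP_LOG_INFO("could not replenish status read request\n");
 */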
902
903static int
904zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req)
905{
906 struct fsf_status_read_buffer *status_buffer;
907 struct zfcp_adapter *adapter;
908 struct zfcp_port *port;
909 unsigned long flags;
910
911 status_buffer = fsf_req->data.status_read.buffer;
912 adapter = fsf_req->adapter;
913
914 read_lock_irqsave(&zfcp_data.config_lock, flags);
915 list_for_each_entry(port, &adapter->port_list_head, list)
916 if (port->d_id == (status_buffer->d_id & ZFCP_DID_MASK))
917 break;
918 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
919
920 if (!port || (port->d_id != (status_buffer->d_id & ZFCP_DID_MASK))) {
921		ZFCP_LOG_NORMAL("bug: Reopen port indication received for "
922				"nonexistent port with d_id 0x%08x on "
923 "adapter %s. Ignored.\n",
924 status_buffer->d_id & ZFCP_DID_MASK,
925 zfcp_get_busid_by_adapter(adapter));
926 goto out;
927 }
928
929 switch (status_buffer->status_subtype) {
930
931 case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT:
932 ZFCP_LOG_FLAGS(2, "FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT\n");
933 debug_text_event(adapter->erp_dbf, 3, "unsol_pc_phys:");
934 zfcp_erp_port_reopen(port, 0);
935 break;
936
937 case FSF_STATUS_READ_SUB_ERROR_PORT:
938 ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_SUB_ERROR_PORT\n");
939 debug_text_event(adapter->erp_dbf, 1, "unsol_pc_err:");
940 zfcp_erp_port_shutdown(port, 0);
941 break;
942
943 default:
944 debug_text_event(adapter->erp_dbf, 0, "unsol_unk_sub:");
945 debug_exception(adapter->erp_dbf, 0,
946 &status_buffer->status_subtype, sizeof (u32));
947 ZFCP_LOG_NORMAL("bug: Undefined status subtype received "
948 "for a reopen indication on port with "
949 "d_id 0x%08x on the adapter %s. "
950 "Ignored. (debug info 0x%x)\n",
951 status_buffer->d_id,
952 zfcp_get_busid_by_adapter(adapter),
953 status_buffer->status_subtype);
954 }
955 out:
956 return 0;
957}
958
959/*
960 * function: zfcp_fsf_status_read_handler
961 *
962 * purpose: is called for finished Status Read command
963 *
964 * returns: 0 on success, otherwise error of re-issued Status Read
965 */
966static int
967zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
968{
969 int retval = 0;
970 struct zfcp_adapter *adapter = fsf_req->adapter;
971 struct fsf_status_read_buffer *status_buffer =
972 fsf_req->data.status_read.buffer;
973
974 if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
975 mempool_free(status_buffer, adapter->pool.data_status_read);
976 zfcp_fsf_req_cleanup(fsf_req);
977 goto out;
978 }
979
980 switch (status_buffer->status_type) {
981
982 case FSF_STATUS_READ_PORT_CLOSED:
983 ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_PORT_CLOSED\n");
984 debug_text_event(adapter->erp_dbf, 3, "unsol_pclosed:");
985 debug_event(adapter->erp_dbf, 3,
986 &status_buffer->d_id, sizeof (u32));
987 zfcp_fsf_status_read_port_closed(fsf_req);
988 break;
989
990 case FSF_STATUS_READ_INCOMING_ELS:
991 ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_INCOMING_ELS\n");
992 debug_text_event(adapter->erp_dbf, 3, "unsol_els:");
993 zfcp_fsf_incoming_els(fsf_req);
994 break;
995
996 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
997 ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_SENSE_DATA_AVAIL\n");
998 debug_text_event(adapter->erp_dbf, 3, "unsol_sense:");
999 ZFCP_LOG_INFO("unsolicited sense data received (adapter %s)\n",
1000 zfcp_get_busid_by_adapter(adapter));
1001 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, (char *) status_buffer,
1002 sizeof(struct fsf_status_read_buffer));
1003 break;
1004
1005 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
1006 ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_BIT_ERROR_THRESHOLD\n");
1007 debug_text_event(adapter->erp_dbf, 3, "unsol_bit_err:");
1008 ZFCP_LOG_NORMAL("Bit error threshold data received:\n");
1009 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
1010 (char *) status_buffer,
1011 sizeof (struct fsf_status_read_buffer));
1012 break;
1013
1014 case FSF_STATUS_READ_LINK_DOWN:
1015 ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_LINK_DOWN\n");
1016 debug_text_event(adapter->erp_dbf, 0, "unsol_link_down:");
1017 ZFCP_LOG_INFO("Local link to adapter %s is down\n",
1018 zfcp_get_busid_by_adapter(adapter));
1019 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
1020 &adapter->status);
1021 zfcp_erp_adapter_failed(adapter);
1022 break;
1023
1024 case FSF_STATUS_READ_LINK_UP:
1025 ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_LINK_UP\n");
1026 debug_text_event(adapter->erp_dbf, 2, "unsol_link_up:");
1027 ZFCP_LOG_INFO("Local link to adapter %s was replugged. "
1028 "Restarting operations on this adapter\n",
1029 zfcp_get_busid_by_adapter(adapter));
1030 /* All ports should be marked as ready to run again */
1031 zfcp_erp_modify_adapter_status(adapter,
1032 ZFCP_STATUS_COMMON_RUNNING,
1033 ZFCP_SET);
1034 zfcp_erp_adapter_reopen(adapter,
1035 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
1036 | ZFCP_STATUS_COMMON_ERP_FAILED);
1037 break;
1038
1039 case FSF_STATUS_READ_CFDC_UPDATED:
1040 ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_CFDC_UPDATED\n");
1041 debug_text_event(adapter->erp_dbf, 2, "unsol_cfdc_update:");
1042 ZFCP_LOG_INFO("CFDC has been updated on the adapter %s\n",
1043 zfcp_get_busid_by_adapter(adapter));
1044 zfcp_erp_adapter_access_changed(adapter);
1045 break;
1046
1047 case FSF_STATUS_READ_CFDC_HARDENED:
1048 ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_CFDC_HARDENED\n");
1049 debug_text_event(adapter->erp_dbf, 2, "unsol_cfdc_harden:");
1050 switch (status_buffer->status_subtype) {
1051 case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE:
1052 ZFCP_LOG_INFO("CFDC of adapter %s saved on SE\n",
1053 zfcp_get_busid_by_adapter(adapter));
1054 break;
1055 case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2:
1056 ZFCP_LOG_INFO("CFDC of adapter %s has been copied "
1057 "to the secondary SE\n",
1058 zfcp_get_busid_by_adapter(adapter));
1059 break;
1060 default:
1061 ZFCP_LOG_INFO("CFDC of adapter %s has been hardened\n",
1062 zfcp_get_busid_by_adapter(adapter));
1063 }
1064 break;
1065
1066 default:
1067 debug_text_event(adapter->erp_dbf, 0, "unsol_unknown:");
1068 debug_exception(adapter->erp_dbf, 0,
1069 &status_buffer->status_type, sizeof (u32));
1070 ZFCP_LOG_NORMAL("bug: An unsolicited status packet of unknown "
1071 "type was received (debug info 0x%x)\n",
1072 status_buffer->status_type);
1073 ZFCP_LOG_DEBUG("Dump of status_read_buffer %p:\n",
1074 status_buffer);
1075 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
1076 (char *) status_buffer,
1077 sizeof (struct fsf_status_read_buffer));
1078 break;
1079 }
1080 mempool_free(status_buffer, adapter->pool.data_status_read);
1081 zfcp_fsf_req_cleanup(fsf_req);
1082 /*
1083 * recycle buffer and start new request; repeat until outbound
1084 * queue is empty or adapter shutdown is requested
1085 */
1086 /*
1087 * FIXME(qdio):
1088 * we may wait in the req_create for 5s during shutdown, so
1089 * qdio_cleanup will have to wait at least that long before returning
1090 * with failure to allow us a proper cleanup under all circumstances
1091 */
1092 /*
1093 * FIXME:
1094 * allocation failure possible? (Is this code needed?)
1095 */
1096 retval = zfcp_fsf_status_read(adapter, 0);
1097 if (retval < 0) {
1098 ZFCP_LOG_INFO("Failed to create unsolicited status read "
1099 "request for the adapter %s.\n",
1100 zfcp_get_busid_by_adapter(adapter));
1101 /* temporary fix to avoid status read buffer shortage */
1102 adapter->status_read_failed++;
1103 if ((ZFCP_STATUS_READS_RECOM - adapter->status_read_failed)
1104 < ZFCP_STATUS_READ_FAILED_THRESHOLD) {
1105 ZFCP_LOG_INFO("restart adapter %s due to status read "
1106 "buffer shortage\n",
1107 zfcp_get_busid_by_adapter(adapter));
1108 zfcp_erp_adapter_reopen(adapter, 0);
1109 }
1110 }
1111 out:
1112 return retval;
1113}
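
/*
 * Illustrative sketch, not part of the driver source: the "temporary
 * fix" above reopens the adapter once failed replenish attempts have
 * eaten up most of the recommended number of outstanding requests.
 * Assuming, purely as an example, ZFCP_STATUS_READS_RECOM = 16 and
 * ZFCP_STATUS_READ_FAILED_THRESHOLD = 2, the reopen triggers as soon
 * as status_read_failed reaches 15.  Helper name is made up:
 */
static inline int
zfcp_status_read_shortage_sketch(struct zfcp_adapter *adapter)
{
	return (ZFCP_STATUS_READS_RECOM - adapter->status_read_failed)
		< ZFCP_STATUS_READ_FAILED_THRESHOLD;
}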
1114
1115/*
1116 * function: zfcp_fsf_abort_fcp_command
1117 *
1118 * purpose: tells FSF to abort a running SCSI command
1119 *
1120 * returns: address of initiated FSF request
1121 * NULL - request could not be initiated
1122 *
1123 * FIXME(design): should be watched by a timeout !!!
1124 * FIXME(design): shouldn't this be modified to return an int
1125 * also...don't know how though
1126 */
1127struct zfcp_fsf_req *
1128zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
1129 struct zfcp_adapter *adapter,
1130 struct zfcp_unit *unit, int req_flags)
1131{
1132 volatile struct qdio_buffer_element *sbale;
1133 unsigned long lock_flags;
1134 struct zfcp_fsf_req *fsf_req = NULL;
1135 int retval = 0;
1136
1137 /* setup new FSF request */
1138 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
1139 req_flags, adapter->pool.fsf_req_abort,
1140 &lock_flags, &fsf_req);
1141 if (retval < 0) {
1142 ZFCP_LOG_INFO("error: Failed to create an abort command "
1143 "request for lun 0x%016Lx on port 0x%016Lx "
1144 "on adapter %s.\n",
1145 unit->fcp_lun,
1146 unit->port->wwpn,
1147 zfcp_get_busid_by_adapter(adapter));
1148 goto out;
1149 }
1150
1151 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
1152 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1153 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1154
1155 fsf_req->data.abort_fcp_command.unit = unit;
1156
1157 /* set handles of unit and its parent port in QTCB */
1158 fsf_req->qtcb->header.lun_handle = unit->handle;
1159 fsf_req->qtcb->header.port_handle = unit->port->handle;
1160
1161 /* set handle of request which should be aborted */
1162 fsf_req->qtcb->bottom.support.req_handle = (u64) old_req_id;
1163
1164 /* start QDIO request for this FSF request */
1165
1166 zfcp_fsf_start_scsi_er_timer(adapter);
1167 retval = zfcp_fsf_req_send(fsf_req, NULL);
1168 if (retval) {
1169 del_timer(&adapter->scsi_er_timer);
1170 ZFCP_LOG_INFO("error: Failed to send abort command request "
1171 "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n",
1172 zfcp_get_busid_by_adapter(adapter),
1173 unit->port->wwpn, unit->fcp_lun);
1174 zfcp_fsf_req_free(fsf_req);
1175 fsf_req = NULL;
1176 goto out;
1177 }
1178
1179 ZFCP_LOG_DEBUG("Abort FCP Command request initiated "
1180 "(adapter%s, port d_id=0x%08x, "
1181 "unit x%016Lx, old_req_id=0x%lx)\n",
1182 zfcp_get_busid_by_adapter(adapter),
1183 unit->port->d_id,
1184 unit->fcp_lun, old_req_id);
1185 out:
1186 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
1187 return fsf_req;
1188}
1189
1190/*
1191 * function: zfcp_fsf_abort_fcp_command_handler
1192 *
1193 * purpose: is called for finished Abort FCP Command request
1194 *
1195 * returns:
1196 */
1197static int
1198zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
1199{
1200 int retval = -EINVAL;
1201 struct zfcp_unit *unit = new_fsf_req->data.abort_fcp_command.unit;
1202 unsigned char status_qual =
1203 new_fsf_req->qtcb->header.fsf_status_qual.word[0];
1204
1205 del_timer(&new_fsf_req->adapter->scsi_er_timer);
1206
1207 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1208 /* do not set ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED */
1209 goto skip_fsfstatus;
1210 }
1211
1212 /* evaluate FSF status in QTCB */
1213 switch (new_fsf_req->qtcb->header.fsf_status) {
1214
1215 case FSF_PORT_HANDLE_NOT_VALID:
1216 if ((status_qual >> 4) != (status_qual & 0xf)) {
1217 ZFCP_LOG_FLAGS(2, "FSF_PORT_HANDLE_NOT_VALID\n");
1218 debug_text_event(new_fsf_req->adapter->erp_dbf, 3,
1219 "fsf_s_phand_nv0");
1220 /*
1221 * In this case a command that was sent prior to a port
1222 * reopen was aborted (handles are different). This is
1223 * fine.
1224 */
1225 } else {
1226 ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
1227 ZFCP_LOG_INFO("Temporary port identifier 0x%x for "
1228 "port 0x%016Lx on adapter %s invalid. "
1229 "This may happen occasionally.\n",
1230 unit->port->handle,
1231 unit->port->wwpn,
1232 zfcp_get_busid_by_unit(unit));
1233 ZFCP_LOG_INFO("status qualifier:\n");
1234 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
1235 (char *) &new_fsf_req->qtcb->header.
1236 fsf_status_qual,
1237 sizeof (union fsf_status_qual));
1238 /* Let's hope this sorts out the mess */
1239 debug_text_event(new_fsf_req->adapter->erp_dbf, 1,
1240 "fsf_s_phand_nv1");
1241 zfcp_erp_adapter_reopen(unit->port->adapter, 0);
1242 new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1243 }
1244 break;
1245
1246 case FSF_LUN_HANDLE_NOT_VALID:
1247 if ((status_qual >> 4) != (status_qual & 0xf)) {
1248 /* 2 */
1249 ZFCP_LOG_FLAGS(0, "FSF_LUN_HANDLE_NOT_VALID\n");
1250 debug_text_event(new_fsf_req->adapter->erp_dbf, 3,
1251 "fsf_s_lhand_nv0");
1252 /*
1253 * In this case a command that was sent prior to a unit
1254 * reopen was aborted (handles are different).
1255 * This is fine.
1256 */
1257 } else {
1258 ZFCP_LOG_FLAGS(1, "FSF_LUN_HANDLE_NOT_VALID\n");
1259 ZFCP_LOG_INFO
1260 ("Warning: Temporary LUN identifier 0x%x of LUN "
1261 "0x%016Lx on port 0x%016Lx on adapter %s is "
1262 "invalid. This may happen in rare cases. "
1263 "Trying to re-establish link.\n",
1264 unit->handle,
1265 unit->fcp_lun,
1266 unit->port->wwpn,
1267 zfcp_get_busid_by_unit(unit));
1268 ZFCP_LOG_DEBUG("Status qualifier data:\n");
1269 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
1270 (char *) &new_fsf_req->qtcb->header.
1271 fsf_status_qual,
1272 sizeof (union fsf_status_qual));
1273 /* Let's hope this sorts out the mess */
1274 debug_text_event(new_fsf_req->adapter->erp_dbf, 1,
1275 "fsf_s_lhand_nv1");
1276 zfcp_erp_port_reopen(unit->port, 0);
1277 new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1278 }
1279 break;
1280
1281 case FSF_FCP_COMMAND_DOES_NOT_EXIST:
1282 ZFCP_LOG_FLAGS(2, "FSF_FCP_COMMAND_DOES_NOT_EXIST\n");
1283 retval = 0;
1284 debug_text_event(new_fsf_req->adapter->erp_dbf, 3,
1285 "fsf_s_no_exist");
1286 new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
1287 break;
1288
1289 case FSF_PORT_BOXED:
1290 /* 2 */
1291 ZFCP_LOG_FLAGS(0, "FSF_PORT_BOXED\n");
1292 ZFCP_LOG_INFO("Remote port 0x%016Lx on adapter %s needs to "
1293 "be reopened\n", unit->port->wwpn,
1294 zfcp_get_busid_by_unit(unit));
1295 debug_text_event(new_fsf_req->adapter->erp_dbf, 2,
1296 "fsf_s_pboxed");
1297 zfcp_erp_port_reopen(unit->port, 0);
1298 new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
1299 | ZFCP_STATUS_FSFREQ_RETRY;
1300 break;
1301
1302 case FSF_LUN_BOXED:
1303 ZFCP_LOG_FLAGS(0, "FSF_LUN_BOXED\n");
1304 ZFCP_LOG_INFO(
1305 "unit 0x%016Lx on port 0x%016Lx on adapter %s needs "
1306 "to be reopened\n",
1307 unit->fcp_lun, unit->port->wwpn,
1308 zfcp_get_busid_by_unit(unit));
1309 debug_text_event(new_fsf_req->adapter->erp_dbf, 1, "fsf_s_lboxed");
1310 zfcp_erp_unit_reopen(unit, 0);
1311 zfcp_cmd_dbf_event_fsf("unitbox", new_fsf_req,
1312 &new_fsf_req->qtcb->header.fsf_status_qual,
1313 sizeof(union fsf_status_qual));
1314 new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
1315 | ZFCP_STATUS_FSFREQ_RETRY;
1316 break;
1317
1318 case FSF_ADAPTER_STATUS_AVAILABLE:
1319 /* 2 */
1320 ZFCP_LOG_FLAGS(0, "FSF_ADAPTER_STATUS_AVAILABLE\n");
1321 switch (new_fsf_req->qtcb->header.fsf_status_qual.word[0]) {
1322 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1323 ZFCP_LOG_FLAGS(2,
1324 "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
1325 debug_text_event(new_fsf_req->adapter->erp_dbf, 1,
1326 "fsf_sq_ltest");
1327 /* reopening link to port */
1328 zfcp_erp_port_reopen(unit->port, 0);
1329 new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1330 break;
1331 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1332 ZFCP_LOG_FLAGS(2,
1333 "FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
1334 /* SCSI stack will escalate */
1335 debug_text_event(new_fsf_req->adapter->erp_dbf, 1,
1336 "fsf_sq_ulp");
1337 new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1338 break;
1339 default:
1340 ZFCP_LOG_NORMAL
1341 ("bug: Wrong status qualifier 0x%x arrived.\n",
1342 new_fsf_req->qtcb->header.fsf_status_qual.word[0]);
1343 debug_text_event(new_fsf_req->adapter->erp_dbf, 0,
1344 "fsf_sq_inval:");
1345 debug_exception(new_fsf_req->adapter->erp_dbf, 0,
1346 &new_fsf_req->qtcb->header.
1347 fsf_status_qual.word[0], sizeof (u32));
1348 break;
1349 }
1350 break;
1351
1352 case FSF_GOOD:
1353 /* 3 */
1354 ZFCP_LOG_FLAGS(0, "FSF_GOOD\n");
1355 retval = 0;
1356 new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
1357 break;
1358
1359 default:
1360 ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
1361 "(debug info 0x%x)\n",
1362 new_fsf_req->qtcb->header.fsf_status);
1363 debug_text_event(new_fsf_req->adapter->erp_dbf, 0,
1364 "fsf_s_inval:");
1365 debug_exception(new_fsf_req->adapter->erp_dbf, 0,
1366 &new_fsf_req->qtcb->header.fsf_status,
1367 sizeof (u32));
1368 break;
1369 }
1370 skip_fsfstatus:
1371 return retval;
1372}
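
/*
 * Illustrative sketch, not part of the driver source: both
 * *_HANDLE_NOT_VALID branches above distinguish a harmless stale
 * handle (command sent before a reopen) from a genuinely broken one
 * by comparing the two nibbles of the first status qualifier byte.
 * Standalone form of that test, helper name made up:
 */
static inline int
zfcp_status_qual_nibbles_differ_sketch(unsigned char qual)
{
	/* high nibble != low nibble: old handle, aborted request is fine */
	return (qual >> 4) != (qual & 0xf);
}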
1373
1374/**
1375 * zfcp_use_one_sbal - checks whether request and response buffers each fit
1376 *	into one SBALE
1377 * Two scatter-gather lists are passed, one for the request and one for the
1378 * response.
1379 */
1380static inline int
1381zfcp_use_one_sbal(struct scatterlist *req, int req_count,
1382 struct scatterlist *resp, int resp_count)
1383{
1384 return ((req_count == 1) &&
1385 (resp_count == 1) &&
1386 (((unsigned long) zfcp_sg_to_address(&req[0]) &
1387 PAGE_MASK) ==
1388 ((unsigned long) (zfcp_sg_to_address(&req[0]) +
1389 req[0].length - 1) & PAGE_MASK)) &&
1390 (((unsigned long) zfcp_sg_to_address(&resp[0]) &
1391 PAGE_MASK) ==
1392 ((unsigned long) (zfcp_sg_to_address(&resp[0]) +
1393 resp[0].length - 1) & PAGE_MASK)));
1394}
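
/*
 * Illustrative sketch, not part of the driver source: a buffer can be
 * covered by a single SBALE only if it does not cross a page boundary,
 * i.e. its first and its last byte share the same PAGE_MASK prefix.
 * zfcp_use_one_sbal() above simply applies this test to the first (and
 * only) element of both scatter-gather lists.  Helper name is made up:
 */
static inline int
zfcp_sg_fits_one_sbale_sketch(struct scatterlist *sg)
{
	unsigned long start = (unsigned long) zfcp_sg_to_address(sg);

	return (start & PAGE_MASK) ==
	       ((start + sg->length - 1) & PAGE_MASK);
}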
1395
1396/**
1397 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1398 * @ct: pointer to struct zfcp_send_ct which contains all needed data for
1399 * the request
1400 * @pool: pointer to memory pool, if non-null this pool is used to allocate
1401 * a struct zfcp_fsf_req
1402 * @erp_action: pointer to erp_action, if non-null the Generic Service request
1403 * is sent within error recovery
1404 */
1405int
1406zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1407 struct zfcp_erp_action *erp_action)
1408{
1409 volatile struct qdio_buffer_element *sbale;
1410 struct zfcp_port *port;
1411 struct zfcp_adapter *adapter;
1412 struct zfcp_fsf_req *fsf_req;
1413 unsigned long lock_flags;
1414 int bytes;
1415 int ret = 0;
1416
1417 port = ct->port;
1418 adapter = port->adapter;
1419
1420 ret = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC,
1421 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
1422 pool, &lock_flags, &fsf_req);
1423 if (ret < 0) {
1424 ZFCP_LOG_INFO("error: Could not create CT request (FC-GS) for "
1425 "adapter: %s\n",
1426 zfcp_get_busid_by_adapter(adapter));
1427 goto failed_req;
1428 }
1429
1430 if (erp_action != NULL) {
1431 erp_action->fsf_req = fsf_req;
1432 fsf_req->erp_action = erp_action;
1433 }
1434
1435 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
1436 if (zfcp_use_one_sbal(ct->req, ct->req_count,
1437 ct->resp, ct->resp_count)){
1438 /* both request buffer and response buffer
1439 fit into one sbale each */
1440 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
1441 sbale[2].addr = zfcp_sg_to_address(&ct->req[0]);
1442 sbale[2].length = ct->req[0].length;
1443 sbale[3].addr = zfcp_sg_to_address(&ct->resp[0]);
1444 sbale[3].length = ct->resp[0].length;
1445 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
1446 } else if (adapter->supported_features &
1447 FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
1448 /* try to use chained SBALs */
1449 bytes = zfcp_qdio_sbals_from_sg(fsf_req,
1450 SBAL_FLAGS0_TYPE_WRITE_READ,
1451 ct->req, ct->req_count,
1452 ZFCP_MAX_SBALS_PER_CT_REQ);
1453 if (bytes <= 0) {
1454 ZFCP_LOG_INFO("error: creation of CT request failed "
1455 "on adapter %s\n",
1456 zfcp_get_busid_by_adapter(adapter));
1457 if (bytes == 0)
1458 ret = -ENOMEM;
1459 else
1460 ret = bytes;
1461
1462 goto failed_send;
1463 }
1464 fsf_req->qtcb->bottom.support.req_buf_length = bytes;
1465 fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
1466 bytes = zfcp_qdio_sbals_from_sg(fsf_req,
1467 SBAL_FLAGS0_TYPE_WRITE_READ,
1468 ct->resp, ct->resp_count,
1469 ZFCP_MAX_SBALS_PER_CT_REQ);
1470 if (bytes <= 0) {
1471 ZFCP_LOG_INFO("error: creation of CT request failed "
1472 "on adapter %s\n",
1473 zfcp_get_busid_by_adapter(adapter));
1474 if (bytes == 0)
1475 ret = -ENOMEM;
1476 else
1477 ret = bytes;
1478
1479 goto failed_send;
1480 }
1481 fsf_req->qtcb->bottom.support.resp_buf_length = bytes;
1482 } else {
1483 /* reject send generic request */
1484 ZFCP_LOG_INFO(
1485 "error: microcode does not support chained SBALs,"
1486 "CT request too big (adapter %s)\n",
1487 zfcp_get_busid_by_adapter(adapter));
1488 ret = -EOPNOTSUPP;
1489 goto failed_send;
1490 }
1491
1492 /* settings in QTCB */
1493 fsf_req->qtcb->header.port_handle = port->handle;
1494 fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class;
1495 fsf_req->qtcb->bottom.support.timeout = ct->timeout;
1496 fsf_req->data.send_ct = ct;
1497
1498 /* start QDIO request for this FSF request */
1499 ret = zfcp_fsf_req_send(fsf_req, ct->timer);
1500 if (ret) {
1501 ZFCP_LOG_DEBUG("error: initiation of CT request failed "
1502 "(adapter %s, port 0x%016Lx)\n",
1503 zfcp_get_busid_by_adapter(adapter), port->wwpn);
1504 goto failed_send;
1505 }
1506
1507 ZFCP_LOG_DEBUG("CT request initiated (adapter %s, port 0x%016Lx)\n",
1508 zfcp_get_busid_by_adapter(adapter), port->wwpn);
1509 goto out;
1510
1511 failed_send:
1512 zfcp_fsf_req_free(fsf_req);
1513 if (erp_action != NULL) {
1514 erp_action->fsf_req = NULL;
1515 }
1516 failed_req:
1517 out:
1518 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
1519 lock_flags);
1520 return ret;
1521}
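
/*
 * Illustrative sketch, not part of the driver source: a minimal caller
 * fills struct zfcp_send_ct with single-element scatter-gather lists
 * for request and response plus an optional completion callback, then
 * hands it to zfcp_fsf_send_ct().  Field names follow the usage
 * visible above; the callback, its handler_data type and the timeout
 * value are assumptions of this example.
 */
static void
zfcp_ct_done_sketch(unsigned long data)
{
	struct zfcp_send_ct *ct = (struct zfcp_send_ct *) data;

	if (ct->status != 0)
		ZFCP_LOG_DEBUG("CT request failed (status %d)\n",
			       ct->status);
}

static int
zfcp_send_ct_sketch(struct zfcp_port *port, struct scatterlist *req,
		    struct scatterlist *resp)
{
	/* static: the request completes asynchronously, so the
	   structure must outlive this function (sketch only) */
	static struct zfcp_send_ct ct;

	ct.port = port;
	ct.req = req;
	ct.req_count = 1;
	ct.resp = resp;
	ct.resp_count = 1;
	ct.timeout = 10;	/* assumed FSF timeout value */
	ct.timer = NULL;	/* no request timer in this sketch */
	ct.handler = zfcp_ct_done_sketch;
	ct.handler_data = (unsigned long) &ct;

	/* no private mempool, not within error recovery */
	return zfcp_fsf_send_ct(&ct, NULL, NULL);
}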
1522
1523/**
1524 * zfcp_fsf_send_ct_handler - handler for Generic Service requests
1525 * @fsf_req: pointer to struct zfcp_fsf_req
1526 *
1527 * Data specific for the Generic Service request is passed by
1528 * fsf_req->data.send_ct.
1529 * Usually a specific handler for the request is called via
1530 * fsf_req->data.send_ct->handler at end of this function.
1531 */
1532static int
1533zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
1534{
1535 struct zfcp_port *port;
1536 struct zfcp_adapter *adapter;
1537 struct zfcp_send_ct *send_ct;
1538 struct fsf_qtcb_header *header;
1539 struct fsf_qtcb_bottom_support *bottom;
1540 int retval = -EINVAL;
1541 u16 subtable, rule, counter;
1542
1543 adapter = fsf_req->adapter;
1544 send_ct = fsf_req->data.send_ct;
1545 port = send_ct->port;
1546 header = &fsf_req->qtcb->header;
1547 bottom = &fsf_req->qtcb->bottom.support;
1548
1549 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
1550 goto skip_fsfstatus;
1551
1552 /* evaluate FSF status in QTCB */
1553 switch (header->fsf_status) {
1554
1555 case FSF_GOOD:
1556 ZFCP_LOG_FLAGS(2,"FSF_GOOD\n");
1557 retval = 0;
1558 break;
1559
1560 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1561 ZFCP_LOG_FLAGS(2, "FSF_SERVICE_CLASS_NOT_SUPPORTED\n");
1562 if (adapter->fc_service_class <= 3) {
1563 ZFCP_LOG_INFO("error: adapter %s does not support fc "
1564 "class %d.\n",
1565 zfcp_get_busid_by_port(port),
1566 adapter->fc_service_class);
1567 } else {
1568 ZFCP_LOG_INFO("bug: The fibre channel class at the "
1569 "adapter %s is invalid. "
1570 "(debug info %d)\n",
1571 zfcp_get_busid_by_port(port),
1572 adapter->fc_service_class);
1573 }
1574 /* stop operation for this adapter */
1575 debug_text_exception(adapter->erp_dbf, 0, "fsf_s_class_nsup");
1576 zfcp_erp_adapter_shutdown(adapter, 0);
1577 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1578 break;
1579
1580 case FSF_ADAPTER_STATUS_AVAILABLE:
1581 ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
1582 switch (header->fsf_status_qual.word[0]){
1583 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1584 ZFCP_LOG_FLAGS(2,"FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
1585 /* reopening link to port */
1586 debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ltest");
1587 zfcp_test_link(port);
1588 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1589 break;
1590 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1591 ZFCP_LOG_FLAGS(2,"FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
1592 /* ERP strategy will escalate */
1593 debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ulp");
1594 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1595 break;
1596 default:
1597 ZFCP_LOG_INFO("bug: Wrong status qualifier 0x%x "
1598 "arrived.\n",
1599 header->fsf_status_qual.word[0]);
1600 break;
1601 }
1602 break;
1603
1604 case FSF_ACCESS_DENIED:
1605 ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
1606 ZFCP_LOG_NORMAL("access denied, cannot send generic service "
1607 "command (adapter %s, port d_id=0x%08x)\n",
1608 zfcp_get_busid_by_port(port), port->d_id);
1609 for (counter = 0; counter < 2; counter++) {
1610 subtable = header->fsf_status_qual.halfword[counter * 2];
1611 rule = header->fsf_status_qual.halfword[counter * 2 + 1];
1612 switch (subtable) {
1613 case FSF_SQ_CFDC_SUBTABLE_OS:
1614 case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
1615 case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
1616 case FSF_SQ_CFDC_SUBTABLE_LUN:
1617 ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
1618 zfcp_act_subtable_type[subtable], rule);
1619 break;
1620 }
1621 }
1622 debug_text_event(adapter->erp_dbf, 1, "fsf_s_access");
1623 zfcp_erp_port_access_denied(port);
1624 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1625 break;
1626
1627 case FSF_GENERIC_COMMAND_REJECTED:
1628 ZFCP_LOG_FLAGS(2, "FSF_GENERIC_COMMAND_REJECTED\n");
1629 ZFCP_LOG_INFO("generic service command rejected "
1630 "(adapter %s, port d_id=0x%08x)\n",
1631 zfcp_get_busid_by_port(port), port->d_id);
1632 ZFCP_LOG_INFO("status qualifier:\n");
1633 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
1634 (char *) &header->fsf_status_qual,
1635 sizeof (union fsf_status_qual));
1636 debug_text_event(adapter->erp_dbf, 1, "fsf_s_gcom_rej");
1637 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1638 break;
1639
1640 case FSF_PORT_HANDLE_NOT_VALID:
1641 ZFCP_LOG_FLAGS(2, "FSF_PORT_HANDLE_NOT_VALID\n");
1642 ZFCP_LOG_DEBUG("Temporary port identifier 0x%x for port "
1643 "0x%016Lx on adapter %s invalid. This may "
1644 "happen occasionally.\n", port->handle,
1645 port->wwpn, zfcp_get_busid_by_port(port));
1646 ZFCP_LOG_INFO("status qualifier:\n");
1647 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
1648 (char *) &header->fsf_status_qual,
1649 sizeof (union fsf_status_qual));
1650 debug_text_event(adapter->erp_dbf, 1, "fsf_s_phandle_nv");
1651 zfcp_erp_adapter_reopen(adapter, 0);
1652 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1653 break;
1654
1655 case FSF_PORT_BOXED:
1656 ZFCP_LOG_FLAGS(2, "FSF_PORT_BOXED\n");
1657 ZFCP_LOG_INFO("port needs to be reopened "
1658 "(adapter %s, port d_id=0x%08x)\n",
1659 zfcp_get_busid_by_port(port), port->d_id);
1660 debug_text_event(adapter->erp_dbf, 2, "fsf_s_pboxed");
1661 zfcp_erp_port_reopen(port, 0);
1662 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
1663 | ZFCP_STATUS_FSFREQ_RETRY;
1664 break;
1665
1666 /* following states should never occur, all cases avoided
1667 in zfcp_fsf_send_ct - but who knows ... */
1668 case FSF_PAYLOAD_SIZE_MISMATCH:
1669 ZFCP_LOG_FLAGS(2, "FSF_PAYLOAD_SIZE_MISMATCH\n");
1670 ZFCP_LOG_INFO("payload size mismatch (adapter: %s, "
1671 "req_buf_length=%d, resp_buf_length=%d)\n",
1672 zfcp_get_busid_by_adapter(adapter),
1673 bottom->req_buf_length, bottom->resp_buf_length);
1674 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1675 break;
1676 case FSF_REQUEST_SIZE_TOO_LARGE:
1677 ZFCP_LOG_FLAGS(2, "FSF_REQUEST_SIZE_TOO_LARGE\n");
1678 ZFCP_LOG_INFO("request size too large (adapter: %s, "
1679 "req_buf_length=%d)\n",
1680 zfcp_get_busid_by_adapter(adapter),
1681 bottom->req_buf_length);
1682 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1683 break;
1684 case FSF_RESPONSE_SIZE_TOO_LARGE:
1685 ZFCP_LOG_FLAGS(2, "FSF_RESPONSE_SIZE_TOO_LARGE\n");
1686 ZFCP_LOG_INFO("response size too large (adapter: %s, "
1687 "resp_buf_length=%d)\n",
1688 zfcp_get_busid_by_adapter(adapter),
1689 bottom->resp_buf_length);
1690 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1691 break;
1692 case FSF_SBAL_MISMATCH:
1693 ZFCP_LOG_FLAGS(2, "FSF_SBAL_MISMATCH\n");
1694 ZFCP_LOG_INFO("SBAL mismatch (adapter: %s, req_buf_length=%d, "
1695 "resp_buf_length=%d)\n",
1696 zfcp_get_busid_by_adapter(adapter),
1697 bottom->req_buf_length, bottom->resp_buf_length);
1698 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1699 break;
1700
1701 default:
1702 ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
1703 "(debug info 0x%x)\n", header->fsf_status);
1704 debug_text_event(adapter->erp_dbf, 0, "fsf_sq_inval:");
1705 debug_exception(adapter->erp_dbf, 0,
1706 &header->fsf_status_qual.word[0], sizeof (u32));
1707 break;
1708 }
1709
1710skip_fsfstatus:
1711 send_ct->status = retval;
1712
1713 if (send_ct->handler != NULL)
1714 send_ct->handler(send_ct->handler_data);
1715
1716 return retval;
1717}
1718
1719/**
1720 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1721 * @els: pointer to struct zfcp_send_els which contains all needed data for
1722 * the command.
1723 */
1724int
1725zfcp_fsf_send_els(struct zfcp_send_els *els)
1726{
1727 volatile struct qdio_buffer_element *sbale;
1728 struct zfcp_fsf_req *fsf_req;
1729 fc_id_t d_id;
1730 struct zfcp_adapter *adapter;
1731 unsigned long lock_flags;
1732 int bytes;
1733 int ret = 0;
1734
1735 d_id = els->d_id;
1736 adapter = els->adapter;
1737
1738 ret = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
1739 ZFCP_REQ_AUTO_CLEANUP,
1740 NULL, &lock_flags, &fsf_req);
1741 if (ret < 0) {
1742 ZFCP_LOG_INFO("error: creation of ELS request failed "
1743 "(adapter %s, port d_id: 0x%08x)\n",
1744 zfcp_get_busid_by_adapter(adapter), d_id);
1745 goto failed_req;
1746 }
1747
1748 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
1749 if (zfcp_use_one_sbal(els->req, els->req_count,
1750 els->resp, els->resp_count)){
1751 /* both request buffer and response buffer
1752 fit into one sbale each */
1753 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
1754 sbale[2].addr = zfcp_sg_to_address(&els->req[0]);
1755 sbale[2].length = els->req[0].length;
1756 sbale[3].addr = zfcp_sg_to_address(&els->resp[0]);
1757 sbale[3].length = els->resp[0].length;
1758 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
1759 } else if (adapter->supported_features &
1760 FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
1761 /* try to use chained SBALs */
1762 bytes = zfcp_qdio_sbals_from_sg(fsf_req,
1763 SBAL_FLAGS0_TYPE_WRITE_READ,
1764 els->req, els->req_count,
1765 ZFCP_MAX_SBALS_PER_ELS_REQ);
1766 if (bytes <= 0) {
1767 ZFCP_LOG_INFO("error: creation of ELS request failed "
1768 "(adapter %s, port d_id: 0x%08x)\n",
1769 zfcp_get_busid_by_adapter(adapter), d_id);
1770 if (bytes == 0) {
1771 ret = -ENOMEM;
1772 } else {
1773 ret = bytes;
1774 }
1775 goto failed_send;
1776 }
1777 fsf_req->qtcb->bottom.support.req_buf_length = bytes;
1778 fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
1779 bytes = zfcp_qdio_sbals_from_sg(fsf_req,
1780 SBAL_FLAGS0_TYPE_WRITE_READ,
1781 els->resp, els->resp_count,
1782 ZFCP_MAX_SBALS_PER_ELS_REQ);
1783 if (bytes <= 0) {
1784 ZFCP_LOG_INFO("error: creation of ELS request failed "
1785 "(adapter %s, port d_id: 0x%08x)\n",
1786 zfcp_get_busid_by_adapter(adapter), d_id);
1787 if (bytes == 0) {
1788 ret = -ENOMEM;
1789 } else {
1790 ret = bytes;
1791 }
1792 goto failed_send;
1793 }
1794 fsf_req->qtcb->bottom.support.resp_buf_length = bytes;
1795 } else {
1796 /* reject request */
1797 ZFCP_LOG_INFO("error: microcode does not support chained SBALs"
1798 ", ELS request too big (adapter %s, "
1799 "port d_id: 0x%08x)\n",
1800 zfcp_get_busid_by_adapter(adapter), d_id);
1801 ret = -EOPNOTSUPP;
1802 goto failed_send;
1803 }
1804
1805 /* settings in QTCB */
1806 fsf_req->qtcb->bottom.support.d_id = d_id;
1807 fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class;
1808 fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT;
1809 fsf_req->data.send_els = els;
1810
1811 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
1812
1813 /* start QDIO request for this FSF request */
1814 ret = zfcp_fsf_req_send(fsf_req, els->timer);
1815 if (ret) {
1816 ZFCP_LOG_DEBUG("error: initiation of ELS request failed "
1817 "(adapter %s, port d_id: 0x%08x)\n",
1818 zfcp_get_busid_by_adapter(adapter), d_id);
1819 goto failed_send;
1820 }
1821
1822 ZFCP_LOG_DEBUG("ELS request initiated (adapter %s, port d_id: "
1823 "0x%08x)\n", zfcp_get_busid_by_adapter(adapter), d_id);
1824 goto out;
1825
1826 failed_send:
1827 zfcp_fsf_req_free(fsf_req);
1828
1829 failed_req:
1830 out:
1831 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
1832 lock_flags);
1833
1834 return ret;
1835}
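
/*
 * Illustrative sketch, not part of the driver source: an ELS caller
 * addresses the destination by d_id rather than by port, and records
 * the ELS code so the handler below can special-case ADISC.  Field
 * names follow the usage visible in zfcp_fsf_send_els(); payload
 * setup (req/resp scatter-gather contents) is outside this sketch.
 */
static int
zfcp_send_adisc_sketch(struct zfcp_adapter *adapter, fc_id_t d_id,
		       struct scatterlist *req, struct scatterlist *resp)
{
	/* static: the request completes asynchronously (sketch only) */
	static struct zfcp_send_els els;

	els.adapter = adapter;
	els.d_id = d_id;
	els.ls_code = ZFCP_LS_ADISC;
	els.req = req;
	els.req_count = 1;
	els.resp = resp;
	els.resp_count = 1;
	els.timer = NULL;	/* no request timer in this sketch */
	els.handler = NULL;	/* handler is optional, see handler fn */
	els.handler_data = 0;

	return zfcp_fsf_send_els(&els);
}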
1836
1837/**
1838 * zfcp_fsf_send_els_handler - handler for ELS commands
1839 * @fsf_req: pointer to struct zfcp_fsf_req
1840 *
1841 * Data specific for the ELS command is passed by
1842 * fsf_req->data.send_els.
1843 * Usually a specific handler for the command is called via
1844 * fsf_req->data.send_els->handler at end of this function.
1845 */
1846static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
1847{
1848 struct zfcp_adapter *adapter;
1849 fc_id_t d_id;
1850 struct zfcp_port *port;
1851 struct fsf_qtcb_header *header;
1852 struct fsf_qtcb_bottom_support *bottom;
1853 struct zfcp_send_els *send_els;
1854 int retval = -EINVAL;
1855 u16 subtable, rule, counter;
1856
1857 send_els = fsf_req->data.send_els;
1858 adapter = send_els->adapter;
1859 d_id = send_els->d_id;
1860 header = &fsf_req->qtcb->header;
1861 bottom = &fsf_req->qtcb->bottom.support;
1862
1863 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
1864 goto skip_fsfstatus;
1865
1866 switch (header->fsf_status) {
1867
1868 case FSF_GOOD:
1869 ZFCP_LOG_FLAGS(2, "FSF_GOOD\n");
1870 retval = 0;
1871 break;
1872
1873 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1874 ZFCP_LOG_FLAGS(2, "FSF_SERVICE_CLASS_NOT_SUPPORTED\n");
1875 if (adapter->fc_service_class <= 3) {
1876 ZFCP_LOG_INFO("error: adapter %s does "
1877 "not support fibrechannel class %d.\n",
1878 zfcp_get_busid_by_adapter(adapter),
1879 adapter->fc_service_class);
1880 } else {
1881 ZFCP_LOG_INFO("bug: The fibrechannel class at "
1882 "adapter %s is invalid. "
1883 "(debug info %d)\n",
1884 zfcp_get_busid_by_adapter(adapter),
1885 adapter->fc_service_class);
1886 }
1887 /* stop operation for this adapter */
1888 debug_text_exception(adapter->erp_dbf, 0, "fsf_s_class_nsup");
1889 zfcp_erp_adapter_shutdown(adapter, 0);
1890 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1891 break;
1892
1893 case FSF_ADAPTER_STATUS_AVAILABLE:
1894 ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
1895 switch (header->fsf_status_qual.word[0]){
1896 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1897 ZFCP_LOG_FLAGS(2,"FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
1898 debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ltest");
1899 if (send_els->ls_code != ZFCP_LS_ADISC) {
1900 read_lock(&zfcp_data.config_lock);
1901 port = zfcp_get_port_by_did(adapter, d_id);
1902 if (port)
1903 zfcp_test_link(port);
1904 read_unlock(&zfcp_data.config_lock);
1905 }
1906 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1907 break;
1908 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1909 ZFCP_LOG_FLAGS(2,"FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
1910 debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ulp");
1911 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1912 retval =
1913 zfcp_handle_els_rjt(header->fsf_status_qual.word[1],
1914 (struct zfcp_ls_rjt_par *)
1915 &header->fsf_status_qual.word[2]);
1916 break;
1917 case FSF_SQ_RETRY_IF_POSSIBLE:
1918 ZFCP_LOG_FLAGS(2, "FSF_SQ_RETRY_IF_POSSIBLE\n");
1919 debug_text_event(adapter->erp_dbf, 1, "fsf_sq_retry");
1920 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1921 break;
1922 default:
1923 ZFCP_LOG_INFO("bug: Wrong status qualifier 0x%x\n",
1924 header->fsf_status_qual.word[0]);
1925 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
1926 (char*)header->fsf_status_qual.word, 16);
1927 }
1928 break;
1929
1930 case FSF_ELS_COMMAND_REJECTED:
1931 ZFCP_LOG_FLAGS(2, "FSF_ELS_COMMAND_REJECTED\n");
1932 ZFCP_LOG_INFO("ELS has been rejected because command filter "
1933 "prohibited sending "
1934 "(adapter: %s, port d_id: 0x%08x)\n",
1935 zfcp_get_busid_by_adapter(adapter), d_id);
1936
1937 break;
1938
1939 case FSF_PAYLOAD_SIZE_MISMATCH:
1940 ZFCP_LOG_FLAGS(2, "FSF_PAYLOAD_SIZE_MISMATCH\n");
1941 ZFCP_LOG_INFO(
1942 "ELS request size and ELS response size must be either "
1943 "both 0, or both greater than 0 "
1944 "(adapter: %s, req_buf_length=%d resp_buf_length=%d)\n",
1945 zfcp_get_busid_by_adapter(adapter),
1946 bottom->req_buf_length,
1947 bottom->resp_buf_length);
1948 break;
1949
1950 case FSF_REQUEST_SIZE_TOO_LARGE:
1951 ZFCP_LOG_FLAGS(2, "FSF_REQUEST_SIZE_TOO_LARGE\n");
1952 ZFCP_LOG_INFO(
1953 "Length of the ELS request buffer, "
1954 "specified in QTCB bottom, "
1955 "exceeds the size of the buffers "
1956 "that have been allocated for ELS request data "
1957 "(adapter: %s, req_buf_length=%d)\n",
1958 zfcp_get_busid_by_adapter(adapter),
1959 bottom->req_buf_length);
1960 break;
1961
1962 case FSF_RESPONSE_SIZE_TOO_LARGE:
1963 ZFCP_LOG_FLAGS(2, "FSF_RESPONSE_SIZE_TOO_LARGE\n");
1964 ZFCP_LOG_INFO(
1965 "Length of the ELS response buffer, "
1966 "specified in QTCB bottom, "
1967 "exceeds the size of the buffers "
1968 "that have been allocated for ELS response data "
1969 "(adapter: %s, resp_buf_length=%d)\n",
1970 zfcp_get_busid_by_adapter(adapter),
1971 bottom->resp_buf_length);
1972 break;
1973
1974 case FSF_SBAL_MISMATCH:
1975 /* should never occur, avoided in zfcp_fsf_send_els */
1976 ZFCP_LOG_FLAGS(2, "FSF_SBAL_MISMATCH\n");
1977 ZFCP_LOG_INFO("SBAL mismatch (adapter: %s, req_buf_length=%d, "
1978 "resp_buf_length=%d)\n",
1979 zfcp_get_busid_by_adapter(adapter),
1980 bottom->req_buf_length, bottom->resp_buf_length);
1981 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1982 break;
1983
1984 case FSF_ACCESS_DENIED:
1985 ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
1986 ZFCP_LOG_NORMAL("access denied, cannot send ELS command "
1987 "(adapter %s, port d_id=0x%08x)\n",
1988 zfcp_get_busid_by_adapter(adapter), d_id);
1989 for (counter = 0; counter < 2; counter++) {
1990 subtable = header->fsf_status_qual.halfword[counter * 2];
1991 rule = header->fsf_status_qual.halfword[counter * 2 + 1];
1992 switch (subtable) {
1993 case FSF_SQ_CFDC_SUBTABLE_OS:
1994 case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
1995 case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
1996 case FSF_SQ_CFDC_SUBTABLE_LUN:
1997 ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
1998 zfcp_act_subtable_type[subtable], rule);
1999 break;
2000 }
2001 }
2002 debug_text_event(adapter->erp_dbf, 1, "fsf_s_access");
2003 read_lock(&zfcp_data.config_lock);
2004 port = zfcp_get_port_by_did(adapter, d_id);
2005 if (port != NULL)
2006 zfcp_erp_port_access_denied(port);
2007 read_unlock(&zfcp_data.config_lock);
2008 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2009 break;
2010
2011 default:
2012 ZFCP_LOG_NORMAL(
2013 "bug: An unknown FSF Status was presented "
2014 "(adapter: %s, fsf_status=0x%08x)\n",
2015 zfcp_get_busid_by_adapter(adapter),
2016 header->fsf_status);
2017 debug_text_event(adapter->erp_dbf, 0, "fsf_sq_inval");
2018 debug_exception(adapter->erp_dbf, 0,
2019 &header->fsf_status_qual.word[0], sizeof(u32));
2020 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2021 break;
2022 }
2023
2024skip_fsfstatus:
2025 send_els->status = retval;
2026
2027 if (send_els->handler != NULL)
2028 send_els->handler(send_els->handler_data);
2029
2030 return retval;
2031}
2032
2033/*
2034 * function: zfcp_fsf_exchange_config_data
2035 *
2036 * purpose: submits FSF command "exchange configuration data"
2037 *
2038 * returns: 0 - request initiated successfully
2039 *              <0 - request could not be initiated
2040 */
2041int
2042zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
2043{
2044 volatile struct qdio_buffer_element *sbale;
2045 unsigned long lock_flags;
2046 int retval = 0;
2047
2048 /* setup new FSF request */
2049 retval = zfcp_fsf_req_create(erp_action->adapter,
2050 FSF_QTCB_EXCHANGE_CONFIG_DATA,
2051 ZFCP_REQ_AUTO_CLEANUP,
2052 erp_action->adapter->pool.fsf_req_erp,
2053 &lock_flags, &(erp_action->fsf_req));
2054 if (retval < 0) {
2055 ZFCP_LOG_INFO("error: Could not create exchange configuration "
2056 "data request for adapter %s.\n",
2057 zfcp_get_busid_by_adapter(erp_action->adapter));
2058 goto out;
2059 }
2060
2061 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
2062 erp_action->fsf_req->sbal_curr, 0);
2063 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2064 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2065
2066 erp_action->fsf_req->erp_action = erp_action;
2067 erp_action->fsf_req->qtcb->bottom.config.feature_selection =
2068 (FSF_FEATURE_CFDC | FSF_FEATURE_LUN_SHARING);
2069
2070 /* start QDIO request for this FSF request */
2071 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
2072 if (retval) {
2073 ZFCP_LOG_INFO
2074 ("error: Could not send exchange configuration data "
2075 "command on the adapter %s\n",
2076 zfcp_get_busid_by_adapter(erp_action->adapter));
2077 zfcp_fsf_req_free(erp_action->fsf_req);
2078 erp_action->fsf_req = NULL;
2079 goto out;
2080 }
2081
2082 ZFCP_LOG_DEBUG("exchange configuration data request initiated "
2083 "(adapter %s)\n",
2084 zfcp_get_busid_by_adapter(erp_action->adapter));
2085
2086 out:
2087 write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
2088 lock_flags);
2089 return retval;
2090}
2091
2092/**
2093 * zfcp_fsf_exchange_config_evaluate - evaluate adapter data in QTCB bottom
2094 * @fsf_req: fsf_req which belongs to xchg config data request
2095 * @xchg_ok: specifies if xchg config data was incomplete or complete (0/1)
2096 *
2097 * returns: -EIO on error, 0 otherwise
2098 */
2099static int
2100zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
2101{
2102 struct fsf_qtcb_bottom_config *bottom;
2103 struct zfcp_adapter *adapter = fsf_req->adapter;
2104
2105 bottom = &fsf_req->qtcb->bottom.config;
2106 ZFCP_LOG_DEBUG("low/high QTCB version 0x%x/0x%x of FSF\n",
2107 bottom->low_qtcb_version, bottom->high_qtcb_version);
2108 adapter->fsf_lic_version = bottom->lic_version;
2109 adapter->supported_features = bottom->supported_features;
2110
2111 if (xchg_ok) {
2112 adapter->wwnn = bottom->nport_serv_param.wwnn;
2113 adapter->wwpn = bottom->nport_serv_param.wwpn;
2114 adapter->s_id = bottom->s_id & ZFCP_DID_MASK;
2115 adapter->fc_topology = bottom->fc_topology;
2116 adapter->fc_link_speed = bottom->fc_link_speed;
2117 adapter->hydra_version = bottom->adapter_type;
2118 } else {
2119 adapter->wwnn = 0;
2120 adapter->wwpn = 0;
2121 adapter->s_id = 0;
2122 adapter->fc_topology = 0;
2123 adapter->fc_link_speed = 0;
2124 adapter->hydra_version = 0;
2125 }
2126
2127 if (adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
2128 adapter->hardware_version = bottom->hardware_version;
2129 memcpy(adapter->serial_number, bottom->serial_number, 17);
2130 EBCASC(adapter->serial_number, sizeof(adapter->serial_number));
2131 }
2132
2133 ZFCP_LOG_INFO("The adapter %s reported the following characteristics:\n"
2134 "WWNN 0x%016Lx, "
2135 "WWPN 0x%016Lx, "
2136 "S_ID 0x%08x,\n"
2137 "adapter version 0x%x, "
2138 "LIC version 0x%x, "
2139 "FC link speed %d Gb/s\n",
2140 zfcp_get_busid_by_adapter(adapter),
2141 adapter->wwnn,
2142 adapter->wwpn,
2143 (unsigned int) adapter->s_id,
2144 adapter->hydra_version,
2145 adapter->fsf_lic_version,
2146 adapter->fc_link_speed);
2147 if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) {
2148 ZFCP_LOG_NORMAL("error: the adapter %s "
2149 "only supports newer control block "
2150 "versions in comparison to this device "
2151 "driver (try updated device driver)\n",
2152 zfcp_get_busid_by_adapter(adapter));
2153 debug_text_event(adapter->erp_dbf, 0, "low_qtcb_ver");
2154 zfcp_erp_adapter_shutdown(adapter, 0);
2155 return -EIO;
2156 }
2157 if (ZFCP_QTCB_VERSION > bottom->high_qtcb_version) {
2158 ZFCP_LOG_NORMAL("error: the adapter %s "
2159 "only supports older control block "
2160 "versions than this device driver uses"
2161 "(consider a microcode upgrade)\n",
2162 zfcp_get_busid_by_adapter(adapter));
2163 debug_text_event(adapter->erp_dbf, 0, "high_qtcb_ver");
2164 zfcp_erp_adapter_shutdown(adapter, 0);
2165 return -EIO;
2166 }
2167 return 0;
2168}
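
/*
 * Illustrative sketch, not part of the driver source: the two version
 * checks above form a simple window test.  The driver's own
 * ZFCP_QTCB_VERSION must lie within the [low, high] range reported by
 * the adapter, otherwise either the driver or the microcode is too
 * old.  Helper name is made up:
 */
static inline int
zfcp_qtcb_version_ok_sketch(u32 low, u32 high)
{
	return (ZFCP_QTCB_VERSION >= low) && (ZFCP_QTCB_VERSION <= high);
}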
2169
2170/*
2171 * function: zfcp_fsf_exchange_config_data_handler
2172 *
2173 * purpose: is called for finished Exchange Configuration Data command
2174 *
2175 * returns:
2176 */
2177static int
2178zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2179{
2180 struct fsf_qtcb_bottom_config *bottom;
2181 struct zfcp_adapter *adapter = fsf_req->adapter;
2182
2183 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2184 return -EIO;
2185
2186 switch (fsf_req->qtcb->header.fsf_status) {
2187
2188 case FSF_GOOD:
2189 ZFCP_LOG_FLAGS(2, "FSF_GOOD\n");
2190
2191 if (zfcp_fsf_exchange_config_evaluate(fsf_req, 1))
2192 return -EIO;
2193
2194 switch (adapter->fc_topology) {
2195 case FSF_TOPO_P2P:
2196 ZFCP_LOG_FLAGS(1, "FSF_TOPO_P2P\n");
2197 ZFCP_LOG_NORMAL("error: unsupported point-to-point "
2198 "fibrechannel configuration detected "
2199 "at adapter %s, shutting down adapter\n",
2200 zfcp_get_busid_by_adapter(adapter));
2201 debug_text_event(fsf_req->adapter->erp_dbf, 0,
2202 "top-p-to-p");
2203 zfcp_erp_adapter_shutdown(adapter, 0);
2204 return -EIO;
2205 case FSF_TOPO_AL:
2206 ZFCP_LOG_FLAGS(1, "FSF_TOPO_AL\n");
2207 ZFCP_LOG_NORMAL("error: unsupported arbitrated loop "
2208 "fibrechannel topology detected "
2209 "at adapter %s, shutting down adapter\n",
2210 zfcp_get_busid_by_adapter(adapter));
2211 debug_text_event(fsf_req->adapter->erp_dbf, 0,
2212 "top-al");
2213 zfcp_erp_adapter_shutdown(adapter, 0);
2214 return -EIO;
2215 case FSF_TOPO_FABRIC:
2216 ZFCP_LOG_FLAGS(1, "FSF_TOPO_FABRIC\n");
2217 ZFCP_LOG_INFO("Switched fabric fibrechannel "
2218 "network detected at adapter %s.\n",
2219 zfcp_get_busid_by_adapter(adapter));
2220 break;
2221 default:
2222 ZFCP_LOG_NORMAL("bug: The fibrechannel topology "
2223 "reported by the exchange "
2224 "configuration command for "
2225 "the adapter %s is not "
2226 "of a type known to the zfcp "
2227 "driver, shutting down adapter\n",
2228 zfcp_get_busid_by_adapter(adapter));
2229 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
2230 "unknown-topo");
2231 zfcp_erp_adapter_shutdown(adapter, 0);
2232 return -EIO;
2233 }
2234 bottom = &fsf_req->qtcb->bottom.config;
2235 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
2236 ZFCP_LOG_NORMAL("bug: Maximum QTCB size (%d bytes) "
2237 "allowed by the adapter %s "
2238 "is lower than the minimum "
2239 "required by the driver (%ld bytes).\n",
2240 bottom->max_qtcb_size,
2241 zfcp_get_busid_by_adapter(adapter),
2242 sizeof(struct fsf_qtcb));
2243 debug_text_event(fsf_req->adapter->erp_dbf, 0,
2244 "qtcb-size");
2245 debug_event(fsf_req->adapter->erp_dbf, 0,
2246 &bottom->max_qtcb_size, sizeof (u32));
2247 zfcp_erp_adapter_shutdown(adapter, 0);
2248 return -EIO;
2249 }
2250 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
2251 &adapter->status);
2252 break;
2253 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
2254 debug_text_event(adapter->erp_dbf, 0, "xchg-inco");
2255
2256 if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0))
2257 return -EIO;
2258
2259 ZFCP_LOG_INFO("Local link to adapter %s is down\n",
2260 zfcp_get_busid_by_adapter(adapter));
2261 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
2262 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
2263 &adapter->status);
2264 zfcp_erp_adapter_failed(adapter);
2265 break;
2266 default:
2267 debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf-stat-ng");
2268 debug_event(fsf_req->adapter->erp_dbf, 0,
2269 &fsf_req->qtcb->header.fsf_status, sizeof (u32));
2270 zfcp_erp_adapter_shutdown(adapter, 0);
2271 return -EIO;
2272 }
2273 return 0;
2274}
2275
2276/**
2277 * zfcp_fsf_exchange_port_data - request information about local port
2278 * @adapter: for which port data is requested
2279 * @data: response to exchange port data request
2280 */
2281int
2282zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
2283 struct fsf_qtcb_bottom_port *data)
2284{
2285 volatile struct qdio_buffer_element *sbale;
2286 int retval = 0;
2287 unsigned long lock_flags;
2288 struct zfcp_fsf_req *fsf_req;
2289 struct timer_list *timer;
2290
2291 if (!(adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) {
2292 ZFCP_LOG_INFO("error: exchange port data "
2293 "command not supported by adapter %s\n",
2294 zfcp_get_busid_by_adapter(adapter));
2295 return -EOPNOTSUPP;
2296 }
2297
2298 timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL);
2299 if (!timer)
2300 return -ENOMEM;
2301
2302 /* setup new FSF request */
2303 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
2304 0, NULL, &lock_flags, &fsf_req);
2305 if (retval < 0) {
2306 ZFCP_LOG_INFO("error: Out of resources. Could not create an "
2307 "exchange port data request for"
2308 "the adapter %s.\n",
2309 zfcp_get_busid_by_adapter(adapter));
2310 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
2311 lock_flags);
2312 goto out;
2313 }
2314
2315 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
2316 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2317 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2318
2319 fsf_req->data.port_data = data;
2320
2321 init_timer(timer);
2322 timer->function = zfcp_fsf_request_timeout_handler;
2323 timer->data = (unsigned long) adapter;
2324 timer->expires = ZFCP_FSF_REQUEST_TIMEOUT;
2325
2326 retval = zfcp_fsf_req_send(fsf_req, timer);
2327 if (retval) {
2328 ZFCP_LOG_INFO("error: Could not send an exchange port data "
2329 "command on the adapter %s\n",
2330 zfcp_get_busid_by_adapter(adapter));
2331 zfcp_fsf_req_free(fsf_req);
2332 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
2333 lock_flags);
2334 goto out;
2335 }
2336
2337 ZFCP_LOG_DEBUG("Exchange Port Data request initiated (adapter %s)\n",
2338 zfcp_get_busid_by_adapter(adapter));
2339
2340 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
2341 lock_flags);
2342
2343 wait_event(fsf_req->completion_wq,
2344 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
2345 del_timer_sync(timer);
2346 zfcp_fsf_req_cleanup(fsf_req);
2347 out:
2348 kfree(timer);
2349 return retval;
2350}
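
/*
 * Illustrative sketch, not part of the driver source: unlike the
 * ERP-driven requests in this file, exchange_port_data completes
 * synchronously.  Stripped of error handling, the pattern above is:
 *
 *	retval = zfcp_fsf_req_send(fsf_req, timer);
 *	if (retval == 0) {
 *		wait_event(fsf_req->completion_wq,
 *			   fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
 *		del_timer_sync(timer);
 *		zfcp_fsf_req_cleanup(fsf_req);
 *	}
 */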
2351
2352
2353/**
2354 * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request
2355 * @fsf_req: pointer to struct zfcp_fsf_req
2356 */
2357static void
2358zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req)
2359{
2360 struct fsf_qtcb_bottom_port *bottom;
2361 struct fsf_qtcb_bottom_port *data = fsf_req->data.port_data;
2362
2363 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2364 return;
2365
2366 switch (fsf_req->qtcb->header.fsf_status) {
2367 case FSF_GOOD:
2368 ZFCP_LOG_FLAGS(2,"FSF_GOOD\n");
2369 bottom = &fsf_req->qtcb->bottom.port;
2370 memcpy(data, bottom, sizeof(*data));
2371 break;
2372
2373 default:
2374 debug_text_event(fsf_req->adapter->erp_dbf, 0, "xchg-port-ng");
2375 debug_event(fsf_req->adapter->erp_dbf, 0,
2376 &fsf_req->qtcb->header.fsf_status, sizeof(u32));
2377 }
2378}
2379
2380
2381/*
2382 * function: zfcp_fsf_open_port
2383 *
2384 * purpose: submits FSF command "open port with DID"
2385 *
2386 * returns: 0 - request initiated successfully
2387 *              <0 - request could not be initiated
2388 */
2389int
2390zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
2391{
2392 volatile struct qdio_buffer_element *sbale;
2393 unsigned long lock_flags;
2394 int retval = 0;
2395
2396 /* setup new FSF request */
2397 retval = zfcp_fsf_req_create(erp_action->adapter,
2398 FSF_QTCB_OPEN_PORT_WITH_DID,
2399 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
2400 erp_action->adapter->pool.fsf_req_erp,
2401 &lock_flags, &(erp_action->fsf_req));
2402 if (retval < 0) {
2403 ZFCP_LOG_INFO("error: Could not create open port request "
2404 "for port 0x%016Lx on adapter %s.\n",
2405 erp_action->port->wwpn,
2406 zfcp_get_busid_by_adapter(erp_action->adapter));
2407 goto out;
2408 }
2409
2410 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
2411 erp_action->fsf_req->sbal_curr, 0);
2412 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2413 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2414
2415 erp_action->fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id;
2416 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
2417 erp_action->fsf_req->data.open_port.port = erp_action->port;
2418 erp_action->fsf_req->erp_action = erp_action;
2419
2420 /* start QDIO request for this FSF request */
2421 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
2422 if (retval) {
2423 ZFCP_LOG_INFO("error: Could not send open port request for "
2424 "port 0x%016Lx on adapter %s.\n",
2425 erp_action->port->wwpn,
2426 zfcp_get_busid_by_adapter(erp_action->adapter));
2427 zfcp_fsf_req_free(erp_action->fsf_req);
2428 erp_action->fsf_req = NULL;
2429 goto out;
2430 }
2431
2432 ZFCP_LOG_DEBUG("open port request initiated "
2433 "(adapter %s, port 0x%016Lx)\n",
2434 zfcp_get_busid_by_adapter(erp_action->adapter),
2435 erp_action->port->wwpn);
2436 out:
2437 write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
2438 lock_flags);
2439 return retval;
2440}
2441
2442/*
2443 * function: zfcp_fsf_open_port_handler
2444 *
2445 * purpose: is called for finished Open Port command
2446 *
2447 * returns:
2448 */
2449static int
2450zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req)
2451{
2452 int retval = -EINVAL;
2453 struct zfcp_port *port;
2454 struct fsf_plogi *plogi;
2455 struct fsf_qtcb_header *header;
2456 u16 subtable, rule, counter;
2457
2458 port = fsf_req->data.open_port.port;
2459 header = &fsf_req->qtcb->header;
2460
2461 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
2462 /* don't change port status in our bookkeeping */
2463 goto skip_fsfstatus;
2464 }
2465
2466 /* evaluate FSF status in QTCB */
2467 switch (header->fsf_status) {
2468
2469 case FSF_PORT_ALREADY_OPEN:
2470 ZFCP_LOG_FLAGS(0, "FSF_PORT_ALREADY_OPEN\n");
2471 ZFCP_LOG_NORMAL("bug: remote port 0x%016Lx on adapter %s "
2472 "is already open.\n",
2473 port->wwpn, zfcp_get_busid_by_port(port));
2474 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
2475 "fsf_s_popen");
2476 /*
2477 * This is a bug; however, operation should continue normally
2478 * if it is simply ignored
2479 */
2480 break;
2481
2482 case FSF_ACCESS_DENIED:
2483 ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
2484 ZFCP_LOG_NORMAL("Access denied, cannot open port 0x%016Lx "
2485 "on adapter %s\n",
2486 port->wwpn, zfcp_get_busid_by_port(port));
2487 for (counter = 0; counter < 2; counter++) {
2488 subtable = header->fsf_status_qual.halfword[counter * 2];
2489 rule = header->fsf_status_qual.halfword[counter * 2 + 1];
2490 switch (subtable) {
2491 case FSF_SQ_CFDC_SUBTABLE_OS:
2492 case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
2493 case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
2494 case FSF_SQ_CFDC_SUBTABLE_LUN:
2495 ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
2496 zfcp_act_subtable_type[subtable], rule);
2497 break;
2498 }
2499 }
2500 debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_access");
2501 zfcp_erp_port_access_denied(port);
2502 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2503 break;
2504
2505 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
2506 ZFCP_LOG_FLAGS(1, "FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED\n");
2507 ZFCP_LOG_INFO("error: The FSF adapter is out of resources. "
2508 "The remote port 0x%016Lx on adapter %s "
2509 "could not be opened. Disabling it.\n",
2510 port->wwpn, zfcp_get_busid_by_port(port));
2511 debug_text_event(fsf_req->adapter->erp_dbf, 1,
2512 "fsf_s_max_ports");
2513 zfcp_erp_port_failed(port);
2514 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2515 break;
2516
2517 case FSF_ADAPTER_STATUS_AVAILABLE:
2518 ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
2519 switch (header->fsf_status_qual.word[0]) {
2520 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2521 ZFCP_LOG_FLAGS(2,
2522 "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
2523 debug_text_event(fsf_req->adapter->erp_dbf, 1,
2524 "fsf_sq_ltest");
2525 /* ERP strategy will escalate */
2526 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2527 break;
2528 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2529 /* ERP strategy will escalate */
2530 debug_text_event(fsf_req->adapter->erp_dbf, 1,
2531 "fsf_sq_ulp");
2532 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2533 break;
2534 case FSF_SQ_NO_RETRY_POSSIBLE:
2535 ZFCP_LOG_FLAGS(0, "FSF_SQ_NO_RETRY_POSSIBLE\n");
2536 ZFCP_LOG_NORMAL("The remote port 0x%016Lx on "
2537 "adapter %s could not be opened. "
2538 "Disabling it.\n",
2539 port->wwpn,
2540 zfcp_get_busid_by_port(port));
2541 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
2542 "fsf_sq_no_retry");
2543 zfcp_erp_port_failed(port);
2544 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2545 break;
2546 default:
2547 ZFCP_LOG_NORMAL
2548 ("bug: Wrong status qualifier 0x%x arrived.\n",
2549 header->fsf_status_qual.word[0]);
2550 debug_text_event(fsf_req->adapter->erp_dbf, 0,
2551 "fsf_sq_inval:");
2552 debug_exception(
2553 fsf_req->adapter->erp_dbf, 0,
2554 &header->fsf_status_qual.word[0],
2555 sizeof (u32));
2556 break;
2557 }
2558 break;
2559
2560 case FSF_GOOD:
2561 ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
2562 /* save port handle assigned by FSF */
2563 port->handle = header->port_handle;
2564 ZFCP_LOG_INFO("The remote port 0x%016Lx via adapter %s "
2565 "was opened, it's port handle is 0x%x\n",
2566 port->wwpn, zfcp_get_busid_by_port(port),
2567 port->handle);
2568 /* mark port as open */
2569 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
2570 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
2571 retval = 0;
2572 /* check whether D_ID has changed during open */
2573 /*
2574 * FIXME: This check is not airtight, as the FCP channel does
2575 * not monitor closures of target port connections caused on
2576 * the remote side. Thus, they might miss out on invalidating
2577 * locally cached WWPNs (and other N_Port parameters) of gone
2578 * target ports. So, our heroic attempt to make things safe
2579 * could be undermined by 'open port' response data tagged with
2580 * obsolete WWPNs. Another reason to monitor potential
2581 * connection closures ourselves at least (by interpreting
2582 * incoming ELS' and unsolicited status). It just crosses my
2583 * mind that one should be able to cross-check by means of
2584 * another GID_PN straight after a port has been opened.
2585 * Alternatively, an ADISC/PDISC ELS should suffice as well.
2586 */
2587 plogi = (struct fsf_plogi *) fsf_req->qtcb->bottom.support.els;
2588 if (!atomic_test_mask(ZFCP_STATUS_PORT_NO_WWPN, &port->status))
2589 {
2590 if (fsf_req->qtcb->bottom.support.els1_length <
2591 ((((unsigned long) &plogi->serv_param.wwpn) -
2592 ((unsigned long) plogi)) + sizeof (u64))) {
2593 ZFCP_LOG_INFO(
2594 "warning: insufficient length of "
2595 "PLOGI payload (%i)\n",
2596 fsf_req->qtcb->bottom.support.els1_length);
2597 debug_text_event(fsf_req->adapter->erp_dbf, 0,
2598 "fsf_s_short_plogi:");
2599 /* skip sanity check and assume wwpn is ok */
2600 } else {
2601 if (plogi->serv_param.wwpn != port->wwpn) {
2602 ZFCP_LOG_INFO("warning: d_id of port "
2603 "0x%016Lx changed during "
2604 "open\n", port->wwpn);
2605 debug_text_event(
2606 fsf_req->adapter->erp_dbf, 0,
2607 "fsf_s_did_change:");
2608 atomic_clear_mask(
2609 ZFCP_STATUS_PORT_DID_DID,
2610 &port->status);
2611 } else
2612 port->wwnn = plogi->serv_param.wwnn;
2613 }
2614 }
2615 break;
2616
2617 case FSF_UNKNOWN_OP_SUBTYPE:
2618 		/* should never occur, subtype not set in zfcp_fsf_open_port */
2619 ZFCP_LOG_FLAGS(2, "FSF_UNKNOWN_OP_SUBTYPE\n");
2620 ZFCP_LOG_INFO("unknown operation subtype (adapter: %s, "
2621 "op_subtype=0x%x)\n",
2622 zfcp_get_busid_by_port(port),
2623 fsf_req->qtcb->bottom.support.operation_subtype);
2624 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2625 break;
2626
2627 default:
2628 ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
2629 "(debug info 0x%x)\n",
2630 header->fsf_status);
2631 debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:");
2632 debug_exception(fsf_req->adapter->erp_dbf, 0,
2633 &header->fsf_status, sizeof (u32));
2634 break;
2635 }
2636
2637 skip_fsfstatus:
2638 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &port->status);
2639 return retval;
2640}
2641
2642/*
2643 * function: zfcp_fsf_close_port
2644 *
2645 * purpose: submit FSF command "close port"
2646 *
2647  * returns:	0 - request successfully initiated
2648  *		!0 - request could not be initiated
2649 */
2650int
2651zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
2652{
2653 volatile struct qdio_buffer_element *sbale;
2654 unsigned long lock_flags;
2655 int retval = 0;
2656
2657 /* setup new FSF request */
2658 retval = zfcp_fsf_req_create(erp_action->adapter,
2659 FSF_QTCB_CLOSE_PORT,
2660 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
2661 erp_action->adapter->pool.fsf_req_erp,
2662 &lock_flags, &(erp_action->fsf_req));
2663 if (retval < 0) {
2664 ZFCP_LOG_INFO("error: Could not create a close port request "
2665 "for port 0x%016Lx on adapter %s.\n",
2666 erp_action->port->wwpn,
2667 zfcp_get_busid_by_adapter(erp_action->adapter));
2668 goto out;
2669 }
2670
2671 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
2672 erp_action->fsf_req->sbal_curr, 0);
2673 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2674 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2675
2676 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
2677 erp_action->fsf_req->data.close_port.port = erp_action->port;
2678 erp_action->fsf_req->erp_action = erp_action;
2679 erp_action->fsf_req->qtcb->header.port_handle =
2680 erp_action->port->handle;
2681
2682 /* start QDIO request for this FSF request */
2683 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
2684 if (retval) {
2685 ZFCP_LOG_INFO("error: Could not send a close port request for "
2686 "port 0x%016Lx on adapter %s.\n",
2687 erp_action->port->wwpn,
2688 zfcp_get_busid_by_adapter(erp_action->adapter));
2689 zfcp_fsf_req_free(erp_action->fsf_req);
2690 erp_action->fsf_req = NULL;
2691 goto out;
2692 }
2693
2694 ZFCP_LOG_TRACE("close port request initiated "
2695 "(adapter %s, port 0x%016Lx)\n",
2696 zfcp_get_busid_by_adapter(erp_action->adapter),
2697 erp_action->port->wwpn);
2698 out:
2699 write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
2700 lock_flags);
2701 return retval;
2702}
2703
2704/*
2705 * function: zfcp_fsf_close_port_handler
2706 *
2707 * purpose: is called for finished Close Port FSF command
2708 *
2709 * returns:
2710 */
2711static int
2712zfcp_fsf_close_port_handler(struct zfcp_fsf_req *fsf_req)
2713{
2714 int retval = -EINVAL;
2715 struct zfcp_port *port;
2716
2717 port = fsf_req->data.close_port.port;
2718
2719 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
2720 /* don't change port status in our bookkeeping */
2721 goto skip_fsfstatus;
2722 }
2723
2724 /* evaluate FSF status in QTCB */
2725 switch (fsf_req->qtcb->header.fsf_status) {
2726
2727 case FSF_PORT_HANDLE_NOT_VALID:
2728 ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
2729 ZFCP_LOG_INFO("Temporary port identifier 0x%x for port "
2730 			      "0x%016Lx on adapter %s is invalid. This may happen "
2731 "occasionally.\n", port->handle,
2732 port->wwpn, zfcp_get_busid_by_port(port));
2733 ZFCP_LOG_DEBUG("status qualifier:\n");
2734 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
2735 (char *) &fsf_req->qtcb->header.fsf_status_qual,
2736 sizeof (union fsf_status_qual));
2737 debug_text_event(fsf_req->adapter->erp_dbf, 1,
2738 "fsf_s_phand_nv");
2739 zfcp_erp_adapter_reopen(port->adapter, 0);
2740 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2741 break;
2742
2743 case FSF_ADAPTER_STATUS_AVAILABLE:
2744 ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
2745 /* Note: FSF has actually closed the port in this case.
2746 	 * The status code is just daft. Fingers crossed for a change.
2747 */
2748 retval = 0;
2749 break;
2750
2751 case FSF_GOOD:
2752 ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
2753 		ZFCP_LOG_TRACE("remote port 0x%016Lx on adapter %s closed, "
2754 "port handle 0x%x\n", port->wwpn,
2755 zfcp_get_busid_by_port(port), port->handle);
2756 zfcp_erp_modify_port_status(port,
2757 ZFCP_STATUS_COMMON_OPEN,
2758 ZFCP_CLEAR);
2759 retval = 0;
2760 break;
2761
2762 default:
2763 ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
2764 "(debug info 0x%x)\n",
2765 fsf_req->qtcb->header.fsf_status);
2766 debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:");
2767 debug_exception(fsf_req->adapter->erp_dbf, 0,
2768 &fsf_req->qtcb->header.fsf_status,
2769 sizeof (u32));
2770 break;
2771 }
2772
2773 skip_fsfstatus:
2774 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &port->status);
2775 return retval;
2776}
2777
2778/*
2779 * function: zfcp_fsf_close_physical_port
2780 *
2781 * purpose: submit FSF command "close physical port"
2782 *
2783  * returns:	0 - request successfully initiated
2784  *		!0 - request could not be initiated
2785 */
2786int
2787zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2788{
2789 int retval = 0;
2790 unsigned long lock_flags;
2791 volatile struct qdio_buffer_element *sbale;
2792
2793 /* setup new FSF request */
2794 retval = zfcp_fsf_req_create(erp_action->adapter,
2795 FSF_QTCB_CLOSE_PHYSICAL_PORT,
2796 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
2797 erp_action->adapter->pool.fsf_req_erp,
2798 &lock_flags, &erp_action->fsf_req);
2799 if (retval < 0) {
2800 ZFCP_LOG_INFO("error: Could not create close physical port "
2801 "request (adapter %s, port 0x%016Lx)\n",
2802 zfcp_get_busid_by_adapter(erp_action->adapter),
2803 erp_action->port->wwpn);
2804
2805 goto out;
2806 }
2807
2808 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
2809 erp_action->fsf_req->sbal_curr, 0);
2810 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2811 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2812
2813 /* mark port as being closed */
2814 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
2815 &erp_action->port->status);
2816 /* save a pointer to this port */
2817 erp_action->fsf_req->data.close_physical_port.port = erp_action->port;
2818 	/* port to be closed */
2819 erp_action->fsf_req->qtcb->header.port_handle =
2820 erp_action->port->handle;
2821 erp_action->fsf_req->erp_action = erp_action;
2822
2823 /* start QDIO request for this FSF request */
2824 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
2825 if (retval) {
2826 ZFCP_LOG_INFO("error: Could not send close physical port "
2827 "request (adapter %s, port 0x%016Lx)\n",
2828 zfcp_get_busid_by_adapter(erp_action->adapter),
2829 erp_action->port->wwpn);
2830 zfcp_fsf_req_free(erp_action->fsf_req);
2831 erp_action->fsf_req = NULL;
2832 goto out;
2833 }
2834
2835 ZFCP_LOG_TRACE("close physical port request initiated "
2836 "(adapter %s, port 0x%016Lx)\n",
2837 zfcp_get_busid_by_adapter(erp_action->adapter),
2838 erp_action->port->wwpn);
2839 out:
2840 write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
2841 lock_flags);
2842 return retval;
2843}
2844
2845/*
2846 * function: zfcp_fsf_close_physical_port_handler
2847 *
2848 * purpose: is called for finished Close Physical Port FSF command
2849 *
2850 * returns:
2851 */
2852static int
2853zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req)
2854{
2855 int retval = -EINVAL;
2856 struct zfcp_port *port;
2857 struct zfcp_unit *unit;
2858 struct fsf_qtcb_header *header;
2859 u16 subtable, rule, counter;
2860
2861 port = fsf_req->data.close_physical_port.port;
2862 header = &fsf_req->qtcb->header;
2863
2864 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
2865 /* don't change port status in our bookkeeping */
2866 goto skip_fsfstatus;
2867 }
2868
2869 /* evaluate FSF status in QTCB */
2870 switch (header->fsf_status) {
2871
2872 case FSF_PORT_HANDLE_NOT_VALID:
2873 ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
2874 		ZFCP_LOG_INFO("Temporary port identifier 0x%x invalid "
2875 "(adapter %s, port 0x%016Lx). "
2876 "This may happen occasionally.\n",
2877 port->handle,
2878 zfcp_get_busid_by_port(port),
2879 port->wwpn);
2880 ZFCP_LOG_DEBUG("status qualifier:\n");
2881 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
2882 (char *) &header->fsf_status_qual,
2883 sizeof (union fsf_status_qual));
2884 debug_text_event(fsf_req->adapter->erp_dbf, 1,
2885 "fsf_s_phand_nv");
2886 zfcp_erp_adapter_reopen(port->adapter, 0);
2887 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2888 break;
2889
2890 case FSF_ACCESS_DENIED:
2891 ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
2892 ZFCP_LOG_NORMAL("Access denied, cannot close "
2893 "physical port 0x%016Lx on adapter %s\n",
2894 port->wwpn, zfcp_get_busid_by_port(port));
2895 for (counter = 0; counter < 2; counter++) {
2896 subtable = header->fsf_status_qual.halfword[counter * 2];
2897 rule = header->fsf_status_qual.halfword[counter * 2 + 1];
2898 switch (subtable) {
2899 case FSF_SQ_CFDC_SUBTABLE_OS:
2900 case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
2901 case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
2902 case FSF_SQ_CFDC_SUBTABLE_LUN:
2903 ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
2904 zfcp_act_subtable_type[subtable], rule);
2905 break;
2906 }
2907 }
2908 debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_access");
2909 zfcp_erp_port_access_denied(port);
2910 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2911 break;
2912
2913 case FSF_PORT_BOXED:
2914 ZFCP_LOG_FLAGS(2, "FSF_PORT_BOXED\n");
2915 ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter "
2916 			       "%s needs to be reopened, but an attempt was "
2917 			       "made to close it physically.\n",
2918 port->wwpn,
2919 zfcp_get_busid_by_port(port));
2920 debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_pboxed");
2921 zfcp_erp_port_reopen(port, 0);
2922 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2923 ZFCP_STATUS_FSFREQ_RETRY;
2924 break;
2925
2926 case FSF_ADAPTER_STATUS_AVAILABLE:
2927 ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
2928 switch (header->fsf_status_qual.word[0]) {
2929 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2930 ZFCP_LOG_FLAGS(2,
2931 "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
2932 debug_text_event(fsf_req->adapter->erp_dbf, 1,
2933 "fsf_sq_ltest");
2934 /* This will now be escalated by ERP */
2935 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2936 break;
2937 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2938 ZFCP_LOG_FLAGS(2,
2939 "FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
2940 /* ERP strategy will escalate */
2941 debug_text_event(fsf_req->adapter->erp_dbf, 1,
2942 "fsf_sq_ulp");
2943 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2944 break;
2945 default:
2946 ZFCP_LOG_NORMAL
2947 ("bug: Wrong status qualifier 0x%x arrived.\n",
2948 header->fsf_status_qual.word[0]);
2949 debug_text_event(fsf_req->adapter->erp_dbf, 0,
2950 "fsf_sq_inval:");
2951 debug_exception(
2952 fsf_req->adapter->erp_dbf, 0,
2953 &header->fsf_status_qual.word[0], sizeof (u32));
2954 break;
2955 }
2956 break;
2957
2958 case FSF_GOOD:
2959 ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
2960 ZFCP_LOG_DEBUG("Remote port 0x%016Lx via adapter %s "
2961 "physically closed, port handle 0x%x\n",
2962 port->wwpn,
2963 zfcp_get_busid_by_port(port), port->handle);
2964 /* can't use generic zfcp_erp_modify_port_status because
2965 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
2966 */
2967 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
2968 list_for_each_entry(unit, &port->unit_list_head, list)
2969 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
2970 retval = 0;
2971 break;
2972
2973 default:
2974 ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
2975 "(debug info 0x%x)\n",
2976 header->fsf_status);
2977 debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:");
2978 debug_exception(fsf_req->adapter->erp_dbf, 0,
2979 &header->fsf_status, sizeof (u32));
2980 break;
2981 }
2982
2983 skip_fsfstatus:
2984 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, &port->status);
2985 return retval;
2986}
2987
2988/*
2989 * function: zfcp_fsf_open_unit
2990 *
2991  * purpose:	submit FSF command "open LUN"
2992  *
2993  * returns:	0 - request successfully initiated
 *		!0 - request could not be initiated
2994 *
2995 * assumptions: This routine does not check whether the associated
2996 * remote port has already been opened. This should be
2997  *		done by calling routines. Otherwise an error status
2998  *		may be presented by FSF.
2999 */
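/*
 * Illustrative caller-side sketch (an assumption, not verbatim driver
 * code): an ERP strategy would typically invoke this only once the
 * parent port is open, e.g.
 *
 *	if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN,
 *			     &erp_action->port->status))
 *		retval = zfcp_fsf_open_unit(erp_action);
 *
 * Otherwise FSF may present a status such as FSF_PORT_HANDLE_NOT_VALID,
 * which the handler below answers with an adapter reopen.
 */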
3000int
3001zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
3002{
3003 volatile struct qdio_buffer_element *sbale;
3004 unsigned long lock_flags;
3005 int retval = 0;
3006
3007 /* setup new FSF request */
3008 retval = zfcp_fsf_req_create(erp_action->adapter,
3009 FSF_QTCB_OPEN_LUN,
3010 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
3011 erp_action->adapter->pool.fsf_req_erp,
3012 &lock_flags, &(erp_action->fsf_req));
3013 if (retval < 0) {
3014 ZFCP_LOG_INFO("error: Could not create open unit request for "
3015 "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n",
3016 erp_action->unit->fcp_lun,
3017 erp_action->unit->port->wwpn,
3018 zfcp_get_busid_by_adapter(erp_action->adapter));
3019 goto out;
3020 }
3021
3022 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
3023 erp_action->fsf_req->sbal_curr, 0);
3024 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
3025 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
3026
3027 erp_action->fsf_req->qtcb->header.port_handle =
3028 erp_action->port->handle;
3029 erp_action->fsf_req->qtcb->bottom.support.fcp_lun =
3030 erp_action->unit->fcp_lun;
3031 erp_action->fsf_req->qtcb->bottom.support.option =
3032 FSF_OPEN_LUN_SUPPRESS_BOXING;
3033 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
3034 erp_action->fsf_req->data.open_unit.unit = erp_action->unit;
3035 erp_action->fsf_req->erp_action = erp_action;
3036
3037 /* start QDIO request for this FSF request */
3038 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
3039 if (retval) {
3040 ZFCP_LOG_INFO("error: Could not send an open unit request "
3041 "on the adapter %s, port 0x%016Lx for "
3042 "unit 0x%016Lx\n",
3043 zfcp_get_busid_by_adapter(erp_action->adapter),
3044 erp_action->port->wwpn,
3045 erp_action->unit->fcp_lun);
3046 zfcp_fsf_req_free(erp_action->fsf_req);
3047 erp_action->fsf_req = NULL;
3048 goto out;
3049 }
3050
3051 ZFCP_LOG_TRACE("Open LUN request initiated (adapter %s, "
3052 "port 0x%016Lx, unit 0x%016Lx)\n",
3053 zfcp_get_busid_by_adapter(erp_action->adapter),
3054 erp_action->port->wwpn, erp_action->unit->fcp_lun);
3055 out:
3056 write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
3057 lock_flags);
3058 return retval;
3059}
3060
3061/*
3062 * function: zfcp_fsf_open_unit_handler
3063 *
3064 * purpose: is called for finished Open LUN command
3065 *
3066 * returns:
3067 */
3068static int
3069zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
3070{
3071 int retval = -EINVAL;
3072 struct zfcp_adapter *adapter;
3073 struct zfcp_unit *unit;
3074 struct fsf_qtcb_header *header;
3075 struct fsf_qtcb_bottom_support *bottom;
3076 struct fsf_queue_designator *queue_designator;
3077 u16 subtable, rule, counter;
3078 u32 allowed, exclusive, readwrite;
3079
3080 unit = fsf_req->data.open_unit.unit;
3081
3082 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
3083 /* don't change unit status in our bookkeeping */
3084 goto skip_fsfstatus;
3085 }
3086
3087 adapter = fsf_req->adapter;
3088 header = &fsf_req->qtcb->header;
3089 bottom = &fsf_req->qtcb->bottom.support;
3090 queue_designator = &header->fsf_status_qual.fsf_queue_designator;
3091
3092 allowed = bottom->lun_access_info & FSF_UNIT_ACCESS_OPEN_LUN_ALLOWED;
3093 exclusive = bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE;
3094 readwrite = bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER;
3095
3096 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
3097 ZFCP_STATUS_UNIT_SHARED |
3098 ZFCP_STATUS_UNIT_READONLY,
3099 &unit->status);
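	/*
	 * Note: the three access bits are re-derived from the QTCB on
	 * every open; the corresponding status flags are cleared first
	 * so that the FSF_GOOD branch below can set them afresh (when
	 * the adapter reports FSF_FEATURE_LUN_SHARING):
	 * ZFCP_STATUS_UNIT_SHARED if 'exclusive' is 0, and
	 * ZFCP_STATUS_UNIT_READONLY if 'readwrite' is 0.
	 */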
3100
3101 /* evaluate FSF status in QTCB */
3102 switch (header->fsf_status) {
3103
3104 case FSF_PORT_HANDLE_NOT_VALID:
3105 ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
3106 ZFCP_LOG_INFO("Temporary port identifier 0x%x "
3107 			      "for port 0x%016Lx on adapter %s is invalid. "
3108 			      "This may happen occasionally.\n",
3109 unit->port->handle,
3110 unit->port->wwpn, zfcp_get_busid_by_unit(unit));
3111 ZFCP_LOG_DEBUG("status qualifier:\n");
3112 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3113 (char *) &header->fsf_status_qual,
3114 sizeof (union fsf_status_qual));
3115 debug_text_event(adapter->erp_dbf, 1, "fsf_s_ph_nv");
3116 zfcp_erp_adapter_reopen(unit->port->adapter, 0);
3117 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3118 break;
3119
3120 case FSF_LUN_ALREADY_OPEN:
3121 ZFCP_LOG_FLAGS(0, "FSF_LUN_ALREADY_OPEN\n");
3122 ZFCP_LOG_NORMAL("bug: Attempted to open unit 0x%016Lx on "
3123 "remote port 0x%016Lx on adapter %s twice.\n",
3124 unit->fcp_lun,
3125 unit->port->wwpn, zfcp_get_busid_by_unit(unit));
3126 debug_text_exception(adapter->erp_dbf, 0,
3127 "fsf_s_uopen");
3128 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3129 break;
3130
3131 case FSF_ACCESS_DENIED:
3132 ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
3133 ZFCP_LOG_NORMAL("Access denied, cannot open unit 0x%016Lx on "
3134 "remote port 0x%016Lx on adapter %s\n",
3135 unit->fcp_lun, unit->port->wwpn,
3136 zfcp_get_busid_by_unit(unit));
3137 for (counter = 0; counter < 2; counter++) {
3138 subtable = header->fsf_status_qual.halfword[counter * 2];
3139 rule = header->fsf_status_qual.halfword[counter * 2 + 1];
3140 switch (subtable) {
3141 case FSF_SQ_CFDC_SUBTABLE_OS:
3142 case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
3143 case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
3144 case FSF_SQ_CFDC_SUBTABLE_LUN:
3145 ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
3146 zfcp_act_subtable_type[subtable], rule);
3147 break;
3148 }
3149 }
3150 debug_text_event(adapter->erp_dbf, 1, "fsf_s_access");
3151 zfcp_erp_unit_access_denied(unit);
3152 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
3153 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
3154 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3155 break;
3156
3157 case FSF_PORT_BOXED:
3158 ZFCP_LOG_FLAGS(2, "FSF_PORT_BOXED\n");
3159 ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s "
3160 "needs to be reopened\n",
3161 unit->port->wwpn, zfcp_get_busid_by_unit(unit));
3162 debug_text_event(adapter->erp_dbf, 2, "fsf_s_pboxed");
3163 zfcp_erp_port_reopen(unit->port, 0);
3164 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
3165 ZFCP_STATUS_FSFREQ_RETRY;
3166 break;
3167
3168 case FSF_LUN_SHARING_VIOLATION:
3169 ZFCP_LOG_FLAGS(2, "FSF_LUN_SHARING_VIOLATION\n");
3170 if (header->fsf_status_qual.word[0] != 0) {
3171 ZFCP_LOG_NORMAL("FCP-LUN 0x%Lx at the remote port "
3172 "with WWPN 0x%Lx "
3173 "connected to the adapter %s "
3174 "is already in use in LPAR%d, CSS%d\n",
3175 unit->fcp_lun,
3176 unit->port->wwpn,
3177 zfcp_get_busid_by_unit(unit),
3178 queue_designator->hla,
3179 queue_designator->cssid);
3180 } else {
3181 subtable = header->fsf_status_qual.halfword[4];
3182 rule = header->fsf_status_qual.halfword[5];
3183 switch (subtable) {
3184 case FSF_SQ_CFDC_SUBTABLE_OS:
3185 case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
3186 case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
3187 case FSF_SQ_CFDC_SUBTABLE_LUN:
3188 ZFCP_LOG_NORMAL("Access to FCP-LUN 0x%Lx at the "
3189 "remote port with WWPN 0x%Lx "
3190 "connected to the adapter %s "
3191 "is denied (%s rule %d)\n",
3192 unit->fcp_lun,
3193 unit->port->wwpn,
3194 zfcp_get_busid_by_unit(unit),
3195 zfcp_act_subtable_type[subtable],
3196 rule);
3197 break;
3198 }
3199 }
3200 ZFCP_LOG_DEBUG("status qualifier:\n");
3201 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3202 (char *) &header->fsf_status_qual,
3203 sizeof (union fsf_status_qual));
3204 debug_text_event(adapter->erp_dbf, 2,
3205 "fsf_s_l_sh_vio");
3206 zfcp_erp_unit_access_denied(unit);
3207 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
3208 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
3209 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3210 break;
3211
3212 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
3213 ZFCP_LOG_FLAGS(1, "FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED\n");
3214 ZFCP_LOG_INFO("error: The adapter ran out of resources. "
3215 "There is no handle (temporary port identifier) "
3216 "available for unit 0x%016Lx on port 0x%016Lx "
3217 "on adapter %s\n",
3218 unit->fcp_lun,
3219 unit->port->wwpn,
3220 zfcp_get_busid_by_unit(unit));
3221 debug_text_event(adapter->erp_dbf, 1,
3222 "fsf_s_max_units");
3223 zfcp_erp_unit_failed(unit);
3224 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3225 break;
3226
3227 case FSF_ADAPTER_STATUS_AVAILABLE:
3228 ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
3229 switch (header->fsf_status_qual.word[0]) {
3230 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
3231 ZFCP_LOG_FLAGS(2,
3232 "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
3233 /* Re-establish link to port */
3234 debug_text_event(adapter->erp_dbf, 1,
3235 "fsf_sq_ltest");
3236 zfcp_erp_port_reopen(unit->port, 0);
3237 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3238 break;
3239 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
3240 ZFCP_LOG_FLAGS(2,
3241 "FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
3242 /* ERP strategy will escalate */
3243 debug_text_event(adapter->erp_dbf, 1,
3244 "fsf_sq_ulp");
3245 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3246 break;
3247 default:
3248 ZFCP_LOG_NORMAL
3249 ("bug: Wrong status qualifier 0x%x arrived.\n",
3250 header->fsf_status_qual.word[0]);
3251 debug_text_event(adapter->erp_dbf, 0,
3252 "fsf_sq_inval:");
3253 debug_exception(adapter->erp_dbf, 0,
3254 &header->fsf_status_qual.word[0],
3255 sizeof (u32));
3256 }
3257 break;
3258
3259 case FSF_INVALID_COMMAND_OPTION:
3260 ZFCP_LOG_FLAGS(2, "FSF_INVALID_COMMAND_OPTION\n");
3261 ZFCP_LOG_NORMAL(
3262 "Invalid option 0x%x has been specified "
3263 "in QTCB bottom sent to the adapter %s\n",
3264 bottom->option,
3265 zfcp_get_busid_by_adapter(adapter));
3266 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3267 retval = -EINVAL;
3268 break;
3269
3270 case FSF_GOOD:
3271 ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
3272 /* save LUN handle assigned by FSF */
3273 unit->handle = header->lun_handle;
3274 ZFCP_LOG_TRACE("unit 0x%016Lx on remote port 0x%016Lx on "
3275 			       "adapter %s opened, LUN handle 0x%x\n",
3276 unit->fcp_lun,
3277 unit->port->wwpn,
3278 zfcp_get_busid_by_unit(unit),
3279 unit->handle);
3280 /* mark unit as open */
3281 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
3282
3283 if (adapter->supported_features & FSF_FEATURE_LUN_SHARING){
3284 if (!exclusive)
3285 atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
3286 &unit->status);
3287
3288 if (!readwrite) {
3289 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
3290 &unit->status);
3291 ZFCP_LOG_NORMAL("read-only access for unit "
3292 "(adapter %s, wwpn=0x%016Lx, "
3293 "fcp_lun=0x%016Lx)\n",
3294 zfcp_get_busid_by_unit(unit),
3295 unit->port->wwpn,
3296 unit->fcp_lun);
3297 }
3298
3299 if (exclusive && !readwrite) {
3300 ZFCP_LOG_NORMAL("exclusive access of read-only "
3301 "unit not supported\n");
3302 zfcp_erp_unit_failed(unit);
3303 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3304 zfcp_erp_unit_shutdown(unit, 0);
3305 } else if (!exclusive && readwrite) {
3306 ZFCP_LOG_NORMAL("shared access of read-write "
3307 "unit not supported\n");
3308 zfcp_erp_unit_failed(unit);
3309 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3310 zfcp_erp_unit_shutdown(unit, 0);
3311 }
3312 }
3313
3314 retval = 0;
3315 break;
3316
3317 default:
3318 ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
3319 "(debug info 0x%x)\n",
3320 header->fsf_status);
3321 debug_text_event(adapter->erp_dbf, 0, "fsf_s_inval:");
3322 debug_exception(adapter->erp_dbf, 0,
3323 &header->fsf_status, sizeof (u32));
3324 break;
3325 }
3326
3327 skip_fsfstatus:
3328 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &unit->status);
3329 return retval;
3330}
3331
3332/*
3333 * function: zfcp_fsf_close_unit
3334 *
3335  * purpose:	submit FSF command "close LUN"
3336  *
3337  * returns:	0 - request successfully initiated
3338  *		!0 - request could not be initiated
3339 *
3340 * assumptions: This routine does not check whether the associated
3341 * remote port/lun has already been opened. This should be
3342  *		done by calling routines. Otherwise an error status
3343  *		may be presented by FSF.
3344 */
3345int
3346zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
3347{
3348 volatile struct qdio_buffer_element *sbale;
3349 unsigned long lock_flags;
3350 int retval = 0;
3351
3352 /* setup new FSF request */
3353 retval = zfcp_fsf_req_create(erp_action->adapter,
3354 FSF_QTCB_CLOSE_LUN,
3355 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
3356 erp_action->adapter->pool.fsf_req_erp,
3357 &lock_flags, &(erp_action->fsf_req));
3358 if (retval < 0) {
3359 ZFCP_LOG_INFO("error: Could not create close unit request for "
3360 "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n",
3361 erp_action->unit->fcp_lun,
3362 erp_action->port->wwpn,
3363 zfcp_get_busid_by_adapter(erp_action->adapter));
3364 goto out;
3365 }
3366
3367 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
3368 erp_action->fsf_req->sbal_curr, 0);
3369 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
3370 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
3371
3372 erp_action->fsf_req->qtcb->header.port_handle =
3373 erp_action->port->handle;
3374 erp_action->fsf_req->qtcb->header.lun_handle = erp_action->unit->handle;
3375 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
3376 erp_action->fsf_req->data.close_unit.unit = erp_action->unit;
3377 erp_action->fsf_req->erp_action = erp_action;
3378
3379 /* start QDIO request for this FSF request */
3380 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
3381 if (retval) {
3382 ZFCP_LOG_INFO("error: Could not send a close unit request for "
3383 			      "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n",
3384 erp_action->unit->fcp_lun,
3385 erp_action->port->wwpn,
3386 zfcp_get_busid_by_adapter(erp_action->adapter));
3387 zfcp_fsf_req_free(erp_action->fsf_req);
3388 erp_action->fsf_req = NULL;
3389 goto out;
3390 }
3391
3392 ZFCP_LOG_TRACE("Close LUN request initiated (adapter %s, "
3393 "port 0x%016Lx, unit 0x%016Lx)\n",
3394 zfcp_get_busid_by_adapter(erp_action->adapter),
3395 erp_action->port->wwpn, erp_action->unit->fcp_lun);
3396 out:
3397 write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
3398 lock_flags);
3399 return retval;
3400}
3401
3402/*
3403 * function: zfcp_fsf_close_unit_handler
3404 *
3405 * purpose: is called for finished Close LUN FSF command
3406 *
3407 * returns:
3408 */
3409static int
3410zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
3411{
3412 int retval = -EINVAL;
3413 struct zfcp_unit *unit;
3414
3415 unit = fsf_req->data.close_unit.unit; /* restore unit */
3416
3417 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
3418 /* don't change unit status in our bookkeeping */
3419 goto skip_fsfstatus;
3420 }
3421
3422 /* evaluate FSF status in QTCB */
3423 switch (fsf_req->qtcb->header.fsf_status) {
3424
3425 case FSF_PORT_HANDLE_NOT_VALID:
3426 ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
3427 ZFCP_LOG_INFO("Temporary port identifier 0x%x for port "
3428 			      "0x%016Lx on adapter %s is invalid. This may "
3429 			      "happen in rare circumstances.\n",
3430 unit->port->handle,
3431 unit->port->wwpn,
3432 zfcp_get_busid_by_unit(unit));
3433 ZFCP_LOG_DEBUG("status qualifier:\n");
3434 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3435 (char *) &fsf_req->qtcb->header.fsf_status_qual,
3436 sizeof (union fsf_status_qual));
3437 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3438 "fsf_s_phand_nv");
3439 zfcp_erp_adapter_reopen(unit->port->adapter, 0);
3440 zfcp_cmd_dbf_event_fsf("porthinv", fsf_req,
3441 &fsf_req->qtcb->header.fsf_status_qual,
3442 sizeof (union fsf_status_qual));
3443 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3444 break;
3445
3446 case FSF_LUN_HANDLE_NOT_VALID:
3447 ZFCP_LOG_FLAGS(1, "FSF_LUN_HANDLE_NOT_VALID\n");
3448 ZFCP_LOG_INFO("Temporary LUN identifier 0x%x of unit "
3449 "0x%016Lx on port 0x%016Lx on adapter %s is "
3450 "invalid. This may happen occasionally.\n",
3451 unit->handle,
3452 unit->fcp_lun,
3453 unit->port->wwpn,
3454 zfcp_get_busid_by_unit(unit));
3455 ZFCP_LOG_DEBUG("Status qualifier data:\n");
3456 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3457 (char *) &fsf_req->qtcb->header.fsf_status_qual,
3458 sizeof (union fsf_status_qual));
3459 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3460 "fsf_s_lhand_nv");
3461 zfcp_erp_port_reopen(unit->port, 0);
3462 zfcp_cmd_dbf_event_fsf("lunhinv", fsf_req,
3463 &fsf_req->qtcb->header.fsf_status_qual,
3464 sizeof (union fsf_status_qual));
3465 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3466 break;
3467
3468 case FSF_PORT_BOXED:
3469 ZFCP_LOG_FLAGS(2, "FSF_PORT_BOXED\n");
3470 ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s "
3471 "needs to be reopened\n",
3472 unit->port->wwpn,
3473 zfcp_get_busid_by_unit(unit));
3474 debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_s_pboxed");
3475 zfcp_erp_port_reopen(unit->port, 0);
3476 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
3477 ZFCP_STATUS_FSFREQ_RETRY;
3478 break;
3479
3480 case FSF_ADAPTER_STATUS_AVAILABLE:
3481 ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
3482 switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) {
3483 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
3484 ZFCP_LOG_FLAGS(2,
3485 "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
3486 /* re-establish link to port */
3487 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3488 "fsf_sq_ltest");
3489 zfcp_erp_port_reopen(unit->port, 0);
3490 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3491 break;
3492 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
3493 ZFCP_LOG_FLAGS(2,
3494 "FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
3495 /* ERP strategy will escalate */
3496 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3497 "fsf_sq_ulp");
3498 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3499 break;
3500 default:
3501 ZFCP_LOG_NORMAL
3502 ("bug: Wrong status qualifier 0x%x arrived.\n",
3503 fsf_req->qtcb->header.fsf_status_qual.word[0]);
3504 debug_text_event(fsf_req->adapter->erp_dbf, 0,
3505 "fsf_sq_inval:");
3506 debug_exception(
3507 fsf_req->adapter->erp_dbf, 0,
3508 &fsf_req->qtcb->header.fsf_status_qual.word[0],
3509 sizeof (u32));
3510 break;
3511 }
3512 break;
3513
3514 case FSF_GOOD:
3515 ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
3516 ZFCP_LOG_TRACE("unit 0x%016Lx on port 0x%016Lx on adapter %s "
3517 			       "closed, LUN handle 0x%x\n",
3518 unit->fcp_lun,
3519 unit->port->wwpn,
3520 zfcp_get_busid_by_unit(unit),
3521 unit->handle);
3522 /* mark unit as closed */
3523 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
3524 retval = 0;
3525 break;
3526
3527 default:
3528 ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
3529 "(debug info 0x%x)\n",
3530 fsf_req->qtcb->header.fsf_status);
3531 debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:");
3532 debug_exception(fsf_req->adapter->erp_dbf, 0,
3533 &fsf_req->qtcb->header.fsf_status,
3534 sizeof (u32));
3535 break;
3536 }
3537
3538 skip_fsfstatus:
3539 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &unit->status);
3540 return retval;
3541}
3542
3543/**
3544 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
3545 * @adapter: adapter where scsi command is issued
3546 * @unit: unit where command is sent to
3547 * @scsi_cmnd: scsi command to be sent
3548 * @timer: timer to be started when request is initiated
3549 * @req_flags: flags for fsf_request
3550 */
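/*
 * Illustrative usage sketch (hedged, not verbatim driver code): the
 * SCSI midlayer entry point would forward a command roughly like
 *
 *	retval = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt,
 *						NULL, ZFCP_REQ_AUTO_CLEANUP);
 *
 * using a request flag already defined for this file; the real
 * queuecommand path adds its own state checks and timer handling.
 */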
3551int
3552zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
3553 struct zfcp_unit *unit,
3554 struct scsi_cmnd * scsi_cmnd,
3555 struct timer_list *timer, int req_flags)
3556{
3557 struct zfcp_fsf_req *fsf_req = NULL;
3558 struct fcp_cmnd_iu *fcp_cmnd_iu;
3559 unsigned int sbtype;
3560 unsigned long lock_flags;
3561 int real_bytes = 0;
3562 int retval = 0;
3563 int mask;
3564
3565 /* setup new FSF request */
3566 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
3567 adapter->pool.fsf_req_scsi,
3568 &lock_flags, &fsf_req);
3569 if (unlikely(retval < 0)) {
3570 ZFCP_LOG_DEBUG("error: Could not create FCP command request "
3571 "for unit 0x%016Lx on port 0x%016Lx on "
3572 "adapter %s\n",
3573 unit->fcp_lun,
3574 unit->port->wwpn,
3575 zfcp_get_busid_by_adapter(adapter));
3576 goto failed_req_create;
3577 }
3578
3579 /*
3580 * associate FSF request with SCSI request
3581 * (need this for look up on abort)
3582 */
3583 fsf_req->data.send_fcp_command_task.fsf_req = fsf_req;
3584 scsi_cmnd->host_scribble = (char *) &(fsf_req->data);
3585
3586 /*
3587 * associate SCSI command with FSF request
3588 * (need this for look up on normal command completion)
3589 */
3590 fsf_req->data.send_fcp_command_task.scsi_cmnd = scsi_cmnd;
3591 fsf_req->data.send_fcp_command_task.start_jiffies = jiffies;
3592 fsf_req->data.send_fcp_command_task.unit = unit;
3593 ZFCP_LOG_DEBUG("unit=%p, fcp_lun=0x%016Lx\n", unit, unit->fcp_lun);
3594
3595 /* set handles of unit and its parent port in QTCB */
3596 fsf_req->qtcb->header.lun_handle = unit->handle;
3597 fsf_req->qtcb->header.port_handle = unit->port->handle;
3598
3599 /* FSF does not define the structure of the FCP_CMND IU */
3600 fcp_cmnd_iu = (struct fcp_cmnd_iu *)
3601 &(fsf_req->qtcb->bottom.io.fcp_cmnd);
3602
3603 /*
3604 * set depending on data direction:
3605 * data direction bits in SBALE (SB Type)
3606 * data direction bits in QTCB
3607 * data direction bits in FCP_CMND IU
3608 */
3609 switch (scsi_cmnd->sc_data_direction) {
3610 case DMA_NONE:
3611 ZFCP_LOG_FLAGS(3, "DMA_NONE\n");
3612 fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
3613 /*
3614 * FIXME(qdio):
3615 * what is the correct type for commands
3616 * without 'real' data buffers?
3617 */
3618 sbtype = SBAL_FLAGS0_TYPE_READ;
3619 break;
3620 case DMA_FROM_DEVICE:
3621 ZFCP_LOG_FLAGS(3, "DMA_FROM_DEVICE\n");
3622 fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
3623 sbtype = SBAL_FLAGS0_TYPE_READ;
3624 fcp_cmnd_iu->rddata = 1;
3625 break;
3626 case DMA_TO_DEVICE:
3627 ZFCP_LOG_FLAGS(3, "DMA_TO_DEVICE\n");
3628 fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
3629 sbtype = SBAL_FLAGS0_TYPE_WRITE;
3630 fcp_cmnd_iu->wddata = 1;
3631 break;
3632 case DMA_BIDIRECTIONAL:
3633 ZFCP_LOG_FLAGS(0, "DMA_BIDIRECTIONAL not supported\n");
3634 default:
3635 /*
3636 * dummy, catch this condition earlier
3637 * in zfcp_scsi_queuecommand
3638 */
3639 goto failed_scsi_cmnd;
3640 }
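	/*
	 * Summary of the mapping established above:
	 *	DMA_NONE        -> FSF_DATADIR_CMND,  SBAL type READ
	 *	DMA_FROM_DEVICE -> FSF_DATADIR_READ,  SBAL type READ,  rddata=1
	 *	DMA_TO_DEVICE   -> FSF_DATADIR_WRITE, SBAL type WRITE, wddata=1
	 * DMA_BIDIRECTIONAL is not supported and is expected to be
	 * filtered out earlier in zfcp_scsi_queuecommand.
	 */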
3641
3642 	/* set FC service class in QTCB (3 by default) */
3643 fsf_req->qtcb->bottom.io.service_class = adapter->fc_service_class;
3644
3645 /* set FCP_LUN in FCP_CMND IU in QTCB */
3646 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
3647
3648 mask = ZFCP_STATUS_UNIT_READONLY | ZFCP_STATUS_UNIT_SHARED;
3649
3650 /* set task attributes in FCP_CMND IU in QTCB */
3651 if (likely((scsi_cmnd->device->simple_tags) ||
3652 (atomic_test_mask(mask, &unit->status))))
3653 fcp_cmnd_iu->task_attribute = SIMPLE_Q;
3654 else
3655 fcp_cmnd_iu->task_attribute = UNTAGGED;
3656
3657 /* set additional length of FCP_CDB in FCP_CMND IU in QTCB, if needed */
3658 if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH)) {
3659 fcp_cmnd_iu->add_fcp_cdb_length
3660 = (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
3661 ZFCP_LOG_TRACE("SCSI CDB length is 0x%x, "
3662 "additional FCP_CDB length is 0x%x "
3663 "(shifted right 2 bits)\n",
3664 scsi_cmnd->cmd_len,
3665 fcp_cmnd_iu->add_fcp_cdb_length);
3666 }
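	/*
	 * Worked example (assuming the usual 16-byte FCP_CDB_LENGTH):
	 * a 20-byte CDB yields add_fcp_cdb_length = (20 - 16) >> 2 = 1,
	 * i.e. one additional 4-byte word appended to the fixed FCP_CDB.
	 */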
3667 /*
3668 * copy SCSI CDB (including additional length, if any) to
3669 * FCP_CDB in FCP_CMND IU in QTCB
3670 */
3671 memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
3672
3673 /* FCP CMND IU length in QTCB */
3674 fsf_req->qtcb->bottom.io.fcp_cmnd_length =
3675 sizeof (struct fcp_cmnd_iu) +
3676 fcp_cmnd_iu->add_fcp_cdb_length + sizeof (fcp_dl_t);
3677
3678 /* generate SBALEs from data buffer */
3679 real_bytes = zfcp_qdio_sbals_from_scsicmnd(fsf_req, sbtype, scsi_cmnd);
3680 if (unlikely(real_bytes < 0)) {
3681 if (fsf_req->sbal_number < ZFCP_MAX_SBALS_PER_REQ) {
3682 ZFCP_LOG_DEBUG(
3683 "Data did not fit into available buffer(s), "
3684 "waiting for more...\n");
3685 retval = -EIO;
3686 } else {
3687 ZFCP_LOG_NORMAL("error: No truncation implemented but "
3688 "required. Shutting down unit "
3689 "(adapter %s, port 0x%016Lx, "
3690 "unit 0x%016Lx)\n",
3691 zfcp_get_busid_by_unit(unit),
3692 unit->port->wwpn,
3693 unit->fcp_lun);
3694 zfcp_erp_unit_shutdown(unit, 0);
3695 retval = -EINVAL;
3696 }
3697 goto no_fit;
3698 }
3699
3700 	/* set FCP data length in FCP_CMND IU in QTCB */
3701 zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
3702
3703 ZFCP_LOG_DEBUG("Sending SCSI command:\n");
3704 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3705 (char *) scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
3706
3707 /*
3708 * start QDIO request for this FSF request
3709 	 * (request data is covered by the SBALEs generated above)
3710 */
3711 retval = zfcp_fsf_req_send(fsf_req, timer);
3712 if (unlikely(retval < 0)) {
3713 ZFCP_LOG_INFO("error: Could not send FCP command request "
3714 "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n",
3715 zfcp_get_busid_by_adapter(adapter),
3716 unit->port->wwpn,
3717 unit->fcp_lun);
3718 goto send_failed;
3719 }
3720
3721 ZFCP_LOG_TRACE("Send FCP Command initiated (adapter %s, "
3722 "port 0x%016Lx, unit 0x%016Lx)\n",
3723 zfcp_get_busid_by_adapter(adapter),
3724 unit->port->wwpn,
3725 unit->fcp_lun);
3726 goto success;
3727
3728 send_failed:
3729 no_fit:
3730 failed_scsi_cmnd:
3731 zfcp_fsf_req_free(fsf_req);
3732 fsf_req = NULL;
3733 scsi_cmnd->host_scribble = NULL;
3734 success:
3735 failed_req_create:
3736 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
3737 return retval;
3738}
3739
3740/*
3741 * function: zfcp_fsf_send_fcp_command_task_management
3742 *
3743  * purpose:	submit FCP command with a task management function
3744  *
3745  * returns:	address of initiated FSF request
 *		NULL - request could not be initiated
3746 *
3747 * FIXME(design): should be watched by a timeout!!!
3748 * FIXME(design) shouldn't this be modified to return an int
3749 * also...don't know how though
3750 *
3751 */
3752struct zfcp_fsf_req *
3753zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
3754 struct zfcp_unit *unit,
3755 u8 tm_flags, int req_flags)
3756{
3757 struct zfcp_fsf_req *fsf_req = NULL;
3758 int retval = 0;
3759 struct fcp_cmnd_iu *fcp_cmnd_iu;
3760 unsigned long lock_flags;
3761 volatile struct qdio_buffer_element *sbale;
3762
3763 /* setup new FSF request */
3764 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
3765 adapter->pool.fsf_req_scsi,
3766 &lock_flags, &fsf_req);
3767 if (retval < 0) {
3768 ZFCP_LOG_INFO("error: Could not create FCP command (task "
3769 "management) request for adapter %s, port "
3770 			      "0x%016Lx, unit 0x%016Lx.\n",
3771 zfcp_get_busid_by_adapter(adapter),
3772 unit->port->wwpn, unit->fcp_lun);
3773 goto out;
3774 }
3775
3776 /*
3777 * Used to decide on proper handler in the return path,
3778 * could be either zfcp_fsf_send_fcp_command_task_handler or
3779 	 * zfcp_fsf_send_fcp_command_task_management_handler
	 */
3780
3781 fsf_req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
3782
3783 /*
3784 * hold a pointer to the unit being target of this
3785 * task management request
3786 */
3787 fsf_req->data.send_fcp_command_task_management.unit = unit;
3788
3789 /* set FSF related fields in QTCB */
3790 fsf_req->qtcb->header.lun_handle = unit->handle;
3791 fsf_req->qtcb->header.port_handle = unit->port->handle;
3792 fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
3793 fsf_req->qtcb->bottom.io.service_class = adapter->fc_service_class;
3794 fsf_req->qtcb->bottom.io.fcp_cmnd_length =
3795 sizeof (struct fcp_cmnd_iu) + sizeof (fcp_dl_t);
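	/*
	 * Note: no SCSI CDB accompanies a task management function, so
	 * the FCP_CMND length is just the fixed IU plus the trailing
	 * fcp_dl field, with no additional CDB words.
	 */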
3796
3797 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
3798 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
3799 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
3800
3801 /* set FCP related fields in FCP_CMND IU in QTCB */
3802 fcp_cmnd_iu = (struct fcp_cmnd_iu *)
3803 &(fsf_req->qtcb->bottom.io.fcp_cmnd);
3804 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
3805 fcp_cmnd_iu->task_management_flags = tm_flags;
3806
3807 /* start QDIO request for this FSF request */
3808 zfcp_fsf_start_scsi_er_timer(adapter);
3809 retval = zfcp_fsf_req_send(fsf_req, NULL);
3810 if (retval) {
3811 del_timer(&adapter->scsi_er_timer);
3812 ZFCP_LOG_INFO("error: Could not send an FCP-command (task "
3813 "management) on adapter %s, port 0x%016Lx for "
3814 			      "unit 0x%016Lx\n",
3815 zfcp_get_busid_by_adapter(adapter),
3816 unit->port->wwpn,
3817 unit->fcp_lun);
3818 zfcp_fsf_req_free(fsf_req);
3819 fsf_req = NULL;
3820 goto out;
3821 }
3822
3823 ZFCP_LOG_TRACE("Send FCP Command (task management function) initiated "
3824 "(adapter %s, port 0x%016Lx, unit 0x%016Lx, "
3825 "tm_flags=0x%x)\n",
3826 zfcp_get_busid_by_adapter(adapter),
3827 unit->port->wwpn,
3828 unit->fcp_lun,
3829 tm_flags);
3830 out:
3831 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
3832 return fsf_req;
3833}
3834
3835/*
3836 * function: zfcp_fsf_send_fcp_command_handler
3837 *
3838 * purpose: is called for finished Send FCP Command
3839 *
3840 * returns:
3841 */
3842static int
3843zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3844{
3845 int retval = -EINVAL;
3846 struct zfcp_unit *unit;
3847 struct fsf_qtcb_header *header;
3848 u16 subtable, rule, counter;
3849
3850 header = &fsf_req->qtcb->header;
3851
3852 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
3853 unit = fsf_req->data.send_fcp_command_task_management.unit;
3854 else
3855 unit = fsf_req->data.send_fcp_command_task.unit;
3856
3857 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
3858 /* go directly to calls of special handlers */
3859 goto skip_fsfstatus;
3860 }
3861
3862 /* evaluate FSF status in QTCB */
3863 switch (header->fsf_status) {
3864
3865 case FSF_PORT_HANDLE_NOT_VALID:
3866 ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
3867 ZFCP_LOG_INFO("Temporary port identifier 0x%x for port "
3868 			      "0x%016Lx on adapter %s is invalid\n",
3869 unit->port->handle,
3870 unit->port->wwpn, zfcp_get_busid_by_unit(unit));
3871 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3872 (char *) &header->fsf_status_qual,
3873 sizeof (union fsf_status_qual));
3874 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3875 "fsf_s_phand_nv");
3876 zfcp_erp_adapter_reopen(unit->port->adapter, 0);
3877 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3878 break;
3879
3880 case FSF_LUN_HANDLE_NOT_VALID:
3881 ZFCP_LOG_FLAGS(1, "FSF_LUN_HANDLE_NOT_VALID\n");
3882 ZFCP_LOG_INFO("Temporary LUN identifier 0x%x for unit "
3883 "0x%016Lx on port 0x%016Lx on adapter %s is "
3884 "invalid. This may happen occasionally.\n",
3885 unit->handle,
3886 unit->fcp_lun,
3887 unit->port->wwpn,
3888 zfcp_get_busid_by_unit(unit));
3889 ZFCP_LOG_NORMAL("Status qualifier data:\n");
3890 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
3891 (char *) &header->fsf_status_qual,
3892 sizeof (union fsf_status_qual));
3893 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3894 "fsf_s_uhand_nv");
3895 zfcp_erp_port_reopen(unit->port, 0);
3896 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3897 break;
3898
3899 case FSF_HANDLE_MISMATCH:
3900 ZFCP_LOG_FLAGS(0, "FSF_HANDLE_MISMATCH\n");
3901 ZFCP_LOG_NORMAL("bug: The port handle 0x%x has changed "
3902 "unexpectedly. (adapter %s, port 0x%016Lx, "
3903 "unit 0x%016Lx)\n",
3904 unit->port->handle,
3905 zfcp_get_busid_by_unit(unit),
3906 unit->port->wwpn,
3907 unit->fcp_lun);
3908 ZFCP_LOG_NORMAL("status qualifier:\n");
3909 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
3910 (char *) &header->fsf_status_qual,
3911 sizeof (union fsf_status_qual));
3912 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3913 "fsf_s_hand_mis");
3914 zfcp_erp_adapter_reopen(unit->port->adapter, 0);
3915 zfcp_cmd_dbf_event_fsf("handmism",
3916 fsf_req,
3917 &header->fsf_status_qual,
3918 sizeof (union fsf_status_qual));
3919 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3920 break;
3921
3922 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
3923 ZFCP_LOG_FLAGS(0, "FSF_SERVICE_CLASS_NOT_SUPPORTED\n");
3924 if (fsf_req->adapter->fc_service_class <= 3) {
3925 ZFCP_LOG_NORMAL("error: The adapter %s does "
3926 "not support fibrechannel class %d.\n",
3927 zfcp_get_busid_by_unit(unit),
3928 fsf_req->adapter->fc_service_class);
3929 } else {
3930 ZFCP_LOG_NORMAL("bug: The fibrechannel class at "
3931 "adapter %s is invalid. "
3932 "(debug info %d)\n",
3933 zfcp_get_busid_by_unit(unit),
3934 fsf_req->adapter->fc_service_class);
3935 }
3936 /* stop operation for this adapter */
3937 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
3938 "fsf_s_class_nsup");
3939 zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
3940 zfcp_cmd_dbf_event_fsf("unsclass",
3941 fsf_req,
3942 &header->fsf_status_qual,
3943 sizeof (union fsf_status_qual));
3944 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3945 break;
3946
3947 case FSF_FCPLUN_NOT_VALID:
3948 ZFCP_LOG_FLAGS(0, "FSF_FCPLUN_NOT_VALID\n");
3949 ZFCP_LOG_NORMAL("bug: unit 0x%016Lx on port 0x%016Lx on "
3950 				"adapter %s does not have a correct unit "
3951 "handle 0x%x\n",
3952 unit->fcp_lun,
3953 unit->port->wwpn,
3954 zfcp_get_busid_by_unit(unit),
3955 unit->handle);
3956 ZFCP_LOG_DEBUG("status qualifier:\n");
3957 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3958 (char *) &header->fsf_status_qual,
3959 sizeof (union fsf_status_qual));
3960 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3961 "fsf_s_fcp_lun_nv");
3962 zfcp_erp_port_reopen(unit->port, 0);
3963 zfcp_cmd_dbf_event_fsf("fluninv",
3964 fsf_req,
3965 &header->fsf_status_qual,
3966 sizeof (union fsf_status_qual));
3967 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3968 break;
3969
3970 case FSF_ACCESS_DENIED:
3971 ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
3972 ZFCP_LOG_NORMAL("Access denied, cannot send FCP command to "
3973 "unit 0x%016Lx on port 0x%016Lx on "
3974 "adapter %s\n", unit->fcp_lun, unit->port->wwpn,
3975 zfcp_get_busid_by_unit(unit));
3976 for (counter = 0; counter < 2; counter++) {
3977 subtable = header->fsf_status_qual.halfword[counter * 2];
3978 rule = header->fsf_status_qual.halfword[counter * 2 + 1];
3979 switch (subtable) {
3980 case FSF_SQ_CFDC_SUBTABLE_OS:
3981 case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
3982 case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
3983 case FSF_SQ_CFDC_SUBTABLE_LUN:
3984 ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
3985 zfcp_act_subtable_type[subtable], rule);
3986 break;
3987 }
3988 }
3989 debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_access");
3990 zfcp_erp_unit_access_denied(unit);
3991 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3992 break;
3993
3994 case FSF_DIRECTION_INDICATOR_NOT_VALID:
3995 ZFCP_LOG_FLAGS(0, "FSF_DIRECTION_INDICATOR_NOT_VALID\n");
3996 ZFCP_LOG_INFO("bug: Invalid data direction given for unit "
3997 "0x%016Lx on port 0x%016Lx on adapter %s "
3998 "(debug info %d)\n",
3999 unit->fcp_lun,
4000 unit->port->wwpn,
4001 zfcp_get_busid_by_unit(unit),
4002 fsf_req->qtcb->bottom.io.data_direction);
4003 /* stop operation for this adapter */
4004 debug_text_event(fsf_req->adapter->erp_dbf, 0,
4005 "fsf_s_dir_ind_nv");
4006 zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
4007 zfcp_cmd_dbf_event_fsf("dirinv",
4008 fsf_req,
4009 &header->fsf_status_qual,
4010 sizeof (union fsf_status_qual));
4011 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4012 break;
4013
4014 case FSF_CMND_LENGTH_NOT_VALID:
4015 ZFCP_LOG_FLAGS(0, "FSF_CMND_LENGTH_NOT_VALID\n");
4016 ZFCP_LOG_NORMAL
4017 ("bug: An invalid control-data-block length field "
4018 "was found in a command for unit 0x%016Lx on port "
4019 		     "0x%016Lx on adapter %s (debug info %d)\n",
4020 unit->fcp_lun, unit->port->wwpn,
4021 zfcp_get_busid_by_unit(unit),
4022 fsf_req->qtcb->bottom.io.fcp_cmnd_length);
4023 /* stop operation for this adapter */
4024 debug_text_event(fsf_req->adapter->erp_dbf, 0,
4025 "fsf_s_cmd_len_nv");
4026 zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
4027 zfcp_cmd_dbf_event_fsf("cleninv",
4028 fsf_req,
4029 &header->fsf_status_qual,
4030 sizeof (union fsf_status_qual));
4031 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4032 break;
4033
4034 case FSF_PORT_BOXED:
4035 ZFCP_LOG_FLAGS(2, "FSF_PORT_BOXED\n");
4036 ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s "
4037 "needs to be reopened\n",
4038 unit->port->wwpn, zfcp_get_busid_by_unit(unit));
4039 debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_s_pboxed");
4040 zfcp_erp_port_reopen(unit->port, 0);
4041 zfcp_cmd_dbf_event_fsf("portbox", fsf_req,
4042 &header->fsf_status_qual,
4043 sizeof (union fsf_status_qual));
4044 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
4045 ZFCP_STATUS_FSFREQ_RETRY;
4046 break;
4047
4048 case FSF_LUN_BOXED:
4049 ZFCP_LOG_FLAGS(0, "FSF_LUN_BOXED\n");
4050 ZFCP_LOG_NORMAL("unit needs to be reopened (adapter %s, "
4051 "wwpn=0x%016Lx, fcp_lun=0x%016Lx)\n",
4052 zfcp_get_busid_by_unit(unit),
4053 unit->port->wwpn, unit->fcp_lun);
4054 debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_lboxed");
4055 zfcp_erp_unit_reopen(unit, 0);
4056 zfcp_cmd_dbf_event_fsf("unitbox", fsf_req,
4057 &header->fsf_status_qual,
4058 sizeof(union fsf_status_qual));
4059 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
4060 | ZFCP_STATUS_FSFREQ_RETRY;
4061 break;
4062
4063 case FSF_ADAPTER_STATUS_AVAILABLE:
4064 ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
4065 switch (header->fsf_status_qual.word[0]) {
4066 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
4067 ZFCP_LOG_FLAGS(2,
4068 "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
4069 /* re-establish link to port */
4070 debug_text_event(fsf_req->adapter->erp_dbf, 1,
4071 "fsf_sq_ltest");
4072 zfcp_erp_port_reopen(unit->port, 0);
4073 zfcp_cmd_dbf_event_fsf(
4074 "sqltest",
4075 fsf_req,
4076 &header->fsf_status_qual,
4077 sizeof (union fsf_status_qual));
4078 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4079 break;
4080 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
4081 ZFCP_LOG_FLAGS(3,
4082 "FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
4083 /* FIXME(hw) need proper specs for proper action */
4084 /* let scsi stack deal with retries and escalation */
4085 debug_text_event(fsf_req->adapter->erp_dbf, 1,
4086 "fsf_sq_ulp");
4087 zfcp_cmd_dbf_event_fsf(
4088 "sqdeperp",
4089 fsf_req,
4090 &header->fsf_status_qual,
4091 sizeof (union fsf_status_qual));
4092 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4093 break;
4094 default:
4095 /* FIXME: shall we consider this a successful transfer? */
4096 ZFCP_LOG_NORMAL
4097 ("bug: Wrong status qualifier 0x%x arrived.\n",
4098 header->fsf_status_qual.word[0]);
4099 debug_text_event(fsf_req->adapter->erp_dbf, 0,
4100 "fsf_sq_inval:");
4101 debug_exception(fsf_req->adapter->erp_dbf, 0,
4102 &header->fsf_status_qual.word[0],
4103 sizeof(u32));
4104 break;
4105 }
4106 break;
4107
4108 case FSF_GOOD:
4109 ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
4110 break;
4111
4112 case FSF_FCP_RSP_AVAILABLE:
4113 ZFCP_LOG_FLAGS(2, "FSF_FCP_RSP_AVAILABLE\n");
4114 break;
4115
4116 default:
4117 debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:");
4118 debug_exception(fsf_req->adapter->erp_dbf, 0,
4119 &header->fsf_status, sizeof(u32));
4120 break;
4121 }
4122
4123 skip_fsfstatus:
4124 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) {
4125 retval =
4126 zfcp_fsf_send_fcp_command_task_management_handler(fsf_req);
4127 } else {
4128 retval = zfcp_fsf_send_fcp_command_task_handler(fsf_req);
4129 }
4130 return retval;
4131}
4132
4133/*
4134 * function: zfcp_fsf_send_fcp_command_task_handler
4135 *
4136 * purpose: evaluates FCP_RSP IU
4137 *
4138 * returns:
4139 */
4140static int
4141zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4142{
4143 int retval = 0;
4144 struct scsi_cmnd *scpnt;
4145 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
4146 &(fsf_req->qtcb->bottom.io.fcp_rsp);
4147 struct fcp_cmnd_iu *fcp_cmnd_iu = (struct fcp_cmnd_iu *)
4148 &(fsf_req->qtcb->bottom.io.fcp_cmnd);
4149 u32 sns_len;
4150 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
4151 unsigned long flags;
4152 struct zfcp_unit *unit = fsf_req->data.send_fcp_command_task.unit;
4153
4154 read_lock_irqsave(&fsf_req->adapter->abort_lock, flags);
4155 scpnt = fsf_req->data.send_fcp_command_task.scsi_cmnd;
4156 if (unlikely(!scpnt)) {
4157 ZFCP_LOG_DEBUG
4158 		    ("Command with fsf_req %p is not associated with "
4159 "a scsi command anymore. Aborted?\n", fsf_req);
4160 goto out;
4161 }
4162 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
4163 /* FIXME: (design) mid-layer should handle DID_ABORT like
4164 * DID_SOFT_ERROR by retrying the request for devices
4165 * that allow retries.
4166 */
4167 ZFCP_LOG_DEBUG("Setting DID_SOFT_ERROR and SUGGEST_RETRY\n");
4168 set_host_byte(&scpnt->result, DID_SOFT_ERROR);
4169 set_driver_byte(&scpnt->result, SUGGEST_RETRY);
4170 goto skip_fsfstatus;
4171 }
4172
4173 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
4174 ZFCP_LOG_DEBUG("Setting DID_ERROR\n");
4175 set_host_byte(&scpnt->result, DID_ERROR);
4176 goto skip_fsfstatus;
4177 }
4178
4179 /* set message byte of result in SCSI command */
4180 scpnt->result |= COMMAND_COMPLETE << 8;
4181
4182 /*
4183 * copy SCSI status code of FCP_STATUS of FCP_RSP IU to status byte
4184 * of result in SCSI command
4185 */
4186 scpnt->result |= fcp_rsp_iu->scsi_status;
4187 if (unlikely(fcp_rsp_iu->scsi_status)) {
4188 /* DEBUG */
4189 ZFCP_LOG_DEBUG("status for SCSI Command:\n");
4190 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
4191 scpnt->cmnd, scpnt->cmd_len);
4192 ZFCP_LOG_DEBUG("SCSI status code 0x%x\n",
4193 fcp_rsp_iu->scsi_status);
4194 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
4195 (void *) fcp_rsp_iu, sizeof (struct fcp_rsp_iu));
4196 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
4197 zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu),
4198 fcp_rsp_iu->fcp_sns_len);
4199 }
4200
4201 /* check FCP_RSP_INFO */
4202 if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
4203 ZFCP_LOG_DEBUG("rsp_len is valid\n");
4204 switch (fcp_rsp_info[3]) {
4205 case RSP_CODE_GOOD:
4206 ZFCP_LOG_FLAGS(3, "RSP_CODE_GOOD\n");
4207 /* ok, continue */
4208 ZFCP_LOG_TRACE("no failure or Task Management "
4209 "Function complete\n");
4210 set_host_byte(&scpnt->result, DID_OK);
4211 break;
4212 case RSP_CODE_LENGTH_MISMATCH:
4213 ZFCP_LOG_FLAGS(0, "RSP_CODE_LENGTH_MISMATCH\n");
4214 /* hardware bug */
4215 			ZFCP_LOG_NORMAL("bug: FCP response code indicates "
4216 "that the fibrechannel protocol data "
4217 "length differs from the burst length. "
4218 					"The problem occurred on unit 0x%016Lx "
4219 					"on port 0x%016Lx on adapter %s.\n",
4220 unit->fcp_lun,
4221 unit->port->wwpn,
4222 zfcp_get_busid_by_unit(unit));
4223 /* dump SCSI CDB as prepared by zfcp */
4224 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
4225 (char *) &fsf_req->qtcb->
4226 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
4227 zfcp_cmd_dbf_event_fsf("clenmis", fsf_req, NULL, 0);
4228 set_host_byte(&scpnt->result, DID_ERROR);
4229 goto skip_fsfstatus;
4230 case RSP_CODE_FIELD_INVALID:
4231 ZFCP_LOG_FLAGS(0, "RSP_CODE_FIELD_INVALID\n");
4232 /* driver or hardware bug */
4233 			ZFCP_LOG_NORMAL("bug: FCP response code indicates "
4234 "that the fibrechannel protocol data "
4235 "fields were incorrectly set up. "
4236 					"The problem occurred on unit "
4237 					"0x%016Lx on port 0x%016Lx on "
4238 					"adapter %s.\n",
4239 unit->fcp_lun,
4240 unit->port->wwpn,
4241 zfcp_get_busid_by_unit(unit));
4242 /* dump SCSI CDB as prepared by zfcp */
4243 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
4244 (char *) &fsf_req->qtcb->
4245 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
4246 set_host_byte(&scpnt->result, DID_ERROR);
4247 zfcp_cmd_dbf_event_fsf("codeinv", fsf_req, NULL, 0);
4248 goto skip_fsfstatus;
4249 case RSP_CODE_RO_MISMATCH:
4250 ZFCP_LOG_FLAGS(0, "RSP_CODE_RO_MISMATCH\n");
4251 /* hardware bug */
4252 ZFCP_LOG_NORMAL("bug: The FCP response code indicates "
4253 "that conflicting values for the "
4254 "fibrechannel payload offset from the "
4255 "header were found. "
4256 "The problem occured on unit 0x%016Lx "
4257 "on port 0x%016Lx on adapter %s.\n",
4258 unit->fcp_lun,
4259 unit->port->wwpn,
4260 zfcp_get_busid_by_unit(unit));
4261 /* dump SCSI CDB as prepared by zfcp */
4262 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
4263 (char *) &fsf_req->qtcb->
4264 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
4265 zfcp_cmd_dbf_event_fsf("codemism", fsf_req, NULL, 0);
4266 set_host_byte(&scpnt->result, DID_ERROR);
4267 goto skip_fsfstatus;
4268 default:
4269 ZFCP_LOG_NORMAL("bug: An invalid FCP response "
4270 "code was detected for a command. "
4271 					"The problem occurred on unit "
4272 "0x%016Lx on port 0x%016Lx on "
4273 "adapter %s (debug info 0x%x)\n",
4274 unit->fcp_lun,
4275 unit->port->wwpn,
4276 zfcp_get_busid_by_unit(unit),
4277 fcp_rsp_info[3]);
4278 /* dump SCSI CDB as prepared by zfcp */
4279 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
4280 (char *) &fsf_req->qtcb->
4281 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
4282 zfcp_cmd_dbf_event_fsf("undeffcp", fsf_req, NULL, 0);
4283 set_host_byte(&scpnt->result, DID_ERROR);
4284 }
4285 }
4286
4287 /* check for sense data */
4288 if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) {
4289 sns_len = FSF_FCP_RSP_SIZE -
4290 sizeof (struct fcp_rsp_iu) + fcp_rsp_iu->fcp_rsp_len;
4291 ZFCP_LOG_TRACE("room for %i bytes sense data in QTCB\n",
4292 sns_len);
4293 sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
4294 ZFCP_LOG_TRACE("room for %i bytes sense data in SCSI command\n",
4295 SCSI_SENSE_BUFFERSIZE);
4296 sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
4297 ZFCP_LOG_TRACE("scpnt->result =0x%x, command was:\n",
4298 scpnt->result);
4299 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
4300 (void *) &scpnt->cmnd, scpnt->cmd_len);
4301
4302 ZFCP_LOG_TRACE("%i bytes sense data provided by FCP\n",
4303 fcp_rsp_iu->fcp_sns_len);
4304 memcpy(&scpnt->sense_buffer,
4305 zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
4306 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
4307 (void *) &scpnt->sense_buffer, sns_len);
4308 }
4309
4310 /* check for overrun */
4311 if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_over)) {
4312 ZFCP_LOG_INFO("A data overrun was detected for a command. "
4313 "unit 0x%016Lx, port 0x%016Lx, adapter %s. "
4314 "The response data length is "
4315 "%d, the original length was %d.\n",
4316 unit->fcp_lun,
4317 unit->port->wwpn,
4318 zfcp_get_busid_by_unit(unit),
4319 fcp_rsp_iu->fcp_resid,
4320 (int) zfcp_get_fcp_dl(fcp_cmnd_iu));
4321 }
4322
4323 /* check for underrun */
4324 if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) {
4325 ZFCP_LOG_INFO("A data underrun was detected for a command. "
4326 "unit 0x%016Lx, port 0x%016Lx, adapter %s. "
4327 "The response data length is "
4328 "%d, the original length was %d.\n",
4329 unit->fcp_lun,
4330 unit->port->wwpn,
4331 zfcp_get_busid_by_unit(unit),
4332 fcp_rsp_iu->fcp_resid,
4333 (int) zfcp_get_fcp_dl(fcp_cmnd_iu));
4334
4335 scpnt->resid = fcp_rsp_iu->fcp_resid;
4336 if (scpnt->request_bufflen - scpnt->resid < scpnt->underflow)
4337 scpnt->result |= DID_ERROR << 16;
4338 }
4339
4340 skip_fsfstatus:
4341 ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result);
4342
4343 zfcp_cmd_dbf_event_scsi("response", scpnt);
4344
4345 /* cleanup pointer (need this especially for abort) */
4346 scpnt->host_scribble = NULL;
4347
4348 /*
4349 * NOTE:
4350 * according to the outcome of a discussion on linux-scsi we
4351 * don't need to grab the io_request_lock here since we use
4352 * the new eh
4353 */
4354 /* always call back */
4355
4356 (scpnt->scsi_done) (scpnt);
4357
4358 /*
4359 * We must hold this lock until scsi_done has been called.
4360	 * Otherwise we may call scsi_done after an abort for this
4361	 * command has already completed.
4362 * Note: scsi_done must not block!
4363 */
4364 out:
4365 read_unlock_irqrestore(&fsf_req->adapter->abort_lock, flags);
4366 return retval;
4367}
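
/*
 * Illustrative arithmetic for the sense-length clamping above (a
 * sketch with hypothetical values; only FSF_FCP_RSP_SIZE == 128 is
 * taken from this driver, the other numbers are assumptions):
 *
 *	sns_len = 128 - sizeof(struct fcp_rsp_iu) + fcp_rsp_len
 *		= 128 - 24 + 8				= 112	(room in QTCB)
 *	sns_len = min(sns_len, SCSI_SENSE_BUFFERSIZE)
 *		= min(112, 96)				=  96	(room in midlayer)
 *	sns_len = min(sns_len, fcp_sns_len)
 *		= min(96, 18)				=  18	(data provided)
 *
 * so the memcpy never exceeds either buffer nor the amount of sense
 * data actually provided by the FCP_RSP IU.
 */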
4368
4369/*
4370 * function: zfcp_fsf_send_fcp_command_task_management_handler
4371 *
4372 * purpose: evaluates FCP_RSP IU
4373 *
4374 * returns:
4375 */
4376static int
4377zfcp_fsf_send_fcp_command_task_management_handler(struct zfcp_fsf_req *fsf_req)
4378{
4379 int retval = 0;
4380 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
4381 &(fsf_req->qtcb->bottom.io.fcp_rsp);
4382 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
4383 struct zfcp_unit *unit =
4384 fsf_req->data.send_fcp_command_task_management.unit;
4385
4386 del_timer(&fsf_req->adapter->scsi_er_timer);
4387 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
4388 fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
4389 goto skip_fsfstatus;
4390 }
4391
4392 /* check FCP_RSP_INFO */
4393 switch (fcp_rsp_info[3]) {
4394 case RSP_CODE_GOOD:
4395 ZFCP_LOG_FLAGS(3, "RSP_CODE_GOOD\n");
4396 /* ok, continue */
4397 ZFCP_LOG_DEBUG("no failure or Task Management "
4398 "Function complete\n");
4399 break;
4400 case RSP_CODE_TASKMAN_UNSUPP:
4401 ZFCP_LOG_FLAGS(0, "RSP_CODE_TASKMAN_UNSUPP\n");
4402		ZFCP_LOG_NORMAL("bug: A requested task management function "
4403				"is not supported on the target device "
4404				"unit 0x%016Lx, port 0x%016Lx, adapter %s\n",
4405 unit->fcp_lun,
4406 unit->port->wwpn,
4407 zfcp_get_busid_by_unit(unit));
4408 fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP;
4409 break;
4410 case RSP_CODE_TASKMAN_FAILED:
4411 ZFCP_LOG_FLAGS(0, "RSP_CODE_TASKMAN_FAILED\n");
4412		ZFCP_LOG_NORMAL("bug: A requested task management function "
4413 "failed to complete successfully. "
4414 "unit 0x%016Lx, port 0x%016Lx, adapter %s.\n",
4415 unit->fcp_lun,
4416 unit->port->wwpn,
4417 zfcp_get_busid_by_unit(unit));
4418 fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
4419 break;
4420 default:
4421 ZFCP_LOG_NORMAL("bug: An invalid FCP response "
4422 "code was detected for a command. "
4423 "unit 0x%016Lx, port 0x%016Lx, adapter %s "
4424 "(debug info 0x%x)\n",
4425 unit->fcp_lun,
4426 unit->port->wwpn,
4427 zfcp_get_busid_by_unit(unit),
4428 fcp_rsp_info[3]);
4429 fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
4430 }
4431
4432 skip_fsfstatus:
4433 return retval;
4434}
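
/*
 * Note on fcp_rsp_info[3], used by both handlers above: per the FCP
 * specification (layout assumed here, it is not restated in this
 * driver), the FCP_RSP_INFO field starts with three reserved bytes,
 * so the RSP_CODE is its fourth byte:
 *
 *	byte 0..2	reserved
 *	byte 3		RSP_CODE (RSP_CODE_GOOD, ...)
 *	byte 4..7	reserved
 */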
4435
4436
4437/*
4438 * function: zfcp_fsf_control_file
4439 *
4440 * purpose: Initiator of the control file upload/download FSF requests
4441 *
4442 * returns:	0 - FSF request is successfully created and queued
4443 * -EOPNOTSUPP - The FCP adapter does not have Control File support
4444 * -EINVAL - Invalid direction specified
4445 * -ENOMEM - Insufficient memory
4446 * -EPERM - Cannot create FSF request or place it in QDIO queue
4447 */
4448int
4449zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4450 struct zfcp_fsf_req **fsf_req_ptr,
4451 u32 fsf_command,
4452 u32 option,
4453 struct zfcp_sg_list *sg_list)
4454{
4455 struct zfcp_fsf_req *fsf_req;
4456 struct fsf_qtcb_bottom_support *bottom;
4457 volatile struct qdio_buffer_element *sbale;
4458 struct timer_list *timer;
4459 unsigned long lock_flags;
4460 int req_flags = 0;
4461 int direction;
4462 int retval = 0;
4463
4464 if (!(adapter->supported_features & FSF_FEATURE_CFDC)) {
4465 ZFCP_LOG_INFO("cfdc not supported (adapter %s)\n",
4466 zfcp_get_busid_by_adapter(adapter));
4467 retval = -EOPNOTSUPP;
4468 goto out;
4469 }
4470
4471 switch (fsf_command) {
4472
4473 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
4474 direction = SBAL_FLAGS0_TYPE_WRITE;
4475 if ((option != FSF_CFDC_OPTION_FULL_ACCESS) &&
4476 (option != FSF_CFDC_OPTION_RESTRICTED_ACCESS))
4477 req_flags = ZFCP_WAIT_FOR_SBAL;
4478 break;
4479
4480 case FSF_QTCB_UPLOAD_CONTROL_FILE:
4481 direction = SBAL_FLAGS0_TYPE_READ;
4482 break;
4483
4484 default:
4485 ZFCP_LOG_INFO("Invalid FSF command code 0x%08x\n", fsf_command);
4486 retval = -EINVAL;
4487 goto out;
4488 }
4489
4490 timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL);
4491 if (!timer) {
4492 retval = -ENOMEM;
4493 goto out;
4494 }
4495
4496 retval = zfcp_fsf_req_create(adapter, fsf_command, req_flags,
4497 NULL, &lock_flags, &fsf_req);
4498 if (retval < 0) {
4499 ZFCP_LOG_INFO("error: Could not create FSF request for the "
4500 "adapter %s\n",
4501 zfcp_get_busid_by_adapter(adapter));
4502 retval = -EPERM;
4503 goto unlock_queue_lock;
4504 }
4505
4506 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
4507 sbale[0].flags |= direction;
4508
4509 bottom = &fsf_req->qtcb->bottom.support;
4510 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
4511 bottom->option = option;
4512
4513 if (sg_list->count > 0) {
4514 int bytes;
4515
4516 bytes = zfcp_qdio_sbals_from_sg(fsf_req, direction,
4517 sg_list->sg, sg_list->count,
4518 ZFCP_MAX_SBALS_PER_REQ);
4519 if (bytes != ZFCP_CFDC_MAX_CONTROL_FILE_SIZE) {
4520 ZFCP_LOG_INFO(
4521 "error: Could not create sufficient number of "
4522 "SBALS for an FSF request to the adapter %s\n",
4523 zfcp_get_busid_by_adapter(adapter));
4524 retval = -ENOMEM;
4525 goto free_fsf_req;
4526 }
4527 } else
4528 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
4529
4530 init_timer(timer);
4531 timer->function = zfcp_fsf_request_timeout_handler;
4532 timer->data = (unsigned long) adapter;
4533 timer->expires = ZFCP_FSF_REQUEST_TIMEOUT;
4534
4535 retval = zfcp_fsf_req_send(fsf_req, timer);
4536 if (retval < 0) {
4537		ZFCP_LOG_INFO("initiation of cfdc up/download failed "
4538 "(adapter %s)\n",
4539 zfcp_get_busid_by_adapter(adapter));
4540 retval = -EPERM;
4541 goto free_fsf_req;
4542 }
4543 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
4544
4545 ZFCP_LOG_NORMAL("Control file %s FSF request has been sent to the "
4546 "adapter %s\n",
4547 fsf_command == FSF_QTCB_DOWNLOAD_CONTROL_FILE ?
4548 "download" : "upload",
4549 zfcp_get_busid_by_adapter(adapter));
4550
4551 wait_event(fsf_req->completion_wq,
4552 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
4553
4554 *fsf_req_ptr = fsf_req;
4555 del_timer_sync(timer);
4556 goto free_timer;
4557
4558 free_fsf_req:
4559 zfcp_fsf_req_free(fsf_req);
4560 unlock_queue_lock:
4561 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
4562 free_timer:
4563 kfree(timer);
4564 out:
4565 return retval;
4566}
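
/*
 * Illustrative caller sketch (hypothetical; error handling shortened,
 * local variable names are assumptions). Since the request here is
 * created without ZFCP_REQ_AUTO_CLEANUP, the caller owns the returned
 * fsf_req and releases it itself:
 *
 *	struct zfcp_fsf_req *fsf_req;
 *	int rc;
 *
 *	rc = zfcp_fsf_control_file(adapter, &fsf_req,
 *				   FSF_QTCB_DOWNLOAD_CONTROL_FILE,
 *				   FSF_CFDC_OPTION_NORMAL_MODE, sg_list);
 *	if (rc == 0) {
 *		(evaluate fsf_req->status and fsf_req->qtcb here)
 *		zfcp_fsf_req_cleanup(fsf_req);
 *	}
 */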
4567
4568
4569/*
4570 * function: zfcp_fsf_control_file_handler
4571 *
4572 * purpose: Handler of the control file upload/download FSF requests
4573 *
4574 * returns:	0 - FSF request successfully processed
4575 * -EAGAIN - Operation has to be repeated because of a temporary problem
4576 * -EACCES - There is no permission to execute an operation
4577 *		-EPERM - The control file is not in a valid format
4578 * -EIO - There is a problem with the FCP adapter
4579 * -EINVAL - Invalid operation
4580 * -EFAULT - User space memory I/O operation fault
4581 */
4582static int
4583zfcp_fsf_control_file_handler(struct zfcp_fsf_req *fsf_req)
4584{
4585 struct zfcp_adapter *adapter = fsf_req->adapter;
4586 struct fsf_qtcb_header *header = &fsf_req->qtcb->header;
4587 struct fsf_qtcb_bottom_support *bottom = &fsf_req->qtcb->bottom.support;
4588 int retval = 0;
4589
4590 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
4591 retval = -EINVAL;
4592 goto skip_fsfstatus;
4593 }
4594
4595 switch (header->fsf_status) {
4596
4597 case FSF_GOOD:
4598 ZFCP_LOG_FLAGS(2, "FSF_GOOD\n");
4599 ZFCP_LOG_NORMAL(
4600 "The FSF request has been successfully completed "
4601 "on the adapter %s\n",
4602 zfcp_get_busid_by_adapter(adapter));
4603 break;
4604
4605 case FSF_OPERATION_PARTIALLY_SUCCESSFUL:
4606 ZFCP_LOG_FLAGS(2, "FSF_OPERATION_PARTIALLY_SUCCESSFUL\n");
4607 if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE) {
4608 switch (header->fsf_status_qual.word[0]) {
4609
4610 case FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE:
4611 ZFCP_LOG_NORMAL(
4612 "CFDC of the adapter %s could not "
4613 "be saved on the SE\n",
4614 zfcp_get_busid_by_adapter(adapter));
4615 break;
4616
4617 case FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE2:
4618 ZFCP_LOG_NORMAL(
4619 "CFDC of the adapter %s could not "
4620 "be copied to the secondary SE\n",
4621 zfcp_get_busid_by_adapter(adapter));
4622 break;
4623
4624 default:
4625 ZFCP_LOG_NORMAL(
4626 "CFDC could not be hardened "
4627 "on the adapter %s\n",
4628 zfcp_get_busid_by_adapter(adapter));
4629 }
4630 }
4631 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4632 retval = -EAGAIN;
4633 break;
4634
4635 case FSF_AUTHORIZATION_FAILURE:
4636 ZFCP_LOG_FLAGS(2, "FSF_AUTHORIZATION_FAILURE\n");
4637 ZFCP_LOG_NORMAL(
4638 "Adapter %s does not accept privileged commands\n",
4639 zfcp_get_busid_by_adapter(adapter));
4640 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4641 retval = -EACCES;
4642 break;
4643
4644 case FSF_CFDC_ERROR_DETECTED:
4645 ZFCP_LOG_FLAGS(2, "FSF_CFDC_ERROR_DETECTED\n");
4646 ZFCP_LOG_NORMAL(
4647 "Error at position %d in the CFDC, "
4648 "CFDC is discarded by the adapter %s\n",
4649 header->fsf_status_qual.word[0],
4650 zfcp_get_busid_by_adapter(adapter));
4651 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4652 retval = -EPERM;
4653 break;
4654
4655 case FSF_CONTROL_FILE_UPDATE_ERROR:
4656 ZFCP_LOG_FLAGS(2, "FSF_CONTROL_FILE_UPDATE_ERROR\n");
4657 ZFCP_LOG_NORMAL(
4658 "Adapter %s cannot harden the control file, "
4659 "file is discarded\n",
4660 zfcp_get_busid_by_adapter(adapter));
4661 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4662 retval = -EIO;
4663 break;
4664
4665 case FSF_CONTROL_FILE_TOO_LARGE:
4666 ZFCP_LOG_FLAGS(2, "FSF_CONTROL_FILE_TOO_LARGE\n");
4667 ZFCP_LOG_NORMAL(
4668 "Control file is too large, file is discarded "
4669 "by the adapter %s\n",
4670 zfcp_get_busid_by_adapter(adapter));
4671 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4672 retval = -EIO;
4673 break;
4674
4675 case FSF_ACCESS_CONFLICT_DETECTED:
4676 ZFCP_LOG_FLAGS(2, "FSF_ACCESS_CONFLICT_DETECTED\n");
4677 if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE)
4678 ZFCP_LOG_NORMAL(
4679 "CFDC has been discarded by the adapter %s, "
4680 "because activation would impact "
4681 "%d active connection(s)\n",
4682 zfcp_get_busid_by_adapter(adapter),
4683 header->fsf_status_qual.word[0]);
4684 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4685 retval = -EIO;
4686 break;
4687
4688 case FSF_CONFLICTS_OVERRULED:
4689 ZFCP_LOG_FLAGS(2, "FSF_CONFLICTS_OVERRULED\n");
4690 if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE)
4691 ZFCP_LOG_NORMAL(
4692 "CFDC has been activated on the adapter %s, "
4693 "but activation has impacted "
4694 "%d active connection(s)\n",
4695 zfcp_get_busid_by_adapter(adapter),
4696 header->fsf_status_qual.word[0]);
4697 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4698 retval = -EIO;
4699 break;
4700
4701 case FSF_UNKNOWN_OP_SUBTYPE:
4702 ZFCP_LOG_FLAGS(2, "FSF_UNKNOWN_OP_SUBTYPE\n");
4703 ZFCP_LOG_NORMAL("unknown operation subtype (adapter: %s, "
4704 "op_subtype=0x%x)\n",
4705 zfcp_get_busid_by_adapter(adapter),
4706 bottom->operation_subtype);
4707 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4708 retval = -EINVAL;
4709 break;
4710
4711 case FSF_INVALID_COMMAND_OPTION:
4712 ZFCP_LOG_FLAGS(2, "FSF_INVALID_COMMAND_OPTION\n");
4713 ZFCP_LOG_NORMAL(
4714 "Invalid option 0x%x has been specified "
4715 "in QTCB bottom sent to the adapter %s\n",
4716 bottom->option,
4717 zfcp_get_busid_by_adapter(adapter));
4718 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4719 retval = -EINVAL;
4720 break;
4721
4722 default:
4723 ZFCP_LOG_NORMAL(
4724 "bug: An unknown/unexpected FSF status 0x%08x "
4725 "was presented on the adapter %s\n",
4726 header->fsf_status,
4727 zfcp_get_busid_by_adapter(adapter));
4728 debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_sq_inval");
4729 debug_exception(fsf_req->adapter->erp_dbf, 0,
4730 &header->fsf_status_qual.word[0], sizeof(u32));
4731 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4732 retval = -EINVAL;
4733 break;
4734 }
4735
4736skip_fsfstatus:
4737 return retval;
4738}
4739
4740
4741/*
4742 * function: zfcp_fsf_req_wait_and_cleanup
4743 *
4744 * purpose:
4745 *
4746 * FIXME(design): signal seems to be <0 !!!
4747 * returns: 0 - request completed (*status is valid), cleanup succ.
4748 * <0 - request completed (*status is valid), cleanup failed
4749 * >0 - signal which interrupted waiting (*status invalid),
4750 * request not completed, no cleanup
4751 *
4752 * *status is a copy of status of completed fsf_req
4753 */
4754int
4755zfcp_fsf_req_wait_and_cleanup(struct zfcp_fsf_req *fsf_req,
4756 int interruptible, u32 * status)
4757{
4758 int retval = 0;
4759 int signal = 0;
4760
4761 if (interruptible) {
4762 __wait_event_interruptible(fsf_req->completion_wq,
4763 fsf_req->status &
4764 ZFCP_STATUS_FSFREQ_COMPLETED,
4765 signal);
4766 if (signal) {
4767 ZFCP_LOG_DEBUG("Caught signal %i while waiting for the "
4768 "completion of the request at %p\n",
4769 signal, fsf_req);
4770 retval = signal;
4771 goto out;
4772 }
4773 } else {
4774 __wait_event(fsf_req->completion_wq,
4775 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
4776 }
4777
4778 *status = fsf_req->status;
4779
4780 /* cleanup request */
4781 zfcp_fsf_req_cleanup(fsf_req);
4782 out:
4783 return retval;
4784}
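
/*
 * Note on the FIXME above: __wait_event_interruptible() stores a
 * negative errno (-ERESTARTSYS) in "signal" when a signal is caught,
 * so the ">0" case documented in the function header is in practice
 * returned as a negative value; going by the header alone, callers
 * cannot distinguish "interrupted, request still pending" from
 * "completed, cleanup failed" by the sign of the return value.
 */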
4785
4786static inline int
4787zfcp_fsf_req_sbal_check(unsigned long *flags,
4788 struct zfcp_qdio_queue *queue, int needed)
4789{
4790 write_lock_irqsave(&queue->queue_lock, *flags);
4791 if (likely(atomic_read(&queue->free_count) >= needed))
4792 return 1;
4793 write_unlock_irqrestore(&queue->queue_lock, *flags);
4794 return 0;
4795}
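
/*
 * Usage sketch (hypothetical caller): note the asymmetric locking
 * above - the queue_lock stays held if and only if enough SBALs are
 * free, so a caller unlocks only on the success path:
 *
 *	if (zfcp_fsf_req_sbal_check(&flags, req_queue, 1)) {
 *		(set up SBALs while holding the lock)
 *		write_unlock_irqrestore(&req_queue->queue_lock, flags);
 *	}
 */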
4796
4797/*
4798 * set qtcb pointer in fsf_req and initialize QTCB
4799 */
4800static inline void
4801zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req, u32 fsf_cmd)
4802{
4803 if (likely(fsf_req->qtcb != NULL)) {
4804 fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req;
4805 fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION;
4806 fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
4807 fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION;
4808 fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req;
4809 fsf_req->qtcb->header.fsf_command = fsf_cmd;
4810 }
4811}
4812
4813/**
4814 * zfcp_fsf_req_sbal_get - try to get one SBAL in the request queue
4815 * @adapter: adapter for which request queue is examined
4816 * @req_flags: flags indicating whether to wait for needed SBAL or not
4817 * @lock_flags: lock_flags if queue_lock is taken
4818 * Return: 0 on success, otherwise -EIO, or -ERESTARTSYS
4819 * Locks: lock adapter->request_queue->queue_lock on success
4820 */
4821static int
4822zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter, int req_flags,
4823 unsigned long *lock_flags)
4824{
4825 long ret;
4826 struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
4827
4828 if (unlikely(req_flags & ZFCP_WAIT_FOR_SBAL)) {
4829 ret = wait_event_interruptible_timeout(adapter->request_wq,
4830 zfcp_fsf_req_sbal_check(lock_flags, req_queue, 1),
4831 ZFCP_SBAL_TIMEOUT);
4832 if (ret < 0)
4833 return ret;
4834 if (!ret)
4835 return -EIO;
4836 } else if (!zfcp_fsf_req_sbal_check(lock_flags, req_queue, 1))
4837 return -EIO;
4838
4839 return 0;
4840}
4841
4842/*
4843 * function: zfcp_fsf_req_create
4844 *
4845 * purpose: create an FSF request at the specified adapter and
4846 * setup common fields
4847 *
4848 * returns: -ENOMEM if there was insufficient memory for a request
4849 *		-EIO if no qdio buffers could be allocated to the request
4850 *		-EINVAL/-EPERM on bug conditions in req_dequeue
4851 *		0 on success
4852 *
4853 * note: The created request is returned by reference.
4854 *
4855 * locks: lock of concerned request queue must not be held,
4856 * but is held on completion (write, irqsave)
4857 */
4858int
4859zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4860 mempool_t *pool, unsigned long *lock_flags,
4861 struct zfcp_fsf_req **fsf_req_p)
4862{
4863 volatile struct qdio_buffer_element *sbale;
4864 struct zfcp_fsf_req *fsf_req = NULL;
4865 int ret = 0;
4866 struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
4867
4868 /* allocate new FSF request */
4869 fsf_req = zfcp_fsf_req_alloc(pool, req_flags);
4870 if (unlikely(NULL == fsf_req)) {
4871		ZFCP_LOG_DEBUG("error: Could not put an FSF request into "
4872 "the outbound (send) queue.\n");
4873 ret = -ENOMEM;
4874 goto failed_fsf_req;
4875 }
4876
4877 zfcp_fsf_req_qtcb_init(fsf_req, fsf_cmd);
4878
4879	/* initialize waitqueue which may be used to wait for
4880	   this request's completion */
4881 init_waitqueue_head(&fsf_req->completion_wq);
4882
4883 ret = zfcp_fsf_req_sbal_get(adapter, req_flags, lock_flags);
4884 if(ret < 0) {
4885 goto failed_sbals;
4886 }
4887
4888 /*
4889 * We hold queue_lock here. Check if QDIOUP is set and let request fail
4890 * if it is not set (see also *_open_qdio and *_close_qdio).
4891 */
4892
4893 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
4894 write_unlock_irqrestore(&req_queue->queue_lock, *lock_flags);
4895 ret = -EIO;
4896 goto failed_sbals;
4897 }
4898
4899 fsf_req->adapter = adapter; /* pointer to "parent" adapter */
4900 fsf_req->fsf_command = fsf_cmd;
4901 fsf_req->sbal_number = 1;
4902 fsf_req->sbal_first = req_queue->free_index;
4903 fsf_req->sbal_curr = req_queue->free_index;
4904 fsf_req->sbale_curr = 1;
4905
4906 if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP)) {
4907 fsf_req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
4908 }
4909
4910 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
4911
4912 /* setup common SBALE fields */
4913 sbale[0].addr = fsf_req;
4914 sbale[0].flags |= SBAL_FLAGS0_COMMAND;
4915 if (likely(fsf_req->qtcb != NULL)) {
4916 sbale[1].addr = (void *) fsf_req->qtcb;
4917 sbale[1].length = sizeof(struct fsf_qtcb);
4918 }
4919
4920 ZFCP_LOG_TRACE("got %i free BUFFERs starting at index %i\n",
4921 fsf_req->sbal_number, fsf_req->sbal_first);
4922
4923 goto success;
4924
4925 failed_sbals:
4926	/* dequeue new FSF request previously enqueued */
4927 zfcp_fsf_req_free(fsf_req);
4928 fsf_req = NULL;
4929
4930 failed_fsf_req:
4931 write_lock_irqsave(&req_queue->queue_lock, *lock_flags);
4932 success:
4933 *fsf_req_p = fsf_req;
4934 return ret;
4935}
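
/*
 * Illustrative pairing of create and send (a sketch; error paths
 * omitted): the queue_lock taken by zfcp_fsf_req_create() is released
 * by the caller after zfcp_fsf_req_send(), as zfcp_fsf_control_file()
 * above does:
 *
 *	ret = zfcp_fsf_req_create(adapter, fsf_cmd, req_flags, pool,
 *				  &lock_flags, &fsf_req);
 *	(fill SBALEs and the QTCB bottom)
 *	ret = zfcp_fsf_req_send(fsf_req, timer);
 *	write_unlock_irqrestore(&adapter->request_queue.queue_lock,
 *				lock_flags);
 */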
4936
4937/*
4938 * function: zfcp_fsf_req_send
4939 *
4940 * purpose: start transfer of FSF request via QDIO
4941 *
4942 * returns:	0 - request transfer successfully started
4943 * !0 - start of request transfer failed
4944 */
4945static int
4946zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4947{
4948 struct zfcp_adapter *adapter;
4949 struct zfcp_qdio_queue *req_queue;
4950 volatile struct qdio_buffer_element *sbale;
4951 int new_distance_from_int;
4952 unsigned long flags;
4953 int inc_seq_no = 1;
4954 int retval = 0;
4955
4956 adapter = fsf_req->adapter;
4957	req_queue = &adapter->request_queue;
4958
4959
4960 /* FIXME(debug): remove it later */
4961 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_first, 0);
4962 ZFCP_LOG_DEBUG("SBALE0 flags=0x%x\n", sbale[0].flags);
4963 ZFCP_LOG_TRACE("HEX DUMP OF SBALE1 PAYLOAD:\n");
4964 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr,
4965 sbale[1].length);
4966
4967 /* set sequence counter in QTCB */
4968 if (likely(fsf_req->qtcb)) {
4969 fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
4970 fsf_req->seq_no = adapter->fsf_req_seq_no;
4971 ZFCP_LOG_TRACE("FSF request %p of adapter %s gets "
4972 "FSF sequence counter value of %i\n",
4973 fsf_req,
4974 zfcp_get_busid_by_adapter(adapter),
4975 fsf_req->qtcb->prefix.req_seq_no);
4976 } else
4977 inc_seq_no = 0;
4978
4979 /* put allocated FSF request at list tail */
4980 write_lock_irqsave(&adapter->fsf_req_list_lock, flags);
4981 list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head);
4982 write_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
4983
4984 /* figure out expiration time of timeout and start timeout */
4985 if (unlikely(timer)) {
4986 timer->expires += jiffies;
4987 add_timer(timer);
4988 }
4989
4990 ZFCP_LOG_TRACE("request queue of adapter %s: "
4991 "next free SBAL is %i, %i free SBALs\n",
4992 zfcp_get_busid_by_adapter(adapter),
4993 req_queue->free_index,
4994 atomic_read(&req_queue->free_count));
4995
4996 ZFCP_LOG_DEBUG("calling do_QDIO adapter %s, flags=0x%x, queue_no=%i, "
4997 "index_in_queue=%i, count=%i, buffers=%p\n",
4998 zfcp_get_busid_by_adapter(adapter),
4999 QDIO_FLAG_SYNC_OUTPUT,
5000 0, fsf_req->sbal_first, fsf_req->sbal_number,
5001 &req_queue->buffer[fsf_req->sbal_first]);
5002
5003 /*
5004 * adjust the number of free SBALs in request queue as well as
5005 * position of first one
5006 */
5007 atomic_sub(fsf_req->sbal_number, &req_queue->free_count);
5008 ZFCP_LOG_TRACE("free_count=%d\n", atomic_read(&req_queue->free_count));
5009 req_queue->free_index += fsf_req->sbal_number; /* increase */
5010 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap if needed */
5011 new_distance_from_int = zfcp_qdio_determine_pci(req_queue, fsf_req);
5012
5013 retval = do_QDIO(adapter->ccw_device,
5014 QDIO_FLAG_SYNC_OUTPUT,
5015 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL);
5016
5017 if (unlikely(retval)) {
5018 /* Queues are down..... */
5019 retval = -EIO;
5020 /*
5021 * FIXME(potential race):
5022 * timer might be expired (absolutely unlikely)
5023 */
5024 if (timer)
5025 del_timer(timer);
5026 write_lock_irqsave(&adapter->fsf_req_list_lock, flags);
5027 list_del(&fsf_req->list);
5028 write_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
5029 /*
5030 * adjust the number of free SBALs in request queue as well as
5031 * position of first one
5032 */
5033 zfcp_qdio_zero_sbals(req_queue->buffer,
5034 fsf_req->sbal_first, fsf_req->sbal_number);
5035 atomic_add(fsf_req->sbal_number, &req_queue->free_count);
5036		req_queue->free_index -= fsf_req->sbal_number; /* decrease */
5037 req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q;
5038 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
5039 ZFCP_LOG_DEBUG
5040 ("error: do_QDIO failed. Buffers could not be enqueued "
5041 "to request queue.\n");
5042 } else {
5043 req_queue->distance_from_int = new_distance_from_int;
5044 /*
5045 * increase FSF sequence counter -
5046		 * this must only be done for requests successfully enqueued to
5047		 * QDIO; rejected requests may be cleaned up by calling
5048		 * routines, which would otherwise result in missing sequence
5049		 * counter values
5050 */
5051 /* Don't increase for unsolicited status */
5052 if (likely(inc_seq_no)) {
5053 adapter->fsf_req_seq_no++;
5054 ZFCP_LOG_TRACE
5055 ("FSF sequence counter value of adapter %s "
5056 "increased to %i\n",
5057 zfcp_get_busid_by_adapter(adapter),
5058 adapter->fsf_req_seq_no);
5059 }
5060 /* count FSF requests pending */
5061 atomic_inc(&adapter->fsf_reqs_active);
5062 }
5063 return retval;
5064}
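
/*
 * Worked example of the free_index bookkeeping above (assuming
 * QDIO_MAX_BUFFERS_PER_Q == 128; the other values are hypothetical):
 * with free_index == 126 and sbal_number == 3,
 *
 *	success path:	free_index = (126 + 3) % 128	 = 1
 *	failure path:	free_index = (1 - 3 + 128) % 128 = 126
 *
 * i.e. the do_QDIO error path restores exactly the pre-send state.
 */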
5065
5066/*
5067 * function: zfcp_fsf_req_cleanup
5068 *
5069 * purpose: cleans up an FSF request and removes it from the specified list
5070 *
5071 * returns:
5072 *
5073 * assumption: no pending SB in SBALEs other than QTCB
5074 */
5075void
5076zfcp_fsf_req_cleanup(struct zfcp_fsf_req *fsf_req)
5077{
5078 struct zfcp_adapter *adapter = fsf_req->adapter;
5079 unsigned long flags;
5080
5081 write_lock_irqsave(&adapter->fsf_req_list_lock, flags);
5082 list_del(&fsf_req->list);
5083 write_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
5084 zfcp_fsf_req_free(fsf_req);
5085}
5086
5087#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
new file mode 100644
index 000000000000..5889956bbf08
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -0,0 +1,472 @@
1/*
2 *
3 * linux/drivers/s390/scsi/zfcp_fsf.h
4 *
5 * FCP adapter driver for IBM eServer zSeries
6 *
7 * (C) Copyright IBM Corp. 2002, 2004
8 *
9 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
10 * Raimund Schroeder <raimund.schroeder@de.ibm.com>
11 * Aron Zeh
12 * Wolfgang Taphorn
13 * Stefan Bader <stefan.bader@de.ibm.com>
14 * Heiko Carstens <heiko.carstens@de.ibm.com>
15 * Andreas Herrmann <aherrman@de.ibm.com>
16 * Volker Sameske <sameske@de.ibm.com>
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 */
32
33#ifndef FSF_H
34#define FSF_H
35
36#define FSF_QTCB_VERSION1 0x00000001
37#define FSF_QTCB_CURRENT_VERSION FSF_QTCB_VERSION1
38
39/* FSF commands */
40#define FSF_QTCB_FCP_CMND 0x00000001
41#define FSF_QTCB_ABORT_FCP_CMND 0x00000002
42#define FSF_QTCB_OPEN_PORT_WITH_DID 0x00000005
43#define FSF_QTCB_OPEN_LUN 0x00000006
44#define FSF_QTCB_CLOSE_LUN 0x00000007
45#define FSF_QTCB_CLOSE_PORT 0x00000008
46#define FSF_QTCB_CLOSE_PHYSICAL_PORT 0x00000009
47#define FSF_QTCB_SEND_ELS 0x0000000B
48#define FSF_QTCB_SEND_GENERIC 0x0000000C
49#define FSF_QTCB_EXCHANGE_CONFIG_DATA 0x0000000D
50#define FSF_QTCB_EXCHANGE_PORT_DATA 0x0000000E
51#define FSF_QTCB_DOWNLOAD_CONTROL_FILE 0x00000012
52#define FSF_QTCB_UPLOAD_CONTROL_FILE 0x00000013
53
54/* FSF QTCB types */
55#define FSF_IO_COMMAND 0x00000001
56#define FSF_SUPPORT_COMMAND 0x00000002
57#define FSF_CONFIG_COMMAND 0x00000003
58#define FSF_PORT_COMMAND 0x00000004
59
60/* FSF control file upload/download operations' subtype and options */
61#define FSF_CFDC_OPERATION_SUBTYPE 0x00020001
62#define FSF_CFDC_OPTION_NORMAL_MODE 0x00000000
63#define FSF_CFDC_OPTION_FORCE 0x00000001
64#define FSF_CFDC_OPTION_FULL_ACCESS 0x00000002
65#define FSF_CFDC_OPTION_RESTRICTED_ACCESS 0x00000004
66
67/* FSF protocol status codes */
68#define FSF_PROT_GOOD 0x00000001
69#define FSF_PROT_QTCB_VERSION_ERROR 0x00000010
70#define FSF_PROT_SEQ_NUMB_ERROR 0x00000020
71#define FSF_PROT_UNSUPP_QTCB_TYPE 0x00000040
72#define FSF_PROT_HOST_CONNECTION_INITIALIZING 0x00000080
73#define FSF_PROT_FSF_STATUS_PRESENTED 0x00000100
74#define FSF_PROT_DUPLICATE_REQUEST_ID 0x00000200
75#define FSF_PROT_LINK_DOWN 0x00000400
76#define FSF_PROT_REEST_QUEUE 0x00000800
77#define FSF_PROT_ERROR_STATE 0x01000000
78
79/* FSF status codes */
80#define FSF_GOOD 0x00000000
81#define FSF_PORT_ALREADY_OPEN 0x00000001
82#define FSF_LUN_ALREADY_OPEN 0x00000002
83#define FSF_PORT_HANDLE_NOT_VALID 0x00000003
84#define FSF_LUN_HANDLE_NOT_VALID 0x00000004
85#define FSF_HANDLE_MISMATCH 0x00000005
86#define FSF_SERVICE_CLASS_NOT_SUPPORTED 0x00000006
87#define FSF_FCPLUN_NOT_VALID 0x00000009
88#define FSF_ACCESS_DENIED 0x00000010
89#define FSF_LUN_SHARING_VIOLATION 0x00000012
90#define FSF_FCP_COMMAND_DOES_NOT_EXIST 0x00000022
91#define FSF_DIRECTION_INDICATOR_NOT_VALID 0x00000030
92#define FSF_CMND_LENGTH_NOT_VALID 0x00000033
93#define FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED 0x00000040
94#define FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED 0x00000041
95#define FSF_ELS_COMMAND_REJECTED 0x00000050
96#define FSF_GENERIC_COMMAND_REJECTED 0x00000051
97#define FSF_OPERATION_PARTIALLY_SUCCESSFUL 0x00000052
98#define FSF_AUTHORIZATION_FAILURE 0x00000053
99#define FSF_CFDC_ERROR_DETECTED 0x00000054
100#define FSF_CONTROL_FILE_UPDATE_ERROR 0x00000055
101#define FSF_CONTROL_FILE_TOO_LARGE 0x00000056
102#define FSF_ACCESS_CONFLICT_DETECTED 0x00000057
103#define FSF_CONFLICTS_OVERRULED 0x00000058
104#define FSF_PORT_BOXED 0x00000059
105#define FSF_LUN_BOXED 0x0000005A
106#define FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE 0x0000005B
107#define FSF_PAYLOAD_SIZE_MISMATCH 0x00000060
108#define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061
109#define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062
110#define FSF_SBAL_MISMATCH 0x00000063
111#define FSF_OPEN_PORT_WITHOUT_PRLI 0x00000064
112#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
113#define FSF_FCP_RSP_AVAILABLE 0x000000AF
114#define FSF_UNKNOWN_COMMAND 0x000000E2
115#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
116#define FSF_INVALID_COMMAND_OPTION 0x000000E5
117/* #define FSF_ERROR 0x000000FF */
118
119#define FSF_STATUS_QUALIFIER_SIZE 16
120
121/* FSF status qualifier, recommendations */
122#define FSF_SQ_NO_RECOM 0x00
123#define FSF_SQ_FCP_RSP_AVAILABLE 0x01
124#define FSF_SQ_RETRY_IF_POSSIBLE 0x02
125#define FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED 0x03
126#define FSF_SQ_INVOKE_LINK_TEST_PROCEDURE 0x04
127#define FSF_SQ_ULP_PROGRAMMING_ERROR 0x05
128#define FSF_SQ_COMMAND_ABORTED 0x06
129#define FSF_SQ_NO_RETRY_POSSIBLE 0x07
130
131/* FSF status qualifier for CFDC commands */
132#define FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE 0x00000001
133#define FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE2 0x00000002
134/* CFDC subtable codes */
135#define FSF_SQ_CFDC_SUBTABLE_OS 0x0001
136#define FSF_SQ_CFDC_SUBTABLE_PORT_WWPN 0x0002
137#define FSF_SQ_CFDC_SUBTABLE_PORT_DID 0x0003
138#define FSF_SQ_CFDC_SUBTABLE_LUN 0x0004
139
140/* FSF status qualifier (most significant 4 bytes), local link down */
141#define FSF_PSQ_LINK_NOLIGHT 0x00000004
142#define FSF_PSQ_LINK_WRAPPLUG 0x00000008
143#define FSF_PSQ_LINK_NOFCP 0x00000010
144
145/* payload size in status read buffer */
146#define FSF_STATUS_READ_PAYLOAD_SIZE 4032
147
148/* number of status read buffers that should be sent by ULP */
149#define FSF_STATUS_READS_RECOM 16
150
151/* status types in status read buffer */
152#define FSF_STATUS_READ_PORT_CLOSED 0x00000001
153#define FSF_STATUS_READ_INCOMING_ELS 0x00000002
154#define FSF_STATUS_READ_SENSE_DATA_AVAIL 0x00000003
155#define FSF_STATUS_READ_BIT_ERROR_THRESHOLD 0x00000004
156#define FSF_STATUS_READ_LINK_DOWN 0x00000005 /* FIXME: really? */
157#define FSF_STATUS_READ_LINK_UP 0x00000006
158#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A
159#define FSF_STATUS_READ_CFDC_HARDENED 0x0000000B
160
161/* status subtypes in status read buffer */
162#define FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT 0x00000001
163#define FSF_STATUS_READ_SUB_ERROR_PORT 0x00000002
164
165/* status subtypes for CFDC */
166#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE 0x00000002
167#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2 0x0000000F
168
169/* topology that is detected by the adapter */
170#define FSF_TOPO_ERROR 0x00000000
171#define FSF_TOPO_P2P 0x00000001
172#define FSF_TOPO_FABRIC 0x00000002
173#define FSF_TOPO_AL 0x00000003
174#define FSF_TOPO_FABRIC_VIRT 0x00000004
175
176/* data direction for FCP commands */
177#define FSF_DATADIR_WRITE 0x00000001
178#define FSF_DATADIR_READ 0x00000002
179#define FSF_DATADIR_READ_WRITE 0x00000003
180#define FSF_DATADIR_CMND 0x00000004
181
182/* fc service class */
183#define FSF_CLASS_1 0x00000001
184#define FSF_CLASS_2 0x00000002
185#define FSF_CLASS_3 0x00000003
186
187/* SBAL chaining */
188#define FSF_MAX_SBALS_PER_REQ 36
189#define FSF_MAX_SBALS_PER_ELS_REQ 2
190
191/* logging space behind QTCB */
192#define FSF_QTCB_LOG_SIZE 1024
193
194/* channel features */
195#define FSF_FEATURE_QTCB_SUPPRESSION 0x00000001
196#define FSF_FEATURE_CFDC 0x00000002
197#define FSF_FEATURE_LUN_SHARING 0x00000004
198#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
199#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
200
201/* option */
202#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001
203#define FSF_OPEN_LUN_REPLICATE_SENSE 0x00000002
204
205/* adapter types */
206#define FSF_ADAPTER_TYPE_FICON 0x00000001
207#define FSF_ADAPTER_TYPE_FICON_EXPRESS 0x00000002
208
209/* port types */
210#define FSF_HBA_PORTTYPE_UNKNOWN 0x00000001
211#define FSF_HBA_PORTTYPE_NOTPRESENT 0x00000003
212#define FSF_HBA_PORTTYPE_NPORT 0x00000005
213#define FSF_HBA_PORTTYPE_PTP 0x00000021
214/* the following are not defined or used by the FSF spec
215   but are additionally defined by FC-HBA */
216#define FSF_HBA_PORTTYPE_OTHER 0x00000002
217#define FSF_HBA_PORTTYPE_NOTPRESENT 0x00000003
218#define FSF_HBA_PORTTYPE_NLPORT 0x00000006
219#define FSF_HBA_PORTTYPE_FLPORT 0x00000007
220#define FSF_HBA_PORTTYPE_FPORT 0x00000008
221#define FSF_HBA_PORTTYPE_LPORT 0x00000020
222
223/* port states */
224#define FSF_HBA_PORTSTATE_UNKNOWN 0x00000001
225#define FSF_HBA_PORTSTATE_ONLINE 0x00000002
226#define FSF_HBA_PORTSTATE_OFFLINE 0x00000003
227#define FSF_HBA_PORTSTATE_LINKDOWN 0x00000006
228#define FSF_HBA_PORTSTATE_ERROR 0x00000007
229
230/* IO states of adapter */
231#define FSF_IOSTAT_NPORT_RJT 0x00000004
232#define FSF_IOSTAT_FABRIC_RJT 0x00000005
233#define FSF_IOSTAT_LS_RJT 0x00000009
234
235/* open LUN access flags*/
236#define FSF_UNIT_ACCESS_OPEN_LUN_ALLOWED 0x01000000
237#define FSF_UNIT_ACCESS_EXCLUSIVE 0x02000000
238#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER 0x10000000
239
240struct fsf_queue_designator;
241struct fsf_status_read_buffer;
242struct fsf_port_closed_payload;
243struct fsf_bit_error_payload;
244union fsf_prot_status_qual;
245struct fsf_qual_version_error;
246struct fsf_qual_sequence_error;
247struct fsf_qtcb_prefix;
248struct fsf_qtcb_header;
249struct fsf_qtcb_bottom_config;
250struct fsf_qtcb_bottom_support;
251struct fsf_qtcb_bottom_io;
252union fsf_qtcb_bottom;
253
254struct fsf_queue_designator {
255 u8 cssid;
256 u8 chpid;
257 u8 hla;
258 u8 ua;
259 u32 res1;
260} __attribute__ ((packed));
261
262struct fsf_port_closed_payload {
263 struct fsf_queue_designator queue_designator;
264 u32 port_handle;
265} __attribute__ ((packed));
266
267struct fsf_bit_error_payload {
268 u32 res1;
269 u32 link_failure_error_count;
270 u32 loss_of_sync_error_count;
271 u32 loss_of_signal_error_count;
272 u32 primitive_sequence_error_count;
273 u32 invalid_transmission_word_error_count;
274 u32 crc_error_count;
275 u32 primitive_sequence_event_timeout_count;
276 u32 elastic_buffer_overrun_error_count;
277 u32 fcal_arbitration_timeout_count;
278 u32 advertised_receive_b2b_credit;
279 u32 current_receive_b2b_credit;
280 u32 advertised_transmit_b2b_credit;
281 u32 current_transmit_b2b_credit;
282} __attribute__ ((packed));
283
284struct fsf_status_read_buffer {
285 u32 status_type;
286 u32 status_subtype;
287 u32 length;
288 u32 res1;
289 struct fsf_queue_designator queue_designator;
290 u32 d_id;
291 u32 class;
292 u64 fcp_lun;
293 u8 res3[24];
294 u8 payload[FSF_STATUS_READ_PAYLOAD_SIZE];
295} __attribute__ ((packed));
296
297struct fsf_qual_version_error {
298 u32 fsf_version;
299 u32 res1[3];
300} __attribute__ ((packed));
301
302struct fsf_qual_sequence_error {
303 u32 exp_req_seq_no;
304 u32 res1[3];
305} __attribute__ ((packed));
306
307struct fsf_qual_locallink_error {
308 u32 code;
309 u32 res1[3];
310} __attribute__ ((packed));
311
312union fsf_prot_status_qual {
313 struct fsf_qual_version_error version_error;
314 struct fsf_qual_sequence_error sequence_error;
315 struct fsf_qual_locallink_error locallink_error;
316} __attribute__ ((packed));
317
318struct fsf_qtcb_prefix {
319 u64 req_id;
320 u32 qtcb_version;
321 u32 ulp_info;
322 u32 qtcb_type;
323 u32 req_seq_no;
324 u32 prot_status;
325 union fsf_prot_status_qual prot_status_qual;
326 u8 res1[20];
327} __attribute__ ((packed));
328
329union fsf_status_qual {
330 u8 byte[FSF_STATUS_QUALIFIER_SIZE];
331 u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)];
332 u32 word[FSF_STATUS_QUALIFIER_SIZE / sizeof (u32)];
333 struct fsf_queue_designator fsf_queue_designator;
334} __attribute__ ((packed));
335
336struct fsf_qtcb_header {
337 u64 req_handle;
338 u32 fsf_command;
339 u32 res1;
340 u32 port_handle;
341 u32 lun_handle;
342 u32 res2;
343 u32 fsf_status;
344 union fsf_status_qual fsf_status_qual;
345 u8 res3[28];
346 u16 log_start;
347 u16 log_length;
348 u8 res4[16];
349} __attribute__ ((packed));
350
351struct fsf_nport_serv_param {
352 u8 common_serv_param[16];
353 u64 wwpn;
354 u64 wwnn;
355 u8 class1_serv_param[16];
356 u8 class2_serv_param[16];
357 u8 class3_serv_param[16];
358 u8 class4_serv_param[16];
359 u8 vendor_version_level[16];
360 u8 res1[16];
361} __attribute__ ((packed));
362
363struct fsf_plogi {
364 u32 code;
365 struct fsf_nport_serv_param serv_param;
366} __attribute__ ((packed));
367
368#define FSF_FCP_CMND_SIZE 288
369#define FSF_FCP_RSP_SIZE 128
370
371struct fsf_qtcb_bottom_io {
372 u32 data_direction;
373 u32 service_class;
374 u8 res1[8];
375 u32 fcp_cmnd_length;
376 u8 res2[12];
377 u8 fcp_cmnd[FSF_FCP_CMND_SIZE];
378 u8 fcp_rsp[FSF_FCP_RSP_SIZE];
379 u8 res3[64];
380} __attribute__ ((packed));
381
382struct fsf_qtcb_bottom_support {
383 u32 operation_subtype;
384 u8 res1[12];
385 u32 d_id;
386 u32 option;
387 u64 fcp_lun;
388 u64 res2;
389 u64 req_handle;
390 u32 service_class;
391 u8 res3[3];
392 u8 timeout;
393 u32 lun_access_info;
394 u8 res4[180];
395 u32 els1_length;
396 u32 els2_length;
397 u32 req_buf_length;
398 u32 resp_buf_length;
399 u8 els[256];
400} __attribute__ ((packed));
401
402struct fsf_qtcb_bottom_config {
403 u32 lic_version;
404 u32 feature_selection;
405 u32 high_qtcb_version;
406 u32 low_qtcb_version;
407 u32 max_qtcb_size;
408 u32 max_data_transfer_size;
409 u32 supported_features;
410 u8 res1[4];
411 u32 fc_topology;
412 u32 fc_link_speed;
413 u32 adapter_type;
414 u32 peer_d_id;
415 u8 res2[12];
416 u32 s_id;
417 struct fsf_nport_serv_param nport_serv_param;
418 u8 res3[8];
419 u32 adapter_ports;
420 u32 hardware_version;
421 u8 serial_number[32];
422 u8 res4[272];
423} __attribute__ ((packed));
424
425struct fsf_qtcb_bottom_port {
426 u8 res1[8];
427 u32 fc_port_id;
428 u32 port_type;
429 u32 port_state;
430 u32 class_of_service; /* should be 0x00000006 for class 2 and 3 */
431 u8 supported_fc4_types[32]; /* should be 0x00000100 for scsi fcp */
432 u8 active_fc4_types[32];
433 u32 supported_speed; /* 0x0001 for 1 GBit/s or 0x0002 for 2 GBit/s */
434 u32 maximum_frame_size; /* fixed value of 2112 */
435 u64 seconds_since_last_reset;
436 u64 tx_frames;
437 u64 tx_words;
438 u64 rx_frames;
439 u64 rx_words;
440 u64 lip; /* 0 */
441 u64 nos; /* currently 0 */
442 u64 error_frames; /* currently 0 */
443 u64 dumped_frames; /* currently 0 */
444 u64 link_failure;
445 u64 loss_of_sync;
446 u64 loss_of_signal;
447 u64 psp_error_counts;
448 u64 invalid_tx_words;
449 u64 invalid_crcs;
450 u64 input_requests;
451 u64 output_requests;
452 u64 control_requests;
453	u64 input_mb;		/* where 1 MByte == 1,000,000 Bytes */
454	u64 output_mb;		/* where 1 MByte == 1,000,000 Bytes */
455 u8 res2[256];
456} __attribute__ ((packed));
457
458union fsf_qtcb_bottom {
459 struct fsf_qtcb_bottom_io io;
460 struct fsf_qtcb_bottom_support support;
461 struct fsf_qtcb_bottom_config config;
462 struct fsf_qtcb_bottom_port port;
463};
464
465struct fsf_qtcb {
466 struct fsf_qtcb_prefix prefix;
467 struct fsf_qtcb_header header;
468 union fsf_qtcb_bottom bottom;
469 u8 log[FSF_QTCB_LOG_SIZE];
470} __attribute__ ((packed));
471
472#endif /* FSF_H */
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
new file mode 100644
index 000000000000..06e862d7bc90
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -0,0 +1,868 @@
1/*
2 * linux/drivers/s390/scsi/zfcp_qdio.c
3 *
4 * FCP adapter driver for IBM eServer zSeries
5 *
6 * QDIO related routines
7 *
8 * (C) Copyright IBM Corp. 2002, 2004
9 *
10 * Authors:
11 * Martin Peschke <mpeschke@de.ibm.com>
12 * Raimund Schroeder <raimund.schroeder@de.ibm.com>
13 * Wolfgang Taphorn
14 * Heiko Carstens <heiko.carstens@de.ibm.com>
15 * Andreas Herrmann <aherrman@de.ibm.com>
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
20 * any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 */
31
32#define ZFCP_QDIO_C_REVISION "$Revision: 1.20 $"
33
34#include "zfcp_ext.h"
35
36static inline void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int);
37static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get
38 (struct zfcp_qdio_queue *, int, int);
39static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp
40 (struct zfcp_fsf_req *, int, int);
41static inline volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain
42 (struct zfcp_fsf_req *, unsigned long);
43static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_next
44 (struct zfcp_fsf_req *, unsigned long);
45static inline int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int);
46static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *);
47static inline void zfcp_qdio_sbale_fill
48 (struct zfcp_fsf_req *, unsigned long, void *, int);
49static inline int zfcp_qdio_sbals_from_segment
50 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long);
51static inline int zfcp_qdio_sbals_from_buffer
52 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int);
53
54static qdio_handler_t zfcp_qdio_request_handler;
55static qdio_handler_t zfcp_qdio_response_handler;
56static int zfcp_qdio_handler_error_check(struct zfcp_adapter *,
57 unsigned int,
58 unsigned int, unsigned int);
59
60#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
61
62/*
63 * Allocates BUFFER memory to each of the pointers of the qdio_buffer_t
64 * array in the adapter struct.
65 * Cur_buf is the pointer array and count can be any number of required
66 * buffers; the page-fitting arithmetic is done entirely within this function.
67 *
68 * returns: number of buffers allocated
69 * locks: must only be called with zfcp_data.config_sema taken
70 */
71static int
72zfcp_qdio_buffers_enqueue(struct qdio_buffer **cur_buf, int count)
73{
74 int buf_pos;
75 int qdio_buffers_per_page;
76 int page_pos = 0;
77 struct qdio_buffer *first_in_page = NULL;
78
79 qdio_buffers_per_page = PAGE_SIZE / sizeof (struct qdio_buffer);
80 ZFCP_LOG_TRACE("buffers_per_page=%d\n", qdio_buffers_per_page);
81
82 for (buf_pos = 0; buf_pos < count; buf_pos++) {
83 if (page_pos == 0) {
84 cur_buf[buf_pos] = (struct qdio_buffer *)
85 get_zeroed_page(GFP_KERNEL);
86 if (cur_buf[buf_pos] == NULL) {
87 ZFCP_LOG_INFO("error: allocation of "
88 "QDIO buffer failed \n");
89 goto out;
90 }
91 first_in_page = cur_buf[buf_pos];
92 } else {
93 cur_buf[buf_pos] = first_in_page + page_pos;
94
95 }
96 /* was initialised to zero */
97 page_pos++;
98 page_pos %= qdio_buffers_per_page;
99 }
100 out:
101 return buf_pos;
102}
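
/*
 * Worked example of the page-fitting arithmetic above (a sketch with
 * commonly seen sizes, not guaranteed values): with PAGE_SIZE 4096 and
 * sizeof(struct qdio_buffer) 256, qdio_buffers_per_page is 16, so a
 * fresh zeroed page is allocated for buffers 0, 16, 32, ... and the
 * following 15 pointers are carved out of the same page via
 * first_in_page + page_pos.
 */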
103
104/*
105 * Frees BUFFER memory for each of the pointers of the struct qdio_buffer array
106 * in the adapter struct cur_buf is the pointer array and count can be any
107 * number of buffers in the array that should be freed starting from buffer 0
108 *
109 * locks: must only be called with zfcp_data.config_sema taken
110 */
111static void
112zfcp_qdio_buffers_dequeue(struct qdio_buffer **cur_buf, int count)
113{
114 int buf_pos;
115 int qdio_buffers_per_page;
116
117 qdio_buffers_per_page = PAGE_SIZE / sizeof (struct qdio_buffer);
118 ZFCP_LOG_TRACE("buffers_per_page=%d\n", qdio_buffers_per_page);
119
120 for (buf_pos = 0; buf_pos < count; buf_pos += qdio_buffers_per_page)
121 free_page((unsigned long) cur_buf[buf_pos]);
122 return;
123}
124
125/* locks: must only be called with zfcp_data.config_sema taken */
126int
127zfcp_qdio_allocate_queues(struct zfcp_adapter *adapter)
128{
129 int buffer_count;
130 int retval = 0;
131
132 buffer_count =
133 zfcp_qdio_buffers_enqueue(&(adapter->request_queue.buffer[0]),
134 QDIO_MAX_BUFFERS_PER_Q);
135 if (buffer_count < QDIO_MAX_BUFFERS_PER_Q) {
136 ZFCP_LOG_DEBUG("only %d QDIO buffers allocated for request "
137 "queue\n", buffer_count);
138 zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
139 buffer_count);
140 retval = -ENOMEM;
141 goto out;
142 }
143
144 buffer_count =
145 zfcp_qdio_buffers_enqueue(&(adapter->response_queue.buffer[0]),
146 QDIO_MAX_BUFFERS_PER_Q);
147 if (buffer_count < QDIO_MAX_BUFFERS_PER_Q) {
148 ZFCP_LOG_DEBUG("only %d QDIO buffers allocated for response "
149 "queue", buffer_count);
150 zfcp_qdio_buffers_dequeue(&(adapter->response_queue.buffer[0]),
151 buffer_count);
152 ZFCP_LOG_TRACE("freeing request_queue buffers\n");
153 zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
154 QDIO_MAX_BUFFERS_PER_Q);
155 retval = -ENOMEM;
156 goto out;
157 }
158 out:
159 return retval;
160}
161
162/* locks: must only be called with zfcp_data.config_sema taken */
163void
164zfcp_qdio_free_queues(struct zfcp_adapter *adapter)
165{
166 ZFCP_LOG_TRACE("freeing request_queue buffers\n");
167 zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
168 QDIO_MAX_BUFFERS_PER_Q);
169
170 ZFCP_LOG_TRACE("freeing response_queue buffers\n");
171 zfcp_qdio_buffers_dequeue(&(adapter->response_queue.buffer[0]),
172 QDIO_MAX_BUFFERS_PER_Q);
173}
174
175int
176zfcp_qdio_allocate(struct zfcp_adapter *adapter)
177{
178 struct qdio_initialize *init_data;
179
180 init_data = &adapter->qdio_init_data;
181
182 init_data->cdev = adapter->ccw_device;
183 init_data->q_format = QDIO_SCSI_QFMT;
184 memcpy(init_data->adapter_name, &adapter->name, 8);
185 init_data->qib_param_field_format = 0;
186 init_data->qib_param_field = NULL;
187 init_data->input_slib_elements = NULL;
188 init_data->output_slib_elements = NULL;
189 init_data->min_input_threshold = ZFCP_MIN_INPUT_THRESHOLD;
190 init_data->max_input_threshold = ZFCP_MAX_INPUT_THRESHOLD;
191 init_data->min_output_threshold = ZFCP_MIN_OUTPUT_THRESHOLD;
192 init_data->max_output_threshold = ZFCP_MAX_OUTPUT_THRESHOLD;
193 init_data->no_input_qs = 1;
194 init_data->no_output_qs = 1;
195 init_data->input_handler = zfcp_qdio_response_handler;
196 init_data->output_handler = zfcp_qdio_request_handler;
197 init_data->int_parm = (unsigned long) adapter;
198 init_data->flags = QDIO_INBOUND_0COPY_SBALS |
199 QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
200 init_data->input_sbal_addr_array =
201 (void **) (adapter->response_queue.buffer);
202 init_data->output_sbal_addr_array =
203 (void **) (adapter->request_queue.buffer);
204
205 return qdio_allocate(init_data);
206}
207
208/*
209 * function: zfcp_qdio_handler_error_check
210 *
211 * purpose: called by the response handler to determine error condition
212 *
213 * returns: error flag
214 *
215 */
216static inline int
217zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter,
218 unsigned int status,
219 unsigned int qdio_error, unsigned int siga_error)
220{
221 int retval = 0;
222
223 if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_TRACE)) {
224 if (status & QDIO_STATUS_INBOUND_INT) {
225 ZFCP_LOG_TRACE("status is"
226 " QDIO_STATUS_INBOUND_INT \n");
227 }
228 if (status & QDIO_STATUS_OUTBOUND_INT) {
229 ZFCP_LOG_TRACE("status is"
230 " QDIO_STATUS_OUTBOUND_INT \n");
231 }
232	} /* if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_TRACE)) */
233 if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
234 retval = -EIO;
235
236 ZFCP_LOG_FLAGS(1, "QDIO_STATUS_LOOK_FOR_ERROR \n");
237
238 ZFCP_LOG_INFO("QDIO problem occurred (status=0x%x, "
239 "qdio_error=0x%x, siga_error=0x%x)\n",
240 status, qdio_error, siga_error);
241
242 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
243 ZFCP_LOG_FLAGS(2,
244 "QDIO_STATUS_ACTIVATE_CHECK_CONDITION\n");
245 }
246 if (status & QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR) {
247 ZFCP_LOG_FLAGS(2,
248 "QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR\n");
249 }
250 if (status & QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR) {
251 ZFCP_LOG_FLAGS(2,
252 "QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR\n");
253 }
254
255 if (siga_error & QDIO_SIGA_ERROR_ACCESS_EXCEPTION) {
256 ZFCP_LOG_FLAGS(2, "QDIO_SIGA_ERROR_ACCESS_EXCEPTION\n");
257 }
258
259 if (siga_error & QDIO_SIGA_ERROR_B_BIT_SET) {
260 ZFCP_LOG_FLAGS(2, "QDIO_SIGA_ERROR_B_BIT_SET\n");
261 }
262
263 switch (qdio_error) {
264 case 0:
265 ZFCP_LOG_FLAGS(3, "QDIO_OK");
266 break;
267 case SLSB_P_INPUT_ERROR:
268 ZFCP_LOG_FLAGS(1, "SLSB_P_INPUT_ERROR\n");
269 break;
270 case SLSB_P_OUTPUT_ERROR:
271 ZFCP_LOG_FLAGS(1, "SLSB_P_OUTPUT_ERROR\n");
272 break;
273 default:
274 ZFCP_LOG_NORMAL("bug: unknown QDIO error 0x%x\n",
275 qdio_error);
276 break;
277 }
278 /* Restarting IO on the failed adapter from scratch */
279 debug_text_event(adapter->erp_dbf, 1, "qdio_err");
280 /*
281		 * Since we have been using this adapter, it is safe to assume
282		 * that it is not failed but recoverable. The card seems to
283		 * report link-up events by self-initiated queue shutdown.
284		 * That is why we need to clear the link-down flag
285		 * which is set again in case we have missed the event entirely.
286 */
287 zfcp_erp_adapter_reopen(
288 adapter,
289 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
290 ZFCP_STATUS_COMMON_ERP_FAILED);
291 }
292 return retval;
293}
294
295/*
296 * function: zfcp_qdio_request_handler
297 *
298 * purpose: is called by QDIO layer for completed SBALs in request queue
299 *
300 * returns: (void)
301 */
302static void
303zfcp_qdio_request_handler(struct ccw_device *ccw_device,
304 unsigned int status,
305 unsigned int qdio_error,
306 unsigned int siga_error,
307 unsigned int queue_number,
308 int first_element,
309 int elements_processed,
310 unsigned long int_parm)
311{
312 struct zfcp_adapter *adapter;
313 struct zfcp_qdio_queue *queue;
314
315 adapter = (struct zfcp_adapter *) int_parm;
316 queue = &adapter->request_queue;
317
318 ZFCP_LOG_DEBUG("adapter %s, first=%d, elements_processed=%d\n",
319 zfcp_get_busid_by_adapter(adapter),
320 first_element, elements_processed);
321
322 if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
323 siga_error)))
324 goto out;
325 /*
326 * we stored address of struct zfcp_adapter data structure
327 * associated with irq in int_parm
328 */
329
330 /* cleanup all SBALs being program-owned now */
331 zfcp_qdio_zero_sbals(queue->buffer, first_element, elements_processed);
332
333 /* increase free space in outbound queue */
334 atomic_add(elements_processed, &queue->free_count);
335 ZFCP_LOG_DEBUG("free_count=%d\n", atomic_read(&queue->free_count));
336 wake_up(&adapter->request_wq);
337 ZFCP_LOG_DEBUG("elements_processed=%d, free count=%d\n",
338 elements_processed, atomic_read(&queue->free_count));
339 out:
340 return;
341}
342
343/*
344 * function: zfcp_qdio_response_handler
345 *
346 * purpose: is called by QDIO layer for completed SBALs in response queue
347 *
348 * returns: (void)
349 */
350static void
351zfcp_qdio_response_handler(struct ccw_device *ccw_device,
352 unsigned int status,
353 unsigned int qdio_error,
354 unsigned int siga_error,
355 unsigned int queue_number,
356 int first_element,
357 int elements_processed,
358 unsigned long int_parm)
359{
360 struct zfcp_adapter *adapter;
361 struct zfcp_qdio_queue *queue;
362 int buffer_index;
363 int i;
364 struct qdio_buffer *buffer;
365 int retval = 0;
366 u8 count;
367 u8 start;
368 volatile struct qdio_buffer_element *buffere = NULL;
369 int buffere_index;
370
371 adapter = (struct zfcp_adapter *) int_parm;
372 queue = &adapter->response_queue;
373
374 if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
375 siga_error)))
376 goto out;
377
378 /*
379 * we stored address of struct zfcp_adapter data structure
380 * associated with irq in int_parm
381 */
382
383 buffere = &(queue->buffer[first_element]->element[0]);
384 ZFCP_LOG_DEBUG("first BUFFERE flags=0x%x\n", buffere->flags);
385 /*
386 * go through all SBALs from input queue currently
387 * returned by QDIO layer
388 */
389
390 for (i = 0; i < elements_processed; i++) {
391
392 buffer_index = first_element + i;
393 buffer_index %= QDIO_MAX_BUFFERS_PER_Q;
394 buffer = queue->buffer[buffer_index];
395
396 /* go through all SBALEs of SBAL */
397 for (buffere_index = 0;
398 buffere_index < QDIO_MAX_ELEMENTS_PER_BUFFER;
399 buffere_index++) {
400
401 /* look for QDIO request identifiers in SB */
402 buffere = &buffer->element[buffere_index];
403 retval = zfcp_qdio_reqid_check(adapter,
404 (void *) buffere->addr);
405
406 if (retval) {
407 ZFCP_LOG_NORMAL("bug: unexpected inbound "
408 "packet on adapter %s "
409 "(reqid=0x%lx, "
410 "first_element=%d, "
411 "elements_processed=%d)\n",
412 zfcp_get_busid_by_adapter(adapter),
413 (unsigned long) buffere->addr,
414 first_element,
415 elements_processed);
416 ZFCP_LOG_NORMAL("hex dump of inbound buffer "
417 "at address %p "
418 "(buffer_index=%d, "
419 "buffere_index=%d)\n", buffer,
420 buffer_index, buffere_index);
421 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
422 (char *) buffer, SBAL_SIZE);
423 }
424 /*
425			 * A single used SBALE per inbound SBAL has been
426 * implemented by QDIO so far. Hope they will
427 * do some optimisation. Will need to change to
428 * unlikely() then.
429 */
430 if (likely(buffere->flags & SBAL_FLAGS_LAST_ENTRY))
431 break;
432		}
433
434 if (unlikely(!(buffere->flags & SBAL_FLAGS_LAST_ENTRY))) {
435 ZFCP_LOG_NORMAL("bug: End of inbound data "
436 "not marked!\n");
437 }
438 }
439
440 /*
441 * put range of SBALs back to response queue
442	 * (including SBALs which have already been freed before)
443 */
444 count = atomic_read(&queue->free_count) + elements_processed;
445 start = queue->free_index;
446
447 ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, "
448 "queue_no=%i, index_in_queue=%i, count=%i, "
449 "buffers=0x%lx\n",
450 zfcp_get_busid_by_adapter(adapter),
451 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
452 0, start, count, (unsigned long) &queue->buffer[start]);
453
454 retval = do_QDIO(ccw_device,
455 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
456 0, start, count, NULL);
457
458 if (unlikely(retval)) {
459 atomic_set(&queue->free_count, count);
460 ZFCP_LOG_DEBUG("clearing of inbound data regions failed, "
461 "queues may be down "
462 "(count=%d, start=%d, retval=%d)\n",
463 count, start, retval);
464 } else {
465 queue->free_index += count;
466 queue->free_index %= QDIO_MAX_BUFFERS_PER_Q;
467 atomic_set(&queue->free_count, 0);
468 ZFCP_LOG_TRACE("%i buffers enqueued to response "
469 "queue at position %i\n", count, start);
470 }
471 out:
472 return;
473}
474
475/*
476 * function: zfcp_qdio_reqid_check
477 *
478 * purpose: checks for valid reqids or unsolicited status
479 *
480 * returns: 0 - valid request id or unsolicited status
481 * !0 - otherwise
482 */
483int
484zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr)
485{
486 struct zfcp_fsf_req *fsf_req;
487 int retval = 0;
488
489 /* invalid (per convention used in this driver) */
490 if (unlikely(!sbale_addr)) {
491 ZFCP_LOG_NORMAL("bug: invalid reqid\n");
492 retval = -EINVAL;
493 goto out;
494 }
495
496 /* valid request id and thus (hopefully :) valid fsf_req address */
497 fsf_req = (struct zfcp_fsf_req *) sbale_addr;
498
499 if (unlikely(adapter != fsf_req->adapter)) {
500 ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, "
501 "fsf_req->adapter=%p, adapter=%p)\n",
502 fsf_req, fsf_req->adapter, adapter);
503 retval = -EINVAL;
504 goto out;
505 }
506
507 ZFCP_LOG_TRACE("fsf_req at %p, QTCB at %p\n", fsf_req, fsf_req->qtcb);
508 if (likely(fsf_req->qtcb)) {
509 ZFCP_LOG_TRACE("hex dump of QTCB:\n");
510 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) fsf_req->qtcb,
511 sizeof(struct fsf_qtcb));
512 }
513
514 /* finish the FSF request */
515 zfcp_fsf_req_complete(fsf_req);
516 out:
517 return retval;
518}
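/*
 * Illustrative sketch (not driver code) of the reqid convention
 * checked above: the request ID carried in an inbound SBALE is simply
 * the kernel address of the originating struct zfcp_fsf_req, stamped
 * into the outbound SBALE by the send path, roughly
 *
 *	sbale->addr = (void *) fsf_req;
 *
 * so the request is recovered by a plain cast, and the adapter
 * back-pointer comparison above guards against stray addresses.
 */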
519
520/**
521 * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue
522 * @queue: queue from which SBALE should be returned
523 * @sbal: index of the SBAL in the queue
524 * @sbale: index of the SBALE within the SBAL
525 */
526static inline volatile struct qdio_buffer_element *
527zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale)
528{
529 return &queue->buffer[sbal]->element[sbale];
530}
531
532/**
533 * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for
534 * a struct zfcp_fsf_req
535 */
536inline volatile struct qdio_buffer_element *
537zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
538{
539 return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue,
540 sbal, sbale);
541}
542
543/**
544 * zfcp_qdio_sbale_resp - return pointer to SBALE of response_queue for
545 * a struct zfcp_fsf_req
546 */
547static inline volatile struct qdio_buffer_element *
548zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
549{
550 return zfcp_qdio_sbale_get(&fsf_req->adapter->response_queue,
551 sbal, sbale);
552}
553
554/**
555 * zfcp_qdio_sbale_curr - return current SBALE on request_queue for
556 * a struct zfcp_fsf_req
557 */
558inline volatile struct qdio_buffer_element *
559zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req)
560{
561 return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr,
562 fsf_req->sbale_curr);
563}
564
565/**
566 * zfcp_qdio_sbal_limit - determine maximum number of SBALs that can be used
567 * on the request_queue for a struct zfcp_fsf_req
568 * @fsf_req: the number of the last SBAL that can be used is stored herein
569 * @max_sbals: used to pass an upper limit for the number of SBALs
570 *
571 * Note: We can assume at least one free SBAL in the request_queue when called.
572 */
573static inline void
574zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
575{
576 int count = atomic_read(&fsf_req->adapter->request_queue.free_count);
577 count = min(count, max_sbals);
578 fsf_req->sbal_last = fsf_req->sbal_first;
579 fsf_req->sbal_last += (count - 1);
580 fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
581}
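/*
 * Worked example (assuming QDIO_MAX_BUFFERS_PER_Q == 128): with
 * sbal_first == 126, free_count == 5 and max_sbals == 3, count is
 * limited to 3 and sbal_last becomes (126 + 2) % 128 == 0, i.e. the
 * usable window of SBALs wraps around the end of the circular queue.
 */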
582
583/**
584 * zfcp_qdio_sbal_chain - chain SBALs if more than one SBAL is needed for a
585 * request
586 * @fsf_req: zfcp_fsf_req to be processed
587 * @sbtype: SBAL flags which have to be set in first SBALE of new SBAL
588 *
589 * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req.
590 */
591static inline volatile struct qdio_buffer_element *
592zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
593{
594 volatile struct qdio_buffer_element *sbale;
595
596 /* set last entry flag in current SBALE of current SBAL */
597 sbale = zfcp_qdio_sbale_curr(fsf_req);
598 sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
599
600 /* don't exceed last allowed SBAL */
601 if (fsf_req->sbal_curr == fsf_req->sbal_last)
602 return NULL;
603
604 /* set chaining flag in first SBALE of current SBAL */
605 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
606 sbale->flags |= SBAL_FLAGS0_MORE_SBALS;
607
608 /* calculate index of next SBAL */
609 fsf_req->sbal_curr++;
610 fsf_req->sbal_curr %= QDIO_MAX_BUFFERS_PER_Q;
611
612	/* keep this request's number of SBALs up-to-date */
613 fsf_req->sbal_number++;
614
615 /* start at first SBALE of new SBAL */
616 fsf_req->sbale_curr = 0;
617
618 /* set storage-block type for new SBAL */
619 sbale = zfcp_qdio_sbale_curr(fsf_req);
620 sbale->flags |= sbtype;
621
622 return sbale;
623}
624
625/**
626 * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed
627 */
628static inline volatile struct qdio_buffer_element *
629zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
630{
631 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
632 return zfcp_qdio_sbal_chain(fsf_req, sbtype);
633
634 fsf_req->sbale_curr++;
635
636 return zfcp_qdio_sbale_curr(fsf_req);
637}
638
639/**
640 * zfcp_qdio_sbals_zero - zero out all SBALs from first to last in queue
642 */
643static inline int
644zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last)
645{
646 struct qdio_buffer **buf = queue->buffer;
647 int curr = first;
648 int count = 0;
649
650 for(;;) {
651 curr %= QDIO_MAX_BUFFERS_PER_Q;
652 count++;
653 memset(buf[curr], 0, sizeof(struct qdio_buffer));
654 if (curr == last)
655 break;
656 curr++;
657 }
658 return count;
659}
660
661
662/**
663 * zfcp_qdio_sbals_wipe - reset all changes in SBALs for an fsf_req
664 */
665static inline int
666zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req)
667{
668 return zfcp_qdio_sbals_zero(&fsf_req->adapter->request_queue,
669 fsf_req->sbal_first, fsf_req->sbal_curr);
670}
671
672
673/**
674 * zfcp_qdio_sbale_fill - set address and length in current SBALE
675 * on request_queue
676 */
677static inline void
678zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
679 void *addr, int length)
680{
681 volatile struct qdio_buffer_element *sbale;
682
683 sbale = zfcp_qdio_sbale_curr(fsf_req);
684 sbale->addr = addr;
685 sbale->length = length;
686}
687
688/**
689 * zfcp_qdio_sbals_from_segment - map memory segment to SBALE(s)
690 * @fsf_req: request to be processed
691 * @sbtype: SBALE flags
692 * @start_addr: address of memory segment
693 * @total_length: length of memory segment
694 *
695 * Alignment and length of the segment determine how many SBALEs are needed
696 * for the memory segment.
697 */
698static inline int
699zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
700 void *start_addr, unsigned long total_length)
701{
702 unsigned long remaining, length;
703 void *addr;
704
705 /* split segment up heeding page boundaries */
706 for (addr = start_addr, remaining = total_length; remaining > 0;
707 addr += length, remaining -= length) {
708 /* get next free SBALE for new piece */
709 if (NULL == zfcp_qdio_sbale_next(fsf_req, sbtype)) {
710 /* no SBALE left, clean up and leave */
711 zfcp_qdio_sbals_wipe(fsf_req);
712 return -EINVAL;
713 }
714 /* calculate length of new piece */
715 length = min(remaining,
716 (PAGE_SIZE - ((unsigned long) addr &
717 (PAGE_SIZE - 1))));
718 /* fill current SBALE with calculated piece */
719 zfcp_qdio_sbale_fill(fsf_req, sbtype, addr, length);
720 }
721 return total_length;
722}
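/*
 * Worked example of the page-boundary split above (assuming
 * PAGE_SIZE == 4096): a segment of 512 bytes starting at page offset
 * 0xf00 is mapped to two SBALEs, the first covering the 256 bytes up
 * to the page end, the second the remaining 256 bytes at the start of
 * the next page; a page-aligned segment of up to PAGE_SIZE bytes fits
 * into a single SBALE.
 */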
723
724
725/**
726 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
727 * @fsf_req: request to be processed
728 * @sbtype: SBALE flags
729 * @sg: scatter-gather list
730 * @sg_count: number of elements in scatter-gather list
731 * @max_sbals: upper bound for number of SBALs to be used
732 */
733inline int
734zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
735 struct scatterlist *sg, int sg_count, int max_sbals)
736{
737 int sg_index;
738 struct scatterlist *sg_segment;
739 int retval;
740 volatile struct qdio_buffer_element *sbale;
741 int bytes = 0;
742
743 /* figure out last allowed SBAL */
744 zfcp_qdio_sbal_limit(fsf_req, max_sbals);
745
746 /* set storage-block type for current SBAL */
747 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
748 sbale->flags |= sbtype;
749
750	/* process all segments of scatter-gather list */
751 for (sg_index = 0, sg_segment = sg, bytes = 0;
752 sg_index < sg_count;
753 sg_index++, sg_segment++) {
754 retval = zfcp_qdio_sbals_from_segment(
755 fsf_req,
756 sbtype,
757 zfcp_sg_to_address(sg_segment),
758 sg_segment->length);
759 if (retval < 0) {
760 bytes = retval;
761 goto out;
762 } else
763 bytes += retval;
764 }
765 /* assume that no other SBALEs are to follow in the same SBAL */
766 sbale = zfcp_qdio_sbale_curr(fsf_req);
767 sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
768out:
769 return bytes;
770}
771
772
773/**
774 * zfcp_qdio_sbals_from_buffer - fill SBALs from buffer
775 * @fsf_req: request to be processed
776 * @sbtype: SBALE flags
777 * @buffer: data buffer
778 * @length: length of buffer
779 * @max_sbals: upper bound for number of SBALs to be used
780 */
781static inline int
782zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
783 void *buffer, unsigned long length, int max_sbals)
784{
785 struct scatterlist sg_segment;
786
787 zfcp_address_to_sg(buffer, &sg_segment);
788 sg_segment.length = length;
789
790 return zfcp_qdio_sbals_from_sg(fsf_req, sbtype, &sg_segment, 1,
791 max_sbals);
792}
793
794
795/**
796 * zfcp_qdio_sbals_from_scsicmnd - fill SBALs from scsi command
797 * @fsf_req: request to be processed
798 * @sbtype: SBALE flags
799 * @scsi_cmnd: either scatter-gather list or buffer contained herein is used
800 * to fill SBALs
801 */
802inline int
803zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req,
804 unsigned long sbtype, struct scsi_cmnd *scsi_cmnd)
805{
806 if (scsi_cmnd->use_sg) {
807 return zfcp_qdio_sbals_from_sg(fsf_req, sbtype,
808 (struct scatterlist *)
809 scsi_cmnd->request_buffer,
810 scsi_cmnd->use_sg,
811 ZFCP_MAX_SBALS_PER_REQ);
812 } else {
813 return zfcp_qdio_sbals_from_buffer(fsf_req, sbtype,
814 scsi_cmnd->request_buffer,
815 scsi_cmnd->request_bufflen,
816 ZFCP_MAX_SBALS_PER_REQ);
817 }
818}
819
820/**
821 * zfcp_qdio_determine_pci - set PCI flag in first SBALE on qdio queue if needed
822 */
823int
824zfcp_qdio_determine_pci(struct zfcp_qdio_queue *req_queue,
825 struct zfcp_fsf_req *fsf_req)
826{
827 int new_distance_from_int;
828 int pci_pos;
829 volatile struct qdio_buffer_element *sbale;
830
831 new_distance_from_int = req_queue->distance_from_int +
832 fsf_req->sbal_number;
833
834 if (unlikely(new_distance_from_int >= ZFCP_QDIO_PCI_INTERVAL)) {
835 new_distance_from_int %= ZFCP_QDIO_PCI_INTERVAL;
836 pci_pos = fsf_req->sbal_first;
837 pci_pos += fsf_req->sbal_number;
838 pci_pos -= new_distance_from_int;
839 pci_pos -= 1;
840 pci_pos %= QDIO_MAX_BUFFERS_PER_Q;
841 sbale = zfcp_qdio_sbale_req(fsf_req, pci_pos, 0);
842 sbale->flags |= SBAL_FLAGS0_PCI;
843 }
844 return new_distance_from_int;
845}
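/*
 * Worked example (assuming ZFCP_QDIO_PCI_INTERVAL == 4 purely for
 * illustration): with distance_from_int == 3 and a request spanning
 * two SBALs starting at sbal_first == 10, new_distance_from_int is 5
 * and wraps to 1; the PCI flag is set in SBAL
 * (10 + 2 - 1 - 1) % QDIO_MAX_BUFFERS_PER_Q == 10, so a progress
 * interrupt is requested before more than ZFCP_QDIO_PCI_INTERVAL
 * SBALs accumulate without one.
 */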
846
847/*
848 * function: zfcp_qdio_zero_sbals
849 *
850 * purpose: zeros specified range of SBALs
851 *
852 * returns:
853 */
854void
855zfcp_qdio_zero_sbals(struct qdio_buffer *buf[], int first, int clean_count)
856{
857 int cur_pos;
858 int index;
859
860 for (cur_pos = first; cur_pos < (first + clean_count); cur_pos++) {
861 index = cur_pos % QDIO_MAX_BUFFERS_PER_Q;
862 memset(buf[index], 0, sizeof (struct qdio_buffer));
863 ZFCP_LOG_TRACE("zeroing BUFFER %d at address %p\n",
864 index, buf[index]);
865 }
866}
867
868#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
new file mode 100644
index 000000000000..e21b547fd427
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -0,0 +1,949 @@
1/*
2 *
3 * linux/drivers/s390/scsi/zfcp_scsi.c
4 *
5 * FCP adapter driver for IBM eServer zSeries
6 *
7 * (C) Copyright IBM Corp. 2002, 2004
8 *
9 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
10 * Raimund Schroeder <raimund.schroeder@de.ibm.com>
11 * Aron Zeh
12 * Wolfgang Taphorn
13 * Stefan Bader <stefan.bader@de.ibm.com>
14 * Heiko Carstens <heiko.carstens@de.ibm.com>
15 * Andreas Herrmann <aherrman@de.ibm.com>
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
20 * any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 */
31
32#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
33
34#define ZFCP_SCSI_REVISION "$Revision: 1.74 $"
35
36#include "zfcp_ext.h"
37
38static void zfcp_scsi_slave_destroy(struct scsi_device *sdp);
39static int zfcp_scsi_slave_alloc(struct scsi_device *sdp);
40static int zfcp_scsi_slave_configure(struct scsi_device *sdp);
41static int zfcp_scsi_queuecommand(struct scsi_cmnd *,
42 void (*done) (struct scsi_cmnd *));
43static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *);
44static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *);
45static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *);
46static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *);
47static int zfcp_task_management_function(struct zfcp_unit *, u8);
48
49static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int, scsi_id_t,
50 scsi_lun_t);
51static struct zfcp_port *zfcp_port_lookup(struct zfcp_adapter *, int,
52 scsi_id_t);
53
54static struct device_attribute *zfcp_sysfs_sdev_attrs[];
55
56struct scsi_transport_template *zfcp_transport_template;
57
58struct zfcp_data zfcp_data = {
59 .scsi_host_template = {
60 name: ZFCP_NAME,
61 proc_name: "zfcp",
62 proc_info: NULL,
63 detect: NULL,
64 slave_alloc: zfcp_scsi_slave_alloc,
65 slave_configure: zfcp_scsi_slave_configure,
66 slave_destroy: zfcp_scsi_slave_destroy,
67 queuecommand: zfcp_scsi_queuecommand,
68 eh_abort_handler: zfcp_scsi_eh_abort_handler,
69 eh_device_reset_handler: zfcp_scsi_eh_device_reset_handler,
70 eh_bus_reset_handler: zfcp_scsi_eh_bus_reset_handler,
71 eh_host_reset_handler: zfcp_scsi_eh_host_reset_handler,
72 /* FIXME(openfcp): Tune */
73 can_queue: 4096,
74 this_id: 0,
75 /*
76 * FIXME:
77 * one less? can zfcp_create_sbale cope with it?
78 */
79 sg_tablesize: ZFCP_MAX_SBALES_PER_REQ,
80 cmd_per_lun: 1,
81 unchecked_isa_dma: 0,
82 use_clustering: 1,
83 sdev_attrs: zfcp_sysfs_sdev_attrs,
84 },
85 .driver_version = ZFCP_VERSION,
86 /* rest initialised with zeros */
87};
88
89/* Find start of Response Information in FCP response unit */
90char *
91zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
92{
93 char *fcp_rsp_info_ptr;
94
95 fcp_rsp_info_ptr =
96	    (char *) fcp_rsp_iu + (sizeof (struct fcp_rsp_iu));
97
98 return fcp_rsp_info_ptr;
99}
100
101/* Find start of Sense Information in FCP response unit */
102char *
103zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
104{
105 char *fcp_sns_info_ptr;
106
107 fcp_sns_info_ptr =
108	    (char *) fcp_rsp_iu + (sizeof (struct fcp_rsp_iu));
109 if (fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)
110 fcp_sns_info_ptr = (char *) fcp_sns_info_ptr +
111 fcp_rsp_iu->fcp_rsp_len;
112
113 return fcp_sns_info_ptr;
114}
115
116fcp_dl_t *
117zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd)
118{
119 int additional_length = fcp_cmd->add_fcp_cdb_length << 2;
120 fcp_dl_t *fcp_dl_addr;
121
122 fcp_dl_addr = (fcp_dl_t *)
123 ((unsigned char *) fcp_cmd +
124 sizeof (struct fcp_cmnd_iu) + additional_length);
125 /*
126 * fcp_dl_addr = start address of fcp_cmnd structure +
127 * size of fixed part + size of dynamically sized add_dcp_cdb field
128 * SEE FCP-2 documentation
129 */
130 return fcp_dl_addr;
131}
132
133fcp_dl_t
134zfcp_get_fcp_dl(struct fcp_cmnd_iu * fcp_cmd)
135{
136 return *zfcp_get_fcp_dl_ptr(fcp_cmd);
137}
138
139void
140zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
141{
142 *zfcp_get_fcp_dl_ptr(fcp_cmd) = fcp_dl;
143}
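/*
 * Example of the layout these accessors rely on (see FCP-2): with
 * add_fcp_cdb_length == 2 the additional CDB occupies 2 * 4 == 8
 * bytes, so the FCP_DL field starts at
 * sizeof(struct fcp_cmnd_iu) + 8 bytes from the beginning of the
 * command IU.
 */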
144
145/*
146 * note: this is a bit-or operation regarding the specified
147 * byte, not an assignment
148 */
149static inline void
150set_byte(u32 * result, char status, char pos)
151{
152 *result |= status << (pos * 8);
153}
154
155void
156set_host_byte(u32 * result, char status)
157{
158 set_byte(result, status, 2);
159}
160
161void
162set_driver_byte(u32 * result, char status)
163{
164 set_byte(result, status, 3);
165}
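/*
 * Example: the 32-bit SCSI result is composed of (low to high byte)
 * status, message, host and driver byte, so
 *
 *	set_host_byte(&scpnt->result, DID_ERROR);
 *
 * ORs DID_ERROR << 16 into the result. Note the bit-or: callers are
 * expected to start out with a zeroed result.
 */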
166
167/*
168 * function: zfcp_scsi_slave_alloc
169 *
170 * purpose:
171 *
172 * returns:
173 */
174
175static int
176zfcp_scsi_slave_alloc(struct scsi_device *sdp)
177{
178 struct zfcp_adapter *adapter;
179 struct zfcp_unit *unit;
180 unsigned long flags;
181 int retval = -ENODEV;
182
183 adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
184 if (!adapter)
185 goto out;
186
187 read_lock_irqsave(&zfcp_data.config_lock, flags);
188 unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun);
189 if (unit) {
190 sdp->hostdata = unit;
191 unit->device = sdp;
192 zfcp_unit_get(unit);
193 retval = 0;
194 }
195 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
196 out:
197 return retval;
198}
199
200/*
201 * function: zfcp_scsi_slave_destroy
202 *
203 * purpose:
204 *
205 * returns:
206 */
207
208static void
209zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
210{
211 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
212
213 if (unit) {
214 sdpnt->hostdata = NULL;
215 unit->device = NULL;
216 zfcp_unit_put(unit);
217 } else {
218 ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at "
219 "address %p\n", sdpnt);
220 }
221}
222
223/*
224 * called from scsi midlayer to allow finetuning of a device.
225 */
226static int
227zfcp_scsi_slave_configure(struct scsi_device *sdp)
228{
229 if (sdp->tagged_supported)
230 scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, ZFCP_CMND_PER_LUN);
231 else
232 scsi_adjust_queue_depth(sdp, 0, 1);
233 return 0;
234}
235
236/**
237 * zfcp_scsi_command_fail - set result in scsi_cmnd and call scsi_done function
238 * @scpnt: pointer to struct scsi_cmnd where result is set
239 * @result: result to be set in scpnt (e.g. DID_ERROR)
240 */
241static void
242zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
243{
244 set_host_byte(&scpnt->result, result);
245 zfcp_cmd_dbf_event_scsi("failing", scpnt);
246 /* return directly */
247 scpnt->scsi_done(scpnt);
248}
249
250/**
251 * zfcp_scsi_command_async - worker for zfcp_scsi_queuecommand and
252 * zfcp_scsi_command_sync
253 * @adapter: adapter where scsi command is issued
254 * @unit: unit to which scsi command is sent
255 * @scpnt: scsi command to be sent
256 * @timer: timer to be started if request is successfully initiated
257 *
258 * Note: The scsi_done function must be set in scpnt.
259 */
260int
261zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit,
262 struct scsi_cmnd *scpnt, struct timer_list *timer)
263{
264 int tmp;
265 int retval;
266
267 retval = 0;
268
269	BUG_ON(scpnt->scsi_done == NULL);
270
271	if (unlikely(NULL == unit)) {
272		zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
273		goto out;
274	}
275	BUG_ON((adapter == NULL) || (adapter != unit->port->adapter));
276
277 if (unlikely(
278 atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status) ||
279 !atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status))) {
280 ZFCP_LOG_DEBUG("stopping SCSI I/O on unit 0x%016Lx on port "
281 "0x%016Lx on adapter %s\n",
282 unit->fcp_lun, unit->port->wwpn,
283 zfcp_get_busid_by_adapter(adapter));
284 zfcp_scsi_command_fail(scpnt, DID_ERROR);
285 goto out;
286 }
287
288 if (unlikely(
289 !atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status))) {
290 ZFCP_LOG_DEBUG("adapter %s not ready or unit 0x%016Lx "
291 "on port 0x%016Lx in recovery\n",
292 zfcp_get_busid_by_unit(unit),
293 unit->fcp_lun, unit->port->wwpn);
294 retval = SCSI_MLQUEUE_DEVICE_BUSY;
295 goto out;
296 }
297
298 tmp = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, timer,
299 ZFCP_REQ_AUTO_CLEANUP);
300
301 if (unlikely(tmp < 0)) {
302 ZFCP_LOG_DEBUG("error: initiation of Send FCP Cmnd failed\n");
303 retval = SCSI_MLQUEUE_HOST_BUSY;
304 }
305
306out:
307 return retval;
308}
309
310void
311zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt)
312{
313 struct completion *wait = (struct completion *) scpnt->SCp.ptr;
314 complete(wait);
315}
316
317
318/**
319 * zfcp_scsi_command_sync - send a SCSI command and wait for completion
320 * @unit: unit where command is sent to
321 * @scpnt: scsi command to be sent
322 * @timer: timer to be started if request is successfully initiated
323 * Return: 0
324 *
325 * Errors are indicated in scpnt->result
326 */
327int
328zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt,
329 struct timer_list *timer)
330{
331 int ret;
332 DECLARE_COMPLETION(wait);
333
334 scpnt->SCp.ptr = (void *) &wait; /* silent re-use */
335 scpnt->scsi_done = zfcp_scsi_command_sync_handler;
336 ret = zfcp_scsi_command_async(unit->port->adapter, unit, scpnt, timer);
337 if (ret == 0)
338 wait_for_completion(&wait);
339
340 scpnt->SCp.ptr = NULL;
341
342 return 0;
343}
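/*
 * Usage sketch (hypothetical caller): given a fully set up scsi_cmnd,
 * e.g. one built for an internal INQUIRY,
 *
 *	zfcp_scsi_command_sync(unit, scpnt, NULL);
 *	if (scpnt->result != 0)
 *		... evaluate scpnt->result ...
 *
 * the call returns only after the completion handler above has run;
 * errors are reported solely through scpnt->result.
 */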
344
345/*
346 * function: zfcp_scsi_queuecommand
347 *
348 * purpose: enqueues a SCSI command to the specified target device
349 *
350 * returns: 0 - success, SCSI command enqueued
351 * !0 - failure
352 */
353int
354zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
355 void (*done) (struct scsi_cmnd *))
356{
357 struct zfcp_unit *unit;
358 struct zfcp_adapter *adapter;
359
360 /* reset the status for this request */
361 scpnt->result = 0;
362 scpnt->host_scribble = NULL;
363 scpnt->scsi_done = done;
364
365 /*
366 * figure out adapter and target device
367 * (stored there by zfcp_scsi_slave_alloc)
368 */
369 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
370 unit = (struct zfcp_unit *) scpnt->device->hostdata;
371
372 return zfcp_scsi_command_async(adapter, unit, scpnt, NULL);
373}
374
375/*
376 * function: zfcp_unit_lookup
377 *
378 * purpose:
379 *
380 * returns:
381 *
382 * context:
383 */
384static struct zfcp_unit *
385zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, scsi_id_t id,
386 scsi_lun_t lun)
387{
388 struct zfcp_port *port;
389 struct zfcp_unit *unit, *retval = NULL;
390
391 list_for_each_entry(port, &adapter->port_list_head, list) {
392 if (id != port->scsi_id)
393 continue;
394 list_for_each_entry(unit, &port->unit_list_head, list) {
395 if (lun == unit->scsi_lun) {
396 retval = unit;
397 goto out;
398 }
399 }
400 }
401 out:
402 return retval;
403}
404
405static struct zfcp_port *
406zfcp_port_lookup(struct zfcp_adapter *adapter, int channel, scsi_id_t id)
407{
408 struct zfcp_port *port;
409
410 list_for_each_entry(port, &adapter->port_list_head, list) {
411 if (id == port->scsi_id)
412 return port;
413 }
414 return (struct zfcp_port *) NULL;
415}
416
417/*
418 * function: zfcp_scsi_eh_abort_handler
419 *
420 * purpose: tries to abort the specified (timed out) SCSI command
421 *
422 * note: We do not need to care for a SCSI command which completes
423 *       normally but late, while this abort routine runs.
424 *       We are allowed to return late commands to the SCSI stack.
425 *       It tracks the state of commands and will handle late commands.
426 *       (Usually, the normal completion of late commands is ignored with
427 *       respect to the running abort operation. Grep for 'done_late'
428 *       in the SCSI stack's sources.)
429 *
430 * returns: SUCCESS - command has been aborted and cleaned up in internal
431 * bookkeeping,
432 * SCSI stack won't be called for aborted command
433 * FAILED - otherwise
434 */
435int
436zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
437{
438 int retval = SUCCESS;
439 struct zfcp_fsf_req *new_fsf_req, *old_fsf_req;
440 struct zfcp_adapter *adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
441 struct zfcp_unit *unit = (struct zfcp_unit *) scpnt->device->hostdata;
442 struct zfcp_port *port = unit->port;
443 struct Scsi_Host *scsi_host = scpnt->device->host;
444 union zfcp_req_data *req_data = NULL;
445 unsigned long flags;
446 u32 status = 0;
447
448	/* the components of an abort_dbf record (fixed size record) */
449 u64 dbf_scsi_cmnd = (unsigned long) scpnt;
450 char dbf_opcode[ZFCP_ABORT_DBF_LENGTH];
451 wwn_t dbf_wwn = port->wwpn;
452 fcp_lun_t dbf_fcp_lun = unit->fcp_lun;
453 u64 dbf_retries = scpnt->retries;
454 u64 dbf_allowed = scpnt->allowed;
455 u64 dbf_timeout = 0;
456 u64 dbf_fsf_req = 0;
457 u64 dbf_fsf_status = 0;
458 u64 dbf_fsf_qual[2] = { 0, 0 };
459 char dbf_result[ZFCP_ABORT_DBF_LENGTH] = "##undef";
460
461 memset(dbf_opcode, 0, ZFCP_ABORT_DBF_LENGTH);
462 memcpy(dbf_opcode,
463 scpnt->cmnd,
464 min(scpnt->cmd_len, (unsigned char) ZFCP_ABORT_DBF_LENGTH));
465
466 ZFCP_LOG_INFO("aborting scsi_cmnd=%p on adapter %s\n",
467 scpnt, zfcp_get_busid_by_adapter(adapter));
468
469 spin_unlock_irq(scsi_host->host_lock);
470
471 /*
472 * Race condition between normal (late) completion and abort has
473 * to be avoided.
474	 * All accesses to scsi_req have to be atomic.
475 * scsi_req is usually part of the fsf_req and thus we block the
476 * release of fsf_req as long as we need to access scsi_req.
477 */
478 write_lock_irqsave(&adapter->abort_lock, flags);
479
480 /*
481 * Check whether command has just completed and can not be aborted.
482 * Even if the command has just been completed late, we can access
483 * scpnt since the SCSI stack does not release it at least until
484 * this routine returns. (scpnt is parameter passed to this routine
485 * and must not disappear during abort even on late completion.)
486 */
487 req_data = (union zfcp_req_data *) scpnt->host_scribble;
488 /* DEBUG */
489 ZFCP_LOG_DEBUG("req_data=%p\n", req_data);
490 if (!req_data) {
491 ZFCP_LOG_DEBUG("late command completion overtook abort\n");
492 /*
493 * That's it.
494 * Do not initiate abort but return SUCCESS.
495 */
496 write_unlock_irqrestore(&adapter->abort_lock, flags);
497 retval = SUCCESS;
498 strncpy(dbf_result, "##late1", ZFCP_ABORT_DBF_LENGTH);
499 goto out;
500 }
501
502 /* Figure out which fsf_req needs to be aborted. */
503 old_fsf_req = req_data->send_fcp_command_task.fsf_req;
504
505 dbf_fsf_req = (unsigned long) old_fsf_req;
506 dbf_timeout =
507 (jiffies - req_data->send_fcp_command_task.start_jiffies) / HZ;
508
509 ZFCP_LOG_DEBUG("old_fsf_req=%p\n", old_fsf_req);
510 if (!old_fsf_req) {
511 write_unlock_irqrestore(&adapter->abort_lock, flags);
512 ZFCP_LOG_NORMAL("bug: no old fsf request found\n");
513 ZFCP_LOG_NORMAL("req_data:\n");
514 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
515 (char *) req_data, sizeof (union zfcp_req_data));
516 ZFCP_LOG_NORMAL("scsi_cmnd:\n");
517 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
518 (char *) scpnt, sizeof (struct scsi_cmnd));
519 retval = FAILED;
520 strncpy(dbf_result, "##bug:r", ZFCP_ABORT_DBF_LENGTH);
521 goto out;
522 }
523 old_fsf_req->data.send_fcp_command_task.scsi_cmnd = NULL;
524 /* mark old request as being aborted */
525 old_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
526 /*
527 * We have to collect all information (e.g. unit) needed by
528 * zfcp_fsf_abort_fcp_command before calling that routine
529 * since that routine is not allowed to access
530 * fsf_req which it is going to abort.
531	 * This is because we need to release the fsf_req_list_lock
532 * before calling zfcp_fsf_abort_fcp_command.
533 * Since this lock will not be held, fsf_req may complete
534 * late and may be released meanwhile.
535 */
536 ZFCP_LOG_DEBUG("unit 0x%016Lx (%p)\n", unit->fcp_lun, unit);
537
538	/*
539	 * We block (i.e. call schedule).
540	 * That's why we must release the lock and enable
541	 * interrupts beforehand.
542	 * On the other hand, we no longer need the lock since
543	 * all critical accesses to scsi_req are done.
544	 */
545 write_unlock_irqrestore(&adapter->abort_lock, flags);
546 /* call FSF routine which does the abort */
547 new_fsf_req = zfcp_fsf_abort_fcp_command((unsigned long) old_fsf_req,
548 adapter, unit, 0);
549 ZFCP_LOG_DEBUG("new_fsf_req=%p\n", new_fsf_req);
550 if (!new_fsf_req) {
551 retval = FAILED;
552 ZFCP_LOG_NORMAL("error: initiation of Abort FCP Cmnd "
553 "failed\n");
554 strncpy(dbf_result, "##nores", ZFCP_ABORT_DBF_LENGTH);
555 goto out;
556 }
557
558 /* wait for completion of abort */
559 ZFCP_LOG_DEBUG("waiting for cleanup...\n");
560#if 1
561 /*
562 * FIXME:
563 * copying zfcp_fsf_req_wait_and_cleanup code is not really nice
564 */
565 __wait_event(new_fsf_req->completion_wq,
566 new_fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
567 status = new_fsf_req->status;
568 dbf_fsf_status = new_fsf_req->qtcb->header.fsf_status;
569	/*
570	 * Ralph's special debug load provides timestamps in the FSF
571	 * status qualifier. This might be specified later if it
572	 * proves useful for debugging aborts.
573	 */
574 dbf_fsf_qual[0] =
575 *(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[0];
576 dbf_fsf_qual[1] =
577 *(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[2];
578 zfcp_fsf_req_cleanup(new_fsf_req);
579#else
580 retval = zfcp_fsf_req_wait_and_cleanup(new_fsf_req,
581 ZFCP_UNINTERRUPTIBLE, &status);
582#endif
583 ZFCP_LOG_DEBUG("Waiting for cleanup complete, status=0x%x\n", status);
584 /* status should be valid since signals were not permitted */
585 if (status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
586 retval = SUCCESS;
587 strncpy(dbf_result, "##succ", ZFCP_ABORT_DBF_LENGTH);
588 } else if (status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
589 retval = SUCCESS;
590 strncpy(dbf_result, "##late2", ZFCP_ABORT_DBF_LENGTH);
591 } else {
592 retval = FAILED;
593 strncpy(dbf_result, "##fail", ZFCP_ABORT_DBF_LENGTH);
594 }
595
596 out:
597 debug_event(adapter->abort_dbf, 1, &dbf_scsi_cmnd, sizeof (u64));
598 debug_event(adapter->abort_dbf, 1, &dbf_opcode, ZFCP_ABORT_DBF_LENGTH);
599 debug_event(adapter->abort_dbf, 1, &dbf_wwn, sizeof (wwn_t));
600 debug_event(adapter->abort_dbf, 1, &dbf_fcp_lun, sizeof (fcp_lun_t));
601 debug_event(adapter->abort_dbf, 1, &dbf_retries, sizeof (u64));
602 debug_event(adapter->abort_dbf, 1, &dbf_allowed, sizeof (u64));
603 debug_event(adapter->abort_dbf, 1, &dbf_timeout, sizeof (u64));
604 debug_event(adapter->abort_dbf, 1, &dbf_fsf_req, sizeof (u64));
605 debug_event(adapter->abort_dbf, 1, &dbf_fsf_status, sizeof (u64));
606 debug_event(adapter->abort_dbf, 1, &dbf_fsf_qual[0], sizeof (u64));
607 debug_event(adapter->abort_dbf, 1, &dbf_fsf_qual[1], sizeof (u64));
608 debug_text_event(adapter->abort_dbf, 1, dbf_result);
609
610 spin_lock_irq(scsi_host->host_lock);
611 return retval;
612}
613
614/*
615 * function: zfcp_scsi_eh_device_reset_handler
616 *
617 * purpose:
618 *
619 * returns:
620 */
621int
622zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
623{
624 int retval;
625 struct zfcp_unit *unit = (struct zfcp_unit *) scpnt->device->hostdata;
626 struct Scsi_Host *scsi_host = scpnt->device->host;
627
628 spin_unlock_irq(scsi_host->host_lock);
629
630 if (!unit) {
631 ZFCP_LOG_NORMAL("bug: Tried reset for nonexistent unit\n");
632 retval = SUCCESS;
633 goto out;
634 }
635 ZFCP_LOG_NORMAL("resetting unit 0x%016Lx\n", unit->fcp_lun);
636
637 /*
638 * If we do not know whether the unit supports 'logical unit reset'
639 * then try 'logical unit reset' and proceed with 'target reset'
640 * if 'logical unit reset' fails.
641 * If the unit is known not to support 'logical unit reset' then
642 * skip 'logical unit reset' and try 'target reset' immediately.
643 */
644 if (!atomic_test_mask(ZFCP_STATUS_UNIT_NOTSUPPUNITRESET,
645 &unit->status)) {
646 retval =
647 zfcp_task_management_function(unit, FCP_LOGICAL_UNIT_RESET);
648 if (retval) {
649 ZFCP_LOG_DEBUG("unit reset failed (unit=%p)\n", unit);
650 if (retval == -ENOTSUPP)
651 atomic_set_mask
652 (ZFCP_STATUS_UNIT_NOTSUPPUNITRESET,
653 &unit->status);
654 /* fall through and try 'target reset' next */
655 } else {
656 ZFCP_LOG_DEBUG("unit reset succeeded (unit=%p)\n",
657 unit);
658 /* avoid 'target reset' */
659 retval = SUCCESS;
660 goto out;
661 }
662 }
663 retval = zfcp_task_management_function(unit, FCP_TARGET_RESET);
664 if (retval) {
665 ZFCP_LOG_DEBUG("target reset failed (unit=%p)\n", unit);
666 retval = FAILED;
667 } else {
668 ZFCP_LOG_DEBUG("target reset succeeded (unit=%p)\n", unit);
669 retval = SUCCESS;
670 }
671 out:
672 spin_lock_irq(scsi_host->host_lock);
673 return retval;
674}
675
676static int
677zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags)
678{
679 struct zfcp_adapter *adapter = unit->port->adapter;
680 int retval;
681 int status;
682 struct zfcp_fsf_req *fsf_req;
683
684 /* issue task management function */
685 fsf_req = zfcp_fsf_send_fcp_command_task_management
686 (adapter, unit, tm_flags, 0);
687 if (!fsf_req) {
688 ZFCP_LOG_INFO("error: creation of task management request "
689 "failed for unit 0x%016Lx on port 0x%016Lx on "
690 "adapter %s\n", unit->fcp_lun, unit->port->wwpn,
691 zfcp_get_busid_by_adapter(adapter));
692 retval = -ENOMEM;
693 goto out;
694 }
695
696 retval = zfcp_fsf_req_wait_and_cleanup(fsf_req,
697 ZFCP_UNINTERRUPTIBLE, &status);
698 /*
699 * check completion status of task management function
700 * (status should always be valid since no signals permitted)
701 */
702 if (status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED)
703 retval = -EIO;
704 else if (status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP)
705 retval = -ENOTSUPP;
706 else
707 retval = 0;
708 out:
709 return retval;
710}
711
712/*
713 * function: zfcp_scsi_eh_bus_reset_handler
714 *
715 * purpose:
716 *
717 * returns:
718 */
719int
720zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt)
721{
722 int retval = 0;
723 struct zfcp_unit *unit;
724 struct Scsi_Host *scsi_host = scpnt->device->host;
725
726 spin_unlock_irq(scsi_host->host_lock);
727
728 unit = (struct zfcp_unit *) scpnt->device->hostdata;
729 ZFCP_LOG_NORMAL("bus reset because of problems with "
730 "unit 0x%016Lx\n", unit->fcp_lun);
731 zfcp_erp_adapter_reopen(unit->port->adapter, 0);
732 zfcp_erp_wait(unit->port->adapter);
733 retval = SUCCESS;
734
735 spin_lock_irq(scsi_host->host_lock);
736 return retval;
737}
738
739/*
740 * function: zfcp_scsi_eh_host_reset_handler
741 *
742 * purpose:
743 *
744 * returns:
745 */
746int
747zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
748{
749 int retval = 0;
750 struct zfcp_unit *unit;
751 struct Scsi_Host *scsi_host = scpnt->device->host;
752
753 spin_unlock_irq(scsi_host->host_lock);
754
755 unit = (struct zfcp_unit *) scpnt->device->hostdata;
756 ZFCP_LOG_NORMAL("host reset because of problems with "
757 "unit 0x%016Lx\n", unit->fcp_lun);
758 zfcp_erp_adapter_reopen(unit->port->adapter, 0);
759 zfcp_erp_wait(unit->port->adapter);
760 retval = SUCCESS;
761
762 spin_lock_irq(scsi_host->host_lock);
763 return retval;
764}
765
766/*
767 * function:
768 *
769 * purpose:
770 *
771 * returns:
772 */
773int
774zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
775{
776 int retval = 0;
777 static unsigned int unique_id = 0;
778
779 /* register adapter as SCSI host with mid layer of SCSI stack */
780 adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template,
781 sizeof (struct zfcp_adapter *));
782 if (!adapter->scsi_host) {
783 ZFCP_LOG_NORMAL("error: registration with SCSI stack failed "
784 "for adapter %s ",
785 zfcp_get_busid_by_adapter(adapter));
786 retval = -EIO;
787 goto out;
788 }
789 ZFCP_LOG_DEBUG("host registered, scsi_host=%p\n", adapter->scsi_host);
790
791 /* tell the SCSI stack some characteristics of this adapter */
792 adapter->scsi_host->max_id = 1;
793 adapter->scsi_host->max_lun = 1;
794 adapter->scsi_host->max_channel = 0;
795 adapter->scsi_host->unique_id = unique_id++; /* FIXME */
796 adapter->scsi_host->max_cmd_len = ZFCP_MAX_SCSI_CMND_LENGTH;
797 adapter->scsi_host->transportt = zfcp_transport_template;
798 /*
799	 * save the host number separately for reverse mapping to avoid a race condition
800 */
801 adapter->scsi_host_no = adapter->scsi_host->host_no;
802
803 /*
804 * save a pointer to our own adapter data structure within
805 * hostdata field of SCSI host data structure
806 */
807 adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
808
809 if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
810 scsi_host_put(adapter->scsi_host);
811 retval = -EIO;
812 goto out;
813 }
814 atomic_set_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
815 out:
816 return retval;
817}
818
819/*
820 * function:
821 *
822 * purpose:
823 *
824 * returns:
825 */
826void
827zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
828{
829 struct Scsi_Host *shost;
830
831 shost = adapter->scsi_host;
832 if (!shost)
833 return;
834 scsi_remove_host(shost);
835 scsi_host_put(shost);
836 adapter->scsi_host = NULL;
837 adapter->scsi_host_no = 0;
838 atomic_clear_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
839
840 return;
841}
842
843
844void
845zfcp_fsf_start_scsi_er_timer(struct zfcp_adapter *adapter)
846{
847 adapter->scsi_er_timer.function = zfcp_fsf_scsi_er_timeout_handler;
848 adapter->scsi_er_timer.data = (unsigned long) adapter;
849 adapter->scsi_er_timer.expires = jiffies + ZFCP_SCSI_ER_TIMEOUT;
850 add_timer(&adapter->scsi_er_timer);
851}
852
853/*
854 * Support functions for FC transport class
855 */
856static void
857zfcp_get_port_id(struct scsi_target *starget)
858{
859 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
860 struct zfcp_adapter *adapter = (struct zfcp_adapter *)shost->hostdata[0];
861 struct zfcp_port *port;
862 unsigned long flags;
863
864 read_lock_irqsave(&zfcp_data.config_lock, flags);
865 port = zfcp_port_lookup(adapter, starget->channel, starget->id);
866 if (port)
867 fc_starget_port_id(starget) = port->d_id;
868 else
869 fc_starget_port_id(starget) = -1;
870 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
871}
872
873static void
874zfcp_get_port_name(struct scsi_target *starget)
875{
876 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
877 struct zfcp_adapter *adapter = (struct zfcp_adapter *)shost->hostdata[0];
878 struct zfcp_port *port;
879 unsigned long flags;
880
881 read_lock_irqsave(&zfcp_data.config_lock, flags);
882 port = zfcp_port_lookup(adapter, starget->channel, starget->id);
883 if (port)
884 fc_starget_port_name(starget) = port->wwpn;
885 else
886 fc_starget_port_name(starget) = -1;
887 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
888}
889
890static void
891zfcp_get_node_name(struct scsi_target *starget)
892{
893 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
894 struct zfcp_adapter *adapter = (struct zfcp_adapter *)shost->hostdata[0];
895 struct zfcp_port *port;
896 unsigned long flags;
897
898 read_lock_irqsave(&zfcp_data.config_lock, flags);
899 port = zfcp_port_lookup(adapter, starget->channel, starget->id);
900 if (port)
901 fc_starget_node_name(starget) = port->wwnn;
902 else
903 fc_starget_node_name(starget) = -1;
904 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
905}
906
907struct fc_function_template zfcp_transport_functions = {
908 .get_starget_port_id = zfcp_get_port_id,
909 .get_starget_port_name = zfcp_get_port_name,
910 .get_starget_node_name = zfcp_get_node_name,
911 .show_starget_port_id = 1,
912 .show_starget_port_name = 1,
913 .show_starget_node_name = 1,
914};
915
916/**
917 * ZFCP_DEFINE_SCSI_ATTR
918 * @_name: name of show attribute
919 * @_format: format string
920 * @_value: value to print
921 *
922 * Generates attribute for a unit.
923 */
924#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value) \
925static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
926 char *buf) \
927{ \
928 struct scsi_device *sdev; \
929 struct zfcp_unit *unit; \
930 \
931 sdev = to_scsi_device(dev); \
932 unit = sdev->hostdata; \
933 return sprintf(buf, _format, _value); \
934} \
935 \
936static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
937
938ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", zfcp_get_busid_by_unit(unit));
939ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", unit->port->wwpn);
940ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", unit->fcp_lun);
941
942static struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
943 &dev_attr_fcp_lun,
944 &dev_attr_wwpn,
945 &dev_attr_hba_id,
946 NULL
947};
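/*
 * These attributes appear in the sysfs directory of each SCSI device
 * backed by a zfcp unit, e.g. (paths may vary by kernel version):
 *
 *	# cat /sys/bus/scsi/devices/<h:c:t:l>/hba_id
 *	# cat /sys/bus/scsi/devices/<h:c:t:l>/wwpn
 *	# cat /sys/bus/scsi/devices/<h:c:t:l>/fcp_lun
 */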
948
949#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c
new file mode 100644
index 000000000000..ff28ade1dfc7
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_sysfs_adapter.c
@@ -0,0 +1,298 @@
1/*
2 * linux/drivers/s390/scsi/zfcp_sysfs_adapter.c
3 *
4 * FCP adapter driver for IBM eServer zSeries
5 *
6 * sysfs adapter related routines
7 *
8 * (C) Copyright IBM Corp. 2003, 2004
9 *
10 * Authors:
11 * Martin Peschke <mpeschke@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 * Andreas Herrmann <aherrman@de.ibm.com>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2, or (at your option)
18 * any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 */
29
30#define ZFCP_SYSFS_ADAPTER_C_REVISION "$Revision: 1.38 $"
31
32#include "zfcp_ext.h"
33
34#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
35
36static const char fc_topologies[5][25] = {
37 "<error>",
38 "point-to-point",
39 "fabric",
40 "arbitrated loop",
41 "fabric (virt. adapter)"
42};
43
44/**
45 * ZFCP_DEFINE_ADAPTER_ATTR
46 * @_name: name of show attribute
47 * @_format: format string
48 * @_value: value to print
49 *
50 * Generates attributes for an adapter.
51 */
52#define ZFCP_DEFINE_ADAPTER_ATTR(_name, _format, _value) \
53static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
54 char *buf) \
55{ \
56 struct zfcp_adapter *adapter; \
57 \
58 adapter = dev_get_drvdata(dev); \
59 return sprintf(buf, _format, _value); \
60} \
61 \
62static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
63
64ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
65ZFCP_DEFINE_ADAPTER_ATTR(wwnn, "0x%016llx\n", adapter->wwnn);
66ZFCP_DEFINE_ADAPTER_ATTR(wwpn, "0x%016llx\n", adapter->wwpn);
67ZFCP_DEFINE_ADAPTER_ATTR(s_id, "0x%06x\n", adapter->s_id);
68ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
69ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
70ZFCP_DEFINE_ADAPTER_ATTR(fc_link_speed, "%d Gb/s\n", adapter->fc_link_speed);
71ZFCP_DEFINE_ADAPTER_ATTR(fc_service_class, "%d\n", adapter->fc_service_class);
72ZFCP_DEFINE_ADAPTER_ATTR(fc_topology, "%s\n",
73 fc_topologies[adapter->fc_topology]);
74ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n",
75 adapter->hardware_version);
76ZFCP_DEFINE_ADAPTER_ATTR(serial_number, "%17s\n", adapter->serial_number);
77ZFCP_DEFINE_ADAPTER_ATTR(scsi_host_no, "0x%x\n", adapter->scsi_host_no);
78ZFCP_DEFINE_ADAPTER_ATTR(in_recovery, "%d\n", atomic_test_mask
79 (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status));
80
81/**
82 * zfcp_sysfs_port_add_store - add a port to sysfs tree
83 * @dev: pointer to the owning device
84 * @buf: pointer to input buffer
85 * @count: number of bytes in buffer
86 *
87 * Store function of the "port_add" attribute of an adapter.
88 */
89static ssize_t
90zfcp_sysfs_port_add_store(struct device *dev, const char *buf, size_t count)
91{
92 wwn_t wwpn;
93 char *endp;
94 struct zfcp_adapter *adapter;
95 struct zfcp_port *port;
96 int retval = -EINVAL;
97
98 down(&zfcp_data.config_sema);
99
100 adapter = dev_get_drvdata(dev);
101 if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
102 retval = -EBUSY;
103 goto out;
104 }
105
106 wwpn = simple_strtoull(buf, &endp, 0);
107 if ((endp + 1) < (buf + count))
108 goto out;
109
110 port = zfcp_port_enqueue(adapter, wwpn, 0, 0);
111 if (!port)
112 goto out;
113
114 retval = 0;
115
116 zfcp_erp_port_reopen(port, 0);
117 zfcp_erp_wait(port->adapter);
118 zfcp_port_put(port);
119 out:
120 up(&zfcp_data.config_sema);
121 return retval ? retval : (ssize_t) count;
122}
123
124static DEVICE_ATTR(port_add, S_IWUSR, NULL, zfcp_sysfs_port_add_store);
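/*
 * Usage sketch: writing a WWPN to the adapter's "port_add" attribute
 * enqueues and opens the port, e.g. (the bus-ID is an example):
 *
 *	# echo 0x5005076300c213e9 > \
 *		/sys/bus/ccw/drivers/zfcp/0.0.4711/port_add
 *
 * "port_remove" below accepts the same syntax for removing an unused
 * port.
 */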
125
126/**
127 * zfcp_sysfs_port_remove_store - remove a port from sysfs tree
129 * @dev: pointer to the owning device
129 * @buf: pointer to input buffer
130 * @count: number of bytes in buffer
131 *
132 * Store function of the "port_remove" attribute of an adapter.
133 */
134static ssize_t
135zfcp_sysfs_port_remove_store(struct device *dev, const char *buf, size_t count)
136{
137 struct zfcp_adapter *adapter;
138 struct zfcp_port *port;
139 wwn_t wwpn;
140 char *endp;
141 int retval = 0;
142
143 down(&zfcp_data.config_sema);
144
145 adapter = dev_get_drvdata(dev);
146 if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
147 retval = -EBUSY;
148 goto out;
149 }
150
151 wwpn = simple_strtoull(buf, &endp, 0);
152 if ((endp + 1) < (buf + count)) {
153 retval = -EINVAL;
154 goto out;
155 }
156
157 write_lock_irq(&zfcp_data.config_lock);
158 port = zfcp_get_port_by_wwpn(adapter, wwpn);
159 if (port && (atomic_read(&port->refcount) == 0)) {
160 zfcp_port_get(port);
161 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
162 list_move(&port->list, &adapter->port_remove_lh);
163 }
164 else {
165 port = NULL;
166 }
167 write_unlock_irq(&zfcp_data.config_lock);
168
169 if (!port) {
170 retval = -ENXIO;
171 goto out;
172 }
173
174 zfcp_erp_port_shutdown(port, 0);
175 zfcp_erp_wait(adapter);
176 zfcp_port_put(port);
177 zfcp_port_dequeue(port);
178 out:
179 up(&zfcp_data.config_sema);
180 return retval ? retval : (ssize_t) count;
181}
182
183static DEVICE_ATTR(port_remove, S_IWUSR, NULL, zfcp_sysfs_port_remove_store);
184
185/**
186 * zfcp_sysfs_adapter_failed_store - failed state of adapter
187 * @dev: pointer to the owning device
188 * @buf: pointer to input buffer
189 * @count: number of bytes in buffer
190 *
191 * Store function of the "failed" attribute of an adapter.
192 * If a "0" gets written to "failed", error recovery will be
193 * started for the corresponding adapter.
194 */
195static ssize_t
196zfcp_sysfs_adapter_failed_store(struct device *dev,
197 const char *buf, size_t count)
198{
199 struct zfcp_adapter *adapter;
200 unsigned int val;
201 char *endp;
202 int retval = 0;
203
204 down(&zfcp_data.config_sema);
205
206 adapter = dev_get_drvdata(dev);
207 if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
208 retval = -EBUSY;
209 goto out;
210 }
211
212 val = simple_strtoul(buf, &endp, 0);
213 if (((endp + 1) < (buf + count)) || (val != 0)) {
214 retval = -EINVAL;
215 goto out;
216 }
217
218 zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING,
219 ZFCP_SET);
220 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
221 zfcp_erp_wait(adapter);
222 out:
223 up(&zfcp_data.config_sema);
224 return retval ? retval : (ssize_t) count;
225}
226
227/**
228 * zfcp_sysfs_adapter_failed_show - failed state of adapter
229 * @dev: pointer to the owning device
230 * @buf: pointer to output buffer
231 *
232 * Show function of "failed" attribute of adapter. Will be
233 * "0" if adapter is working, otherwise "1".
234 */
235static ssize_t
236zfcp_sysfs_adapter_failed_show(struct device *dev, char *buf)
237{
238 struct zfcp_adapter *adapter;
239
240 adapter = dev_get_drvdata(dev);
241 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status))
242 return sprintf(buf, "1\n");
243 else
244 return sprintf(buf, "0\n");
245}
246
247static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_adapter_failed_show,
248 zfcp_sysfs_adapter_failed_store);
249
250static struct attribute *zfcp_adapter_attrs[] = {
251 &dev_attr_failed.attr,
252 &dev_attr_in_recovery.attr,
253 &dev_attr_port_remove.attr,
254 &dev_attr_port_add.attr,
255 &dev_attr_wwnn.attr,
256 &dev_attr_wwpn.attr,
257 &dev_attr_s_id.attr,
258 &dev_attr_card_version.attr,
259 &dev_attr_lic_version.attr,
260 &dev_attr_fc_link_speed.attr,
261 &dev_attr_fc_service_class.attr,
262 &dev_attr_fc_topology.attr,
263 &dev_attr_scsi_host_no.attr,
264 &dev_attr_status.attr,
265 &dev_attr_hardware_version.attr,
266 &dev_attr_serial_number.attr,
267 NULL
268};
269
270static struct attribute_group zfcp_adapter_attr_group = {
271 .attrs = zfcp_adapter_attrs,
272};
273
274/**
275 * zfcp_sysfs_adapter_create_files - create sysfs adapter files
276 * @dev: pointer to the owning device
277 *
278 * Create all attributes of the sysfs representation of an adapter.
279 */
280int
281zfcp_sysfs_adapter_create_files(struct device *dev)
282{
283 return sysfs_create_group(&dev->kobj, &zfcp_adapter_attr_group);
284}
285
286/**
287 * zfcp_sysfs_adapter_remove_files - remove sysfs adapter files
288 * @dev: pointer to the owning device
289 *
290 * Remove all attributes of the sysfs representation of an adapter.
291 */
292void
293zfcp_sysfs_adapter_remove_files(struct device *dev)
294{
295 sysfs_remove_group(&dev->kobj, &zfcp_adapter_attr_group);
296}
297
298#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_sysfs_driver.c b/drivers/s390/scsi/zfcp_sysfs_driver.c
new file mode 100644
index 000000000000..77a5e2dcc0ff
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_sysfs_driver.c
@@ -0,0 +1,135 @@
1/*
2 * linux/drivers/s390/scsi/zfcp_sysfs_driver.c
3 *
4 * FCP adapter driver for IBM eServer zSeries
5 *
6 * sysfs driver related routines
7 *
8 * (C) Copyright IBM Corp. 2003, 2004
9 *
10 * Authors:
11 * Martin Peschke <mpeschke@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 * Andreas Herrmann <aherrman@de.ibm.com>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2, or (at your option)
18 * any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 */
29
30#define ZFCP_SYSFS_DRIVER_C_REVISION "$Revision: 1.17 $"
31
32#include "zfcp_ext.h"
33
34#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
35
36/**
37 * ZFCP_DEFINE_DRIVER_ATTR - define for all loglevels sysfs attributes
38 * @_name: name of attribute
39 * @_define: name of ZFCP loglevel define
40 *
41 * Generates store function for a sysfs loglevel attribute of zfcp driver.
42 */
43#define ZFCP_DEFINE_DRIVER_ATTR(_name, _define) \
44static ssize_t zfcp_sysfs_loglevel_##_name##_store(struct device_driver *drv, \
45 const char *buf, \
46 size_t count) \
47{ \
48 unsigned int loglevel; \
49 unsigned int new_loglevel; \
50 char *endp; \
51 \
52 new_loglevel = simple_strtoul(buf, &endp, 0); \
53 if ((endp + 1) < (buf + count)) \
54 return -EINVAL; \
55 if (new_loglevel > 3) \
56 return -EINVAL; \
57 down(&zfcp_data.config_sema); \
58 loglevel = atomic_read(&zfcp_data.loglevel); \
59 loglevel &= ~((unsigned int) 0xf << (ZFCP_LOG_AREA_##_define << 2)); \
60 loglevel |= new_loglevel << (ZFCP_LOG_AREA_##_define << 2); \
61 atomic_set(&zfcp_data.loglevel, loglevel); \
62 up(&zfcp_data.config_sema); \
63 return count; \
64} \
65 \
66static ssize_t zfcp_sysfs_loglevel_##_name##_show(struct device_driver *dev, \
67 char *buf) \
68{ \
69 return sprintf(buf,"%d\n", (unsigned int) \
70 ZFCP_GET_LOG_VALUE(ZFCP_LOG_AREA_##_define)); \
71} \
72 \
73static DRIVER_ATTR(loglevel_##_name, S_IWUSR | S_IRUGO, \
74 zfcp_sysfs_loglevel_##_name##_show, \
75 zfcp_sysfs_loglevel_##_name##_store);
76
77ZFCP_DEFINE_DRIVER_ATTR(other, OTHER);
78ZFCP_DEFINE_DRIVER_ATTR(scsi, SCSI);
79ZFCP_DEFINE_DRIVER_ATTR(fsf, FSF);
80ZFCP_DEFINE_DRIVER_ATTR(config, CONFIG);
81ZFCP_DEFINE_DRIVER_ATTR(cio, CIO);
82ZFCP_DEFINE_DRIVER_ATTR(qdio, QDIO);
83ZFCP_DEFINE_DRIVER_ATTR(erp, ERP);
84ZFCP_DEFINE_DRIVER_ATTR(fc, FC);
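/*
 * Worked example of the nibble packing above: each log area owns four
 * bits of zfcp_data.loglevel at bit position (area << 2). Assuming
 * ZFCP_LOG_AREA_SCSI == 1, writing "3" to loglevel_scsi clears bits
 * 4-7 and ORs in 3 << 4, leaving every other area's level untouched.
 */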
85
86static ssize_t zfcp_sysfs_version_show(struct device_driver *dev,
87 char *buf)
88{
89 return sprintf(buf, "%s\n", zfcp_data.driver_version);
90}
91
92static DRIVER_ATTR(version, S_IRUGO, zfcp_sysfs_version_show, NULL);
93
94static struct attribute *zfcp_driver_attrs[] = {
95 &driver_attr_loglevel_other.attr,
96 &driver_attr_loglevel_scsi.attr,
97 &driver_attr_loglevel_fsf.attr,
98 &driver_attr_loglevel_config.attr,
99 &driver_attr_loglevel_cio.attr,
100 &driver_attr_loglevel_qdio.attr,
101 &driver_attr_loglevel_erp.attr,
102 &driver_attr_loglevel_fc.attr,
103 &driver_attr_version.attr,
104 NULL
105};
106
107static struct attribute_group zfcp_driver_attr_group = {
108 .attrs = zfcp_driver_attrs,
109};
110
111/**
112 * zfcp_sysfs_driver_create_files - create sysfs driver files
113 * @drv: pointer to the zfcp device_driver
114 *
115 * Create all sysfs attributes of the zfcp device driver.
116 */
117int
118zfcp_sysfs_driver_create_files(struct device_driver *drv)
119{
120 return sysfs_create_group(&drv->kobj, &zfcp_driver_attr_group);
121}
122
123/**
124 * zfcp_sysfs_driver_remove_files - remove sysfs driver files
125 * @drv: pointer to the zfcp device_driver
126 *
127 * Remove all sysfs attributes of the zfcp device driver.
128 */
129void
130zfcp_sysfs_driver_remove_files(struct device_driver *drv)
131{
132 sysfs_remove_group(&drv->kobj, &zfcp_driver_attr_group);
133}
134
135#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_sysfs_port.c b/drivers/s390/scsi/zfcp_sysfs_port.c
new file mode 100644
index 000000000000..6aafb2abb4b5
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_sysfs_port.c
@@ -0,0 +1,311 @@
1/*
2 * linux/drivers/s390/scsi/zfcp_sysfs_port.c
3 *
4 * FCP adapter driver for IBM eServer zSeries
5 *
6 * sysfs port related routines
7 *
8 * (C) Copyright IBM Corp. 2003, 2004
9 *
10 * Authors:
11 * Martin Peschke <mpeschke@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 * Andreas Herrmann <aherrman@de.ibm.com>
14 * Volker Sameske <sameske@de.ibm.com>
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2, or (at your option)
19 * any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29 */
30
31#define ZFCP_SYSFS_PORT_C_REVISION "$Revision: 1.47 $"
32
33#include "zfcp_ext.h"
34
35#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
36
37/**
38 * zfcp_sysfs_port_release - called when the struct device of a port is released
39 * @dev: pointer to the owning device
40 */
41void
42zfcp_sysfs_port_release(struct device *dev)
43{
44 kfree(dev);
45}
46
/**
 * ZFCP_DEFINE_PORT_ATTR
 * @_name: name of show attribute
 * @_format: format string
 * @_value: value to print
 *
 * Generates attributes for a port.
 */
#define ZFCP_DEFINE_PORT_ATTR(_name, _format, _value)                 \
static ssize_t zfcp_sysfs_port_##_name##_show(struct device *dev,     \
					      char *buf)               \
{                                                                      \
	struct zfcp_port *port;                                        \
								       \
	port = dev_get_drvdata(dev);                                   \
	return sprintf(buf, _format, _value);                          \
}                                                                      \
								       \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_port_##_name##_show, NULL);

ZFCP_DEFINE_PORT_ATTR(status, "0x%08x\n", atomic_read(&port->status));
ZFCP_DEFINE_PORT_ATTR(wwnn, "0x%016llx\n", port->wwnn);
ZFCP_DEFINE_PORT_ATTR(d_id, "0x%06x\n", port->d_id);
ZFCP_DEFINE_PORT_ATTR(scsi_id, "0x%x\n", port->scsi_id);
ZFCP_DEFINE_PORT_ATTR(in_recovery, "%d\n", atomic_test_mask
		      (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status));
ZFCP_DEFINE_PORT_ATTR(access_denied, "%d\n", atomic_test_mask
		      (ZFCP_STATUS_COMMON_ACCESS_DENIED, &port->status));

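To make the macro concrete, this is approximately what ZFCP_DEFINE_PORT_ATTR expands to for the d_id attribute (a sketch, modulo whitespace):

static ssize_t zfcp_sysfs_port_d_id_show(struct device *dev, char *buf)
{
	struct zfcp_port *port;

	port = dev_get_drvdata(dev);
	return sprintf(buf, "0x%06x\n", port->d_id);
}

static DEVICE_ATTR(d_id, S_IRUGO, zfcp_sysfs_port_d_id_show, NULL);

Each expansion thus yields one show routine plus a dev_attr_<name> object that the attribute groups further down can reference.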
/**
 * zfcp_sysfs_unit_add_store - add a unit to the sysfs tree
 * @dev: pointer to the owning port's device
 * @buf: pointer to input buffer
 * @count: number of bytes in buffer
 *
 * Store function of the "unit_add" attribute of a port.
 */
static ssize_t
zfcp_sysfs_unit_add_store(struct device *dev, const char *buf, size_t count)
{
	fcp_lun_t fcp_lun;
	char *endp;
	struct zfcp_port *port;
	struct zfcp_unit *unit;
	int retval = -EINVAL;

	down(&zfcp_data.config_sema);

	port = dev_get_drvdata(dev);
	if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
		retval = -EBUSY;
		goto out;
	}

	/* endp + 1 tolerates exactly one trailing character, typically
	 * the newline appended by "echo" */
	fcp_lun = simple_strtoull(buf, &endp, 0);
	if ((endp + 1) < (buf + count))
		goto out;

	unit = zfcp_unit_enqueue(port, fcp_lun);
	if (!unit)
		goto out;

	retval = 0;

	zfcp_erp_unit_reopen(unit, 0);
	zfcp_erp_wait(unit->port->adapter);
	zfcp_unit_put(unit);
 out:
	up(&zfcp_data.config_sema);
	return retval ? retval : (ssize_t) count;
}

static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);

/**
 * zfcp_sysfs_unit_remove_store - remove a unit from the sysfs tree
 * @dev: pointer to the owning port's device
 * @buf: pointer to input buffer
 * @count: number of bytes in buffer
 *
 * Store function of the "unit_remove" attribute of a port.
 */
static ssize_t
zfcp_sysfs_unit_remove_store(struct device *dev, const char *buf, size_t count)
{
	struct zfcp_port *port;
	struct zfcp_unit *unit;
	fcp_lun_t fcp_lun;
	char *endp;
	int retval = 0;

	down(&zfcp_data.config_sema);

	port = dev_get_drvdata(dev);
	if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
		retval = -EBUSY;
		goto out;
	}

	fcp_lun = simple_strtoull(buf, &endp, 0);
	if ((endp + 1) < (buf + count)) {
		retval = -EINVAL;
		goto out;
	}

	/* only remove a unit that is currently unused; mark it as going
	 * away and take it off the port's unit list under the lock */
	write_lock_irq(&zfcp_data.config_lock);
	unit = zfcp_get_unit_by_lun(port, fcp_lun);
	if (unit && (atomic_read(&unit->refcount) == 0)) {
		zfcp_unit_get(unit);
		atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
		list_move(&unit->list, &port->unit_remove_lh);
	} else {
		unit = NULL;
	}
	write_unlock_irq(&zfcp_data.config_lock);

	if (!unit) {
		retval = -ENXIO;
		goto out;
	}

	zfcp_erp_unit_shutdown(unit, 0);
	zfcp_erp_wait(unit->port->adapter);
	zfcp_unit_put(unit);
	zfcp_unit_dequeue(unit);
 out:
	up(&zfcp_data.config_sema);
	return retval ? retval : (ssize_t) count;
}

static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);

/**
 * zfcp_sysfs_port_failed_store - failed state of port
 * @dev: pointer to the port's device
 * @buf: pointer to input buffer
 * @count: number of bytes in buffer
 *
 * Store function of the "failed" attribute of a port.
 * If a "0" gets written to "failed", error recovery will be
 * started for the corresponding port.
 */
static ssize_t
zfcp_sysfs_port_failed_store(struct device *dev, const char *buf, size_t count)
{
	struct zfcp_port *port;
	unsigned int val;
	char *endp;
	int retval = 0;

	down(&zfcp_data.config_sema);

	port = dev_get_drvdata(dev);
	if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
		retval = -EBUSY;
		goto out;
	}

	val = simple_strtoul(buf, &endp, 0);
	if (((endp + 1) < (buf + count)) || (val != 0)) {
		retval = -EINVAL;
		goto out;
	}

	zfcp_erp_modify_port_status(port, ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED);
	zfcp_erp_wait(port->adapter);
 out:
	up(&zfcp_data.config_sema);
	return retval ? retval : (ssize_t) count;
}

/**
 * zfcp_sysfs_port_failed_show - failed state of port
 * @dev: pointer to the port's device
 * @buf: pointer to output buffer
 *
 * Show function of the "failed" attribute of a port. Returns
 * "0" if the port is working, otherwise "1".
 */
static ssize_t
zfcp_sysfs_port_failed_show(struct device *dev, char *buf)
{
	struct zfcp_port *port;

	port = dev_get_drvdata(dev);
	if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status))
		return sprintf(buf, "1\n");
	else
		return sprintf(buf, "0\n");
}

static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_port_failed_show,
		   zfcp_sysfs_port_failed_store);

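The "failed" attribute is thus both a status flag and a trigger: reading it reports ZFCP_STATUS_COMMON_ERP_FAILED, and writing "0" clears the failed state by forcing a reopen. A small user-space sketch under the same illustrative path assumptions as above:

/* user-space example: check a port's failed state and, if set,
 * restart recovery by writing "0"; anything else yields EINVAL */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *attr = "/sys/bus/ccw/drivers/zfcp/0.0.3d0c/"
			   "0x500507630300c562/failed"; /* illustrative */
	char state = '0';
	int fd = open(attr, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (read(fd, &state, 1) == 1 && state == '1') {
		if (pwrite(fd, "0\n", 2, 0) < 0)
			perror("pwrite");
	}
	close(fd);
	return 0;
}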
/**
 * zfcp_port_common_attrs
 * sysfs attributes that are common to all kinds of FC ports.
 */
static struct attribute *zfcp_port_common_attrs[] = {
	&dev_attr_failed.attr,
	&dev_attr_in_recovery.attr,
	&dev_attr_status.attr,
	&dev_attr_wwnn.attr,
	&dev_attr_d_id.attr,
	&dev_attr_access_denied.attr,
	NULL
};

static struct attribute_group zfcp_port_common_attr_group = {
	.attrs = zfcp_port_common_attrs,
};

/**
 * zfcp_port_no_ns_attrs
 * sysfs attributes not to be used for nameserver ports.
 */
static struct attribute *zfcp_port_no_ns_attrs[] = {
	&dev_attr_unit_add.attr,
	&dev_attr_unit_remove.attr,
	&dev_attr_scsi_id.attr,
	NULL
};

static struct attribute_group zfcp_port_no_ns_attr_group = {
	.attrs = zfcp_port_no_ns_attrs,
};

/**
 * zfcp_sysfs_port_create_files - create sysfs port files
 * @dev: pointer to the port's device
 * @flags: port status flags; ZFCP_STATUS_PORT_WKA marks a well-known
 *         address (nameserver) port
 *
 * Create all attributes of the sysfs representation of a port. For
 * well-known address ports only the common group is created, since
 * unit_add, unit_remove and scsi_id make no sense there; if the second
 * group cannot be created, the first is rolled back again.
 */
int
zfcp_sysfs_port_create_files(struct device *dev, u32 flags)
{
	int retval;

	retval = sysfs_create_group(&dev->kobj, &zfcp_port_common_attr_group);

	if ((flags & ZFCP_STATUS_PORT_WKA) || retval)
		return retval;

	retval = sysfs_create_group(&dev->kobj, &zfcp_port_no_ns_attr_group);
	if (retval)
		sysfs_remove_group(&dev->kobj, &zfcp_port_common_attr_group);

	return retval;
}

/**
 * zfcp_sysfs_port_remove_files - remove sysfs port files
 * @dev: pointer to the port's device
 * @flags: port status flags, as for zfcp_sysfs_port_create_files()
 *
 * Remove all attributes of the sysfs representation of a port.
 */
void
zfcp_sysfs_port_remove_files(struct device *dev, u32 flags)
{
	sysfs_remove_group(&dev->kobj, &zfcp_port_common_attr_group);
	if (!(flags & ZFCP_STATUS_PORT_WKA))
		sysfs_remove_group(&dev->kobj, &zfcp_port_no_ns_attr_group);
}

#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_sysfs_unit.c b/drivers/s390/scsi/zfcp_sysfs_unit.c
new file mode 100644
index 000000000000..87c0b461831f
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_sysfs_unit.c
@@ -0,0 +1,179 @@
/*
 * linux/drivers/s390/scsi/zfcp_sysfs_unit.c
 *
 * FCP adapter driver for IBM eServer zSeries
 *
 * sysfs unit related routines
 *
 * (C) Copyright IBM Corp. 2003, 2004
 *
 * Authors:
 *      Martin Peschke <mpeschke@de.ibm.com>
 *      Heiko Carstens <heiko.carstens@de.ibm.com>
 *      Andreas Herrmann <aherrman@de.ibm.com>
 *      Volker Sameske <sameske@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define ZFCP_SYSFS_UNIT_C_REVISION "$Revision: 1.30 $"

#include "zfcp_ext.h"

#define ZFCP_LOG_AREA			ZFCP_LOG_AREA_CONFIG

/**
 * zfcp_sysfs_unit_release - gets called when a struct device unit is released
 * @dev: pointer to the device being released
 */
void
zfcp_sysfs_unit_release(struct device *dev)
{
	kfree(dev);
}

/**
 * ZFCP_DEFINE_UNIT_ATTR
 * @_name: name of show attribute
 * @_format: format string
 * @_value: value to print
 *
 * Generates attribute for a unit.
 */
#define ZFCP_DEFINE_UNIT_ATTR(_name, _format, _value)                 \
static ssize_t zfcp_sysfs_unit_##_name##_show(struct device *dev,     \
					      char *buf)               \
{                                                                      \
	struct zfcp_unit *unit;                                        \
								       \
	unit = dev_get_drvdata(dev);                                   \
	return sprintf(buf, _format, _value);                          \
}                                                                      \
								       \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_unit_##_name##_show, NULL);

ZFCP_DEFINE_UNIT_ATTR(status, "0x%08x\n", atomic_read(&unit->status));
ZFCP_DEFINE_UNIT_ATTR(scsi_lun, "0x%x\n", unit->scsi_lun);
ZFCP_DEFINE_UNIT_ATTR(in_recovery, "%d\n", atomic_test_mask
		      (ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status));
ZFCP_DEFINE_UNIT_ATTR(access_denied, "%d\n", atomic_test_mask
		      (ZFCP_STATUS_COMMON_ACCESS_DENIED, &unit->status));
ZFCP_DEFINE_UNIT_ATTR(access_shared, "%d\n", atomic_test_mask
		      (ZFCP_STATUS_UNIT_SHARED, &unit->status));
ZFCP_DEFINE_UNIT_ATTR(access_readonly, "%d\n", atomic_test_mask
		      (ZFCP_STATUS_UNIT_READONLY, &unit->status));

/**
 * zfcp_sysfs_unit_failed_store - failed state of unit
 * @dev: pointer to the unit's device
 * @buf: pointer to input buffer
 * @count: number of bytes in buffer
 *
 * Store function of the "failed" attribute of a unit.
 * If a "0" gets written to "failed", error recovery will be
 * started for the corresponding unit.
 */
static ssize_t
zfcp_sysfs_unit_failed_store(struct device *dev, const char *buf, size_t count)
{
	struct zfcp_unit *unit;
	unsigned int val;
	char *endp;
	int retval = 0;

	down(&zfcp_data.config_sema);
	unit = dev_get_drvdata(dev);
	if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status)) {
		retval = -EBUSY;
		goto out;
	}

	val = simple_strtoul(buf, &endp, 0);
	if (((endp + 1) < (buf + count)) || (val != 0)) {
		retval = -EINVAL;
		goto out;
	}

	zfcp_erp_modify_unit_status(unit, ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
	zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED);
	zfcp_erp_wait(unit->port->adapter);
 out:
	up(&zfcp_data.config_sema);
	return retval ? retval : (ssize_t) count;
}

/**
 * zfcp_sysfs_unit_failed_show - failed state of unit
 * @dev: pointer to the unit's device
 * @buf: pointer to output buffer
 *
 * Show function of the "failed" attribute of a unit. Returns
 * "0" if the unit is working, otherwise "1".
 */
static ssize_t
zfcp_sysfs_unit_failed_show(struct device *dev, char *buf)
{
	struct zfcp_unit *unit;

	unit = dev_get_drvdata(dev);
	if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status))
		return sprintf(buf, "1\n");
	else
		return sprintf(buf, "0\n");
}

static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_unit_failed_show,
		   zfcp_sysfs_unit_failed_store);

static struct attribute *zfcp_unit_attrs[] = {
	&dev_attr_scsi_lun.attr,
	&dev_attr_failed.attr,
	&dev_attr_in_recovery.attr,
	&dev_attr_status.attr,
	&dev_attr_access_denied.attr,
	&dev_attr_access_shared.attr,
	&dev_attr_access_readonly.attr,
	NULL
};

static struct attribute_group zfcp_unit_attr_group = {
	.attrs = zfcp_unit_attrs,
};

/**
 * zfcp_sysfs_unit_create_files - create sysfs unit files
 * @dev: pointer to the unit's device
 *
 * Create all attributes of the sysfs representation of a unit.
 */
int
zfcp_sysfs_unit_create_files(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &zfcp_unit_attr_group);
}

/**
 * zfcp_sysfs_unit_remove_files - remove sysfs unit files
 * @dev: pointer to the unit's device
 *
 * Remove all attributes of the sysfs representation of a unit.
 */
void
zfcp_sysfs_unit_remove_files(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &zfcp_unit_attr_group);
}

#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
new file mode 100644
index 000000000000..87c2db1bd4f5
--- /dev/null
+++ b/drivers/s390/sysinfo.c
@@ -0,0 +1,347 @@
/*
 * drivers/s390/sysinfo.c
 *
 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Ulrich Weigand (Ulrich.Weigand@de.ibm.com)
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <asm/ebcdic.h>

struct sysinfo_1_1_1
{
	char reserved_0[32];
	char manufacturer[16];
	char type[4];
	char reserved_1[12];
	char model[16];
	char sequence[16];
	char plant[4];
};

struct sysinfo_1_2_1
{
	char reserved_0[80];
	char sequence[16];
	char plant[4];
	char reserved_1[2];
	unsigned short cpu_address;
};

struct sysinfo_1_2_2
{
	char reserved_0[32];
	unsigned int capability;
	unsigned short cpus_total;
	unsigned short cpus_configured;
	unsigned short cpus_standby;
	unsigned short cpus_reserved;
	/* variable-length tail: MP adjustment factors for the 2-way,
	 * 3-way, ... configurations, cpus_total - 1 entries in all */
	unsigned short adjustment[0];
};

struct sysinfo_2_2_1
{
	char reserved_0[80];
	char sequence[16];
	char plant[4];
	unsigned short cpu_id;
	unsigned short cpu_address;
};

struct sysinfo_2_2_2
{
	char reserved_0[32];
	unsigned short lpar_number;
	char reserved_1;
	unsigned char characteristics;
	#define LPAR_CHAR_DEDICATED	(1 << 7)
	#define LPAR_CHAR_SHARED	(1 << 6)
	#define LPAR_CHAR_LIMITED	(1 << 5)
	unsigned short cpus_total;
	unsigned short cpus_configured;
	unsigned short cpus_standby;
	unsigned short cpus_reserved;
	char name[8];
	unsigned int caf;
	char reserved_2[16];
	unsigned short cpus_dedicated;
	unsigned short cpus_shared;
};

struct sysinfo_3_2_2
{
	char reserved_0[31];
	unsigned char count;
	struct
	{
		char reserved_0[4];
		unsigned short cpus_total;
		unsigned short cpus_configured;
		unsigned short cpus_standby;
		unsigned short cpus_reserved;
		char name[8];
		unsigned int caf;
		char cpi[16];
		char reserved_1[24];
	} vm[8];
};

/* one response buffer (a single zeroed page) is shared by all formats */
union s390_sysinfo
{
	struct sysinfo_1_1_1 sysinfo_1_1_1;
	struct sysinfo_1_2_1 sysinfo_1_2_1;
	struct sysinfo_1_2_2 sysinfo_1_2_2;
	struct sysinfo_2_2_1 sysinfo_2_2_1;
	struct sysinfo_2_2_2 sysinfo_2_2_2;
	struct sysinfo_3_2_2 sysinfo_3_2_2;
};

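Since proc_read_sysinfo() below passes a single zeroed page to all of these formats, each response block has to fit into one page. A hedged compile-time sanity check one could add (illustrative only; assumes BUILD_BUG_ON from linux/kernel.h):

/* every STSI response format must fit into the single page we pass */
static inline void s390_sysinfo_size_check(void)
{
	BUILD_BUG_ON(sizeof(union s390_sysinfo) > PAGE_SIZE);
}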
/* Store System Information: function code fc goes into bits 0-3 of
 * general register 0 together with selector 1, selector 2 into
 * register 1. The fixup entry turns a program check on the stsi
 * instruction into cc 3, so the wrapper returns -1. */
static inline int stsi (void *sysinfo, int fc, int sel1, int sel2)
{
	int cc, retv;

#ifndef CONFIG_ARCH_S390X
	__asm__ __volatile__ (	"lr\t0,%2\n"
				"\tlr\t1,%3\n"
				"\tstsi\t0(%4)\n"
				"0:\tipm\t%0\n"
				"\tsrl\t%0,28\n"
				"1:lr\t%1,0\n"
				".section .fixup,\"ax\"\n"
				"2:\tlhi\t%0,3\n"
				"\tbras\t1,3f\n"
				"\t.long 1b\n"
				"3:\tl\t1,0(1)\n"
				"\tbr\t1\n"
				".previous\n"
				".section __ex_table,\"a\"\n"
				"\t.align 4\n"
				"\t.long 0b,2b\n"
				".previous\n"
				: "=d" (cc), "=d" (retv)
				: "d" ((fc << 28) | sel1), "d" (sel2), "a" (sysinfo)
				: "cc", "memory", "0", "1" );
#else
	__asm__ __volatile__ (	"lr\t0,%2\n"
				"lr\t1,%3\n"
				"\tstsi\t0(%4)\n"
				"0:\tipm\t%0\n"
				"\tsrl\t%0,28\n"
				"1:lr\t%1,0\n"
				".section .fixup,\"ax\"\n"
				"2:\tlhi\t%0,3\n"
				"\tjg\t1b\n"
				".previous\n"
				".section __ex_table,\"a\"\n"
				"\t.align 8\n"
				"\t.quad 0b,2b\n"
				".previous\n"
				: "=d" (cc), "=d" (retv)
				: "d" ((fc << 28) | sel1), "d" (sel2), "a" (sysinfo)
				: "cc", "memory", "0", "1" );
#endif

	return cc ? -1 : retv;
}

/* STSI with function code 0 stores no data; the current configuration
 * level ends up in bits 0-3 of the returned register value:
 * 1 = basic machine, 2 = LPAR, 3 = VM guest. */
static inline int stsi_0 (void)
{
	int rc = stsi (NULL, 0, 0, 0);
	return rc == -1 ? rc : (((unsigned int) rc) >> 28);
}

static inline int stsi_1_1_1 (struct sysinfo_1_1_1 *info)
{
	int rc = stsi (info, 1, 1, 1);
	if (rc != -1)
	{
		EBCASC (info->manufacturer, sizeof(info->manufacturer));
		EBCASC (info->type, sizeof(info->type));
		EBCASC (info->model, sizeof(info->model));
		EBCASC (info->sequence, sizeof(info->sequence));
		EBCASC (info->plant, sizeof(info->plant));
	}
	return rc == -1 ? rc : 0;
}

static inline int stsi_1_2_1 (struct sysinfo_1_2_1 *info)
{
	int rc = stsi (info, 1, 2, 1);
	if (rc != -1)
	{
		EBCASC (info->sequence, sizeof(info->sequence));
		EBCASC (info->plant, sizeof(info->plant));
	}
	return rc == -1 ? rc : 0;
}

static inline int stsi_1_2_2 (struct sysinfo_1_2_2 *info)
{
	int rc = stsi (info, 1, 2, 2);
	return rc == -1 ? rc : 0;
}

static inline int stsi_2_2_1 (struct sysinfo_2_2_1 *info)
{
	int rc = stsi (info, 2, 2, 1);
	if (rc != -1)
	{
		EBCASC (info->sequence, sizeof(info->sequence));
		EBCASC (info->plant, sizeof(info->plant));
	}
	return rc == -1 ? rc : 0;
}

static inline int stsi_2_2_2 (struct sysinfo_2_2_2 *info)
{
	int rc = stsi (info, 2, 2, 2);
	if (rc != -1)
	{
		EBCASC (info->name, sizeof(info->name));
	}
	return rc == -1 ? rc : 0;
}

static inline int stsi_3_2_2 (struct sysinfo_3_2_2 *info)
{
	int rc = stsi (info, 3, 2, 2);
	if (rc != -1)
	{
		int i;
		for (i = 0; i < info->count; i++)
		{
			EBCASC (info->vm[i].name, sizeof(info->vm[i].name));
			EBCASC (info->vm[i].cpi, sizeof(info->vm[i].cpi));
		}
	}
	return rc == -1 ? rc : 0;
}

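Taken together, a caller uses these wrappers like this (a sketch only, assuming process context; not part of this patch):

/* example: log the machine type the same way /proc/sysinfo derives it */
static int example_log_machine_type(void)
{
	struct sysinfo_1_1_1 *info;
	int rc = -ENOMEM;

	info = (struct sysinfo_1_1_1 *) get_zeroed_page(GFP_KERNEL);
	if (!info)
		return rc;
	rc = stsi_1_1_1(info);	/* 0 on success, -1 on failure */
	if (rc == 0)
		printk(KERN_INFO "machine type: %.4s\n", info->type);
	free_page((unsigned long) info);
	return rc;
}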
static int proc_read_sysinfo(char *page, char **start,
			     off_t off, int count,
			     int *eof, void *data)
{
	unsigned long info_page = get_zeroed_page (GFP_KERNEL);
	union s390_sysinfo *info = (union s390_sysinfo *) info_page;
	int len = 0;
	int level;
	int i;

	if (!info)
		return 0;

	/* configuration level decides which sections can be shown */
	level = stsi_0 ();

	if (level >= 1 && stsi_1_1_1 (&info->sysinfo_1_1_1) == 0)
	{
		len += sprintf (page+len, "Manufacturer: %-16.16s\n",
				info->sysinfo_1_1_1.manufacturer);
		len += sprintf (page+len, "Type: %-4.4s\n",
				info->sysinfo_1_1_1.type);
		len += sprintf (page+len, "Model: %-16.16s\n",
				info->sysinfo_1_1_1.model);
		len += sprintf (page+len, "Sequence Code: %-16.16s\n",
				info->sysinfo_1_1_1.sequence);
		len += sprintf (page+len, "Plant: %-4.4s\n",
				info->sysinfo_1_1_1.plant);
	}

	if (level >= 1 && stsi_1_2_2 (&info->sysinfo_1_2_2) == 0)
	{
		len += sprintf (page+len, "\n");
		len += sprintf (page+len, "CPUs Total: %d\n",
				info->sysinfo_1_2_2.cpus_total);
		len += sprintf (page+len, "CPUs Configured: %d\n",
				info->sysinfo_1_2_2.cpus_configured);
		len += sprintf (page+len, "CPUs Standby: %d\n",
				info->sysinfo_1_2_2.cpus_standby);
		len += sprintf (page+len, "CPUs Reserved: %d\n",
				info->sysinfo_1_2_2.cpus_reserved);

		len += sprintf (page+len, "Capability: %d\n",
				info->sysinfo_1_2_2.capability);

		/* adjustment[0] belongs to the 2-way configuration */
		for (i = 2; i <= info->sysinfo_1_2_2.cpus_total; i++)
			len += sprintf (page+len, "Adjustment %02d-way: %d\n",
					i, info->sysinfo_1_2_2.adjustment[i-2]);
	}

	if (level >= 2 && stsi_2_2_2 (&info->sysinfo_2_2_2) == 0)
	{
		len += sprintf (page+len, "\n");
		len += sprintf (page+len, "LPAR Number: %d\n",
				info->sysinfo_2_2_2.lpar_number);

		len += sprintf (page+len, "LPAR Characteristics: ");
		if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_DEDICATED)
			len += sprintf (page+len, "Dedicated ");
		if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_SHARED)
			len += sprintf (page+len, "Shared ");
		if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_LIMITED)
			len += sprintf (page+len, "Limited ");
		len += sprintf (page+len, "\n");

		len += sprintf (page+len, "LPAR Name: %-8.8s\n",
				info->sysinfo_2_2_2.name);

		len += sprintf (page+len, "LPAR Adjustment: %d\n",
				info->sysinfo_2_2_2.caf);

		len += sprintf (page+len, "LPAR CPUs Total: %d\n",
				info->sysinfo_2_2_2.cpus_total);
		len += sprintf (page+len, "LPAR CPUs Configured: %d\n",
				info->sysinfo_2_2_2.cpus_configured);
		len += sprintf (page+len, "LPAR CPUs Standby: %d\n",
				info->sysinfo_2_2_2.cpus_standby);
		len += sprintf (page+len, "LPAR CPUs Reserved: %d\n",
				info->sysinfo_2_2_2.cpus_reserved);
		len += sprintf (page+len, "LPAR CPUs Dedicated: %d\n",
				info->sysinfo_2_2_2.cpus_dedicated);
		len += sprintf (page+len, "LPAR CPUs Shared: %d\n",
				info->sysinfo_2_2_2.cpus_shared);
	}

	if (level >= 3 && stsi_3_2_2 (&info->sysinfo_3_2_2) == 0)
	{
		for (i = 0; i < info->sysinfo_3_2_2.count; i++)
		{
			len += sprintf (page+len, "\n");
			len += sprintf (page+len, "VM%02d Name: %-8.8s\n",
					i, info->sysinfo_3_2_2.vm[i].name);
			len += sprintf (page+len, "VM%02d Control Program: %-16.16s\n",
					i, info->sysinfo_3_2_2.vm[i].cpi);

			len += sprintf (page+len, "VM%02d Adjustment: %d\n",
					i, info->sysinfo_3_2_2.vm[i].caf);

			len += sprintf (page+len, "VM%02d CPUs Total: %d\n",
					i, info->sysinfo_3_2_2.vm[i].cpus_total);
			len += sprintf (page+len, "VM%02d CPUs Configured: %d\n",
					i, info->sysinfo_3_2_2.vm[i].cpus_configured);
			len += sprintf (page+len, "VM%02d CPUs Standby: %d\n",
					i, info->sysinfo_3_2_2.vm[i].cpus_standby);
			len += sprintf (page+len, "VM%02d CPUs Reserved: %d\n",
					i, info->sysinfo_3_2_2.vm[i].cpus_reserved);
		}
	}

	free_page (info_page);
	return len;
}

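The resulting file is plain text, so consumers can simply stream it; a minimal user-space reader (nothing s390-specific about it):

/* user-space example: print /proc/sysinfo */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/sysinfo", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}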
static __init int create_proc_sysinfo(void)
{
	create_proc_read_entry ("sysinfo", 0444, NULL,
				proc_read_sysinfo, NULL);
	return 0;
}

__initcall(create_proc_sysinfo);
