author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/s390/net
tag       v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/s390/net')
-rw-r--r--  drivers/s390/net/Kconfig       |  108
-rw-r--r--  drivers/s390/net/Makefile      |   14
-rw-r--r--  drivers/s390/net/claw.c        | 4447
-rw-r--r--  drivers/s390/net/claw.h        |  335
-rw-r--r--  drivers/s390/net/ctcdbug.c     |   83
-rw-r--r--  drivers/s390/net/ctcdbug.h     |  123
-rw-r--r--  drivers/s390/net/ctcmain.c     | 3304
-rw-r--r--  drivers/s390/net/ctctty.c      | 1276
-rw-r--r--  drivers/s390/net/ctctty.h      |   37
-rw-r--r--  drivers/s390/net/cu3088.c      |  166
-rw-r--r--  drivers/s390/net/cu3088.h      |   41
-rw-r--r--  drivers/s390/net/fsm.c         |  220
-rw-r--r--  drivers/s390/net/fsm.h         |  265
-rw-r--r--  drivers/s390/net/iucv.c        | 2567
-rw-r--r--  drivers/s390/net/iucv.h        |  849
-rw-r--r--  drivers/s390/net/lcs.c         | 2347
-rw-r--r--  drivers/s390/net/lcs.h         |  321
-rw-r--r--  drivers/s390/net/netiucv.c     | 2149
-rw-r--r--  drivers/s390/net/qeth.h        | 1162
-rw-r--r--  drivers/s390/net/qeth_eddp.c   |  643
-rw-r--r--  drivers/s390/net/qeth_eddp.h   |   85
-rw-r--r--  drivers/s390/net/qeth_fs.h     |  163
-rw-r--r--  drivers/s390/net/qeth_main.c   | 8236
-rw-r--r--  drivers/s390/net/qeth_mpc.c    |  168
-rw-r--r--  drivers/s390/net/qeth_mpc.h    |  538
-rw-r--r--  drivers/s390/net/qeth_proc.c   |  495
-rw-r--r--  drivers/s390/net/qeth_sys.c    | 1788
-rw-r--r--  drivers/s390/net/qeth_tso.c    |  285
-rw-r--r--  drivers/s390/net/qeth_tso.h    |   58
-rw-r--r--  drivers/s390/net/smsgiucv.c    |  180
-rw-r--r--  drivers/s390/net/smsgiucv.h    |   10
31 files changed, 32463 insertions(+), 0 deletions(-)
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
new file mode 100644
index 000000000000..a7efc394515e
--- /dev/null
+++ b/drivers/s390/net/Kconfig
@@ -0,0 +1,108 @@
1menu "S/390 network device drivers"
2 depends on NETDEVICES && ARCH_S390
3
4config LCS
5	tristate "LAN Channel Station Interface"
6 depends on NETDEVICES && (NET_ETHERNET || TR || FDDI)
7 help
8 Select this option if you want to use LCS networking on IBM S/390
9 or zSeries. This device driver supports Token Ring (IEEE 802.5),
10	  FDDI (ANSI X3T9.5) and Ethernet.
11 This option is also available as a module which will be
12 called lcs.ko. If you do not know what it is, it's safe to say "Y".
13
14config CTC
15 tristate "CTC device support"
16 depends on NETDEVICES
17 help
18 Select this option if you want to use channel-to-channel networking
19 on IBM S/390 or zSeries. This device driver supports real CTC
20 coupling using ESCON. It also supports virtual CTCs when running
21 under VM. It will use the channel device configuration if this is
22 available. This option is also available as a module which will be
23 called ctc.ko. If you do not know what it is, it's safe to say "Y".
24
25config IUCV
26 tristate "IUCV support (VM only)"
27 help
28 Select this option if you want to use inter-user communication
29 under VM or VIF. If unsure, say "Y" to enable a fast communication
30 link between VM guests. At boot time the user ID of the guest needs
31 to be passed to the kernel. Note that both kernels need to be
32 compiled with this option and both need to be booted with the user ID
33 of the other VM guest.
34
35config NETIUCV
36 tristate "IUCV network device support (VM only)"
37 depends on IUCV && NETDEVICES
38 help
39 Select this option if you want to use inter-user communication
40 vehicle networking under VM or VIF. It enables a fast communication
41 link between VM guests. Using ifconfig a point-to-point connection
42	  can be established to the Linux for zSeries and S/390 system
43 running on the other VM guest. This option is also available
44 as a module which will be called netiucv.ko. If unsure, say "Y".
45
46config SMSGIUCV
47 tristate "IUCV special message support (VM only)"
48 depends on IUCV
49 help
50 Select this option if you want to be able to receive SMSG messages
51 from other VM guest systems.
52
53config CLAW
54 tristate "CLAW device support"
55 depends on NETDEVICES
56 help
57 This driver supports channel attached CLAW devices.
58 CLAW is Common Link Access for Workstation. Common devices
59 that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices.
60	  To compile as a module, choose M here: the module will be called
61	  claw.ko. To compile into the kernel, choose Y.
62
63config QETH
64 tristate "Gigabit Ethernet device support"
65 depends on NETDEVICES && IP_MULTICAST && QDIO
66 help
67 This driver supports the IBM S/390 and zSeries OSA Express adapters
68 in QDIO mode (all media types), HiperSockets interfaces and VM GuestLAN
69 interfaces in QDIO and HIPER mode.
70
71 For details please refer to the documentation provided by IBM at
72 <http://www10.software.ibm.com/developerworks/opensource/linux390>
73
74 To compile this driver as a module, choose M here: the
75 module will be called qeth.ko.
76
77
78comment "Gigabit Ethernet default settings"
79 depends on QETH
80
81config QETH_IPV6
82 bool "IPv6 support for gigabit ethernet"
83 depends on (QETH = IPV6) || (QETH && IPV6 = 'y')
84 help
85 If CONFIG_QETH is switched on, this option will include IPv6
86 support in the qeth device driver.
87
88config QETH_VLAN
89 bool "VLAN support for gigabit ethernet"
90 depends on (QETH = VLAN_8021Q) || (QETH && VLAN_8021Q = 'y')
91 help
92 If CONFIG_QETH is switched on, this option will include IEEE
93 802.1q VLAN support in the qeth device driver.
94
95config QETH_PERF_STATS
96 bool "Performance statistics in /proc"
97 depends on QETH
98 help
99 When switched on, this option will add a file in the proc-fs
100 (/proc/qeth_perf_stats) containing performance statistics. It
101 may slightly impact performance, so this is only recommended for
102 internal tuning of the device driver.
103
104config CCWGROUP
105 tristate
106 default (LCS || CTC || QETH)
107
108endmenu
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
new file mode 100644
index 000000000000..7cabb80a2e41
--- /dev/null
+++ b/drivers/s390/net/Makefile
@@ -0,0 +1,14 @@
1#
2# S/390 network devices
3#
4
5ctc-objs := ctcmain.o ctctty.o ctcdbug.o
6
7obj-$(CONFIG_IUCV) += iucv.o
8obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
9obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
10obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
11obj-$(CONFIG_LCS) += lcs.o cu3088.o
12qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o qeth_tso.o
13qeth-$(CONFIG_PROC_FS) += qeth_proc.o
14obj-$(CONFIG_QETH) += qeth.o
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
new file mode 100644
index 000000000000..06804d39a9c6
--- /dev/null
+++ b/drivers/s390/net/claw.c
@@ -0,0 +1,4447 @@
1/*
2 * drivers/s390/net/claw.c
3 * ESCON CLAW network driver
4 *
5 * $Revision: 1.35 $ $Date: 2005/03/24 12:25:38 $
6 *
7 *    Linux for zSeries version
8 * Copyright (C) 2002,2005 IBM Corporation
9 * Author(s) Original code written by:
10 * Kazuo Iimura (iimura@jp.ibm.com)
11 * Rewritten by
12 * Andy Richter (richtera@us.ibm.com)
13 * Marc Price (mwprice@us.ibm.com)
14 *
15 * sysfs parms:
16 * group x.x.rrrr,x.x.wwww
17 * read_buffer nnnnnnn
18 * write_buffer nnnnnn
19 * host_name aaaaaaaa
20 * adapter_name aaaaaaaa
21 * api_type aaaaaaaa
22 *
23 * eg.
24 * group 0.0.0200 0.0.0201
25 * read_buffer 25
26 * write_buffer 20
27 * host_name LINUX390
28 * adapter_name RS6K
29 * api_type TCPIP
30 *
31 * where
32 *
33 *   The device id is decided by the order in which entries
34 *   are added to the group: the first is claw0, the second claw1,
35 *   and so on up to CLAW_MAX_DEV.
36 *
37 * rrrr - the first of 2 consecutive device addresses used for the
38 * CLAW protocol.
39 * The specified address is always used as the input (Read)
40 * channel and the next address is used as the output channel.
41 *
42 * wwww - the second of 2 consecutive device addresses used for
43 * the CLAW protocol.
44 * The specified address is always used as the output
45 * channel and the previous address is used as the input channel.
46 *
47 * read_buffer - specifies number of input buffers to allocate.
48 * write_buffer - specifies number of output buffers to allocate.
49 * host_name - host name
50 *   adapter_name - adapter name
51 * api_type - API type TCPIP or API will be sent and expected
52 * as ws_name
53 *
54 * Note the following requirements:
55 * 1) host_name must match the configured adapter_name on the remote side
56 *   2) adapter_name must match the configured host name on the remote side
57 *
58 * Change History
59 * 1.00 Initial release shipped
60 * 1.10 Changes for Buffer allocation
61 *    1.15  Changed for 2.6 kernel; no longer compiles on 2.4 or lower
62 * 1.25 Added Packing support
63 */
64#include <asm/bitops.h>
65#include <asm/ccwdev.h>
66#include <asm/ccwgroup.h>
67#include <asm/debug.h>
68#include <asm/idals.h>
69#include <asm/io.h>
70
71#include <linux/ctype.h>
72#include <linux/delay.h>
73#include <linux/errno.h>
74#include <linux/if_arp.h>
75#include <linux/init.h>
76#include <linux/interrupt.h>
77#include <linux/ip.h>
78#include <linux/kernel.h>
79#include <linux/module.h>
80#include <linux/netdevice.h>
81#include <linux/etherdevice.h>
82#include <linux/proc_fs.h>
83#include <linux/sched.h>
84#include <linux/signal.h>
85#include <linux/skbuff.h>
86#include <linux/slab.h>
87#include <linux/string.h>
88#include <linux/tcp.h>
89#include <linux/timer.h>
90#include <linux/types.h>
91#include <linux/version.h>
92
93#include "cu3088.h"
94#include "claw.h"
95
96MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
97MODULE_DESCRIPTION("Linux for zSeries CLAW Driver\n" \
98 "Copyright 2000,2005 IBM Corporation\n");
99MODULE_LICENSE("GPL");
100
101/* Debugging is based on DEBUGMSG, IOTRACE, or FUNCTRACE options:
102 DEBUGMSG - Enables output of various debug messages in the code
103 IOTRACE - Enables output of CCW and other IO related traces
104 FUNCTRACE - Enables output of function entry/exit trace
105 Define any combination of above options to enable tracing
106
107 CLAW also uses the s390dbf file system see claw_trace and claw_setup
108*/
109
110/* following enables tracing */
111//#define DEBUGMSG
112//#define IOTRACE
113//#define FUNCTRACE
114
115#ifdef DEBUGMSG
116#define DEBUG
117#endif
118
119#ifdef IOTRACE
120#define DEBUG
121#endif
122
123#ifdef FUNCTRACE
124#define DEBUG
125#endif
126
127 char debug_buffer[255];
128/**
129 * Debug Facility Stuff
130 */
131static debug_info_t *claw_dbf_setup;
132static debug_info_t *claw_dbf_trace;
133
134/**
135 * CLAW Debug Facility functions
136 */
137static void
138claw_unregister_debug_facility(void)
139{
140 if (claw_dbf_setup)
141 debug_unregister(claw_dbf_setup);
142 if (claw_dbf_trace)
143 debug_unregister(claw_dbf_trace);
144}
145
146static int
147claw_register_debug_facility(void)
148{
149 claw_dbf_setup = debug_register("claw_setup", 1, 1, 8);
150 claw_dbf_trace = debug_register("claw_trace", 1, 2, 8);
151 if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
152 printk(KERN_WARNING "Not enough memory for debug facility.\n");
153 claw_unregister_debug_facility();
154 return -ENOMEM;
155 }
156 debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
157 debug_set_level(claw_dbf_setup, 2);
158 debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
159 debug_set_level(claw_dbf_trace, 2);
160 return 0;
161}
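/*
 * A sketch of the behaviour, assuming the usual s390dbf semantics of
 * 2.6-era kernels: the two areas registered above appear as claw_setup
 * and claw_trace under the s390 debug feature; the CLAW_DBF_TEXT*
 * macros used throughout this file log into them, and with
 * debug_set_level() at 2 only events of level 2 or lower are kept.
 */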
162
163static inline void
164claw_set_busy(struct net_device *dev)
165{
166 ((struct claw_privbk *) dev->priv)->tbusy=1;
167 eieio();
168}
169
170static inline void
171claw_clear_busy(struct net_device *dev)
172{
173 clear_bit(0, &(((struct claw_privbk *) dev->priv)->tbusy));
174 netif_wake_queue(dev);
175 eieio();
176}
177
178static inline int
179claw_check_busy(struct net_device *dev)
180{
181 eieio();
182 return ((struct claw_privbk *) dev->priv)->tbusy;
183}
184
185static inline void
186claw_setbit_busy(int nr,struct net_device *dev)
187{
188 netif_stop_queue(dev);
189 set_bit(nr, (void *)&(((struct claw_privbk *)dev->priv)->tbusy));
190}
191
192static inline void
193claw_clearbit_busy(int nr,struct net_device *dev)
194{
195 clear_bit(nr,(void *)&(((struct claw_privbk *)dev->priv)->tbusy));
196 netif_wake_queue(dev);
197}
198
199static inline int
200claw_test_and_setbit_busy(int nr,struct net_device *dev)
201{
202 netif_stop_queue(dev);
203 return test_and_set_bit(nr,
204 (void *)&(((struct claw_privbk *) dev->priv)->tbusy));
205}
206
207
208/* Functions for the DEV methods */
209
210static int claw_probe(struct ccwgroup_device *cgdev);
211static void claw_remove_device(struct ccwgroup_device *cgdev);
212static void claw_purge_skb_queue(struct sk_buff_head *q);
213static int claw_new_device(struct ccwgroup_device *cgdev);
214static int claw_shutdown_device(struct ccwgroup_device *cgdev);
215static int claw_tx(struct sk_buff *skb, struct net_device *dev);
216static int claw_change_mtu( struct net_device *dev, int new_mtu);
217static int claw_open(struct net_device *dev);
218static void claw_irq_handler(struct ccw_device *cdev,
219 unsigned long intparm, struct irb *irb);
220static void claw_irq_tasklet ( unsigned long data );
221static int claw_release(struct net_device *dev);
222static void claw_write_retry ( struct chbk * p_ch );
223static void claw_write_next ( struct chbk * p_ch );
224static void claw_timer ( struct chbk * p_ch );
225
226/* Functions */
227static int add_claw_reads(struct net_device *dev,
228 struct ccwbk* p_first, struct ccwbk* p_last);
229static void inline ccw_check_return_code (struct ccw_device *cdev,
230 int return_code);
231static void inline ccw_check_unit_check (struct chbk * p_ch,
232 unsigned char sense );
233static int find_link(struct net_device *dev, char *host_name, char *ws_name );
234static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
235static int init_ccw_bk(struct net_device *dev);
236static void probe_error( struct ccwgroup_device *cgdev);
237static struct net_device_stats *claw_stats(struct net_device *dev);
238static int inline pages_to_order_of_mag(int num_of_pages);
239static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
240#ifdef DEBUG
241static void dumpit (char *buf, int len);
242#endif
243/* sysfs Functions */
244static ssize_t claw_hname_show(struct device *dev, char *buf);
245static ssize_t claw_hname_write(struct device *dev,
246 const char *buf, size_t count);
247static ssize_t claw_adname_show(struct device *dev, char *buf);
248static ssize_t claw_adname_write(struct device *dev,
249 const char *buf, size_t count);
250static ssize_t claw_apname_show(struct device *dev, char *buf);
251static ssize_t claw_apname_write(struct device *dev,
252 const char *buf, size_t count);
253static ssize_t claw_wbuff_show(struct device *dev, char *buf);
254static ssize_t claw_wbuff_write(struct device *dev,
255 const char *buf, size_t count);
256static ssize_t claw_rbuff_show(struct device *dev, char *buf);
257static ssize_t claw_rbuff_write(struct device *dev,
258 const char *buf, size_t count);
259static int claw_add_files(struct device *dev);
260static void claw_remove_files(struct device *dev);
261
262/* Functions for System Validate */
263static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw);
264static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
265 __u8 correlator, __u8 rc , char *local_name, char *remote_name);
266static int claw_snd_conn_req(struct net_device *dev, __u8 link);
267static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl);
268static int claw_snd_sys_validate_rsp(struct net_device *dev,
269 struct clawctl * p_ctl, __u32 return_code);
270static int claw_strt_conn_req(struct net_device *dev );
271static void claw_strt_read ( struct net_device *dev, int lock );
272static void claw_strt_out_IO( struct net_device *dev );
273static void claw_free_wrt_buf( struct net_device *dev );
274
275/* Functions for unpack reads */
276static void unpack_read (struct net_device *dev );
277
278/* ccwgroup table */
279
280static struct ccwgroup_driver claw_group_driver = {
281 .owner = THIS_MODULE,
282 .name = "claw",
283 .max_slaves = 2,
284 .driver_id = 0xC3D3C1E6,
285 .probe = claw_probe,
286 .remove = claw_remove_device,
287 .set_online = claw_new_device,
288 .set_offline = claw_shutdown_device,
289};
290
291/*
292*
293* Key functions
294*/
295
296/*----------------------------------------------------------------*
297 * claw_probe *
298 * this function is called for each CLAW device. *
299 *----------------------------------------------------------------*/
300static int
301claw_probe(struct ccwgroup_device *cgdev)
302{
303 int rc;
304 struct claw_privbk *privptr=NULL;
305
306#ifdef FUNCTRACE
307 printk(KERN_INFO "%s Enter\n",__FUNCTION__);
308#endif
309 CLAW_DBF_TEXT(2,setup,"probe");
310 if (!get_device(&cgdev->dev))
311 return -ENODEV;
312#ifdef DEBUGMSG
313 printk(KERN_INFO "claw: variable cgdev =\n");
314 dumpit((char *)cgdev, sizeof(struct ccwgroup_device));
315#endif
316 privptr = kmalloc(sizeof(struct claw_privbk), GFP_KERNEL);
317 if (privptr == NULL) {
318 probe_error(cgdev);
319 put_device(&cgdev->dev);
320 printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n",
321 cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__);
322 CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
323 return -ENOMEM;
324 }
325 memset(privptr,0x00,sizeof(struct claw_privbk));
326 privptr->p_mtc_envelope= kmalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
327 privptr->p_env = kmalloc(sizeof(struct claw_env), GFP_KERNEL);
328 if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) {
329 probe_error(cgdev);
330 put_device(&cgdev->dev);
331 printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n",
332 cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__);
333 CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
334 return -ENOMEM;
335 }
336 memset(privptr->p_mtc_envelope, 0x00, MAX_ENVELOPE_SIZE);
337 memset(privptr->p_env, 0x00, sizeof(struct claw_env));
338 memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8);
339 memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8);
340 memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8);
341 privptr->p_env->packing = 0;
342 privptr->p_env->write_buffers = 5;
343 privptr->p_env->read_buffers = 5;
344 privptr->p_env->read_size = CLAW_FRAME_SIZE;
345 privptr->p_env->write_size = CLAW_FRAME_SIZE;
346 rc = claw_add_files(&cgdev->dev);
347 if (rc) {
348 probe_error(cgdev);
349 put_device(&cgdev->dev);
350 printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n",
351 cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__);
352 CLAW_DBF_TEXT_(2,setup,"probex%d",rc);
353 return rc;
354 }
355 printk(KERN_INFO "claw: sysfs files added for %s\n",cgdev->cdev[0]->dev.bus_id);
356 privptr->p_env->p_priv = privptr;
357 cgdev->cdev[0]->handler = claw_irq_handler;
358 cgdev->cdev[1]->handler = claw_irq_handler;
359 cgdev->dev.driver_data = privptr;
360#ifdef FUNCTRACE
361 printk(KERN_INFO "claw:%s exit on line %d, "
362 "rc = 0\n",__FUNCTION__,__LINE__);
363#endif
364 CLAW_DBF_TEXT(2,setup,"prbext 0");
365
366 return 0;
367} /* end of claw_probe */
368
369/*-------------------------------------------------------------------*
370 * claw_tx *
371 *-------------------------------------------------------------------*/
372
373static int
374claw_tx(struct sk_buff *skb, struct net_device *dev)
375{
376 int rc;
377 struct claw_privbk *privptr=dev->priv;
378 unsigned long saveflags;
379 struct chbk *p_ch;
380
381#ifdef FUNCTRACE
382 printk(KERN_INFO "%s:%s enter\n",dev->name,__FUNCTION__);
383#endif
384 CLAW_DBF_TEXT(4,trace,"claw_tx");
385 p_ch=&privptr->channel[WRITE];
386 if (skb == NULL) {
387 printk(KERN_WARNING "%s: null pointer passed as sk_buffer\n",
388 dev->name);
389 privptr->stats.tx_dropped++;
390#ifdef FUNCTRACE
391 printk(KERN_INFO "%s: %s() exit on line %d, rc = EIO\n",
392 dev->name,__FUNCTION__, __LINE__);
393#endif
394 CLAW_DBF_TEXT_(2,trace,"clawtx%d",-EIO);
395 return -EIO;
396 }
397
398#ifdef IOTRACE
399 printk(KERN_INFO "%s: variable sk_buff=\n",dev->name);
400 dumpit((char *) skb, sizeof(struct sk_buff));
401 printk(KERN_INFO "%s: variable dev=\n",dev->name);
402 dumpit((char *) dev, sizeof(struct net_device));
403#endif
404 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
405 rc=claw_hw_tx( skb, dev, 1 );
406 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
407#ifdef FUNCTRACE
408 printk(KERN_INFO "%s:%s exit on line %d, rc = %d\n",
409 dev->name, __FUNCTION__, __LINE__, rc);
410#endif
411 CLAW_DBF_TEXT_(4,trace,"clawtx%d",rc);
412 return rc;
413} /* end of claw_tx */
414
415/*------------------------------------------------------------------*
416 * pack the collect queue into an skb and return it *
417 * If not packing just return the top skb from the queue *
418 *------------------------------------------------------------------*/
419
420static struct sk_buff *
421claw_pack_skb(struct claw_privbk *privptr)
422{
423 struct sk_buff *new_skb,*held_skb;
424 struct chbk *p_ch = &privptr->channel[WRITE];
425 struct claw_env *p_env = privptr->p_env;
426 int pkt_cnt,pk_ind,so_far;
427
428 new_skb = NULL; /* assume no dice */
429 pkt_cnt = 0;
430 CLAW_DBF_TEXT(4,trace,"PackSKBe");
431 if (skb_queue_len(&p_ch->collect_queue) > 0) {
432 /* some data */
433 held_skb = skb_dequeue(&p_ch->collect_queue);
434 if (p_env->packing != DO_PACKED)
435 return held_skb;
436 if (held_skb)
437 atomic_dec(&held_skb->users);
438 else
439 return NULL;
440 /* get a new SKB we will pack at least one */
441 new_skb = dev_alloc_skb(p_env->write_size);
442 if (new_skb == NULL) {
443 atomic_inc(&held_skb->users);
444 skb_queue_head(&p_ch->collect_queue,held_skb);
445 return NULL;
446 }
447 /* we have packed packet and a place to put it */
448 pk_ind = 1;
449 so_far = 0;
450 new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
451 while ((pk_ind) && (held_skb != NULL)) {
452 if (held_skb->len+so_far <= p_env->write_size-8) {
453 memcpy(skb_put(new_skb,held_skb->len),
454 held_skb->data,held_skb->len);
455 privptr->stats.tx_packets++;
456 so_far += held_skb->len;
457 pkt_cnt++;
458 dev_kfree_skb_irq(held_skb);
459 held_skb = skb_dequeue(&p_ch->collect_queue);
460 if (held_skb)
461 atomic_dec(&held_skb->users);
462 } else {
463 pk_ind = 0;
464 atomic_inc(&held_skb->users);
465 skb_queue_head(&p_ch->collect_queue,held_skb);
466 }
467 }
468#ifdef IOTRACE
469 printk(KERN_INFO "%s: %s() Packed %d len %d\n",
470 p_env->ndev->name,
471 __FUNCTION__,pkt_cnt,new_skb->len);
472#endif
473 }
474 CLAW_DBF_TEXT(4,trace,"PackSKBx");
475 return new_skb;
476}
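/*
 * Packing behaviour in brief (a sketch, assuming p_env->write_size is
 * DEF_PACK_BUFSIZE in packed mode): dequeued frames are copied back to
 * back into one new skb while held_skb->len + so_far stays within
 * write_size - 8; the first frame that would overflow is pushed back
 * to the head of the collect queue and begins the next packed skb.
 */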
477
478/*-------------------------------------------------------------------*
479 * claw_change_mtu *
480 * *
481 *-------------------------------------------------------------------*/
482
483static int
484claw_change_mtu(struct net_device *dev, int new_mtu)
485{
486 struct claw_privbk *privptr=dev->priv;
487 int buff_size;
488#ifdef FUNCTRACE
489 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
490#endif
491#ifdef DEBUGMSG
492 printk(KERN_INFO "variable dev =\n");
493 dumpit((char *) dev, sizeof(struct net_device));
494 printk(KERN_INFO "variable new_mtu = %d\n", new_mtu);
495#endif
496 CLAW_DBF_TEXT(4,trace,"setmtu");
497 buff_size = privptr->p_env->write_size;
498 if ((new_mtu < 60) || (new_mtu > buff_size)) {
499#ifdef FUNCTRACE
500 printk(KERN_INFO "%s:%s Exit on line %d, rc=EINVAL\n",
501 dev->name,
502 __FUNCTION__, __LINE__);
503#endif
504 return -EINVAL;
505 }
506 dev->mtu = new_mtu;
507#ifdef FUNCTRACE
508 printk(KERN_INFO "%s:%s Exit on line %d\n",dev->name,
509 __FUNCTION__, __LINE__);
510#endif
511 return 0;
512} /* end of claw_change_mtu */
513
514
515/*-------------------------------------------------------------------*
516 * claw_open *
517 * *
518 *-------------------------------------------------------------------*/
519static int
520claw_open(struct net_device *dev)
521{
522
523 int rc;
524 int i;
525 unsigned long saveflags=0;
526 unsigned long parm;
527 struct claw_privbk *privptr;
528 DECLARE_WAITQUEUE(wait, current);
529 struct timer_list timer;
530 struct ccwbk *p_buf;
531
532#ifdef FUNCTRACE
533 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
534#endif
535 CLAW_DBF_TEXT(4,trace,"open");
536	if (!dev || (dev->name[0] == 0x00)) {
537		CLAW_DBF_TEXT(2,trace,"BadDev");
538		printk(KERN_WARNING "claw: Bad device at open, failing\n");
539 return -ENODEV;
540 }
541 privptr = (struct claw_privbk *)dev->priv;
542 /* allocate and initialize CCW blocks */
543 if (privptr->buffs_alloc == 0) {
544 rc=init_ccw_bk(dev);
545 if (rc) {
546 printk(KERN_INFO "%s:%s Exit on line %d, rc=ENOMEM\n",
547 dev->name,
548 __FUNCTION__, __LINE__);
549 CLAW_DBF_TEXT(2,trace,"openmem");
550 return -ENOMEM;
551 }
552 }
553 privptr->system_validate_comp=0;
554 privptr->release_pend=0;
555 if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
556 privptr->p_env->read_size=DEF_PACK_BUFSIZE;
557 privptr->p_env->write_size=DEF_PACK_BUFSIZE;
558 privptr->p_env->packing=PACKING_ASK;
559 } else {
560 privptr->p_env->packing=0;
561 privptr->p_env->read_size=CLAW_FRAME_SIZE;
562 privptr->p_env->write_size=CLAW_FRAME_SIZE;
563 }
564 claw_set_busy(dev);
565 tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet,
566 (unsigned long) &privptr->channel[READ]);
567 for ( i = 0; i < 2; i++) {
568 CLAW_DBF_TEXT_(2,trace,"opn_ch%d",i);
569 init_waitqueue_head(&privptr->channel[i].wait);
570 /* skb_queue_head_init(&p_ch->io_queue); */
571 if (i == WRITE)
572 skb_queue_head_init(
573 &privptr->channel[WRITE].collect_queue);
574 privptr->channel[i].flag_a = 0;
575 privptr->channel[i].IO_active = 0;
576 privptr->channel[i].flag &= ~CLAW_TIMER;
577 init_timer(&timer);
578 timer.function = (void *)claw_timer;
579 timer.data = (unsigned long)(&privptr->channel[i]);
580 timer.expires = jiffies + 15*HZ;
581 add_timer(&timer);
582 spin_lock_irqsave(get_ccwdev_lock(
583 privptr->channel[i].cdev), saveflags);
584 parm = (unsigned long) &privptr->channel[i];
585 privptr->channel[i].claw_state = CLAW_START_HALT_IO;
586 rc = 0;
587 add_wait_queue(&privptr->channel[i].wait, &wait);
588 rc = ccw_device_halt(
589 (struct ccw_device *)privptr->channel[i].cdev,parm);
590 set_current_state(TASK_INTERRUPTIBLE);
591 spin_unlock_irqrestore(
592 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
593 schedule();
594 set_current_state(TASK_RUNNING);
595 remove_wait_queue(&privptr->channel[i].wait, &wait);
596 if(rc != 0)
597 ccw_check_return_code(privptr->channel[i].cdev, rc);
598 if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
599 del_timer(&timer);
600 }
601 if ((((privptr->channel[READ].last_dstat |
602 privptr->channel[WRITE].last_dstat) &
603 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
604 (((privptr->channel[READ].flag |
605 privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) {
606#ifdef DEBUGMSG
607 printk(KERN_INFO "%s: channel problems during open - read:"
608 " %02x - write: %02x\n",
609 dev->name,
610 privptr->channel[READ].last_dstat,
611 privptr->channel[WRITE].last_dstat);
612#endif
613 printk(KERN_INFO "%s: remote side is not ready\n", dev->name);
614 CLAW_DBF_TEXT(2,trace,"notrdy");
615
616 for ( i = 0; i < 2; i++) {
617 spin_lock_irqsave(
618 get_ccwdev_lock(privptr->channel[i].cdev),
619 saveflags);
620 parm = (unsigned long) &privptr->channel[i];
621 privptr->channel[i].claw_state = CLAW_STOP;
622 rc = ccw_device_halt(
623				(struct ccw_device *)privptr->channel[i].cdev,
624 parm);
625 spin_unlock_irqrestore(
626 get_ccwdev_lock(privptr->channel[i].cdev),
627 saveflags);
628 if (rc != 0) {
629 ccw_check_return_code(
630 privptr->channel[i].cdev, rc);
631 }
632 }
633 free_pages((unsigned long)privptr->p_buff_ccw,
634 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
635 if (privptr->p_env->read_size < PAGE_SIZE) {
636 free_pages((unsigned long)privptr->p_buff_read,
637 (int)pages_to_order_of_mag(
638 privptr->p_buff_read_num));
639 }
640 else {
641 p_buf=privptr->p_read_active_first;
642 while (p_buf!=NULL) {
643 free_pages((unsigned long)p_buf->p_buffer,
644 (int)pages_to_order_of_mag(
645 privptr->p_buff_pages_perread ));
646 p_buf=p_buf->next;
647 }
648 }
649 if (privptr->p_env->write_size < PAGE_SIZE ) {
650 free_pages((unsigned long)privptr->p_buff_write,
651 (int)pages_to_order_of_mag(
652 privptr->p_buff_write_num));
653 }
654 else {
655 p_buf=privptr->p_write_active_first;
656 while (p_buf!=NULL) {
657 free_pages((unsigned long)p_buf->p_buffer,
658 (int)pages_to_order_of_mag(
659 privptr->p_buff_pages_perwrite ));
660 p_buf=p_buf->next;
661 }
662 }
663 privptr->buffs_alloc = 0;
664 privptr->channel[READ].flag= 0x00;
665 privptr->channel[WRITE].flag = 0x00;
666 privptr->p_buff_ccw=NULL;
667 privptr->p_buff_read=NULL;
668 privptr->p_buff_write=NULL;
669 claw_clear_busy(dev);
670#ifdef FUNCTRACE
671 printk(KERN_INFO "%s:%s Exit on line %d, rc=EIO\n",
672 dev->name,__FUNCTION__,__LINE__);
673#endif
674 CLAW_DBF_TEXT(2,trace,"open EIO");
675 return -EIO;
676 }
677
678 /* Send SystemValidate command */
679
680 claw_clear_busy(dev);
681
682#ifdef FUNCTRACE
683 printk(KERN_INFO "%s:%s Exit on line %d, rc=0\n",
684 dev->name,__FUNCTION__,__LINE__);
685#endif
686 CLAW_DBF_TEXT(4,trace,"openok");
687 return 0;
688} /* end of claw_open */
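/*
 * Open handshake in brief (as coded above): each channel arms a
 * 15-second claw_timer, issues ccw_device_halt() under the ccwdev
 * lock and sleeps until claw_irq_handler() wakes it; if either
 * channel then shows status beyond channel-end/device-end, or a
 * timer fired first, all buffer pools are freed again and -EIO is
 * returned ("remote side is not ready").
 */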
689
690/*-------------------------------------------------------------------*
691* *
692* claw_irq_handler *
693* *
694*--------------------------------------------------------------------*/
695static void
696claw_irq_handler(struct ccw_device *cdev,
697 unsigned long intparm, struct irb *irb)
698{
699 struct chbk *p_ch = NULL;
700 struct claw_privbk *privptr = NULL;
701 struct net_device *dev = NULL;
702 struct claw_env *p_env;
703 struct chbk *p_ch_r=NULL;
704
705
706#ifdef FUNCTRACE
707 printk(KERN_INFO "%s enter \n",__FUNCTION__);
708#endif
709 CLAW_DBF_TEXT(4,trace,"clawirq");
710 /* Bypass all 'unsolicited interrupts' */
711 if (!cdev->dev.driver_data) {
712 printk(KERN_WARNING "claw: unsolicited interrupt for device:"
713 "%s received c-%02x d-%02x\n",
714 cdev->dev.bus_id,irb->scsw.cstat, irb->scsw.dstat);
715#ifdef FUNCTRACE
716 printk(KERN_INFO "claw: %s() "
717 "exit on line %d\n",__FUNCTION__,__LINE__);
718#endif
719 CLAW_DBF_TEXT(2,trace,"badirq");
720 return;
721 }
722 privptr = (struct claw_privbk *)cdev->dev.driver_data;
723
724 /* Try to extract channel from driver data. */
725 if (privptr->channel[READ].cdev == cdev)
726 p_ch = &privptr->channel[READ];
727 else if (privptr->channel[WRITE].cdev == cdev)
728 p_ch = &privptr->channel[WRITE];
729 else {
730 printk(KERN_WARNING "claw: Can't determine channel for "
731 "interrupt, device %s\n", cdev->dev.bus_id);
732 CLAW_DBF_TEXT(2,trace,"badchan");
733 return;
734 }
735 CLAW_DBF_TEXT_(4,trace,"IRQCH=%d",p_ch->flag);
736
737 dev = (struct net_device *) (p_ch->ndev);
738 p_env=privptr->p_env;
739
740#ifdef IOTRACE
741 printk(KERN_INFO "%s: interrupt for device: %04x "
742 "received c-%02x d-%02x state-%02x\n",
743 dev->name, p_ch->devno, irb->scsw.cstat,
744 irb->scsw.dstat, p_ch->claw_state);
745#endif
746
747 /* Copy interruption response block. */
748 memcpy(p_ch->irb, irb, sizeof(struct irb));
749
750 /* Check for good subchannel return code, otherwise error message */
751 if (irb->scsw.cstat && !(irb->scsw.cstat & SCHN_STAT_PCI)) {
752 printk(KERN_INFO "%s: subchannel check for device: %04x -"
753 " Sch Stat %02x Dev Stat %02x CPA - %04x\n",
754 dev->name, p_ch->devno,
755 irb->scsw.cstat, irb->scsw.dstat,irb->scsw.cpa);
756#ifdef IOTRACE
757 dumpit((char *)irb,sizeof(struct irb));
758 dumpit((char *)(unsigned long)irb->scsw.cpa,
759 sizeof(struct ccw1));
760#endif
761#ifdef FUNCTRACE
762 printk(KERN_INFO "%s:%s Exit on line %d\n",
763 dev->name,__FUNCTION__,__LINE__);
764#endif
765 CLAW_DBF_TEXT(2,trace,"chanchk");
766 /* return; */
767 }
768
769 /* Check the reason-code of a unit check */
770 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
771 ccw_check_unit_check(p_ch, irb->ecw[0]);
772 }
773
774 /* State machine to bring the connection up, down and to restart */
775 p_ch->last_dstat = irb->scsw.dstat;
776
777 switch (p_ch->claw_state) {
778 case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
779#ifdef DEBUGMSG
780 printk(KERN_INFO "%s: CLAW_STOP enter\n", dev->name);
781#endif
782 if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
783 (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
784 (p_ch->irb->scsw.stctl ==
785 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
786#ifdef FUNCTRACE
787 printk(KERN_INFO "%s:%s Exit on line %d\n",
788 dev->name,__FUNCTION__,__LINE__);
789#endif
790 return;
791 }
792 wake_up(&p_ch->wait); /* wake up claw_release */
793
794#ifdef DEBUGMSG
795 printk(KERN_INFO "%s: CLAW_STOP exit\n", dev->name);
796#endif
797#ifdef FUNCTRACE
798 printk(KERN_INFO "%s:%s Exit on line %d\n",
799 dev->name,__FUNCTION__,__LINE__);
800#endif
801 CLAW_DBF_TEXT(4,trace,"stop");
802 return;
803
804 case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */
805#ifdef DEBUGMSG
806 printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO\n",
807 dev->name);
808#endif
809 if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
810 (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
811 (p_ch->irb->scsw.stctl ==
812 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
813#ifdef FUNCTRACE
814 printk(KERN_INFO "%s:%s Exit on line %d\n",
815 dev->name,__FUNCTION__,__LINE__);
816#endif
817 CLAW_DBF_TEXT(4,trace,"haltio");
818 return;
819 }
820 if (p_ch->flag == CLAW_READ) {
821 p_ch->claw_state = CLAW_START_READ;
822 wake_up(&p_ch->wait); /* wake claw_open (READ)*/
823 }
824 else
825 if (p_ch->flag == CLAW_WRITE) {
826 p_ch->claw_state = CLAW_START_WRITE;
827 /* send SYSTEM_VALIDATE */
828 claw_strt_read(dev, LOCK_NO);
829 claw_send_control(dev,
830 SYSTEM_VALIDATE_REQUEST,
831 0, 0, 0,
832 p_env->host_name,
833 p_env->adapter_name );
834 } else {
835 printk(KERN_WARNING "claw: unsolicited "
836 "interrupt for device:"
837 "%s received c-%02x d-%02x\n",
838 cdev->dev.bus_id,
839 irb->scsw.cstat,
840 irb->scsw.dstat);
841 return;
842 }
843#ifdef DEBUGMSG
844 printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO exit\n",
845 dev->name);
846#endif
847#ifdef FUNCTRACE
848 printk(KERN_INFO "%s:%s Exit on line %d\n",
849 dev->name,__FUNCTION__,__LINE__);
850#endif
851 CLAW_DBF_TEXT(4,trace,"haltio");
852 return;
853 case CLAW_START_READ:
854 CLAW_DBF_TEXT(4,trace,"ReadIRQ");
855 if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
856 clear_bit(0, (void *)&p_ch->IO_active);
857 if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
858 (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
859 (p_ch->irb->ecw[0]) == 0)
860 {
861 privptr->stats.rx_errors++;
862 printk(KERN_INFO "%s: Restart is "
863 "required after remote "
864 "side recovers \n",
865 dev->name);
866 }
867#ifdef FUNCTRACE
868 printk(KERN_INFO "%s:%s Exit on line %d\n",
869 dev->name,__FUNCTION__,__LINE__);
870#endif
871 CLAW_DBF_TEXT(4,trace,"notrdy");
872 return;
873 }
874 if ((p_ch->irb->scsw.cstat & SCHN_STAT_PCI) &&
875 (p_ch->irb->scsw.dstat==0)) {
876 if (test_and_set_bit(CLAW_BH_ACTIVE,
877 (void *)&p_ch->flag_a) == 0) {
878 tasklet_schedule(&p_ch->tasklet);
879 }
880 else {
881 CLAW_DBF_TEXT(4,trace,"PCINoBH");
882 }
883#ifdef FUNCTRACE
884 printk(KERN_INFO "%s:%s Exit on line %d\n",
885 dev->name,__FUNCTION__,__LINE__);
886#endif
887 CLAW_DBF_TEXT(4,trace,"PCI_read");
888 return;
889 }
890 if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
891 (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
892 (p_ch->irb->scsw.stctl ==
893 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
894#ifdef FUNCTRACE
895 printk(KERN_INFO "%s:%s Exit on line %d\n",
896 dev->name,__FUNCTION__,__LINE__);
897#endif
898 CLAW_DBF_TEXT(4,trace,"SPend_rd");
899 return;
900 }
901 clear_bit(0, (void *)&p_ch->IO_active);
902 claw_clearbit_busy(TB_RETRY,dev);
903 if (test_and_set_bit(CLAW_BH_ACTIVE,
904 (void *)&p_ch->flag_a) == 0) {
905 tasklet_schedule(&p_ch->tasklet);
906 }
907 else {
908 CLAW_DBF_TEXT(4,trace,"RdBHAct");
909 }
910
911#ifdef DEBUGMSG
912 printk(KERN_INFO "%s: process CLAW_START_READ exit\n",
913 dev->name);
914#endif
915#ifdef FUNCTRACE
916 printk(KERN_INFO "%s:%s Exit on line %d\n",
917 dev->name,__FUNCTION__,__LINE__);
918#endif
919 CLAW_DBF_TEXT(4,trace,"RdIRQXit");
920 return;
921 case CLAW_START_WRITE:
922 if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
923			printk(KERN_INFO "%s: Unit Check Occurred in "
924 "write channel\n",dev->name);
925 clear_bit(0, (void *)&p_ch->IO_active);
926 if (p_ch->irb->ecw[0] & 0x80 ) {
927 printk(KERN_INFO "%s: Resetting Event "
928 "occurred:\n",dev->name);
929 init_timer(&p_ch->timer);
930 p_ch->timer.function =
931 (void *)claw_write_retry;
932 p_ch->timer.data = (unsigned long)p_ch;
933 p_ch->timer.expires = jiffies + 10*HZ;
934 add_timer(&p_ch->timer);
935 printk(KERN_INFO "%s: write connection "
936 "restarting\n",dev->name);
937 }
938#ifdef FUNCTRACE
939 printk(KERN_INFO "%s:%s Exit on line %d\n",
940 dev->name,__FUNCTION__,__LINE__);
941#endif
942 CLAW_DBF_TEXT(4,trace,"rstrtwrt");
943 return;
944 }
945 if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) {
946 clear_bit(0, (void *)&p_ch->IO_active);
947 printk(KERN_INFO "%s: Unit Exception "
948				"Occurred in write channel\n",
949 dev->name);
950 }
951 if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
952 (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
953 (p_ch->irb->scsw.stctl ==
954 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
955#ifdef FUNCTRACE
956 printk(KERN_INFO "%s:%s Exit on line %d\n",
957 dev->name,__FUNCTION__,__LINE__);
958#endif
959 CLAW_DBF_TEXT(4,trace,"writeUE");
960 return;
961 }
962 clear_bit(0, (void *)&p_ch->IO_active);
963 if (claw_test_and_setbit_busy(TB_TX,dev)==0) {
964 claw_write_next(p_ch);
965 claw_clearbit_busy(TB_TX,dev);
966 claw_clear_busy(dev);
967 }
968 p_ch_r=(struct chbk *)&privptr->channel[READ];
969 if (test_and_set_bit(CLAW_BH_ACTIVE,
970 (void *)&p_ch_r->flag_a) == 0) {
971 tasklet_schedule(&p_ch_r->tasklet);
972 }
973
974#ifdef DEBUGMSG
975 printk(KERN_INFO "%s: process CLAW_START_WRITE exit\n",
976 dev->name);
977#endif
978#ifdef FUNCTRACE
979 printk(KERN_INFO "%s:%s Exit on line %d\n",
980 dev->name,__FUNCTION__,__LINE__);
981#endif
982 CLAW_DBF_TEXT(4,trace,"StWtExit");
983 return;
984 default:
985 printk(KERN_WARNING "%s: wrong selection code - irq "
986 "state=%d\n",dev->name,p_ch->claw_state);
987#ifdef FUNCTRACE
988 printk(KERN_INFO "%s:%s Exit on line %d\n",
989 dev->name,__FUNCTION__,__LINE__);
990#endif
991 CLAW_DBF_TEXT(2,trace,"badIRQ");
992 return;
993 }
994
995} /* end of claw_irq_handler */
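/*
 * claw_state transitions handled by the switch above:
 *   CLAW_STOP           - halt completed; wake claw_release()
 *   CLAW_START_HALT_IO  - READ channel: wake claw_open();
 *                         WRITE channel: start reads and send
 *                         SYSTEM_VALIDATE_REQUEST
 *   CLAW_START_READ     - schedule the read tasklet (unpack_read)
 *   CLAW_START_WRITE    - retry on unit check, otherwise free sent
 *                         buffers via claw_write_next() and kick the
 *                         read-side tasklet
 */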
996
997
998/*-------------------------------------------------------------------*
999* claw_irq_tasklet *
1000* *
1001*--------------------------------------------------------------------*/
1002static void
1003claw_irq_tasklet ( unsigned long data )
1004{
1005 struct chbk * p_ch;
1006 struct net_device *dev;
1007 struct claw_privbk * privptr;
1008
1009 p_ch = (struct chbk *) data;
1010 dev = (struct net_device *)p_ch->ndev;
1011#ifdef FUNCTRACE
1012 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
1013#endif
1014#ifdef DEBUGMSG
1015 printk(KERN_INFO "%s: variable p_ch =\n",dev->name);
1016 dumpit((char *) p_ch, sizeof(struct chbk));
1017#endif
1018 CLAW_DBF_TEXT(4,trace,"IRQtask");
1019
1020 privptr = (struct claw_privbk *) dev->priv;
1021
1022#ifdef DEBUGMSG
1023 printk(KERN_INFO "%s: bh routine - state-%02x\n" ,
1024 dev->name, p_ch->claw_state);
1025#endif
1026
1027 unpack_read(dev);
1028 clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
1029 CLAW_DBF_TEXT(4,trace,"TskletXt");
1030#ifdef FUNCTRACE
1031 printk(KERN_INFO "%s:%s Exit on line %d\n",
1032 dev->name,__FUNCTION__,__LINE__);
1033#endif
1034 return;
1035} /* end of claw_irq_tasklet */
1036
1037/*-------------------------------------------------------------------*
1038* claw_release *
1039* *
1040*--------------------------------------------------------------------*/
1041static int
1042claw_release(struct net_device *dev)
1043{
1044 int rc;
1045 int i;
1046 unsigned long saveflags;
1047 unsigned long parm;
1048 struct claw_privbk *privptr;
1049 DECLARE_WAITQUEUE(wait, current);
1050 struct ccwbk* p_this_ccw;
1051 struct ccwbk* p_buf;
1052
1053 if (!dev)
1054 return 0;
1055 privptr = (struct claw_privbk *) dev->priv;
1056 if (!privptr)
1057 return 0;
1058#ifdef FUNCTRACE
1059 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
1060#endif
1061 CLAW_DBF_TEXT(4,trace,"release");
1062#ifdef DEBUGMSG
1063 printk(KERN_INFO "%s: variable dev =\n",dev->name);
1064 dumpit((char *) dev, sizeof(struct net_device));
1065 printk(KERN_INFO "Priv Buffalloc %d\n",privptr->buffs_alloc);
1066 printk(KERN_INFO "Priv p_buff_ccw = %p\n",&privptr->p_buff_ccw);
1067#endif
1068 privptr->release_pend=1;
1069 claw_setbit_busy(TB_STOP,dev);
1070 for ( i = 1; i >=0 ; i--) {
1071 spin_lock_irqsave(
1072 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
1073 /* del_timer(&privptr->channel[READ].timer); */
1074 privptr->channel[i].claw_state = CLAW_STOP;
1075 privptr->channel[i].IO_active = 0;
1076 parm = (unsigned long) &privptr->channel[i];
1077 if (i == WRITE)
1078 claw_purge_skb_queue(
1079 &privptr->channel[WRITE].collect_queue);
1080 rc = ccw_device_halt (privptr->channel[i].cdev, parm);
1081 if (privptr->system_validate_comp==0x00) /* never opened? */
1082 init_waitqueue_head(&privptr->channel[i].wait);
1083 add_wait_queue(&privptr->channel[i].wait, &wait);
1084 set_current_state(TASK_INTERRUPTIBLE);
1085 spin_unlock_irqrestore(
1086 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
1087 schedule();
1088 set_current_state(TASK_RUNNING);
1089 remove_wait_queue(&privptr->channel[i].wait, &wait);
1090 if (rc != 0) {
1091 ccw_check_return_code(privptr->channel[i].cdev, rc);
1092 }
1093 }
1094 if (privptr->pk_skb != NULL) {
1095 dev_kfree_skb(privptr->pk_skb);
1096 privptr->pk_skb = NULL;
1097 }
1098 if(privptr->buffs_alloc != 1) {
1099#ifdef FUNCTRACE
1100 printk(KERN_INFO "%s:%s Exit on line %d\n",
1101 dev->name,__FUNCTION__,__LINE__);
1102#endif
1103 CLAW_DBF_TEXT(4,trace,"none2fre");
1104 return 0;
1105 }
1106 CLAW_DBF_TEXT(4,trace,"freebufs");
1107 if (privptr->p_buff_ccw != NULL) {
1108 free_pages((unsigned long)privptr->p_buff_ccw,
1109 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
1110 }
1111 CLAW_DBF_TEXT(4,trace,"freeread");
1112 if (privptr->p_env->read_size < PAGE_SIZE) {
1113 if (privptr->p_buff_read != NULL) {
1114 free_pages((unsigned long)privptr->p_buff_read,
1115 (int)pages_to_order_of_mag(privptr->p_buff_read_num));
1116 }
1117 }
1118 else {
1119 p_buf=privptr->p_read_active_first;
1120 while (p_buf!=NULL) {
1121 free_pages((unsigned long)p_buf->p_buffer,
1122 (int)pages_to_order_of_mag(
1123 privptr->p_buff_pages_perread ));
1124 p_buf=p_buf->next;
1125 }
1126 }
1127 CLAW_DBF_TEXT(4,trace,"freewrit");
1128 if (privptr->p_env->write_size < PAGE_SIZE ) {
1129 free_pages((unsigned long)privptr->p_buff_write,
1130 (int)pages_to_order_of_mag(privptr->p_buff_write_num));
1131 }
1132 else {
1133 p_buf=privptr->p_write_active_first;
1134 while (p_buf!=NULL) {
1135 free_pages((unsigned long)p_buf->p_buffer,
1136 (int)pages_to_order_of_mag(
1137 privptr->p_buff_pages_perwrite ));
1138 p_buf=p_buf->next;
1139 }
1140 }
1141 CLAW_DBF_TEXT(4,trace,"clearptr");
1142 privptr->buffs_alloc = 0;
1143 privptr->p_buff_ccw=NULL;
1144 privptr->p_buff_read=NULL;
1145 privptr->p_buff_write=NULL;
1146 privptr->system_validate_comp=0;
1147 privptr->release_pend=0;
1148 /* Remove any writes that were pending and reset all reads */
1149 p_this_ccw=privptr->p_read_active_first;
1150 while (p_this_ccw!=NULL) {
1151 p_this_ccw->header.length=0xffff;
1152 p_this_ccw->header.opcode=0xff;
1153 p_this_ccw->header.flag=0x00;
1154 p_this_ccw=p_this_ccw->next;
1155 }
1156
1157 while (privptr->p_write_active_first!=NULL) {
1158 p_this_ccw=privptr->p_write_active_first;
1159 p_this_ccw->header.flag=CLAW_PENDING;
1160 privptr->p_write_active_first=p_this_ccw->next;
1161 p_this_ccw->next=privptr->p_write_free_chain;
1162 privptr->p_write_free_chain=p_this_ccw;
1163 ++privptr->write_free_count;
1164 }
1165 privptr->p_write_active_last=NULL;
1166 privptr->mtc_logical_link = -1;
1167 privptr->mtc_skipping = 1;
1168 privptr->mtc_offset=0;
1169
1170 if (((privptr->channel[READ].last_dstat |
1171 privptr->channel[WRITE].last_dstat) &
1172 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
1173 printk(KERN_WARNING "%s: channel problems during close - "
1174 "read: %02x - write: %02x\n",
1175 dev->name,
1176 privptr->channel[READ].last_dstat,
1177 privptr->channel[WRITE].last_dstat);
1178 CLAW_DBF_TEXT(2,trace,"badclose");
1179 }
1180#ifdef FUNCTRACE
1181 printk(KERN_INFO "%s:%s Exit on line %d\n",
1182 dev->name,__FUNCTION__,__LINE__);
1183#endif
1184 CLAW_DBF_TEXT(4,trace,"rlsexit");
1185 return 0;
1186} /* end of claw_release */
1187
1188
1189
1190/*-------------------------------------------------------------------*
1191* claw_write_retry *
1192* *
1193*--------------------------------------------------------------------*/
1194
1195static void
1196claw_write_retry ( struct chbk *p_ch )
1197{
1198
1199 struct net_device *dev=p_ch->ndev;
1200
1201
1202#ifdef FUNCTRACE
1203 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
1204 printk(KERN_INFO "claw: variable p_ch =\n");
1205 dumpit((char *) p_ch, sizeof(struct chbk));
1206#endif
1207 CLAW_DBF_TEXT(4,trace,"w_retry");
1208 if (p_ch->claw_state == CLAW_STOP) {
1209#ifdef FUNCTRACE
1210 printk(KERN_INFO "%s:%s Exit on line %d\n",
1211 dev->name,__FUNCTION__,__LINE__);
1212#endif
1213 return;
1214 }
1215#ifdef DEBUGMSG
1216 printk( KERN_INFO "%s:%s state-%02x\n" ,
1217 dev->name,
1218 __FUNCTION__,
1219 p_ch->claw_state);
1220#endif
1221 claw_strt_out_IO( dev );
1222#ifdef FUNCTRACE
1223 printk(KERN_INFO "%s:%s Exit on line %d\n",
1224 dev->name,__FUNCTION__,__LINE__);
1225#endif
1226 CLAW_DBF_TEXT(4,trace,"rtry_xit");
1227 return;
1228} /* end of claw_write_retry */
1229
1230
1231/*-------------------------------------------------------------------*
1232* claw_write_next *
1233* *
1234*--------------------------------------------------------------------*/
1235
1236static void
1237claw_write_next ( struct chbk * p_ch )
1238{
1239
1240 struct net_device *dev;
1241 struct claw_privbk *privptr=NULL;
1242 struct sk_buff *pk_skb;
1243 int rc;
1244
1245#ifdef FUNCTRACE
1246 printk(KERN_INFO "%s:%s Enter \n",p_ch->ndev->name,__FUNCTION__);
1247 printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name);
1248 dumpit((char *) p_ch, sizeof(struct chbk));
1249#endif
1250 CLAW_DBF_TEXT(4,trace,"claw_wrt");
1251 if (p_ch->claw_state == CLAW_STOP)
1252 return;
1253 dev = (struct net_device *) p_ch->ndev;
1254 privptr = (struct claw_privbk *) dev->priv;
1255 claw_free_wrt_buf( dev );
1256 if ((privptr->write_free_count > 0) &&
1257 (skb_queue_len(&p_ch->collect_queue) > 0)) {
1258 pk_skb = claw_pack_skb(privptr);
1259 while (pk_skb != NULL) {
1260 rc = claw_hw_tx( pk_skb, dev,1);
1261 if (privptr->write_free_count > 0) {
1262 pk_skb = claw_pack_skb(privptr);
1263 } else
1264 pk_skb = NULL;
1265 }
1266 }
1267 if (privptr->p_write_active_first!=NULL) {
1268 claw_strt_out_IO(dev);
1269 }
1270
1271#ifdef FUNCTRACE
1272 printk(KERN_INFO "%s:%s Exit on line %d\n",
1273 dev->name,__FUNCTION__,__LINE__);
1274#endif
1275 return;
1276} /* end of claw_write_next */
1277
1278/*-------------------------------------------------------------------*
1279* *
1280* claw_timer *
1281*--------------------------------------------------------------------*/
1282
1283static void
1284claw_timer ( struct chbk * p_ch )
1285{
1286#ifdef FUNCTRACE
1287 printk(KERN_INFO "%s:%s Entry\n",p_ch->ndev->name,__FUNCTION__);
1288 printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name);
1289 dumpit((char *) p_ch, sizeof(struct chbk));
1290#endif
1291 CLAW_DBF_TEXT(4,trace,"timer");
1292 p_ch->flag |= CLAW_TIMER;
1293 wake_up(&p_ch->wait);
1294#ifdef FUNCTRACE
1295 printk(KERN_INFO "%s:%s Exit on line %d\n",
1296 p_ch->ndev->name,__FUNCTION__,__LINE__);
1297#endif
1298 return;
1299} /* end of claw_timer */
1300
1301
1302/*
1303*
1304* functions
1305*/
1306
1307
1308/*-------------------------------------------------------------------*
1309* *
1310* pages_to_order_of_mag *
1311* *
1312* takes a number of pages from 1 to 512 and returns the *
1313*   base-2 order, log(num_pages)/log(2), that get_free_pages()     *
1314*   needs; get_free_pages() has an upper order of 9                *
1315*--------------------------------------------------------------------*/
1316
1317static int inline
1318pages_to_order_of_mag(int num_of_pages)
1319{
1320 int order_of_mag=1; /* assume 2 pages */
1321 int nump=2;
1322#ifdef FUNCTRACE
1323 printk(KERN_INFO "%s Enter pages = %d \n",__FUNCTION__,num_of_pages);
1324#endif
1325 CLAW_DBF_TEXT_(5,trace,"pages%d",num_of_pages);
1326 if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */
1327 /* 512 pages = 2Meg on 4k page systems */
1328 if (num_of_pages >= 512) {return 9; }
1329 /* we have two or more pages order is at least 1 */
1330 for (nump=2 ;nump <= 512;nump*=2) {
1331 if (num_of_pages <= nump)
1332 break;
1333 order_of_mag +=1;
1334 }
1335 if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */
1336#ifdef FUNCTRACE
1337 printk(KERN_INFO "%s Exit on line %d, order = %d\n",
1338 __FUNCTION__,__LINE__, order_of_mag);
1339#endif
1340 CLAW_DBF_TEXT_(5,trace,"mag%d",order_of_mag);
1341 return order_of_mag;
1342}
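/*
 * Sample mappings produced above (page count -> order):
 *   1 -> 0, 2 -> 1, 3..4 -> 2, 5..8 -> 3, ..., 257..511 -> 9,
 * and anything >= 512 is clamped to 9, the upper order
 * get_free_pages() accepts here.
 */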
1343
1344/*-------------------------------------------------------------------*
1345* *
1346* add_claw_reads *
1347* *
1348*--------------------------------------------------------------------*/
1349static int
1350add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1351 struct ccwbk* p_last)
1352{
1353 struct claw_privbk *privptr;
1354 struct ccw1 temp_ccw;
1355 struct endccw * p_end;
1356#ifdef IOTRACE
1357 struct ccwbk* p_buf;
1358#endif
1359#ifdef FUNCTRACE
1360 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
1361#endif
1362#ifdef DEBUGMSG
1363 printk(KERN_INFO "dev\n");
1364 dumpit((char *) dev, sizeof(struct net_device));
1365 printk(KERN_INFO "p_first\n");
1366 dumpit((char *) p_first, sizeof(struct ccwbk));
1367 printk(KERN_INFO "p_last\n");
1368 dumpit((char *) p_last, sizeof(struct ccwbk));
1369#endif
1370 CLAW_DBF_TEXT(4,trace,"addreads");
1371 privptr = dev->priv;
1372 p_end = privptr->p_end_ccw;
1373
1374	/* first CCW and last CCW contain a new set of read channel programs
1375	 * to append to the running channel programs
1376 */
1377 if ( p_first==NULL) {
1378#ifdef FUNCTRACE
1379 printk(KERN_INFO "%s:%s Exit on line %d\n",
1380 dev->name,__FUNCTION__,__LINE__);
1381#endif
1382 CLAW_DBF_TEXT(4,trace,"addexit");
1383 return 0;
1384 }
1385
1386 /* set up ending CCW sequence for this segment */
1387 if (p_end->read1) {
1388 p_end->read1=0x00; /* second ending CCW is now active */
1389 /* reset ending CCWs and setup TIC CCWs */
1390 p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1391 p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1392 p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1);
1393 p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1);
1394 p_end->read2_nop2.cda=0;
1395 p_end->read2_nop2.count=1;
1396 }
1397 else {
1398 p_end->read1=0x01; /* first ending CCW is now active */
1399 /* reset ending CCWs and setup TIC CCWs */
1400 p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1401 p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1402 p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
1403 p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
1404 p_end->read1_nop2.cda=0;
1405 p_end->read1_nop2.count=1;
1406 }
1407
1408 if ( privptr-> p_read_active_first ==NULL ) {
1409#ifdef DEBUGMSG
1410		printk(KERN_INFO "%s:%s p_read_active_first == NULL \n",
1411 dev->name,__FUNCTION__);
1412 printk(KERN_INFO "%s:%s Read active first/last changed \n",
1413 dev->name,__FUNCTION__);
1414#endif
1415 privptr-> p_read_active_first= p_first; /* set new first */
1416 privptr-> p_read_active_last = p_last; /* set new last */
1417 }
1418 else {
1419
1420#ifdef DEBUGMSG
1421 printk(KERN_INFO "%s:%s Read in progress \n",
1422 dev->name,__FUNCTION__);
1423#endif
1424 /* set up TIC ccw */
1425 temp_ccw.cda= (__u32)__pa(&p_first->read);
1426 temp_ccw.count=0;
1427 temp_ccw.flags=0;
1428 temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;
1429
1430
1431 if (p_end->read1) {
1432
1433 /* first set of CCW's is chained to the new read */
1434 /* chain, so the second set is chained to the active chain. */
1435 /* Therefore modify the second set to point to the new */
1436 /* read chain set up TIC CCWs */
1437 /* make sure we update the CCW so channel doesn't fetch it */
1438 /* when it's only half done */
1439 memcpy( &p_end->read2_nop2, &temp_ccw ,
1440 sizeof(struct ccw1));
1441 privptr->p_read_active_last->r_TIC_1.cda=
1442 (__u32)__pa(&p_first->read);
1443 privptr->p_read_active_last->r_TIC_2.cda=
1444 (__u32)__pa(&p_first->read);
1445 }
1446 else {
1447 /* make sure we update the CCW so channel doesn't */
1448 /* fetch it when it is only half done */
1449 memcpy( &p_end->read1_nop2, &temp_ccw ,
1450 sizeof(struct ccw1));
1451 privptr->p_read_active_last->r_TIC_1.cda=
1452 (__u32)__pa(&p_first->read);
1453 privptr->p_read_active_last->r_TIC_2.cda=
1454 (__u32)__pa(&p_first->read);
1455 }
1456 /* chain in new set of blocks */
1457 privptr->p_read_active_last->next = p_first;
1458 privptr->p_read_active_last=p_last;
1459 } /* end of if ( privptr-> p_read_active_first ==NULL) */
1460#ifdef IOTRACE
1461 printk(KERN_INFO "%s:%s dump p_last CCW BK \n",dev->name,__FUNCTION__);
1462 dumpit((char *)p_last, sizeof(struct ccwbk));
1463 printk(KERN_INFO "%s:%s dump p_end CCW BK \n",dev->name,__FUNCTION__);
1464 dumpit((char *)p_end, sizeof(struct endccw));
1465
1466 printk(KERN_INFO "%s:%s dump p_first CCW BK \n",dev->name,__FUNCTION__);
1467 dumpit((char *)p_first, sizeof(struct ccwbk));
1468 printk(KERN_INFO "%s:%s Dump Active CCW chain \n",
1469 dev->name,__FUNCTION__);
1470 p_buf=privptr->p_read_active_first;
1471 while (p_buf!=NULL) {
1472 dumpit((char *)p_buf, sizeof(struct ccwbk));
1473 p_buf=p_buf->next;
1474 }
1475#endif
1476#ifdef FUNCTRACE
1477 printk(KERN_INFO "%s:%s Exit on line %d\n",
1478 dev->name,__FUNCTION__,__LINE__);
1479#endif
1480 CLAW_DBF_TEXT(4,trace,"addexit");
1481 return 0;
1482} /* end of add_claw_reads */
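/*
 * Chaining sketch (assuming the ccwbk/endccw layout in claw.h): while
 * reads are in flight, the running program's trailing TIC CCWs get
 * their cda patched to point at the new segment's first read CCW, so
 * the channel program is extended without being restarted; read1
 * merely selects which of the two ending NOP/TIC pairs is rewritten,
 * ensuring the channel never fetches a half-updated CCW.
 */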
1483
1484/*-------------------------------------------------------------------*
1485 * ccw_check_return_code *
1486 * *
1487 *-------------------------------------------------------------------*/
1488
1489static void inline
1490ccw_check_return_code(struct ccw_device *cdev, int return_code)
1491{
1492#ifdef FUNCTRACE
1493 printk(KERN_INFO "%s: %s() > enter \n",
1494 cdev->dev.bus_id,__FUNCTION__);
1495#endif
1496 CLAW_DBF_TEXT(4,trace,"ccwret");
1497#ifdef DEBUGMSG
1498 printk(KERN_INFO "variable cdev =\n");
1499 dumpit((char *) cdev, sizeof(struct ccw_device));
1500 printk(KERN_INFO "variable return_code = %d\n",return_code);
1501#endif
1502 if (return_code != 0) {
1503 switch (return_code) {
1504 case -EBUSY:
1505 printk(KERN_INFO "%s: Busy !\n",
1506 cdev->dev.bus_id);
1507 break;
1508 case -ENODEV:
1509 printk(KERN_EMERG "%s: Missing device called "
1510 "for IO ENODEV\n", cdev->dev.bus_id);
1511 break;
1512 case -EIO:
1513 printk(KERN_EMERG "%s: Status pending... EIO \n",
1514 cdev->dev.bus_id);
1515 break;
1516 case -EINVAL:
1517 printk(KERN_EMERG "%s: Invalid Dev State EINVAL \n",
1518 cdev->dev.bus_id);
1519 break;
1520 default:
1521 printk(KERN_EMERG "%s: Unknown error in "
1522 "Do_IO %d\n",cdev->dev.bus_id, return_code);
1523 }
1524 }
1525#ifdef FUNCTRACE
1526 printk(KERN_INFO "%s: %s() > exit on line %d\n",
1527 cdev->dev.bus_id,__FUNCTION__,__LINE__);
1528#endif
1529 CLAW_DBF_TEXT(4,trace,"ccwret");
1530} /* end of ccw_check_return_code */
1531
1532/*-------------------------------------------------------------------*
1533* ccw_check_unit_check *
1534*--------------------------------------------------------------------*/
1535
1536static void inline
1537ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1538{
1539 struct net_device *dev = p_ch->ndev;
1540
1541#ifdef FUNCTRACE
1542 printk(KERN_INFO "%s: %s() > enter\n",dev->name,__FUNCTION__);
1543#endif
1544#ifdef DEBUGMSG
1545 printk(KERN_INFO "%s: variable dev =\n",dev->name);
1546 dumpit((char *)dev, sizeof(struct net_device));
1547 printk(KERN_INFO "%s: variable sense =\n",dev->name);
1548 dumpit((char *)&sense, 2);
1549#endif
1550 CLAW_DBF_TEXT(4,trace,"unitchek");
1551
1552	printk(KERN_INFO "%s: Unit Check with sense byte:0x%02x\n",
1553 dev->name, sense);
1554
1555 if (sense & 0x40) {
1556 if (sense & 0x01) {
1557 printk(KERN_WARNING "%s: Interface disconnect or "
1558 "Selective reset "
1559 "occurred (remote side)\n", dev->name);
1560 }
1561 else {
1562			printk(KERN_WARNING "%s: System reset occurred"
1563 " (remote side)\n", dev->name);
1564 }
1565 }
1566 else if (sense & 0x20) {
1567 if (sense & 0x04) {
1568 printk(KERN_WARNING "%s: Data-streaming "
1569				"timeout\n", dev->name);
1570 }
1571 else {
1572 printk(KERN_WARNING "%s: Data-transfer parity"
1573 " error\n", dev->name);
1574 }
1575 }
1576 else if (sense & 0x10) {
1577 if (sense & 0x20) {
1578 printk(KERN_WARNING "%s: Hardware malfunction "
1579 "(remote side)\n", dev->name);
1580 }
1581 else {
1582 printk(KERN_WARNING "%s: read-data parity error "
1583 "(remote side)\n", dev->name);
1584 }
1585 }
1586
1587#ifdef FUNCTRACE
1588 printk(KERN_INFO "%s: %s() exit on line %d\n",
1589 dev->name,__FUNCTION__,__LINE__);
1590#endif
1591} /* end of ccw_check_unit_check */
1592
1593
1594
1595/*-------------------------------------------------------------------*
1596* Dump buffer format *
1597* *
1598*--------------------------------------------------------------------*/
1599#ifdef DEBUG
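/*
 * dumpit() prints a buffer 16 bytes per line in the form
 *     <address> (+<offset>) : <hex bytes> [<ascii>]
 * for example (illustrative values only):
 *     0DC90200 (+0010) : 7F454C46 01010100  00000000 00000000 [.ELF............]
 * Runs of identical lines are collapsed into a single
 * "Duplicate as above to <address>" marker.
 */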
1600static void
1601dumpit(char* buf, int len)
1602{
1603
1604 __u32 ct, sw, rm, dup;
1605 char *ptr, *rptr;
1606 char tbuf[82], tdup[82];
1607#ifdef CONFIG_ARCH_S390X
1608 char addr[22];
1609#else
1610 char addr[12];
1611#endif
1612 char boff[12];
1613 char bhex[82], duphex[82];
1614 char basc[40];
1615
1616 sw = 0;
1617 rptr =ptr=buf;
1618 rm = 16;
1619 duphex[0] = 0x00;
1620 dup = 0;
1621 for ( ct=0; ct < len; ct++, ptr++, rptr++ ) {
1622 if (sw == 0) {
1623#ifdef CONFIG_ARCH_S390X
1624 sprintf(addr, "%16.16lX",(unsigned long)rptr);
1625#else
1626 sprintf(addr, "%8.8X",(__u32)rptr);
1627#endif
1628 sprintf(boff, "%4.4X", (__u32)ct);
1629 bhex[0] = '\0';
1630 basc[0] = '\0';
1631 }
1632 if ((sw == 4) || (sw == 12)) {
1633 strcat(bhex, " ");
1634 }
1635 if (sw == 8) {
1636 strcat(bhex, " ");
1637 }
1638#ifdef CONFIG_ARCH_S390X
1639 sprintf(tbuf,"%2.2lX", (unsigned long)*ptr);
1640#else
1641 sprintf(tbuf,"%2.2X", (__u32)*ptr);
1642#endif
1643 tbuf[2] = '\0';
1644 strcat(bhex, tbuf);
1645 if ((0!=isprint(*ptr)) && (*ptr >= 0x20)) {
1646 basc[sw] = *ptr;
1647 }
1648 else {
1649 basc[sw] = '.';
1650 }
1651 basc[sw+1] = '\0';
1652 sw++;
1653 rm--;
1654 if (sw==16) {
1655 if ((strcmp(duphex, bhex)) !=0) {
1656 if (dup !=0) {
1657 sprintf(tdup,"Duplicate as above to"
1658 " %s", addr);
1659 printk( KERN_INFO " "
1660 " --- %s ---\n",tdup);
1661 }
1662 printk( KERN_INFO " %s (+%s) : %s [%s]\n",
1663 addr, boff, bhex, basc);
1664 dup = 0;
1665 strcpy(duphex, bhex);
1666 }
1667 else {
1668 dup++;
1669 }
1670 sw = 0;
1671 rm = 16;
1672 }
1673 } /* endfor */
1674
1675 if (sw != 0) {
1676 for ( ; rm > 0; rm--, sw++ ) {
1677 if ((sw==4) || (sw==12)) strcat(bhex, " ");
1678 if (sw==8) strcat(bhex, " ");
1679 strcat(bhex, " ");
1680 strcat(basc, " ");
1681 }
1682 if (dup !=0) {
1683 sprintf(tdup,"Duplicate as above to %s", addr);
1684 printk( KERN_INFO " --- %s ---\n",
1685 tdup);
1686 }
1687 printk( KERN_INFO " %s (+%s) : %s [%s]\n",
1688 addr, boff, bhex, basc);
1689 }
1690	else {
1691		if (dup !=0) {
1692			sprintf(tdup,"Duplicate as above to %s", addr);
1693			printk( KERN_INFO "                       --- %s ---\n",
1694				tdup);
1695			printk( KERN_INFO "   %s (+%s) : %s [%s]\n",
1696				addr, boff, bhex, basc);
1697		}
1698	}
1701 return;
1702
1703} /* end of dumpit */
1704#endif
1705
1706/*-------------------------------------------------------------------*
1707* find_link *
1708*--------------------------------------------------------------------*/
1709static int
1710find_link(struct net_device *dev, char *host_name, char *ws_name )
1711{
1712 struct claw_privbk *privptr;
1713 struct claw_env *p_env;
1714 int rc=0;
1715
1716#ifdef FUNCTRACE
1717 printk(KERN_INFO "%s:%s > enter \n",dev->name,__FUNCTION__);
1718#endif
1719 CLAW_DBF_TEXT(2,setup,"findlink");
1720#ifdef DEBUGMSG
1721 printk(KERN_INFO "%s: variable dev = \n",dev->name);
1722 dumpit((char *) dev, sizeof(struct net_device));
1723 printk(KERN_INFO "%s: variable host_name = %s\n",dev->name, host_name);
1724 printk(KERN_INFO "%s: variable ws_name = %s\n",dev->name, ws_name);
1725#endif
1726 privptr=dev->priv;
1727 p_env=privptr->p_env;
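	/*
	 * The application names to validate against depend on the
	 * negotiated packing mode: packed links are checked against the
	 * packed WS application name, IP-packed links against the IP
	 * application name, and unpacked links against HOST_APPL_NAME
	 * and the api_type configured for this interface.
	 */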
1728 switch (p_env->packing)
1729 {
1730 case PACKING_ASK:
1731 if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) ||
1732 (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 ))
1733 rc = EINVAL;
1734 break;
1735 case DO_PACKED:
1736 case PACK_SEND:
1737 if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) ||
1738 (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 ))
1739 rc = EINVAL;
1740 break;
1741 default:
1742 if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) ||
1743 (memcmp(p_env->api_type , ws_name, 8)!=0))
1744 rc = EINVAL;
1745 break;
1746 }
1747
1748#ifdef FUNCTRACE
1749 printk(KERN_INFO "%s:%s Exit on line %d\n",
1750 dev->name,__FUNCTION__,__LINE__);
1751#endif
1752	return rc;
1753} /* end of find_link */
1754
1755/*-------------------------------------------------------------------*
1756 * claw_hw_tx *
1757 * *
1758 * *
1759 *-------------------------------------------------------------------*/
1760
1761static int
1762claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1763{
1764 int rc=0;
1765 struct claw_privbk *privptr;
1766 struct ccwbk *p_this_ccw;
1767 struct ccwbk *p_first_ccw;
1768 struct ccwbk *p_last_ccw;
1769 __u32 numBuffers;
1770 signed long len_of_data;
1771 unsigned long bytesInThisBuffer;
1772 unsigned char *pDataAddress;
1773 struct endccw *pEnd;
1774 struct ccw1 tempCCW;
1775 struct chbk *p_ch;
1776 struct claw_env *p_env;
1778 struct clawph *pk_head;
1779 struct chbk *ch;
1780#ifdef IOTRACE
1781 struct ccwbk *p_buf;
1782#endif
1783#ifdef FUNCTRACE
1784 printk(KERN_INFO "%s: %s() > enter\n",dev->name,__FUNCTION__);
1785#endif
1786 CLAW_DBF_TEXT(4,trace,"hw_tx");
1787#ifdef DEBUGMSG
1788 printk(KERN_INFO "%s: variable dev skb =\n",dev->name);
1789 dumpit((char *) skb, sizeof(struct sk_buff));
1790 printk(KERN_INFO "%s: variable dev =\n",dev->name);
1791 dumpit((char *) dev, sizeof(struct net_device));
1792 printk(KERN_INFO "%s: variable linkid = %ld\n",dev->name,linkid);
1793#endif
1794 privptr = (struct claw_privbk *) (dev->priv);
1795 p_ch=(struct chbk *)&privptr->channel[WRITE];
1796 p_env =privptr->p_env;
1797#ifdef IOTRACE
1798 printk(KERN_INFO "%s: %s() dump sk_buff \n",dev->name,__FUNCTION__);
1799 dumpit((char *)skb ,sizeof(struct sk_buff));
1800#endif
1801	claw_free_wrt_buf(dev);	/* Clean up free chain if possible */
1802 /* scan the write queue to free any completed write packets */
1803 p_first_ccw=NULL;
1804 p_last_ccw=NULL;
1805 if ((p_env->packing >= PACK_SEND) &&
1806 (skb->cb[1] != 'P')) {
1807 skb_push(skb,sizeof(struct clawph));
1808 pk_head=(struct clawph *)skb->data;
1809 pk_head->len=skb->len-sizeof(struct clawph);
1810 if (pk_head->len%4) {
1811			skb_pad(skb,4-(pk_head->len%4));
1812			skb_put(skb,4-(pk_head->len%4));
1813			pk_head->len+= 4-(pk_head->len%4);
1814 }
1815 if (p_env->packing == DO_PACKED)
1816 pk_head->link_num = linkid;
1817 else
1818 pk_head->link_num = 0;
1819 pk_head->flag = 0x00;
1820 skb_pad(skb,4);
1821 skb->cb[1] = 'P';
1822 }
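	/*
	 * The block above rounds the pack header length up to a multiple
	 * of 4 and pads the skb to match.  For example (hypothetical
	 * payload): a 61-byte payload gives pk_head->len = 61,
	 * 61 % 4 = 1, so three pad bytes are appended and pk_head->len
	 * becomes 64.
	 */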
1823 if (linkid == 0) {
1824 if (claw_check_busy(dev)) {
1825 if (privptr->write_free_count!=0) {
1826 claw_clear_busy(dev);
1827 }
1828 else {
1829 claw_strt_out_IO(dev );
1830 claw_free_wrt_buf( dev );
1831 if (privptr->write_free_count==0) {
1832#ifdef IOTRACE
1833 printk(KERN_INFO "%s: "
1834 "(claw_check_busy) no free write "
1835 "buffers\n", dev->name);
1836#endif
1837 ch = &privptr->channel[WRITE];
1838 atomic_inc(&skb->users);
1839 skb_queue_tail(&ch->collect_queue, skb);
1840 goto Done;
1841 }
1842 else {
1843 claw_clear_busy(dev);
1844 }
1845 }
1846 }
1847 /* tx lock */
1848 if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
1849#ifdef DEBUGMSG
1850 printk(KERN_INFO "%s: busy (claw_test_and_setbit_"
1851 "busy)\n", dev->name);
1852#endif
1853 ch = &privptr->channel[WRITE];
1854 atomic_inc(&skb->users);
1855 skb_queue_tail(&ch->collect_queue, skb);
1856 claw_strt_out_IO(dev );
1857 rc=-EBUSY;
1858 goto Done2;
1859 }
1860 }
1861 /* See how many write buffers are required to hold this data */
1862 numBuffers= ( skb->len + privptr->p_env->write_size - 1) /
1863 ( privptr->p_env->write_size);
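	/* The expression above is a ceiling division.  For instance,
	 * assuming the 4k unpacked write_size, a 9000-byte skb needs
	 * (9000 + 4096 - 1) / 4096 = 3 write buffers.
	 */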
1864
1865 /* If that number of buffers isn't available, give up for now */
1866 if (privptr->write_free_count < numBuffers ||
1867 privptr->p_write_free_chain == NULL ) {
1868
1869 claw_setbit_busy(TB_NOBUFFER,dev);
1870
1871#ifdef DEBUGMSG
1872 printk(KERN_INFO "%s: busy (claw_setbit_busy"
1873 "(TB_NOBUFFER))\n", dev->name);
1874 printk(KERN_INFO " free_count: %d, numBuffers : %d\n",
1875 (int)privptr->write_free_count,(int) numBuffers );
1876#endif
1877 ch = &privptr->channel[WRITE];
1878 atomic_inc(&skb->users);
1879 skb_queue_tail(&ch->collect_queue, skb);
1880 CLAW_DBF_TEXT(2,trace,"clawbusy");
1881 goto Done2;
1882 }
1883 pDataAddress=skb->data;
1884 len_of_data=skb->len;
1885
1886 while (len_of_data > 0) {
1887#ifdef DEBUGMSG
1888 printk(KERN_INFO "%s: %s() length-of-data is %ld \n",
1889 dev->name ,__FUNCTION__,len_of_data);
1890 dumpit((char *)pDataAddress ,64);
1891#endif
1892 p_this_ccw=privptr->p_write_free_chain; /* get a block */
1893 if (p_this_ccw == NULL) { /* lost the race */
1894 ch = &privptr->channel[WRITE];
1895 atomic_inc(&skb->users);
1896 skb_queue_tail(&ch->collect_queue, skb);
1897 goto Done2;
1898 }
1899 privptr->p_write_free_chain=p_this_ccw->next;
1900 p_this_ccw->next=NULL;
1901 --privptr->write_free_count; /* -1 */
1902 bytesInThisBuffer=len_of_data;
1903 memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer);
1904 len_of_data-=bytesInThisBuffer;
1905 pDataAddress+=(unsigned long)bytesInThisBuffer;
1906 /* setup write CCW */
1907 p_this_ccw->write.cmd_code = (linkid * 8) +1;
1908 if (len_of_data>0) {
1909 p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG;
1910 }
1911 p_this_ccw->write.count=bytesInThisBuffer;
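		/*
		 * CLAW encodes the logical link in the write command code:
		 * cmd_code = linkid * 8 + 1, so link 1 gives 0x09.  The
		 * MORE_to_COME flag is ORed in when the packet continues
		 * in the next buffer.
		 */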
1912 /* now add to end of this chain */
1913 if (p_first_ccw==NULL) {
1914 p_first_ccw=p_this_ccw;
1915 }
1916 if (p_last_ccw!=NULL) {
1917 p_last_ccw->next=p_this_ccw;
1918 /* set up TIC ccws */
1919 p_last_ccw->w_TIC_1.cda=
1920 (__u32)__pa(&p_this_ccw->write);
1921 }
1922 p_last_ccw=p_this_ccw; /* save new last block */
1923#ifdef IOTRACE
1924 printk(KERN_INFO "%s: %s() > CCW and Buffer %ld bytes long \n",
1925 dev->name,__FUNCTION__,bytesInThisBuffer);
1926 dumpit((char *)p_this_ccw, sizeof(struct ccwbk));
1927 dumpit((char *)p_this_ccw->p_buffer, 64);
1928#endif
1929 }
1930
1931	/* p_first_ccw and p_last_ccw now delimit a new set of write channel
1932	 * programs to be appended to the running channel program
1933	 */
1934
1935 if (p_first_ccw!=NULL) {
1936 /* setup ending ccw sequence for this segment */
1937 pEnd=privptr->p_end_ccw;
1938 if (pEnd->write1) {
1939 pEnd->write1=0x00; /* second end ccw is now active */
1940 /* set up Tic CCWs */
1941 p_last_ccw->w_TIC_1.cda=
1942 (__u32)__pa(&pEnd->write2_nop1);
1943 pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1944 pEnd->write2_nop2.flags =
1945 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1946 pEnd->write2_nop2.cda=0;
1947 pEnd->write2_nop2.count=1;
1948 }
1949 else { /* end of if (pEnd->write1)*/
1950 pEnd->write1=0x01; /* first end ccw is now active */
1951 /* set up Tic CCWs */
1952 p_last_ccw->w_TIC_1.cda=
1953 (__u32)__pa(&pEnd->write1_nop1);
1954 pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1955 pEnd->write1_nop2.flags =
1956 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1957 pEnd->write1_nop2.cda=0;
1958 pEnd->write1_nop2.count=1;
1959		} /* end of if (pEnd->write1) */
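		/*
		 * The two ending sequences (write1_nop* and write2_nop*)
		 * are used alternately: the set selected by pEnd->write1
		 * is rebuilt and chained to the new segment while the
		 * other set still terminates the running channel program,
		 * so the program is extended without a gap.
		 */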
1960
1961
1962 if (privptr->p_write_active_first==NULL ) {
1963 privptr->p_write_active_first=p_first_ccw;
1964 privptr->p_write_active_last=p_last_ccw;
1965 }
1966 else {
1967
1968 /* set up Tic CCWs */
1969
1970 tempCCW.cda=(__u32)__pa(&p_first_ccw->write);
1971 tempCCW.count=0;
1972 tempCCW.flags=0;
1973 tempCCW.cmd_code=CCW_CLAW_CMD_TIC;
1974
1975 if (pEnd->write1) {
1976
1977			/*
1978			 * The first set of ending CCWs is chained to the new write
1979			 * chain, so the second set is chained to the active chain.
1980			 * Therefore modify the second set to point to the new write
1981			 * chain, updating the CCW atomically so the channel does
1982			 * not fetch it when it is only half done.
1983			 */
1984 memcpy( &pEnd->write2_nop2, &tempCCW ,
1985 sizeof(struct ccw1));
1986 privptr->p_write_active_last->w_TIC_1.cda=
1987 (__u32)__pa(&p_first_ccw->write);
1988 }
1989 else {
1990
1991			/* make sure we update the CCW atomically
1992			 * so the channel does not fetch it when it is only half done
1993			 */
1994 memcpy(&pEnd->write1_nop2, &tempCCW ,
1995 sizeof(struct ccw1));
1996 privptr->p_write_active_last->w_TIC_1.cda=
1997 (__u32)__pa(&p_first_ccw->write);
1998
1999			} /* end of if (pEnd->write1) */
2000
2001 privptr->p_write_active_last->next=p_first_ccw;
2002 privptr->p_write_active_last=p_last_ccw;
2003 }
2004
2005 } /* endif (p_first_ccw!=NULL) */
2006
2007
2008#ifdef IOTRACE
2009 printk(KERN_INFO "%s: %s() > Dump Active CCW chain \n",
2010 dev->name,__FUNCTION__);
2011 p_buf=privptr->p_write_active_first;
2012 while (p_buf!=NULL) {
2013 dumpit((char *)p_buf, sizeof(struct ccwbk));
2014 p_buf=p_buf->next;
2015 }
2016 p_buf=(struct ccwbk*)privptr->p_end_ccw;
2017 dumpit((char *)p_buf, sizeof(struct endccw));
2018#endif
2019 dev_kfree_skb(skb);
2026 claw_strt_out_IO(dev );
2027	/* if write free count is zero, set NOBUFFER */
2028#ifdef DEBUGMSG
2029 printk(KERN_INFO "%s: %s() > free_count is %d\n",
2030 dev->name,__FUNCTION__,
2031 (int) privptr->write_free_count );
2032#endif
2033 if (privptr->write_free_count==0) {
2034 claw_setbit_busy(TB_NOBUFFER,dev);
2035 }
2036Done2:
2037 claw_clearbit_busy(TB_TX,dev);
2038Done:
2039#ifdef FUNCTRACE
2040 printk(KERN_INFO "%s: %s() > exit on line %d, rc = %d \n",
2041 dev->name,__FUNCTION__,__LINE__, rc);
2042#endif
2043	return rc;
2044} /* end of claw_hw_tx */
2045
2046/*-------------------------------------------------------------------*
2047* *
2048* init_ccw_bk *
2049* *
2050*--------------------------------------------------------------------*/
2051
2052static int
2053init_ccw_bk(struct net_device *dev)
2054{
2055
2056 __u32 ccw_blocks_required;
2057 __u32 ccw_blocks_perpage;
2058 __u32 ccw_pages_required;
2059 __u32 claw_reads_perpage=1;
2060 __u32 claw_read_pages;
2061 __u32 claw_writes_perpage=1;
2062 __u32 claw_write_pages;
2063 void *p_buff=NULL;
2064 struct ccwbk*p_free_chain;
2065 struct ccwbk*p_buf;
2066 struct ccwbk*p_last_CCWB;
2067 struct ccwbk*p_first_CCWB;
2068 struct endccw *p_endccw=NULL;
2069 addr_t real_address;
2070 struct claw_privbk *privptr=dev->priv;
2071 struct clawh *pClawH=NULL;
2072 addr_t real_TIC_address;
2073 int i,j;
2074#ifdef FUNCTRACE
2075 printk(KERN_INFO "%s: %s() enter \n",dev->name,__FUNCTION__);
2076#endif
2077 CLAW_DBF_TEXT(4,trace,"init_ccw");
2078#ifdef DEBUGMSG
2079 printk(KERN_INFO "%s: variable dev =\n",dev->name);
2080 dumpit((char *) dev, sizeof(struct net_device));
2081#endif
2082
2083 /* initialize statistics field */
2084 privptr->active_link_ID=0;
2085 /* initialize ccwbk pointers */
2086 privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/
2087 privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/
2088 privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/
2089 privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/
2090 privptr->p_read_active_last=NULL; /* pointer to the last read ccw */
2091 privptr->p_end_ccw=NULL; /* pointer to ending ccw */
2092 privptr->p_claw_signal_blk=NULL; /* pointer to signal block */
2093 privptr->buffs_alloc = 0;
2094 memset(&privptr->end_ccw, 0x00, sizeof(struct endccw));
2095 memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl));
2096 /* initialize free write ccwbk counter */
2097 privptr->write_free_count=0; /* number of free bufs on write chain */
2098 p_last_CCWB = NULL;
2099 p_first_CCWB= NULL;
2100 /*
2101 * We need 1 CCW block for each read buffer, 1 for each
2102 * write buffer, plus 1 for ClawSignalBlock
2103 */
2104 ccw_blocks_required =
2105 privptr->p_env->read_buffers+privptr->p_env->write_buffers+1;
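	/*
	 * For example (hypothetical configuration): with 4 read and
	 * 4 write buffers the sum above comes to 4 + 4 + 1 = 9 CCW
	 * blocks.
	 */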
2106#ifdef DEBUGMSG
2107 printk(KERN_INFO "%s: %s() "
2108 "ccw_blocks_required=%d\n",
2109 dev->name,__FUNCTION__,
2110 ccw_blocks_required);
2111 printk(KERN_INFO "%s: %s() "
2112 "PAGE_SIZE=0x%x\n",
2113 dev->name,__FUNCTION__,
2114 (unsigned int)PAGE_SIZE);
2115 printk(KERN_INFO "%s: %s() > "
2116 "PAGE_MASK=0x%x\n",
2117 dev->name,__FUNCTION__,
2118 (unsigned int)PAGE_MASK);
2119#endif
2120 /*
2121 * compute number of CCW blocks that will fit in a page
2122 */
2123 ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE;
2124 ccw_pages_required=
2125 (ccw_blocks_required+ccw_blocks_perpage -1) /
2126 ccw_blocks_perpage;
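	/*
	 * The page count above is another ceiling division.  If, say,
	 * a ccwbk were 256 bytes, 4096 / 256 = 16 blocks would fit per
	 * 4k page and 9 required blocks would need
	 * (9 + 16 - 1) / 16 = 1 page.
	 */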
2127
2128#ifdef DEBUGMSG
2129 printk(KERN_INFO "%s: %s() > ccw_blocks_perpage=%d\n",
2130 dev->name,__FUNCTION__,
2131 ccw_blocks_perpage);
2132 printk(KERN_INFO "%s: %s() > ccw_pages_required=%d\n",
2133 dev->name,__FUNCTION__,
2134 ccw_pages_required);
2135#endif
2136 /*
2137	 * read and write sizes are set by two constants in claw.h,
2138	 * 4k and 32k. Unpacked values other than 4k are not going to
2139	 * provide good performance. With packing support enabled, 32k
2140	 * buffers are used.
2141 */
2142 if (privptr->p_env->read_size < PAGE_SIZE) {
2143 claw_reads_perpage= PAGE_SIZE / privptr->p_env->read_size;
2144 claw_read_pages= (privptr->p_env->read_buffers +
2145 claw_reads_perpage -1) / claw_reads_perpage;
2146 }
2147 else { /* > or equal */
2148 privptr->p_buff_pages_perread=
2149 (privptr->p_env->read_size + PAGE_SIZE - 1) / PAGE_SIZE;
2150 claw_read_pages=
2151 privptr->p_env->read_buffers * privptr->p_buff_pages_perread;
2152 }
2153 if (privptr->p_env->write_size < PAGE_SIZE) {
2154 claw_writes_perpage=
2155 PAGE_SIZE / privptr->p_env->write_size;
2156 claw_write_pages=
2157 (privptr->p_env->write_buffers + claw_writes_perpage -1) /
2158 claw_writes_perpage;
2159
2160 }
2161 else { /* > or equal */
2162 privptr->p_buff_pages_perwrite=
2163		 (privptr->p_env->write_size + PAGE_SIZE - 1) / PAGE_SIZE;
2164 claw_write_pages=
2165 privptr->p_env->write_buffers * privptr->p_buff_pages_perwrite;
2166 }
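	/*
	 * Example with the sizes named above: a 32k packing buffer
	 * spans (32768 + 4095) / 4096 = 8 pages per buffer, while a
	 * hypothetical sub-page size of 2k packs two buffers per
	 * 4k page.
	 */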
2167#ifdef DEBUGMSG
2168 if (privptr->p_env->read_size < PAGE_SIZE) {
2169 printk(KERN_INFO "%s: %s() reads_perpage=%d\n",
2170 dev->name,__FUNCTION__,
2171 claw_reads_perpage);
2172 }
2173 else {
2174 printk(KERN_INFO "%s: %s() pages_perread=%d\n",
2175 dev->name,__FUNCTION__,
2176 privptr->p_buff_pages_perread);
2177 }
2178 printk(KERN_INFO "%s: %s() read_pages=%d\n",
2179 dev->name,__FUNCTION__,
2180 claw_read_pages);
2181 if (privptr->p_env->write_size < PAGE_SIZE) {
2182 printk(KERN_INFO "%s: %s() writes_perpage=%d\n",
2183 dev->name,__FUNCTION__,
2184 claw_writes_perpage);
2185 }
2186 else {
2187 printk(KERN_INFO "%s: %s() pages_perwrite=%d\n",
2188 dev->name,__FUNCTION__,
2189 privptr->p_buff_pages_perwrite);
2190 }
2191 printk(KERN_INFO "%s: %s() write_pages=%d\n",
2192 dev->name,__FUNCTION__,
2193 claw_write_pages);
2194#endif
2195
2196
2197 /*
2198 * allocate ccw_pages_required
2199 */
2200 if (privptr->p_buff_ccw==NULL) {
2201 privptr->p_buff_ccw=
2202 (void *)__get_free_pages(__GFP_DMA,
2203 (int)pages_to_order_of_mag(ccw_pages_required ));
2204 if (privptr->p_buff_ccw==NULL) {
2205 printk(KERN_INFO "%s: %s() "
2206 "__get_free_pages for CCWs failed : "
2207 "pages is %d\n",
2208 dev->name,__FUNCTION__,
2209 ccw_pages_required );
2210#ifdef FUNCTRACE
2211 printk(KERN_INFO "%s: %s() > "
2212 "exit on line %d, rc = ENOMEM\n",
2213 dev->name,__FUNCTION__,
2214 __LINE__);
2215#endif
2216 return -ENOMEM;
2217 }
2218 privptr->p_buff_ccw_num=ccw_pages_required;
2219 }
2220 memset(privptr->p_buff_ccw, 0x00,
2221 privptr->p_buff_ccw_num * PAGE_SIZE);
2222
2223 /*
2224 * obtain ending ccw block address
2225 *
2226 */
2227 privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw;
2228 real_address = (__u32)__pa(privptr->p_end_ccw);
2229 /* Initialize ending CCW block */
2230#ifdef DEBUGMSG
2231 printk(KERN_INFO "%s: %s() begin initialize ending CCW blocks\n",
2232 dev->name,__FUNCTION__);
2233#endif
2234
2235 p_endccw=privptr->p_end_ccw;
2236 p_endccw->real=real_address;
2237 p_endccw->write1=0x00;
2238 p_endccw->read1=0x00;
2239
2240 /* write1_nop1 */
2241 p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
2242 p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2243 p_endccw->write1_nop1.count = 1;
2244 p_endccw->write1_nop1.cda = 0;
2245
2246 /* write1_nop2 */
2247 p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
2248 p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
2249 p_endccw->write1_nop2.count = 1;
2250 p_endccw->write1_nop2.cda = 0;
2251
2252 /* write2_nop1 */
2253 p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
2254 p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2255 p_endccw->write2_nop1.count = 1;
2256 p_endccw->write2_nop1.cda = 0;
2257
2258 /* write2_nop2 */
2259 p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
2260 p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
2261 p_endccw->write2_nop2.count = 1;
2262 p_endccw->write2_nop2.cda = 0;
2263
2264 /* read1_nop1 */
2265 p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
2266 p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2267 p_endccw->read1_nop1.count = 1;
2268 p_endccw->read1_nop1.cda = 0;
2269
2270 /* read1_nop2 */
2271 p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
2272 p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
2273 p_endccw->read1_nop2.count = 1;
2274 p_endccw->read1_nop2.cda = 0;
2275
2276 /* read2_nop1 */
2277 p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
2278 p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2279 p_endccw->read2_nop1.count = 1;
2280 p_endccw->read2_nop1.cda = 0;
2281
2282 /* read2_nop2 */
2283 p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
2284 p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
2285 p_endccw->read2_nop2.count = 1;
2286 p_endccw->read2_nop2.cda = 0;
2287
2288#ifdef IOTRACE
2289 printk(KERN_INFO "%s: %s() dump claw ending CCW BK \n",
2290 dev->name,__FUNCTION__);
2291 dumpit((char *)p_endccw, sizeof(struct endccw));
2292#endif
2293
2294 /*
2295 * Build a chain of CCWs
2296 *
2297 */
2298
2299#ifdef DEBUGMSG
2300 printk(KERN_INFO "%s: %s() Begin build a chain of CCW buffer \n",
2301 dev->name,__FUNCTION__);
2302#endif
2303 p_buff=privptr->p_buff_ccw;
2304
2305 p_free_chain=NULL;
2306 for (i=0 ; i < ccw_pages_required; i++ ) {
2307 real_address = (__u32)__pa(p_buff);
2308 p_buf=p_buff;
2309 for (j=0 ; j < ccw_blocks_perpage ; j++) {
2310 p_buf->next = p_free_chain;
2311 p_free_chain = p_buf;
2312 p_buf->real=(__u32)__pa(p_buf);
2313 ++p_buf;
2314 }
2315 p_buff+=PAGE_SIZE;
2316 }
2317#ifdef DEBUGMSG
2318 printk(KERN_INFO "%s: %s() "
2319 "End build a chain of CCW buffer \n",
2320 dev->name,__FUNCTION__);
2321 p_buf=p_free_chain;
2322 while (p_buf!=NULL) {
2323 dumpit((char *)p_buf, sizeof(struct ccwbk));
2324 p_buf=p_buf->next;
2325 }
2326#endif
2327
2328 /*
2329 * Initialize ClawSignalBlock
2330 *
2331 */
2332#ifdef DEBUGMSG
2333 printk(KERN_INFO "%s: %s() "
2334 "Begin initialize ClawSignalBlock \n",
2335 dev->name,__FUNCTION__);
2336#endif
2337 if (privptr->p_claw_signal_blk==NULL) {
2338 privptr->p_claw_signal_blk=p_free_chain;
2339 p_free_chain=p_free_chain->next;
2340 pClawH=(struct clawh *)privptr->p_claw_signal_blk;
2341 pClawH->length=0xffff;
2342 pClawH->opcode=0xff;
2343 pClawH->flag=CLAW_BUSY;
2344 }
2345#ifdef DEBUGMSG
2346 printk(KERN_INFO "%s: %s() > End initialize "
2347 "ClawSignalBlock\n",
2348 dev->name,__FUNCTION__);
2349 dumpit((char *)privptr->p_claw_signal_blk, sizeof(struct ccwbk));
2350#endif
2351
2352 /*
2353 * allocate write_pages_required and add to free chain
2354 */
2355 if (privptr->p_buff_write==NULL) {
2356 if (privptr->p_env->write_size < PAGE_SIZE) {
2357 privptr->p_buff_write=
2358 (void *)__get_free_pages(__GFP_DMA,
2359 (int)pages_to_order_of_mag(claw_write_pages ));
2360 if (privptr->p_buff_write==NULL) {
2361 printk(KERN_INFO "%s: %s() __get_free_pages for write"
2362 " bufs failed : get is for %d pages\n",
2363 dev->name,__FUNCTION__,claw_write_pages );
2364 free_pages((unsigned long)privptr->p_buff_ccw,
2365 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
2366 privptr->p_buff_ccw=NULL;
2367#ifdef FUNCTRACE
2368 printk(KERN_INFO "%s: %s() > exit on line %d,"
2369 "rc = ENOMEM\n",
2370 dev->name,__FUNCTION__,__LINE__);
2371#endif
2372 return -ENOMEM;
2373 }
2374 /*
2375 * Build CLAW write free chain
2376 *
2377 */
2378
2379 memset(privptr->p_buff_write, 0x00,
2380				claw_write_pages * PAGE_SIZE);
2381#ifdef DEBUGMSG
2382 printk(KERN_INFO "%s: %s() Begin build claw write free "
2383 "chain \n",dev->name,__FUNCTION__);
2384#endif
2385 privptr->p_write_free_chain=NULL;
2386
2387 p_buff=privptr->p_buff_write;
2388
2389 for (i=0 ; i< privptr->p_env->write_buffers ; i++) {
2390 p_buf = p_free_chain; /* get a CCW */
2391 p_free_chain = p_buf->next;
2392 p_buf->next =privptr->p_write_free_chain;
2393 privptr->p_write_free_chain = p_buf;
2394 p_buf-> p_buffer = (struct clawbuf *)p_buff;
2395 p_buf-> write.cda = (__u32)__pa(p_buff);
2396 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2397 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
2398 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2399 p_buf-> w_read_FF.count = 1;
2400 p_buf-> w_read_FF.cda =
2401 (__u32)__pa(&p_buf-> header.flag);
2402 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
2403 p_buf-> w_TIC_1.flags = 0;
2404 p_buf-> w_TIC_1.count = 0;
2405
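			/*
			 * Carve the next sub-page write buffer: place it
			 * contiguously while it would not straddle a page
			 * boundary, otherwise skip ahead to the next page
			 * boundary (the same carving the read side uses).
			 */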
2406			if (((unsigned long)p_buff+privptr->p_env->write_size) >=
2407			((unsigned long)(p_buff+2*
2408			(privptr->p_env->write_size) -1) & PAGE_MASK)) {
2409				p_buff= p_buff+privptr->p_env->write_size;
2410			}
			else {
				p_buff=
					(void *)((unsigned long)
					(p_buff+2*(privptr->p_env->write_size) -1)
					& PAGE_MASK);
			}
2411 }
2412 }
2413 else /* Buffers are => PAGE_SIZE. 1 buff per get_free_pages */
2414 {
2415 privptr->p_write_free_chain=NULL;
2416 for (i = 0; i< privptr->p_env->write_buffers ; i++) {
2417 p_buff=(void *)__get_free_pages(__GFP_DMA,
2418 (int)pages_to_order_of_mag(
2419 privptr->p_buff_pages_perwrite) );
2420#ifdef IOTRACE
2421 printk(KERN_INFO "%s:%s __get_free_pages "
2422 "for writes buf: get for %d pages\n",
2423 dev->name,__FUNCTION__,
2424 privptr->p_buff_pages_perwrite);
2425#endif
2426 if (p_buff==NULL) {
2427 printk(KERN_INFO "%s:%s __get_free_pages"
2428 "for writes buf failed : get is for %d pages\n",
2429 dev->name,
2430 __FUNCTION__,
2431 privptr->p_buff_pages_perwrite );
2432 free_pages((unsigned long)privptr->p_buff_ccw,
2433 (int)pages_to_order_of_mag(
2434 privptr->p_buff_ccw_num));
2435 privptr->p_buff_ccw=NULL;
2436 p_buf=privptr->p_buff_write;
2437 while (p_buf!=NULL) {
2438 free_pages((unsigned long)
2439 p_buf->p_buffer,
2440 (int)pages_to_order_of_mag(
2441 privptr->p_buff_pages_perwrite));
2442 p_buf=p_buf->next;
2443 }
2444#ifdef FUNCTRACE
2445 printk(KERN_INFO "%s: %s exit on line %d, rc = ENOMEM\n",
2446 dev->name,
2447 __FUNCTION__,
2448 __LINE__);
2449#endif
2450 return -ENOMEM;
2451 } /* Error on get_pages */
2452 memset(p_buff, 0x00, privptr->p_env->write_size );
2453 p_buf = p_free_chain;
2454 p_free_chain = p_buf->next;
2455 p_buf->next = privptr->p_write_free_chain;
2456 privptr->p_write_free_chain = p_buf;
2457 privptr->p_buff_write = p_buf;
2458 p_buf->p_buffer=(struct clawbuf *)p_buff;
2459 p_buf-> write.cda = (__u32)__pa(p_buff);
2460 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2461 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
2462 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2463 p_buf-> w_read_FF.count = 1;
2464 p_buf-> w_read_FF.cda =
2465 (__u32)__pa(&p_buf-> header.flag);
2466 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
2467 p_buf-> w_TIC_1.flags = 0;
2468 p_buf-> w_TIC_1.count = 0;
2469 } /* for all write_buffers */
2470
2471 } /* else buffers are PAGE_SIZE or bigger */
2472
2473 }
2474 privptr->p_buff_write_num=claw_write_pages;
2475 privptr->write_free_count=privptr->p_env->write_buffers;
2476
2477
2478#ifdef DEBUGMSG
2479 printk(KERN_INFO "%s:%s End build claw write free chain \n",
2480 dev->name,__FUNCTION__);
2481 p_buf=privptr->p_write_free_chain;
2482 while (p_buf!=NULL) {
2483 dumpit((char *)p_buf, sizeof(struct ccwbk));
2484 p_buf=p_buf->next;
2485 }
2486#endif
2487 /*
2488 * allocate read_pages_required and chain to free chain
2489 */
2490 if (privptr->p_buff_read==NULL) {
2491 if (privptr->p_env->read_size < PAGE_SIZE) {
2492 privptr->p_buff_read=
2493 (void *)__get_free_pages(__GFP_DMA,
2494 (int)pages_to_order_of_mag(claw_read_pages) );
2495 if (privptr->p_buff_read==NULL) {
2496 printk(KERN_INFO "%s: %s() "
2497 "__get_free_pages for read buf failed : "
2498 "get is for %d pages\n",
2499 dev->name,__FUNCTION__,claw_read_pages );
2500 free_pages((unsigned long)privptr->p_buff_ccw,
2501 (int)pages_to_order_of_mag(
2502 privptr->p_buff_ccw_num));
2503				/* free the write pages, size is < page size */
2504 free_pages((unsigned long)privptr->p_buff_write,
2505 (int)pages_to_order_of_mag(
2506 privptr->p_buff_write_num));
2507 privptr->p_buff_ccw=NULL;
2508 privptr->p_buff_write=NULL;
2509#ifdef FUNCTRACE
2510 printk(KERN_INFO "%s: %s() > exit on line %d, rc ="
2511 " ENOMEM\n",dev->name,__FUNCTION__,__LINE__);
2512#endif
2513 return -ENOMEM;
2514 }
2515 memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE);
2516 privptr->p_buff_read_num=claw_read_pages;
2517 /*
2518 * Build CLAW read free chain
2519 *
2520 */
2521#ifdef DEBUGMSG
2522 printk(KERN_INFO "%s: %s() Begin build claw read free chain \n",
2523 dev->name,__FUNCTION__);
2524#endif
2525 p_buff=privptr->p_buff_read;
2526 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
2527 p_buf = p_free_chain;
2528 p_free_chain = p_buf->next;
2529
2530 if (p_last_CCWB==NULL) {
2531 p_buf->next=NULL;
2532 real_TIC_address=0;
2533 p_last_CCWB=p_buf;
2534 }
2535 else {
2536 p_buf->next=p_first_CCWB;
2537 real_TIC_address=
2538 (__u32)__pa(&p_first_CCWB -> read );
2539 }
2540
2541 p_first_CCWB=p_buf;
2542
2543 p_buf->p_buffer=(struct clawbuf *)p_buff;
2544 /* initialize read command */
2545 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
2546 p_buf-> read.cda = (__u32)__pa(p_buff);
2547 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2548 p_buf-> read.count = privptr->p_env->read_size;
2549
2550 /* initialize read_h command */
2551 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
2552 p_buf-> read_h.cda =
2553 (__u32)__pa(&(p_buf->header));
2554 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2555 p_buf-> read_h.count = sizeof(struct clawh);
2556
2557 /* initialize Signal command */
2558 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
2559 p_buf-> signal.cda =
2560 (__u32)__pa(&(pClawH->flag));
2561 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2562 p_buf-> signal.count = 1;
2563
2564 /* initialize r_TIC_1 command */
2565 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
2566 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
2567 p_buf-> r_TIC_1.flags = 0;
2568 p_buf-> r_TIC_1.count = 0;
2569
2570 /* initialize r_read_FF command */
2571 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
2572 p_buf-> r_read_FF.cda =
2573 (__u32)__pa(&(pClawH->flag));
2574 p_buf-> r_read_FF.flags =
2575 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
2576 p_buf-> r_read_FF.count = 1;
2577
2578 /* initialize r_TIC_2 */
2579 memcpy(&p_buf->r_TIC_2,
2580 &p_buf->r_TIC_1, sizeof(struct ccw1));
2581
2582 /* initialize Header */
2583 p_buf->header.length=0xffff;
2584 p_buf->header.opcode=0xff;
2585 p_buf->header.flag=CLAW_PENDING;
2586
2587 if (((unsigned long)p_buff+privptr->p_env->read_size) >=
2588 ((unsigned long)(p_buff+2*(privptr->p_env->read_size) -1)
2589 & PAGE_MASK) ) {
2590 p_buff= p_buff+privptr->p_env->read_size;
2591 }
2592 else {
2593 p_buff=
2594 (void *)((unsigned long)
2595 (p_buff+2*(privptr->p_env->read_size) -1)
2596 & PAGE_MASK) ;
2597 }
2598 } /* for read_buffers */
2599 } /* read_size < PAGE_SIZE */
2600 else { /* read Size >= PAGE_SIZE */
2601
2602#ifdef DEBUGMSG
2603 printk(KERN_INFO "%s: %s() Begin build claw read free chain \n",
2604 dev->name,__FUNCTION__);
2605#endif
2606 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
2607 p_buff = (void *)__get_free_pages(__GFP_DMA,
2608 (int)pages_to_order_of_mag(privptr->p_buff_pages_perread) );
2609 if (p_buff==NULL) {
2610 printk(KERN_INFO "%s: %s() __get_free_pages for read "
2611 "buf failed : get is for %d pages\n",
2612 dev->name,__FUNCTION__,
2613 privptr->p_buff_pages_perread );
2614 free_pages((unsigned long)privptr->p_buff_ccw,
2615 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
2616 /* free the write pages */
2617 p_buf=privptr->p_buff_write;
2618 while (p_buf!=NULL) {
2619 free_pages((unsigned long)p_buf->p_buffer,
2620 (int)pages_to_order_of_mag(
2621 privptr->p_buff_pages_perwrite ));
2622 p_buf=p_buf->next;
2623 }
2624 /* free any read pages already alloc */
2625 p_buf=privptr->p_buff_read;
2626 while (p_buf!=NULL) {
2627 free_pages((unsigned long)p_buf->p_buffer,
2628 (int)pages_to_order_of_mag(
2629 privptr->p_buff_pages_perread ));
2630 p_buf=p_buf->next;
2631 }
2632 privptr->p_buff_ccw=NULL;
2633 privptr->p_buff_write=NULL;
2634#ifdef FUNCTRACE
2635 printk(KERN_INFO "%s: %s() exit on line %d, rc = ENOMEM\n",
2636 dev->name,__FUNCTION__,
2637 __LINE__);
2638#endif
2639 return -ENOMEM;
2640 }
2641 memset(p_buff, 0x00, privptr->p_env->read_size);
2642 p_buf = p_free_chain;
2643 privptr->p_buff_read = p_buf;
2644 p_free_chain = p_buf->next;
2645
2646 if (p_last_CCWB==NULL) {
2647 p_buf->next=NULL;
2648 real_TIC_address=0;
2649 p_last_CCWB=p_buf;
2650 }
2651 else {
2652 p_buf->next=p_first_CCWB;
2653 real_TIC_address=
2654 (addr_t)__pa(
2655 &p_first_CCWB -> read );
2656 }
2657
2658 p_first_CCWB=p_buf;
2659 /* save buff address */
2660 p_buf->p_buffer=(struct clawbuf *)p_buff;
2661 /* initialize read command */
2662 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
2663 p_buf-> read.cda = (__u32)__pa(p_buff);
2664 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2665 p_buf-> read.count = privptr->p_env->read_size;
2666
2667 /* initialize read_h command */
2668 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
2669 p_buf-> read_h.cda =
2670 (__u32)__pa(&(p_buf->header));
2671 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2672 p_buf-> read_h.count = sizeof(struct clawh);
2673
2674 /* initialize Signal command */
2675 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
2676 p_buf-> signal.cda =
2677 (__u32)__pa(&(pClawH->flag));
2678 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
2679 p_buf-> signal.count = 1;
2680
2681 /* initialize r_TIC_1 command */
2682 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
2683 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
2684 p_buf-> r_TIC_1.flags = 0;
2685 p_buf-> r_TIC_1.count = 0;
2686
2687 /* initialize r_read_FF command */
2688 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
2689 p_buf-> r_read_FF.cda =
2690 (__u32)__pa(&(pClawH->flag));
2691 p_buf-> r_read_FF.flags =
2692 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
2693 p_buf-> r_read_FF.count = 1;
2694
2695 /* initialize r_TIC_2 */
2696 memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
2697 sizeof(struct ccw1));
2698
2699 /* initialize Header */
2700 p_buf->header.length=0xffff;
2701 p_buf->header.opcode=0xff;
2702 p_buf->header.flag=CLAW_PENDING;
2703
2704 } /* For read_buffers */
2705 } /* read_size >= PAGE_SIZE */
2706	} /* end of p_buff_read == NULL */
2707#ifdef DEBUGMSG
2708 printk(KERN_INFO "%s: %s() > End build claw read free chain \n",
2709 dev->name,__FUNCTION__);
2710 p_buf=p_first_CCWB;
2711 while (p_buf!=NULL) {
2712 dumpit((char *)p_buf, sizeof(struct ccwbk));
2713 p_buf=p_buf->next;
2714 }
2715
2716#endif
2717 add_claw_reads( dev ,p_first_CCWB , p_last_CCWB);
2718 privptr->buffs_alloc = 1;
2719#ifdef FUNCTRACE
2720 printk(KERN_INFO "%s: %s() exit on line %d\n",
2721 dev->name,__FUNCTION__,__LINE__);
2722#endif
2723 return 0;
2724} /* end of init_ccw_bk */
2725
2726/*-------------------------------------------------------------------*
2727* *
2728* probe_error *
2729* *
2730*--------------------------------------------------------------------*/
2731
2732static void
2733probe_error( struct ccwgroup_device *cgdev)
2734{
2735 struct claw_privbk *privptr;
2736#ifdef FUNCTRACE
2737 printk(KERN_INFO "%s enter \n",__FUNCTION__);
2738#endif
2739 CLAW_DBF_TEXT(4,trace,"proberr");
2740#ifdef DEBUGMSG
2741 printk(KERN_INFO "%s variable cgdev =\n",__FUNCTION__);
2742 dumpit((char *) cgdev, sizeof(struct ccwgroup_device));
2743#endif
2744 privptr=(struct claw_privbk *)cgdev->dev.driver_data;
2745 if (privptr!=NULL) {
2746 if (privptr->p_env != NULL) {
2747 kfree(privptr->p_env);
2748 privptr->p_env=NULL;
2749 }
2750 if (privptr->p_mtc_envelope!=NULL) {
2751 kfree(privptr->p_mtc_envelope);
2752 privptr->p_mtc_envelope=NULL;
2753 }
2754 kfree(privptr);
2755 privptr=NULL;
2756 }
2757#ifdef FUNCTRACE
2758 printk(KERN_INFO "%s > exit on line %d\n",
2759 __FUNCTION__,__LINE__);
2760#endif
2761
2762 return;
2763} /* probe_error */
2764
2765
2766
2767/*-------------------------------------------------------------------*
2768* claw_process_control *
2769* *
2770* *
2771*--------------------------------------------------------------------*/
2772
2773static int
2774claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2775{
2776
2777 struct clawbuf *p_buf;
2778 struct clawctl ctlbk;
2779 struct clawctl *p_ctlbk;
2780 char temp_host_name[8];
2781 char temp_ws_name[8];
2782 struct claw_privbk *privptr;
2783 struct claw_env *p_env;
2784 struct sysval *p_sysval;
2785 struct conncmd *p_connect=NULL;
2786 int rc;
2787 struct chbk *p_ch = NULL;
2788#ifdef FUNCTRACE
2789 printk(KERN_INFO "%s: %s() > enter \n",
2790 dev->name,__FUNCTION__);
2791#endif
2792 CLAW_DBF_TEXT(2,setup,"clw_cntl");
2793#ifdef DEBUGMSG
2794 printk(KERN_INFO "%s: variable dev =\n",dev->name);
2795 dumpit((char *) dev, sizeof(struct net_device));
2796 printk(KERN_INFO "%s: variable p_ccw =\n",dev->name);
2797 dumpit((char *) p_ccw, sizeof(struct ccwbk *));
2798#endif
2799	udelay(1000);  /* Wait a ms for the control packets to
2800			* catch up to each other */
2801 privptr=dev->priv;
2802 p_env=privptr->p_env;
2803 memcpy( &temp_host_name, p_env->host_name, 8);
2804 memcpy( &temp_ws_name, p_env->adapter_name , 8);
2805 printk(KERN_INFO "%s: CLAW device %.8s: "
2806 "Received Control Packet\n",
2807 dev->name, temp_ws_name);
2808 if (privptr->release_pend==1) {
2809#ifdef FUNCTRACE
2810 printk(KERN_INFO "%s: %s() > "
2811 "exit on line %d, rc=0\n",
2812 dev->name,__FUNCTION__,__LINE__);
2813#endif
2814 return 0;
2815 }
2816 p_buf=p_ccw->p_buffer;
2817 p_ctlbk=&ctlbk;
2818 if (p_env->packing == DO_PACKED) { /* packing in progress?*/
2819 memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
2820 } else {
2821 memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
2822 }
2823#ifdef IOTRACE
2824 printk(KERN_INFO "%s: dump claw control data inbound\n",dev->name);
2825 dumpit((char *)p_ctlbk, sizeof(struct clawctl));
2826#endif
2827 switch (p_ctlbk->command)
2828 {
2829 case SYSTEM_VALIDATE_REQUEST:
2830 if (p_ctlbk->version!=CLAW_VERSION_ID) {
2831 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2832 CLAW_RC_WRONG_VERSION );
2833			printk(KERN_INFO "%s: %d is the wrong version id. "
2834				"Expected %d\n",
2835 dev->name, p_ctlbk->version,
2836 CLAW_VERSION_ID);
2837 }
2838 p_sysval=(struct sysval *)&(p_ctlbk->data);
2839		printk(KERN_INFO "%s: Recv Sys Validate Request: "
2840 "Vers=%d,link_id=%d,Corr=%d,WS name=%."
2841 "8s,Host name=%.8s\n",
2842 dev->name, p_ctlbk->version,
2843 p_ctlbk->linkid,
2844 p_ctlbk->correlator,
2845 p_sysval->WS_name,
2846 p_sysval->host_name);
2847 if (0!=memcmp(temp_host_name,p_sysval->host_name,8)) {
2848 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2849 CLAW_RC_NAME_MISMATCH );
2850 CLAW_DBF_TEXT(2,setup,"HSTBAD");
2851			CLAW_DBF_TEXT_(2,setup,"%.8s",p_sysval->host_name);
2852			CLAW_DBF_TEXT_(2,setup,"%.8s",temp_host_name);
2853			printk(KERN_INFO "%s: Host name mismatch\n",
2854				dev->name);
2855			printk(KERN_INFO "%s: Received :%.8s: "
2856				"expected :%.8s:\n",
2857				dev->name,
2858				p_sysval->host_name,
2859				temp_host_name);
2860 }
2861 if (0!=memcmp(temp_ws_name,p_sysval->WS_name,8)) {
2862 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2863 CLAW_RC_NAME_MISMATCH );
2864 CLAW_DBF_TEXT(2,setup,"WSNBAD");
2865			CLAW_DBF_TEXT_(2,setup,"%.8s",p_sysval->WS_name);
2866			CLAW_DBF_TEXT_(2,setup,"%.8s",temp_ws_name);
2867			printk(KERN_INFO "%s: WS name mismatch\n",
2868				dev->name);
2869			printk(KERN_INFO "%s: Received :%.8s: "
2870				"expected :%.8s:\n",
2871				dev->name,
2872				p_sysval->WS_name,
2873				temp_ws_name);
2874 }
2875 if (( p_sysval->write_frame_size < p_env->write_size) &&
2876 ( p_env->packing == 0)) {
2877 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2878 CLAW_RC_HOST_RCV_TOO_SMALL );
2879 printk(KERN_INFO "%s: host write size is too "
2880 "small\n", dev->name);
2881 CLAW_DBF_TEXT(2,setup,"wrtszbad");
2882 }
2883 if (( p_sysval->read_frame_size < p_env->read_size) &&
2884 ( p_env->packing == 0)) {
2885 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2886 CLAW_RC_HOST_RCV_TOO_SMALL );
2887 printk(KERN_INFO "%s: host read size is too "
2888 "small\n", dev->name);
2889 CLAW_DBF_TEXT(2,setup,"rdsizbad");
2890 }
2891 claw_snd_sys_validate_rsp(dev, p_ctlbk, 0 );
2892		printk(KERN_INFO "%s: CLAW device %.8s: System validate"
2893			" completed.\n",dev->name, temp_ws_name);
2894		printk(KERN_INFO "%s: Sys Validate Rsize:%d Wsize:%d\n",dev->name,
2895			p_sysval->read_frame_size,p_sysval->write_frame_size);
2896 privptr->system_validate_comp=1;
2897 if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
2898 p_env->packing = PACKING_ASK;
2899 }
2900 claw_strt_conn_req(dev);
2901 break;
2902
2903 case SYSTEM_VALIDATE_RESPONSE:
2904 p_sysval=(struct sysval *)&(p_ctlbk->data);
2905		printk(KERN_INFO "%s: Recv Sys Validate Resp: Vers=%d,Corr=%d,RC=%d,"
2906 "WS name=%.8s,Host name=%.8s\n",
2907 dev->name,
2908 p_ctlbk->version,
2909 p_ctlbk->correlator,
2910 p_ctlbk->rc,
2911 p_sysval->WS_name,
2912 p_sysval->host_name);
2913 switch (p_ctlbk->rc)
2914 {
2915 case 0:
2916 printk(KERN_INFO "%s: CLAW device "
2917 "%.8s: System validate "
2918 "completed.\n",
2919 dev->name, temp_ws_name);
2920 if (privptr->system_validate_comp == 0)
2921 claw_strt_conn_req(dev);
2922 privptr->system_validate_comp=1;
2923 break;
2924 case CLAW_RC_NAME_MISMATCH:
2925 printk(KERN_INFO "%s: Sys Validate "
2926				"Resp : Host or WS name "
2927 "mismatch\n",
2928 dev->name);
2929 break;
2930 case CLAW_RC_WRONG_VERSION:
2931 printk(KERN_INFO "%s: Sys Validate "
2932 "Resp : Wrong version\n",
2933 dev->name);
2934 break;
2935 case CLAW_RC_HOST_RCV_TOO_SMALL:
2936 printk(KERN_INFO "%s: Sys Validate "
2937 "Resp : bad frame size\n",
2938 dev->name);
2939 break;
2940 default:
2941 printk(KERN_INFO "%s: Sys Validate "
2942 "error code=%d \n",
2943 dev->name, p_ctlbk->rc );
2944 break;
2945 }
2946 break;
2947
2948 case CONNECTION_REQUEST:
2949 p_connect=(struct conncmd *)&(p_ctlbk->data);
2950 printk(KERN_INFO "%s: Recv Conn Req: Vers=%d,link_id=%d,"
2951 "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
2952 dev->name,
2953 p_ctlbk->version,
2954 p_ctlbk->linkid,
2955 p_ctlbk->correlator,
2956 p_connect->host_name,
2957 p_connect->WS_name);
2958 if (privptr->active_link_ID!=0 ) {
2959 claw_snd_disc(dev, p_ctlbk);
2960 printk(KERN_INFO "%s: Conn Req error : "
2961				"a logical link is already active\n",
2962 dev->name);
2963 }
2964 if (p_ctlbk->linkid!=1 ) {
2965 claw_snd_disc(dev, p_ctlbk);
2966 printk(KERN_INFO "%s: Conn Req error : "
2967 "req logical link id is not 1\n",
2968 dev->name);
2969 }
2970 rc=find_link(dev,
2971 p_connect->host_name, p_connect->WS_name);
2972 if (rc!=0) {
2973 claw_snd_disc(dev, p_ctlbk);
2974 printk(KERN_INFO "%s: Conn Req error : "
2975 "req appl name does not match\n",
2976 dev->name);
2977 }
2978 claw_send_control(dev,
2979 CONNECTION_CONFIRM, p_ctlbk->linkid,
2980 p_ctlbk->correlator,
2981 0, p_connect->host_name,
2982 p_connect->WS_name);
2983 if (p_env->packing == PACKING_ASK) {
2984			printk(KERN_INFO "%s: Now Pack ask\n",dev->name);
2985 p_env->packing = PACK_SEND;
2986 claw_snd_conn_req(dev,0);
2987 }
2988 printk(KERN_INFO "%s: CLAW device %.8s: Connection "
2989 "completed link_id=%d.\n",
2990 dev->name, temp_ws_name,
2991 p_ctlbk->linkid);
2992 privptr->active_link_ID=p_ctlbk->linkid;
2993 p_ch=&privptr->channel[WRITE];
2994 wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
2995 break;
2996 case CONNECTION_RESPONSE:
2997 p_connect=(struct conncmd *)&(p_ctlbk->data);
2998		printk(KERN_INFO "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
2999 "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
3000 dev->name,
3001 p_ctlbk->version,
3002 p_ctlbk->linkid,
3003 p_ctlbk->correlator,
3004 p_ctlbk->rc,
3005 p_connect->host_name,
3006 p_connect->WS_name);
3007
3008 if (p_ctlbk->rc !=0 ) {
3009 printk(KERN_INFO "%s: Conn Resp error: rc=%d \n",
3010 dev->name, p_ctlbk->rc);
3011 return 1;
3012 }
3013 rc=find_link(dev,
3014 p_connect->host_name, p_connect->WS_name);
3015 if (rc!=0) {
3016 claw_snd_disc(dev, p_ctlbk);
3017 printk(KERN_INFO "%s: Conn Resp error: "
3018 "req appl name does not match\n",
3019 dev->name);
3020 }
3021		/* linkid stays negative until CONNECTION_CONFIRM arrives */
3022 privptr->active_link_ID = - (p_ctlbk->linkid);
3023 break;
3024 case CONNECTION_CONFIRM:
3025 p_connect=(struct conncmd *)&(p_ctlbk->data);
3026 printk(KERN_INFO "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
3027 "Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
3028 dev->name,
3029 p_ctlbk->version,
3030 p_ctlbk->linkid,
3031 p_ctlbk->correlator,
3032 p_connect->host_name,
3033 p_connect->WS_name);
3034 if (p_ctlbk->linkid== -(privptr->active_link_ID)) {
3035 privptr->active_link_ID=p_ctlbk->linkid;
3036 if (p_env->packing > PACKING_ASK) {
3037				printk(KERN_INFO "%s: Confirmed, now packing\n",dev->name);
3038 p_env->packing = DO_PACKED;
3039 }
3040 p_ch=&privptr->channel[WRITE];
3041 wake_up(&p_ch->wait);
3042 }
3043 else {
3044 printk(KERN_INFO "%s: Conn confirm: "
3045 "unexpected linkid=%d \n",
3046 dev->name, p_ctlbk->linkid);
3047 claw_snd_disc(dev, p_ctlbk);
3048 }
3049 break;
3050 case DISCONNECT:
3051 printk(KERN_INFO "%s: Disconnect: "
3052 "Vers=%d,link_id=%d,Corr=%d\n",
3053 dev->name, p_ctlbk->version,
3054 p_ctlbk->linkid, p_ctlbk->correlator);
3055 if ((p_ctlbk->linkid == 2) &&
3056 (p_env->packing == PACK_SEND)) {
3057 privptr->active_link_ID = 1;
3058 p_env->packing = DO_PACKED;
3059 }
3060 else
3061 privptr->active_link_ID=0;
3062 break;
3063 case CLAW_ERROR:
3064 printk(KERN_INFO "%s: CLAW ERROR detected\n",
3065 dev->name);
3066 break;
3067 default:
3068 printk(KERN_INFO "%s: Unexpected command code=%d \n",
3069 dev->name, p_ctlbk->command);
3070 break;
3071 }
3072
3073#ifdef FUNCTRACE
3074 printk(KERN_INFO "%s: %s() exit on line %d, rc = 0\n",
3075 dev->name,__FUNCTION__,__LINE__);
3076#endif
3077
3078 return 0;
3079} /* end of claw_process_control */
3080
3081
3082/*-------------------------------------------------------------------*
3083* claw_send_control *
3084* *
3085*--------------------------------------------------------------------*/
3086
3087static int
3088claw_send_control(struct net_device *dev, __u8 type, __u8 link,
3089 __u8 correlator, __u8 rc, char *local_name, char *remote_name)
3090{
3091 struct claw_privbk *privptr;
3092 struct clawctl *p_ctl;
3093 struct sysval *p_sysval;
3094 struct conncmd *p_connect;
3095 struct sk_buff *skb;
3096
3097#ifdef FUNCTRACE
3098 printk(KERN_INFO "%s:%s > enter \n",dev->name,__FUNCTION__);
3099#endif
3100 CLAW_DBF_TEXT(2,setup,"sndcntl");
3101#ifdef DEBUGMSG
3102 printk(KERN_INFO "%s: Sending Control Packet \n",dev->name);
3103 printk(KERN_INFO "%s: variable type = 0x%X, link = "
3104 "%d, correlator = %d, rc = %d\n",
3105 dev->name,type, link, correlator, rc);
3106 printk(KERN_INFO "%s: variable local_name = %s, "
3107 "remote_name = %s\n",dev->name, local_name, remote_name);
3108#endif
3109 privptr=dev->priv;
3110 p_ctl=(struct clawctl *)&privptr->ctl_bk;
3111
3112 p_ctl->command=type;
3113 p_ctl->version=CLAW_VERSION_ID;
3114 p_ctl->linkid=link;
3115 p_ctl->correlator=correlator;
3116 p_ctl->rc=rc;
3117
3118 p_sysval=(struct sysval *)&p_ctl->data;
3119 p_connect=(struct conncmd *)&p_ctl->data;
3120
3121 switch (p_ctl->command) {
3122 case SYSTEM_VALIDATE_REQUEST:
3123 case SYSTEM_VALIDATE_RESPONSE:
3124 memcpy(&p_sysval->host_name, local_name, 8);
3125 memcpy(&p_sysval->WS_name, remote_name, 8);
3126 if (privptr->p_env->packing > 0) {
3127 p_sysval->read_frame_size=DEF_PACK_BUFSIZE;
3128 p_sysval->write_frame_size=DEF_PACK_BUFSIZE;
3129 } else {
3130			/* how big is the biggest group of packets */
3131 p_sysval->read_frame_size=privptr->p_env->read_size;
3132 p_sysval->write_frame_size=privptr->p_env->write_size;
3133 }
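		/*
		 * The system-validate record advertises the frame sizes
		 * this side can receive; when packing is negotiated the
		 * fixed DEF_PACK_BUFSIZE is advertised instead of the
		 * configured read/write sizes.
		 */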
3134 memset(&p_sysval->reserved, 0x00, 4);
3135 break;
3136 case CONNECTION_REQUEST:
3137 case CONNECTION_RESPONSE:
3138 case CONNECTION_CONFIRM:
3139 case DISCONNECT:
3140 memcpy(&p_sysval->host_name, local_name, 8);
3141 memcpy(&p_sysval->WS_name, remote_name, 8);
3142 if (privptr->p_env->packing > 0) {
3143 /* How big is the biggest packet */
3144 p_connect->reserved1[0]=CLAW_FRAME_SIZE;
3145 p_connect->reserved1[1]=CLAW_FRAME_SIZE;
3146 } else {
3147 memset(&p_connect->reserved1, 0x00, 4);
3148 memset(&p_connect->reserved2, 0x00, 4);
3149 }
3150 break;
3151 default:
3152 break;
3153 }
3154
3155 /* write Control Record to the device */
3156
3157
3158 skb = dev_alloc_skb(sizeof(struct clawctl));
3159 if (!skb) {
3160		printk(KERN_WARNING "%s:%s low on mem, returning...\n",
3161 dev->name,__FUNCTION__);
3162#ifdef DEBUG
3163 printk(KERN_INFO "%s:%s Exit, rc = ENOMEM\n",
3164 dev->name,__FUNCTION__);
3165#endif
3166 return -ENOMEM;
3167 }
3168 memcpy(skb_put(skb, sizeof(struct clawctl)),
3169 p_ctl, sizeof(struct clawctl));
3170#ifdef IOTRACE
3171 printk(KERN_INFO "%s: outbnd claw cntl data \n",dev->name);
3172 dumpit((char *)p_ctl,sizeof(struct clawctl));
3173#endif
3174 if (privptr->p_env->packing >= PACK_SEND)
3175 claw_hw_tx(skb, dev, 1);
3176 else
3177 claw_hw_tx(skb, dev, 0);
3178#ifdef FUNCTRACE
3179 printk(KERN_INFO "%s:%s Exit on line %d\n",
3180 dev->name,__FUNCTION__,__LINE__);
3181#endif
3182
3183 return 0;
3184} /* end of claw_send_control */
3185
3186/*-------------------------------------------------------------------*
3187* claw_snd_conn_req *
3188* *
3189*--------------------------------------------------------------------*/
3190static int
3191claw_snd_conn_req(struct net_device *dev, __u8 link)
3192{
3193 int rc;
3194 struct claw_privbk *privptr=dev->priv;
3195 struct clawctl *p_ctl;
3196
3197#ifdef FUNCTRACE
3198 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
3199#endif
3200 CLAW_DBF_TEXT(2,setup,"snd_conn");
3201#ifdef DEBUGMSG
3202 printk(KERN_INFO "%s: variable link = %X, dev =\n",dev->name, link);
3203 dumpit((char *) dev, sizeof(struct net_device));
3204#endif
3205 rc = 1;
3206 p_ctl=(struct clawctl *)&privptr->ctl_bk;
3207 p_ctl->linkid = link;
3208 if ( privptr->system_validate_comp==0x00 ) {
3209#ifdef FUNCTRACE
3210 printk(KERN_INFO "%s:%s Exit on line %d, rc = 1\n",
3211 dev->name,__FUNCTION__,__LINE__);
3212#endif
3213 return rc;
3214 }
3215 if (privptr->p_env->packing == PACKING_ASK )
3216 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
3217 WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
3218 if (privptr->p_env->packing == PACK_SEND) {
3219 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
3220 WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
3221 }
3222 if (privptr->p_env->packing == 0)
3223 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
3224 HOST_APPL_NAME, privptr->p_env->api_type);
3225#ifdef FUNCTRACE
3226 printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
3227 dev->name,__FUNCTION__,__LINE__, rc);
3228#endif
3229 return rc;
3230
3231} /* end of claw_snd_conn_req */
3232
3233
3234/*-------------------------------------------------------------------*
3235* claw_snd_disc *
3236* *
3237*--------------------------------------------------------------------*/
3238
3239static int
3240claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
3241{
3242 int rc;
3243 struct conncmd * p_connect;
3244
3245#ifdef FUNCTRACE
3246 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
3247#endif
3248 CLAW_DBF_TEXT(2,setup,"snd_dsc");
3249#ifdef DEBUGMSG
3250 printk(KERN_INFO "%s: variable dev =\n",dev->name);
3251 dumpit((char *) dev, sizeof(struct net_device));
3252 printk(KERN_INFO "%s: variable p_ctl",dev->name);
3253 dumpit((char *) p_ctl, sizeof(struct clawctl));
3254#endif
3255 p_connect=(struct conncmd *)&p_ctl->data;
3256
3257 rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid,
3258 p_ctl->correlator, 0,
3259 p_connect->host_name, p_connect->WS_name);
3260#ifdef FUNCTRACE
3261 printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
3262 dev->name,__FUNCTION__, __LINE__, rc);
3263#endif
3264 return rc;
3265} /* end of claw_snd_disc */
3266
3267
3268/*-------------------------------------------------------------------*
3269* claw_snd_sys_validate_rsp *
3270* *
3271*--------------------------------------------------------------------*/
3272
3273static int
3274claw_snd_sys_validate_rsp(struct net_device *dev,
3275 struct clawctl *p_ctl, __u32 return_code)
3276{
3277 struct claw_env * p_env;
3278 struct claw_privbk *privptr;
3279 int rc;
3280
3281#ifdef FUNCTRACE
3282 printk(KERN_INFO "%s:%s Enter\n",
3283 dev->name,__FUNCTION__);
3284#endif
3285 CLAW_DBF_TEXT(2,setup,"chkresp");
3286#ifdef DEBUGMSG
3287 printk(KERN_INFO "%s: variable return_code = %d, dev =\n",
3288 dev->name, return_code);
3289 dumpit((char *) dev, sizeof(struct net_device));
3290 printk(KERN_INFO "%s: variable p_ctl =\n",dev->name);
3291 dumpit((char *) p_ctl, sizeof(struct clawctl));
3292#endif
3293 privptr = dev->priv;
3294 p_env=privptr->p_env;
3295 rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
3296 p_ctl->linkid,
3297 p_ctl->correlator,
3298 return_code,
3299 p_env->host_name,
3300 p_env->adapter_name );
3301#ifdef FUNCTRACE
3302 printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
3303 dev->name,__FUNCTION__,__LINE__, rc);
3304#endif
3305 return rc;
3306} /* end of claw_snd_sys_validate_rsp */
3307
3308/*-------------------------------------------------------------------*
3309* claw_strt_conn_req *
3310* *
3311*--------------------------------------------------------------------*/
3312
3313static int
3314claw_strt_conn_req(struct net_device *dev )
3315{
3316 int rc;
3317
3318#ifdef FUNCTRACE
3319 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
3320#endif
3321 CLAW_DBF_TEXT(2,setup,"conn_req");
3322#ifdef DEBUGMSG
3323 printk(KERN_INFO "%s: variable dev =\n",dev->name);
3324 dumpit((char *) dev, sizeof(struct net_device));
3325#endif
3326 rc=claw_snd_conn_req(dev, 1);
3327#ifdef FUNCTRACE
3328 printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n",
3329 dev->name,__FUNCTION__,__LINE__, rc);
3330#endif
3331 return rc;
3332} /* end of claw_strt_conn_req */
3333
3334
3335
3336/*-------------------------------------------------------------------*
3337 * claw_stats *
3338 *-------------------------------------------------------------------*/
3339
3340static struct net_device_stats *
3341claw_stats(struct net_device *dev)
3342{
3343 struct claw_privbk *privptr;
3344#ifdef FUNCTRACE
3345 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
3346#endif
3347 CLAW_DBF_TEXT(4,trace,"stats");
3348 privptr = dev->priv;
3349#ifdef FUNCTRACE
3350 printk(KERN_INFO "%s:%s Exit on line %d\n",
3351 dev->name,__FUNCTION__,__LINE__);
3352#endif
3353 return &privptr->stats;
3354} /* end of claw_stats */
3355
3356
3357/*-------------------------------------------------------------------*
3358* unpack_read *
3359* *
3360*--------------------------------------------------------------------*/
3361static void
3362unpack_read(struct net_device *dev )
3363{
3364 struct sk_buff *skb;
3365 struct claw_privbk *privptr;
3366 struct claw_env *p_env;
3367 struct ccwbk *p_this_ccw;
3368 struct ccwbk *p_first_ccw;
3369 struct ccwbk *p_last_ccw;
3370 struct clawph *p_packh;
3371 void *p_packd;
3372 struct clawctl *p_ctlrec=NULL;
3373
3374 __u32 len_of_data;
3375 __u32 pack_off;
3376 __u8 link_num;
3377 __u8 mtc_this_frm=0;
3378 __u32 bytes_to_mov;
3379 struct chbk *p_ch = NULL;
3380 int i=0;
3381 int p=0;
3382
3383#ifdef FUNCTRACE
3384 printk(KERN_INFO "%s:%s enter \n",dev->name,__FUNCTION__);
3385#endif
3386 CLAW_DBF_TEXT(4,trace,"unpkread");
3387 p_first_ccw=NULL;
3388 p_last_ccw=NULL;
3389 p_packh=NULL;
3390 p_packd=NULL;
3391 privptr=dev->priv;
3392 p_env = privptr->p_env;
3393 p_this_ccw=privptr->p_read_active_first;
3394 i=0;
3395 while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
3396#ifdef IOTRACE
3397 printk(KERN_INFO "%s p_this_ccw \n",dev->name);
3398 dumpit((char*)p_this_ccw, sizeof(struct ccwbk));
3399 printk(KERN_INFO "%s Inbound p_this_ccw->p_buffer(64)"
3400 " pk=%d \n",dev->name,p_env->packing);
3401 dumpit((char *)p_this_ccw->p_buffer, 64 );
3402#endif
3403 pack_off = 0;
3404 p = 0;
3405 p_this_ccw->header.flag=CLAW_PENDING;
3406 privptr->p_read_active_first=p_this_ccw->next;
3407 p_this_ccw->next=NULL;
3408 p_packh = (struct clawph *)p_this_ccw->p_buffer;
3409 if ((p_env->packing == PACK_SEND) &&
3410 (p_packh->len == 32) &&
3411 (p_packh->link_num == 0)) { /* is it a packed ctl rec? */
3412 p_packh++; /* peek past pack header */
3413 p_ctlrec = (struct clawctl *)p_packh;
3414			p_packh--;   /* un-peek */
3415 if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
3416 (p_ctlrec->command == CONNECTION_CONFIRM))
3417 p_env->packing = DO_PACKED;
3418 }
3419 if (p_env->packing == DO_PACKED)
3420 link_num=p_packh->link_num;
3421 else
3422 link_num=p_this_ccw->header.opcode / 8;
3423 if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
3424#ifdef DEBUGMSG
3425 printk(KERN_INFO "%s: %s > More_to_come is ON\n",
3426 dev->name,__FUNCTION__);
3427#endif
3428 mtc_this_frm=1;
3429 if (p_this_ccw->header.length!=
3430 privptr->p_env->read_size ) {
3431 printk(KERN_INFO " %s: Invalid frame detected "
3432 "length is %02x\n" ,
3433 dev->name, p_this_ccw->header.length);
3434 }
3435 }
3436
3437 if (privptr->mtc_skipping) {
3438 /*
3439 * We're in the mode of skipping past a
3440 * multi-frame message
3441 * that we can't process for some reason or other.
3442 * The first frame without the More-To-Come flag is
3443 * the last frame of the skipped message.
3444 */
3445 /* in case of More-To-Come not set in this frame */
3446 if (mtc_this_frm==0) {
3447 privptr->mtc_skipping=0; /* Ok, the end */
3448 privptr->mtc_logical_link=-1;
3449 }
3450#ifdef DEBUGMSG
3451 printk(KERN_INFO "%s:%s goto next "
3452 "frame from MoretoComeSkip \n",
3453 dev->name,__FUNCTION__);
3454#endif
3455 goto NextFrame;
3456 }
3457
3458 if (link_num==0) {
3459 claw_process_control(dev, p_this_ccw);
3460#ifdef DEBUGMSG
3461 printk(KERN_INFO "%s:%s goto next "
3462 "frame from claw_process_control \n",
3463 dev->name,__FUNCTION__);
3464#endif
3465 CLAW_DBF_TEXT(4,trace,"UnpkCntl");
3466 goto NextFrame;
3467 }
3468unpack_next:
3469 if (p_env->packing == DO_PACKED) {
3470 if (pack_off > p_env->read_size)
3471 goto NextFrame;
3472 p_packd = p_this_ccw->p_buffer+pack_off;
3473 p_packh = (struct clawph *) p_packd;
3474 if ((p_packh->len == 0) || /* all done with this frame? */
3475 (p_packh->flag != 0))
3476 goto NextFrame;
3477 bytes_to_mov = p_packh->len;
3478 pack_off += bytes_to_mov+sizeof(struct clawph);
3479 p++;
3480 } else {
3481 bytes_to_mov=p_this_ccw->header.length;
3482 }
3483 if (privptr->mtc_logical_link<0) {
3484#ifdef DEBUGMSG
3485 printk(KERN_INFO "%s: %s mtc_logical_link < 0 \n",
3486 dev->name,__FUNCTION__);
3487#endif
3488
3489	/*
3490	* If More-To-Come is set in this frame, we don't know the
3491	* length of the entire message and hence have to allocate a
3492	* large buffer. */
3493
3494 /* We are starting a new envelope */
3495 privptr->mtc_offset=0;
3496 privptr->mtc_logical_link=link_num;
3497 }
3498
3499 if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) {
3500 /* error */
3501#ifdef DEBUGMSG
3502 printk(KERN_INFO "%s: %s > goto next "
3503 "frame from MoretoComeSkip \n",
3504 dev->name,
3505 __FUNCTION__);
3506 printk(KERN_INFO " bytes_to_mov %d > (MAX_ENVELOPE_"
3507 "SIZE-privptr->mtc_offset %d)\n",
3508 bytes_to_mov,(MAX_ENVELOPE_SIZE- privptr->mtc_offset));
3509#endif
3510 privptr->stats.rx_frame_errors++;
3511 goto NextFrame;
3512 }
3513 if (p_env->packing == DO_PACKED) {
3514 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
3515 p_packd+sizeof(struct clawph), bytes_to_mov);
3516
3517 } else {
3518 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
3519 p_this_ccw->p_buffer, bytes_to_mov);
3520 }
3521#ifdef DEBUGMSG
3522 printk(KERN_INFO "%s: %s() received data \n",
3523 dev->name,__FUNCTION__);
3524 if (p_env->packing == DO_PACKED)
3525 dumpit((char *)p_packd+sizeof(struct clawph),32);
3526 else
3527 dumpit((char *)p_this_ccw->p_buffer, 32);
3528 printk(KERN_INFO "%s: %s() bytelength %d \n",
3529 dev->name,__FUNCTION__,bytes_to_mov);
3530#endif
3531 if (mtc_this_frm==0) {
3532 len_of_data=privptr->mtc_offset+bytes_to_mov;
3533 skb=dev_alloc_skb(len_of_data);
3534 if (skb) {
3535 memcpy(skb_put(skb,len_of_data),
3536 privptr->p_mtc_envelope,
3537 len_of_data);
3538 skb->mac.raw=skb->data;
3539 skb->dev=dev;
3540 skb->protocol=htons(ETH_P_IP);
3541 skb->ip_summed=CHECKSUM_UNNECESSARY;
3542 privptr->stats.rx_packets++;
3543 privptr->stats.rx_bytes+=len_of_data;
3544 netif_rx(skb);
3545#ifdef DEBUGMSG
3546 printk(KERN_INFO "%s: %s() netif_"
3547 "rx(skb) completed \n",
3548 dev->name,__FUNCTION__);
3549#endif
3550 }
3551 else {
3552 privptr->stats.rx_dropped++;
3553 printk(KERN_WARNING "%s: %s() low on memory\n",
3554 dev->name,__FUNCTION__);
3555 }
3556 privptr->mtc_offset=0;
3557 privptr->mtc_logical_link=-1;
3558 }
3559 else {
3560 privptr->mtc_offset+=bytes_to_mov;
3561 }
3562 if (p_env->packing == DO_PACKED)
3563 goto unpack_next;
3564NextFrame:
3565 /*
3566 * Remove ThisCCWblock from active read queue, and add it
3567 * to queue of free blocks to be reused.
3568 */
3569 i++;
3570 p_this_ccw->header.length=0xffff;
3571 p_this_ccw->header.opcode=0xff;
3572 /*
3573 * add this one to the free queue for later reuse
3574 */
3575 if (p_first_ccw==NULL) {
3576 p_first_ccw = p_this_ccw;
3577 }
3578 else {
3579 p_last_ccw->next = p_this_ccw;
3580 }
3581 p_last_ccw = p_this_ccw;
3582 /*
3583 * chain to next block on active read queue
3584 */
3585 p_this_ccw = privptr->p_read_active_first;
3586 CLAW_DBF_TEXT_(4,trace,"rxpkt %d",p);
3587 } /* end of while */
3588
3589 /* check validity */
3590
3591#ifdef IOTRACE
3592 printk(KERN_INFO "%s:%s processed frame is %d \n",
3593 dev->name,__FUNCTION__,i);
3594 printk(KERN_INFO "%s:%s F:%lx L:%lx\n",
3595 dev->name,
3596 __FUNCTION__,
3597 (unsigned long)p_first_ccw,
3598 (unsigned long)p_last_ccw);
3599#endif
3600 CLAW_DBF_TEXT_(4,trace,"rxfrm %d",i);
3601 add_claw_reads(dev, p_first_ccw, p_last_ccw);
3602 p_ch=&privptr->channel[READ];
3603 claw_strt_read(dev, LOCK_YES);
3604#ifdef FUNCTRACE
3605 printk(KERN_INFO "%s: %s exit on line %d\n",
3606 dev->name, __FUNCTION__, __LINE__);
3607#endif
3608 return;
3609} /* end of unpack_read */
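/*
 * Editorial sketch (standalone user space, not driver code): the
 * unpack_next loop above steps through 4-byte clawph headers in a
 * packed read buffer. A minimal model of that walk; the buffer
 * contents and the walk_packed() name are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pack_hdr {             /* mirrors struct clawph in claw.h */
	uint16_t len;         /* length of the packed data that follows */
	uint8_t  flag;        /* non-zero terminates the frame */
	uint8_t  link_num;    /* logical link ID */
};

static void walk_packed(const unsigned char *buf, size_t read_size)
{
	size_t off = 0;
	struct pack_hdr ph;

	while (off + sizeof(ph) <= read_size) {
		memcpy(&ph, buf + off, sizeof(ph));
		if (ph.len == 0 || ph.flag != 0)  /* all done, as above */
			break;
		printf("link %u: %u bytes at offset %zu\n",
		       (unsigned)ph.link_num, (unsigned)ph.len,
		       off + sizeof(ph));
		off += sizeof(ph) + ph.len;       /* step to next record */
	}
}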
3610
3611/*-------------------------------------------------------------------*
3612* claw_strt_read *
3613* *
3614*--------------------------------------------------------------------*/
3615static void
3616claw_strt_read (struct net_device *dev, int lock )
3617{
3618 int rc = 0;
3619 __u32 parm;
3620 unsigned long saveflags = 0;
3621 struct claw_privbk *privptr=dev->priv;
3622 struct ccwbk*p_ccwbk;
3623 struct chbk *p_ch;
3624 struct clawh *p_clawh;
3625 p_ch=&privptr->channel[READ];
3626
3627#ifdef FUNCTRACE
3628 printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__);
3629 printk(KERN_INFO "%s: variable lock = %d, dev =\n",dev->name, lock);
3630 dumpit((char *) dev, sizeof(struct net_device));
3631#endif
3632 CLAW_DBF_TEXT(4,trace,"StRdNter");
3633 p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
3634 p_clawh->flag=CLAW_IDLE; /* 0x00 */
3635
3636 if ((privptr->p_write_active_first!=NULL &&
3637 privptr->p_write_active_first->header.flag!=CLAW_PENDING) ||
3638 (privptr->p_read_active_first!=NULL &&
3639 privptr->p_read_active_first->header.flag!=CLAW_PENDING )) {
3640 p_clawh->flag=CLAW_BUSY; /* 0xff */
3641 }
3642#ifdef DEBUGMSG
3643 printk(KERN_INFO "%s:%s state-%02x\n" ,
3644 dev->name,__FUNCTION__, p_ch->claw_state);
3645#endif
3646 if (lock==LOCK_YES) {
3647 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
3648 }
3649 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
3650#ifdef DEBUGMSG
3651 printk(KERN_INFO "%s: HOT READ started in %s\n" ,
3652 dev->name,__FUNCTION__);
3653 p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
3654 dumpit((char *)&p_clawh->flag , 1);
3655#endif
3656 CLAW_DBF_TEXT(4,trace,"HotRead");
3657 p_ccwbk=privptr->p_read_active_first;
3658 parm = (unsigned long) p_ch;
3659 rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
3660 0xff, 0);
3661 if (rc != 0) {
3662 ccw_check_return_code(p_ch->cdev, rc);
3663 }
3664 }
3665 else {
3666#ifdef DEBUGMSG
3667 printk(KERN_INFO "%s: No READ started by %s() In progress\n" ,
3668 dev->name,__FUNCTION__);
3669#endif
3670 CLAW_DBF_TEXT(2,trace,"ReadAct");
3671 }
3672
3673 if (lock==LOCK_YES) {
3674 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
3675 }
3676#ifdef FUNCTRACE
3677 printk(KERN_INFO "%s:%s Exit on line %d\n",
3678 dev->name,__FUNCTION__,__LINE__);
3679#endif
3680 CLAW_DBF_TEXT(4,trace,"StRdExit");
3681 return;
3682} /* end of claw_strt_read */
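/*
 * Editorial sketch (standalone user space): claw_strt_read above and
 * claw_strt_out_IO below both gate channel programs through
 * test_and_set_bit(0, &p_ch->IO_active) so that only one I/O is in
 * flight per channel. A C11-atomics analogue of that gate; the
 * function names are illustrative.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag io_active = ATOMIC_FLAG_INIT;

static bool try_start_io(void)
{
	/* the winner sees the flag clear and may start the I/O */
	return !atomic_flag_test_and_set(&io_active);
}

static void io_done(void)
{
	/* the completion path re-opens the gate, like clearing IO_active */
	atomic_flag_clear(&io_active);
}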
3683
3684/*-------------------------------------------------------------------*
3685* claw_strt_out_IO *
3686* *
3687*--------------------------------------------------------------------*/
3688
3689static void
3690claw_strt_out_IO( struct net_device *dev )
3691{
3692 int rc = 0;
3693 unsigned long parm;
3694 struct claw_privbk *privptr;
3695 struct chbk *p_ch;
3696 struct ccwbk *p_first_ccw;
3697
3698	if (!dev) {	/* check before dev->name is dereferenced */
3699		return;
3700	}
3701#ifdef FUNCTRACE
3702	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
3703#endif
3704 privptr=(struct claw_privbk *)dev->priv;
3705 p_ch=&privptr->channel[WRITE];
3706
3707#ifdef DEBUGMSG
3708 printk(KERN_INFO "%s:%s state-%02x\n" ,
3709 dev->name,__FUNCTION__,p_ch->claw_state);
3710#endif
3711 CLAW_DBF_TEXT(4,trace,"strt_io");
3712 p_first_ccw=privptr->p_write_active_first;
3713
3714 if (p_ch->claw_state == CLAW_STOP)
3715 return;
3716 if (p_first_ccw == NULL) {
3717#ifdef FUNCTRACE
3718 printk(KERN_INFO "%s:%s Exit on line %d\n",
3719 dev->name,__FUNCTION__,__LINE__);
3720#endif
3721 return;
3722 }
3723 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
3724 parm = (unsigned long) p_ch;
3725#ifdef DEBUGMSG
3726 printk(KERN_INFO "%s:%s do_io \n" ,dev->name,__FUNCTION__);
3727 dumpit((char *)p_first_ccw, sizeof(struct ccwbk));
3728#endif
3729 CLAW_DBF_TEXT(2,trace,"StWrtIO");
3730 rc = ccw_device_start (p_ch->cdev,&p_first_ccw->write, parm,
3731 0xff, 0);
3732 if (rc != 0) {
3733 ccw_check_return_code(p_ch->cdev, rc);
3734 }
3735 }
3736 dev->trans_start = jiffies;
3737#ifdef FUNCTRACE
3738 printk(KERN_INFO "%s:%s Exit on line %d\n",
3739 dev->name,__FUNCTION__,__LINE__);
3740#endif
3741
3742 return;
3743} /* end of claw_strt_out_IO */
3744
3745/*-------------------------------------------------------------------*
3746* Free write buffers *
3747* *
3748*--------------------------------------------------------------------*/
3749
3750static void
3751claw_free_wrt_buf( struct net_device *dev )
3752{
3753
3754 struct claw_privbk *privptr=(struct claw_privbk *)dev->priv;
3755 struct ccwbk*p_first_ccw;
3756 struct ccwbk*p_last_ccw;
3757 struct ccwbk*p_this_ccw;
3758 struct ccwbk*p_next_ccw;
3759#ifdef IOTRACE
3760 struct ccwbk*p_buf;
3761#endif
3762#ifdef FUNCTRACE
3763 printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
3764 printk(KERN_INFO "%s: free count = %d variable dev =\n",
3765 dev->name,privptr->write_free_count);
3766#endif
3767 CLAW_DBF_TEXT(4,trace,"freewrtb");
3768 /* scan the write queue to free any completed write packets */
3769 p_first_ccw=NULL;
3770 p_last_ccw=NULL;
3771#ifdef IOTRACE
3772 printk(KERN_INFO "%s: Dump current CCW chain \n",dev->name );
3773 p_buf=privptr->p_write_active_first;
3774 while (p_buf!=NULL) {
3775 dumpit((char *)p_buf, sizeof(struct ccwbk));
3776 p_buf=p_buf->next;
3777 }
3778 if (p_buf==NULL) {
3779 printk(KERN_INFO "%s: privptr->p_write_"
3780 "active_first==NULL\n",dev->name );
3781 }
3782 p_buf=(struct ccwbk*)privptr->p_end_ccw;
3783 dumpit((char *)p_buf, sizeof(struct endccw));
3784#endif
3785 p_this_ccw=privptr->p_write_active_first;
3786 while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
3787 {
3788 p_next_ccw = p_this_ccw->next;
3789 if (((p_next_ccw!=NULL) &&
3790 (p_next_ccw->header.flag!=CLAW_PENDING)) ||
3791 ((p_this_ccw == privptr->p_write_active_last) &&
3792 (p_this_ccw->header.flag!=CLAW_PENDING))) {
3793 /* The next CCW is OK or this is */
3794 /* the last CCW...free it @A1A */
3795 privptr->p_write_active_first=p_this_ccw->next;
3796 p_this_ccw->header.flag=CLAW_PENDING;
3797 p_this_ccw->next=privptr->p_write_free_chain;
3798 privptr->p_write_free_chain=p_this_ccw;
3799 ++privptr->write_free_count;
3800 privptr->stats.tx_bytes+= p_this_ccw->write.count;
3801 p_this_ccw=privptr->p_write_active_first;
3802 privptr->stats.tx_packets++;
3803 }
3804 else {
3805 break;
3806 }
3807 }
3808 if (privptr->write_free_count!=0) {
3809 claw_clearbit_busy(TB_NOBUFFER,dev);
3810 }
3811 /* whole chain removed? */
3812 if (privptr->p_write_active_first==NULL) {
3813 privptr->p_write_active_last=NULL;
3814#ifdef DEBUGMSG
3815 printk(KERN_INFO "%s:%s p_write_"
3816 "active_first==NULL\n",dev->name,__FUNCTION__);
3817#endif
3818 }
3819#ifdef IOTRACE
3820 printk(KERN_INFO "%s: Dump arranged CCW chain \n",dev->name );
3821 p_buf=privptr->p_write_active_first;
3822 while (p_buf!=NULL) {
3823 dumpit((char *)p_buf, sizeof(struct ccwbk));
3824 p_buf=p_buf->next;
3825 }
3826 if (p_buf==NULL) {
3827 printk(KERN_INFO "%s: privptr->p_write_active_"
3828 "first==NULL\n",dev->name );
3829 }
3830 p_buf=(struct ccwbk*)privptr->p_end_ccw;
3831 dumpit((char *)p_buf, sizeof(struct endccw));
3832#endif
3833
3834 CLAW_DBF_TEXT_(4,trace,"FWC=%d",privptr->write_free_count);
3835#ifdef FUNCTRACE
3836 printk(KERN_INFO "%s:%s Exit on line %d free_count =%d\n",
3837 dev->name,__FUNCTION__, __LINE__,privptr->write_free_count);
3838#endif
3839 return;
3840}
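/*
 * Editorial sketch (standalone, simplified): the loop above unlinks
 * completed blocks from the active write list and pushes them onto the
 * free chain for reuse. Here the driver's "next CCW is done, or this
 * is the last CCW" test is reduced to a plain done flag.
 */
#include <stddef.h>

struct blk { struct blk *next; int done; };

static void recycle(struct blk **active, struct blk **free_chain,
		    unsigned int *free_count)
{
	struct blk *b;

	while ((b = *active) != NULL && b->done) {
		*active = b->next;        /* unlink from the active list */
		b->next = *free_chain;    /* push onto the free chain */
		*free_chain = b;
		(*free_count)++;
	}
}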
3841
3842/*-------------------------------------------------------------------*
3843* claw free netdevice *
3844* *
3845*--------------------------------------------------------------------*/
3846static void
3847claw_free_netdevice(struct net_device * dev, int free_dev)
3848{
3849 struct claw_privbk *privptr;
3850	if (!dev)	/* check before dev->name is dereferenced */
3851		return;
3852#ifdef FUNCTRACE
3853	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
3854#endif
3855	CLAW_DBF_TEXT(2,setup,"free_dev");
3856
3857 CLAW_DBF_TEXT_(2,setup,"%s",dev->name);
3858 privptr = dev->priv;
3859 if (dev->flags & IFF_RUNNING)
3860 claw_release(dev);
3861 if (privptr) {
3862 privptr->channel[READ].ndev = NULL; /* say it's free */
3863 }
3864 dev->priv=NULL;
3865#ifdef MODULE
3866 if (free_dev) {
3867 free_netdev(dev);
3868 }
3869#endif
3870	CLAW_DBF_TEXT(2,setup,"free_ok");
3871#ifdef FUNCTRACE
3872 printk(KERN_INFO "%s:%s Exit\n",dev->name,__FUNCTION__);
3873#endif
3874}
3875
3876/**
3877 * Claw init netdevice
3878 * Initialize everything of the net device except the name and the
3879 * channel structs.
3880 */
3881static void
3882claw_init_netdevice(struct net_device * dev)
3883{
3884	if (!dev) {	/* check before dev->name is dereferenced */
3885		printk(KERN_WARNING "claw:%s BAD Device exit line %d\n",
3886			__FUNCTION__,__LINE__);
3887		CLAW_DBF_TEXT(2,setup,"baddev");
3888		return;
3889	}
3890#ifdef FUNCTRACE
3891	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
3892#endif
3893	CLAW_DBF_TEXT(2,setup,"init_dev");
3894	CLAW_DBF_TEXT_(2,setup,"%s",dev->name);
3895 dev->mtu = CLAW_DEFAULT_MTU_SIZE;
3896 dev->hard_start_xmit = claw_tx;
3897 dev->open = claw_open;
3898 dev->stop = claw_release;
3899 dev->get_stats = claw_stats;
3900 dev->change_mtu = claw_change_mtu;
3901 dev->hard_header_len = 0;
3902 dev->addr_len = 0;
3903 dev->type = ARPHRD_SLIP;
3904 dev->tx_queue_len = 1300;
3905 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
3906 SET_MODULE_OWNER(dev);
3907#ifdef FUNCTRACE
3908 printk(KERN_INFO "%s:%s Exit\n",dev->name,__FUNCTION__);
3909#endif
3910 CLAW_DBF_TEXT(2,setup,"initok");
3911 return;
3912}
3913
3914/**
3915 * Init a new channel in the privptr->channel[i].
3916 *
3917 * @param cdev The ccw_device to be added.
3918 *
3919 * @return 0 on success, !0 on error.
3920 */
3921static int
3922add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
3923{
3924 struct chbk *p_ch;
3925
3926#ifdef FUNCTRACE
3927 printk(KERN_INFO "%s:%s Enter\n",cdev->dev.bus_id,__FUNCTION__);
3928#endif
3929 CLAW_DBF_TEXT_(2,setup,"%s",cdev->dev.bus_id);
3930 privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
3931 p_ch = &privptr->channel[i];
3932 p_ch->cdev = cdev;
3933 snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", cdev->dev.bus_id);
3934 sscanf(cdev->dev.bus_id+4,"%x",&p_ch->devno);
3935 if ((p_ch->irb = kmalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
3936 printk(KERN_WARNING "%s Out of memory in %s for irb\n",
3937 p_ch->id,__FUNCTION__);
3938#ifdef FUNCTRACE
3939 printk(KERN_INFO "%s:%s Exit on line %d\n",
3940 p_ch->id,__FUNCTION__,__LINE__);
3941#endif
3942 return -ENOMEM;
3943 }
3944 memset(p_ch->irb, 0, sizeof (struct irb));
3945#ifdef FUNCTRACE
3946 printk(KERN_INFO "%s:%s Exit on line %d\n",
3947 cdev->dev.bus_id,__FUNCTION__,__LINE__);
3948#endif
3949 return 0;
3950}
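/*
 * Editorial sketch (standalone user space): the devno parse above
 * relies on a 2.6-era ccw bus_id of the form "0.0.xxxx", so bus_id+4
 * skips the "0.0." prefix and leaves the hex device number. The id
 * used here is a made-up example.
 */
#include <stdio.h>

int main(void)
{
	const char bus_id[] = "0.0.0600";
	unsigned int devno;

	sscanf(bus_id + 4, "%x", &devno);
	printf("devno = 0x%04x\n", devno);   /* prints devno = 0x0600 */
	return 0;
}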
3951
3952
3953/**
3954 *
3955 * Setup an interface.
3956 *
3957 * @param cgdev Device to be setup.
3958 *
3959 * @returns 0 on success, !0 on failure.
3960 */
3961static int
3962claw_new_device(struct ccwgroup_device *cgdev)
3963{
3964 struct claw_privbk *privptr;
3965 struct claw_env *p_env;
3966 struct net_device *dev;
3967 int ret;
3968
3969 pr_debug("%s() called\n", __FUNCTION__);
3970 printk(KERN_INFO "claw: add for %s\n",cgdev->cdev[READ]->dev.bus_id);
3971 CLAW_DBF_TEXT(2,setup,"new_dev");
3972 privptr = cgdev->dev.driver_data;
3973 cgdev->cdev[READ]->dev.driver_data = privptr;
3974 cgdev->cdev[WRITE]->dev.driver_data = privptr;
3975 if (!privptr)
3976 return -ENODEV;
3977 p_env = privptr->p_env;
3978 sscanf(cgdev->cdev[READ]->dev.bus_id+4,"%x",
3979 &p_env->devno[READ]);
3980 sscanf(cgdev->cdev[WRITE]->dev.bus_id+4,"%x",
3981 &p_env->devno[WRITE]);
3982 ret = add_channel(cgdev->cdev[0],0,privptr);
3983 if (ret == 0)
3984 ret = add_channel(cgdev->cdev[1],1,privptr);
3985 if (ret != 0) {
3986		printk(KERN_WARNING
3987			"claw: add channel failed "
3988			"with ret = %d\n", ret);
3989 goto out;
3990 }
3991 ret = ccw_device_set_online(cgdev->cdev[READ]);
3992 if (ret != 0) {
3993 printk(KERN_WARNING
3994 "claw: ccw_device_set_online %s READ failed "
3995 "with ret = %d\n",cgdev->cdev[READ]->dev.bus_id,ret);
3996 goto out;
3997 }
3998 ret = ccw_device_set_online(cgdev->cdev[WRITE]);
3999 if (ret != 0) {
4000 printk(KERN_WARNING
4001 "claw: ccw_device_set_online %s WRITE failed "
4002 "with ret = %d\n",cgdev->cdev[WRITE]->dev.bus_id, ret);
4003 goto out;
4004 }
4005 dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
4006 if (!dev) {
4007 printk(KERN_WARNING "%s:alloc_netdev failed\n",__FUNCTION__);
4008 goto out;
4009 }
4010 dev->priv = privptr;
4011 cgdev->dev.driver_data = privptr;
4012 cgdev->cdev[READ]->dev.driver_data = privptr;
4013 cgdev->cdev[WRITE]->dev.driver_data = privptr;
4014 /* sysfs magic */
4015 SET_NETDEV_DEV(dev, &cgdev->dev);
4016 if (register_netdev(dev) != 0) {
4017 claw_free_netdevice(dev, 1);
4018 CLAW_DBF_TEXT(2,trace,"regfail");
4019 goto out;
4020 }
4021 dev->flags &=~IFF_RUNNING;
4022 if (privptr->buffs_alloc == 0) {
4023 ret=init_ccw_bk(dev);
4024 if (ret !=0) {
4025 printk(KERN_WARNING
4026 "claw: init_ccw_bk failed with ret=%d\n", ret);
4027 unregister_netdev(dev);
4028 claw_free_netdevice(dev,1);
4029 CLAW_DBF_TEXT(2,trace,"ccwmem");
4030 goto out;
4031 }
4032 }
4033 privptr->channel[READ].ndev = dev;
4034 privptr->channel[WRITE].ndev = dev;
4035 privptr->p_env->ndev = dev;
4036
4037 printk(KERN_INFO "%s:readsize=%d writesize=%d "
4038 "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
4039 dev->name, p_env->read_size,
4040 p_env->write_size, p_env->read_buffers,
4041 p_env->write_buffers, p_env->devno[READ],
4042 p_env->devno[WRITE]);
4043 printk(KERN_INFO "%s:host_name:%.8s, adapter_name "
4044 ":%.8s api_type: %.8s\n",
4045 dev->name, p_env->host_name,
4046 p_env->adapter_name , p_env->api_type);
4047 return 0;
4048out:
4049 ccw_device_set_offline(cgdev->cdev[1]);
4050 ccw_device_set_offline(cgdev->cdev[0]);
4051
4052 return -ENODEV;
4053}
4054
4055static void
4056claw_purge_skb_queue(struct sk_buff_head *q)
4057{
4058 struct sk_buff *skb;
4059
4060 CLAW_DBF_TEXT(4,trace,"purgque");
4061
4062 while ((skb = skb_dequeue(q))) {
4063 atomic_dec(&skb->users);
4064 dev_kfree_skb_irq(skb);
4065 }
4066}
4067
4068/**
4069 * Shutdown an interface.
4070 *
4071 * @param cgdev Device to be shut down.
4072 *
4073 * @returns 0 on success, !0 on failure.
4074 */
4075static int
4076claw_shutdown_device(struct ccwgroup_device *cgdev)
4077{
4078 struct claw_privbk *priv;
4079 struct net_device *ndev;
4080 int ret;
4081
4082 pr_debug("%s() called\n", __FUNCTION__);
4083 CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id);
4084 priv = cgdev->dev.driver_data;
4085 if (!priv)
4086 return -ENODEV;
4087 ndev = priv->channel[READ].ndev;
4088 if (ndev) {
4089 /* Close the device */
4090		printk(KERN_INFO
4091			"%s: shutting down\n",ndev->name);
4092 if (ndev->flags & IFF_RUNNING)
4093 ret = claw_release(ndev);
4094 ndev->flags &=~IFF_RUNNING;
4095 unregister_netdev(ndev);
4096 ndev->priv = NULL; /* cgdev data, not ndev's to free */
4097 claw_free_netdevice(ndev, 1);
4098 priv->channel[READ].ndev = NULL;
4099 priv->channel[WRITE].ndev = NULL;
4100 priv->p_env->ndev = NULL;
4101 }
4102 ccw_device_set_offline(cgdev->cdev[1]);
4103 ccw_device_set_offline(cgdev->cdev[0]);
4104 return 0;
4105}
4106
4107static void
4108claw_remove_device(struct ccwgroup_device *cgdev)
4109{
4110 struct claw_privbk *priv;
4111
4112 pr_debug("%s() called\n", __FUNCTION__);
4113 CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id);
4114 priv = cgdev->dev.driver_data;
4115 if (!priv) {
4116		printk(KERN_WARNING "claw: %s() no priv, exiting\n",__FUNCTION__);
4117 return;
4118 }
4119 printk(KERN_INFO "claw: %s() called %s will be removed.\n",
4120 __FUNCTION__,cgdev->cdev[0]->dev.bus_id);
4121 if (cgdev->state == CCWGROUP_ONLINE)
4122 claw_shutdown_device(cgdev);
4123 claw_remove_files(&cgdev->dev);
4124 if (priv->p_mtc_envelope!=NULL) {
4125 kfree(priv->p_mtc_envelope);
4126 priv->p_mtc_envelope=NULL;
4127 }
4128 if (priv->p_env != NULL) {
4129 kfree(priv->p_env);
4130 priv->p_env=NULL;
4131 }
4132 if (priv->channel[0].irb != NULL) {
4133 kfree(priv->channel[0].irb);
4134 priv->channel[0].irb=NULL;
4135 }
4136 if (priv->channel[1].irb != NULL) {
4137 kfree(priv->channel[1].irb);
4138 priv->channel[1].irb=NULL;
4139 }
4140 kfree(priv);
4141 cgdev->dev.driver_data=NULL;
4142 cgdev->cdev[READ]->dev.driver_data = NULL;
4143 cgdev->cdev[WRITE]->dev.driver_data = NULL;
4144 put_device(&cgdev->dev);
4145}
4146
4147
4148/*
4149 * sysfs attributes
4150 */
4151static ssize_t
4152claw_hname_show(struct device *dev, char *buf)
4153{
4154 struct claw_privbk *priv;
4155 struct claw_env * p_env;
4156
4157 priv = dev->driver_data;
4158 if (!priv)
4159 return -ENODEV;
4160 p_env = priv->p_env;
4161 return sprintf(buf, "%s\n",p_env->host_name);
4162}
4163
4164static ssize_t
4165claw_hname_write(struct device *dev, const char *buf, size_t count)
4166{
4167 struct claw_privbk *priv;
4168 struct claw_env * p_env;
4169
4170 priv = dev->driver_data;
4171 if (!priv)
4172 return -ENODEV;
4173 p_env = priv->p_env;
4174 if (count > MAX_NAME_LEN+1)
4175 return -EINVAL;
4176 memset(p_env->host_name, 0x20, MAX_NAME_LEN);
4177 strncpy(p_env->host_name,buf, count);
4178 p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */
4179 p_env->host_name[MAX_NAME_LEN] = 0x00;
4180 CLAW_DBF_TEXT(2,setup,"HstnSet");
4181 CLAW_DBF_TEXT_(2,setup,"%s",p_env->host_name);
4182
4183 return count;
4184}
4185
4186static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);
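/*
 * Editorial sketch (standalone user space): the store routine above
 * space-fills the 8-byte field, copies the user string, and blanks the
 * trailing newline that "echo" appends. pad_name() and the sample
 * input are illustrative only.
 */
#include <stdio.h>
#include <string.h>

#define NAME_LEN 8   /* stands in for MAX_NAME_LEN */

static void pad_name(char *dst, const char *buf, size_t count)
{
	memset(dst, 0x20, NAME_LEN);  /* blank-fill, as in the driver */
	strncpy(dst, buf, count);
	dst[count - 1] = 0x20;        /* clear extra 0x0a */
	dst[NAME_LEN] = 0x00;
}

int main(void)
{
	char host_name[NAME_LEN + 1];

	/* what "echo LNXHOST > host_name" delivers: 8 bytes incl. '\n' */
	pad_name(host_name, "LNXHOST\n", 8);
	printf("[%s]\n", host_name);  /* prints [LNXHOST ] */
	return 0;
}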
4187
4188static ssize_t
4189claw_adname_show(struct device *dev, char *buf)
4190{
4191 struct claw_privbk *priv;
4192 struct claw_env * p_env;
4193
4194 priv = dev->driver_data;
4195 if (!priv)
4196 return -ENODEV;
4197 p_env = priv->p_env;
4198 return sprintf(buf, "%s\n",p_env->adapter_name);
4199}
4200
4201static ssize_t
4202claw_adname_write(struct device *dev, const char *buf, size_t count)
4203{
4204 struct claw_privbk *priv;
4205 struct claw_env * p_env;
4206
4207 priv = dev->driver_data;
4208 if (!priv)
4209 return -ENODEV;
4210 p_env = priv->p_env;
4211 if (count > MAX_NAME_LEN+1)
4212 return -EINVAL;
4213 memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
4214 strncpy(p_env->adapter_name,buf, count);
4215 p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */
4216 p_env->adapter_name[MAX_NAME_LEN] = 0x00;
4217 CLAW_DBF_TEXT(2,setup,"AdnSet");
4218 CLAW_DBF_TEXT_(2,setup,"%s",p_env->adapter_name);
4219
4220 return count;
4221}
4222
4223static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);
4224
4225static ssize_t
4226claw_apname_show(struct device *dev, char *buf)
4227{
4228 struct claw_privbk *priv;
4229 struct claw_env * p_env;
4230
4231 priv = dev->driver_data;
4232 if (!priv)
4233 return -ENODEV;
4234 p_env = priv->p_env;
4235 return sprintf(buf, "%s\n",
4236 p_env->api_type);
4237}
4238
4239static ssize_t
4240claw_apname_write(struct device *dev, const char *buf, size_t count)
4241{
4242 struct claw_privbk *priv;
4243 struct claw_env * p_env;
4244
4245 priv = dev->driver_data;
4246 if (!priv)
4247 return -ENODEV;
4248 p_env = priv->p_env;
4249 if (count > MAX_NAME_LEN+1)
4250 return -EINVAL;
4251 memset(p_env->api_type, 0x20, MAX_NAME_LEN);
4252 strncpy(p_env->api_type,buf, count);
4253	p_env->api_type[count-1] = 0x20; /* clear extra 0x0a */
4254 p_env->api_type[MAX_NAME_LEN] = 0x00;
4255 if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
4256 p_env->read_size=DEF_PACK_BUFSIZE;
4257 p_env->write_size=DEF_PACK_BUFSIZE;
4258 p_env->packing=PACKING_ASK;
4259 CLAW_DBF_TEXT(2,setup,"PACKING");
4260 }
4261 else {
4262 p_env->packing=0;
4263 p_env->read_size=CLAW_FRAME_SIZE;
4264 p_env->write_size=CLAW_FRAME_SIZE;
4265 CLAW_DBF_TEXT(2,setup,"ApiSet");
4266 }
4267 CLAW_DBF_TEXT_(2,setup,"%s",p_env->api_type);
4268 return count;
4269}
4270
4271static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
4272
4273static ssize_t
4274claw_wbuff_show(struct device *dev, char *buf)
4275{
4276 struct claw_privbk *priv;
4277 struct claw_env * p_env;
4278
4279 priv = dev->driver_data;
4280 if (!priv)
4281 return -ENODEV;
4282 p_env = priv->p_env;
4283 return sprintf(buf, "%d\n", p_env->write_buffers);
4284}
4285
4286static ssize_t
4287claw_wbuff_write(struct device *dev, const char *buf, size_t count)
4288{
4289 struct claw_privbk *priv;
4290 struct claw_env * p_env;
4291 int nnn,max;
4292
4293 priv = dev->driver_data;
4294 if (!priv)
4295 return -ENODEV;
4296 p_env = priv->p_env;
4297 sscanf(buf, "%i", &nnn);
4298 if (p_env->packing) {
4299 max = 64;
4300 }
4301 else {
4302 max = 512;
4303 }
4304 if ((nnn > max ) || (nnn < 2))
4305 return -EINVAL;
4306 p_env->write_buffers = nnn;
4307 CLAW_DBF_TEXT(2,setup,"Wbufset");
4308 CLAW_DBF_TEXT_(2,setup,"WB=%d",p_env->write_buffers);
4309 return count;
4310}
4311
4312static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);
4313
4314static ssize_t
4315claw_rbuff_show(struct device *dev, char *buf)
4316{
4317 struct claw_privbk *priv;
4318 struct claw_env * p_env;
4319
4320 priv = dev->driver_data;
4321 if (!priv)
4322 return -ENODEV;
4323 p_env = priv->p_env;
4324 return sprintf(buf, "%d\n", p_env->read_buffers);
4325}
4326
4327static ssize_t
4328claw_rbuff_write(struct device *dev, const char *buf, size_t count)
4329{
4330 struct claw_privbk *priv;
4331 struct claw_env *p_env;
4332 int nnn,max;
4333
4334 priv = dev->driver_data;
4335 if (!priv)
4336 return -ENODEV;
4337 p_env = priv->p_env;
4338 sscanf(buf, "%i", &nnn);
4339 if (p_env->packing) {
4340 max = 64;
4341 }
4342 else {
4343 max = 512;
4344 }
4345 if ((nnn > max ) || (nnn < 2))
4346 return -EINVAL;
4347 p_env->read_buffers = nnn;
4348 CLAW_DBF_TEXT(2,setup,"Rbufset");
4349 CLAW_DBF_TEXT_(2,setup,"RB=%d",p_env->read_buffers);
4350 return count;
4351}
4352
4353static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
4354
4355static struct attribute *claw_attr[] = {
4356 &dev_attr_read_buffer.attr,
4357 &dev_attr_write_buffer.attr,
4358 &dev_attr_adapter_name.attr,
4359 &dev_attr_api_type.attr,
4360 &dev_attr_host_name.attr,
4361 NULL,
4362};
4363
4364static struct attribute_group claw_attr_group = {
4365 .attrs = claw_attr,
4366};
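/*
 * Usage sketch (the device path is hypothetical): once the ccwgroup
 * device exists, these attributes appear in sysfs and fill the
 * claw_env, e.g.
 *
 *	echo HOSTA  > /sys/bus/ccwgroup/devices/0.0.0600/host_name
 *	echo PACKED > /sys/bus/ccwgroup/devices/0.0.0600/api_type
 *	echo 16     > /sys/bus/ccwgroup/devices/0.0.0600/write_buffer
 *
 * Writing PACKED switches read_size/write_size to DEF_PACK_BUFSIZE and
 * caps read_buffer/write_buffer at 64 instead of 512, per the store
 * routines above.
 */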
4367
4368static int
4369claw_add_files(struct device *dev)
4370{
4371 pr_debug("%s() called\n", __FUNCTION__);
4372 CLAW_DBF_TEXT(2,setup,"add_file");
4373 return sysfs_create_group(&dev->kobj, &claw_attr_group);
4374}
4375
4376static void
4377claw_remove_files(struct device *dev)
4378{
4379 pr_debug("%s() called\n", __FUNCTION__);
4380 CLAW_DBF_TEXT(2,setup,"rem_file");
4381 sysfs_remove_group(&dev->kobj, &claw_attr_group);
4382}
4383
4384/*--------------------------------------------------------------------*
4385* claw_init and cleanup *
4386*---------------------------------------------------------------------*/
4387
4388static void __exit
4389claw_cleanup(void)
4390{
4391 unregister_cu3088_discipline(&claw_group_driver);
4392 claw_unregister_debug_facility();
4393 printk(KERN_INFO "claw: Driver unloaded\n");
4394
4395}
4396
4397/**
4398 * Initialize module.
4399 * This is called just after the module is loaded.
4400 *
4401 * @return 0 on success, !0 on error.
4402 */
4403static int __init
4404claw_init(void)
4405{
4406 int ret = 0;
4407 printk(KERN_INFO "claw: starting driver "
4408#ifdef MODULE
4409 "module "
4410#else
4411 "compiled into kernel "
4412#endif
4413 " $Revision: 1.35 $ $Date: 2005/03/24 12:25:38 $ \n");
4414
4415
4416#ifdef FUNCTRACE
4417 printk(KERN_INFO "claw: %s() enter \n",__FUNCTION__);
4418#endif
4419 ret = claw_register_debug_facility();
4420 if (ret) {
4421 printk(KERN_WARNING "claw: %s() debug_register failed %d\n",
4422 __FUNCTION__,ret);
4423 return ret;
4424 }
4425 CLAW_DBF_TEXT(2,setup,"init_mod");
4426 ret = register_cu3088_discipline(&claw_group_driver);
4427 if (ret) {
4428 claw_unregister_debug_facility();
4429		printk(KERN_WARNING "claw: %s() cu3088 register failed %d\n",
4430 __FUNCTION__,ret);
4431 }
4432#ifdef FUNCTRACE
4433 printk(KERN_INFO "claw: %s() exit \n",__FUNCTION__);
4434#endif
4435 return ret;
4436}
4437
4438module_init(claw_init);
4439module_exit(claw_cleanup);
4440
4441
4442
4443/*--------------------------------------------------------------------*
4444* End of File *
4445*---------------------------------------------------------------------*/
4446
4447
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
new file mode 100644
index 000000000000..3df71970f601
--- /dev/null
+++ b/drivers/s390/net/claw.h
@@ -0,0 +1,335 @@
1/*******************************************************
2* Define constants *
3* *
4********************************************************/
5#define VERSION_CLAW_H "$Revision: 1.6 $"
6/*-----------------------------------------------------*
7* CCW command codes for CLAW protocol *
8*------------------------------------------------------*/
9
10#define CCW_CLAW_CMD_WRITE 0x01 /* write - not including link */
11#define CCW_CLAW_CMD_READ 0x02 /* read */
12#define CCW_CLAW_CMD_NOP 0x03 /* NOP */
13#define CCW_CLAW_CMD_SENSE 0x04 /* Sense */
14#define CCW_CLAW_CMD_SIGNAL_SMOD 0x05 /* Signal Status Modifier */
15#define CCW_CLAW_CMD_TIC 0x08 /* TIC */
16#define CCW_CLAW_CMD_READHEADER 0x12 /* read header data */
17#define CCW_CLAW_CMD_READFF 0x22 /* read an FF */
18#define CCW_CLAW_CMD_SENSEID 0xe4 /* Sense ID */
19
20
21/*-----------------------------------------------------*
22* CLAW Unique constants *
23*------------------------------------------------------*/
24
25#define MORE_to_COME_FLAG 0x04 /* OR with write CCW in case of m-t-c */
26#define CLAW_IDLE 0x00 /* flag to indicate CLAW is idle */
27#define CLAW_BUSY 0xff /* flag to indicate CLAW is busy */
28#define CLAW_PENDING 0x00 /* flag to indicate i/o is pending */
29#define CLAW_COMPLETE 0xff /* flag to indicate i/o completed */
30
31/*-----------------------------------------------------*
32*        CLAW control command code                     *
33*------------------------------------------------------*/
34
35#define SYSTEM_VALIDATE_REQUEST 0x01 /* System Validate request */
36#define SYSTEM_VALIDATE_RESPONSE 0x02 /* System Validate response */
37#define CONNECTION_REQUEST 0x21 /* Connection request */
38#define CONNECTION_RESPONSE 0x22 /* Connection response */
39#define CONNECTION_CONFIRM 0x23 /* Connection confirm */
40#define DISCONNECT 0x24 /* Disconnect */
41#define CLAW_ERROR 0x41 /* CLAW error message */
42#define CLAW_VERSION_ID 2 /* CLAW version ID */
43
44/*-----------------------------------------------------*
45*       CLAW adapter sense bytes                      *
46*------------------------------------------------------*/
47
48#define CLAW_ADAPTER_SENSE_BYTE 0x41 /* Stop command issued to adapter */
49
50/*-----------------------------------------------------*
51* CLAW control command return codes *
52*------------------------------------------------------*/
53
54#define CLAW_RC_NAME_MISMATCH 166 /* names do not match */
55#define CLAW_RC_WRONG_VERSION 167 /* wrong CLAW version number */
56#define CLAW_RC_HOST_RCV_TOO_SMALL 180 /* Host maximum receive is */
57 /* less than Linux on zSeries*/
58 /* transmit size */
59
60/*-----------------------------------------------------*
61* CLAW Constants application name *
62*------------------------------------------------------*/
63
64#define HOST_APPL_NAME "TCPIP "
65#define WS_APPL_NAME_IP_LINK "TCPIP "
66#define WS_APPL_NAME_IP_NAME "IP "
67#define WS_APPL_NAME_API_LINK "API "
68#define WS_APPL_NAME_PACKED "PACKED "
69#define WS_NAME_NOT_DEF "NOT_DEF "
70#define PACKING_ASK 1
71#define PACK_SEND 2
72#define DO_PACKED 3
73
74#define MAX_ENVELOPE_SIZE 65536
75#define CLAW_DEFAULT_MTU_SIZE 4096
76#define DEF_PACK_BUFSIZE 32768
77#define READ 0
78#define WRITE 1
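/* Worked sizing: with the default 4 KB CLAW_FRAME_SIZE (defined
   below), one 64 KB envelope holds 65536/4096 = 16 full More-To-Come
   frames; unpack_read() in claw.c counts rx_frame_errors once an
   incoming message would exceed that. */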
79
80#define TB_TX 0 /* sk buffer handling in process */
81#define TB_STOP 1 /* network device stop in process */
82#define TB_RETRY 2 /* retry in process */
83#define TB_NOBUFFER 3 /* no buffer on free queue */
84#define CLAW_MAX_LINK_ID 1
85#define CLAW_MAX_DEV 256 /* max claw devices */
86#define MAX_NAME_LEN 8 /* host name, adapter name length */
87#define CLAW_FRAME_SIZE 4096
88#define CLAW_ID_SIZE BUS_ID_SIZE+3
89
90/* state machine codes used in claw_irq_handler */
91
92#define CLAW_STOP 0
93#define CLAW_START_HALT_IO 1
94#define CLAW_START_SENSEID 2
95#define CLAW_START_READ 3
96#define CLAW_START_WRITE 4
97
98/*-----------------------------------------------------*
99* Lock flag *
100*------------------------------------------------------*/
101#define LOCK_YES 0
102#define LOCK_NO 1
103
104/*-----------------------------------------------------*
105* DBF Debug macros *
106*------------------------------------------------------*/
107#define CLAW_DBF_TEXT(level, name, text) \
108 do { \
109 debug_text_event(claw_dbf_##name, level, text); \
110 } while (0)
111
112#define CLAW_DBF_HEX(level,name,addr,len) \
113do { \
114 debug_event(claw_dbf_##name,level,(void*)(addr),len); \
115} while (0)
116
117#define CLAW_DBF_TEXT_(level,name,text...) \
118do { \
119 sprintf(debug_buffer, text); \
120 debug_text_event(claw_dbf_##name,level, debug_buffer);\
121} while (0)
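/*
 * Usage sketch, as seen in claw.c: the arguments are (level, area,
 * text), with the area naming one of the claw_dbf_* facilities:
 *
 *	CLAW_DBF_TEXT(2, setup, "new_dev");
 *	CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers);
 *
 * The trailing-underscore form sprintf()s into the shared debug_buffer
 * first, so concurrent callers need external serialization.
 */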
122
123/*******************************************************
124* Define Control Blocks *
125* *
126********************************************************/
127
128/*------------------------------------------------------*/
129/* CLAW header */
130/*------------------------------------------------------*/
131
132struct clawh {
133 __u16 length; /* length of data read by preceding read CCW */
134 __u8 opcode; /* equivalent read CCW */
135 __u8 flag; /* flag of FF to indicate read was completed */
136};
137
138/*------------------------------------------------------*/
139/* CLAW Packing header 4 bytes */
140/*------------------------------------------------------*/
141struct clawph {
142 __u16 len; /* Length of Packed Data Area */
143 __u8 flag; /* Reserved not used */
144 __u8 link_num; /* Link ID */
145};
146
147/*------------------------------------------------------*/
148/* CLAW Ending struct ccwbk */
149/*------------------------------------------------------*/
150struct endccw {
151 __u32 real; /* real address of this block */
152 __u8 write1; /* write 1 is active */
153 __u8 read1; /* read 1 is active */
154 __u16 reserved; /* reserved for future use */
155 struct ccw1 write1_nop1;
156 struct ccw1 write1_nop2;
157 struct ccw1 write2_nop1;
158 struct ccw1 write2_nop2;
159 struct ccw1 read1_nop1;
160 struct ccw1 read1_nop2;
161 struct ccw1 read2_nop1;
162 struct ccw1 read2_nop2;
163};
164
165/*------------------------------------------------------*/
166/* CLAW struct ccwbk */
167/*------------------------------------------------------*/
168struct ccwbk {
169 void *next; /* pointer to next ccw block */
170 __u32 real; /* real address of this ccw */
171 void *p_buffer; /* virtual address of data */
172 struct clawh header; /* claw header */
173 struct ccw1 write; /* write CCW */
174 struct ccw1 w_read_FF; /* read FF */
175 struct ccw1 w_TIC_1; /* TIC */
176 struct ccw1 read; /* read CCW */
177 struct ccw1 read_h; /* read header */
178 struct ccw1 signal; /* signal SMOD */
179 struct ccw1 r_TIC_1; /* TIC1 */
180 struct ccw1 r_read_FF; /* read FF */
181 struct ccw1 r_TIC_2; /* TIC2 */
182};
183
184/*------------------------------------------------------*/
185/* CLAW control block */
186/*------------------------------------------------------*/
187struct clawctl {
188 __u8 command; /* control command */
189 __u8 version; /* CLAW protocol version */
190 __u8 linkid; /* link ID */
191 __u8 correlator; /* correlator */
192 __u8 rc; /* return code */
193 __u8 reserved1; /* reserved */
194 __u8 reserved2; /* reserved */
195 __u8 reserved3; /* reserved */
196 __u8 data[24]; /* command specific fields */
197};
198
199/*------------------------------------------------------*/
200/* Data for SYSTEMVALIDATE command */
201/*------------------------------------------------------*/
202struct sysval {
203 char WS_name[8]; /* Workstation System name */
204 char host_name[8]; /* Host system name */
205 __u16 read_frame_size; /* read frame size */
206 __u16 write_frame_size; /* write frame size */
207 __u8 reserved[4]; /* reserved */
208};
209
210/*------------------------------------------------------*/
211/* Data for Connect command */
212/*------------------------------------------------------*/
213struct conncmd {
214 char WS_name[8]; /* Workstation application name */
215 char host_name[8]; /* Host application name */
216 __u16 reserved1[2]; /* read frame size */
217 __u8 reserved2[4]; /* reserved */
218};
219
220/*------------------------------------------------------*/
221/* Data for CLAW error */
222/*------------------------------------------------------*/
223struct clawwerror {
224 char reserved1[8]; /* reserved */
225 char reserved2[8]; /* reserved */
226 char reserved3[8]; /* reserved */
227};
228
229/*------------------------------------------------------*/
230/* Data buffer for CLAW */
231/*------------------------------------------------------*/
232struct clawbuf {
233 char buffer[MAX_ENVELOPE_SIZE]; /* data buffer */
234};
235
236/*------------------------------------------------------*/
237/* Channel control block for read and write channel */
238/*------------------------------------------------------*/
239
240struct chbk {
241 unsigned int devno;
242 int irq;
243 char id[CLAW_ID_SIZE];
244 __u32 IO_active;
245 __u8 claw_state;
246 struct irb *irb;
247 struct ccw_device *cdev; /* pointer to the channel device */
248 struct net_device *ndev;
249 wait_queue_head_t wait;
250 struct tasklet_struct tasklet;
251 struct timer_list timer;
252 unsigned long flag_a; /* atomic flags */
253#define CLAW_BH_ACTIVE 0
254 unsigned long flag_b; /* atomic flags */
255#define CLAW_WRITE_ACTIVE 0
256 __u8 last_dstat;
257 __u8 flag;
258 struct sk_buff_head collect_queue;
259 spinlock_t collect_lock;
260#define CLAW_WRITE 0x02 /* - Set if this is a write channel */
261#define CLAW_READ 0x01 /* - Set if this is a read channel */
262#define CLAW_TIMER 0x80 /* - Set if timer made the wake_up */
263};
264
265/*--------------------------------------------------------------*
266* CLAW environment block *
267*---------------------------------------------------------------*/
268
269struct claw_env {
270 unsigned int devno[2]; /* device number */
271 char host_name[9]; /* Host name */
272 char adapter_name [9]; /* adapter name */
273 char api_type[9]; /* TCPIP, API or PACKED */
274 void *p_priv; /* privptr */
275 __u16 read_buffers; /* read buffer number */
276 __u16 write_buffers; /* write buffer number */
277 __u16 read_size; /* read buffer size */
278 __u16 write_size; /* write buffer size */
279 __u16 dev_id; /* device ident */
280 __u8 packing; /* are we packing? */
281	volatile __u8 queme_switch;	/* gate for immediate packing */
282 volatile unsigned long pk_delay; /* Delay for adaptive packing */
283 __u8 in_use; /* device active flag */
284 struct net_device *ndev; /* backward ptr to the net dev*/
285};
286
287/*--------------------------------------------------------------*
288* CLAW main control block *
289*---------------------------------------------------------------*/
290
291struct claw_privbk {
292 void *p_buff_ccw;
293 __u32 p_buff_ccw_num;
294 void *p_buff_read;
295 __u32 p_buff_read_num;
296 __u32 p_buff_pages_perread;
297 void *p_buff_write;
298 __u32 p_buff_write_num;
299 __u32 p_buff_pages_perwrite;
300 long active_link_ID; /* Active logical link ID */
301 struct ccwbk *p_write_free_chain; /* pointer to free ccw chain */
302 struct ccwbk *p_write_active_first; /* ptr to the first write ccw */
303 struct ccwbk *p_write_active_last; /* ptr to the last write ccw */
304 struct ccwbk *p_read_active_first; /* ptr to the first read ccw */
305 struct ccwbk *p_read_active_last; /* ptr to the last read ccw */
306 struct endccw *p_end_ccw; /*ptr to ending ccw */
307 struct ccwbk *p_claw_signal_blk; /* ptr to signal block */
308 __u32 write_free_count; /* number of free bufs for write */
309 struct net_device_stats stats; /* device status */
310 struct chbk channel[2]; /* Channel control blocks */
311 __u8 mtc_skipping;
312 int mtc_offset;
313 int mtc_logical_link;
314 void *p_mtc_envelope;
315 struct sk_buff *pk_skb; /* packing buffer */
316 int pk_cnt;
317 struct clawctl ctl_bk;
318 struct claw_env *p_env;
319 __u8 system_validate_comp;
320 __u8 release_pend;
321 __u8 checksum_received_ip_pkts;
322 __u8 buffs_alloc;
323 struct endccw end_ccw;
324 unsigned long tbusy;
325
326};
327
328
329/************************************************************/
330/* define global constants */
331/************************************************************/
332
333#define CCWBK_SIZE sizeof(struct ccwbk)
334
335
diff --git a/drivers/s390/net/ctcdbug.c b/drivers/s390/net/ctcdbug.c
new file mode 100644
index 000000000000..2c86bfa11b2f
--- /dev/null
+++ b/drivers/s390/net/ctcdbug.c
@@ -0,0 +1,83 @@
1/*
2 *
3 * linux/drivers/s390/net/ctcdbug.c ($Revision: 1.4 $)
4 *
5 * CTC / ESCON network driver - s390 debug facility (dbf) support.
6 *
7 * Copyright 2000,2003 IBM Corporation
8 *
9 * Author(s): Original Code written by
10 * Peter Tiedemann (ptiedem@de.ibm.com)
11 *
12 * $Revision: 1.4 $ $Date: 2004/08/04 10:11:59 $
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#include "ctcdbug.h"
30
31/**
32 * Debug Facility Stuff
33 */
34debug_info_t *ctc_dbf_setup = NULL;
35debug_info_t *ctc_dbf_data = NULL;
36debug_info_t *ctc_dbf_trace = NULL;
37
38DEFINE_PER_CPU(char[256], ctc_dbf_txt_buf);
39
40void
41ctc_unregister_dbf_views(void)
42{
43 if (ctc_dbf_setup)
44 debug_unregister(ctc_dbf_setup);
45 if (ctc_dbf_data)
46 debug_unregister(ctc_dbf_data);
47 if (ctc_dbf_trace)
48 debug_unregister(ctc_dbf_trace);
49}
50int
51ctc_register_dbf_views(void)
52{
53 ctc_dbf_setup = debug_register(CTC_DBF_SETUP_NAME,
54 CTC_DBF_SETUP_INDEX,
55 CTC_DBF_SETUP_NR_AREAS,
56 CTC_DBF_SETUP_LEN);
57 ctc_dbf_data = debug_register(CTC_DBF_DATA_NAME,
58 CTC_DBF_DATA_INDEX,
59 CTC_DBF_DATA_NR_AREAS,
60 CTC_DBF_DATA_LEN);
61 ctc_dbf_trace = debug_register(CTC_DBF_TRACE_NAME,
62 CTC_DBF_TRACE_INDEX,
63 CTC_DBF_TRACE_NR_AREAS,
64 CTC_DBF_TRACE_LEN);
65
66 if ((ctc_dbf_setup == NULL) || (ctc_dbf_data == NULL) ||
67 (ctc_dbf_trace == NULL)) {
68 ctc_unregister_dbf_views();
69 return -ENOMEM;
70 }
71 debug_register_view(ctc_dbf_setup, &debug_hex_ascii_view);
72 debug_set_level(ctc_dbf_setup, CTC_DBF_SETUP_LEVEL);
73
74 debug_register_view(ctc_dbf_data, &debug_hex_ascii_view);
75 debug_set_level(ctc_dbf_data, CTC_DBF_DATA_LEVEL);
76
77 debug_register_view(ctc_dbf_trace, &debug_hex_ascii_view);
78 debug_set_level(ctc_dbf_trace, CTC_DBF_TRACE_LEVEL);
79
80 return 0;
81}
82
83
diff --git a/drivers/s390/net/ctcdbug.h b/drivers/s390/net/ctcdbug.h
new file mode 100644
index 000000000000..ef8883951720
--- /dev/null
+++ b/drivers/s390/net/ctcdbug.h
@@ -0,0 +1,123 @@
1/*
2 *
3 * linux/drivers/s390/net/ctcdbug.h ($Revision: 1.4 $)
4 *
5 * CTC / ESCON network driver - s390 debug facility (dbf) support.
6 *
7 * Copyright 2000,2003 IBM Corporation
8 *
9 * Author(s): Original Code written by
10 * Peter Tiedemann (ptiedem@de.ibm.com)
11 *
12 * $Revision: 1.4 $ $Date: 2004/10/15 09:26:58 $
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29
30#include <asm/debug.h>
31/**
32 * Debug Facility stuff
33 */
34#define CTC_DBF_SETUP_NAME "ctc_setup"
35#define CTC_DBF_SETUP_LEN 16
36#define CTC_DBF_SETUP_INDEX 3
37#define CTC_DBF_SETUP_NR_AREAS 1
38#define CTC_DBF_SETUP_LEVEL 3
39
40#define CTC_DBF_DATA_NAME "ctc_data"
41#define CTC_DBF_DATA_LEN 128
42#define CTC_DBF_DATA_INDEX 3
43#define CTC_DBF_DATA_NR_AREAS 1
44#define CTC_DBF_DATA_LEVEL 2
45
46#define CTC_DBF_TRACE_NAME "ctc_trace"
47#define CTC_DBF_TRACE_LEN 16
48#define CTC_DBF_TRACE_INDEX 2
49#define CTC_DBF_TRACE_NR_AREAS 2
50#define CTC_DBF_TRACE_LEVEL 3
51
52#define DBF_TEXT(name,level,text) \
53 do { \
54 debug_text_event(ctc_dbf_##name,level,text); \
55 } while (0)
56
57#define DBF_HEX(name,level,addr,len) \
58 do { \
59 debug_event(ctc_dbf_##name,level,(void*)(addr),len); \
60 } while (0)
61
62DECLARE_PER_CPU(char[256], ctc_dbf_txt_buf);
63extern debug_info_t *ctc_dbf_setup;
64extern debug_info_t *ctc_dbf_data;
65extern debug_info_t *ctc_dbf_trace;
66
67
68#define DBF_TEXT_(name,level,text...) \
69 do { \
70 char* ctc_dbf_txt_buf = get_cpu_var(ctc_dbf_txt_buf); \
71 sprintf(ctc_dbf_txt_buf, text); \
72 debug_text_event(ctc_dbf_##name,level,ctc_dbf_txt_buf); \
73 put_cpu_var(ctc_dbf_txt_buf); \
74 } while (0)
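/*
 * Usage sketch: DBF_TEXT_(trace, 2, "ch=0x%x", ch->devno);
 * get_cpu_var() disables preemption and hands each CPU its own
 * 256-byte buffer, so concurrent CPUs cannot corrupt each other's
 * sprintf() target.
 */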
75
76#define DBF_SPRINTF(name,level,text...) \
77	do { \
78		/* always logs to the trace area, whatever "name" is */ \
79		debug_sprintf_event(ctc_dbf_trace, level, ##text ); \
80	} while (0)
81
82
83int ctc_register_dbf_views(void);
84
85void ctc_unregister_dbf_views(void);
86
87/**
88 * some more debug stuff
89 */
90
91#define HEXDUMP16(importance,header,ptr) \
92PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
93 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
94 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
95 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
96 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
97 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
98 *(((char*)ptr)+12),*(((char*)ptr)+13), \
99 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
100PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
101 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
102 *(((char*)ptr)+16),*(((char*)ptr)+17), \
103 *(((char*)ptr)+18),*(((char*)ptr)+19), \
104 *(((char*)ptr)+20),*(((char*)ptr)+21), \
105 *(((char*)ptr)+22),*(((char*)ptr)+23), \
106 *(((char*)ptr)+24),*(((char*)ptr)+25), \
107 *(((char*)ptr)+26),*(((char*)ptr)+27), \
108 *(((char*)ptr)+28),*(((char*)ptr)+29), \
109 *(((char*)ptr)+30),*(((char*)ptr)+31));
110
111static inline void
112hex_dump(unsigned char *buf, size_t len)
113{
114 size_t i;
115
116 for (i = 0; i < len; i++) {
117 if (i && !(i % 16))
118 printk("\n");
119 printk("%02x ", *(buf + i));
120 }
121 printk("\n");
122}
123
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
new file mode 100644
index 000000000000..7266bf5ea659
--- /dev/null
+++ b/drivers/s390/net/ctcmain.c
@@ -0,0 +1,3304 @@
1/*
2 * $Id: ctcmain.c,v 1.72 2005/03/17 10:51:52 ptiedem Exp $
3 *
4 * CTC / ESCON network driver
5 *
6 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
8 * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
9 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
10 *		       Peter Tiedemann (ptiedem@de.ibm.com)
11 * Driver Model stuff by : Cornelia Huck <cohuck@de.ibm.com>
12 *
13 * Documentation used:
14 * - Principles of Operation (IBM doc#: SA22-7201-06)
15 * - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
16 * - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
17 * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
18 * - ESCON I/O Interface	      (IBM doc#: SA22-7202-02)
19 *
20 * and the source of the original CTC driver by:
21 * Dieter Wellerdiek (wel@de.ibm.com)
22 * Martin Schwidefsky (schwidefsky@de.ibm.com)
23 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
24 * Jochen Röhrig (roehrig@de.ibm.com)
25 *
26 * This program is free software; you can redistribute it and/or modify
27 * it under the terms of the GNU General Public License as published by
28 * the Free Software Foundation; either version 2, or (at your option)
29 * any later version.
30 *
31 * This program is distributed in the hope that it will be useful,
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
34 * GNU General Public License for more details.
35 *
36 * You should have received a copy of the GNU General Public License
37 * along with this program; if not, write to the Free Software
38 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
39 *
40 * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.72 $
41 *
42 */
43
44#undef DEBUG
45
46#include <linux/module.h>
47#include <linux/init.h>
48#include <linux/kernel.h>
49#include <linux/slab.h>
50#include <linux/errno.h>
51#include <linux/types.h>
52#include <linux/interrupt.h>
53#include <linux/timer.h>
54#include <linux/sched.h>
55#include <linux/bitops.h>
56
57#include <linux/signal.h>
58#include <linux/string.h>
59
60#include <linux/ip.h>
61#include <linux/if_arp.h>
62#include <linux/tcp.h>
63#include <linux/skbuff.h>
64#include <linux/ctype.h>
65#include <net/dst.h>
66
67#include <asm/io.h>
68#include <asm/ccwdev.h>
69#include <asm/ccwgroup.h>
70#include <asm/uaccess.h>
71
72#include <asm/idals.h>
73
74#include "ctctty.h"
75#include "fsm.h"
76#include "cu3088.h"
77#include "ctcdbug.h"
78
79MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
80MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
81MODULE_LICENSE("GPL");
82
83/**
84 * CCW commands, used in this driver.
85 */
86#define CCW_CMD_WRITE 0x01
87#define CCW_CMD_READ 0x02
88#define CCW_CMD_SET_EXTENDED 0xc3
89#define CCW_CMD_PREPARE 0xe3
90
91#define CTC_PROTO_S390 0
92#define CTC_PROTO_LINUX 1
93#define CTC_PROTO_LINUX_TTY 2
94#define CTC_PROTO_OS390 3
95#define CTC_PROTO_MAX 3
96
97#define CTC_BUFSIZE_LIMIT 65535
98#define CTC_BUFSIZE_DEFAULT 32768
99
100#define CTC_TIMEOUT_5SEC 5000
101
102#define CTC_INITIAL_BLOCKLEN 2
103
104#define READ 0
105#define WRITE 1
106
107#define CTC_ID_SIZE BUS_ID_SIZE+3
108
109
110struct ctc_profile {
111 unsigned long maxmulti;
112 unsigned long maxcqueue;
113 unsigned long doios_single;
114 unsigned long doios_multi;
115 unsigned long txlen;
116 unsigned long tx_time;
117 struct timespec send_stamp;
118};
119
120/**
121 * Definition of one channel
122 */
123struct channel {
124
125 /**
126 * Pointer to next channel in list.
127 */
128 struct channel *next;
129 char id[CTC_ID_SIZE];
130 struct ccw_device *cdev;
131
132 /**
133 * Type of this channel.
134 * CTC/A or Escon for valid channels.
135 */
136 enum channel_types type;
137
138 /**
139 * Misc. flags. See CHANNEL_FLAGS_... below
140 */
141 __u32 flags;
142
143 /**
144 * The protocol of this channel
145 */
146 __u16 protocol;
147
148 /**
149 * I/O and irq related stuff
150 */
151 struct ccw1 *ccw;
152 struct irb *irb;
153
154 /**
155 * RX/TX buffer size
156 */
157 int max_bufsize;
158
159 /**
160 * Transmit/Receive buffer.
161 */
162 struct sk_buff *trans_skb;
163
164 /**
165 * Universal I/O queue.
166 */
167 struct sk_buff_head io_queue;
168
169 /**
170 * TX queue for collecting skb's during busy.
171 */
172 struct sk_buff_head collect_queue;
173
174 /**
175 * Amount of data in collect_queue.
176 */
177 int collect_len;
178
179 /**
180 * spinlock for collect_queue and collect_len
181 */
182 spinlock_t collect_lock;
183
184 /**
185	 * Timer for detecting unresponsive
186	 * I/O operations.
187 */
188 fsm_timer timer;
189
190 /**
191 * Retry counter for misc. operations.
192 */
193 int retry;
194
195 /**
196 * The finite state machine of this channel
197 */
198 fsm_instance *fsm;
199
200 /**
201 * The corresponding net_device this channel
202 * belongs to.
203 */
204 struct net_device *netdev;
205
206 struct ctc_profile prof;
207
208 unsigned char *trans_skb_data;
209
210 __u16 logflags;
211};
212
213#define CHANNEL_FLAGS_READ 0
214#define CHANNEL_FLAGS_WRITE 1
215#define CHANNEL_FLAGS_INUSE 2
216#define CHANNEL_FLAGS_BUFSIZE_CHANGED 4
217#define CHANNEL_FLAGS_FAILED 8
218#define CHANNEL_FLAGS_WAITIRQ 16
219#define CHANNEL_FLAGS_RWMASK 1
220#define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK)
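/* Example: CHANNEL_FLAGS_RWMASK keeps only bit 0, so for a write
 * channel CHANNEL_DIRECTION(ch->flags) == CHANNEL_FLAGS_WRITE (1) and
 * for a read channel it is CHANNEL_FLAGS_READ (0), regardless of the
 * INUSE/FAILED/WAITIRQ bits. */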
221
222#define LOG_FLAG_ILLEGALPKT 1
223#define LOG_FLAG_ILLEGALSIZE 2
224#define LOG_FLAG_OVERRUN 4
225#define LOG_FLAG_NOMEM 8
226
227#define CTC_LOGLEVEL_INFO 1
228#define CTC_LOGLEVEL_NOTICE 2
229#define CTC_LOGLEVEL_WARN 4
230#define CTC_LOGLEVEL_EMERG 8
231#define CTC_LOGLEVEL_ERR 16
232#define CTC_LOGLEVEL_DEBUG 32
233#define CTC_LOGLEVEL_CRIT 64
234
235#define CTC_LOGLEVEL_DEFAULT \
236(CTC_LOGLEVEL_INFO | CTC_LOGLEVEL_NOTICE | CTC_LOGLEVEL_WARN | CTC_LOGLEVEL_CRIT)
237
238#define CTC_LOGLEVEL_MAX ((CTC_LOGLEVEL_CRIT<<1)-1)
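/* Worked values: CTC_LOGLEVEL_DEFAULT = 1|2|4|64 = 0x47, and
 * CTC_LOGLEVEL_MAX = (64 << 1) - 1 = 0x7f, i.e. all seven level bits
 * set. */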
239
240static int loglevel = CTC_LOGLEVEL_DEFAULT;
241
242#define ctc_pr_debug(fmt, arg...) \
243do { if (loglevel & CTC_LOGLEVEL_DEBUG) printk(KERN_DEBUG fmt,##arg); } while (0)
244
245#define ctc_pr_info(fmt, arg...) \
246do { if (loglevel & CTC_LOGLEVEL_INFO) printk(KERN_INFO fmt,##arg); } while (0)
247
248#define ctc_pr_notice(fmt, arg...) \
249do { if (loglevel & CTC_LOGLEVEL_NOTICE) printk(KERN_NOTICE fmt,##arg); } while (0)
250
251#define ctc_pr_warn(fmt, arg...) \
252do { if (loglevel & CTC_LOGLEVEL_WARN) printk(KERN_WARNING fmt,##arg); } while (0)
253
254#define ctc_pr_emerg(fmt, arg...) \
255do { if (loglevel & CTC_LOGLEVEL_EMERG) printk(KERN_EMERG fmt,##arg); } while (0)
256
257#define ctc_pr_err(fmt, arg...) \
258do { if (loglevel & CTC_LOGLEVEL_ERR) printk(KERN_ERR fmt,##arg); } while (0)
259
260#define ctc_pr_crit(fmt, arg...) \
261do { if (loglevel & CTC_LOGLEVEL_CRIT) printk(KERN_CRIT fmt,##arg); } while (0)
262
263/**
264 * Linked list of all detected channels.
265 */
266static struct channel *channels = NULL;
267
268struct ctc_priv {
269 struct net_device_stats stats;
270 unsigned long tbusy;
271 /**
272 * The finite state machine of this interface.
273 */
274 fsm_instance *fsm;
275 /**
276 * The protocol of this device
277 */
278 __u16 protocol;
279 /**
280 * Timer for restarting after I/O Errors
281 */
282 fsm_timer restart_timer;
283
284 int buffer_size;
285
286 struct channel *channel[2];
287};
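
/**
 * Note: channel[] is indexed by transfer direction, i.e.
 * privptr->channel[READ] is the RX channel and
 * privptr->channel[WRITE] the TX channel, matching the READ/WRITE
 * constants used with CHANNEL_DIRECTION() throughout this file.
 */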
288
289/**
290 * Definition of our link level header.
291 */
292struct ll_header {
293 __u16 length;
294 __u16 type;
295 __u16 unused;
296};
297#define LL_HEADER_LENGTH (sizeof(struct ll_header))
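
/**
 * Resulting on-wire block layout (example values; blocks are built
 * in ch_action_txdone() and consumed by ctc_unpack_skb() below):
 *
 *   +-----------+--------------------------+---------+----
 *   | block len | length | type   | unused | payload | ...
 *   | __u16     | __u16  | __u16  | __u16  |         |
 *   +-----------+--------------------------+---------+----
 *
 * A block carrying a single 40-byte IP packet would thus have
 * block len = 2 + LL_HEADER_LENGTH + 40 = 48,
 * length = LL_HEADER_LENGTH + 40 = 46 and type = ETH_P_IP.
 */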
298
299/**
300 * Compatibility helpers for busy handling
301 * of network devices.
302 */
303static __inline__ void
304ctc_clear_busy(struct net_device * dev)
305{
306 clear_bit(0, &(((struct ctc_priv *) dev->priv)->tbusy));
307 if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
308 netif_wake_queue(dev);
309}
310
311static __inline__ int
312ctc_test_and_set_busy(struct net_device * dev)
313{
314 if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
315 netif_stop_queue(dev);
316 return test_and_set_bit(0, &((struct ctc_priv *) dev->priv)->tbusy);
317}
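
/**
 * Usage sketch (a hypothetical transmit path, not the driver's
 * actual xmit routine): the two helpers form a test-and-set gate
 * around transmission:
 *
 *   if (ctc_test_and_set_busy(dev))
 *       return 1;                   somebody else is transmitting
 *   ... queue the skb, start channel I/O ...
 *   ctc_clear_busy(dev);            reopen the queue when done
 *
 * For CTC_PROTO_LINUX_TTY the netif queue is left untouched,
 * presumably because the tty layer does its own flow control.
 */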
318
319/**
320 * Print Banner.
321 */
322static void
323print_banner(void)
324{
325 static int printed = 0;
326 char vbuf[] = "$Revision: 1.72 $";
327 char *version = vbuf;
328
329 if (printed)
330 return;
331 if ((version = strchr(version, ':'))) {
332 char *p = strchr(version + 1, '$');
333 if (p)
334 *p = '\0';
335 } else
336 version = " ??? ";
337 printk(KERN_INFO "CTC driver Version%s"
338#ifdef DEBUG
339 " (DEBUG-VERSION, " __DATE__ __TIME__ ")"
340#endif
341 " initialized\n", version);
342 printed = 1;
343}
344
345/**
346 * Return type of a detected device.
347 */
348static enum channel_types
349get_channel_type(struct ccw_device_id *id)
350{
351 enum channel_types type = (enum channel_types) id->driver_info;
352
353 if (type == channel_type_ficon)
354 type = channel_type_escon;
355
356 return type;
357}
358
359/**
360 * States of the interface statemachine.
361 */
362enum dev_states {
363 DEV_STATE_STOPPED,
364 DEV_STATE_STARTWAIT_RXTX,
365 DEV_STATE_STARTWAIT_RX,
366 DEV_STATE_STARTWAIT_TX,
367 DEV_STATE_STOPWAIT_RXTX,
368 DEV_STATE_STOPWAIT_RX,
369 DEV_STATE_STOPWAIT_TX,
370 DEV_STATE_RUNNING,
371 /**
372 * MUST always be the last element!
373 */
374 NR_DEV_STATES
375};
376
377static const char *dev_state_names[] = {
378 "Stopped",
379 "StartWait RXTX",
380 "StartWait RX",
381 "StartWait TX",
382 "StopWait RXTX",
383 "StopWait RX",
384 "StopWait TX",
385 "Running",
386};
387
388/**
389 * Events of the interface statemachine.
390 */
391enum dev_events {
392 DEV_EVENT_START,
393 DEV_EVENT_STOP,
394 DEV_EVENT_RXUP,
395 DEV_EVENT_TXUP,
396 DEV_EVENT_RXDOWN,
397 DEV_EVENT_TXDOWN,
398 DEV_EVENT_RESTART,
399 /**
400 * MUST always be the last element!
401 */
402 NR_DEV_EVENTS
403};
404
405static const char *dev_event_names[] = {
406 "Start",
407 "Stop",
408 "RX up",
409 "TX up",
410 "RX down",
411 "TX down",
412 "Restart",
413};
414
415/**
416 * Events of the channel statemachine
417 */
418enum ch_events {
419 /**
420 * Events, representing return code of
421 * I/O operations (ccw_device_start, ccw_device_halt et al.)
422 */
423 CH_EVENT_IO_SUCCESS,
424 CH_EVENT_IO_EBUSY,
425 CH_EVENT_IO_ENODEV,
426 CH_EVENT_IO_EIO,
427 CH_EVENT_IO_UNKNOWN,
428
429 CH_EVENT_ATTNBUSY,
430 CH_EVENT_ATTN,
431 CH_EVENT_BUSY,
432
433 /**
434 * Events, representing unit-check
435 */
436 CH_EVENT_UC_RCRESET,
437 CH_EVENT_UC_RSRESET,
438 CH_EVENT_UC_TXTIMEOUT,
439 CH_EVENT_UC_TXPARITY,
440 CH_EVENT_UC_HWFAIL,
441 CH_EVENT_UC_RXPARITY,
442 CH_EVENT_UC_ZERO,
443 CH_EVENT_UC_UNKNOWN,
444
445 /**
446 * Events, representing subchannel-check
447 */
448 CH_EVENT_SC_UNKNOWN,
449
450 /**
451 * Events, representing machine checks
452 */
453 CH_EVENT_MC_FAIL,
454 CH_EVENT_MC_GOOD,
455
456 /**
457 * Event, representing normal IRQ
458 */
459 CH_EVENT_IRQ,
460 CH_EVENT_FINSTAT,
461
462 /**
463 * Event, representing timer expiry.
464 */
465 CH_EVENT_TIMER,
466
467 /**
468 * Events, representing commands from upper levels.
469 */
470 CH_EVENT_START,
471 CH_EVENT_STOP,
472
473 /**
474 * MUST always be the last element!
475 */
476 NR_CH_EVENTS,
477};
478
479static const char *ch_event_names[] = {
480 "ccw_device success",
481 "ccw_device busy",
482 "ccw_device enodev",
483 "ccw_device ioerr",
484 "ccw_device unknown",
485
486 "Status ATTN & BUSY",
487 "Status ATTN",
488 "Status BUSY",
489
490 "Unit check remote reset",
491 "Unit check remote system reset",
492 "Unit check TX timeout",
493 "Unit check TX parity",
494 "Unit check Hardware failure",
495 "Unit check RX parity",
496 "Unit check ZERO",
497 "Unit check Unknown",
498
499 "SubChannel check Unknown",
500
501 "Machine check failure",
502 "Machine check operational",
503
504 "IRQ normal",
505 "IRQ final",
506
507 "Timer",
508
509 "Start",
510 "Stop",
511};
512
513/**
514 * States of the channel statemachine.
515 */
516enum ch_states {
517 /**
518 * Channel not assigned to any device,
519 * initial state, direction invalid
520 */
521 CH_STATE_IDLE,
522
523 /**
524 * Channel assigned but not operating
525 */
526 CH_STATE_STOPPED,
527 CH_STATE_STARTWAIT,
528 CH_STATE_STARTRETRY,
529 CH_STATE_SETUPWAIT,
530 CH_STATE_RXINIT,
531 CH_STATE_TXINIT,
532 CH_STATE_RX,
533 CH_STATE_TX,
534 CH_STATE_RXIDLE,
535 CH_STATE_TXIDLE,
536 CH_STATE_RXERR,
537 CH_STATE_TXERR,
538 CH_STATE_TERM,
539 CH_STATE_DTERM,
540 CH_STATE_NOTOP,
541
542 /**
543 * MUST always be the last element!
544 */
545 NR_CH_STATES,
546};
547
548static const char *ch_state_names[] = {
549 "Idle",
550 "Stopped",
551 "StartWait",
552 "StartRetry",
553 "SetupWait",
554 "RX init",
555 "TX init",
556 "RX",
557 "TX",
558 "RX idle",
559 "TX idle",
560 "RX error",
561 "TX error",
562 "Terminating",
563 "Restarting",
564 "Not operational",
565};
566
567#ifdef DEBUG
568/**
569 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
570 *
571 * @param skb The sk_buff to dump.
572 * @param offset Offset relative to skb->data where the dump starts.
573 */
574static void
575ctc_dump_skb(struct sk_buff *skb, int offset)
576{
577 unsigned char *p = skb->data;
578 __u16 bl;
579 struct ll_header *header;
580 int i;
581
582 if (!(loglevel & CTC_LOGLEVEL_DEBUG))
583 return;
584 p += offset;
585 bl = *((__u16 *) p);
586 p += 2;
587 header = (struct ll_header *) p;
588 p -= 2;
589
590 printk(KERN_DEBUG "dump:\n");
591 printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);
592
593 printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
594 header->length);
595 printk(KERN_DEBUG "h->type=%04x\n", header->type);
596 printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
597 if (bl > 16)
598 bl = 16;
599 printk(KERN_DEBUG "data: ");
600 for (i = 0; i < bl; i++)
601 printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
602 printk("\n");
603}
604#else
605static inline void
606ctc_dump_skb(struct sk_buff *skb, int offset)
607{
608}
609#endif
610
611/**
612 * Unpack a just received skb and hand it over to
613 * upper layers.
614 *
615 * @param ch The channel where this skb has been received.
616 * @param pskb The received skb.
617 */
618static __inline__ void
619ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
620{
621 struct net_device *dev = ch->netdev;
622 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
623 __u16 len = *((__u16 *) pskb->data);
624
625 DBF_TEXT(trace, 4, __FUNCTION__);
626 skb_put(pskb, 2 + LL_HEADER_LENGTH);
627 skb_pull(pskb, 2);
628 pskb->dev = dev;
629 pskb->ip_summed = CHECKSUM_UNNECESSARY;
630 while (len > 0) {
631 struct sk_buff *skb;
632 struct ll_header *header = (struct ll_header *) pskb->data;
633
634 skb_pull(pskb, LL_HEADER_LENGTH);
635 if ((ch->protocol == CTC_PROTO_S390) &&
636 (header->type != ETH_P_IP)) {
637
638#ifndef DEBUG
639 if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
640#endif
641 /**
642 * Check the packet type only if we stick strictly
643 * to the S/390 (OS/390) protocol, which supports
644 * IP only. Otherwise allow any packet
645 * type.
646 */
647 ctc_pr_warn(
648 "%s Illegal packet type 0x%04x received, dropping\n",
649 dev->name, header->type);
650 ch->logflags |= LOG_FLAG_ILLEGALPKT;
651#ifndef DEBUG
652 }
653#endif
654#ifdef DEBUG
655 ctc_dump_skb(pskb, -6);
656#endif
657 privptr->stats.rx_dropped++;
658 privptr->stats.rx_frame_errors++;
659 return;
660 }
661 pskb->protocol = ntohs(header->type);
662 if (header->length <= LL_HEADER_LENGTH) {
663#ifndef DEBUG
664 if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
665#endif
666 ctc_pr_warn(
667 "%s Illegal packet size %d "
668 "received (MTU=%d blocklen=%d), "
669 "dropping\n", dev->name, header->length,
670 dev->mtu, len);
671 ch->logflags |= LOG_FLAG_ILLEGALSIZE;
672#ifndef DEBUG
673 }
674#endif
675#ifdef DEBUG
676 ctc_dump_skb(pskb, -6);
677#endif
678 privptr->stats.rx_dropped++;
679 privptr->stats.rx_length_errors++;
680 return;
681 }
682 header->length -= LL_HEADER_LENGTH;
683 len -= LL_HEADER_LENGTH;
684 if ((header->length > skb_tailroom(pskb)) ||
685 (header->length > len)) {
686#ifndef DEBUG
687 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
688#endif
689 ctc_pr_warn(
690 "%s Illegal packet size %d "
691 "(beyond the end of received data), "
692 "dropping\n", dev->name, header->length);
693 ch->logflags |= LOG_FLAG_OVERRUN;
694#ifndef DEBUG
695 }
696#endif
697#ifdef DEBUG
698 ctc_dump_skb(pskb, -6);
699#endif
700 privptr->stats.rx_dropped++;
701 privptr->stats.rx_length_errors++;
702 return;
703 }
704 skb_put(pskb, header->length);
705 pskb->mac.raw = pskb->data;
706 len -= header->length;
707 skb = dev_alloc_skb(pskb->len);
708 if (!skb) {
709#ifndef DEBUG
710 if (!(ch->logflags & LOG_FLAG_NOMEM)) {
711#endif
712 ctc_pr_warn(
713 "%s Out of memory in ctc_unpack_skb\n",
714 dev->name);
715 ch->logflags |= LOG_FLAG_NOMEM;
716#ifndef DEBUG
717 }
718#endif
719 privptr->stats.rx_dropped++;
720 return;
721 }
722 memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
723 skb->mac.raw = skb->data;
724 skb->dev = pskb->dev;
725 skb->protocol = pskb->protocol;
726 pskb->ip_summed = CHECKSUM_UNNECESSARY;
727 if (ch->protocol == CTC_PROTO_LINUX_TTY)
728 ctc_tty_netif_rx(skb);
729 else
730 netif_rx_ni(skb);
731 /**
732 * Successful rx; reset logflags
733 */
734 ch->logflags = 0;
735 dev->last_rx = jiffies;
736 privptr->stats.rx_packets++;
737 privptr->stats.rx_bytes += skb->len;
738 if (len > 0) {
739 skb_pull(pskb, header->length);
740 if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
741#ifndef DEBUG
742 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
743#endif
744 ctc_pr_warn(
745 "%s Overrun in ctc_unpack_skb\n",
746 dev->name);
747 ch->logflags |= LOG_FLAG_OVERRUN;
748#ifndef DEBUG
749 }
750#endif
751 return;
752 }
753 skb_put(pskb, LL_HEADER_LENGTH);
754 }
755 }
756}
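
/**
 * Walk-through with example values: a block received as
 *
 *   block len = 48 | ll_header (length=46, type=ETH_P_IP) | 40 bytes IP
 *
 * reaches this function with the leading __u16 already rewritten
 * to 46 by ch_action_rx() below (the two length bytes are not
 * counted there). The loop pulls the 6-byte ll_header, copies the
 * 40-byte payload into a fresh skb, hands it to netif_rx_ni() (or
 * ctc_tty_netif_rx() for the tty protocol) and terminates once
 * len (46 - 6 - 40) reaches zero.
 */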
757
758/**
759 * Check return code of a preceding ccw_device call (halt_IO etc.).
760 *
761 * @param ch The channel the error belongs to.
762 * @param return_code The error code to inspect.
763 */
764static inline void
765ccw_check_return_code(struct channel *ch, int return_code, char *msg)
766{
767 DBF_TEXT(trace, 5, __FUNCTION__);
768 switch (return_code) {
769 case 0:
770 fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
771 break;
772 case -EBUSY:
773 ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg);
774 fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
775 break;
776 case -ENODEV:
777 ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
778 ch->id, msg);
779 fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
780 break;
781 case -EIO:
782 ctc_pr_emerg("%s (%s): Status pending... \n",
783 ch->id, msg);
784 fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
785 break;
786 default:
787 ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
788 ch->id, msg, return_code);
789 fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
790 }
791}
792
793/**
794 * Check sense of a unit check.
795 *
796 * @param ch The channel the sense code belongs to.
797 * @param sense The sense code to inspect.
798 */
799static inline void
800ccw_unit_check(struct channel *ch, unsigned char sense)
801{
802 DBF_TEXT(trace, 5, __FUNCTION__);
803 if (sense & SNS0_INTERVENTION_REQ) {
804 if (sense & 0x01) {
805 if (ch->protocol != CTC_PROTO_LINUX_TTY)
806 ctc_pr_debug("%s: Interface disc. or Sel. reset "
807 "(remote)\n", ch->id);
808 fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
809 } else {
810 ctc_pr_debug("%s: System reset (remote)\n", ch->id);
811 fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
812 }
813 } else if (sense & SNS0_EQUIPMENT_CHECK) {
814 if (sense & SNS0_BUS_OUT_CHECK) {
815 ctc_pr_warn("%s: Hardware malfunction (remote)\n",
816 ch->id);
817 fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
818 } else {
819 ctc_pr_warn("%s: Read-data parity error (remote)\n",
820 ch->id);
821 fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
822 }
823 } else if (sense & SNS0_BUS_OUT_CHECK) {
824 if (sense & 0x04) {
825 ctc_pr_warn("%s: Data-streaming timeout\n", ch->id);
826 fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
827 } else {
828 ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
829 fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
830 }
831 } else if (sense & SNS0_CMD_REJECT) {
832 ctc_pr_warn("%s: Command reject\n", ch->id);
833 } else if (sense == 0) {
834 ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
835 fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
836 } else {
837 ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
838 ch->id, sense);
839 fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
840 }
841}
842
843static void
844ctc_purge_skb_queue(struct sk_buff_head *q)
845{
846 struct sk_buff *skb;
847
848 DBF_TEXT(trace, 5, __FUNCTION__);
849
850 while ((skb = skb_dequeue(q))) {
851 atomic_dec(&skb->users);
852 dev_kfree_skb_irq(skb);
853 }
854}
855
856static __inline__ int
857ctc_checkalloc_buffer(struct channel *ch, int warn)
858{
859 DBF_TEXT(trace, 5, __FUNCTION__);
860 if ((ch->trans_skb == NULL) ||
861 (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
862 if (ch->trans_skb != NULL)
863 dev_kfree_skb(ch->trans_skb);
864 clear_normalized_cda(&ch->ccw[1]);
865 ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
866 GFP_ATOMIC | GFP_DMA);
867 if (ch->trans_skb == NULL) {
868 if (warn)
869 ctc_pr_warn(
870 "%s: Couldn't alloc %s trans_skb\n",
871 ch->id,
872 (CHANNEL_DIRECTION(ch->flags) == READ) ?
873 "RX" : "TX");
874 return -ENOMEM;
875 }
876 ch->ccw[1].count = ch->max_bufsize;
877 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
878 dev_kfree_skb(ch->trans_skb);
879 ch->trans_skb = NULL;
880 if (warn)
881 ctc_pr_warn(
882 "%s: set_normalized_cda for %s "
883 "trans_skb failed, dropping packets\n",
884 ch->id,
885 (CHANNEL_DIRECTION(ch->flags) == READ) ?
886 "RX" : "TX");
887 return -ENOMEM;
888 }
889 ch->ccw[1].count = 0;
890 ch->trans_skb_data = ch->trans_skb->data;
891 ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
892 }
893 return 0;
894}
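
/**
 * Note on the helper above: the transfer buffer is (re)allocated
 * lazily, so code that changes the buffer size only has to set the
 * flag and let the next call on the I/O path do the swap; roughly
 * (new_size is a placeholder):
 *
 *   ch->max_bufsize = new_size;
 *   ch->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
 *   ...
 *   if (ctc_checkalloc_buffer(ch, 1))
 *       return;                     out of memory, retry later
 */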
895
896/**
897 * Dummy NOP action for statemachines
898 */
899static void
900fsm_action_nop(fsm_instance * fi, int event, void *arg)
901{
902}
903
904/**
905 * Actions for channel - statemachines.
906 *****************************************************************************/
907
908/**
909 * Normal data has been sent. Free the corresponding
910 * skb (it's in io_queue), clear the busy flag, and
911 * revert to idle state.
912 *
913 * @param fi An instance of a channel statemachine.
914 * @param event The event that just happened.
915 * @param arg Generic pointer, cast from channel * upon call.
916 */
917static void
918ch_action_txdone(fsm_instance * fi, int event, void *arg)
919{
920 struct channel *ch = (struct channel *) arg;
921 struct net_device *dev = ch->netdev;
922 struct ctc_priv *privptr = dev->priv;
923 struct sk_buff *skb;
924 int first = 1;
925 int i;
926 unsigned long duration;
927 struct timespec done_stamp = xtime;
928
929 DBF_TEXT(trace, 4, __FUNCTION__);
930
931 duration =
932 (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
933 (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
934 if (duration > ch->prof.tx_time)
935 ch->prof.tx_time = duration;
936
937 if (ch->irb->scsw.count != 0)
938 ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
939 dev->name, ch->irb->scsw.count);
940 fsm_deltimer(&ch->timer);
941 while ((skb = skb_dequeue(&ch->io_queue))) {
942 privptr->stats.tx_packets++;
943 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
944 if (first) {
945 privptr->stats.tx_bytes += 2;
946 first = 0;
947 }
948 atomic_dec(&skb->users);
949 dev_kfree_skb_irq(skb);
950 }
951 spin_lock(&ch->collect_lock);
952 clear_normalized_cda(&ch->ccw[4]);
953 if (ch->collect_len > 0) {
954 int rc;
955
956 if (ctc_checkalloc_buffer(ch, 1)) {
957 spin_unlock(&ch->collect_lock);
958 return;
959 }
960 ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data;
961 ch->trans_skb->len = 0;
962 if (ch->prof.maxmulti < (ch->collect_len + 2))
963 ch->prof.maxmulti = ch->collect_len + 2;
964 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
965 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
966 *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
967 i = 0;
968 while ((skb = skb_dequeue(&ch->collect_queue))) {
969 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
970 skb->len);
971 privptr->stats.tx_packets++;
972 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
973 atomic_dec(&skb->users);
974 dev_kfree_skb_irq(skb);
975 i++;
976 }
977 ch->collect_len = 0;
978 spin_unlock(&ch->collect_lock);
979 ch->ccw[1].count = ch->trans_skb->len;
980 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
981 ch->prof.send_stamp = xtime;
982 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
983 (unsigned long) ch, 0xff, 0);
984 ch->prof.doios_multi++;
985 if (rc != 0) {
986 privptr->stats.tx_dropped += i;
987 privptr->stats.tx_errors += i;
988 fsm_deltimer(&ch->timer);
989 ccw_check_return_code(ch, rc, "chained TX");
990 }
991 } else {
992 spin_unlock(&ch->collect_lock);
993 fsm_newstate(fi, CH_STATE_TXIDLE);
994 }
995 ctc_clear_busy(dev);
996}
997
998/**
999 * Initial data has been sent.
1000 * Notify device statemachine that we are up and
1001 * running.
1002 *
1003 * @param fi An instance of a channel statemachine.
1004 * @param event The event that just happened.
1005 * @param arg Generic pointer, cast from channel * upon call.
1006 */
1007static void
1008ch_action_txidle(fsm_instance * fi, int event, void *arg)
1009{
1010 struct channel *ch = (struct channel *) arg;
1011
1012 DBF_TEXT(trace, 4, __FUNCTION__);
1013 fsm_deltimer(&ch->timer);
1014 fsm_newstate(fi, CH_STATE_TXIDLE);
1015 fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
1016 ch->netdev);
1017}
1018
1019/**
1020 * Got normal data: check it for sanity, queue it up, allocate a
1021 * new buffer, trigger the bottom half, and initiate the next read.
1022 *
1023 * @param fi An instance of a channel statemachine.
1024 * @param event The event that just happened.
1025 * @param arg Generic pointer, cast from channel * upon call.
1026 */
1027static void
1028ch_action_rx(fsm_instance * fi, int event, void *arg)
1029{
1030 struct channel *ch = (struct channel *) arg;
1031 struct net_device *dev = ch->netdev;
1032 struct ctc_priv *privptr = dev->priv;
1033 int len = ch->max_bufsize - ch->irb->scsw.count;
1034 struct sk_buff *skb = ch->trans_skb;
1035 __u16 block_len = *((__u16 *) skb->data);
1036 int check_len;
1037 int rc;
1038
1039 DBF_TEXT(trace, 4, __FUNCTION__);
1040 fsm_deltimer(&ch->timer);
1041 if (len < 8) {
1042 ctc_pr_debug("%s: got packet with length %d < 8\n",
1043 dev->name, len);
1044 privptr->stats.rx_dropped++;
1045 privptr->stats.rx_length_errors++;
1046 goto again;
1047 }
1048 if (len > ch->max_bufsize) {
1049 ctc_pr_debug("%s: got packet with length %d > %d\n",
1050 dev->name, len, ch->max_bufsize);
1051 privptr->stats.rx_dropped++;
1052 privptr->stats.rx_length_errors++;
1053 goto again;
1054 }
1055
1056 /**
1057 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
1058 */
1059 switch (ch->protocol) {
1060 case CTC_PROTO_S390:
1061 case CTC_PROTO_OS390:
1062 check_len = block_len + 2;
1063 break;
1064 default:
1065 check_len = block_len;
1066 break;
1067 }
1068 if ((len < block_len) || (len > check_len)) {
1069 ctc_pr_debug("%s: got block length %d != rx length %d\n",
1070 dev->name, block_len, len);
1071#ifdef DEBUG
1072 ctc_dump_skb(skb, 0);
1073#endif
1074 *((__u16 *) skb->data) = len;
1075 privptr->stats.rx_dropped++;
1076 privptr->stats.rx_length_errors++;
1077 goto again;
1078 }
1079 block_len -= 2;
1080 if (block_len > 0) {
1081 *((__u16 *) skb->data) = block_len;
1082 ctc_unpack_skb(ch, skb);
1083 }
1084 again:
1085 skb->data = skb->tail = ch->trans_skb_data;
1086 skb->len = 0;
1087 if (ctc_checkalloc_buffer(ch, 1))
1088 return;
1089 ch->ccw[1].count = ch->max_bufsize;
1090 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
1091 if (rc != 0)
1092 ccw_check_return_code(ch, rc, "normal RX");
1093}
1094
1095static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);
1096
1097/**
1098 * Initialize connection by sending a __u16 of value 0.
1099 *
1100 * @param fi An instance of a channel statemachine.
1101 * @param event The event that just happened.
1102 * @param arg Generic pointer, cast from channel * upon call.
1103 */
1104static void
1105ch_action_firstio(fsm_instance * fi, int event, void *arg)
1106{
1107 struct channel *ch = (struct channel *) arg;
1108 int rc;
1109
1110 DBF_TEXT(trace, 4, __FUNCTION__);
1111
1112 if (fsm_getstate(fi) == CH_STATE_TXIDLE)
1113 ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
1114 fsm_deltimer(&ch->timer);
1115 if (ctc_checkalloc_buffer(ch, 1))
1116 return;
1117 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1118 (ch->protocol == CTC_PROTO_OS390)) {
1119 /* OS/390 resp. z/OS */
1120 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1121 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
1122 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
1123 CH_EVENT_TIMER, ch);
1124 ch_action_rxidle(fi, event, arg);
1125 } else {
1126 struct net_device *dev = ch->netdev;
1127 fsm_newstate(fi, CH_STATE_TXIDLE);
1128 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1129 DEV_EVENT_TXUP, dev);
1130 }
1131 return;
1132 }
1133
1134 /**
1135 * Don't set up a timer for receiving the initial RX frame
1136 * if in compatibility mode, since VM TCP delays the initial
1137 * frame until it has some data to send.
1138 */
1139 if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
1140 (ch->protocol != CTC_PROTO_S390))
1141 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1142
1143 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
1144 ch->ccw[1].count = 2; /* Transfer only length */
1145
1146 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
1147 ? CH_STATE_RXINIT : CH_STATE_TXINIT);
1148 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
1149 if (rc != 0) {
1150 fsm_deltimer(&ch->timer);
1151 fsm_newstate(fi, CH_STATE_SETUPWAIT);
1152 ccw_check_return_code(ch, rc, "init IO");
1153 }
1154 /**
1155 * If in compatibility mode, since we don't set up a timer, we
1156 * also signal the RX channel up immediately. This enables us
1157 * to send packets early, which in turn usually triggers some
1158 * reply from VM TCP that brings the RX channel to its
1159 * final state.
1160 */
1161 if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
1162 (ch->protocol == CTC_PROTO_S390)) {
1163 struct net_device *dev = ch->netdev;
1164 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
1165 dev);
1166 }
1167}
1168
1169/**
1170 * Got initial data, check it. If OK,
1171 * notify device statemachine that we are up and
1172 * running.
1173 *
1174 * @param fi An instance of a channel statemachine.
1175 * @param event The event that just happened.
1176 * @param arg Generic pointer, cast from channel * upon call.
1177 */
1178static void
1179ch_action_rxidle(fsm_instance * fi, int event, void *arg)
1180{
1181 struct channel *ch = (struct channel *) arg;
1182 struct net_device *dev = ch->netdev;
1183 __u16 buflen;
1184 int rc;
1185
1186 DBF_TEXT(trace, 4, __FUNCTION__);
1187 fsm_deltimer(&ch->timer);
1188 buflen = *((__u16 *) ch->trans_skb->data);
1189#ifdef DEBUG
1190 ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
1191#endif
1192 if (buflen >= CTC_INITIAL_BLOCKLEN) {
1193 if (ctc_checkalloc_buffer(ch, 1))
1194 return;
1195 ch->ccw[1].count = ch->max_bufsize;
1196 fsm_newstate(fi, CH_STATE_RXIDLE);
1197 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1198 (unsigned long) ch, 0xff, 0);
1199 if (rc != 0) {
1200 fsm_newstate(fi, CH_STATE_RXINIT);
1201 ccw_check_return_code(ch, rc, "initial RX");
1202 } else
1203 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1204 DEV_EVENT_RXUP, dev);
1205 } else {
1206 ctc_pr_debug("%s: Initial RX count %d not %d\n",
1207 dev->name, buflen, CTC_INITIAL_BLOCKLEN);
1208 ch_action_firstio(fi, event, arg);
1209 }
1210}
1211
1212/**
1213 * Set channel into extended mode.
1214 *
1215 * @param fi An instance of a channel statemachine.
1216 * @param event The event that just happened.
1217 * @param arg Generic pointer, cast from channel * upon call.
1218 */
1219static void
1220ch_action_setmode(fsm_instance * fi, int event, void *arg)
1221{
1222 struct channel *ch = (struct channel *) arg;
1223 int rc;
1224 unsigned long saveflags;
1225
1226 DBF_TEXT(trace, 4, __FUNCTION__);
1227 fsm_deltimer(&ch->timer);
1228 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1229 fsm_newstate(fi, CH_STATE_SETUPWAIT);
1230 saveflags = 0; /* avoids compiler warning with
1231 spin_unlock_irqrestore */
1232 if (event == CH_EVENT_TIMER) // only for timer not yet locked
1233 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1234 rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
1235 if (event == CH_EVENT_TIMER)
1236 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1237 if (rc != 0) {
1238 fsm_deltimer(&ch->timer);
1239 fsm_newstate(fi, CH_STATE_STARTWAIT);
1240 ccw_check_return_code(ch, rc, "set Mode");
1241 } else
1242 ch->retry = 0;
1243}
1244
1245/**
1246 * Setup channel.
1247 *
1248 * @param fi An instance of a channel statemachine.
1249 * @param event The event that just happened.
1250 * @param arg Generic pointer, cast from channel * upon call.
1251 */
1252static void
1253ch_action_start(fsm_instance * fi, int event, void *arg)
1254{
1255 struct channel *ch = (struct channel *) arg;
1256 unsigned long saveflags;
1257 int rc;
1258 struct net_device *dev;
1259
1260 DBF_TEXT(trace, 4, __FUNCTION__);
1261 if (ch == NULL) {
1262 ctc_pr_warn("ch_action_start ch=NULL\n");
1263 return;
1264 }
1265 if (ch->netdev == NULL) {
1266 ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
1267 return;
1268 }
1269 dev = ch->netdev;
1270
1271#ifdef DEBUG
1272 ctc_pr_debug("%s: %s channel start\n", dev->name,
1273 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1274#endif
1275
1276 if (ch->trans_skb != NULL) {
1277 clear_normalized_cda(&ch->ccw[1]);
1278 dev_kfree_skb(ch->trans_skb);
1279 ch->trans_skb = NULL;
1280 }
1281 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1282 ch->ccw[1].cmd_code = CCW_CMD_READ;
1283 ch->ccw[1].flags = CCW_FLAG_SLI;
1284 ch->ccw[1].count = 0;
1285 } else {
1286 ch->ccw[1].cmd_code = CCW_CMD_WRITE;
1287 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1288 ch->ccw[1].count = 0;
1289 }
1290 if (ctc_checkalloc_buffer(ch, 0)) {
1291 ctc_pr_notice(
1292 "%s: Could not allocate %s trans_skb, delaying "
1293 "allocation until first transfer\n",
1294 dev->name,
1295 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1296 }
1297
1298 ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
1299 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1300 ch->ccw[0].count = 0;
1301 ch->ccw[0].cda = 0;
1302 ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */
1303 ch->ccw[2].flags = CCW_FLAG_SLI;
1304 ch->ccw[2].count = 0;
1305 ch->ccw[2].cda = 0;
1306 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
1307 ch->ccw[4].cda = 0;
1308 ch->ccw[4].flags &= ~CCW_FLAG_IDA;
1309
1310 fsm_newstate(fi, CH_STATE_STARTWAIT);
1311 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1312 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1313 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1314 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1315 if (rc != 0) {
1316 if (rc != -EBUSY)
1317 fsm_deltimer(&ch->timer);
1318 ccw_check_return_code(ch, rc, "initial HaltIO");
1319 }
1320#ifdef DEBUG
1321 ctc_pr_debug("ctc: %s(): leaving\n", __func__);
1322#endif
1323}
1324
1325/**
1326 * Shutdown a channel.
1327 *
1328 * @param fi An instance of a channel statemachine.
1329 * @param event The event that just happened.
1330 * @param arg Generic pointer, cast from channel * upon call.
1331 */
1332static void
1333ch_action_haltio(fsm_instance * fi, int event, void *arg)
1334{
1335 struct channel *ch = (struct channel *) arg;
1336 unsigned long saveflags;
1337 int rc;
1338 int oldstate;
1339
1340 DBF_TEXT(trace, 3, __FUNCTION__);
1341 fsm_deltimer(&ch->timer);
1342 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1343 saveflags = 0; /* avoids comp warning with
1344 spin_unlock_irqrestore */
1345 if (event == CH_EVENT_STOP) // only for STOP not yet locked
1346 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1347 oldstate = fsm_getstate(fi);
1348 fsm_newstate(fi, CH_STATE_TERM);
1349 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1350 if (event == CH_EVENT_STOP)
1351 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1352 if (rc != 0) {
1353 if (rc != -EBUSY) {
1354 fsm_deltimer(&ch->timer);
1355 fsm_newstate(fi, oldstate);
1356 }
1357 ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
1358 }
1359}
1360
1361/**
1362 * A channel has successfully been halted.
1363 * Clean up its queue and notify the interface statemachine.
1364 *
1365 * @param fi An instance of a channel statemachine.
1366 * @param event The event that just happened.
1367 * @param arg Generic pointer, cast from channel * upon call.
1368 */
1369static void
1370ch_action_stopped(fsm_instance * fi, int event, void *arg)
1371{
1372 struct channel *ch = (struct channel *) arg;
1373 struct net_device *dev = ch->netdev;
1374
1375 DBF_TEXT(trace, 3, __FUNCTION__);
1376 fsm_deltimer(&ch->timer);
1377 fsm_newstate(fi, CH_STATE_STOPPED);
1378 if (ch->trans_skb != NULL) {
1379 clear_normalized_cda(&ch->ccw[1]);
1380 dev_kfree_skb(ch->trans_skb);
1381 ch->trans_skb = NULL;
1382 }
1383 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1384 skb_queue_purge(&ch->io_queue);
1385 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1386 DEV_EVENT_RXDOWN, dev);
1387 } else {
1388 ctc_purge_skb_queue(&ch->io_queue);
1389 spin_lock(&ch->collect_lock);
1390 ctc_purge_skb_queue(&ch->collect_queue);
1391 ch->collect_len = 0;
1392 spin_unlock(&ch->collect_lock);
1393 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1394 DEV_EVENT_TXDOWN, dev);
1395 }
1396}
1397
1398/**
1399 * A stop command from the device statemachine arrived and we are
1400 * in not-operational mode. Set state to stopped.
1401 *
1402 * @param fi An instance of a channel statemachine.
1403 * @param event The event that just happened.
1404 * @param arg Generic pointer, cast from channel * upon call.
1405 */
1406static void
1407ch_action_stop(fsm_instance * fi, int event, void *arg)
1408{
1409 fsm_newstate(fi, CH_STATE_STOPPED);
1410}
1411
1412/**
1413 * A machine check for no path, not-operational status or a gone
1414 * device has happened.
1415 * Clean up the queue and notify the interface statemachine.
1416 *
1417 * @param fi An instance of a channel statemachine.
1418 * @param event The event that just happened.
1419 * @param arg Generic pointer, cast from channel * upon call.
1420 */
1421static void
1422ch_action_fail(fsm_instance * fi, int event, void *arg)
1423{
1424 struct channel *ch = (struct channel *) arg;
1425 struct net_device *dev = ch->netdev;
1426
1427 DBF_TEXT(trace, 3, __FUNCTION__);
1428 fsm_deltimer(&ch->timer);
1429 fsm_newstate(fi, CH_STATE_NOTOP);
1430 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1431 skb_queue_purge(&ch->io_queue);
1432 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1433 DEV_EVENT_RXDOWN, dev);
1434 } else {
1435 ctc_purge_skb_queue(&ch->io_queue);
1436 spin_lock(&ch->collect_lock);
1437 ctc_purge_skb_queue(&ch->collect_queue);
1438 ch->collect_len = 0;
1439 spin_unlock(&ch->collect_lock);
1440 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1441 DEV_EVENT_TXDOWN, dev);
1442 }
1443}
1444
1445/**
1446 * Handle error during setup of channel.
1447 *
1448 * @param fi An instance of a channel statemachine.
1449 * @param event The event that just happened.
1450 * @param arg Generic pointer, cast from channel * upon call.
1451 */
1452static void
1453ch_action_setuperr(fsm_instance * fi, int event, void *arg)
1454{
1455 struct channel *ch = (struct channel *) arg;
1456 struct net_device *dev = ch->netdev;
1457
1458 DBF_TEXT(setup, 3, __FUNCTION__);
1459 /**
1460 * Special case: Got UC_RCRESET on setmode.
1461 * This means that remote side isn't setup. In this case
1462 * simply retry after some 10 secs...
1463 */
1464 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1465 ((event == CH_EVENT_UC_RCRESET) ||
1466 (event == CH_EVENT_UC_RSRESET))) {
1467 fsm_newstate(fi, CH_STATE_STARTRETRY);
1468 fsm_deltimer(&ch->timer);
1469 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1470 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1471 int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1472 if (rc != 0)
1473 ccw_check_return_code(
1474 ch, rc, "HaltIO in ch_action_setuperr");
1475 }
1476 return;
1477 }
1478
1479 ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
1480 dev->name, ch_event_names[event],
1481 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
1482 fsm_getstate_str(fi));
1483 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1484 fsm_newstate(fi, CH_STATE_RXERR);
1485 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1486 DEV_EVENT_RXDOWN, dev);
1487 } else {
1488 fsm_newstate(fi, CH_STATE_TXERR);
1489 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1490 DEV_EVENT_TXDOWN, dev);
1491 }
1492}
1493
1494/**
1495 * Restart a channel after an error.
1496 *
1497 * @param fi An instance of a channel statemachine.
1498 * @param event The event that just happened.
1499 * @param arg Generic pointer, cast from channel * upon call.
1500 */
1501static void
1502ch_action_restart(fsm_instance * fi, int event, void *arg)
1503{
1504 unsigned long saveflags;
1505 int oldstate;
1506 int rc;
1507
1508 struct channel *ch = (struct channel *) arg;
1509 struct net_device *dev = ch->netdev;
1510
1511 DBF_TEXT(trace, 3, __FUNCTION__);
1512 fsm_deltimer(&ch->timer);
1513 ctc_pr_debug("%s: %s channel restart\n", dev->name,
1514 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1515 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1516 oldstate = fsm_getstate(fi);
1517 fsm_newstate(fi, CH_STATE_STARTWAIT);
1518 saveflags = 0; /* avoids compiler warning with
1519 spin_unlock_irqrestore */
1520 if (event == CH_EVENT_TIMER) // only for timer not yet locked
1521 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1522 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1523 if (event == CH_EVENT_TIMER)
1524 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1525 if (rc != 0) {
1526 if (rc != -EBUSY) {
1527 fsm_deltimer(&ch->timer);
1528 fsm_newstate(fi, oldstate);
1529 }
1530 ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
1531 }
1532}
1533
1534/**
1535 * Handle error during RX initial handshake (exchange of
1536 * 0-length block header)
1537 *
1538 * @param fi An instance of a channel statemachine.
1539 * @param event The event that just happened.
1540 * @param arg Generic pointer, cast from channel * upon call.
1541 */
1542static void
1543ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
1544{
1545 struct channel *ch = (struct channel *) arg;
1546 struct net_device *dev = ch->netdev;
1547
1548 DBF_TEXT(setup, 3, __FUNCTION__);
1549 if (event == CH_EVENT_TIMER) {
1550 fsm_deltimer(&ch->timer);
1551 ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
1552 if (ch->retry++ < 3)
1553 ch_action_restart(fi, event, arg);
1554 else {
1555 fsm_newstate(fi, CH_STATE_RXERR);
1556 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1557 DEV_EVENT_RXDOWN, dev);
1558 }
1559 } else
1560 ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
1561}
1562
1563/**
1564 * Notify device statemachine if we gave up initialization
1565 * of RX channel.
1566 *
1567 * @param fi An instance of a channel statemachine.
1568 * @param event The event that just happened.
1569 * @param arg Generic pointer, cast from channel * upon call.
1570 */
1571static void
1572ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
1573{
1574 struct channel *ch = (struct channel *) arg;
1575 struct net_device *dev = ch->netdev;
1576
1577 DBF_TEXT(setup, 3, __FUNCTION__);
1578 fsm_newstate(fi, CH_STATE_RXERR);
1579 ctc_pr_warn("%s: RX initialization failed\n", dev->name);
1580 ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
1581 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1582}
1583
1584/**
1585 * Handle RX Unit check remote reset (remote disconnected)
1586 *
1587 * @param fi An instance of a channel statemachine.
1588 * @param event The event that just happened.
1589 * @param arg Generic pointer, cast from channel * upon call.
1590 */
1591static void
1592ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
1593{
1594 struct channel *ch = (struct channel *) arg;
1595 struct channel *ch2;
1596 struct net_device *dev = ch->netdev;
1597
1598 DBF_TEXT(trace, 3, __FUNCTION__);
1599 fsm_deltimer(&ch->timer);
1600 ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
1601 dev->name);
1602
1603 /**
1604 * Notify device statemachine
1605 */
1606 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1607 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
1608
1609 fsm_newstate(fi, CH_STATE_DTERM);
1610 ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
1611 fsm_newstate(ch2->fsm, CH_STATE_DTERM);
1612
1613 ccw_device_halt(ch->cdev, (unsigned long) ch);
1614 ccw_device_halt(ch2->cdev, (unsigned long) ch2);
1615}
1616
1617/**
1618 * Handle error during TX channel initialization.
1619 *
1620 * @param fi An instance of a channel statemachine.
1621 * @param event The event that just happened.
1622 * @param arg Generic pointer, cast from channel * upon call.
1623 */
1624static void
1625ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
1626{
1627 struct channel *ch = (struct channel *) arg;
1628 struct net_device *dev = ch->netdev;
1629
1630 DBF_TEXT(setup, 2, __FUNCTION__);
1631 if (event == CH_EVENT_TIMER) {
1632 fsm_deltimer(&ch->timer);
1633 ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
1634 if (ch->retry++ < 3)
1635 ch_action_restart(fi, event, arg);
1636 else {
1637 fsm_newstate(fi, CH_STATE_TXERR);
1638 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1639 DEV_EVENT_TXDOWN, dev);
1640 }
1641 } else
1642 ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
1643}
1644
1645/**
1646 * Handle TX timeout by retrying operation.
1647 *
1648 * @param fi An instance of a channel statemachine.
1649 * @param event The event that just happened.
1650 * @param arg Generic pointer, cast from channel * upon call.
1651 */
1652static void
1653ch_action_txretry(fsm_instance * fi, int event, void *arg)
1654{
1655 struct channel *ch = (struct channel *) arg;
1656 struct net_device *dev = ch->netdev;
1657 unsigned long saveflags;
1658
1659 DBF_TEXT(trace, 4, __FUNCTION__);
1660 fsm_deltimer(&ch->timer);
1661 if (ch->retry++ > 3) {
1662 ctc_pr_debug("%s: TX retry failed, restarting channel\n",
1663 dev->name);
1664 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1665 DEV_EVENT_TXDOWN, dev);
1666 ch_action_restart(fi, event, arg);
1667 } else {
1668 struct sk_buff *skb;
1669
1670 ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
1671 if ((skb = skb_peek(&ch->io_queue))) {
1672 int rc = 0;
1673
1674 clear_normalized_cda(&ch->ccw[4]);
1675 ch->ccw[4].count = skb->len;
1676 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1677 ctc_pr_debug(
1678 "%s: IDAL alloc failed, chan restart\n",
1679 dev->name);
1680 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1681 DEV_EVENT_TXDOWN, dev);
1682 ch_action_restart(fi, event, arg);
1683 return;
1684 }
1685 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1686 saveflags = 0; /* avoids compiler warning with
1687 spin_unlock_irqrestore */
1688 if (event == CH_EVENT_TIMER) // only for TIMER not yet locked
1689 spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
1690 saveflags);
1691 rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1692 (unsigned long) ch, 0xff, 0);
1693 if (event == CH_EVENT_TIMER)
1694 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1695 saveflags);
1696 if (rc != 0) {
1697 fsm_deltimer(&ch->timer);
1698 ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
1699 ctc_purge_skb_queue(&ch->io_queue);
1700 }
1701 }
1702 }
1704}
1705
1706/**
1707 * Handle fatal errors during an I/O command.
1708 *
1709 * @param fi An instance of a channel statemachine.
1710 * @param event The event that just happened.
1711 * @param arg Generic pointer, cast from channel * upon call.
1712 */
1713static void
1714ch_action_iofatal(fsm_instance * fi, int event, void *arg)
1715{
1716 struct channel *ch = (struct channel *) arg;
1717 struct net_device *dev = ch->netdev;
1718
1719 DBF_TEXT(trace, 3, __FUNCTION__);
1720 fsm_deltimer(&ch->timer);
1721 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1722 ctc_pr_debug("%s: RX I/O error\n", dev->name);
1723 fsm_newstate(fi, CH_STATE_RXERR);
1724 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1725 DEV_EVENT_RXDOWN, dev);
1726 } else {
1727 ctc_pr_debug("%s: TX I/O error\n", dev->name);
1728 fsm_newstate(fi, CH_STATE_TXERR);
1729 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1730 DEV_EVENT_TXDOWN, dev);
1731 }
1732}
1733
1734static void
1735ch_action_reinit(fsm_instance *fi, int event, void *arg)
1736{
1737 struct channel *ch = (struct channel *)arg;
1738 struct net_device *dev = ch->netdev;
1739 struct ctc_priv *privptr = dev->priv;
1740
1741 DBF_TEXT(trace, 4, __FUNCTION__);
1742 ch_action_iofatal(fi, event, arg);
1743 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
1744}
1745
1746
1747/**
1748 * The statemachine for a channel.
1749 */
1750static const fsm_node ch_fsm[] = {
1751 {CH_STATE_STOPPED, CH_EVENT_STOP, fsm_action_nop },
1752 {CH_STATE_STOPPED, CH_EVENT_START, ch_action_start },
1753 {CH_STATE_STOPPED, CH_EVENT_FINSTAT, fsm_action_nop },
1754 {CH_STATE_STOPPED, CH_EVENT_MC_FAIL, fsm_action_nop },
1755
1756 {CH_STATE_NOTOP, CH_EVENT_STOP, ch_action_stop },
1757 {CH_STATE_NOTOP, CH_EVENT_START, fsm_action_nop },
1758 {CH_STATE_NOTOP, CH_EVENT_FINSTAT, fsm_action_nop },
1759 {CH_STATE_NOTOP, CH_EVENT_MC_FAIL, fsm_action_nop },
1760 {CH_STATE_NOTOP, CH_EVENT_MC_GOOD, ch_action_start },
1761
1762 {CH_STATE_STARTWAIT, CH_EVENT_STOP, ch_action_haltio },
1763 {CH_STATE_STARTWAIT, CH_EVENT_START, fsm_action_nop },
1764 {CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ch_action_setmode },
1765 {CH_STATE_STARTWAIT, CH_EVENT_TIMER, ch_action_setuperr },
1766 {CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1767 {CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1768 {CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1769
1770 {CH_STATE_STARTRETRY, CH_EVENT_STOP, ch_action_haltio },
1771 {CH_STATE_STARTRETRY, CH_EVENT_TIMER, ch_action_setmode },
1772 {CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, fsm_action_nop },
1773 {CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL, ch_action_fail },
1774
1775 {CH_STATE_SETUPWAIT, CH_EVENT_STOP, ch_action_haltio },
1776 {CH_STATE_SETUPWAIT, CH_EVENT_START, fsm_action_nop },
1777 {CH_STATE_SETUPWAIT, CH_EVENT_FINSTAT, ch_action_firstio },
1778 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RCRESET, ch_action_setuperr },
1779 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ch_action_setuperr },
1780 {CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ch_action_setmode },
1781 {CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1782 {CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1783 {CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1784
1785 {CH_STATE_RXINIT, CH_EVENT_STOP, ch_action_haltio },
1786 {CH_STATE_RXINIT, CH_EVENT_START, fsm_action_nop },
1787 {CH_STATE_RXINIT, CH_EVENT_FINSTAT, ch_action_rxidle },
1788 {CH_STATE_RXINIT, CH_EVENT_UC_RCRESET, ch_action_rxiniterr },
1789 {CH_STATE_RXINIT, CH_EVENT_UC_RSRESET, ch_action_rxiniterr },
1790 {CH_STATE_RXINIT, CH_EVENT_TIMER, ch_action_rxiniterr },
1791 {CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ch_action_rxinitfail },
1792 {CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1793 {CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1794 {CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ch_action_firstio },
1795 {CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1796
1797 {CH_STATE_RXIDLE, CH_EVENT_STOP, ch_action_haltio },
1798 {CH_STATE_RXIDLE, CH_EVENT_START, fsm_action_nop },
1799 {CH_STATE_RXIDLE, CH_EVENT_FINSTAT, ch_action_rx },
1800 {CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ch_action_rxdisc },
1801// {CH_STATE_RXIDLE, CH_EVENT_UC_RSRESET, ch_action_rxretry },
1802 {CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1803 {CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1804 {CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1805 {CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ch_action_rx },
1806
1807 {CH_STATE_TXINIT, CH_EVENT_STOP, ch_action_haltio },
1808 {CH_STATE_TXINIT, CH_EVENT_START, fsm_action_nop },
1809 {CH_STATE_TXINIT, CH_EVENT_FINSTAT, ch_action_txidle },
1810 {CH_STATE_TXINIT, CH_EVENT_UC_RCRESET, ch_action_txiniterr },
1811 {CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ch_action_txiniterr },
1812 {CH_STATE_TXINIT, CH_EVENT_TIMER, ch_action_txiniterr },
1813 {CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1814 {CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1815 {CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1816
1817 {CH_STATE_TXIDLE, CH_EVENT_STOP, ch_action_haltio },
1818 {CH_STATE_TXIDLE, CH_EVENT_START, fsm_action_nop },
1819 {CH_STATE_TXIDLE, CH_EVENT_FINSTAT, ch_action_firstio },
1820 {CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, fsm_action_nop },
1821 {CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, fsm_action_nop },
1822 {CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1823 {CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1824 {CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1825
1826 {CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop },
1827 {CH_STATE_TERM, CH_EVENT_START, ch_action_restart },
1828 {CH_STATE_TERM, CH_EVENT_FINSTAT, ch_action_stopped },
1829 {CH_STATE_TERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1830 {CH_STATE_TERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1831 {CH_STATE_TERM, CH_EVENT_MC_FAIL, ch_action_fail },
1832
1833 {CH_STATE_DTERM, CH_EVENT_STOP, ch_action_haltio },
1834 {CH_STATE_DTERM, CH_EVENT_START, ch_action_restart },
1835 {CH_STATE_DTERM, CH_EVENT_FINSTAT, ch_action_setmode },
1836 {CH_STATE_DTERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1837 {CH_STATE_DTERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1838 {CH_STATE_DTERM, CH_EVENT_MC_FAIL, ch_action_fail },
1839
1840 {CH_STATE_TX, CH_EVENT_STOP, ch_action_haltio },
1841 {CH_STATE_TX, CH_EVENT_START, fsm_action_nop },
1842 {CH_STATE_TX, CH_EVENT_FINSTAT, ch_action_txdone },
1843 {CH_STATE_TX, CH_EVENT_UC_RCRESET, ch_action_txretry },
1844 {CH_STATE_TX, CH_EVENT_UC_RSRESET, ch_action_txretry },
1845 {CH_STATE_TX, CH_EVENT_TIMER, ch_action_txretry },
1846 {CH_STATE_TX, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1847 {CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_reinit },
1848 {CH_STATE_TX, CH_EVENT_MC_FAIL, ch_action_fail },
1849
1850 {CH_STATE_RXERR, CH_EVENT_STOP, ch_action_haltio },
1851 {CH_STATE_TXERR, CH_EVENT_STOP, ch_action_haltio },
1852 {CH_STATE_TXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1853 {CH_STATE_RXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1854};
1855
1856static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
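
/**
 * Example: the table above is interpreted by the generic FSM code
 * (fsm.c); posting an event looks up the (state, event) pair and
 * runs the registered action:
 *
 *   fsm_event(ch->fsm, CH_EVENT_START, ch);
 *
 * In CH_STATE_STOPPED this dispatches ch_action_start(); in
 * CH_STATE_NOTOP the same event is a no-op (fsm_action_nop).
 */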
1857
1858/**
1859 * Functions related to setup and device detection.
1860 *****************************************************************************/
1861
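/**
 * Compare two channel ids for sorting: skip the first five
 * characters of both ids and compare the remainders as hexadecimal
 * numbers via simple_strtoul(). Used to keep the channel list
 * sorted in add_channel().
 *
 * @param id1 Id of the first channel.
 * @param id2 Id of the second channel.
 *
 * @return 1 if id1 sorts before id2, 0 otherwise.
 */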
1862static inline int
1863less_than(char *id1, char *id2)
1864{
1865 int dev1, dev2, i;
1866
1867 for (i = 0; i < 5; i++) {
1868 id1++;
1869 id2++;
1870 }
1871 dev1 = simple_strtoul(id1, &id1, 16);
1872 dev2 = simple_strtoul(id2, &id2, 16);
1873
1874 return (dev1 < dev2);
1875}
1876
1877/**
1878 * Add a new channel to the list of channels.
1879 * Keeps the channel list sorted.
1880 *
1881 * @param cdev The ccw_device to be added.
1882 * @param type The type class of the new channel.
1883 *
1884 * @return 0 on success, !0 on error.
1885 */
1886static int
1887add_channel(struct ccw_device *cdev, enum channel_types type)
1888{
1889 struct channel **c = &channels;
1890 struct channel *ch;
1891
1892 DBF_TEXT(trace, 2, __FUNCTION__);
1893 if ((ch =
1894 (struct channel *) kmalloc(sizeof (struct channel),
1895 GFP_KERNEL)) == NULL) {
1896 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1897 return -1;
1898 }
1899 memset(ch, 0, sizeof (struct channel));
1900 if ((ch->ccw = (struct ccw1 *) kmalloc(8*sizeof(struct ccw1),
1901 GFP_KERNEL | GFP_DMA)) == NULL) {
1902 kfree(ch);
1903 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1904 return -1;
1905 }
1906
1907 memset(ch->ccw, 0, 8*sizeof(struct ccw1)); // ensure all flags and counters are reset
1908
1909 /**
1910 * "static" ccws are used in the following way:
1911 *
1912 * ccw[0..2] (Channel program for generic I/O):
1913 * 0: prepare
1914 * 1: read or write (depending on direction) with fixed
1915 * buffer (idal allocated once when buffer is allocated)
1916 * 2: nop
1917 * ccw[3..5] (Channel program for direct write of packets)
1918 * 3: prepare
1919 * 4: write (idal allocated on every write).
1920 * 5: nop
1921 * ccw[6..7] (Channel program for initial channel setup):
1922 * 6: set extended mode
1923 * 7: nop
1924 *
1925 * ch->ccw[0..5] are initialized in ch_action_start because
1926 * the channel's direction is yet unknown here.
1927 */
1928 ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
1929 ch->ccw[6].flags = CCW_FLAG_SLI;
1930
1931 ch->ccw[7].cmd_code = CCW_CMD_NOOP;
1932 ch->ccw[7].flags = CCW_FLAG_SLI;
1933
1934 ch->cdev = cdev;
1935 snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
1936 ch->type = type;
1937 loglevel = CTC_LOGLEVEL_DEFAULT;
1938 ch->fsm = init_fsm(ch->id, ch_state_names,
1939 ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
1940 ch_fsm, CH_FSM_LEN, GFP_KERNEL);
1941 if (ch->fsm == NULL) {
1942 ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
1943 kfree(ch->ccw);
1944 kfree(ch);
1945 return -1;
1946 }
1947 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1948 if ((ch->irb = (struct irb *) kmalloc(sizeof (struct irb),
1949 GFP_KERNEL)) == NULL) {
1950 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1951 kfree_fsm(ch->fsm);
1952 kfree(ch->ccw);
1953 kfree(ch);
1954 return -1;
1955 }
1956 memset(ch->irb, 0, sizeof (struct irb));
1957 while (*c && less_than((*c)->id, ch->id))
1958 c = &(*c)->next;
1959 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
1960 ctc_pr_debug(
1961 "ctc: add_channel: device %s already in list, "
1962 "using old entry\n", (*c)->id);
1963 kfree(ch->irb);
1964 kfree_fsm(ch->fsm);
1965 kfree(ch->ccw);
1966 kfree(ch);
1967 return 0;
1968 }
1969 fsm_settimer(ch->fsm, &ch->timer);
1970 skb_queue_head_init(&ch->io_queue);
1971 skb_queue_head_init(&ch->collect_queue);
1972 ch->next = *c;
1973 *c = ch;
1974 return 0;
1975}
1976
1977/**
1978 * Release a specific channel in the channel list.
1979 *
1980 * @param ch Pointer to channel struct to be released.
1981 */
1982static void
1983channel_free(struct channel *ch)
1984{
1985 ch->flags &= ~CHANNEL_FLAGS_INUSE;
1986 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1987}
1988
1989/**
1990 * Remove a specific channel from the channel list.
1991 *
1992 * @param ch Pointer to channel struct to be removed.
1993 */
1994static void
1995channel_remove(struct channel *ch)
1996{
1997 struct channel **c = &channels;
1998
1999 DBF_TEXT(trace, 2, __FUNCTION__);
2000 if (ch == NULL)
2001 return;
2002
2003 channel_free(ch);
2004 while (*c) {
2005 if (*c == ch) {
2006 *c = ch->next;
2007 fsm_deltimer(&ch->timer);
2008 kfree_fsm(ch->fsm);
2009 clear_normalized_cda(&ch->ccw[4]);
2010 if (ch->trans_skb != NULL) {
2011 clear_normalized_cda(&ch->ccw[1]);
2012 dev_kfree_skb(ch->trans_skb);
2013 }
2014 kfree(ch->ccw);
2015 kfree(ch->irb);
2016 kfree(ch);
2017 return;
2018 }
2019 c = &((*c)->next);
2020 }
2021}
2022
2023/**
2024 * Get a specific channel from the channel list.
2025 *
2026 * @param type Type of channel we are interested in.
2027 * @param id Id of channel we are interested in.
2028 * @param direction Direction we want to use this channel for.
2029 *
2030 * @return Pointer to a channel or NULL if no matching channel available.
2031 */
2032static struct channel *
2033channel_get(enum channel_types type, char *id, int direction)
2035{
2036 struct channel *ch = channels;
2037
2038 DBF_TEXT(trace, 3, __FUNCTION__);
2039#ifdef DEBUG
2040 ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
2041 __func__, id, type);
2042#endif
2043
2044 while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
2045#ifdef DEBUG
2046 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
2047 __func__, ch, ch->id, ch->type);
2048#endif
2049 ch = ch->next;
2050 }
2051#ifdef DEBUG
2052 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
2053 __func__, ch, ch->id, ch->type);
2054#endif
2055 if (!ch) {
2056 ctc_pr_warn("ctc: %s(): channel with id %s "
2057 "and type %d not found in channel list\n",
2058 __func__, id, type);
2059 } else {
2060 if (ch->flags & CHANNEL_FLAGS_INUSE)
2061 ch = NULL;
2062 else {
2063 ch->flags |= CHANNEL_FLAGS_INUSE;
2064 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
2065 ch->flags |= (direction == WRITE)
2066 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
2067 fsm_newstate(ch->fsm, CH_STATE_STOPPED);
2068 }
2069 }
2070 return ch;
2071}
2072
2073/**
2074 * Return the channel type by name.
2075 *
2076 * @param name Name of network interface.
2077 *
2078 * @return Type class of channel to be used for that interface.
2079 */
2080static inline enum channel_types
2081extract_channel_media(char *name)
2082{
2083 enum channel_types ret = channel_type_unknown;
2084
2085 if (name != NULL) {
2086 if (strncmp(name, "ctc", 3) == 0)
2087 ret = channel_type_parallel;
2088 if (strncmp(name, "escon", 5) == 0)
2089 ret = channel_type_escon;
2090 }
2091 return ret;
2092}
2093
2094static long
2095__ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
2096{
2097 if (!IS_ERR(irb))
2098 return 0;
2099
2100 switch (PTR_ERR(irb)) {
2101 case -EIO:
2102 ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
2103// CTC_DBF_TEXT(trace, 2, "ckirberr");
2104// CTC_DBF_TEXT_(trace, 2, " rc%d", -EIO);
2105 break;
2106 case -ETIMEDOUT:
2107 ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
2108// CTC_DBF_TEXT(trace, 2, "ckirberr");
2109// CTC_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
2110 break;
2111 default:
2112 ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
2113 cdev->dev.bus_id);
2114// CTC_DBF_TEXT(trace, 2, "ckirberr");
2115// CTC_DBF_TEXT(trace, 2, " rc???");
2116 }
2117 return PTR_ERR(irb);
2118}
2119
2120/**
2121 * Main IRQ handler.
2122 *
2123 * @param cdev The ccw_device the interrupt is for.
2124 * @param intparm interruption parameter.
2125 * @param irb interruption response block.
2126 */
2127static void
2128ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
2129{
2130 struct channel *ch;
2131 struct net_device *dev;
2132 struct ctc_priv *priv;
2133
2134 DBF_TEXT(trace, 5, __FUNCTION__);
2135 if (__ctc_check_irb_error(cdev, irb))
2136 return;
2137
2138 /* Check for unsolicited interrupts. */
2139 if (!cdev->dev.driver_data) {
2140 ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
2141 cdev->dev.bus_id, irb->scsw.cstat,
2142 irb->scsw.dstat);
2143 return;
2144 }
2145
2146 priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
2147 ->dev.driver_data;
2148
2149 /* Try to extract channel from driver data. */
2150 if (priv->channel[READ]->cdev == cdev)
2151 ch = priv->channel[READ];
2152 else if (priv->channel[WRITE]->cdev == cdev)
2153 ch = priv->channel[WRITE];
2154 else {
2155 ctc_pr_err("ctc: Can't determine channel for interrupt, "
2156 "device %s\n", cdev->dev.bus_id);
2157 return;
2158 }
2159
2160 dev = (struct net_device *) (ch->netdev);
2161 if (dev == NULL) {
2162 ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
2163 cdev->dev.bus_id, ch);
2164 return;
2165 }
2166
2167#ifdef DEBUG
2168 ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
2169 dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
2170#endif
2171
2172 /* Copy interruption response block. */
2173 memcpy(ch->irb, irb, sizeof(struct irb));
2174
2175 /* Check for good subchannel return code, otherwise error message */
2176 if (ch->irb->scsw.cstat) {
2177 fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
2178 ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
2179 dev->name, ch->id, ch->irb->scsw.cstat,
2180 ch->irb->scsw.dstat);
2181 return;
2182 }
2183
2184 /* Check the reason-code of a unit check */
2185 if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
2186 ccw_unit_check(ch, ch->irb->ecw[0]);
2187 return;
2188 }
2189 if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
2190 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
2191 fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
2192 else
2193 fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
2194 return;
2195 }
2196 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
2197 fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
2198 return;
2199 }
2200 if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
2201 (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
2202 (ch->irb->scsw.stctl ==
2203 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
2204 fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
2205 else
2206 fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
2207
2208}
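/*
 * Summary of the dispatch order above (descriptive only): bad irb ->
 * drop; unsolicited irq -> warn and drop; subchannel check ->
 * CH_EVENT_SC_UNKNOWN; unit check -> ccw_unit_check(); busy (with or
 * without attention) -> CH_EVENT_(ATTN)BUSY; attention -> CH_EVENT_ATTN;
 * final status -> CH_EVENT_FINSTAT; anything else -> CH_EVENT_IRQ.
 */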
2209
2210/**
2211 * Actions for the interface statemachine.
2212 *****************************************************************************/
2213
2214/**
2215 * Startup channels by sending CH_EVENT_START to each channel.
2216 *
2217 * @param fi An instance of an interface statemachine.
2218 * @param event The event that just happened.
2219 * @param arg Generic pointer, cast from struct net_device * upon call.
2220 */
2221static void
2222dev_action_start(fsm_instance * fi, int event, void *arg)
2223{
2224 struct net_device *dev = (struct net_device *) arg;
2225 struct ctc_priv *privptr = dev->priv;
2226 int direction;
2227
2228 DBF_TEXT(setup, 3, __FUNCTION__);
2229 fsm_deltimer(&privptr->restart_timer);
2230 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2231 for (direction = READ; direction <= WRITE; direction++) {
2232 struct channel *ch = privptr->channel[direction];
2233 fsm_event(ch->fsm, CH_EVENT_START, ch);
2234 }
2235}
2236
2237/**
2238 * Shutdown channels by sending CH_EVENT_STOP to each channel.
2239 *
2240 * @param fi An instance of an interface statemachine.
2241 * @param event The event that just happened.
2242 * @param arg Generic pointer, cast from struct net_device * upon call.
2243 */
2244static void
2245dev_action_stop(fsm_instance * fi, int event, void *arg)
2246{
2247 struct net_device *dev = (struct net_device *) arg;
2248 struct ctc_priv *privptr = dev->priv;
2249 int direction;
2250
2251 DBF_TEXT(trace, 3, __FUNCTION__);
2252 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2253 for (direction = READ; direction <= WRITE; direction++) {
2254 struct channel *ch = privptr->channel[direction];
2255 fsm_event(ch->fsm, CH_EVENT_STOP, ch);
2256 }
2257}
2258static void
2259dev_action_restart(fsm_instance *fi, int event, void *arg)
2260{
2261 struct net_device *dev = (struct net_device *)arg;
2262 struct ctc_priv *privptr = dev->priv;
2263
2264 DBF_TEXT(trace, 3, __FUNCTION__);
2265 ctc_pr_debug("%s: Restarting\n", dev->name);
2266 dev_action_stop(fi, event, arg);
2267 fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
2268 fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
2269 DEV_EVENT_START, dev);
2270}
2271
2272/**
2273 * Called from channel statemachine
2274 * when a channel is up and running.
2275 *
2276 * @param fi An instance of an interface statemachine.
2277 * @param event The event that just happened.
2278 * @param arg Generic pointer, cast from struct net_device * upon call.
2279 */
2280static void
2281dev_action_chup(fsm_instance * fi, int event, void *arg)
2282{
2283 struct net_device *dev = (struct net_device *) arg;
2284 struct ctc_priv *privptr = dev->priv;
2285
2286 DBF_TEXT(trace, 3, __FUNCTION__);
2287 switch (fsm_getstate(fi)) {
2288 case DEV_STATE_STARTWAIT_RXTX:
2289 if (event == DEV_EVENT_RXUP)
2290 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2291 else
2292 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2293 break;
2294 case DEV_STATE_STARTWAIT_RX:
2295 if (event == DEV_EVENT_RXUP) {
2296 fsm_newstate(fi, DEV_STATE_RUNNING);
2297 ctc_pr_info("%s: connected with remote side\n",
2298 dev->name);
2299 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2300 ctc_tty_setcarrier(dev, 1);
2301 ctc_clear_busy(dev);
2302 }
2303 break;
2304 case DEV_STATE_STARTWAIT_TX:
2305 if (event == DEV_EVENT_TXUP) {
2306 fsm_newstate(fi, DEV_STATE_RUNNING);
2307 ctc_pr_info("%s: connected with remote side\n",
2308 dev->name);
2309 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2310 ctc_tty_setcarrier(dev, 1);
2311 ctc_clear_busy(dev);
2312 }
2313 break;
2314 case DEV_STATE_STOPWAIT_TX:
2315 if (event == DEV_EVENT_RXUP)
2316 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2317 break;
2318 case DEV_STATE_STOPWAIT_RX:
2319 if (event == DEV_EVENT_TXUP)
2320 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2321 break;
2322 }
2323}
2324
2325/**
2326 * Called from channel statemachine
2327 * when a channel has been shutdown.
2328 *
2329 * @param fi An instance of an interface statemachine.
2330 * @param event The event that just happened.
2331 * @param arg Generic pointer, cast from struct net_device * upon call.
2332 */
2333static void
2334dev_action_chdown(fsm_instance * fi, int event, void *arg)
2335{
2336 struct net_device *dev = (struct net_device *) arg;
2337 struct ctc_priv *privptr = dev->priv;
2338
2339 DBF_TEXT(trace, 3, __FUNCTION__);
2340 switch (fsm_getstate(fi)) {
2341 case DEV_STATE_RUNNING:
2342 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2343 ctc_tty_setcarrier(dev, 0);
2344 if (event == DEV_EVENT_TXDOWN)
2345 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2346 else
2347 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2348 break;
2349 case DEV_STATE_STARTWAIT_RX:
2350 if (event == DEV_EVENT_TXDOWN)
2351 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2352 break;
2353 case DEV_STATE_STARTWAIT_TX:
2354 if (event == DEV_EVENT_RXDOWN)
2355 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2356 break;
2357 case DEV_STATE_STOPWAIT_RXTX:
2358 if (event == DEV_EVENT_TXDOWN)
2359 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2360 else
2361 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2362 break;
2363 case DEV_STATE_STOPWAIT_RX:
2364 if (event == DEV_EVENT_RXDOWN)
2365 fsm_newstate(fi, DEV_STATE_STOPPED);
2366 break;
2367 case DEV_STATE_STOPWAIT_TX:
2368 if (event == DEV_EVENT_TXDOWN)
2369 fsm_newstate(fi, DEV_STATE_STOPPED);
2370 break;
2371 }
2372}
2373
2374static const fsm_node dev_fsm[] = {
2375 {DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
2376
2377 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2378 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2379 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2380 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2381
2382 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2383 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2384 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2385 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2386 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2387
2388 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2389 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2390 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2391 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2392 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2393
2394 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2395 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2396 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2397 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2398 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2399 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2400
2401 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2402 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2403 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2404 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2405 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2406
2407 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2408 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2409 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2410 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2411 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2412
2413 {DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2414 {DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2415 {DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2416 {DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop },
2417 {DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop },
2418 {DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
2419};
2420
2421static const int DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
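/*
 * Example walk through the table above (illustrative only): a fresh
 * interface starts in DEV_STATE_STOPPED; DEV_EVENT_START moves it to
 * DEV_STATE_STARTWAIT_RXTX via dev_action_start(). When the read channel
 * comes up (DEV_EVENT_RXUP), dev_action_chup() advances the state to
 * DEV_STATE_STARTWAIT_TX, and the subsequent DEV_EVENT_TXUP finally
 * lands in DEV_STATE_RUNNING.
 */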
2422
2423/**
2424 * Transmit a packet.
2425 * This is a helper function for ctc_tx().
2426 *
2427 * @param ch Channel to be used for sending.
2428 * @param skb Pointer to struct sk_buff of packet to send.
2429 * The linklevel header has already been set up
2430 * by ctc_tx().
2431 *
2432 * @return 0 on success, -ERRNO on failure.
2433 */
2434static int
2435transmit_skb(struct channel *ch, struct sk_buff *skb)
2436{
2437 unsigned long saveflags;
2438 struct ll_header header;
2439 int rc = 0;
2440
2441 DBF_TEXT(trace, 5, __FUNCTION__);
2442 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
2443 int l = skb->len + LL_HEADER_LENGTH;
2444
2445 spin_lock_irqsave(&ch->collect_lock, saveflags);
2446 if (ch->collect_len + l > ch->max_bufsize - 2)
2447 rc = -EBUSY;
2448 else {
2449 atomic_inc(&skb->users);
2450 header.length = l;
2451 header.type = skb->protocol;
2452 header.unused = 0;
2453 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2454 LL_HEADER_LENGTH);
2455 skb_queue_tail(&ch->collect_queue, skb);
2456 ch->collect_len += l;
2457 }
2458 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2459 } else {
2460 __u16 block_len;
2461 int ccw_idx;
2462 struct sk_buff *nskb;
2463 unsigned long hi;
2464
2465 /**
2466 * Protect skb against being freed by upper
2467 * layers.
2468 */
2469 atomic_inc(&skb->users);
2470 ch->prof.txlen += skb->len;
2471 header.length = skb->len + LL_HEADER_LENGTH;
2472 header.type = skb->protocol;
2473 header.unused = 0;
2474 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2475 LL_HEADER_LENGTH);
2476 block_len = skb->len + 2;
2477 *((__u16 *) skb_push(skb, 2)) = block_len;
2478
2479 /**
2480 * IDAL support in CTC is broken, so we have to
2481 * care about skb's above 2G ourselves.
2482 */
2483 hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31;
2484 if (hi) {
2485 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
2486 if (!nskb) {
2487 atomic_dec(&skb->users);
2488 skb_pull(skb, LL_HEADER_LENGTH + 2);
2489 return -ENOMEM;
2490 } else {
2491 memcpy(skb_put(nskb, skb->len),
2492 skb->data, skb->len);
2493 atomic_inc(&nskb->users);
2494 atomic_dec(&skb->users);
2495 dev_kfree_skb_irq(skb);
2496 skb = nskb;
2497 }
2498 }
2499
2500 ch->ccw[4].count = block_len;
2501 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
2502 /**
2503 * idal allocation failed, try via copying to
2504 * trans_skb. trans_skb usually has a pre-allocated
2505 * idal.
2506 */
2507 if (ctc_checkalloc_buffer(ch, 1)) {
2508 /**
2509 * Remove our header. It gets added
2510 * again on retransmit.
2511 */
2512 atomic_dec(&skb->users);
2513 skb_pull(skb, LL_HEADER_LENGTH + 2);
2514 return -EBUSY;
2515 }
2516
2517 ch->trans_skb->tail = ch->trans_skb->data;
2518 ch->trans_skb->len = 0;
2519 ch->ccw[1].count = skb->len;
2520 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
2521 skb->len);
2522 atomic_dec(&skb->users);
2523 dev_kfree_skb_irq(skb);
2524 ccw_idx = 0;
2525 } else {
2526 skb_queue_tail(&ch->io_queue, skb);
2527 ccw_idx = 3;
2528 }
2529 ch->retry = 0;
2530 fsm_newstate(ch->fsm, CH_STATE_TX);
2531 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
2532 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
2533 ch->prof.send_stamp = xtime;
2534 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
2535 (unsigned long) ch, 0xff, 0);
2536 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
2537 if (ccw_idx == 3)
2538 ch->prof.doios_single++;
2539 if (rc != 0) {
2540 fsm_deltimer(&ch->timer);
2541 ccw_check_return_code(ch, rc, "single skb TX");
2542 if (ccw_idx == 3)
2543 skb_dequeue_tail(&ch->io_queue);
2544 /**
2545 * Remove our header. It gets added
2546 * again on retransmit.
2547 */
2548 skb_pull(skb, LL_HEADER_LENGTH + 2);
2549 } else {
2550 if (ccw_idx == 0) {
2551 struct net_device *dev = ch->netdev;
2552 struct ctc_priv *privptr = dev->priv;
2553 privptr->stats.tx_packets++;
2554 privptr->stats.tx_bytes +=
2555 skb->len - LL_HEADER_LENGTH;
2556 }
2557 }
2558 }
2559
2560 return rc;
2561}
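/*
 * On-the-wire layout produced above (a sketch; LL_HEADER_LENGTH is the
 * link level header size defined elsewhere in this driver):
 *
 *   | 2 byte block length | ll_header (length/type/unused) | payload |
 *
 * header.length covers the ll_header plus payload; the leading block
 * length additionally counts its own two bytes, i.e. for an skb of N
 * payload bytes, block_len == N + LL_HEADER_LENGTH + 2.
 */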
2562
2563/**
2564 * Interface API for upper network layers
2565 *****************************************************************************/
2566
2567/**
2568 * Open an interface.
2569 * Called from generic network layer when ifconfig up is run.
2570 *
2571 * @param dev Pointer to interface struct.
2572 *
2573 * @return 0 on success, -ERRNO on failure. (Never fails.)
2574 */
2575static int
2576ctc_open(struct net_device * dev)
2577{
2578 DBF_TEXT(trace, 5, __FUNCTION__);
2579 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
2580 return 0;
2581}
2582
2583/**
2584 * Close an interface.
2585 * Called from generic network layer when ifconfig down is run.
2586 *
2587 * @param dev Pointer to interface struct.
2588 *
2589 * @return 0 on success, -ERRNO on failure. (Never fails.)
2590 */
2591static int
2592ctc_close(struct net_device * dev)
2593{
2594 DBF_TEXT(trace, 5, __FUNCTION__);
2595 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
2596 return 0;
2597}
2598
2599/**
2600 * Start transmission of a packet.
2601 * Called from generic network device layer.
2602 *
2603 * @param skb Pointer to buffer containing the packet.
2604 * @param dev Pointer to interface struct.
2605 *
2606 * @return 0 if packet consumed, !0 if packet rejected.
2607 * Note: If we return !0, then the packet is freed by
2608 * the generic network layer.
2609 */
2610static int
2611ctc_tx(struct sk_buff *skb, struct net_device * dev)
2612{
2613 int rc = 0;
2614 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2615
2616 DBF_TEXT(trace, 5, __FUNCTION__);
2617 /**
2618 * Some sanity checks ...
2619 */
2620 if (skb == NULL) {
2621 ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
2622 privptr->stats.tx_dropped++;
2623 return 0;
2624 }
2625 if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
2626 ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
2627 dev->name, LL_HEADER_LENGTH + 2);
2628 dev_kfree_skb(skb);
2629 privptr->stats.tx_dropped++;
2630 return 0;
2631 }
2632
2633 /**
2634 * If channels are not running, try to restart them
2635 * and throw away packet.
2636 */
2637 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
2638 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
2639 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2640 return -EBUSY;
2641 dev_kfree_skb(skb);
2642 privptr->stats.tx_dropped++;
2643 privptr->stats.tx_errors++;
2644 privptr->stats.tx_carrier_errors++;
2645 return 0;
2646 }
2647
2648 if (ctc_test_and_set_busy(dev))
2649 return -EBUSY;
2650
2651 dev->trans_start = jiffies;
2652 if (transmit_skb(privptr->channel[WRITE], skb) != 0)
2653 rc = 1;
2654 ctc_clear_busy(dev);
2655 return rc;
2656}
2657
2658/**
2659 * Sets MTU of an interface.
2660 *
2661 * @param dev Pointer to interface struct.
2662 * @param new_mtu The new MTU to use for this interface.
2663 *
2664 * @return 0 on success, -EINVAL if MTU is out of valid range.
2665 * (valid range is 576 .. 65527). If VM is on the
2666 * remote side, maximum MTU is 32760, however this is
2667 * <em>not</em> checked here.
2668 */
2669static int
2670ctc_change_mtu(struct net_device * dev, int new_mtu)
2671{
2672 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2673
2674 DBF_TEXT(trace, 3, __FUNCTION__);
2675 if ((new_mtu < 576) || (new_mtu > 65527) ||
2676 (new_mtu > (privptr->channel[READ]->max_bufsize -
2677 LL_HEADER_LENGTH - 2)))
2678 return -EINVAL;
2679 dev->mtu = new_mtu;
2680 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2681 return 0;
2682}
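/*
 * Worked example (the concrete value of CTC_BUFSIZE_DEFAULT is defined in
 * the driver headers): with a receive buffer of max_bufsize bytes, the
 * largest accepted MTU is max_bufsize - LL_HEADER_LENGTH - 2, since every
 * block carries the 2 byte block length plus the link level header. A
 * request outside 576..65527 or above that limit is rejected with
 * -EINVAL.
 */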
2683
2684/**
2685 * Returns interface statistics of a device.
2686 *
2687 * @param dev Pointer to interface struct.
2688 *
2689 * @return Pointer to stats struct of this interface.
2690 */
2691static struct net_device_stats *
2692ctc_stats(struct net_device * dev)
2693{
2694 return &((struct ctc_priv *) dev->priv)->stats;
2695}
2696
2697/*
2698 * sysfs attributes
2699 */
2700static ssize_t
2701buffer_show(struct device *dev, char *buf)
2702{
2703 struct ctc_priv *priv;
2704
2705 priv = dev->driver_data;
2706 if (!priv)
2707 return -ENODEV;
2708 return sprintf(buf, "%d\n",
2709 priv->buffer_size);
2710}
2711
2712static ssize_t
2713buffer_write(struct device *dev, const char *buf, size_t count)
2714{
2715 struct ctc_priv *priv;
2716 struct net_device *ndev;
2717 int bs1;
2718
2719 DBF_TEXT(trace, 3, __FUNCTION__);
2720 priv = dev->driver_data;
2721 if (!priv)
2722 return -ENODEV;
2723 ndev = priv->channel[READ]->netdev;
2724 if (!ndev)
2725 return -ENODEV;
2726 sscanf(buf, "%d", &bs1);
2727
2728 if (bs1 > CTC_BUFSIZE_LIMIT)
2729 return -EINVAL;
2730 if ((ndev->flags & IFF_RUNNING) &&
2731 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
2732 return -EINVAL;
2733 if (bs1 < (576 + LL_HEADER_LENGTH + 2))
2734 return -EINVAL;
2735
2736 priv->buffer_size = bs1;
2737 priv->channel[READ]->max_bufsize =
2738 priv->channel[WRITE]->max_bufsize = bs1;
2739 if (!(ndev->flags & IFF_RUNNING))
2740 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
2741 priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2742 priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2743
2744 return count;
2745
2746}
2747
2748static ssize_t
2749loglevel_show(struct device *dev, char *buf)
2750{
2751 struct ctc_priv *priv;
2752
2753 priv = dev->driver_data;
2754 if (!priv)
2755 return -ENODEV;
2756 return sprintf(buf, "%d\n", loglevel);
2757}
2758
2759static ssize_t
2760loglevel_write(struct device *dev, const char *buf, size_t count)
2761{
2762 struct ctc_priv *priv;
2763 int ll1;
2764
2765 DBF_TEXT(trace, 5, __FUNCTION__);
2766 priv = dev->driver_data;
2767 if (!priv)
2768 return -ENODEV;
2769 sscanf(buf, "%i", &ll1);
2770
2771 if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
2772 return -EINVAL;
2773 loglevel = ll1;
2774 return count;
2775}
2776
2777static void
2778ctc_print_statistics(struct ctc_priv *priv)
2779{
2780 char *sbuf;
2781 char *p;
2782
2783 DBF_TEXT(trace, 4, __FUNCTION__);
2784 if (!priv)
2785 return;
2786 sbuf = kmalloc(2048, GFP_KERNEL);
2787 if (sbuf == NULL)
2788 return;
2789 p = sbuf;
2790
2791 p += sprintf(p, " Device FSM state: %s\n",
2792 fsm_getstate_str(priv->fsm));
2793 p += sprintf(p, " RX channel FSM state: %s\n",
2794 fsm_getstate_str(priv->channel[READ]->fsm));
2795 p += sprintf(p, " TX channel FSM state: %s\n",
2796 fsm_getstate_str(priv->channel[WRITE]->fsm));
2797 p += sprintf(p, " Max. TX buffer used: %ld\n",
2798 priv->channel[WRITE]->prof.maxmulti);
2799 p += sprintf(p, " Max. chained SKBs: %ld\n",
2800 priv->channel[WRITE]->prof.maxcqueue);
2801 p += sprintf(p, " TX single write ops: %ld\n",
2802 priv->channel[WRITE]->prof.doios_single);
2803 p += sprintf(p, " TX multi write ops: %ld\n",
2804 priv->channel[WRITE]->prof.doios_multi);
2805 p += sprintf(p, " Netto bytes written: %ld\n",
2806 priv->channel[WRITE]->prof.txlen);
2807 p += sprintf(p, " Max. TX IO-time: %ld\n",
2808 priv->channel[WRITE]->prof.tx_time);
2809
2810 ctc_pr_debug("Statistics for %s:\n%s",
2811 priv->channel[WRITE]->netdev->name, sbuf);
2812 kfree(sbuf);
2813 return;
2814}
2815
2816static ssize_t
2817stats_show(struct device *dev, char *buf)
2818{
2819 struct ctc_priv *priv = dev->driver_data;
2820 if (!priv)
2821 return -ENODEV;
2822 ctc_print_statistics(priv);
2823 return sprintf(buf, "0\n");
2824}
2825
2826static ssize_t
2827stats_write(struct device *dev, const char *buf, size_t count)
2828{
2829 struct ctc_priv *priv = dev->driver_data;
2830 if (!priv)
2831 return -ENODEV;
2832 /* Reset statistics */
2833 memset(&priv->channel[WRITE]->prof, 0,
2834 sizeof(priv->channel[WRITE]->prof));
2835 return count;
2836}
2837
2838static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
2839static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
2840static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
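/*
 * The attributes above surface per-device files in sysfs. A hypothetical
 * session (the device path is illustrative only):
 *
 *   echo 32768 > /sys/devices/.../0.0.f000/buffer   # resize buffers
 *   cat /sys/devices/.../0.0.f000/stats             # dump statistics
 *   echo 1 > /sys/devices/.../0.0.f000/stats        # reset statistics
 *
 * Note that buffer_write() refuses sizes above CTC_BUFSIZE_LIMIT, below
 * 576 + LL_HEADER_LENGTH + 2, or smaller than the current MTU while the
 * interface is running.
 */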
2841
2842static int
2843ctc_add_attributes(struct device *dev)
2844{
2845// device_create_file(dev, &dev_attr_buffer);
2846 device_create_file(dev, &dev_attr_loglevel);
2847 device_create_file(dev, &dev_attr_stats);
2848 return 0;
2849}
2850
2851static void
2852ctc_remove_attributes(struct device *dev)
2853{
2854 device_remove_file(dev, &dev_attr_stats);
2855 device_remove_file(dev, &dev_attr_loglevel);
2856// device_remove_file(dev, &dev_attr_buffer);
2857}
2858
2859
2860static void
2861ctc_netdev_unregister(struct net_device * dev)
2862{
2863 struct ctc_priv *privptr;
2864
2865 if (!dev)
2866 return;
2867 privptr = (struct ctc_priv *) dev->priv;
2868 if (privptr->protocol != CTC_PROTO_LINUX_TTY)
2869 unregister_netdev(dev);
2870 else
2871 ctc_tty_unregister_netdev(dev);
2872}
2873
2874static int
2875ctc_netdev_register(struct net_device * dev)
2876{
2877 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2878 if (privptr->protocol != CTC_PROTO_LINUX_TTY)
2879 return register_netdev(dev);
2880 else
2881 return ctc_tty_register_netdev(dev);
2882}
2883
2884static void
2885ctc_free_netdevice(struct net_device * dev, int free_dev)
2886{
2887 struct ctc_priv *privptr;
2888 if (!dev)
2889 return;
2890 privptr = dev->priv;
2891 if (privptr) {
2892 if (privptr->fsm)
2893 kfree_fsm(privptr->fsm);
2894 kfree(privptr);
2895 }
2896#ifdef MODULE
2897 if (free_dev)
2898 free_netdev(dev);
2899#endif
2900}
2901
2902/**
2903 * Initialize everything of the net device except the name and the
2904 * channel structs.
2905 */
2906static struct net_device *
2907ctc_init_netdevice(struct net_device * dev, int alloc_device,
2908 struct ctc_priv *privptr)
2909{
2910 if (!privptr)
2911 return NULL;
2912
2913 DBF_TEXT(setup, 3, __FUNCTION__);
2914 if (alloc_device) {
2915 dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
2916 if (!dev)
2917 return NULL;
2918 memset(dev, 0, sizeof (struct net_device));
2919 }
2920
2921 dev->priv = privptr;
2922 privptr->fsm = init_fsm("ctcdev", dev_state_names,
2923 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
2924 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2925 if (privptr->fsm == NULL) {
2926 if (alloc_device)
2927 kfree(dev);
2928 return NULL;
2929 }
2930 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2931 fsm_settimer(privptr->fsm, &privptr->restart_timer);
2932 if (dev->mtu == 0)
2933 dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
2934 dev->hard_start_xmit = ctc_tx;
2935 dev->open = ctc_open;
2936 dev->stop = ctc_close;
2937 dev->get_stats = ctc_stats;
2938 dev->change_mtu = ctc_change_mtu;
2939 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2940 dev->addr_len = 0;
2941 dev->type = ARPHRD_SLIP;
2942 dev->tx_queue_len = 100;
2943 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2944 SET_MODULE_OWNER(dev);
2945 return dev;
2946}
2947
2948static ssize_t
2949ctc_proto_show(struct device *dev, char *buf)
2950{
2951 struct ctc_priv *priv;
2952
2953 priv = dev->driver_data;
2954 if (!priv)
2955 return -ENODEV;
2956
2957 return sprintf(buf, "%d\n", priv->protocol);
2958}
2959
2960static ssize_t
2961ctc_proto_store(struct device *dev, const char *buf, size_t count)
2962{
2963 struct ctc_priv *priv;
2964 int value;
2965
2966 DBF_TEXT(trace, 3, __FUNCTION__);
2967 pr_debug("%s() called\n", __FUNCTION__);
2968
2969 priv = dev->driver_data;
2970 if (!priv)
2971 return -ENODEV;
2972 sscanf(buf, "%d", &value);
2973 if ((value < 0) || (value > CTC_PROTO_MAX))
2974 return -EINVAL;
2975 priv->protocol = value;
2976
2977 return count;
2978}
2979
2980static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
2981
2982static ssize_t
2983ctc_type_show(struct device *dev, char *buf)
2984{
2985 struct ccwgroup_device *cgdev;
2986
2987 cgdev = to_ccwgroupdev(dev);
2988 if (!cgdev)
2989 return -ENODEV;
2990
2991 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
2992}
2993
2994static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
2995
2996static struct attribute *ctc_attr[] = {
2997 &dev_attr_protocol.attr,
2998 &dev_attr_type.attr,
2999 &dev_attr_buffer.attr,
3000 NULL,
3001};
3002
3003static struct attribute_group ctc_attr_group = {
3004 .attrs = ctc_attr,
3005};
3006
3007static int
3008ctc_add_files(struct device *dev)
3009{
3010 pr_debug("%s() called\n", __FUNCTION__);
3011
3012 return sysfs_create_group(&dev->kobj, &ctc_attr_group);
3013}
3014
3015static void
3016ctc_remove_files(struct device *dev)
3017{
3018 pr_debug("%s() called\n", __FUNCTION__);
3019
3020 sysfs_remove_group(&dev->kobj, &ctc_attr_group);
3021}
3022
3023/**
3024 * Add ctc specific attributes.
3025 * Add ctc private data.
3026 *
3027 * @param cgdev pointer to ccwgroup_device just added
3028 *
3029 * @returns 0 on success, !0 on failure.
3030 */
3031
3032static int
3033ctc_probe_device(struct ccwgroup_device *cgdev)
3034{
3035 struct ctc_priv *priv;
3036 int rc;
3037
3038 pr_debug("%s() called\n", __FUNCTION__);
3039 DBF_TEXT(trace, 3, __FUNCTION__);
3040
3041 if (!get_device(&cgdev->dev))
3042 return -ENODEV;
3043
3044 priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL);
3045 if (!priv) {
3046 ctc_pr_err("%s: Out of memory\n", __func__);
3047 put_device(&cgdev->dev);
3048 return -ENOMEM;
3049 }
3050
3051 memset(priv, 0, sizeof (struct ctc_priv));
3052 rc = ctc_add_files(&cgdev->dev);
3053 if (rc) {
3054 kfree(priv);
3055 put_device(&cgdev->dev);
3056 return rc;
3057 }
3058 priv->buffer_size = CTC_BUFSIZE_DEFAULT;
3059 cgdev->cdev[0]->handler = ctc_irq_handler;
3060 cgdev->cdev[1]->handler = ctc_irq_handler;
3061 cgdev->dev.driver_data = priv;
3062
3063 return 0;
3064}
3065
3066/**
3067 * Set up an interface.
3069 *
3070 * @param cgdev Device to be setup.
3071 *
3072 * @returns 0 on success, !0 on failure.
3073 */
3074static int
3075ctc_new_device(struct ccwgroup_device *cgdev)
3076{
3077 char read_id[CTC_ID_SIZE];
3078 char write_id[CTC_ID_SIZE];
3079 int direction;
3080 enum channel_types type;
3081 struct ctc_priv *privptr;
3082 struct net_device *dev;
3083 int ret;
3084
3085 pr_debug("%s() called\n", __FUNCTION__);
3086 DBF_TEXT(setup, 3, __FUNCTION__);
3087
3088 privptr = cgdev->dev.driver_data;
3089 if (!privptr)
3090 return -ENODEV;
3091
3092 type = get_channel_type(&cgdev->cdev[0]->id);
3093
3094 snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
3095 snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
3096
3097 if (add_channel(cgdev->cdev[0], type))
3098 return -ENOMEM;
3099 if (add_channel(cgdev->cdev[1], type))
3100 return -ENOMEM;
3101
3102 ret = ccw_device_set_online(cgdev->cdev[0]);
3103 if (ret != 0) {
3104 printk(KERN_WARNING
3105 "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
3106 }
3107
3108 ret = ccw_device_set_online(cgdev->cdev[1]);
3109 if (ret != 0) {
3110 printk(KERN_WARNING
3111 "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
3112 }
3113
3114 dev = ctc_init_netdevice(NULL, 1, privptr);
3115
3116 if (!dev) {
3117 ctc_pr_warn("ctc_init_netdevice failed\n");
3118 goto out;
3119 }
3120
3121 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
3122 strlcpy(dev->name, "ctctty%d", IFNAMSIZ);
3123 else
3124 strlcpy(dev->name, "ctc%d", IFNAMSIZ);
3125
3126 for (direction = READ; direction <= WRITE; direction++) {
3127 privptr->channel[direction] =
3128 channel_get(type, direction == READ ? read_id : write_id,
3129 direction);
3130 if (privptr->channel[direction] == NULL) {
3131 if (direction == WRITE)
3132 channel_free(privptr->channel[READ]);
3133
3134 ctc_free_netdevice(dev, 1);
3135 goto out;
3136 }
3137 privptr->channel[direction]->netdev = dev;
3138 privptr->channel[direction]->protocol = privptr->protocol;
3139 privptr->channel[direction]->max_bufsize = privptr->buffer_size;
3140 }
3141 /* sysfs magic */
3142 SET_NETDEV_DEV(dev, &cgdev->dev);
3143
3144 if (ctc_netdev_register(dev) != 0) {
3145 ctc_free_netdevice(dev, 1);
3146 goto out;
3147 }
3148
3149 ctc_add_attributes(&cgdev->dev);
3150
3151 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
3152
3153 print_banner();
3154
3155 ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
3156 dev->name, privptr->channel[READ]->id,
3157 privptr->channel[WRITE]->id, privptr->protocol);
3158
3159 return 0;
3160out:
3161 ccw_device_set_offline(cgdev->cdev[1]);
3162 ccw_device_set_offline(cgdev->cdev[0]);
3163
3164 return -ENODEV;
3165}
3166
3167/**
3168 * Shutdown an interface.
3169 *
3170 * @param cgdev Device to be shut down.
3171 *
3172 * @returns 0 on success, !0 on failure.
3173 */
3174static int
3175ctc_shutdown_device(struct ccwgroup_device *cgdev)
3176{
3177 struct ctc_priv *priv;
3178 struct net_device *ndev;
3179
3180 DBF_TEXT(trace, 3, __FUNCTION__);
3181 pr_debug("%s() called\n", __FUNCTION__);
3182
3183 priv = cgdev->dev.driver_data;
3184 ndev = NULL;
3185 if (!priv)
3186 return -ENODEV;
3187
3188 if (priv->channel[READ]) {
3189 ndev = priv->channel[READ]->netdev;
3190
3191 /* Close the device */
3192 ctc_close(ndev);
3193 ndev->flags &= ~IFF_RUNNING;
3194
3195 ctc_remove_attributes(&cgdev->dev);
3196
3197 channel_free(priv->channel[READ]);
3198 }
3199 if (priv->channel[WRITE])
3200 channel_free(priv->channel[WRITE]);
3201
3202 if (ndev) {
3203 ctc_netdev_unregister(ndev);
3204 ndev->priv = NULL;
3205 ctc_free_netdevice(ndev, 1);
3206 }
3207
3208 if (priv->fsm)
3209 kfree_fsm(priv->fsm);
3210
3211 ccw_device_set_offline(cgdev->cdev[1]);
3212 ccw_device_set_offline(cgdev->cdev[0]);
3213
3214 if (priv->channel[READ])
3215 channel_remove(priv->channel[READ]);
3216 if (priv->channel[WRITE])
3217 channel_remove(priv->channel[WRITE]);
3218
3219 priv->channel[READ] = priv->channel[WRITE] = NULL;
3220
3221 return 0;
3222
3223}
3224
3225static void
3226ctc_remove_device(struct ccwgroup_device *cgdev)
3227{
3228 struct ctc_priv *priv;
3229
3230 pr_debug("%s() called\n", __FUNCTION__);
3231 DBF_TEXT(trace, 3, __FUNCTION__);
3232
3233 priv = cgdev->dev.driver_data;
3234 if (!priv)
3235 return;
3236 if (cgdev->state == CCWGROUP_ONLINE)
3237 ctc_shutdown_device(cgdev);
3238 ctc_remove_files(&cgdev->dev);
3239 cgdev->dev.driver_data = NULL;
3240 kfree(priv);
3241 put_device(&cgdev->dev);
3242}
3243
3244static struct ccwgroup_driver ctc_group_driver = {
3245 .owner = THIS_MODULE,
3246 .name = "ctc",
3247 .max_slaves = 2,
3248 .driver_id = 0xC3E3C3,
3249 .probe = ctc_probe_device,
3250 .remove = ctc_remove_device,
3251 .set_online = ctc_new_device,
3252 .set_offline = ctc_shutdown_device,
3253};
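/*
 * Usage sketch (paths and bus ids are illustrative): the cu3088 layer
 * registers this discipline so that a read/write channel pair can be
 * grouped from user space and then brought online, e.g.
 *
 *   echo 0.0.f000,0.0.f001 > /sys/bus/ccwgroup/drivers/ctc/group
 *   echo 1 > /sys/bus/ccwgroup/devices/0.0.f000/online
 *
 * Grouping triggers ctc_probe_device(), setting the device online calls
 * ctc_new_device(), and offlining ends up in ctc_shutdown_device().
 */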
3254
3255/**
3256 * Module related routines
3257 *****************************************************************************/
3258
3259/**
3260 * Prepare to be unloaded. Free IRQs and release all resources.
3261 * This is called just before this module is unloaded. It is
3262 * <em>not</em> called if the usage count is !0, so we don't need to
3263 * check for that.
3264 */
3265static void __exit
3266ctc_exit(void)
3267{
3268 unregister_cu3088_discipline(&ctc_group_driver);
3269 ctc_tty_cleanup();
3270 ctc_unregister_dbf_views();
3271 ctc_pr_info("CTC driver unloaded\n");
3272}
3273
3274/**
3275 * Initialize module.
3276 * This is called just after the module is loaded.
3277 *
3278 * @return 0 on success, !0 on error.
3279 */
3280static int __init
3281ctc_init(void)
3282{
3283 int ret = 0;
3284
3285 print_banner();
3286
3287 ret = ctc_register_dbf_views();
3288 if (ret) {
3289 ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
3290 return ret;
3291 }
3292 ctc_tty_init();
3293 ret = register_cu3088_discipline(&ctc_group_driver);
3294 if (ret) {
3295 ctc_tty_cleanup();
3296 ctc_unregister_dbf_views();
3297 }
3298 return ret;
3299}
3300
3301module_init(ctc_init);
3302module_exit(ctc_exit);
3303
3304/* --- This is the END my friend --- */
diff --git a/drivers/s390/net/ctctty.c b/drivers/s390/net/ctctty.c
new file mode 100644
index 000000000000..9257d60c7833
--- /dev/null
+++ b/drivers/s390/net/ctctty.c
@@ -0,0 +1,1276 @@
1/*
2 * $Id: ctctty.c,v 1.26 2004/08/04 11:06:55 mschwide Exp $
3 *
4 * CTC / ESCON network driver, tty interface.
5 *
6 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 *
23 */
24
25#include <linux/config.h>
26#include <linux/module.h>
27#include <linux/tty.h>
28#include <linux/serial_reg.h>
29#include <linux/interrupt.h>
30#include <linux/delay.h>
31#include <asm/uaccess.h>
32#include <linux/devfs_fs_kernel.h>
33#include "ctctty.h"
34#include "ctcdbug.h"
35
36#define CTC_TTY_MAJOR 43
37#define CTC_TTY_MAX_DEVICES 64
38
39#define CTC_ASYNC_MAGIC 0x49344C01 /* for paranoia-checking */
40#define CTC_ASYNC_INITIALIZED 0x80000000 /* port was initialized */
41#define CTC_ASYNC_NORMAL_ACTIVE 0x20000000 /* Normal device active */
42#define CTC_ASYNC_CLOSING 0x08000000 /* Serial port is closing */
43#define CTC_ASYNC_CTS_FLOW 0x04000000 /* Do CTS flow control */
44#define CTC_ASYNC_CHECK_CD 0x02000000 /* i.e., CLOCAL */
45#define CTC_ASYNC_HUP_NOTIFY 0x0001 /* Notify tty on hangups/closes */
46#define CTC_ASYNC_NETDEV_OPEN 0x0002 /* Underlying netdev is open */
47#define CTC_ASYNC_TX_LINESTAT 0x0004 /* Must send line status */
48#define CTC_ASYNC_SPLIT_TERMIOS 0x0008 /* Sep. termios for dialin/out */
49#define CTC_TTY_XMIT_SIZE 1024 /* Default bufsize for write */
50#define CTC_SERIAL_XMIT_MAX 4000 /* Maximum bufsize for write */
51
52/* Private data (similar to async_struct in <linux/serial.h>) */
53typedef struct {
54 int magic;
55 int flags; /* defined in tty.h */
56 int mcr; /* Modem control register */
57 int msr; /* Modem status register */
58 int lsr; /* Line status register */
59 int line;
60 int count; /* # of fd on device */
61 int blocked_open; /* # of blocked opens */
62 struct net_device *netdev;
63 struct sk_buff_head tx_queue; /* transmit queue */
64 struct sk_buff_head rx_queue; /* receive queue */
65 struct tty_struct *tty; /* Pointer to corresponding tty */
66 wait_queue_head_t open_wait;
67 wait_queue_head_t close_wait;
68 struct semaphore write_sem;
69 struct tasklet_struct tasklet;
70 struct timer_list stoptimer;
71} ctc_tty_info;
72
73/* Description of one CTC-tty */
74typedef struct {
75 struct tty_driver *ctc_tty_device; /* tty-device */
76 ctc_tty_info info[CTC_TTY_MAX_DEVICES]; /* Private data */
77} ctc_tty_driver;
78
79static ctc_tty_driver *driver;
80
81/* Leave this unchanged unless you know what you do! */
82#define MODEM_PARANOIA_CHECK
83#define MODEM_DO_RESTART
84
85#define CTC_TTY_NAME "ctctty"
86
87static __u32 ctc_tty_magic = CTC_ASYNC_MAGIC;
88static int ctc_tty_shuttingdown = 0;
89
90static spinlock_t ctc_tty_lock;
91
92/* ctc_tty_try_read() is called from within ctc_tty_netif_rx()
93 * to stuff incoming data directly into a tty's flip-buffer. If the
94 * flip buffer is full, the packet gets queued up.
95 *
96 * Return:
97 * 1 = Success
98 * 0 = Failure, data has to be buffered and later processed by
99 * ctc_tty_readmodem().
100 */
101static int
102ctc_tty_try_read(ctc_tty_info * info, struct sk_buff *skb)
103{
104 int c;
105 int len;
106 struct tty_struct *tty;
107
108 DBF_TEXT(trace, 5, __FUNCTION__);
109 if ((tty = info->tty)) {
110 if (info->mcr & UART_MCR_RTS) {
111 c = TTY_FLIPBUF_SIZE - tty->flip.count;
112 len = skb->len;
113 if (c >= len) {
114 memcpy(tty->flip.char_buf_ptr, skb->data, len);
115 memset(tty->flip.flag_buf_ptr, 0, len);
116 tty->flip.count += len;
117 tty->flip.char_buf_ptr += len;
118 tty->flip.flag_buf_ptr += len;
119 tty_flip_buffer_push(tty);
120 kfree_skb(skb);
121 return 1;
122 }
123 }
124 }
125 return 0;
126}
127
128/* ctc_tty_readmodem() is called periodically from within timer-interrupt.
129 * It tries to get received data from the receive queue and stuff it into
130 * the tty's flip-buffer.
131 */
132static int
133ctc_tty_readmodem(ctc_tty_info *info)
134{
135 int ret = 1;
136 struct tty_struct *tty;
137
138 DBF_TEXT(trace, 5, __FUNCTION__);
139 if ((tty = info->tty)) {
140 if (info->mcr & UART_MCR_RTS) {
141 int c = TTY_FLIPBUF_SIZE - tty->flip.count;
142 struct sk_buff *skb;
143
144 if ((c > 0) && (skb = skb_dequeue(&info->rx_queue))) {
145 int len = skb->len;
146 if (len > c)
147 len = c;
148 memcpy(tty->flip.char_buf_ptr, skb->data, len);
149 skb_pull(skb, len);
150 memset(tty->flip.flag_buf_ptr, 0, len);
151 tty->flip.count += len;
152 tty->flip.char_buf_ptr += len;
153 tty->flip.flag_buf_ptr += len;
154 tty_flip_buffer_push(tty);
155 if (skb->len > 0)
156 skb_queue_head(&info->rx_queue, skb);
157 else {
158 kfree_skb(skb);
159 ret = skb_queue_len(&info->rx_queue);
160 }
161 }
162 }
163 }
164 return ret;
165}
166
167void
168ctc_tty_setcarrier(struct net_device *netdev, int on)
169{
170 int i;
171
172 DBF_TEXT(trace, 4, __FUNCTION__);
173 if ((!driver) || ctc_tty_shuttingdown)
174 return;
175 for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
176 if (driver->info[i].netdev == netdev) {
177 ctc_tty_info *info = &driver->info[i];
178 if (on)
179 info->msr |= UART_MSR_DCD;
180 else
181 info->msr &= ~UART_MSR_DCD;
182 if ((info->flags & CTC_ASYNC_CHECK_CD) && (!on) && info->tty)
183 tty_hangup(info->tty);
184 }
185}
186
187void
188ctc_tty_netif_rx(struct sk_buff *skb)
189{
190 int i;
191 ctc_tty_info *info = NULL;
192
193 DBF_TEXT(trace, 5, __FUNCTION__);
194 if (!skb)
195 return;
196 if ((!skb->dev) || (!driver) || ctc_tty_shuttingdown) {
197 dev_kfree_skb(skb);
198 return;
199 }
200 for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
201 if (driver->info[i].netdev == skb->dev) {
202 info = &driver->info[i];
203 break;
204 }
205 if (!info) {
206 dev_kfree_skb(skb);
207 return;
208 }
209 if (skb->len < 6) {
210 dev_kfree_skb(skb);
211 return;
212 }
213 if (memcmp(skb->data, &ctc_tty_magic, sizeof(__u32))) {
214 dev_kfree_skb(skb);
215 return;
216 }
217 skb_pull(skb, sizeof(__u32));
218
219 i = *((int *)skb->data);
220 skb_pull(skb, sizeof(info->mcr));
221 if (i & UART_MCR_RTS) {
222 info->msr |= UART_MSR_CTS;
223 if (info->flags & CTC_ASYNC_CTS_FLOW)
224 info->tty->hw_stopped = 0;
225 } else {
226 info->msr &= ~UART_MSR_CTS;
227 if (info->flags & CTC_ASYNC_CTS_FLOW)
228 info->tty->hw_stopped = 1;
229 }
230 if (i & UART_MCR_DTR)
231 info->msr |= UART_MSR_DSR;
232 else
233 info->msr &= ~UART_MSR_DSR;
234 if (skb->len <= 0) {
235 kfree_skb(skb);
236 return;
237 }
238 /* Try to deliver directly via tty-flip-buf if queue is empty */
239 if (skb_queue_empty(&info->rx_queue))
240 if (ctc_tty_try_read(info, skb))
241 return;
242 /* Direct deliver failed or queue wasn't empty.
243 * Queue up for later dequeueing via timer-irq.
244 */
245 skb_queue_tail(&info->rx_queue, skb);
246 /* Schedule dequeuing */
247 tasklet_schedule(&info->tasklet);
248}
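/*
 * Frame layout expected above (a sketch): every tty frame starts with
 * the 4 byte CTC_ASYNC_MAGIC followed by a copy of the sender's modem
 * control register, then optional payload:
 *
 *   | __u32 magic | int mcr | payload (may be empty) |
 *
 * Frames shorter than 6 bytes or with a wrong magic are dropped; a frame
 * with no payload only updates the modem status lines (CTS/DSR).
 */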
249
250static int
251ctc_tty_tint(ctc_tty_info * info)
252{
253 struct sk_buff *skb = skb_dequeue(&info->tx_queue);
254 int stopped = (info->tty->hw_stopped || info->tty->stopped);
255 int wake = 1;
256 int rc;
257
258 DBF_TEXT(trace, 4, __FUNCTION__);
259 if (!info->netdev) {
260 if (skb)
261 kfree_skb(skb);
262 return 0;
263 }
264 if (info->flags & CTC_ASYNC_TX_LINESTAT) {
265 int skb_res = info->netdev->hard_header_len +
266 sizeof(info->mcr) + sizeof(__u32);
267 /* If we must update line status,
268 * create an empty dummy skb and insert it.
269 */
270 if (skb)
271 skb_queue_head(&info->tx_queue, skb);
272
273 skb = dev_alloc_skb(skb_res);
274 if (!skb) {
275 printk(KERN_WARNING
276 "ctc_tty: Out of memory in %s%d tint\n",
277 CTC_TTY_NAME, info->line);
278 return 1;
279 }
280 skb_reserve(skb, skb_res);
281 stopped = 0;
282 wake = 0;
283 }
284 if (!skb)
285 return 0;
286 if (stopped) {
287 skb_queue_head(&info->tx_queue, skb);
288 return 1;
289 }
290#if 0
291 if (skb->len > 0)
292 printk(KERN_DEBUG "tint: %d %02x\n", skb->len, *(skb->data));
293 else
294 printk(KERN_DEBUG "tint: %d STAT\n", skb->len);
295#endif
296 memcpy(skb_push(skb, sizeof(info->mcr)), &info->mcr, sizeof(info->mcr));
297 memcpy(skb_push(skb, sizeof(__u32)), &ctc_tty_magic, sizeof(__u32));
298 rc = info->netdev->hard_start_xmit(skb, info->netdev);
299 if (rc) {
300 skb_pull(skb, sizeof(info->mcr) + sizeof(__u32));
301 if (skb->len > 0)
302 skb_queue_head(&info->tx_queue, skb);
303 else
304 kfree_skb(skb);
305 } else {
306 struct tty_struct *tty = info->tty;
307
308 info->flags &= ~CTC_ASYNC_TX_LINESTAT;
309 if (tty) {
310 tty_wakeup(tty);
311 }
312 }
313 return (skb_queue_empty(&info->tx_queue) ? 0 : 1);
314}
315
316/************************************************************
317 *
318 * Modem-functions
319 *
320 * mostly "stolen" from original Linux-serial.c and friends.
321 *
322 ************************************************************/
323
324static inline int
325ctc_tty_paranoia_check(ctc_tty_info * info, char *name, const char *routine)
326{
327#ifdef MODEM_PARANOIA_CHECK
328 if (!info) {
329 printk(KERN_WARNING "ctc_tty: null info_struct for %s in %s\n",
330 name, routine);
331 return 1;
332 }
333 if (info->magic != CTC_ASYNC_MAGIC) {
334 printk(KERN_WARNING "ctc_tty: bad magic for info struct %s in %s\n",
335 name, routine);
336 return 1;
337 }
338#endif
339 return 0;
340}
341
342static void
343ctc_tty_inject(ctc_tty_info *info, char c)
344{
345 int skb_res;
346 struct sk_buff *skb;
347
348 DBF_TEXT(trace, 4, __FUNCTION__);
349 if (ctc_tty_shuttingdown)
350 return;
351 skb_res = info->netdev->hard_header_len + sizeof(info->mcr) +
352 sizeof(__u32) + 1;
353 skb = dev_alloc_skb(skb_res);
354 if (!skb) {
355 printk(KERN_WARNING
356 "ctc_tty: Out of memory in %s%d tx_inject\n",
357 CTC_TTY_NAME, info->line);
358 return;
359 }
360 skb_reserve(skb, skb_res);
361 *(skb_put(skb, 1)) = c;
362 skb_queue_head(&info->tx_queue, skb);
363 tasklet_schedule(&info->tasklet);
364}
365
366static void
367ctc_tty_transmit_status(ctc_tty_info *info)
368{
369 DBF_TEXT(trace, 5, __FUNCTION__);
370 if (ctc_tty_shuttingdown)
371 return;
372 info->flags |= CTC_ASYNC_TX_LINESTAT;
373 tasklet_schedule(&info->tasklet);
374}
375
376static void
377ctc_tty_change_speed(ctc_tty_info * info)
378{
379 unsigned int cflag;
380 unsigned int quot;
381 int i;
382
383 DBF_TEXT(trace, 3, __FUNCTION__);
384 if (!info->tty || !info->tty->termios)
385 return;
386 cflag = info->tty->termios->c_cflag;
387
388 quot = i = cflag & CBAUD;
389 if (i & CBAUDEX) {
390 i &= ~CBAUDEX;
391 if (i < 1 || i > 2)
392 info->tty->termios->c_cflag &= ~CBAUDEX;
393 else
394 i += 15;
395 }
396 if (quot) {
397 info->mcr |= UART_MCR_DTR;
398 info->mcr |= UART_MCR_RTS;
399 ctc_tty_transmit_status(info);
400 } else {
401 info->mcr &= ~UART_MCR_DTR;
402 info->mcr &= ~UART_MCR_RTS;
403 ctc_tty_transmit_status(info);
404 return;
405 }
406
407 /* CTS flow control flag and modem status interrupts */
408 if (cflag & CRTSCTS) {
409 info->flags |= CTC_ASYNC_CTS_FLOW;
410 } else
411 info->flags &= ~CTC_ASYNC_CTS_FLOW;
412 if (cflag & CLOCAL)
413 info->flags &= ~CTC_ASYNC_CHECK_CD;
414 else {
415 info->flags |= CTC_ASYNC_CHECK_CD;
416 }
417}
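/*
 * Descriptive note: there is no real UART here, so the "speed" only
 * decides the modem control lines. A zero baud code (B0) drops DTR and
 * RTS, which the peer sees as a hangup; any non-zero baud raises them.
 * The CBAUDEX handling merely normalizes the extended baud codes, while
 * CRTSCTS/CLOCAL toggle the CTC_ASYNC_CTS_FLOW / CTC_ASYNC_CHECK_CD
 * flags used elsewhere in this file.
 */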
418
419static int
420ctc_tty_startup(ctc_tty_info * info)
421{
422 DBF_TEXT(trace, 3, __FUNCTION__);
423 if (info->flags & CTC_ASYNC_INITIALIZED)
424 return 0;
425#ifdef CTC_DEBUG_MODEM_OPEN
426 printk(KERN_DEBUG "starting up %s%d ...\n", CTC_TTY_NAME, info->line);
427#endif
428 /*
429 * Now, initialize the UART
430 */
431 info->mcr = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2;
432 if (info->tty)
433 clear_bit(TTY_IO_ERROR, &info->tty->flags);
434 /*
435 * and set the speed of the serial port
436 */
437 ctc_tty_change_speed(info);
438
439 info->flags |= CTC_ASYNC_INITIALIZED;
440 if (!(info->flags & CTC_ASYNC_NETDEV_OPEN))
441 info->netdev->open(info->netdev);
442 info->flags |= CTC_ASYNC_NETDEV_OPEN;
443 return 0;
444}
445
446static void
447ctc_tty_stopdev(unsigned long data)
448{
449 ctc_tty_info *info = (ctc_tty_info *)data;
450
451 if ((!info) || (!info->netdev) ||
452 (info->flags & CTC_ASYNC_INITIALIZED))
453 return;
454 info->netdev->stop(info->netdev);
455 info->flags &= ~CTC_ASYNC_NETDEV_OPEN;
456}
457
458/*
459 * This routine will shutdown a serial port; interrupts are disabled, and
460 * DTR is dropped if the hangup on close termio flag is on.
461 */
462static void
463ctc_tty_shutdown(ctc_tty_info * info)
464{
465 DBF_TEXT(trace, 3, __FUNCTION__);
466 if (!(info->flags & CTC_ASYNC_INITIALIZED))
467 return;
468#ifdef CTC_DEBUG_MODEM_OPEN
469 printk(KERN_DEBUG "Shutting down %s%d ....\n", CTC_TTY_NAME, info->line);
470#endif
471 info->msr &= ~UART_MSR_RI;
472 if (!info->tty || (info->tty->termios->c_cflag & HUPCL))
473 info->mcr &= ~(UART_MCR_DTR | UART_MCR_RTS);
474 if (info->tty)
475 set_bit(TTY_IO_ERROR, &info->tty->flags);
476 mod_timer(&info->stoptimer, jiffies + (10 * HZ));
477 skb_queue_purge(&info->tx_queue);
478 skb_queue_purge(&info->rx_queue);
479 info->flags &= ~CTC_ASYNC_INITIALIZED;
480}
481
482/* ctc_tty_write() is the main send routine. It is called from the upper
483 * tty layers within the kernel to send data. The data is copied into
484 * sk_buffs in chunks of at most CTC_TTY_XMIT_SIZE bytes and queued on
485 * the tx_queue for deferred transmission by the tasklet.
486 */
491static int
492ctc_tty_write(struct tty_struct *tty, const u_char * buf, int count)
493{
494 int c;
495 int total = 0;
496 ctc_tty_info *info;
497
498 DBF_TEXT(trace, 5, __FUNCTION__);
499 if (!tty || ctc_tty_shuttingdown)
500 goto ex;
501 info = (ctc_tty_info *) tty->driver_data;
502 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write"))
503 goto ex;
504
505 if (!info->netdev) {
506 total = -ENODEV;
507 goto ex;
508 }
509 while (1) {
510 struct sk_buff *skb;
511 int skb_res;
512
513 c = (count < CTC_TTY_XMIT_SIZE) ? count : CTC_TTY_XMIT_SIZE;
514 if (c <= 0)
515 break;
516
517 skb_res = info->netdev->hard_header_len + sizeof(info->mcr) +
518 sizeof(__u32);
519 skb = dev_alloc_skb(skb_res + c);
520 if (!skb) {
521 printk(KERN_WARNING
522 "ctc_tty: Out of memory in %s%d write\n",
523 CTC_TTY_NAME, info->line);
524 break;
525 }
526 skb_reserve(skb, skb_res);
527 memcpy(skb_put(skb, c), buf, c);
528 skb_queue_tail(&info->tx_queue, skb);
529 buf += c;
530 total += c;
531 count -= c;
532 }
533 if (skb_queue_len(&info->tx_queue)) {
534 info->lsr &= ~UART_LSR_TEMT;
535 tasklet_schedule(&info->tasklet);
536 }
537ex:
538 DBF_TEXT(trace, 6, __FUNCTION__);
539 return total;
540}
541
542static int
543ctc_tty_write_room(struct tty_struct *tty)
544{
545 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
546
547 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write_room"))
548 return 0;
549 return CTC_TTY_XMIT_SIZE;
550}
551
552static int
553ctc_tty_chars_in_buffer(struct tty_struct *tty)
554{
555 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
556
557 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_chars_in_buffer"))
558 return 0;
559 return 0;
560}
561
562static void
563ctc_tty_flush_buffer(struct tty_struct *tty)
564{
565 ctc_tty_info *info;
566 unsigned long flags;
567
568 DBF_TEXT(trace, 4, __FUNCTION__);
569 if (!tty)
570 goto ex;
571 spin_lock_irqsave(&ctc_tty_lock, flags);
572 info = (ctc_tty_info *) tty->driver_data;
573 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_buffer")) {
574 spin_unlock_irqrestore(&ctc_tty_lock, flags);
575 goto ex;
576 }
577 skb_queue_purge(&info->tx_queue);
578 info->lsr |= UART_LSR_TEMT;
579 spin_unlock_irqrestore(&ctc_tty_lock, flags);
580 wake_up_interruptible(&tty->write_wait);
581 tty_wakeup(tty);
582ex:
583 DBF_TEXT_(trace, 2, "ex: %s ", __FUNCTION__);
584 return;
585}
586
587static void
588ctc_tty_flush_chars(struct tty_struct *tty)
589{
590 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
591
592 DBF_TEXT(trace, 4, __FUNCTION__);
593 if (ctc_tty_shuttingdown)
594 return;
595 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_chars"))
596 return;
597 if (tty->stopped || tty->hw_stopped || (!skb_queue_len(&info->tx_queue)))
598 return;
599 tasklet_schedule(&info->tasklet);
600}
601
602/*
603 * ------------------------------------------------------------
604 * ctc_tty_throttle()
605 *
606 * This routine is called by the upper-layer tty layer to signal that
607 * incoming characters should be throttled.
608 * ------------------------------------------------------------
609 */
610static void
611ctc_tty_throttle(struct tty_struct *tty)
612{
613 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
614
615 DBF_TEXT(trace, 4, __FUNCTION__);
616 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_throttle"))
617 return;
618 info->mcr &= ~UART_MCR_RTS;
619 if (I_IXOFF(tty))
620 ctc_tty_inject(info, STOP_CHAR(tty));
621 ctc_tty_transmit_status(info);
622}
623
624static void
625ctc_tty_unthrottle(struct tty_struct *tty)
626{
627 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
628
629 DBF_TEXT(trace, 4, __FUNCTION__);
630 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_unthrottle"))
631 return;
632 info->mcr |= UART_MCR_RTS;
633 if (I_IXOFF(tty))
634 ctc_tty_inject(info, START_CHAR(tty));
635 ctc_tty_transmit_status(info);
636}
637
638/*
639 * ------------------------------------------------------------
640 * ctc_tty_ioctl() and friends
641 * ------------------------------------------------------------
642 */
643
644/*
645 * ctc_tty_get_lsr_info - get line status register info
646 *
647 * Purpose: Let user call ioctl() to get info when the UART physically
648 * is emptied. On bus types like RS485, the transmitter must
649 * release the bus after transmitting. This must be done when
650 * the transmit shift register is empty, not be done when the
651 * transmit holding register is empty. This functionality
652 * allows RS485 driver to be written in user space.
653 */
654static int
655ctc_tty_get_lsr_info(ctc_tty_info * info, uint __user *value)
656{
657 u_char status;
658 uint result;
659 ulong flags;
660
661 DBF_TEXT(trace, 4, __FUNCTION__);
662 spin_lock_irqsave(&ctc_tty_lock, flags);
663 status = info->lsr;
664 spin_unlock_irqrestore(&ctc_tty_lock, flags);
665 result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
666 put_user(result, value);
667 return 0;
668}
669
670
671static int ctc_tty_tiocmget(struct tty_struct *tty, struct file *file)
672{
673 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
674 u_char control,
675 status;
676 uint result;
677 ulong flags;
678
679 DBF_TEXT(trace, 4, __FUNCTION__);
680 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
681 return -ENODEV;
682 if (tty->flags & (1 << TTY_IO_ERROR))
683 return -EIO;
684
685 control = info->mcr;
686 spin_lock_irqsave(&ctc_tty_lock, flags);
687 status = info->msr;
688 spin_unlock_irqrestore(&ctc_tty_lock, flags);
689 result = ((control & UART_MCR_RTS) ? TIOCM_RTS : 0)
690 | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0)
691 | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0)
692 | ((status & UART_MSR_RI) ? TIOCM_RNG : 0)
693 | ((status & UART_MSR_DSR) ? TIOCM_DSR : 0)
694 | ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
695 return result;
696}
697
698static int
699ctc_tty_tiocmset(struct tty_struct *tty, struct file *file,
700 unsigned int set, unsigned int clear)
701{
702 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
703
704 DBF_TEXT(trace, 4, __FUNCTION__);
705 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
706 return -ENODEV;
707 if (tty->flags & (1 << TTY_IO_ERROR))
708 return -EIO;
709
710 if (set & TIOCM_RTS)
711 info->mcr |= UART_MCR_RTS;
712 if (set & TIOCM_DTR)
713 info->mcr |= UART_MCR_DTR;
714
715 if (clear & TIOCM_RTS)
716 info->mcr &= ~UART_MCR_RTS;
717 if (clear & TIOCM_DTR)
718 info->mcr &= ~UART_MCR_DTR;
719
720 if ((set | clear) & (TIOCM_RTS|TIOCM_DTR))
721 ctc_tty_transmit_status(info);
722 return 0;
723}
724
725static int
726ctc_tty_ioctl(struct tty_struct *tty, struct file *file,
727 uint cmd, ulong arg)
728{
729 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
730 int error;
731 int retval;
732
733 DBF_TEXT(trace, 4, __FUNCTION__);
734 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
735 return -ENODEV;
736 if (tty->flags & (1 << TTY_IO_ERROR))
737 return -EIO;
738 switch (cmd) {
739 case TCSBRK: /* SVID version: non-zero arg --> no break */
740#ifdef CTC_DEBUG_MODEM_IOCTL
741 printk(KERN_DEBUG "%s%d ioctl TCSBRK\n", CTC_TTY_NAME, info->line);
742#endif
743 retval = tty_check_change(tty);
744 if (retval)
745 return retval;
746 tty_wait_until_sent(tty, 0);
747 return 0;
748 case TCSBRKP: /* support for POSIX tcsendbreak() */
749#ifdef CTC_DEBUG_MODEM_IOCTL
750 printk(KERN_DEBUG "%s%d ioctl TCSBRKP\n", CTC_TTY_NAME, info->line);
751#endif
752 retval = tty_check_change(tty);
753 if (retval)
754 return retval;
755 tty_wait_until_sent(tty, 0);
756 return 0;
757 case TIOCGSOFTCAR:
758#ifdef CTC_DEBUG_MODEM_IOCTL
759 printk(KERN_DEBUG "%s%d ioctl TIOCGSOFTCAR\n", CTC_TTY_NAME,
760 info->line);
761#endif
762 error = put_user(C_CLOCAL(tty) ? 1 : 0, (ulong __user *) arg);
763 return error;
764 case TIOCSSOFTCAR:
765#ifdef CTC_DEBUG_MODEM_IOCTL
766 printk(KERN_DEBUG "%s%d ioctl TIOCSSOFTCAR\n", CTC_TTY_NAME,
767 info->line);
768#endif
769 error = get_user(arg, (ulong __user *) arg);
770 if (error)
771 return error;
772 tty->termios->c_cflag =
773 ((tty->termios->c_cflag & ~CLOCAL) |
774 (arg ? CLOCAL : 0));
775 return 0;
776 case TIOCSERGETLSR: /* Get line status register */
777#ifdef CTC_DEBUG_MODEM_IOCTL
778 printk(KERN_DEBUG "%s%d ioctl TIOCSERGETLSR\n", CTC_TTY_NAME,
779 info->line);
780#endif
781 if (access_ok(VERIFY_WRITE, (void __user *) arg, sizeof(uint)))
782 return ctc_tty_get_lsr_info(info, (uint __user *) arg);
783 else
784 return -EFAULT;
785 default:
786#ifdef CTC_DEBUG_MODEM_IOCTL
787 printk(KERN_DEBUG "UNKNOWN ioctl 0x%08x on %s%d\n", cmd,
788 CTC_TTY_NAME, info->line);
789#endif
790 return -ENOIOCTLCMD;
791 }
792 return 0;
793}
794
795static void
796ctc_tty_set_termios(struct tty_struct *tty, struct termios *old_termios)
797{
798 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
799 unsigned int cflag = tty->termios->c_cflag;
800
801 DBF_TEXT(trace, 4, __FUNCTION__);
802 ctc_tty_change_speed(info);
803
804 /* Handle transition to B0 */
805 if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) {
806 info->mcr &= ~(UART_MCR_DTR|UART_MCR_RTS);
807 ctc_tty_transmit_status(info);
808 }
809
810 /* Handle transition from B0 to other */
811 if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
812 info->mcr |= UART_MCR_DTR;
813 if (!(tty->termios->c_cflag & CRTSCTS) ||
814 !test_bit(TTY_THROTTLED, &tty->flags)) {
815 info->mcr |= UART_MCR_RTS;
816 }
817 ctc_tty_transmit_status(info);
818 }
819
820 /* Handle turning off CRTSCTS */
821 if ((old_termios->c_cflag & CRTSCTS) &&
822 !(tty->termios->c_cflag & CRTSCTS))
823 tty->hw_stopped = 0;
824}
825
826/*
827 * ------------------------------------------------------------
828 * ctc_tty_open() and friends
829 * ------------------------------------------------------------
830 */
831static int
832ctc_tty_block_til_ready(struct tty_struct *tty, struct file *filp, ctc_tty_info *info)
833{
834 DECLARE_WAITQUEUE(wait, NULL);
835 int do_clocal = 0;
836 unsigned long flags;
837 int retval;
838
839 DBF_TEXT(trace, 4, __FUNCTION__);
840 /*
841 * If the device is in the middle of being closed, then block
842 * until it's done, and then try again.
843 */
844 if (tty_hung_up_p(filp) ||
845 (info->flags & CTC_ASYNC_CLOSING)) {
846 if (info->flags & CTC_ASYNC_CLOSING)
847 wait_event(info->close_wait,
848 !(info->flags & CTC_ASYNC_CLOSING));
849#ifdef MODEM_DO_RESTART
850 if (info->flags & CTC_ASYNC_HUP_NOTIFY)
851 return -EAGAIN;
852 else
853 return -ERESTARTSYS;
854#else
855 return -EAGAIN;
856#endif
857 }
858 /*
859 * If non-blocking mode is set, then make the check up front
860 * and then exit.
861 */
862 if ((filp->f_flags & O_NONBLOCK) ||
863 (tty->flags & (1 << TTY_IO_ERROR))) {
864 info->flags |= CTC_ASYNC_NORMAL_ACTIVE;
865 return 0;
866 }
867 if (tty->termios->c_cflag & CLOCAL)
868 do_clocal = 1;
869 /*
870 * Block waiting for the carrier detect and the line to become
871 * free (i.e., not in use by the callout). While we are in
872 * this loop, info->count is dropped by one, so that
873 * ctc_tty_close() knows when to free things. We restore it upon
874 * exit, either normal or abnormal.
875 */
876 retval = 0;
877 add_wait_queue(&info->open_wait, &wait);
878#ifdef CTC_DEBUG_MODEM_OPEN
879 printk(KERN_DEBUG "ctc_tty_block_til_ready before block: %s%d, count = %d\n",
880 CTC_TTY_NAME, info->line, info->count);
881#endif
882 spin_lock_irqsave(&ctc_tty_lock, flags);
883 if (!(tty_hung_up_p(filp)))
884 info->count--;
885 spin_unlock_irqrestore(&ctc_tty_lock, flags);
886 info->blocked_open++;
887 while (1) {
888 set_current_state(TASK_INTERRUPTIBLE);
889 if (tty_hung_up_p(filp) ||
890 !(info->flags & CTC_ASYNC_INITIALIZED)) {
891#ifdef MODEM_DO_RESTART
892 if (info->flags & CTC_ASYNC_HUP_NOTIFY)
893 retval = -EAGAIN;
894 else
895 retval = -ERESTARTSYS;
896#else
897 retval = -EAGAIN;
898#endif
899 break;
900 }
901 if (!(info->flags & CTC_ASYNC_CLOSING) &&
902 (do_clocal || (info->msr & UART_MSR_DCD))) {
903 break;
904 }
905 if (signal_pending(current)) {
906 retval = -ERESTARTSYS;
907 break;
908 }
909#ifdef CTC_DEBUG_MODEM_OPEN
910 printk(KERN_DEBUG "ctc_tty_block_til_ready blocking: %s%d, count = %d\n",
911 CTC_TTY_NAME, info->line, info->count);
912#endif
913 schedule();
914 }
915	__set_current_state(TASK_RUNNING);
916 remove_wait_queue(&info->open_wait, &wait);
917 if (!tty_hung_up_p(filp))
918 info->count++;
919 info->blocked_open--;
920#ifdef CTC_DEBUG_MODEM_OPEN
921 printk(KERN_DEBUG "ctc_tty_block_til_ready after blocking: %s%d, count = %d\n",
922 CTC_TTY_NAME, info->line, info->count);
923#endif
924 if (retval)
925 return retval;
926 info->flags |= CTC_ASYNC_NORMAL_ACTIVE;
927 return 0;
928}
929
930/*
931 * This routine is called whenever a serial port is opened. It
932 * enables interrupts for a serial port, linking its async structure into
933 * the IRQ chain. It also performs the serial-specific
934 * initialization for the tty structure.
935 */
936static int
937ctc_tty_open(struct tty_struct *tty, struct file *filp)
938{
939 ctc_tty_info *info;
940 unsigned long saveflags;
941	int retval;
942	int line;
943
944 DBF_TEXT(trace, 3, __FUNCTION__);
945 line = tty->index;
946	if (line < 0 || line >= CTC_TTY_MAX_DEVICES)
947 return -ENODEV;
948 info = &driver->info[line];
949 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_open"))
950 return -ENODEV;
951 if (!info->netdev)
952 return -ENODEV;
953#ifdef CTC_DEBUG_MODEM_OPEN
954 printk(KERN_DEBUG "ctc_tty_open %s, count = %d\n", tty->name,
955 info->count);
956#endif
957 spin_lock_irqsave(&ctc_tty_lock, saveflags);
958 info->count++;
959 tty->driver_data = info;
960 info->tty = tty;
961 spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
962 /*
963 * Start up serial port
964 */
965 retval = ctc_tty_startup(info);
966 if (retval) {
967#ifdef CTC_DEBUG_MODEM_OPEN
968 printk(KERN_DEBUG "ctc_tty_open return after startup\n");
969#endif
970 return retval;
971 }
972 retval = ctc_tty_block_til_ready(tty, filp, info);
973 if (retval) {
974#ifdef CTC_DEBUG_MODEM_OPEN
975		printk(KERN_DEBUG "ctc_tty_open return after ctc_tty_block_til_ready\n");
976#endif
977 return retval;
978 }
979#ifdef CTC_DEBUG_MODEM_OPEN
980 printk(KERN_DEBUG "ctc_tty_open %s successful...\n", tty->name);
981#endif
982 return 0;
983}
984
985static void
986ctc_tty_close(struct tty_struct *tty, struct file *filp)
987{
988 ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
989 ulong flags;
990 ulong timeout;
991 DBF_TEXT(trace, 3, __FUNCTION__);
992 if (!info || ctc_tty_paranoia_check(info, tty->name, "ctc_tty_close"))
993 return;
994 spin_lock_irqsave(&ctc_tty_lock, flags);
995 if (tty_hung_up_p(filp)) {
996 spin_unlock_irqrestore(&ctc_tty_lock, flags);
997#ifdef CTC_DEBUG_MODEM_OPEN
998 printk(KERN_DEBUG "ctc_tty_close return after tty_hung_up_p\n");
999#endif
1000 return;
1001 }
1002 if ((tty->count == 1) && (info->count != 1)) {
1003 /*
1004 * Uh, oh. tty->count is 1, which means that the tty
1005 * structure will be freed. Info->count should always
1006 * be one in these conditions. If it's greater than
1007 * one, we've got real problems, since it means the
1008 * serial port won't be shutdown.
1009 */
1010 printk(KERN_ERR "ctc_tty_close: bad port count; tty->count is 1, "
1011 "info->count is %d\n", info->count);
1012 info->count = 1;
1013 }
1014 if (--info->count < 0) {
1015 printk(KERN_ERR "ctc_tty_close: bad port count for %s%d: %d\n",
1016 CTC_TTY_NAME, info->line, info->count);
1017 info->count = 0;
1018 }
1019 if (info->count) {
1020		spin_unlock_irqrestore(&ctc_tty_lock, flags);
1021#ifdef CTC_DEBUG_MODEM_OPEN
1022 printk(KERN_DEBUG "ctc_tty_close after info->count != 0\n");
1023#endif
1024 return;
1025 }
1026 info->flags |= CTC_ASYNC_CLOSING;
1027 tty->closing = 1;
1028 /*
1029 * At this point we stop accepting input. To do this, we
1030 * disable the receive line status interrupts, and tell the
1031 * interrupt driver to stop checking the data ready bit in the
1032 * line status register.
1033 */
1034 if (info->flags & CTC_ASYNC_INITIALIZED) {
1035 tty_wait_until_sent(tty, 30*HZ); /* 30 seconds timeout */
1036 /*
1037 * Before we drop DTR, make sure the UART transmitter
1038 * has completely drained; this is especially
1039 * important if there is a transmit FIFO!
1040 */
1041 timeout = jiffies + HZ;
1042 while (!(info->lsr & UART_LSR_TEMT)) {
1043 spin_unlock_irqrestore(&ctc_tty_lock, flags);
1044 msleep(500);
1045 spin_lock_irqsave(&ctc_tty_lock, flags);
1046 if (time_after(jiffies,timeout))
1047 break;
1048 }
1049 }
1050 ctc_tty_shutdown(info);
1051 if (tty->driver->flush_buffer) {
1052 skb_queue_purge(&info->tx_queue);
1053 info->lsr |= UART_LSR_TEMT;
1054 }
1055 tty_ldisc_flush(tty);
1056	info->tty = NULL;
1057 tty->closing = 0;
1058 if (info->blocked_open) {
1059 set_current_state(TASK_INTERRUPTIBLE);
1060 schedule_timeout(HZ/2);
1061 wake_up_interruptible(&info->open_wait);
1062 }
1063 info->flags &= ~(CTC_ASYNC_NORMAL_ACTIVE | CTC_ASYNC_CLOSING);
1064 wake_up_interruptible(&info->close_wait);
1065 spin_unlock_irqrestore(&ctc_tty_lock, flags);
1066#ifdef CTC_DEBUG_MODEM_OPEN
1067 printk(KERN_DEBUG "ctc_tty_close normal exit\n");
1068#endif
1069}
1070
1071/*
1072 * ctc_tty_hangup() --- called by tty_hangup() when a hangup is signaled.
1073 */
1074static void
1075ctc_tty_hangup(struct tty_struct *tty)
1076{
1077 ctc_tty_info *info = (ctc_tty_info *)tty->driver_data;
1078 unsigned long saveflags;
1079 DBF_TEXT(trace, 3, __FUNCTION__);
1080 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_hangup"))
1081 return;
1082 ctc_tty_shutdown(info);
1083 info->count = 0;
1084 info->flags &= ~CTC_ASYNC_NORMAL_ACTIVE;
1085 spin_lock_irqsave(&ctc_tty_lock, saveflags);
1086	info->tty = NULL;
1087 spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
1088 wake_up_interruptible(&info->open_wait);
1089}
1090
1091
1092/*
1093 * For all online ttys, try sending data to
1094 * the lower levels.
1095 */
1096static void
1097ctc_tty_task(unsigned long arg)
1098{
1099 ctc_tty_info *info = (void *)arg;
1100 unsigned long saveflags;
1101 int again;
1102
1103 DBF_TEXT(trace, 3, __FUNCTION__);
1104 spin_lock_irqsave(&ctc_tty_lock, saveflags);
1105 if ((!ctc_tty_shuttingdown) && info) {
1106 again = ctc_tty_tint(info);
1107 if (!again)
1108 info->lsr |= UART_LSR_TEMT;
1109 again |= ctc_tty_readmodem(info);
1110 if (again) {
1111 tasklet_schedule(&info->tasklet);
1112 }
1113 }
1114 spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
1115}
1116
1117static struct tty_operations ctc_ops = {
1118 .open = ctc_tty_open,
1119 .close = ctc_tty_close,
1120 .write = ctc_tty_write,
1121 .flush_chars = ctc_tty_flush_chars,
1122 .write_room = ctc_tty_write_room,
1123 .chars_in_buffer = ctc_tty_chars_in_buffer,
1124 .flush_buffer = ctc_tty_flush_buffer,
1125 .ioctl = ctc_tty_ioctl,
1126 .throttle = ctc_tty_throttle,
1127 .unthrottle = ctc_tty_unthrottle,
1128 .set_termios = ctc_tty_set_termios,
1129 .hangup = ctc_tty_hangup,
1130 .tiocmget = ctc_tty_tiocmget,
1131 .tiocmset = ctc_tty_tiocmset,
1132};
1133
1134int
1135ctc_tty_init(void)
1136{
1137 int i;
1138 ctc_tty_info *info;
1139 struct tty_driver *device;
1140
1141 DBF_TEXT(trace, 2, __FUNCTION__);
1142 driver = kmalloc(sizeof(ctc_tty_driver), GFP_KERNEL);
1143 if (driver == NULL) {
1144		printk(KERN_WARNING "Out of memory in ctc_tty_init\n");
1145 return -ENOMEM;
1146 }
1147 memset(driver, 0, sizeof(ctc_tty_driver));
1148 device = alloc_tty_driver(CTC_TTY_MAX_DEVICES);
1149 if (!device) {
1150 kfree(driver);
1151		printk(KERN_WARNING "Out of memory in ctc_tty_init\n");
1152 return -ENOMEM;
1153 }
1154
1155 device->devfs_name = "ctc/" CTC_TTY_NAME;
1156 device->name = CTC_TTY_NAME;
1157 device->major = CTC_TTY_MAJOR;
1158 device->minor_start = 0;
1159 device->type = TTY_DRIVER_TYPE_SERIAL;
1160 device->subtype = SERIAL_TYPE_NORMAL;
1161 device->init_termios = tty_std_termios;
1162 device->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
1163 device->flags = TTY_DRIVER_REAL_RAW;
1164	device->driver_name = "ctc_tty";
1165 tty_set_operations(device, &ctc_ops);
1166 if (tty_register_driver(device)) {
1167 printk(KERN_WARNING "ctc_tty: Couldn't register serial-device\n");
1168 put_tty_driver(device);
1169 kfree(driver);
1170 return -1;
1171 }
1172 driver->ctc_tty_device = device;
1173 for (i = 0; i < CTC_TTY_MAX_DEVICES; i++) {
1174 info = &driver->info[i];
1175 init_MUTEX(&info->write_sem);
1176 tasklet_init(&info->tasklet, ctc_tty_task,
1177 (unsigned long) info);
1178 info->magic = CTC_ASYNC_MAGIC;
1179 info->line = i;
1180	info->tty = NULL;
1181 info->count = 0;
1182 info->blocked_open = 0;
1183 init_waitqueue_head(&info->open_wait);
1184 init_waitqueue_head(&info->close_wait);
1185 skb_queue_head_init(&info->tx_queue);
1186 skb_queue_head_init(&info->rx_queue);
1187 init_timer(&info->stoptimer);
1188 info->stoptimer.function = ctc_tty_stopdev;
1189 info->stoptimer.data = (unsigned long)info;
1190 info->mcr = UART_MCR_RTS;
1191 }
1192 return 0;
1193}
1194
1195int
1196ctc_tty_register_netdev(struct net_device *dev) {
1197 int ttynum;
1198 char *err;
1199 char *p;
1200
1201 DBF_TEXT(trace, 2, __FUNCTION__);
1202 if ((!dev) || (!dev->name)) {
1203 printk(KERN_WARNING
1204 "ctc_tty_register_netdev called "
1205 "with NULL dev or NULL dev-name\n");
1206 return -1;
1207 }
1208
1209 /*
1210	 * If the name is a format string, the caller wants us to
1211	 * do a name allocation: the format string must end with %d.
1212 */
1213 if (strchr(dev->name, '%'))
1214 {
1215		int rc = dev_alloc_name(dev, dev->name); /* changes dev->name */
1216		if (rc < 0) {
1217			printk(KERN_DEBUG "dev_alloc_name returned error %d\n", rc);
1218			return rc;
1219 }
1220
1221 }
1222
1223	for (p = dev->name; *p && ((*p < '0') || (*p > '9')); p++); /* find first digit */
1224 ttynum = simple_strtoul(p, &err, 0);
1225	if ((!*p) || (ttynum < 0) || (ttynum >= CTC_TTY_MAX_DEVICES) ||
1226	    (err && *err)) {
1227 printk(KERN_WARNING
1228 "ctc_tty_register_netdev called "
1229		       "with invalid number in name '%s'\n", dev->name);
1230 return -1;
1231 }
1232 if (driver->info[ttynum].netdev) {
1233 printk(KERN_WARNING
1234 "ctc_tty_register_netdev called "
1235 "for already registered device '%s'\n",
1236 dev->name);
1237 return -1;
1238 }
1239 driver->info[ttynum].netdev = dev;
1240 return 0;
1241}
1242
1243void
1244ctc_tty_unregister_netdev(struct net_device *dev) {
1245 int i;
1246 unsigned long saveflags;
1247 ctc_tty_info *info = NULL;
1248
1249 DBF_TEXT(trace, 2, __FUNCTION__);
1250 spin_lock_irqsave(&ctc_tty_lock, saveflags);
1251 for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
1252 if (driver->info[i].netdev == dev) {
1253 info = &driver->info[i];
1254 break;
1255 }
1256 if (info) {
1257 info->netdev = NULL;
1258 skb_queue_purge(&info->tx_queue);
1259 skb_queue_purge(&info->rx_queue);
1260 }
1261 spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
1262}
1263
1264void
1265ctc_tty_cleanup(void) {
1266 unsigned long saveflags;
1267
1268 DBF_TEXT(trace, 2, __FUNCTION__);
1269 spin_lock_irqsave(&ctc_tty_lock, saveflags);
1270 ctc_tty_shuttingdown = 1;
1271 spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
1272 tty_unregister_driver(driver->ctc_tty_device);
1273 put_tty_driver(driver->ctc_tty_device);
1274 kfree(driver);
1275 driver = NULL;
1276}
diff --git a/drivers/s390/net/ctctty.h b/drivers/s390/net/ctctty.h
new file mode 100644
index 000000000000..84b2f8f23ab3
--- /dev/null
+++ b/drivers/s390/net/ctctty.h
@@ -0,0 +1,37 @@
1/*
2 * $Id: ctctty.h,v 1.4 2003/09/18 08:01:10 mschwide Exp $
3 *
4 * CTC / ESCON network driver, tty interface.
5 *
6 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#ifndef _CTCTTY_H_
25#define _CTCTTY_H_
26
27#include <linux/skbuff.h>
28#include <linux/netdevice.h>
29
30extern int ctc_tty_register_netdev(struct net_device *);
31extern void ctc_tty_unregister_netdev(struct net_device *);
32extern void ctc_tty_netif_rx(struct sk_buff *);
33extern int ctc_tty_init(void);
34extern void ctc_tty_cleanup(void);
35extern void ctc_tty_setcarrier(struct net_device *, int);
36
37#endif
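For illustration, a minimal sketch of a consumer of this interface (the function, its combined init/register/rx sequencing, and the error handling are hypothetical; the real wiring lives in ctcmain.c): ctc_tty_init() is called once at module load, each net_device gets a tty line via ctc_tty_register_netdev(), carrier is signalled with ctc_tty_setcarrier(), and received frames are handed over with ctc_tty_netif_rx().

	#include "ctctty.h"

	/* Hypothetical consumer of the ctctty interface (sketch only). */
	static int my_ctc_setup(struct net_device *dev, struct sk_buff *skb)
	{
		int rc;

		rc = ctc_tty_init();	/* once, at module load */
		if (rc)
			return rc;
		rc = ctc_tty_register_netdev(dev); /* bind one tty line to dev */
		if (rc) {
			ctc_tty_cleanup();
			return rc;
		}
		ctc_tty_setcarrier(dev, 1);	/* report DCD to the tty side */
		ctc_tty_netif_rx(skb);	/* deliver a frame (assumes skb->dev is set) */
		return 0;
	}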
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
new file mode 100644
index 000000000000..1b0a9f16024c
--- /dev/null
+++ b/drivers/s390/net/cu3088.c
@@ -0,0 +1,166 @@
1/*
2 * $Id: cu3088.c,v 1.34 2004/06/15 13:16:27 pavlic Exp $
3 *
4 * CTC / LCS ccw_device driver
5 *
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Arnd Bergmann <arndb@de.ibm.com>
8 * Cornelia Huck <cohuck@de.ibm.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 */
25
26#include <linux/init.h>
27#include <linux/module.h>
28#include <linux/err.h>
29
30#include <asm/ccwdev.h>
31#include <asm/ccwgroup.h>
32
33#include "cu3088.h"
34
35const char *cu3088_type[] = {
36 "not a channel",
37 "CTC/A",
38 "ESCON channel",
39 "FICON channel",
40 "P390 LCS card",
41 "OSA LCS card",
42 "unknown channel type",
43 "unsupported channel type",
44};
45
46/* static definitions */
47
48static struct ccw_device_id cu3088_ids[] = {
49 { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel },
50 { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon },
51 { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon },
52 { CCW_DEVICE(0x3088, 0x01), .driver_info = channel_type_p390 },
53 { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 },
54 { /* end of list */ }
55};
56
57static struct ccw_driver cu3088_driver;
58
59struct device *cu3088_root_dev;
60
61static ssize_t
62group_write(struct device_driver *drv, const char *buf, size_t count)
63{
64 const char *start, *end;
65 char bus_ids[2][BUS_ID_SIZE], *argv[2];
66 int i;
67 int ret;
68 struct ccwgroup_driver *cdrv;
69
70 cdrv = to_ccwgroupdrv(drv);
71 if (!cdrv)
72 return -EINVAL;
73 start = buf;
74 for (i=0; i<2; i++) {
75 static const char delim[] = {',', '\n'};
76 int len;
77
78 if (!(end = strchr(start, delim[i])))
79 return count;
80 len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start + 1);
81 strlcpy (bus_ids[i], start, len);
82 argv[i] = bus_ids[i];
83 start = end + 1;
84 }
85
86 ret = ccwgroup_create(cu3088_root_dev, cdrv->driver_id,
87 &cu3088_driver, 2, argv);
88
89 return (ret == 0) ? count : ret;
90}
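/*
 * Illustrative note (not part of the original file): group_write()
 * expects exactly two bus ids, the first terminated by ',' and the
 * second by '\n', so user space can group a read/write channel pair
 * through the "group" attribute defined below, e.g. with hypothetical
 * device ids and the ctc discipline:
 *
 *	echo 0.0.f000,0.0.f001 > /sys/bus/ccwgroup/drivers/ctc/group
 */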
91
92static DRIVER_ATTR(group, 0200, NULL, group_write);
93
94/* Register-unregister for ctc&lcs */
95int
96register_cu3088_discipline(struct ccwgroup_driver *dcp)
97{
98 int rc;
99
100 if (!dcp)
101 return -EINVAL;
102
103 /* Register discipline.*/
104 rc = ccwgroup_driver_register(dcp);
105 if (rc)
106 return rc;
107
108 rc = driver_create_file(&dcp->driver, &driver_attr_group);
109 if (rc)
110 ccwgroup_driver_unregister(dcp);
111
112 return rc;
113
114}
115
116void
117unregister_cu3088_discipline(struct ccwgroup_driver *dcp)
118{
119 if (!dcp)
120 return;
121
122 driver_remove_file(&dcp->driver, &driver_attr_group);
123 ccwgroup_driver_unregister(dcp);
124}
125
126static struct ccw_driver cu3088_driver = {
127 .owner = THIS_MODULE,
128 .ids = cu3088_ids,
129 .name = "cu3088",
130 .probe = ccwgroup_probe_ccwdev,
131 .remove = ccwgroup_remove_ccwdev,
132};
133
134/* module setup */
135static int __init
136cu3088_init (void)
137{
138 int rc;
139
140 cu3088_root_dev = s390_root_dev_register("cu3088");
141 if (IS_ERR(cu3088_root_dev))
142 return PTR_ERR(cu3088_root_dev);
143 rc = ccw_driver_register(&cu3088_driver);
144 if (rc)
145 s390_root_dev_unregister(cu3088_root_dev);
146
147 return rc;
148}
149
150static void __exit
151cu3088_exit (void)
152{
153 ccw_driver_unregister(&cu3088_driver);
154 s390_root_dev_unregister(cu3088_root_dev);
155}
156
157MODULE_DEVICE_TABLE(ccw,cu3088_ids);
158MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
159MODULE_LICENSE("GPL");
160
161module_init(cu3088_init);
162module_exit(cu3088_exit);
163
164EXPORT_SYMBOL_GPL(cu3088_type);
165EXPORT_SYMBOL_GPL(register_cu3088_discipline);
166EXPORT_SYMBOL_GPL(unregister_cu3088_discipline);
diff --git a/drivers/s390/net/cu3088.h b/drivers/s390/net/cu3088.h
new file mode 100644
index 000000000000..0ec49a8b3adc
--- /dev/null
+++ b/drivers/s390/net/cu3088.h
@@ -0,0 +1,41 @@
1#ifndef _CU3088_H
2#define _CU3088_H
3
4/**
5 * Enum for classifying detected devices.
6 */
7enum channel_types {
8 /* Device is not a channel */
9 channel_type_none,
10
11 /* Device is a CTC/A */
12 channel_type_parallel,
13
14	/* Device is an ESCON channel */
15 channel_type_escon,
16
17 /* Device is a FICON channel */
18 channel_type_ficon,
19
20 /* Device is a P390 LCS card */
21 channel_type_p390,
22
23	/* Device is an OSA2 card */
24 channel_type_osa2,
25
26 /* Device is a channel, but we don't know
27 * anything about it */
28 channel_type_unknown,
29
30 /* Device is an unsupported model */
31 channel_type_unsupported,
32
33 /* number of type entries */
34 num_channel_types
35};
36
37extern const char *cu3088_type[num_channel_types];
38extern int register_cu3088_discipline(struct ccwgroup_driver *);
39extern void unregister_cu3088_discipline(struct ccwgroup_driver *);
40
41#endif
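For context, a discipline driver (CTC or LCS) plugs into cu3088 roughly as sketched below. The callbacks and the driver name are hypothetical, and the field set is modeled on what a discipline such as ctc registers (0xC3E3C3 is "CTC" in EBCDIC); treat it as a sketch, not the actual ctc or lcs code.

	#include <linux/module.h>
	#include <linux/init.h>
	#include <asm/ccwgroup.h>
	#include "cu3088.h"

	static int my_probe(struct ccwgroup_device *gdev);	/* hypothetical */
	static void my_remove(struct ccwgroup_device *gdev);	/* hypothetical */

	static struct ccwgroup_driver my_group_driver = {
		.owner      = THIS_MODULE,
		.name       = "myctc",	/* hypothetical */
		.max_slaves = 2,	/* read + write channel */
		.driver_id  = 0xC3E3C3,
		.probe      = my_probe,
		.remove     = my_remove,
	};

	static int __init my_init(void)
	{
		/* also creates the per-driver "group" attribute from cu3088.c */
		return register_cu3088_discipline(&my_group_driver);
	}

	static void __exit my_exit(void)
	{
		unregister_cu3088_discipline(&my_group_driver);
	}

	module_init(my_init);
	module_exit(my_exit);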
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
new file mode 100644
index 000000000000..fa09440d82e5
--- /dev/null
+++ b/drivers/s390/net/fsm.c
@@ -0,0 +1,220 @@
1/**
2 * $Id: fsm.c,v 1.6 2003/10/15 11:37:29 mschwide Exp $
3 *
4 * A generic FSM based on the FSM used in isdn4linux
5 *
6 */
7
8#include "fsm.h"
9#include <linux/config.h>
10#include <linux/module.h>
11#include <linux/timer.h>
12
13MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
14MODULE_DESCRIPTION("Finite state machine helper functions");
15MODULE_LICENSE("GPL");
16
17fsm_instance *
18init_fsm(char *name, const char **state_names, const char **event_names, int nr_states,
19 int nr_events, const fsm_node *tmpl, int tmpl_len, int order)
20{
21 int i;
22 fsm_instance *this;
23 fsm_function_t *m;
24 fsm *f;
25
26 this = (fsm_instance *)kmalloc(sizeof(fsm_instance), order);
27 if (this == NULL) {
28 printk(KERN_WARNING
29 "fsm(%s): init_fsm: Couldn't alloc instance\n", name);
30 return NULL;
31 }
32 memset(this, 0, sizeof(fsm_instance));
33 strlcpy(this->name, name, sizeof(this->name));
34
35 f = (fsm *)kmalloc(sizeof(fsm), order);
36 if (f == NULL) {
37 printk(KERN_WARNING
38 "fsm(%s): init_fsm: Couldn't alloc fsm\n", name);
39 kfree_fsm(this);
40 return NULL;
41 }
42 memset(f, 0, sizeof(fsm));
43 f->nr_events = nr_events;
44 f->nr_states = nr_states;
45 f->event_names = event_names;
46 f->state_names = state_names;
47 this->f = f;
48
49 m = (fsm_function_t *)kmalloc(
50 sizeof(fsm_function_t) * nr_states * nr_events, order);
51 if (m == NULL) {
52 printk(KERN_WARNING
53 "fsm(%s): init_fsm: Couldn't alloc jumptable\n", name);
54 kfree_fsm(this);
55 return NULL;
56 }
57 memset(m, 0, sizeof(fsm_function_t) * f->nr_states * f->nr_events);
58 f->jumpmatrix = m;
59
60 for (i = 0; i < tmpl_len; i++) {
61 if ((tmpl[i].cond_state >= nr_states) ||
62 (tmpl[i].cond_event >= nr_events) ) {
63 printk(KERN_ERR
64 "fsm(%s): init_fsm: Bad template l=%d st(%ld/%ld) ev(%ld/%ld)\n",
65 name, i, (long)tmpl[i].cond_state, (long)f->nr_states,
66 (long)tmpl[i].cond_event, (long)f->nr_events);
67 kfree_fsm(this);
68 return NULL;
69 } else
70 m[nr_states * tmpl[i].cond_event + tmpl[i].cond_state] =
71 tmpl[i].function;
72 }
73 return this;
74}
75
76void
77kfree_fsm(fsm_instance *this)
78{
79 if (this) {
80 if (this->f) {
81 if (this->f->jumpmatrix)
82 kfree(this->f->jumpmatrix);
83 kfree(this->f);
84 }
85 kfree(this);
86 } else
87 printk(KERN_WARNING
88 "fsm: kfree_fsm called with NULL argument\n");
89}
90
91#if FSM_DEBUG_HISTORY
92void
93fsm_print_history(fsm_instance *fi)
94{
95 int idx = 0;
96 int i;
97
98 if (fi->history_size >= FSM_HISTORY_SIZE)
99 idx = fi->history_index;
100
101 printk(KERN_DEBUG "fsm(%s): History:\n", fi->name);
102 for (i = 0; i < fi->history_size; i++) {
103 int e = fi->history[idx].event;
104 int s = fi->history[idx++].state;
105 idx %= FSM_HISTORY_SIZE;
106 if (e == -1)
107 printk(KERN_DEBUG " S=%s\n",
108 fi->f->state_names[s]);
109 else
110 printk(KERN_DEBUG " S=%s E=%s\n",
111 fi->f->state_names[s],
112 fi->f->event_names[e]);
113 }
114 fi->history_size = fi->history_index = 0;
115}
116
117void
118fsm_record_history(fsm_instance *fi, int state, int event)
119{
120 fi->history[fi->history_index].state = state;
121 fi->history[fi->history_index++].event = event;
122 fi->history_index %= FSM_HISTORY_SIZE;
123 if (fi->history_size < FSM_HISTORY_SIZE)
124 fi->history_size++;
125}
126#endif
127
128const char *
129fsm_getstate_str(fsm_instance *fi)
130{
131 int st = atomic_read(&fi->state);
132 if (st >= fi->f->nr_states)
133 return "Invalid";
134 return fi->f->state_names[st];
135}
136
137static void
138fsm_expire_timer(fsm_timer *this)
139{
140#if FSM_TIMER_DEBUG
141 printk(KERN_DEBUG "fsm(%s): Timer %p expired\n",
142 this->fi->name, this);
143#endif
144 fsm_event(this->fi, this->expire_event, this->event_arg);
145}
146
147void
148fsm_settimer(fsm_instance *fi, fsm_timer *this)
149{
150 this->fi = fi;
151 this->tl.function = (void *)fsm_expire_timer;
152 this->tl.data = (long)this;
153#if FSM_TIMER_DEBUG
154 printk(KERN_DEBUG "fsm(%s): Create timer %p\n", fi->name,
155 this);
156#endif
157 init_timer(&this->tl);
158}
159
160void
161fsm_deltimer(fsm_timer *this)
162{
163#if FSM_TIMER_DEBUG
164 printk(KERN_DEBUG "fsm(%s): Delete timer %p\n", this->fi->name,
165 this);
166#endif
167 del_timer(&this->tl);
168}
169
170int
171fsm_addtimer(fsm_timer *this, int millisec, int event, void *arg)
172{
173
174#if FSM_TIMER_DEBUG
175 printk(KERN_DEBUG "fsm(%s): Add timer %p %dms\n",
176 this->fi->name, this, millisec);
177#endif
178
179 init_timer(&this->tl);
180 this->tl.function = (void *)fsm_expire_timer;
181 this->tl.data = (long)this;
182 this->expire_event = event;
183 this->event_arg = arg;
184 this->tl.expires = jiffies + (millisec * HZ) / 1000;
185 add_timer(&this->tl);
186 return 0;
187}
188
189/* FIXME: this function is never used; why? */
190void
191fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg)
192{
193
194#if FSM_TIMER_DEBUG
195 printk(KERN_DEBUG "fsm(%s): Restart timer %p %dms\n",
196 this->fi->name, this, millisec);
197#endif
198
199 del_timer(&this->tl);
200 init_timer(&this->tl);
201 this->tl.function = (void *)fsm_expire_timer;
202 this->tl.data = (long)this;
203 this->expire_event = event;
204 this->event_arg = arg;
205 this->tl.expires = jiffies + (millisec * HZ) / 1000;
206 add_timer(&this->tl);
207}
208
209EXPORT_SYMBOL(init_fsm);
210EXPORT_SYMBOL(kfree_fsm);
211EXPORT_SYMBOL(fsm_settimer);
212EXPORT_SYMBOL(fsm_deltimer);
213EXPORT_SYMBOL(fsm_addtimer);
214EXPORT_SYMBOL(fsm_modtimer);
215EXPORT_SYMBOL(fsm_getstate_str);
216
217#if FSM_DEBUG_HISTORY
218EXPORT_SYMBOL(fsm_print_history);
219EXPORT_SYMBOL(fsm_record_history);
220#endif
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
new file mode 100644
index 000000000000..f9a011001eb6
--- /dev/null
+++ b/drivers/s390/net/fsm.h
@@ -0,0 +1,265 @@
1/* $Id: fsm.h,v 1.1.1.1 2002/03/13 19:33:09 mschwide Exp $
2 */
3#ifndef _FSM_H_
4#define _FSM_H_
5
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <linux/timer.h>
9#include <linux/time.h>
10#include <linux/slab.h>
11#include <linux/sched.h>
12#include <linux/string.h>
13#include <asm/atomic.h>
14
15/**
16 * Define this to get debugging messages.
17 */
18#define FSM_DEBUG 0
19
20/**
21 * Define this to get debugging messages for
22 * timer handling.
23 */
24#define FSM_TIMER_DEBUG 0
25
26/**
27 * Define these to record a history of
28 * events/state changes and print it if an
29 * action function is not found.
30 */
31#define FSM_DEBUG_HISTORY 0
32#define FSM_HISTORY_SIZE 40
33
34struct fsm_instance_t;
35
36/**
37 * Definition of an action function, called by a FSM
38 */
39typedef void (*fsm_function_t)(struct fsm_instance_t *, int, void *);
40
41/**
42 * Internal jump table for a FSM
43 */
44typedef struct {
45 fsm_function_t *jumpmatrix;
46 int nr_events;
47 int nr_states;
48 const char **event_names;
49 const char **state_names;
50} fsm;
51
52#if FSM_DEBUG_HISTORY
53/**
54 * Element of State/Event history used for debugging.
55 */
56typedef struct {
57 int state;
58 int event;
59} fsm_history;
60#endif
61
62/**
63 * Representation of a FSM
64 */
65typedef struct fsm_instance_t {
66 fsm *f;
67 atomic_t state;
68 char name[16];
69 void *userdata;
70 int userint;
71#if FSM_DEBUG_HISTORY
72 int history_index;
73 int history_size;
74 fsm_history history[FSM_HISTORY_SIZE];
75#endif
76} fsm_instance;
77
78/**
79 * Description of a state-event combination
80 */
81typedef struct {
82 int cond_state;
83 int cond_event;
84 fsm_function_t function;
85} fsm_node;
86
87/**
88 * Description of a FSM Timer.
89 */
90typedef struct {
91 fsm_instance *fi;
92 struct timer_list tl;
93 int expire_event;
94 void *event_arg;
95} fsm_timer;
96
97/**
98 * Creates an FSM
99 *
100 * @param name Name of this instance for logging purposes.
101 * @param state_names An array of names for all states for logging purposes.
102 * @param event_names An array of names for all events for logging purposes.
103 * @param nr_states Number of states for this instance.
104 * @param nr_events Number of events for this instance.
105 * @param tmpl An array of fsm_nodes, describing this FSM.
106 * @param tmpl_len Length of the describing array.
107 * @param order GFP flags used when allocating the FSM data structs.
108 */
109extern fsm_instance *
110init_fsm(char *name, const char **state_names,
111 const char **event_names,
112 int nr_states, int nr_events, const fsm_node *tmpl,
113 int tmpl_len, int order);
114
115/**
116 * Releases an FSM
117 *
118 * @param fi Pointer to an FSM, previously created with init_fsm.
119 */
120extern void kfree_fsm(fsm_instance *fi);
121
122#if FSM_DEBUG_HISTORY
123extern void
124fsm_print_history(fsm_instance *fi);
125
126extern void
127fsm_record_history(fsm_instance *fi, int state, int event);
128#endif
129
130/**
131 * Emits an event to a FSM.
132 * If an action function is defined for the current state/event combination,
133 * this function is called.
134 *
135 * @param fi Pointer to FSM which should receive the event.
136 * @param event The event to be delivered.
137 * @param arg A generic argument, handed to the action function.
138 *
139 * @return 0 on success,
140 * 1 if current state or event is out of range,
141 * !0 if state and event are in range but no action is defined.
142 */
143extern __inline__ int
144fsm_event(fsm_instance *fi, int event, void *arg)
145{
146 fsm_function_t r;
147 int state = atomic_read(&fi->state);
148
149 if ((state >= fi->f->nr_states) ||
150 (event >= fi->f->nr_events) ) {
151 printk(KERN_ERR "fsm(%s): Invalid state st(%ld/%ld) ev(%d/%ld)\n",
152 fi->name, (long)state,(long)fi->f->nr_states, event,
153 (long)fi->f->nr_events);
154#if FSM_DEBUG_HISTORY
155 fsm_print_history(fi);
156#endif
157 return 1;
158 }
159 r = fi->f->jumpmatrix[fi->f->nr_states * event + state];
160 if (r) {
161#if FSM_DEBUG
162 printk(KERN_DEBUG "fsm(%s): state %s event %s\n",
163 fi->name, fi->f->state_names[state],
164 fi->f->event_names[event]);
165#endif
166#if FSM_DEBUG_HISTORY
167 fsm_record_history(fi, state, event);
168#endif
169 r(fi, event, arg);
170 return 0;
171 } else {
172#if FSM_DEBUG || FSM_DEBUG_HISTORY
173 printk(KERN_DEBUG "fsm(%s): no function for event %s in state %s\n",
174 fi->name, fi->f->event_names[event],
175 fi->f->state_names[state]);
176#endif
177#if FSM_DEBUG_HISTORY
178 fsm_print_history(fi);
179#endif
180 return !0;
181 }
182}
183
184/**
185 * Modifies the state of an FSM.
186 * This does <em>not</em> trigger an event or call an action function.
187 *
188 * @param fi Pointer to FSM
189 * @param newstate The new state for this FSM.
190 */
191extern __inline__ void
192fsm_newstate(fsm_instance *fi, int newstate)
193{
194 atomic_set(&fi->state,newstate);
195#if FSM_DEBUG_HISTORY
196 fsm_record_history(fi, newstate, -1);
197#endif
198#if FSM_DEBUG
199 printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name,
200 fi->f->state_names[newstate]);
201#endif
202}
203
204/**
205 * Retrieves the state of an FSM
206 *
207 * @param fi Pointer to FSM
208 *
209 * @return The current state of the FSM.
210 */
211extern __inline__ int
212fsm_getstate(fsm_instance *fi)
213{
214 return atomic_read(&fi->state);
215}
216
217/**
218 * Retrieves the name of the state of an FSM
219 *
220 * @param fi Pointer to FSM
221 *
222 * @return The current state of the FSM in a human readable form.
223 */
224extern const char *fsm_getstate_str(fsm_instance *fi);
225
226/**
227 * Initializes a timer for an FSM.
228 * This prepares an fsm_timer for usage with fsm_addtimer.
229 *
230 * @param fi Pointer to FSM
231 * @param timer The timer to be initialized.
232 */
233extern void fsm_settimer(fsm_instance *fi, fsm_timer *);
234
235/**
236 * Clears a pending timer of an FSM instance.
237 *
238 * @param timer The timer to clear.
239 */
240extern void fsm_deltimer(fsm_timer *timer);
241
242/**
243 * Adds and starts a timer to an FSM instance.
244 *
245 * @param timer The timer to be added. The field fi of that timer
246 * must have been set to point to the instance.
247 * @param millisec Duration, after which the timer should expire.
248 * @param event Event, to trigger if timer expires.
249 * @param arg Generic argument, provided to expiry function.
250 *
251 * @return 0 on success, -1 if timer is already active.
252 */
253extern int fsm_addtimer(fsm_timer *timer, int millisec, int event, void *arg);
254
255/**
256 * Modifies a timer of an FSM.
257 *
258 * @param timer The timer to modify.
259 * @param millisec Duration, after which the timer should expire.
260 * @param event Event, to trigger if timer expires.
261 * @param arg Generic argument, provided to expiry function.
262 */
263extern void fsm_modtimer(fsm_timer *timer, int millisec, int event, void *arg);
264
265#endif /* _FSM_H_ */
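Taken together, a minimal (invented) user of this helper looks like the sketch below: define state and event enums with matching name tables, list the valid state/event combinations as fsm_nodes, then drive the machine with fsm_event(). All names here are made up for illustration.

	#include "fsm.h"

	enum { ST_IDLE, ST_RUN, NR_STATES };
	enum { EV_START, EV_STOP, NR_EVENTS };

	static const char *st_names[] = { "Idle", "Running" };
	static const char *ev_names[] = { "Start", "Stop" };

	static void act_start(fsm_instance *fi, int event, void *arg)
	{
		fsm_newstate(fi, ST_RUN);	/* state change only, no event */
	}

	static void act_stop(fsm_instance *fi, int event, void *arg)
	{
		fsm_newstate(fi, ST_IDLE);
	}

	static const fsm_node demo_tmpl[] = {
		{ ST_IDLE, EV_START, act_start },
		{ ST_RUN,  EV_STOP,  act_stop  },
	};

	static int demo_init(void)
	{
		fsm_instance *fi;

		fi = init_fsm("demo", st_names, ev_names, NR_STATES,
			      NR_EVENTS, demo_tmpl, ARRAY_SIZE(demo_tmpl),
			      GFP_KERNEL);
		if (!fi)
			return -ENOMEM;
		fsm_newstate(fi, ST_IDLE);
		fsm_event(fi, EV_START, NULL);	/* runs act_start */
		kfree_fsm(fi);			/* on teardown */
		return 0;
	}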
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
new file mode 100644
index 000000000000..1ac6563ee3e0
--- /dev/null
+++ b/drivers/s390/net/iucv.c
@@ -0,0 +1,2567 @@
1/*
2 * $Id: iucv.c,v 1.43 2005/02/09 14:47:43 braunu Exp $
3 *
4 * IUCV network driver
5 *
6 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s):
8 * Original source:
9 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
10 * Xenia Tkatschow (xenia@us.ibm.com)
11 * 2Gb awareness and general cleanup:
12 * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
13 *
14 * Documentation used:
15 * The original source
16 * CP Programming Service, IBM document # SC24-5760
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 *
32 * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.43 $
33 *
34 */
35
36/* #define DEBUG */
37
38#include <linux/module.h>
39#include <linux/moduleparam.h>
40#include <linux/config.h>
41
42#include <linux/spinlock.h>
43#include <linux/kernel.h>
44#include <linux/slab.h>
45#include <linux/init.h>
46#include <linux/interrupt.h>
47#include <linux/list.h>
48#include <linux/errno.h>
49#include <linux/err.h>
50#include <linux/device.h>
51#include <asm/atomic.h>
52#include "iucv.h"
53#include <asm/io.h>
54#include <asm/s390_ext.h>
55#include <asm/ebcdic.h>
56#include <asm/smp.h>
57#include <asm/ccwdev.h>	/* for root device stuff */
58
59/* FLAGS:
60 * All flags are defined in the field IPFLAGS1 of each function
61 * and can be found in CP Programming Services.
62 * IPSRCCLS - Indicates you have specified a source class
63 * IPFGMCL - Indicates you have specified a target class
64 * IPFGPID - Indicates you have specified a pathid
65 * IPFGMID - Indicates you have specified a message ID
66 * IPANSLST - Indicates that you are using an address list for
67 * reply data
68 * IPBUFLST - Indicates that you are using an address list for
69 * message data
70 */
71
72#define IPSRCCLS 0x01
73#define IPFGMCL 0x01
74#define IPFGPID 0x02
75#define IPFGMID 0x04
76#define IPANSLST 0x08
77#define IPBUFLST 0x40
78
79static int
80iucv_bus_match (struct device *dev, struct device_driver *drv)
81{
82 return 0;
83}
84
85struct bus_type iucv_bus = {
86 .name = "iucv",
87 .match = iucv_bus_match,
88};
89
90struct device *iucv_root;
91
92/* General IUCV interrupt structure */
93typedef struct {
94 __u16 ippathid;
95 __u8 res1;
96 __u8 iptype;
97 __u32 res2;
98 __u8 ipvmid[8];
99 __u8 res3[24];
100} iucv_GeneralInterrupt;
101
102static iucv_GeneralInterrupt *iucv_external_int_buffer = NULL;
103
104/* Spin Lock declaration */
105
106static DEFINE_SPINLOCK(iucv_lock);
107
108static int messagesDisabled = 0;
109
110/***************INTERRUPT HANDLING ***************/
111
112typedef struct {
113 struct list_head queue;
114 iucv_GeneralInterrupt data;
115} iucv_irqdata;
116
117static struct list_head iucv_irq_queue;
118static DEFINE_SPINLOCK(iucv_irq_queue_lock);
119
120/*
121 * Internal function prototypes
122 */
123static void iucv_tasklet_handler(unsigned long);
124static void iucv_irq_handler(struct pt_regs *, __u16);
125
126static DECLARE_TASKLET(iucv_tasklet,iucv_tasklet_handler,0);
127
128/************ FUNCTION ID'S ****************************/
129
130#define ACCEPT 10
131#define CONNECT 11
132#define DECLARE_BUFFER 12
133#define PURGE 9
134#define QUERY 0
135#define QUIESCE 13
136#define RECEIVE 5
137#define REJECT 8
138#define REPLY 6
139#define RESUME 14
140#define RETRIEVE_BUFFER 2
141#define SEND 4
142#define SETMASK 16
143#define SEVER 15
144
145/**
146 * Structure: handler
147 * members: list - list management.
148 * structure: id
149 * userid - 8 char array of machine identification
150 * user_data - 16 char array for user identification
151 * mask - 24 char array used when comparing the two previous fields
152 * interrupt_table - vector of interrupt functions.
153 * pgm_data - pointer to application data that is passed
154 * to the interrupt handlers
155 */
156typedef struct handler_t {
157 struct list_head list;
158 struct {
159 __u8 userid[8];
160 __u8 user_data[16];
161 __u8 mask[24];
162 } id;
163 iucv_interrupt_ops_t *interrupt_table;
164 void *pgm_data;
165} handler;
166
167/**
168 * iucv_handler_table: List of registered handlers.
169 */
170static struct list_head iucv_handler_table;
171
172/**
173 * iucv_pathid_table: an array of handler pointers (into
174 * iucv_handler_table) for fast lookup by pathid.
175 */
176static handler **iucv_pathid_table;
177
178static unsigned long max_connections;
179
180/**
181 * iucv_cpuid: contains the logical cpu number of the cpu which
182 * has declared the iucv buffer by issuing DECLARE_BUFFER.
183 * If no cpu has done the initialization iucv_cpuid contains -1.
184 */
185static int iucv_cpuid = -1;
186/**
187 * register_flag: is 0 when external interrupt has not been registered
188 */
189static int register_flag;
190
191/****************FIVE 40-BYTE PARAMETER STRUCTURES******************/
192/* Data struct 1: iparml_control
193 * Used for iucv_accept
194 * iucv_connect
195 * iucv_quiesce
196 * iucv_resume
197 * iucv_sever
198 * iucv_retrieve_buffer
199 * Data struct 2: iparml_dpl (data in parameter list)
200 * Used for iucv_send_prmmsg
201 * iucv_send2way_prmmsg
202 * iucv_send2way_prmmsg_array
203 * iucv_reply_prmmsg
204 * Data struct 3: iparml_db (data in a buffer)
205 * Used for iucv_receive
206 * iucv_receive_array
207 * iucv_reject
208 * iucv_reply
209 * iucv_reply_array
210 * iucv_send
211 * iucv_send_array
212 * iucv_send2way
213 * iucv_send2way_array
214 * iucv_declare_buffer
215 * Data struct 4: iparml_purge
216 * Used for iucv_purge
217 * iucv_query
218 * Data struct 5: iparml_set_mask
219 * Used for iucv_set_mask
220 */
221
222typedef struct {
223 __u16 ippathid;
224 __u8 ipflags1;
225 __u8 iprcode;
226 __u16 ipmsglim;
227 __u16 res1;
228 __u8 ipvmid[8];
229 __u8 ipuser[16];
230 __u8 iptarget[8];
231} iparml_control;
232
233typedef struct {
234 __u16 ippathid;
235 __u8 ipflags1;
236 __u8 iprcode;
237 __u32 ipmsgid;
238 __u32 iptrgcls;
239 __u8 iprmmsg[8];
240 __u32 ipsrccls;
241 __u32 ipmsgtag;
242 __u32 ipbfadr2;
243 __u32 ipbfln2f;
244 __u32 res;
245} iparml_dpl;
246
247typedef struct {
248 __u16 ippathid;
249 __u8 ipflags1;
250 __u8 iprcode;
251 __u32 ipmsgid;
252 __u32 iptrgcls;
253 __u32 ipbfadr1;
254 __u32 ipbfln1f;
255 __u32 ipsrccls;
256 __u32 ipmsgtag;
257 __u32 ipbfadr2;
258 __u32 ipbfln2f;
259 __u32 res;
260} iparml_db;
261
262typedef struct {
263 __u16 ippathid;
264 __u8 ipflags1;
265 __u8 iprcode;
266 __u32 ipmsgid;
267 __u8 ipaudit[3];
268 __u8 res1[5];
269 __u32 res2;
270 __u32 ipsrccls;
271 __u32 ipmsgtag;
272 __u32 res3[3];
273} iparml_purge;
274
275typedef struct {
276 __u8 ipmask;
277 __u8 res1[2];
278 __u8 iprcode;
279 __u32 res2[9];
280} iparml_set_mask;
281
282typedef struct {
283 union {
284 iparml_control p_ctrl;
285 iparml_dpl p_dpl;
286 iparml_db p_db;
287 iparml_purge p_purge;
288 iparml_set_mask p_set_mask;
289 } param;
290 atomic_t in_use;
291 __u32 res;
292} __attribute__ ((aligned(8))) iucv_param;
293#define PARAM_POOL_SIZE (PAGE_SIZE / sizeof(iucv_param))
294
295static iucv_param * iucv_param_pool;
296
297MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
298MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
299MODULE_LICENSE("GPL");
300
301/*
302 * Debugging stuff
303 *******************************************************************************/
304
305
306#ifdef DEBUG
307static int debuglevel = 0;
308
309module_param(debuglevel, int, 0);
310MODULE_PARM_DESC(debuglevel,
311 "Specifies the debug level (0=off ... 3=all)");
312
313static void
314iucv_dumpit(char *title, void *buf, int len)
315{
316 int i;
317 __u8 *p = (__u8 *)buf;
318
319 if (debuglevel < 3)
320 return;
321
322 printk(KERN_DEBUG "%s\n", title);
323 printk(" ");
324 for (i = 0; i < len; i++) {
325 if (!(i % 16) && i != 0)
326 printk ("\n ");
327 else if (!(i % 4) && i != 0)
328 printk(" ");
329 printk("%02X", *p++);
330 }
331 if (len % 16)
332 printk ("\n");
333 return;
334}
335#define iucv_debug(lvl, fmt, args...) \
336do { \
337 if (debuglevel >= lvl) \
338 printk(KERN_DEBUG "%s: " fmt "\n", __FUNCTION__ , ## args); \
339} while (0)
340
341#else
342
343#define iucv_debug(lvl, fmt, args...)
344#define iucv_dumpit(title, buf, len)
345
346#endif
347
348/*
349 * Internal functions
350 *******************************************************************************/
351
352/**
353 * print start banner
354 */
355static void
356iucv_banner(void)
357{
358 char vbuf[] = "$Revision: 1.43 $";
359 char *version = vbuf;
360
361 if ((version = strchr(version, ':'))) {
362 char *p = strchr(version + 1, '$');
363 if (p)
364 *p = '\0';
365 } else
366 version = " ??? ";
367 printk(KERN_INFO
368 "IUCV lowlevel driver Version%s initialized\n", version);
369}
370
371/**
372 * iucv_init - Initialization
373 *
374 * Allocates and initializes various data structures.
375 */
376static int
377iucv_init(void)
378{
379 int ret;
380
381 if (iucv_external_int_buffer)
382 return 0;
383
384 if (!MACHINE_IS_VM) {
385 printk(KERN_ERR "IUCV: IUCV connection needs VM as base\n");
386 return -EPROTONOSUPPORT;
387 }
388
389 ret = bus_register(&iucv_bus);
390 if (ret) {
391 printk(KERN_ERR "IUCV: failed to register bus.\n");
392 return ret;
393 }
394
395 iucv_root = s390_root_dev_register("iucv");
396 if (IS_ERR(iucv_root)) {
397 printk(KERN_ERR "IUCV: failed to register iucv root.\n");
398 bus_unregister(&iucv_bus);
399 return PTR_ERR(iucv_root);
400 }
401
402	/* Note: GFP_DMA is used to get memory below 2G */
403 iucv_external_int_buffer = kmalloc(sizeof(iucv_GeneralInterrupt),
404 GFP_KERNEL|GFP_DMA);
405 if (!iucv_external_int_buffer) {
406 printk(KERN_WARNING
407 "%s: Could not allocate external interrupt buffer\n",
408 __FUNCTION__);
409 s390_root_dev_unregister(iucv_root);
410 bus_unregister(&iucv_bus);
411 return -ENOMEM;
412 }
413 memset(iucv_external_int_buffer, 0, sizeof(iucv_GeneralInterrupt));
414
415 /* Initialize parameter pool */
416 iucv_param_pool = kmalloc(sizeof(iucv_param) * PARAM_POOL_SIZE,
417 GFP_KERNEL|GFP_DMA);
418 if (!iucv_param_pool) {
419 printk(KERN_WARNING "%s: Could not allocate param pool\n",
420 __FUNCTION__);
421 kfree(iucv_external_int_buffer);
422 iucv_external_int_buffer = NULL;
423 s390_root_dev_unregister(iucv_root);
424 bus_unregister(&iucv_bus);
425 return -ENOMEM;
426 }
427 memset(iucv_param_pool, 0, sizeof(iucv_param) * PARAM_POOL_SIZE);
428
429 /* Initialize irq queue */
430 INIT_LIST_HEAD(&iucv_irq_queue);
431
432 /* Initialize handler table */
433 INIT_LIST_HEAD(&iucv_handler_table);
434
435 iucv_banner();
436 return 0;
437}
438
439/**
440 * iucv_exit - De-Initialization
441 *
442 * Frees everything allocated from iucv_init.
443 */
444static int iucv_retrieve_buffer (void);
445
446static void
447iucv_exit(void)
448{
449 iucv_retrieve_buffer();
450 if (iucv_external_int_buffer) {
451 kfree(iucv_external_int_buffer);
452 iucv_external_int_buffer = NULL;
453 }
454 if (iucv_param_pool) {
455 kfree(iucv_param_pool);
456 iucv_param_pool = NULL;
457 }
458 s390_root_dev_unregister(iucv_root);
459 bus_unregister(&iucv_bus);
460 printk(KERN_INFO "IUCV lowlevel driver unloaded\n");
461}
462
463/**
464 * grab_param: - Get a parameter buffer from the pre-allocated pool.
465 *
466 * This function searches for an unused element in the pre-allocated pool
467 * of parameter buffers. If one is found, it marks it "in use" and returns
468 * a pointer to it. The calling function is responsible for releasing it
469 * when it has finished its usage.
470 *
471 * Returns: A pointer to iucv_param.
472 */
473static __inline__ iucv_param *
474grab_param(void)
475{
476 iucv_param *ptr;
477 static int hint = 0;
478
479 ptr = iucv_param_pool + hint;
480 do {
481 ptr++;
482 if (ptr >= iucv_param_pool + PARAM_POOL_SIZE)
483 ptr = iucv_param_pool;
484 } while (atomic_compare_and_swap(0, 1, &ptr->in_use));
485 hint = ptr - iucv_param_pool;
486
487 memset(&ptr->param, 0, sizeof(ptr->param));
488 return ptr;
489}
490
491/**
492 * release_param - Release a parameter buffer.
493 * @p: A pointer to a struct iucv_param, previously obtained by calling
494 * grab_param().
495 *
496 * This function marks the specified parameter buffer "unused".
497 */
498static __inline__ void
499release_param(void *p)
500{
501 atomic_set(&((iucv_param *)p)->in_use, 0);
502}
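/*
 * Illustrative sketch (not part of the original file) of the typical
 * calling pattern used throughout this driver: grab a parameter
 * block, fill it in, issue the CP call via b2f0() (defined below),
 * then release the block. example_sever() is hypothetical.
 */
#if 0
static ulong example_sever(__u16 pathid, __u8 user_data[16])
{
	iparml_control *parm = (iparml_control *) grab_param();
	ulong rc;

	memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
	parm->ippathid = pathid;
	rc = b2f0(SEVER, parm);
	release_param(parm);
	return rc;
}
#endif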
503
504/**
505 * iucv_add_handler: - Add a new handler
506 * @new: handler that is being entered into the chain.
507 *
508 * Places the new handler on iucv_handler_table if an identical
509 * handler is not found.
510 *
511 * Returns: 0 on success, !0 on failure (handler already in chain).
512 */
513static int
514iucv_add_handler (handler *new)
515{
516 ulong flags;
517
518 iucv_debug(1, "entering");
519 iucv_dumpit("handler:", new, sizeof(handler));
520
521 spin_lock_irqsave (&iucv_lock, flags);
522 if (!list_empty(&iucv_handler_table)) {
523 struct list_head *lh;
524
525 /**
526 * Search list for handler with identical id. If one
527 * is found, the new handler is _not_ added.
528 */
529 list_for_each(lh, &iucv_handler_table) {
530 handler *h = list_entry(lh, handler, list);
531 if (!memcmp(&new->id, &h->id, sizeof(h->id))) {
532 iucv_debug(1, "ret 1");
533 spin_unlock_irqrestore (&iucv_lock, flags);
534 return 1;
535 }
536 }
537 }
538 /**
539 * If we get here, no handler was found.
540 */
541 INIT_LIST_HEAD(&new->list);
542 list_add(&new->list, &iucv_handler_table);
543 spin_unlock_irqrestore (&iucv_lock, flags);
544
545 iucv_debug(1, "exiting");
546 return 0;
547}
548
549/**
550 * b2f0:
551 * @code: identifier of IUCV call to CP.
552 * @parm: pointer to 40 byte iparml area passed to CP
553 *
554 * Calls CP to execute IUCV commands.
555 *
556 * Returns: return code from CP's IUCV call
557 */
558static __inline__ ulong
559b2f0(__u32 code, void *parm)
560{
561 iucv_dumpit("iparml before b2f0 call:", parm, sizeof(iucv_param));
562
563 asm volatile (
564 "LRA 1,0(%1)\n\t"
565 "LR 0,%0\n\t"
566 ".long 0xb2f01000"
567 :
568 : "d" (code), "a" (parm)
569 : "0", "1"
570 );
571
572 iucv_dumpit("iparml after b2f0 call:", parm, sizeof(iucv_param));
573
574 return (unsigned long)*((__u8 *)(parm + 3));
575}
576
577/*
578 * Name: iucv_add_pathid
579 * Purpose: Adds a path id to the system.
580 * Input: pathid - pathid that is going to be entered into system
581 * handle - address of handler that the pathid will be associated
582 * with.
583 * pgm_data - token passed in by application.
584 * Output: 0: successful addition of pathid
585 * - EINVAL - pathid entry is being used by another application
586 * - ENOMEM - storage allocation for a new pathid table failed
587*/
588static int
589__iucv_add_pathid(__u16 pathid, handler *handler)
590{
591
592 iucv_debug(1, "entering");
593
594 iucv_debug(1, "handler is pointing to %p", handler);
595
596 if (pathid > (max_connections - 1))
597 return -EINVAL;
598
599 if (iucv_pathid_table[pathid]) {
600 iucv_debug(1, "pathid entry is %p", iucv_pathid_table[pathid]);
601 printk(KERN_WARNING
602		       "%s: Pathid already in use.\n", __FUNCTION__);
603 return -EINVAL;
604 }
605 iucv_pathid_table[pathid] = handler;
606
607 iucv_debug(1, "exiting");
608 return 0;
609} /* end of add_pathid function */
610
611static int
612iucv_add_pathid(__u16 pathid, handler *handler)
613{
614 ulong flags;
615 int rc;
616
617 spin_lock_irqsave (&iucv_lock, flags);
618 rc = __iucv_add_pathid(pathid, handler);
619 spin_unlock_irqrestore (&iucv_lock, flags);
620 return rc;
621}
622
623static void
624iucv_remove_pathid(__u16 pathid)
625{
626 ulong flags;
627
628 if (pathid > (max_connections - 1))
629 return;
630
631 spin_lock_irqsave (&iucv_lock, flags);
632 iucv_pathid_table[pathid] = NULL;
633 spin_unlock_irqrestore (&iucv_lock, flags);
634}
635
636/**
637 * iucv_declare_buffer_cpuid
638 * Register at VM for subsequent IUCV operations. This is executed
639 * on the reserved CPU iucv_cpuid. Called from iucv_declare_buffer().
640 */
641static void
642iucv_declare_buffer_cpuid (void *result)
643{
644 iparml_db *parm;
645
646 parm = (iparml_db *)grab_param();
647 parm->ipbfadr1 = virt_to_phys(iucv_external_int_buffer);
648 if ((*((ulong *)result) = b2f0(DECLARE_BUFFER, parm)) == 1)
649 *((ulong *)result) = parm->iprcode;
650 release_param(parm);
651}
652
653/**
654 * iucv_retrieve_buffer_cpuid:
655 * Unregister IUCV usage at VM. This is always executed on the same
656 * cpu that registered the buffer to VM.
657 * Called from iucv_retrieve_buffer().
658 */
659static void
660iucv_retrieve_buffer_cpuid (void *cpu)
661{
662 iparml_control *parm;
663
664 parm = (iparml_control *)grab_param();
665 b2f0(RETRIEVE_BUFFER, parm);
666 release_param(parm);
667}
668
669/**
670 * Name: iucv_declare_buffer
671 * Purpose: Specifies the guest's real address of the external
672 * interrupt buffer.
673 * Input: void
674 * Output: iprcode - return code from b2f0 call
675 */
676static int
677iucv_declare_buffer (void)
678{
679 unsigned long flags;
680 ulong b2f0_result;
681
682 iucv_debug(1, "entering");
683 b2f0_result = -ENODEV;
684 spin_lock_irqsave (&iucv_lock, flags);
685 if (iucv_cpuid == -1) {
686 /* Reserve any cpu for use by iucv. */
687 iucv_cpuid = smp_get_cpu(CPU_MASK_ALL);
688 spin_unlock_irqrestore (&iucv_lock, flags);
689 smp_call_function_on(iucv_declare_buffer_cpuid,
690 &b2f0_result, 0, 1, iucv_cpuid);
691 if (b2f0_result) {
692 smp_put_cpu(iucv_cpuid);
693 iucv_cpuid = -1;
694 }
695 iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer);
696 } else {
697 spin_unlock_irqrestore (&iucv_lock, flags);
698 b2f0_result = 0;
699 }
700 iucv_debug(1, "exiting");
701 return b2f0_result;
702}
703
704/**
705 * iucv_retrieve_buffer:
706 *
707 * Terminates all use of IUCV.
708 * Returns: return code from CP
709 */
710static int
711iucv_retrieve_buffer (void)
712{
713 iucv_debug(1, "entering");
714 if (iucv_cpuid != -1) {
715 smp_call_function_on(iucv_retrieve_buffer_cpuid,
716 0, 0, 1, iucv_cpuid);
717 /* Release the cpu reserved by iucv_declare_buffer. */
718 smp_put_cpu(iucv_cpuid);
719 iucv_cpuid = -1;
720 }
721 iucv_debug(1, "exiting");
722 return 0;
723}
724
725/**
726 * iucv_remove_handler:
727 * @handler: handler to be removed
728 *
729 * Remove handler when application unregisters.
730 */
731static void
732iucv_remove_handler(handler *handler)
733{
734 unsigned long flags;
735
736 if ((!iucv_pathid_table) || (!handler))
737 return;
738
739 iucv_debug(1, "entering");
740
741 spin_lock_irqsave (&iucv_lock, flags);
742 list_del(&handler->list);
743 if (list_empty(&iucv_handler_table)) {
744 if (register_flag) {
745 unregister_external_interrupt(0x4000, iucv_irq_handler);
746 register_flag = 0;
747 }
748 }
749 spin_unlock_irqrestore (&iucv_lock, flags);
750
751 iucv_debug(1, "exiting");
752 return;
753}
754
755/**
756 * iucv_register_program:
757 * @pgmname: user identification
758 * @userid: machine identification
759 * @pgmmask: Indicates which bits in the pgmname and userid combined will be
760 * used to determine who is given control.
761 * @ops: Address of interrupt handler table.
762 * @pgm_data: Application data to be passed to interrupt handlers.
763 *
764 * Registers an application with IUCV.
765 * Returns:
766 * The address of handler, or NULL on failure.
767 * NOTE on pgmmask:
768 * If pgmname, userid and pgmmask are provided, pgmmask is entered into the
769 * handler as is.
770 * If pgmmask is NULL, the internal mask is set to all 0xff's
771 * When userid is NULL, the first 8 bytes of the internal mask are forced
772 * to 0x00.
773 * If pgmmask and userid are NULL, the first 8 bytes of the internal mask
774 * are forced to 0x00 and the last 16 bytes to 0xff.
775 */
776
777iucv_handle_t
778iucv_register_program (__u8 pgmname[16],
779 __u8 userid[8],
780 __u8 pgmmask[24],
781 iucv_interrupt_ops_t * ops, void *pgm_data)
782{
783 ulong rc = 0; /* return code from function calls */
784 handler *new_handler;
785
786 iucv_debug(1, "entering");
787
788 if (ops == NULL) {
789 /* interrupt table is not defined */
790 printk(KERN_WARNING "%s: Interrupt table is not defined, "
791 "exiting\n", __FUNCTION__);
792 return NULL;
793 }
794 if (!pgmname) {
795 printk(KERN_WARNING "%s: pgmname not provided\n", __FUNCTION__);
796 return NULL;
797 }
798
799 /* Allocate handler entry */
800 new_handler = (handler *)kmalloc(sizeof(handler), GFP_ATOMIC);
801 if (new_handler == NULL) {
802 printk(KERN_WARNING "%s: storage allocation for new handler "
803 "failed.\n", __FUNCTION__);
804 return NULL;
805 }
806
807 if (!iucv_pathid_table) {
808 if (iucv_init()) {
809 kfree(new_handler);
810 return NULL;
811 }
812
813 max_connections = iucv_query_maxconn();
814 iucv_pathid_table = kmalloc(max_connections * sizeof(handler *),
815 GFP_ATOMIC);
816 if (iucv_pathid_table == NULL) {
817 printk(KERN_WARNING "%s: iucv_pathid_table storage "
818 "allocation failed\n", __FUNCTION__);
819 kfree(new_handler);
820 return NULL;
821 }
822 memset (iucv_pathid_table, 0, max_connections * sizeof(handler *));
823 }
824 memset(new_handler, 0, sizeof (handler));
825 memcpy(new_handler->id.user_data, pgmname,
826 sizeof (new_handler->id.user_data));
827 if (userid) {
828 memcpy (new_handler->id.userid, userid,
829 sizeof (new_handler->id.userid));
830 ASCEBC (new_handler->id.userid,
831 sizeof (new_handler->id.userid));
832 EBC_TOUPPER (new_handler->id.userid,
833 sizeof (new_handler->id.userid));
834
835 if (pgmmask) {
836 memcpy (new_handler->id.mask, pgmmask,
837 sizeof (new_handler->id.mask));
838 } else {
839 memset (new_handler->id.mask, 0xFF,
840 sizeof (new_handler->id.mask));
841 }
842 } else {
843 if (pgmmask) {
844 memcpy (new_handler->id.mask, pgmmask,
845 sizeof (new_handler->id.mask));
846 } else {
847 memset (new_handler->id.mask, 0xFF,
848 sizeof (new_handler->id.mask));
849 }
850 memset (new_handler->id.userid, 0x00,
851 sizeof (new_handler->id.userid));
852 }
853 /* fill in the rest of handler */
854 new_handler->pgm_data = pgm_data;
855 new_handler->interrupt_table = ops;
856
857 /*
858 * Check if someone else is registered with same pgmname, userid
859 * and mask. If someone is already registered with same pgmname,
860 * userid and mask, registration will fail and NULL will be returned
861 * to the application.
862 * If identical handler not found, then handler is added to list.
863 */
864 rc = iucv_add_handler(new_handler);
865 if (rc) {
866 printk(KERN_WARNING "%s: Someone already registered with same "
867 "pgmname, userid, pgmmask\n", __FUNCTION__);
868 kfree (new_handler);
869 return NULL;
870 }
871
872 rc = iucv_declare_buffer();
873 if (rc) {
874 char *err = "Unknown";
875 iucv_remove_handler(new_handler);
876 kfree(new_handler);
877 switch(rc) {
878 case 0x03:
879 err = "Directory error";
880 break;
881 case 0x0a:
882 err = "Invalid length";
883 break;
884 case 0x13:
885 err = "Buffer already exists";
886 break;
887 case 0x3e:
888 err = "Buffer overlap";
889 break;
890 case 0x5c:
891 err = "Paging or storage error";
892 break;
893 }
894 printk(KERN_WARNING "%s: iucv_declare_buffer "
895 "returned error 0x%02lx (%s)\n", __FUNCTION__, rc, err);
896 return NULL;
897 }
898 if (!register_flag) {
899 /* request the 0x4000 external interrupt */
900 rc = register_external_interrupt (0x4000, iucv_irq_handler);
901 if (rc) {
902 iucv_remove_handler(new_handler);
903 kfree (new_handler);
904 printk(KERN_WARNING "%s: "
905 "register_external_interrupt returned %ld\n",
906 __FUNCTION__, rc);
907 return NULL;
908
909 }
910 register_flag = 1;
911 }
912 iucv_debug(1, "exiting");
913 return new_handler;
914} /* end of register function */
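
/*
 * Editorial usage sketch (not part of this patch): the minimal
 * registration sequence for an IUCV exploiter module.  All demo_*
 * names are hypothetical.  Passing NULL for userid and pgmmask makes
 * the handler match any partner (mask all 0xff, userid zeroed), as
 * implemented above.
 */
#if 0	/* example only -- not compiled */
#include <linux/init.h>
#include <linux/kernel.h>
#include "iucv.h"

static iucv_handle_t demo_handle;
static iucv_interrupt_ops_t demo_ops;	/* callbacks filled in elsewhere */

static int __init demo_init(void)
{
	__u8 pgmname[16] = "DEMOSRV         ";	/* blank-padded, 16 bytes */

	demo_handle = iucv_register_program(pgmname, NULL, NULL,
					    &demo_ops, NULL);
	return demo_handle ? 0 : -EIO;
}

static void __exit demo_exit(void)
{
	iucv_unregister_program(demo_handle);
}
#endif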
915
916/**
917 * iucv_unregister_program:
918 * @handle: address of handler
919 *
920 * Unregister application with IUCV.
921 * Returns:
922 * 0 on success, -EINVAL, if specified handle is invalid.
923 */
924
925int
926iucv_unregister_program (iucv_handle_t handle)
927{
928 handler *h = NULL;
929 struct list_head *lh;
930 int i;
931 ulong flags;
932
933 iucv_debug(1, "entering");
 934	iucv_debug(1, "address of handler is %p", handle);
935
936 /* Checking if handle is valid */
937 spin_lock_irqsave (&iucv_lock, flags);
938 list_for_each(lh, &iucv_handler_table) {
939 if ((handler *)handle == list_entry(lh, handler, list)) {
940 h = (handler *)handle;
941 break;
942 }
943 }
944 if (!h) {
945 spin_unlock_irqrestore (&iucv_lock, flags);
946 if (handle)
947 printk(KERN_WARNING
948 "%s: Handler not found in iucv_handler_table.\n",
949 __FUNCTION__);
950 else
951 printk(KERN_WARNING
952 "%s: NULL handle passed by application.\n",
953 __FUNCTION__);
954 return -EINVAL;
955 }
956
957 /**
958 * First, walk thru iucv_pathid_table and sever any pathid which is
959 * still pointing to the handler to be removed.
960 */
961 for (i = 0; i < max_connections; i++)
962 if (iucv_pathid_table[i] == h) {
963 spin_unlock_irqrestore (&iucv_lock, flags);
964 iucv_sever(i, h->id.user_data);
965 spin_lock_irqsave(&iucv_lock, flags);
966 }
967 spin_unlock_irqrestore (&iucv_lock, flags);
968
969 iucv_remove_handler(h);
970 kfree(h);
971
972 iucv_debug(1, "exiting");
973 return 0;
974}
975
976/**
977 * iucv_accept:
978 * @pathid: Path identification number
979 * @msglim_reqstd: The number of outstanding messages requested.
980 * @user_data: Data specified by the iucv_connect function.
981 * @flags1: Contains options for this path.
982 * - IPPRTY (0x20) Specifies if you want to send priority message.
983 * - IPRMDATA (0x80) Specifies whether your program can handle a message
984 * in the parameter list.
985 * - IPQUSCE (0x40) Specifies whether you want to quiesce the path being
986 * established.
987 * @handle: Address of handler.
988 * @pgm_data: Application data passed to interrupt handlers.
989 * @flags1_out: Pointer to an int. If not NULL, on return the options for
990 * the path are stored at the given location:
991 * - IPPRTY (0x20) Indicates you may send a priority message.
992 * @msglim: Pointer to an __u16. If not NULL, on return the maximum
993 * number of outstanding messages is stored at the given
994 * location.
995 *
996 * This function is issued after the user receives a Connection Pending external
997 * interrupt and now wishes to complete the IUCV communication path.
998 * Returns:
999 * return code from CP
1000 */
1001int
1002iucv_accept(__u16 pathid, __u16 msglim_reqstd,
1003 __u8 user_data[16], int flags1,
1004 iucv_handle_t handle, void *pgm_data,
1005 int *flags1_out, __u16 * msglim)
1006{
1007 ulong b2f0_result = 0;
1008 ulong flags;
1009 struct list_head *lh;
1010 handler *h = NULL;
1011 iparml_control *parm;
1012
1013 iucv_debug(1, "entering");
1014 iucv_debug(1, "pathid = %d", pathid);
1015
1016 /* Checking if handle is valid */
1017 spin_lock_irqsave (&iucv_lock, flags);
1018 list_for_each(lh, &iucv_handler_table) {
1019 if ((handler *)handle == list_entry(lh, handler, list)) {
1020 h = (handler *)handle;
1021 break;
1022 }
1023 }
1024 spin_unlock_irqrestore (&iucv_lock, flags);
1025
1026 if (!h) {
1027 if (handle)
1028 printk(KERN_WARNING
1029 "%s: Handler not found in iucv_handler_table.\n",
1030 __FUNCTION__);
1031 else
1032 printk(KERN_WARNING
1033 "%s: NULL handle passed by application.\n",
1034 __FUNCTION__);
1035 return -EINVAL;
1036 }
1037
1038 parm = (iparml_control *)grab_param();
1039
1040 parm->ippathid = pathid;
1041 parm->ipmsglim = msglim_reqstd;
1042 if (user_data)
1043 memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
1044
1045 parm->ipflags1 = (__u8)flags1;
1046 b2f0_result = b2f0(ACCEPT, parm);
1047
1048 if (!b2f0_result) {
1049 if (msglim)
1050 *msglim = parm->ipmsglim;
1051 if (pgm_data)
1052 h->pgm_data = pgm_data;
1053 if (flags1_out)
1054 *flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0;
1055 }
1056 release_param(parm);
1057
1058 iucv_debug(1, "exiting");
1059 return b2f0_result;
1060}
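
/*
 * Editorial usage sketch: iucv_accept() is normally issued from the
 * ConnectionPending callback.  demo_handle is the handle returned by
 * iucv_register_program(); all names are hypothetical and error
 * handling is reduced to a printk.
 */
#if 0	/* example only -- not compiled */
static void demo_connection_pending(iucv_ConnectionPending *eib,
				    void *pgm_data)
{
	__u16 msglim;
	int rc;

	rc = iucv_accept(eib->ippathid, 10, eib->ipuser, 0,
			 demo_handle, NULL, NULL, &msglim);
	if (rc)
		printk(KERN_WARNING "demo: accept failed, rc = %d\n", rc);
}
#endif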
1061
1062/**
1063 * iucv_connect:
1064 * @pathid: Path identification number
1065 * @msglim_reqstd: Number of outstanding messages requested
1066 * @user_data: 16-byte user data
1067 * @userid: 8-byte of user identification
1068 * @system_name: 8-byte identifying the system name
1069 * @flags1: Specifies options for this path:
1070 * - IPPRTY (0x20) Specifies if you want to send priority message.
1071 * - IPRMDATA (0x80) Specifies whether your program can handle a message
1072 * in the parameter list.
1073 * - IPQUSCE (0x40) Specifies whether you want to quiesce the path being
1074 * established.
1075 * - IPLOCAL (0x01) Allows an application to force the partner to be on the
1076 * local system. If local is specified then target class
1077 * cannot be specified.
1078 * @flags1_out: Pointer to an int. If not NULL, on return the options for
1079 * the path are stored at the given location:
1080 * - IPPRTY (0x20) Indicates you may send a priority message.
1081 * @msglim: Pointer to an __u16. If not NULL, on return the maximum
1082 * number of outstanding messages is stored at the given
1083 * location.
1084 * @handle: Address of handler.
1085 * @pgm_data: Application data to be passed to interrupt handlers.
1086 *
1087 * This function establishes an IUCV path. Although the connect may complete
1088 * successfully, you are not able to use the path until you receive an IUCV
1089 * Connection Complete external interrupt.
1090 * Returns: return code from CP, or one of the following
1091 * - ENOMEM
1092 * - return code from iucv_declare_buffer
1093 * - EINVAL - invalid handle passed by application
1094 * - EINVAL - pathid address is NULL
1095 * - ENOMEM - pathid table storage allocation failed
1096 * - return code from internal function add_pathid
1097 */
1098int
1099iucv_connect (__u16 *pathid, __u16 msglim_reqstd,
1100 __u8 user_data[16], __u8 userid[8],
1101 __u8 system_name[8], int flags1,
1102 int *flags1_out, __u16 * msglim,
1103 iucv_handle_t handle, void *pgm_data)
1104{
1105 iparml_control *parm;
1106 iparml_control local_parm;
1107 struct list_head *lh;
1108 ulong b2f0_result = 0;
1109 ulong flags;
1110 int add_pathid_result = 0;
1111 handler *h = NULL;
1112 __u8 no_memory[16] = "NO MEMORY";
1113
1114 iucv_debug(1, "entering");
1115
1116 /* Checking if handle is valid */
1117 spin_lock_irqsave (&iucv_lock, flags);
1118 list_for_each(lh, &iucv_handler_table) {
1119 if ((handler *)handle == list_entry(lh, handler, list)) {
1120 h = (handler *)handle;
1121 break;
1122 }
1123 }
1124 spin_unlock_irqrestore (&iucv_lock, flags);
1125
1126 if (!h) {
1127 if (handle)
1128 printk(KERN_WARNING
1129 "%s: Handler not found in iucv_handler_table.\n",
1130 __FUNCTION__);
1131 else
1132 printk(KERN_WARNING
1133 "%s: NULL handle passed by application.\n",
1134 __FUNCTION__);
1135 return -EINVAL;
1136 }
1137
1138 if (pathid == NULL) {
1139 printk(KERN_WARNING "%s: NULL pathid pointer\n",
1140 __FUNCTION__);
1141 return -EINVAL;
1142 }
1143
1144 parm = (iparml_control *)grab_param();
1145
1146 parm->ipmsglim = msglim_reqstd;
1147
1148 if (user_data)
1149 memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
1150
1151 if (userid) {
1152 memcpy(parm->ipvmid, userid, sizeof(parm->ipvmid));
1153 ASCEBC(parm->ipvmid, sizeof(parm->ipvmid));
1154 EBC_TOUPPER(parm->ipvmid, sizeof(parm->ipvmid));
1155 }
1156
1157 if (system_name) {
1158 memcpy(parm->iptarget, system_name, sizeof(parm->iptarget));
1159 ASCEBC(parm->iptarget, sizeof(parm->iptarget));
1160 EBC_TOUPPER(parm->iptarget, sizeof(parm->iptarget));
1161 }
1162
1163 /* In order to establish an IUCV connection, the procedure is:
1164 *
1165 * b2f0(CONNECT)
1166 * take the ippathid from the b2f0 call
1167 * register the handler to the ippathid
1168 *
1169 * Unfortunately, the ConnectionEstablished message gets sent after the
1170 * b2f0(CONNECT) call but before the register is handled.
1171 *
1172 * In order for this race condition to be eliminated, the IUCV Control
1173 * Interrupts must be disabled for the above procedure.
1174 *
1175 * David Kennedy <dkennedy@linuxcare.com>
1176 */
1177
1178	/* Disable all IUCV interrupt classes for the window below */
1179 iucv_setmask(~(AllInterrupts));
1180 messagesDisabled = 1;
1181
1182 spin_lock_irqsave (&iucv_lock, flags);
1183 parm->ipflags1 = (__u8)flags1;
1184 b2f0_result = b2f0(CONNECT, parm);
1185 memcpy(&local_parm, parm, sizeof(local_parm));
1186 release_param(parm);
1187 parm = &local_parm;
1188 if (!b2f0_result)
1189 add_pathid_result = __iucv_add_pathid(parm->ippathid, h);
1190 spin_unlock_irqrestore (&iucv_lock, flags);
1191
1192 if (b2f0_result) {
1193 iucv_setmask(~0);
1194 messagesDisabled = 0;
1195 return b2f0_result;
1196 }
1197
1198 *pathid = parm->ippathid;
1199
1200	/* Re-enable control interrupts only; the connection interrupt handler restores the rest */
1201 iucv_setmask(IUCVControlInterruptsFlag);
1202
1203 if (msglim)
1204 *msglim = parm->ipmsglim;
1205 if (flags1_out)
1206 *flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0;
1207
1208 if (add_pathid_result) {
1209 iucv_sever(*pathid, no_memory);
1210 printk(KERN_WARNING "%s: add_pathid failed with rc ="
1211 " %d\n", __FUNCTION__, add_pathid_result);
1212 return(add_pathid_result);
1213 }
1214
1215 iucv_debug(1, "exiting");
1216 return b2f0_result;
1217}
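
/*
 * Editorial usage sketch: an active connect to a peer VM guest.  The
 * userid "DEMOPEER" is hypothetical.  As documented above, the path
 * may only be used after the ConnectionComplete interrupt arrives.
 */
#if 0	/* example only -- not compiled */
static int demo_connect(void)
{
	__u8 peer[8] = "DEMOPEER";	/* exactly 8 bytes */
	__u16 pathid, msglim;
	int rc;

	rc = iucv_connect(&pathid, 10, NULL, peer, NULL, 0,
			  NULL, &msglim, demo_handle, NULL);
	if (rc)
		printk(KERN_WARNING "demo: connect failed, rc = %d\n", rc);
	return rc;
}
#endif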
1218
1219/**
1220 * iucv_purge:
1221 * @pathid: Path identification number
1222 * @msgid: Message ID of message to purge.
1223 * @srccls: Message class of the message to purge.
1224 * @audit: Pointer to an __u32. If not NULL, on return, information about
1225 * asynchronous errors that may have affected the normal completion
1226 *         of this message is stored at the given location.
1227 *
1228 * Cancels a message you have sent.
1229 * Returns: return code from CP
1230 */
1231int
1232iucv_purge (__u16 pathid, __u32 msgid, __u32 srccls, __u32 *audit)
1233{
1234 iparml_purge *parm;
1235 ulong b2f0_result = 0;
1236
1237 iucv_debug(1, "entering");
1238 iucv_debug(1, "pathid = %d", pathid);
1239
1240 parm = (iparml_purge *)grab_param();
1241
1242 parm->ipmsgid = msgid;
1243 parm->ippathid = pathid;
1244 parm->ipsrccls = srccls;
1245 parm->ipflags1 |= (IPSRCCLS | IPFGMID | IPFGPID);
1246 b2f0_result = b2f0(PURGE, parm);
1247
1248 if (!b2f0_result && audit) {
1249 memcpy(audit, parm->ipaudit, sizeof(parm->ipaudit));
1250 /* parm->ipaudit has only 3 bytes */
1251 *audit >>= 8;
1252 }
1253
1254 release_param(parm);
1255
1256 iucv_debug(1, "b2f0_result = %ld", b2f0_result);
1257 iucv_debug(1, "exiting");
1258 return b2f0_result;
1259}
1260
1261/**
1262 * iucv_query_generic:
1263 * @want_maxconn: Flag, describing which value is to be returned.
1264 *
1265 * Helper function for iucv_query_maxconn() and iucv_query_bufsize().
1266 *
1267 * Returns: The buffersize, if want_maxconn is 0; the maximum number of
1268 * connections, if want_maxconn is 1 or an error-code < 0 on failure.
1269 */
1270static int
1271iucv_query_generic(int want_maxconn)
1272{
1273 iparml_purge *parm = (iparml_purge *)grab_param();
1274 int bufsize, maxconn;
1275 int ccode;
1276
1277 /**
1278 * Call b2f0 and store R0 (max buffer size),
1279 * R1 (max connections) and CC.
1280 */
1281 asm volatile (
1282 "LRA 1,0(%4)\n\t"
1283 "LR 0,%3\n\t"
1284 ".long 0xb2f01000\n\t"
1285 "IPM %0\n\t"
1286 "SRL %0,28\n\t"
1287 "ST 0,%1\n\t"
1288 "ST 1,%2\n\t"
1289 : "=d" (ccode), "=m" (bufsize), "=m" (maxconn)
1290 : "d" (QUERY), "a" (parm)
1291 : "0", "1", "cc"
1292 );
1293 release_param(parm);
1294
1295 if (ccode)
1296 return -EPERM;
1297 if (want_maxconn)
1298 return maxconn;
1299 return bufsize;
1300}
1301
1302/**
1303 * iucv_query_maxconn:
1304 *
1305 * Determines the maximum number of connections that may be established.
1306 *
1307 * Returns: Maximum number of connections that can be established.
1308 */
1309ulong
1310iucv_query_maxconn(void)
1311{
1312 return iucv_query_generic(1);
1313}
1314
1315/**
1316 * iucv_query_bufsize:
1317 *
1318 * Determines the size of the external interrupt buffer.
1319 *
1320 * Returns: Size of external interrupt buffer.
1321 */
1322ulong
1323iucv_query_bufsize (void)
1324{
1325 return iucv_query_generic(0);
1326}
1327
1328/**
1329 * iucv_quiesce:
1330 * @pathid: Path identification number
1331 * @user_data: 16-byte user data
1332 *
1333 * Temporarily suspends incoming messages on an IUCV path.
1334 * You can later reactivate the path by invoking the iucv_resume function.
1335 * Returns: return code from CP
1336 */
1337int
1338iucv_quiesce (__u16 pathid, __u8 user_data[16])
1339{
1340 iparml_control *parm;
1341 ulong b2f0_result = 0;
1342
1343 iucv_debug(1, "entering");
1344 iucv_debug(1, "pathid = %d", pathid);
1345
1346 parm = (iparml_control *)grab_param();
1347
1348 memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
1349 parm->ippathid = pathid;
1350
1351 b2f0_result = b2f0(QUIESCE, parm);
1352 release_param(parm);
1353
1354 iucv_debug(1, "b2f0_result = %ld", b2f0_result);
1355 iucv_debug(1, "exiting");
1356
1357 return b2f0_result;
1358}
1359
1360/**
1361 * iucv_receive:
1362 * @pathid: Path identification number.
1363 * @buffer: Address of buffer to receive. Must be below 2G.
1364 * @buflen: Length of buffer to receive.
1365 * @msgid: Specifies the message ID.
1366 * @trgcls: Specifies target class.
1367 * @flags1_out: Receives options for path on return.
1368 * - IPNORPY (0x10) Specifies whether a reply is required
1369 * - IPPRTY (0x20) Specifies if you want to send priority message
1370 * - IPRMDATA (0x80) Specifies the data is contained in the parameter list
1371 * @residual_buffer: On return, receives the address of the buffer,
1372 *                   advanced by the number of bytes received.
1373 * @residual_length: On return, receives one of the following values:
1374 * - 0 If the receive buffer is the same length as
1375 * the message.
1376 * - Remaining bytes in buffer If the receive buffer is longer than the
1377 * message.
1378 * - Remaining bytes in message If the receive buffer is shorter than the
1379 * message.
1380 *
1381 * This function receives messages that are being sent to you over established
1382 * paths.
1383 * Returns: return code from CP IUCV call; 5 whenever the receive buffer
1384 *          is shorter than the message;
1385 *          -EINVAL - buffer address is NULL
1386 */
1387int
1388iucv_receive (__u16 pathid, __u32 msgid, __u32 trgcls,
1389 void *buffer, ulong buflen,
1390 int *flags1_out, ulong * residual_buffer, ulong * residual_length)
1391{
1392 iparml_db *parm;
1393 ulong b2f0_result;
1394 int moved = 0; /* number of bytes moved from parmlist to buffer */
1395
1396 iucv_debug(2, "entering");
1397
1398 if (!buffer)
1399 return -EINVAL;
1400
1401 parm = (iparml_db *)grab_param();
1402
1403 parm->ipbfadr1 = (__u32) (addr_t) buffer;
1404 parm->ipbfln1f = (__u32) ((ulong) buflen);
1405 parm->ipmsgid = msgid;
1406 parm->ippathid = pathid;
1407 parm->iptrgcls = trgcls;
1408 parm->ipflags1 = (IPFGPID | IPFGMID | IPFGMCL);
1409
1410 b2f0_result = b2f0(RECEIVE, parm);
1411
1412 if (!b2f0_result || b2f0_result == 5) {
1413 if (flags1_out) {
1414 iucv_debug(2, "*flags1_out = %d", *flags1_out);
1415 *flags1_out = (parm->ipflags1 & (~0x07));
1416 iucv_debug(2, "*flags1_out = %d", *flags1_out);
1417 }
1418
1419 if (!(parm->ipflags1 & IPRMDATA)) { /*msg not in parmlist */
1420 if (residual_length)
1421 *residual_length = parm->ipbfln1f;
1422
1423 if (residual_buffer)
1424 *residual_buffer = parm->ipbfadr1;
1425 } else {
1426 moved = min_t (unsigned long, buflen, 8);
1427
1428 memcpy ((char *) buffer,
1429 (char *) &parm->ipbfadr1, moved);
1430
1431 if (buflen < 8)
1432 b2f0_result = 5;
1433
1434 if (residual_length)
1435 *residual_length = abs (buflen - 8);
1436
1437 if (residual_buffer)
1438 *residual_buffer = (ulong) (buffer + moved);
1439 }
1440 }
1441 release_param(parm);
1442
1443 iucv_debug(2, "exiting");
1444 return b2f0_result;
1445}
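
/*
 * Editorial usage sketch: receiving a pending message into a flat
 * buffer from the MessagePending callback.  A static buffer keeps the
 * example simple; a real exploiter must ensure the buffer lies below
 * 2G as documented above.
 */
#if 0	/* example only -- not compiled */
static void demo_message_pending(iucv_MessagePending *eib, void *pgm_data)
{
	static __u8 buf[4096];
	ulong residual;
	int rc;

	rc = iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls,
			  buf, sizeof(buf), NULL, NULL, &residual);
	if (rc == 5)
		printk(KERN_WARNING "demo: message truncated, %lu bytes "
		       "left\n", residual);
}
#endif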
1446
1447/*
1448 * Name: iucv_receive_array
1449 * Purpose: This function receives messages that are being sent to you
1450 * over established paths.
1451 * Input: pathid - path identification number
1452 * buffer - address of array of buffers
1453 * buflen - total length of buffers
1454 * msgid - specifies the message ID.
1455 * trgcls - specifies target class
1456 * Output:
1457 * flags1_out: Options for path.
1458 * IPNORPY - 0x10 specifies whether a reply is required
1459 * IPPRTY - 0x20 specifies if you want to send priority message
1460 * IPRMDATA - 0x80 specifies the data is contained in the parameter list
1461 *	residual_buffer - address of the array entry IUCV is currently
1462 *			  working on.
1463 * residual_length -
1464 * Contains one of the following values, if the receive buffer is:
1465 * The same length as the message, this field is zero.
1466 * Longer than the message, this field contains the number of
1467 * bytes remaining in the buffer.
1468 * Shorter than the message, this field contains the residual
1469 * count (that is, the number of bytes remaining in the
1470 * message that does not fit into the buffer. In this case
1471 *		message that does not fit into the buffer). In this case
1472 * Return: b2f0_result - return code from CP
1473 * (-EINVAL) - buffer address is NULL
1474 */
1475int
1476iucv_receive_array (__u16 pathid,
1477 __u32 msgid, __u32 trgcls,
1478 iucv_array_t * buffer, ulong buflen,
1479 int *flags1_out,
1480 ulong * residual_buffer, ulong * residual_length)
1481{
1482 iparml_db *parm;
1483 ulong b2f0_result;
1484 int i = 0, moved = 0, need_to_move = 8, dyn_len;
1485
1486 iucv_debug(2, "entering");
1487
1488 if (!buffer)
1489 return -EINVAL;
1490
1491 parm = (iparml_db *)grab_param();
1492
1493 parm->ipbfadr1 = (__u32) ((ulong) buffer);
1494 parm->ipbfln1f = (__u32) buflen;
1495 parm->ipmsgid = msgid;
1496 parm->ippathid = pathid;
1497 parm->iptrgcls = trgcls;
1498 parm->ipflags1 = (IPBUFLST | IPFGPID | IPFGMID | IPFGMCL);
1499
1500 b2f0_result = b2f0(RECEIVE, parm);
1501
1502 if (!b2f0_result || b2f0_result == 5) {
1503
1504 if (flags1_out) {
1505 iucv_debug(2, "*flags1_out = %d", *flags1_out);
1506 *flags1_out = (parm->ipflags1 & (~0x07));
1507 iucv_debug(2, "*flags1_out = %d", *flags1_out);
1508 }
1509
1510 if (!(parm->ipflags1 & IPRMDATA)) { /*msg not in parmlist */
1511
1512 if (residual_length)
1513 *residual_length = parm->ipbfln1f;
1514
1515 if (residual_buffer)
1516 *residual_buffer = parm->ipbfadr1;
1517
1518 } else {
1519 /* copy msg from parmlist to users array. */
1520
1521 while ((moved < 8) && (moved < buflen)) {
1522 dyn_len =
1523 min_t (unsigned int,
1524 (buffer + i)->length, need_to_move);
1525
1526 memcpy ((char *)((ulong)((buffer + i)->address)),
1527 ((char *) &parm->ipbfadr1) + moved,
1528 dyn_len);
1529
1530 moved += dyn_len;
1531 need_to_move -= dyn_len;
1532
1533 (buffer + i)->address =
1534 (__u32)
1535 ((ulong)(__u8 *) ((ulong)(buffer + i)->address)
1536 + dyn_len);
1537
1538 (buffer + i)->length -= dyn_len;
1539 i++;
1540 }
1541
1542 if (need_to_move) /* buflen < 8 bytes */
1543 b2f0_result = 5;
1544
1545 if (residual_length)
1546 *residual_length = abs (buflen - 8);
1547
1548 if (residual_buffer) {
1549 if (!moved)
1550 *residual_buffer = (ulong) buffer;
1551 else
1552 *residual_buffer =
1553 (ulong) (buffer + (i - 1));
1554 }
1555
1556 }
1557 }
1558 release_param(parm);
1559
1560 iucv_debug(2, "exiting");
1561 return b2f0_result;
1562}
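
/*
 * Editorial usage sketch: describing two discontiguous buffers with
 * an iucv_array_t list for iucv_receive_array().  Addresses and
 * lengths are 31-bit quantities, hence the __u32 casts.  All demo_*
 * names are hypothetical.
 */
#if 0	/* example only -- not compiled */
static __u8 demo_hdr[16], demo_body[4096];
static iucv_array_t demo_list[2];

static int demo_receive_scattered(iucv_MessagePending *eib)
{
	ulong residual;

	demo_list[0].address = (__u32)(ulong) demo_hdr;
	demo_list[0].length  = sizeof(demo_hdr);
	demo_list[1].address = (__u32)(ulong) demo_body;
	demo_list[1].length  = sizeof(demo_body);

	return iucv_receive_array(eib->ippathid, eib->ipmsgid,
				  eib->iptrgcls, demo_list,
				  sizeof(demo_hdr) + sizeof(demo_body),
				  NULL, NULL, &residual);
}
#endif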
1563
1564/**
1565 * iucv_reject:
1566 * @pathid: Path identification number.
1567 * @msgid: Message ID of the message to reject.
1568 * @trgcls: Target class of the message to reject.
1569 * Returns: return code from CP
1570 *
1571 * Refuses a specified message. Between the time you are notified of a
1572 * message and the time that you complete the message, the message may
1573 * be rejected.
1574 */
1575int
1576iucv_reject (__u16 pathid, __u32 msgid, __u32 trgcls)
1577{
1578 iparml_db *parm;
1579 ulong b2f0_result = 0;
1580
1581 iucv_debug(1, "entering");
1582 iucv_debug(1, "pathid = %d", pathid);
1583
1584 parm = (iparml_db *)grab_param();
1585
1586 parm->ippathid = pathid;
1587 parm->ipmsgid = msgid;
1588 parm->iptrgcls = trgcls;
1589 parm->ipflags1 = (IPFGMCL | IPFGMID | IPFGPID);
1590
1591 b2f0_result = b2f0(REJECT, parm);
1592 release_param(parm);
1593
1594 iucv_debug(1, "b2f0_result = %ld", b2f0_result);
1595 iucv_debug(1, "exiting");
1596
1597 return b2f0_result;
1598}
1599
1600/*
1601 * Name: iucv_reply
1602 * Purpose: This function responds to the two-way messages that you
1603 * receive. You must identify completely the message to
1604 *	    which you wish to reply, i.e., pathid, msgid, and trgcls.
1605 * Input: pathid - path identification number
1606 * msgid - specifies the message ID.
1607 * trgcls - specifies target class
1608 * flags1 - option for path
1609 * IPPRTY- 0x20 - specifies if you want to send priority message
1610 * buffer - address of reply buffer
1611 * buflen - length of reply buffer
1612 * Output: ipbfadr2 - Address of buffer updated by the number
1613 * of bytes you have moved.
1614 * ipbfln2f - Contains one of the following values:
1615 * If the answer buffer is the same length as the reply, this field
1616 * contains zero.
1617 * If the answer buffer is longer than the reply, this field contains
1618 * the number of bytes remaining in the buffer.
1619 * If the answer buffer is shorter than the reply, this field contains
1620 *	    a residual count (that is, the number of bytes remaining in the
1621 *	    reply that does not fit into the buffer). In this
1622 * case b2f0_result = 5.
1623 * Return: b2f0_result - return code from CP
1624 * (-EINVAL) - buffer address is NULL
1625 */
1626int
1627iucv_reply (__u16 pathid,
1628 __u32 msgid, __u32 trgcls,
1629 int flags1,
1630 void *buffer, ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f)
1631{
1632 iparml_db *parm;
1633 ulong b2f0_result;
1634
1635 iucv_debug(2, "entering");
1636
1637 if (!buffer)
1638 return -EINVAL;
1639
1640 parm = (iparml_db *)grab_param();
1641
1642 parm->ipbfadr2 = (__u32) ((ulong) buffer);
1643 parm->ipbfln2f = (__u32) buflen; /* length of message */
1644 parm->ippathid = pathid;
1645 parm->ipmsgid = msgid;
1646 parm->iptrgcls = trgcls;
1647 parm->ipflags1 = (__u8) flags1; /* priority message */
1648
1649 b2f0_result = b2f0(REPLY, parm);
1650
1651 if ((!b2f0_result) || (b2f0_result == 5)) {
1652 if (ipbfadr2)
1653 *ipbfadr2 = parm->ipbfadr2;
1654 if (ipbfln2f)
1655 *ipbfln2f = parm->ipbfln2f;
1656 }
1657 release_param(parm);
1658
1659 iucv_debug(2, "exiting");
1660
1661 return b2f0_result;
1662}
1663
1664/*
1665 * Name: iucv_reply_array
1666 * Purpose: This function responds to the two-way messages that you
1667 * receive. You must identify completely the message to
1668 *	    which you wish to reply, i.e., pathid, msgid, and trgcls.
1669 * The array identifies a list of addresses and lengths of
1670 * discontiguous buffers that contains the reply data.
1671 * Input: pathid - path identification number
1672 * msgid - specifies the message ID.
1673 * trgcls - specifies target class
1674 * flags1 - option for path
1675 * IPPRTY- specifies if you want to send priority message
1676 * buffer - address of array of reply buffers
1677 * buflen - total length of reply buffers
1678 * Output: ipbfadr2 - Address of buffer which IUCV is currently working on.
1679 * ipbfln2f - Contains one of the following values:
1680 * If the answer buffer is the same length as the reply, this field
1681 * contains zero.
1682 * If the answer buffer is longer than the reply, this field contains
1683 * the number of bytes remaining in the buffer.
1684 * If the answer buffer is shorter than the reply, this field contains
1685 *	    a residual count (that is, the number of bytes remaining in the
1686 *	    reply that does not fit into the buffer). In this
1687 * case b2f0_result = 5.
1688 * Return: b2f0_result - return code from CP
1689 * (-EINVAL) - buffer address is NULL
1690*/
1691int
1692iucv_reply_array (__u16 pathid,
1693 __u32 msgid, __u32 trgcls,
1694 int flags1,
1695 iucv_array_t * buffer,
1696 ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f)
1697{
1698 iparml_db *parm;
1699 ulong b2f0_result;
1700
1701 iucv_debug(2, "entering");
1702
1703 if (!buffer)
1704 return -EINVAL;
1705
1706 parm = (iparml_db *)grab_param();
1707
1708 parm->ipbfadr2 = (__u32) ((ulong) buffer);
1709 parm->ipbfln2f = buflen; /* length of message */
1710 parm->ippathid = pathid;
1711 parm->ipmsgid = msgid;
1712 parm->iptrgcls = trgcls;
1713 parm->ipflags1 = (IPANSLST | flags1);
1714
1715 b2f0_result = b2f0(REPLY, parm);
1716
1717 if ((!b2f0_result) || (b2f0_result == 5)) {
1718
1719 if (ipbfadr2)
1720 *ipbfadr2 = parm->ipbfadr2;
1721 if (ipbfln2f)
1722 *ipbfln2f = parm->ipbfln2f;
1723 }
1724 release_param(parm);
1725
1726 iucv_debug(2, "exiting");
1727
1728 return b2f0_result;
1729}
1730
1731/*
1732 * Name: iucv_reply_prmmsg
1733 * Purpose: This function responds to the two-way messages that you
1734 * receive. You must identify completely the message to
1735 *	    which you wish to reply, i.e., pathid, msgid, and trgcls.
1736 * Prmmsg signifies the data is moved into the
1737 * parameter list.
1738 * Input: pathid - path identification number
1739 * msgid - specifies the message ID.
1740 * trgcls - specifies target class
1741 * flags1 - option for path
1742 * IPPRTY- specifies if you want to send priority message
1743 * prmmsg - 8-bytes of data to be placed into the parameter
1744 * list.
1745 * Output: NA
1746 * Return: b2f0_result - return code from CP
1747*/
1748int
1749iucv_reply_prmmsg (__u16 pathid,
1750 __u32 msgid, __u32 trgcls, int flags1, __u8 prmmsg[8])
1751{
1752 iparml_dpl *parm;
1753 ulong b2f0_result;
1754
1755 iucv_debug(2, "entering");
1756
1757 parm = (iparml_dpl *)grab_param();
1758
1759 parm->ippathid = pathid;
1760 parm->ipmsgid = msgid;
1761 parm->iptrgcls = trgcls;
1762 memcpy(parm->iprmmsg, prmmsg, sizeof (parm->iprmmsg));
1763 parm->ipflags1 = (IPRMDATA | flags1);
1764
1765 b2f0_result = b2f0(REPLY, parm);
1766 release_param(parm);
1767
1768 iucv_debug(2, "exiting");
1769
1770 return b2f0_result;
1771}
1772
1773/**
1774 * iucv_resume:
1775 * @pathid: Path identification number
1776 * @user_data: 16-byte of user data
1777 *
1778 * This function restores communication over a quiesced path.
1779 * Returns: return code from CP
1780 */
1781int
1782iucv_resume (__u16 pathid, __u8 user_data[16])
1783{
1784 iparml_control *parm;
1785 ulong b2f0_result = 0;
1786
1787 iucv_debug(1, "entering");
1788 iucv_debug(1, "pathid = %d", pathid);
1789
1790 parm = (iparml_control *)grab_param();
1791
1792	memcpy (parm->ipuser, user_data, sizeof (parm->ipuser));
1793 parm->ippathid = pathid;
1794
1795 b2f0_result = b2f0(RESUME, parm);
1796 release_param(parm);
1797
1798 iucv_debug(1, "exiting");
1799
1800 return b2f0_result;
1801}
1802
1803/*
1804 * Name: iucv_send
1805 * Purpose: sends messages
1806 * Input: pathid - ushort, pathid
1807 * msgid - ulong *, id of message returned to caller
1808 * trgcls - ulong, target message class
1809 * srccls - ulong, source message class
1810 * msgtag - ulong, message tag
1811 * flags1 - Contains options for this path.
1812 *	    IPPRTY - 0x20 - specifies if you want to send a priority message.
1813 * buffer - pointer to buffer
1814 * buflen - ulong, length of buffer
1815 * Output: b2f0_result - return code from b2f0 call
1816 * msgid - returns message id
1817 */
1818int
1819iucv_send (__u16 pathid, __u32 * msgid,
1820 __u32 trgcls, __u32 srccls,
1821 __u32 msgtag, int flags1, void *buffer, ulong buflen)
1822{
1823 iparml_db *parm;
1824 ulong b2f0_result;
1825
1826 iucv_debug(2, "entering");
1827
1828 if (!buffer)
1829 return -EINVAL;
1830
1831 parm = (iparml_db *)grab_param();
1832
1833 parm->ipbfadr1 = (__u32) ((ulong) buffer);
1834 parm->ippathid = pathid;
1835 parm->iptrgcls = trgcls;
1836 parm->ipbfln1f = (__u32) buflen; /* length of message */
1837 parm->ipsrccls = srccls;
1838 parm->ipmsgtag = msgtag;
1839 parm->ipflags1 = (IPNORPY | flags1); /* one way priority message */
1840
1841 b2f0_result = b2f0(SEND, parm);
1842
1843 if ((!b2f0_result) && (msgid))
1844 *msgid = parm->ipmsgid;
1845 release_param(parm);
1846
1847 iucv_debug(2, "exiting");
1848
1849 return b2f0_result;
1850}
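
/*
 * Editorial usage sketch: a one-way send on an established path.  The
 * class and tag values are application-defined; zeroes are used here
 * purely for illustration.
 */
#if 0	/* example only -- not compiled */
static int demo_send(__u16 pathid, void *data, ulong len)
{
	__u32 msgid;

	return iucv_send(pathid, &msgid, 0, 0, 0, 0, data, len);
}
#endif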
1851
1852/*
1853 * Name: iucv_send_array
1854 * Purpose: This function transmits data to another application.
1855 * The contents of buffer is the address of the array of
1856 * addresses and lengths of discontiguous buffers that hold
1857 * the message text. This is a one-way message and the
1858 * receiver will not reply to the message.
1859 * Input: pathid - path identification number
1860 * trgcls - specifies target class
1861 * srccls - specifies the source message class
1862 *	    msgtag - specifies a tag to be associated with the message
1863 * flags1 - option for path
1864 * IPPRTY- specifies if you want to send priority message
1865 * buffer - address of array of send buffers
1866 * buflen - total length of send buffers
1867 * Output: msgid - specifies the message ID.
1868 * Return: b2f0_result - return code from CP
1869 * (-EINVAL) - buffer address is NULL
1870 */
1871int
1872iucv_send_array (__u16 pathid,
1873 __u32 * msgid,
1874 __u32 trgcls,
1875 __u32 srccls,
1876 __u32 msgtag, int flags1, iucv_array_t * buffer, ulong buflen)
1877{
1878 iparml_db *parm;
1879 ulong b2f0_result;
1880
1881 iucv_debug(2, "entering");
1882
1883 if (!buffer)
1884 return -EINVAL;
1885
1886 parm = (iparml_db *)grab_param();
1887
1888 parm->ippathid = pathid;
1889 parm->iptrgcls = trgcls;
1890 parm->ipbfadr1 = (__u32) ((ulong) buffer);
1891 parm->ipbfln1f = (__u32) buflen; /* length of message */
1892 parm->ipsrccls = srccls;
1893 parm->ipmsgtag = msgtag;
1894 parm->ipflags1 = (IPNORPY | IPBUFLST | flags1);
1895 b2f0_result = b2f0(SEND, parm);
1896
1897 if ((!b2f0_result) && (msgid))
1898 *msgid = parm->ipmsgid;
1899 release_param(parm);
1900
1901 iucv_debug(2, "exiting");
1902 return b2f0_result;
1903}
1904
1905/*
1906 * Name: iucv_send_prmmsg
1907 * Purpose: This function transmits data to another application.
1908 * Prmmsg specifies that the 8-bytes of data are to be moved
1909 * into the parameter list. This is a one-way message and the
1910 * receiver will not reply to the message.
1911 * Input: pathid - path identification number
1912 * trgcls - specifies target class
1913 * srccls - specifies the source message class
1914 * msgtag - specifies a tag to be associated with the message
1915 * flags1 - option for path
1916 * IPPRTY- specifies if you want to send priority message
1917 * prmmsg - 8-bytes of data to be placed into parameter list
1918 * Output: msgid - specifies the message ID.
1919 * Return: b2f0_result - return code from CP
1920*/
1921int
1922iucv_send_prmmsg (__u16 pathid,
1923 __u32 * msgid,
1924 __u32 trgcls,
1925 __u32 srccls, __u32 msgtag, int flags1, __u8 prmmsg[8])
1926{
1927 iparml_dpl *parm;
1928 ulong b2f0_result;
1929
1930 iucv_debug(2, "entering");
1931
1932 parm = (iparml_dpl *)grab_param();
1933
1934 parm->ippathid = pathid;
1935 parm->iptrgcls = trgcls;
1936 parm->ipsrccls = srccls;
1937 parm->ipmsgtag = msgtag;
1938 parm->ipflags1 = (IPRMDATA | IPNORPY | flags1);
1939 memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
1940
1941 b2f0_result = b2f0(SEND, parm);
1942
1943 if ((!b2f0_result) && (msgid))
1944 *msgid = parm->ipmsgid;
1945 release_param(parm);
1946
1947 iucv_debug(2, "exiting");
1948
1949 return b2f0_result;
1950}
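
/*
 * Editorial usage sketch: an in-parameter-list send.  The eight data
 * bytes travel inside the IUCV parameter list itself (IPRMDATA), so
 * no separate send buffer is required.
 */
#if 0	/* example only -- not compiled */
static int demo_send_small(__u16 pathid)
{
	__u8 data[8] = "PINGPONG";	/* exactly 8 bytes, no NUL */
	__u32 msgid;

	return iucv_send_prmmsg(pathid, &msgid, 0, 0, 0, 0, data);
}
#endif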
1951
1952/*
1953 * Name: iucv_send2way
1954 * Purpose: This function transmits data to another application.
1955 * Data to be transmitted is in a buffer. The receiver
1956 * of the send is expected to reply to the message and
1957 * a buffer is provided into which IUCV moves the reply
1958 * to this message.
1959 * Input: pathid - path identification number
1960 * trgcls - specifies target class
1961 * srccls - specifies the source message class
1962 * msgtag - specifies a tag associated with the message
1963 * flags1 - option for path
1964 * IPPRTY- specifies if you want to send priority message
1965 * buffer - address of send buffer
1966 * buflen - length of send buffer
1967 * ansbuf - address of buffer to reply with
1968 * anslen - length of buffer to reply with
1969 * Output: msgid - specifies the message ID.
1970 * Return: b2f0_result - return code from CP
1971 * (-EINVAL) - buffer or ansbuf address is NULL
1972 */
1973int
1974iucv_send2way (__u16 pathid,
1975 __u32 * msgid,
1976 __u32 trgcls,
1977 __u32 srccls,
1978 __u32 msgtag,
1979 int flags1,
1980 void *buffer, ulong buflen, void *ansbuf, ulong anslen)
1981{
1982 iparml_db *parm;
1983 ulong b2f0_result;
1984
1985 iucv_debug(2, "entering");
1986
1987 if (!buffer || !ansbuf)
1988 return -EINVAL;
1989
1990 parm = (iparml_db *)grab_param();
1991
1992 parm->ippathid = pathid;
1993 parm->iptrgcls = trgcls;
1994 parm->ipbfadr1 = (__u32) ((ulong) buffer);
1995 parm->ipbfln1f = (__u32) buflen; /* length of message */
1996 parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
1997 parm->ipbfln2f = (__u32) anslen;
1998 parm->ipsrccls = srccls;
1999 parm->ipmsgtag = msgtag;
2000 parm->ipflags1 = flags1; /* priority message */
2001
2002 b2f0_result = b2f0(SEND, parm);
2003
2004 if ((!b2f0_result) && (msgid))
2005 *msgid = parm->ipmsgid;
2006 release_param(parm);
2007
2008 iucv_debug(2, "exiting");
2009
2010 return b2f0_result;
2011}
2012
2013/*
2014 * Name: iucv_send2way_array
2015 * Purpose: This function transmits data to another application.
2016 * The contents of buffer is the address of the array of
2017 * addresses and lengths of discontiguous buffers that hold
2018 * the message text. The receiver of the send is expected to
2019 * reply to the message and a buffer is provided into which
2020 * IUCV moves the reply to this message.
2021 * Input: pathid - path identification number
2022 * trgcls - specifies target class
2023 * srccls - specifies the source message class
2024 *	    msgtag - specifies a tag to be associated with the message
2025 * flags1 - option for path
2026 * IPPRTY- specifies if you want to send priority message
2027 * buffer - address of array of send buffers
2028 * buflen - total length of send buffers
2029 * ansbuf - address of buffer to reply with
2030 * anslen - length of buffer to reply with
2031 * Output: msgid - specifies the message ID.
2032 * Return: b2f0_result - return code from CP
2033 * (-EINVAL) - buffer address is NULL
2034 */
2035int
2036iucv_send2way_array (__u16 pathid,
2037 __u32 * msgid,
2038 __u32 trgcls,
2039 __u32 srccls,
2040 __u32 msgtag,
2041 int flags1,
2042 iucv_array_t * buffer,
2043 ulong buflen, iucv_array_t * ansbuf, ulong anslen)
2044{
2045 iparml_db *parm;
2046 ulong b2f0_result;
2047
2048 iucv_debug(2, "entering");
2049
2050 if (!buffer || !ansbuf)
2051 return -EINVAL;
2052
2053 parm = (iparml_db *)grab_param();
2054
2055 parm->ippathid = pathid;
2056 parm->iptrgcls = trgcls;
2057 parm->ipbfadr1 = (__u32) ((ulong) buffer);
2058 parm->ipbfln1f = (__u32) buflen; /* length of message */
2059 parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
2060 parm->ipbfln2f = (__u32) anslen;
2061 parm->ipsrccls = srccls;
2062 parm->ipmsgtag = msgtag;
2063 parm->ipflags1 = (IPBUFLST | IPANSLST | flags1);
2064 b2f0_result = b2f0(SEND, parm);
2065 if ((!b2f0_result) && (msgid))
2066 *msgid = parm->ipmsgid;
2067 release_param(parm);
2068
2069 iucv_debug(2, "exiting");
2070 return b2f0_result;
2071}
2072
2073/*
2074 * Name: iucv_send2way_prmmsg
2075 * Purpose: This function transmits data to another application.
2076 * Prmmsg specifies that the 8-bytes of data are to be moved
2077 * into the parameter list. This is a two-way message and the
2078 * receiver of the message is expected to reply. A buffer
2079 * is provided into which IUCV moves the reply to this
2080 * message.
2081 * Input: pathid - path identification number
2082 * trgcls - specifies target class
2083 * srccls - specifies the source message class
2084 * msgtag - specifies a tag to be associated with the message
2085 * flags1 - option for path
2086 * IPPRTY- specifies if you want to send priority message
2087 * prmmsg - 8-bytes of data to be placed in parameter list
2088 * ansbuf - address of buffer to reply with
2089 * anslen - length of buffer to reply with
2090 * Output: msgid - specifies the message ID.
2091 * Return: b2f0_result - return code from CP
2092 * (-EINVAL) - buffer address is NULL
2093*/
2094int
2095iucv_send2way_prmmsg (__u16 pathid,
2096 __u32 * msgid,
2097 __u32 trgcls,
2098 __u32 srccls,
2099 __u32 msgtag,
2100 ulong flags1, __u8 prmmsg[8], void *ansbuf, ulong anslen)
2101{
2102 iparml_dpl *parm;
2103 ulong b2f0_result;
2104
2105 iucv_debug(2, "entering");
2106
2107 if (!ansbuf)
2108 return -EINVAL;
2109
2110 parm = (iparml_dpl *)grab_param();
2111
2112 parm->ippathid = pathid;
2113 parm->iptrgcls = trgcls;
2114 parm->ipsrccls = srccls;
2115 parm->ipmsgtag = msgtag;
2116 parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
2117 parm->ipbfln2f = (__u32) anslen;
2118 parm->ipflags1 = (IPRMDATA | flags1); /* message in prmlist */
2119 memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
2120
2121 b2f0_result = b2f0(SEND, parm);
2122
2123 if ((!b2f0_result) && (msgid))
2124 *msgid = parm->ipmsgid;
2125 release_param(parm);
2126
2127 iucv_debug(2, "exiting");
2128
2129 return b2f0_result;
2130}
2131
2132/*
2133 * Name: iucv_send2way_prmmsg_array
2134 * Purpose: This function transmits data to another application.
2135 * Prmmsg specifies that the 8-bytes of data are to be moved
2136 * into the parameter list. This is a two-way message and the
2137 * receiver of the message is expected to reply. A buffer
2138 * is provided into which IUCV moves the reply to this
2139 * message. The contents of ansbuf is the address of the
2140 * array of addresses and lengths of discontiguous buffers
2141 * that contain the reply.
2142 * Input: pathid - path identification number
2143 * trgcls - specifies target class
2144 * srccls - specifies the source message class
2145 * msgtag - specifies a tag to be associated with the message
2146 * flags1 - option for path
2147 * IPPRTY- specifies if you want to send priority message
2148 * prmmsg - 8-bytes of data to be placed into the parameter list
2149 * ansbuf - address of buffer to reply with
2150 * anslen - length of buffer to reply with
2151 * Output: msgid - specifies the message ID.
2152 * Return: b2f0_result - return code from CP
2153 * (-EINVAL) - ansbuf address is NULL
2154 */
2155int
2156iucv_send2way_prmmsg_array (__u16 pathid,
2157 __u32 * msgid,
2158 __u32 trgcls,
2159 __u32 srccls,
2160 __u32 msgtag,
2161 int flags1,
2162 __u8 prmmsg[8],
2163 iucv_array_t * ansbuf, ulong anslen)
2164{
2165 iparml_dpl *parm;
2166 ulong b2f0_result;
2167
2168 iucv_debug(2, "entering");
2169
2170 if (!ansbuf)
2171 return -EINVAL;
2172
2173 parm = (iparml_dpl *)grab_param();
2174
2175 parm->ippathid = pathid;
2176 parm->iptrgcls = trgcls;
2177 parm->ipsrccls = srccls;
2178 parm->ipmsgtag = msgtag;
2179 parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
2180 parm->ipbfln2f = (__u32) anslen;
2181 parm->ipflags1 = (IPRMDATA | IPANSLST | flags1);
2182 memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
2183 b2f0_result = b2f0(SEND, parm);
2184 if ((!b2f0_result) && (msgid))
2185 *msgid = parm->ipmsgid;
2186 release_param(parm);
2187
2188 iucv_debug(2, "exiting");
2189 return b2f0_result;
2190}
2191
2192void
2193iucv_setmask_cpuid (void *result)
2194{
2195 iparml_set_mask *parm;
2196
2197 iucv_debug(1, "entering");
2198 parm = (iparml_set_mask *)grab_param();
2199 parm->ipmask = *((__u8*)result);
2200 *((ulong *)result) = b2f0(SETMASK, parm);
2201 release_param(parm);
2202
2203 iucv_debug(1, "b2f0_result = %ld", *((ulong *)result));
2204 iucv_debug(1, "exiting");
2205}
2206
2207/*
2208 * Name: iucv_setmask
2209 * Purpose: This function enables or disables the following IUCV
2210 * external interruptions: Nonpriority and priority message
2211 * interrupts, nonpriority and priority reply interrupts.
2212 * Input: SetMaskFlag - options for interrupts
2213 * 0x80 - Nonpriority_MessagePendingInterruptsFlag
2214 * 0x40 - Priority_MessagePendingInterruptsFlag
2215 * 0x20 - Nonpriority_MessageCompletionInterruptsFlag
2216 * 0x10 - Priority_MessageCompletionInterruptsFlag
2217 * 0x08 - IUCVControlInterruptsFlag
2218 * Output: NA
2219 * Return: b2f0_result - return code from CP
2220*/
2221int
2222iucv_setmask (int SetMaskFlag)
2223{
2224 union {
2225 ulong result;
2226 __u8 param;
2227 } u;
2228 int cpu;
2229
2230 u.param = SetMaskFlag;
2231 cpu = get_cpu();
2232 smp_call_function_on(iucv_setmask_cpuid, &u, 0, 1, iucv_cpuid);
2233 put_cpu();
2234
2235 return u.result;
2236}
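
/*
 * Editorial usage sketch: enabling every IUCV interrupt class by
 * name.  The OR of the five flags below equals AllInterrupts (0xf8)
 * from iucv.h.
 */
#if 0	/* example only -- not compiled */
static int demo_enable_all_interrupts(void)
{
	return iucv_setmask(Nonpriority_MessagePendingInterruptsFlag |
			    Priority_MessagePendingInterruptsFlag |
			    Nonpriority_MessageCompletionInterruptsFlag |
			    Priority_MessageCompletionInterruptsFlag |
			    IUCVControlInterruptsFlag);	/* == AllInterrupts */
}
#endif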
2237
2238/**
2239 * iucv_sever:
2240 * @pathid: Path identification number
2241 * @user_data: 16-byte of user data
2242 *
2243 * This function terminates an iucv path.
2244 * Returns: return code from CP
2245 */
2246int
2247iucv_sever(__u16 pathid, __u8 user_data[16])
2248{
2249 iparml_control *parm;
2250 ulong b2f0_result = 0;
2251
2252 iucv_debug(1, "entering");
2253 parm = (iparml_control *)grab_param();
2254
2255 memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
2256 parm->ippathid = pathid;
2257
2258 b2f0_result = b2f0(SEVER, parm);
2259
2260 if (!b2f0_result)
2261 iucv_remove_pathid(pathid);
2262 release_param(parm);
2263
2264 iucv_debug(1, "exiting");
2265 return b2f0_result;
2266}
2267
2268/*
2269 * Interrupt Handlers
2270 *******************************************************************************/
2271
2272/**
2273 * iucv_irq_handler:
2274 * @regs: Current registers
2275 * @code: irq code
2276 *
2277 * Handles external interrupts coming in from CP.
2278 * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler().
2279 */
2280static void
2281iucv_irq_handler(struct pt_regs *regs, __u16 code)
2282{
2283 iucv_irqdata *irqdata;
2284
2285 irqdata = kmalloc(sizeof(iucv_irqdata), GFP_ATOMIC);
2286 if (!irqdata) {
2287 printk(KERN_WARNING "%s: out of memory\n", __FUNCTION__);
2288 return;
2289 }
2290
2291 memcpy(&irqdata->data, iucv_external_int_buffer,
2292 sizeof(iucv_GeneralInterrupt));
2293
2294 spin_lock(&iucv_irq_queue_lock);
2295 list_add_tail(&irqdata->queue, &iucv_irq_queue);
2296 spin_unlock(&iucv_irq_queue_lock);
2297
2298 tasklet_schedule(&iucv_tasklet);
2299}
2300
2301/**
2302 * iucv_do_int:
2303 * @int_buf: Pointer to copy of external interrupt buffer
2304 *
2305 * The workhorse for handling interrupts queued by iucv_irq_handler().
2306 * This function is called from the bottom half iucv_tasklet_handler().
2307 */
2308static void
2309iucv_do_int(iucv_GeneralInterrupt * int_buf)
2310{
2311 handler *h = NULL;
2312 struct list_head *lh;
2313 ulong flags;
2314 iucv_interrupt_ops_t *interrupt = NULL; /* interrupt addresses */
2315 __u8 temp_buff1[24], temp_buff2[24]; /* masked handler id. */
2316 int rc = 0, j = 0;
2317 __u8 no_listener[16] = "NO LISTENER";
2318
2319 iucv_debug(2, "entering, pathid %d, type %02X",
2320 int_buf->ippathid, int_buf->iptype);
2321 iucv_dumpit("External Interrupt Buffer:",
2322 int_buf, sizeof(iucv_GeneralInterrupt));
2323
2324 ASCEBC (no_listener, 16);
2325
2326	if (int_buf->iptype != 0x01) {
2327 if ((int_buf->ippathid) > (max_connections - 1)) {
2328 printk(KERN_WARNING "%s: Got interrupt with pathid %d"
2329 " > max_connections (%ld)\n", __FUNCTION__,
2330 int_buf->ippathid, max_connections - 1);
2331 } else {
2332 h = iucv_pathid_table[int_buf->ippathid];
2333 interrupt = h->interrupt_table;
2334 iucv_dumpit("Handler:", h, sizeof(handler));
2335 }
2336 }
2337
2338 /* end of if statement */
2339 switch (int_buf->iptype) {
2340 case 0x01: /* connection pending */
2341 if (messagesDisabled) {
2342 iucv_setmask(~0);
2343 messagesDisabled = 0;
2344 }
2345 spin_lock_irqsave(&iucv_lock, flags);
2346 list_for_each(lh, &iucv_handler_table) {
2347 h = list_entry(lh, handler, list);
2348 memcpy(temp_buff1, &(int_buf->ipvmid), 24);
2349 memcpy(temp_buff2, &(h->id.userid), 24);
2350 for (j = 0; j < 24; j++) {
2351 temp_buff1[j] &= (h->id.mask)[j];
2352 temp_buff2[j] &= (h->id.mask)[j];
2353 }
2354
2355 iucv_dumpit("temp_buff1:",
2356 temp_buff1, sizeof(temp_buff1));
2357 iucv_dumpit("temp_buff2",
2358 temp_buff2, sizeof(temp_buff2));
2359
2360 if (!memcmp (temp_buff1, temp_buff2, 24)) {
2361
2362 iucv_debug(2,
2363 "found a matching handler");
2364 break;
2365 } else
2366 h = NULL;
2367 }
2368 spin_unlock_irqrestore (&iucv_lock, flags);
2369 if (h) {
2370 /* ADD PATH TO PATHID TABLE */
2371 rc = iucv_add_pathid(int_buf->ippathid, h);
2372 if (rc) {
2373 iucv_sever (int_buf->ippathid,
2374 no_listener);
2375 iucv_debug(1,
2376 "add_pathid failed, rc = %d",
2377 rc);
2378 } else {
2379 interrupt = h->interrupt_table;
2380 if (interrupt->ConnectionPending) {
2381 EBCASC (int_buf->ipvmid, 8);
2382 interrupt->ConnectionPending(
2383 (iucv_ConnectionPending *)int_buf,
2384 h->pgm_data);
2385 } else
2386 iucv_sever(int_buf->ippathid,
2387 no_listener);
2388 }
2389 } else
2390 iucv_sever(int_buf->ippathid, no_listener);
2391 break;
2392
2393 case 0x02: /*connection complete */
2394 if (messagesDisabled) {
2395 iucv_setmask(~0);
2396 messagesDisabled = 0;
2397 }
2398 if (h) {
2399 if (interrupt->ConnectionComplete)
2400 {
2401 interrupt->ConnectionComplete(
2402 (iucv_ConnectionComplete *)int_buf,
2403 h->pgm_data);
2404 }
2405 else
2406 iucv_debug(1,
2407 "ConnectionComplete not called");
2408 } else
2409 iucv_sever(int_buf->ippathid, no_listener);
2410 break;
2411
2412 case 0x03: /* connection severed */
2413 if (messagesDisabled) {
2414 iucv_setmask(~0);
2415 messagesDisabled = 0;
2416 }
2417 if (h) {
2418 if (interrupt->ConnectionSevered)
2419 interrupt->ConnectionSevered(
2420 (iucv_ConnectionSevered *)int_buf,
2421 h->pgm_data);
2422
2423 else
2424 iucv_sever (int_buf->ippathid, no_listener);
2425 } else
2426 iucv_sever(int_buf->ippathid, no_listener);
2427 break;
2428
2429 case 0x04: /* connection quiesced */
2430 if (messagesDisabled) {
2431 iucv_setmask(~0);
2432 messagesDisabled = 0;
2433 }
2434 if (h) {
2435 if (interrupt->ConnectionQuiesced)
2436 interrupt->ConnectionQuiesced(
2437 (iucv_ConnectionQuiesced *)int_buf,
2438 h->pgm_data);
2439 else
2440 iucv_debug(1,
2441 "ConnectionQuiesced not called");
2442 }
2443 break;
2444
2445 case 0x05: /* connection resumed */
2446 if (messagesDisabled) {
2447 iucv_setmask(~0);
2448 messagesDisabled = 0;
2449 }
2450 if (h) {
2451 if (interrupt->ConnectionResumed)
2452 interrupt->ConnectionResumed(
2453 (iucv_ConnectionResumed *)int_buf,
2454 h->pgm_data);
2455 else
2456 iucv_debug(1,
2457 "ConnectionResumed not called");
2458 }
2459 break;
2460
2461 case 0x06: /* priority message complete */
2462 case 0x07: /* nonpriority message complete */
2463 if (h) {
2464 if (interrupt->MessageComplete)
2465 interrupt->MessageComplete(
2466 (iucv_MessageComplete *)int_buf,
2467 h->pgm_data);
2468 else
2469 iucv_debug(2,
2470 "MessageComplete not called");
2471 }
2472 break;
2473
2474 case 0x08: /* priority message pending */
2475 case 0x09: /* nonpriority message pending */
2476 if (h) {
2477 if (interrupt->MessagePending)
2478 interrupt->MessagePending(
2479 (iucv_MessagePending *) int_buf,
2480 h->pgm_data);
2481 else
2482 iucv_debug(2,
2483 "MessagePending not called");
2484 }
2485 break;
2486 default: /* unknown iucv type */
2487 printk(KERN_WARNING "%s: unknown iucv interrupt\n",
2488 __FUNCTION__);
2489 break;
2490 } /* end switch */
2491
2492 iucv_debug(2, "exiting pathid %d, type %02X",
2493 int_buf->ippathid, int_buf->iptype);
2494
2495 return;
2496}
2497
2498/**
2499 * iucv_tasklet_handler:
2500 *
2501 * This function loops over the queue of irq buffers and runs iucv_do_int()
2502 * on every queue element.
2503 */
2504static void
2505iucv_tasklet_handler(unsigned long ignored)
2506{
2507 struct list_head head;
2508 struct list_head *next;
2509 ulong flags;
2510
2511 spin_lock_irqsave(&iucv_irq_queue_lock, flags);
2512 list_add(&head, &iucv_irq_queue);
2513 list_del_init(&iucv_irq_queue);
2514 spin_unlock_irqrestore (&iucv_irq_queue_lock, flags);
2515
2516 next = head.next;
2517 while (next != &head) {
2518 iucv_irqdata *p = list_entry(next, iucv_irqdata, queue);
2519
2520 next = next->next;
2521 iucv_do_int(&p->data);
2522 kfree(p);
2523 }
2524
2525 return;
2526}
2527
2528subsys_initcall(iucv_init);
2529module_exit(iucv_exit);
2530
2531/**
2532 * Export all public stuff
2533 */
2534EXPORT_SYMBOL (iucv_bus);
2535EXPORT_SYMBOL (iucv_root);
2536EXPORT_SYMBOL (iucv_accept);
2537EXPORT_SYMBOL (iucv_connect);
2538#if 0
2539EXPORT_SYMBOL (iucv_purge);
2540EXPORT_SYMBOL (iucv_query_maxconn);
2541EXPORT_SYMBOL (iucv_query_bufsize);
2542EXPORT_SYMBOL (iucv_quiesce);
2543#endif
2544EXPORT_SYMBOL (iucv_receive);
2545#if 0
2546EXPORT_SYMBOL (iucv_receive_array);
2547#endif
2548EXPORT_SYMBOL (iucv_reject);
2549#if 0
2550EXPORT_SYMBOL (iucv_reply);
2551EXPORT_SYMBOL (iucv_reply_array);
2552EXPORT_SYMBOL (iucv_resume);
2553#endif
2554EXPORT_SYMBOL (iucv_reply_prmmsg);
2555EXPORT_SYMBOL (iucv_send);
2556#if 0
2557EXPORT_SYMBOL (iucv_send2way);
2558EXPORT_SYMBOL (iucv_send2way_array);
2559EXPORT_SYMBOL (iucv_send_array);
2560EXPORT_SYMBOL (iucv_send2way_prmmsg);
2561EXPORT_SYMBOL (iucv_send2way_prmmsg_array);
2562EXPORT_SYMBOL (iucv_send_prmmsg);
2563EXPORT_SYMBOL (iucv_setmask);
2564#endif
2565EXPORT_SYMBOL (iucv_sever);
2566EXPORT_SYMBOL (iucv_register_program);
2567EXPORT_SYMBOL (iucv_unregister_program);
diff --git a/drivers/s390/net/iucv.h b/drivers/s390/net/iucv.h
new file mode 100644
index 000000000000..198330217eff
--- /dev/null
+++ b/drivers/s390/net/iucv.h
@@ -0,0 +1,849 @@
1/*
2 * drivers/s390/net/iucv.h
3 * IUCV base support.
4 *
5 * S390 version
6 * Copyright (C) 2000 IBM Corporation
7 *    Author(s): Alan Altmark (Alan_Altmark@us.ibm.com)
8 * Xenia Tkatschow (xenia@us.ibm.com)
9 *
10 *
11 * Functionality:
12 * To explore any of the IUCV functions, one must first register
13 * their program using iucv_register_program(). Once your program has
14 * successfully completed a register, it can exploit the other functions.
15 * For further reference on all IUCV functionality, refer to the
16 * CP Programming Services book, also available on the web
17 * through www.ibm.com/s390/vm/pubs, manual # SC24-5760
18 *
19 * Definition of Return Codes
20 * -All positive return codes including zero are reflected back
21 * from CP except for iucv_register_program. The definition of each
22 * return code can be found in CP Programming Services book.
23 * Also available on the web through www.ibm.com/s390/vm/pubs, manual # SC24-5760
24 * - Return Code of:
25 * (-EINVAL) Invalid value
26 * (-ENOMEM) storage allocation failed
27 * pgmmask defined in iucv_register_program will be set depending on input
28 * parameters.
29 *
30 */
31
32#include <linux/types.h>
33#include <asm/debug.h>
34
35/**
36 * Debug Facility stuff
37 */
38#define IUCV_DBF_SETUP_NAME "iucv_setup"
39#define IUCV_DBF_SETUP_LEN 32
40#define IUCV_DBF_SETUP_INDEX 1
41#define IUCV_DBF_SETUP_NR_AREAS 1
42#define IUCV_DBF_SETUP_LEVEL 3
43
44#define IUCV_DBF_DATA_NAME "iucv_data"
45#define IUCV_DBF_DATA_LEN 128
46#define IUCV_DBF_DATA_INDEX 1
47#define IUCV_DBF_DATA_NR_AREAS 1
48#define IUCV_DBF_DATA_LEVEL 2
49
50#define IUCV_DBF_TRACE_NAME "iucv_trace"
51#define IUCV_DBF_TRACE_LEN 16
52#define IUCV_DBF_TRACE_INDEX 2
53#define IUCV_DBF_TRACE_NR_AREAS 1
54#define IUCV_DBF_TRACE_LEVEL 3
55
56#define IUCV_DBF_TEXT(name,level,text) \
57 do { \
58 debug_text_event(iucv_dbf_##name,level,text); \
59 } while (0)
60
61#define IUCV_DBF_HEX(name,level,addr,len) \
62 do { \
63 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
64 } while (0)
65
66DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
67
68#define IUCV_DBF_TEXT_(name,level,text...) \
69 do { \
70 char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
71 sprintf(iucv_dbf_txt_buf, text); \
72 debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
73 put_cpu_var(iucv_dbf_txt_buf); \
74 } while (0)
75
76#define IUCV_DBF_SPRINTF(name,level,text...) \
77 do { \
78		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
80 } while (0)
81
82/**
83 * some more debug stuff
84 */
85#define IUCV_HEXDUMP16(importance,header,ptr) \
86PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
87 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
88 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
89 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
90 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
91 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
92 *(((char*)ptr)+12),*(((char*)ptr)+13), \
93 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
94PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
95 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
96 *(((char*)ptr)+16),*(((char*)ptr)+17), \
97 *(((char*)ptr)+18),*(((char*)ptr)+19), \
98 *(((char*)ptr)+20),*(((char*)ptr)+21), \
99 *(((char*)ptr)+22),*(((char*)ptr)+23), \
100 *(((char*)ptr)+24),*(((char*)ptr)+25), \
101 *(((char*)ptr)+26),*(((char*)ptr)+27), \
102 *(((char*)ptr)+28),*(((char*)ptr)+29), \
103 *(((char*)ptr)+30),*(((char*)ptr)+31));
104
105static inline void
106iucv_hex_dump(unsigned char *buf, size_t len)
107{
108 size_t i;
109
110 for (i = 0; i < len; i++) {
111 if (i && !(i % 16))
112 printk("\n");
113 printk("%02x ", *(buf + i));
114 }
115 printk("\n");
116}
117/**
118 * end of debug stuff
119 */
120
121#define uchar unsigned char
122#define ushort unsigned short
123#define ulong unsigned long
124#define iucv_handle_t void *
125
126/* flags1:
127 * All flags are defined in the field IPFLAGS1 of each function
128 * and can be found in CP Programming Services.
129 * IPLOCAL - Indicates the connect can only be satisfied on the
130 * local system
131 * IPPRTY - Indicates a priority message
132 * IPQUSCE - Indicates you do not want to receive messages on a
133 * path until an iucv_resume is issued
134 * IPRMDATA - Indicates that the message is in the parameter list
135 */
136#define IPLOCAL 0x01
137#define IPPRTY 0x20
138#define IPQUSCE 0x40
139#define IPRMDATA 0x80
140
141/* flags1_out:
142 * All flags are defined in the output field of IPFLAGS1 for each function
143 * and can be found in CP Programming Services.
144 * IPNORPY - Specifies this is a one-way message and no reply is expected.
145 * IPPRTY - Indicates a priority message is permitted. Defined in flags1.
146 */
147#define IPNORPY 0x10
148
149#define Nonpriority_MessagePendingInterruptsFlag 0x80
150#define Priority_MessagePendingInterruptsFlag 0x40
151#define Nonpriority_MessageCompletionInterruptsFlag 0x20
152#define Priority_MessageCompletionInterruptsFlag 0x10
153#define IUCVControlInterruptsFlag 0x08
154#define AllInterrupts 0xf8
155/*
156 * Mapping of external interrupt buffers should be used with the corresponding
157 * interrupt types.
158 * Names: iucv_ConnectionPending -> connection pending
159 * iucv_ConnectionComplete -> connection complete
160 * iucv_ConnectionSevered -> connection severed
161 * iucv_ConnectionQuiesced -> connection quiesced
162 * iucv_ConnectionResumed -> connection resumed
163 * iucv_MessagePending -> message pending
164 * iucv_MessageComplete -> message complete
165 */
166typedef struct {
167 u16 ippathid;
168 uchar ipflags1;
169 uchar iptype;
170 u16 ipmsglim;
171 u16 res1;
172 uchar ipvmid[8];
173 uchar ipuser[16];
174 u32 res3;
175 uchar ippollfg;
176 uchar res4[3];
177} iucv_ConnectionPending;
178
179typedef struct {
180 u16 ippathid;
181 uchar ipflags1;
182 uchar iptype;
183 u16 ipmsglim;
184 u16 res1;
185 uchar res2[8];
186 uchar ipuser[16];
187 u32 res3;
188 uchar ippollfg;
189 uchar res4[3];
190} iucv_ConnectionComplete;
191
192typedef struct {
193 u16 ippathid;
194 uchar res1;
195 uchar iptype;
196 u32 res2;
197 uchar res3[8];
198 uchar ipuser[16];
199 u32 res4;
200 uchar ippollfg;
201 uchar res5[3];
202} iucv_ConnectionSevered;
203
204typedef struct {
205 u16 ippathid;
206 uchar res1;
207 uchar iptype;
208 u32 res2;
209 uchar res3[8];
210 uchar ipuser[16];
211 u32 res4;
212 uchar ippollfg;
213 uchar res5[3];
214} iucv_ConnectionQuiesced;
215
216typedef struct {
217 u16 ippathid;
218 uchar res1;
219 uchar iptype;
220 u32 res2;
221 uchar res3[8];
222 uchar ipuser[16];
223 u32 res4;
224 uchar ippollfg;
225 uchar res5[3];
226} iucv_ConnectionResumed;
227
228typedef struct {
229 u16 ippathid;
230 uchar ipflags1;
231 uchar iptype;
232 u32 ipmsgid;
233 u32 iptrgcls;
234 union u2 {
235 u32 iprmmsg1_u32;
236 uchar iprmmsg1[4];
237 } ln1msg1;
238 union u1 {
239 u32 ipbfln1f;
240 uchar iprmmsg2[4];
241 } ln1msg2;
242 u32 res1[3];
243 u32 ipbfln2f;
244 uchar ippollfg;
245 uchar res2[3];
246} iucv_MessagePending;
247
248typedef struct {
249 u16 ippathid;
250 uchar ipflags1;
251 uchar iptype;
252 u32 ipmsgid;
253 u32 ipaudit;
254 uchar iprmmsg[8];
255 u32 ipsrccls;
256 u32 ipmsgtag;
257 u32 res;
258 u32 ipbfln2f;
259 uchar ippollfg;
260 uchar res2[3];
261} iucv_MessageComplete;
262
263/*
264 * iucv_interrupt_ops_t: Is a vector of functions that handle
265 * IUCV interrupts.
266 * Parameter list:
267 * eib - is a pointer to a 40-byte area described
268 * with one of the structures above.
269 * pgm_data - this data is strictly for the
270 * interrupt handler that is passed by
271 * the application. This may be an address
272 * or token.
273*/
274typedef struct {
275 void (*ConnectionPending) (iucv_ConnectionPending * eib,
276 void *pgm_data);
277 void (*ConnectionComplete) (iucv_ConnectionComplete * eib,
278 void *pgm_data);
279 void (*ConnectionSevered) (iucv_ConnectionSevered * eib,
280 void *pgm_data);
281 void (*ConnectionQuiesced) (iucv_ConnectionQuiesced * eib,
282 void *pgm_data);
283 void (*ConnectionResumed) (iucv_ConnectionResumed * eib,
284 void *pgm_data);
285 void (*MessagePending) (iucv_MessagePending * eib, void *pgm_data);
286 void (*MessageComplete) (iucv_MessageComplete * eib, void *pgm_data);
287} iucv_interrupt_ops_t;
288
289/*
290 * iucv_array_t: Defines a buffer array.
291 * Each entry holds a 31-bit address and a 31-bit length.
292 */
293typedef struct {
294 u32 address;
295 u32 length;
296} iucv_array_t __attribute__ ((aligned (8)));
297
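/*
 * Added sketch (hypothetical names, not part of the original header):
 * describing two discontiguous fragments for iucv_send_array() or
 * iucv_receive_array(). Buffers are assumed to live in directly
 * mapped kernel memory, so a plain cast yields a usable 31-bit
 * address.
 */
static iucv_array_t example_iov[2] __attribute__ ((aligned (8)));

static void
example_fill_iov(void *frag0, u32 len0, void *frag1, u32 len1)
{
	example_iov[0].address = (u32)(unsigned long) frag0;
	example_iov[0].length  = len0;
	example_iov[1].address = (u32)(unsigned long) frag1;
	example_iov[1].length  = len1;
}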
298extern struct bus_type iucv_bus;
299extern struct device *iucv_root;
300
301/* -prototypes- */
302/*
303 * Name: iucv_register_program
304 * Purpose: Registers an application with IUCV
305 * Input: pgmname - user identification
306 *        userid - machine identification
307 *        pgmmask - indicates which bits in the pgmname and userid combined will be
308 *   used to determine who is given control
309 * ops - address of vector of interrupt handlers
310 * pgm_data- application data passed to interrupt handlers
311 * Output: NA
312 * Return: address of handler
313 * (0) - Error occurred, registration not completed.
314 * NOTE: Exact cause of failure will be recorded in syslog.
315*/
316iucv_handle_t iucv_register_program (uchar pgmname[16],
317 uchar userid[8],
318 uchar pgmmask[24],
319 iucv_interrupt_ops_t * ops,
320 void *pgm_data);
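/*
 * Added registration sketch (hypothetical, not part of the original
 * header). The program name is arbitrary; zeroed userid/pgmmask are
 * assumed to accept any partner. example_msg_pending is defined
 * further below, after the iucv_receive prototype.
 */
static void example_msg_pending(iucv_MessagePending *eib, void *pgm_data);

static iucv_handle_t example_handle;

static void
example_conn_pending(iucv_ConnectionPending *eib, void *pgm_data)
{
	/* a real handler would call iucv_accept() here (declared below) */
	printk(KERN_INFO "example: pending connection on path %u\n",
	       (unsigned int) eib->ippathid);
}

static iucv_interrupt_ops_t example_ops = {
	.ConnectionPending = example_conn_pending,
	.MessagePending    = example_msg_pending,
};

static int
example_register(void)
{
	static uchar pgmname[16] = "EXAMPLE";	/* zero padded to 16 */
	static uchar userid[8];			/* zeroes */
	static uchar pgmmask[24];		/* zeroes */

	example_handle = iucv_register_program(pgmname, userid, pgmmask,
					       &example_ops, NULL);
	return example_handle ? 0 : -EIO;
}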
321
322/*
323 * Name: iucv_unregister_program
324 * Purpose: Unregister application with IUCV
325 * Input: address of handler
326 * Output: NA
327 * Return: (0) - Normal return
328 * (-EINVAL) - Internal error, wild pointer
329*/
330int iucv_unregister_program (iucv_handle_t handle);
331
332/*
333 * Name: iucv_accept
334 * Purpose: This function is issued after the user receives a Connection Pending external
335 * interrupt and now wishes to complete the IUCV communication path.
336 * Input: pathid - u16 , Path identification number
337 * msglim_reqstd - u16, The number of outstanding messages requested.
338 * user_data - uchar[16], Data specified by the iucv_connect function.
339 * flags1 - int, Contains options for this path.
340 * -IPPRTY - 0x20- Specifies if you want to send priority message.
341 * -IPRMDATA - 0x80, Specifies whether your program can handle a message
342 * in the parameter list.
343 * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being
344 * established.
345 * handle - iucv_handle_t, Address of handler.
346 * pgm_data - void *, Application data passed to interrupt handlers.
347 * flags1_out - int * Contains information about the path
348 * - IPPRTY - 0x20, Indicates you may send priority messages.
349 * msglim - *u16, Number of outstanding messages.
350 * Output: return code from CP IUCV call.
351*/
352
353int iucv_accept (u16 pathid,
354 u16 msglim_reqstd,
355 uchar user_data[16],
356 int flags1,
357 iucv_handle_t handle,
358 void *pgm_data, int *flags1_out, u16 * msglim);
359
360/*
361 * Name: iucv_connect
362 * Purpose: This function establishes an IUCV path. Although the connect may complete
363 * successfully, you are not able to use the path until you receive an IUCV
364 * Connection Complete external interrupt.
365 * Input: pathid - u16 *, Path identification number
366 * msglim_reqstd - u16, Number of outstanding messages requested
367 * user_data - uchar[16], 16-byte user data
368 * userid - uchar[8], User identification
369 * system_name - uchar[8], 8-byte identifying the system name
370 * flags1 - int, Contains options for this path.
371 * -IPPRTY - 0x20, Specifies if you want to send priority message.
372 * -IPRMDATA - 0x80, Specifies whether your program can handle a message
373 * in the parameter list.
374 * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being
375 * established.
376 *           -IPLOCAL  - 0x01, Allows an application to force the partner to be on
377 * the local system. If local is specified then target class cannot be
378 * specified.
379 * flags1_out - int * Contains information about the path
380 * - IPPRTY - 0x20, Indicates you may send priority messages.
381 * msglim - * u16, Number of outstanding messages
382 * handle - iucv_handle_t, Address of handler
383 * pgm_data - void *, Application data passed to interrupt handlers
384 * Output: return code from CP IUCV call
385 * rc - return code from iucv_declare_buffer
386 * -EINVAL - Invalid handle passed by application
387 * -EINVAL - Pathid address is NULL
388 * add_pathid_result - Return code from internal function add_pathid
389*/
390int
391 iucv_connect (u16 * pathid,
392 u16 msglim_reqstd,
393 uchar user_data[16],
394 uchar userid[8],
395 uchar system_name[8],
396 int flags1,
397 int *flags1_out,
398 u16 * msglim, iucv_handle_t handle, void *pgm_data);
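/*
 * Added connect sketch (hypothetical names, not part of the original
 * header): opens a path to the VM guest "PEERGUES". The path must not
 * be used before the ConnectionComplete interrupt arrives; IPPRTY,
 * IPRMDATA or IPQUSCE could be OR'ed into flags1 as needed.
 */
static int
example_connect(iucv_handle_t handle)
{
	static uchar user_data[16];		/* zeroes */
	static uchar userid[8] = "PEERGUES";	/* exactly 8 characters */
	static uchar system_name[8];		/* zeroes: local system */
	u16 pathid, msglim;
	int flags1_out;

	return iucv_connect(&pathid, 16 /* msglim_reqstd */, user_data,
			    userid, system_name, 0 /* flags1 */,
			    &flags1_out, &msglim, handle, NULL);
}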
399
400/*
401 * Name: iucv_purge
402 * Purpose: This function cancels a message that you have sent.
403 * Input: pathid - Path identification number.
404 * msgid - Specifies the message ID of the message to be purged.
405 * srccls - Specifies the source message class.
406 * Output: audit - Contains information about asynchronous error
407 * that may have affected the normal completion
408 * of this message.
409 * Return: Return code from CP IUCV call.
410*/
411int iucv_purge (u16 pathid, u32 msgid, u32 srccls, __u32 *audit);
412/*
413 * Name: iucv_query_maxconn
414 * Purpose: This function determines the maximum number of communication paths you
415 * may establish.
416 * Return: maxconn - ulong, Maximum number of connections the virtual
417 *                   machine may establish.
418*/
419ulong iucv_query_maxconn (void);
420
421/*
422 * Name: iucv_query_bufsize
423 * Purpose: This function determines how large an external interrupt
424 * buffer IUCV requires to store information.
425 * Return: bufsize - ulong, Size of external interrupt buffer.
426 */
427ulong iucv_query_bufsize (void);
428
429/*
430 * Name: iucv_quiesce
431 * Purpose: This function temporarily suspends incoming messages on an
432 * IUCV path. You can later reactivate the path by invoking
433 * the iucv_resume function.
434 * Input: pathid - Path identification number
435 * user_data - 16-bytes of user data
436 * Output: NA
437 * Return: Return code from CP IUCV call.
438*/
439int iucv_quiesce (u16 pathid, uchar user_data[16]);
440
441/*
442 * Name: iucv_receive
443 * Purpose: This function receives messages that are being sent to you
444 *          over established paths. Data will be returned in the buffer,
445 *          up to buflen bytes.
446 * Input:
447 * pathid - Path identification number.
448 * buffer - Address of buffer to receive.
449 * buflen - Length of buffer to receive.
450 * msgid - Specifies the message ID.
451 * trgcls - Specifies target class.
452 * Output:
453 * flags1_out: int *, Contains information about this path.
454 * IPNORPY - 0x10 Specifies this is a one-way message and no reply is
455 * expected.
456 * IPPRTY - 0x20 Specifies if you want to send priority message.
457 * IPRMDATA - 0x80 specifies the data is contained in the parameter list
458 * residual_buffer - address of buffer updated by the number
459 * of bytes you have received.
460 * residual_length -
461 * Contains one of the following values, if the receive buffer is:
462 * The same length as the message, this field is zero.
463 * Longer than the message, this field contains the number of
464 * bytes remaining in the buffer.
465 * Shorter than the message, this field contains the residual
466 * count (that is, the number of bytes remaining in the
467 *                message that does not fit into the buffer). In this
468 * case b2f0_result = 5.
469 * Return: Return code from CP IUCV call.
470 * (-EINVAL) - buffer address is pointing to NULL
471*/
472int iucv_receive (u16 pathid,
473 u32 msgid,
474 u32 trgcls,
475 void *buffer,
476 ulong buflen,
477 int *flags1_out,
478 ulong * residual_buffer, ulong * residual_length);
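/*
 * Added receive sketch (hypothetical, not part of the original
 * header): a MessagePending handler pulling the message into a local
 * buffer. Parameter-list data (IPRMDATA) is ignored here for brevity.
 */
static void
example_msg_pending(iucv_MessagePending *eib, void *pgm_data)
{
	static unsigned char rcv_buf[1024];
	ulong residual_buffer, residual_length;
	int flags1_out, rc;

	rc = iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls,
			  rcv_buf, sizeof(rcv_buf), &flags1_out,
			  &residual_buffer, &residual_length);
	if (rc != 0)
		printk(KERN_WARNING "example: iucv_receive rc=%d\n", rc);
}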
479
480 /*
481 * Name: iucv_receive_array
482 * Purpose: This function receives messages that are being sent to you
483 *          over established paths. Data will be returned in the array of
484 *          buffers, up to the total length of the buffers.
485 * Input: pathid - Path identification number.
486 * msgid - specifies the message ID.
487 * trgcls - Specifies target class.
488 * buffer - Address of array of buffers.
489 * buflen - Total length of buffers.
490 * Output:
491 * flags1_out: int *, Contains information about this path.
492 * IPNORPY - 0x10 Specifies this is a one-way message and no reply is
493 * expected.
494 * IPPRTY - 0x20 Specifies if you want to send priority message.
495 * IPRMDATA - 0x80 specifies the data is contained in the parameter list
496 * residual_buffer - address points to the current list entry IUCV
497 * is working on.
498 * residual_length -
499 * Contains one of the following values, if the receive buffer is:
500 * The same length as the message, this field is zero.
501 * Longer than the message, this field contains the number of
502 * bytes remaining in the buffer.
503 * Shorter than the message, this field contains the residual
504 * count (that is, the number of bytes remaining in the
505 *                message that does not fit into the buffer). In this
506 * case b2f0_result = 5.
507 * Return: Return code from CP IUCV call.
508 * (-EINVAL) - Buffer address is NULL.
509 */
510int iucv_receive_array (u16 pathid,
511 u32 msgid,
512 u32 trgcls,
513 iucv_array_t * buffer,
514 ulong buflen,
515 int *flags1_out,
516 ulong * residual_buffer, ulong * residual_length);
517
518/*
519 * Name: iucv_reject
520 * Purpose: The reject function refuses a specified message. Between the
521 * time you are notified of a message and the time that you
522 * complete the message, the message may be rejected.
523 * Input: pathid - Path identification number.
524 * msgid - Specifies the message ID.
525 * trgcls - Specifies target class.
526 * Output: NA
527 * Return: Return code from CP IUCV call.
528*/
529int iucv_reject (u16 pathid, u32 msgid, u32 trgcls);
530
531/*
532 * Name: iucv_reply
533 * Purpose: This function responds to the two-way messages that you
534 * receive. You must identify completely the message to
535 *          which you wish to reply, i.e. pathid, msgid, and trgcls.
536 * Input: pathid - Path identification number.
537 * msgid - Specifies the message ID.
538 * trgcls - Specifies target class.
539 * flags1 - Option for path.
540 * IPPRTY- 0x20, Specifies if you want to send priority message.
541 * buffer - Address of reply buffer.
542 * buflen - Length of reply buffer.
543 * Output: residual_buffer - Address of buffer updated by the number
544 * of bytes you have moved.
545 * residual_length - Contains one of the following values:
546 * If the answer buffer is the same length as the reply, this field
547 * contains zero.
548 * If the answer buffer is longer than the reply, this field contains
549 * the number of bytes remaining in the buffer.
550 * If the answer buffer is shorter than the reply, this field contains
551 *               a residual count (that is, the number of bytes remaining in the
552 *               reply that does not fit into the buffer). In this
553 * case b2f0_result = 5.
554 * Return: Return code from CP IUCV call.
555 * (-EINVAL) - Buffer address is NULL.
556*/
557int iucv_reply (u16 pathid,
558 u32 msgid,
559 u32 trgcls,
560 int flags1,
561 void *buffer, ulong buflen, ulong * residual_buffer,
562 ulong * residual_length);
563
564/*
565 * Name: iucv_reply_array
566 * Purpose: This function responds to the two-way messages that you
567 * receive. You must identify completely the message to
568 *          which you wish to reply, i.e. pathid, msgid, and trgcls.
569 * The array identifies a list of addresses and lengths of
570 * discontiguous buffers that contains the reply data.
571 * Input: pathid - Path identification number
572 * msgid - Specifies the message ID.
573 * trgcls - Specifies target class.
574 * flags1 - Option for path.
575 * IPPRTY- 0x20, Specifies if you want to send priority message.
576 * buffer - Address of array of reply buffers.
577 * buflen - Total length of reply buffers.
578 * Output: residual_buffer - Address of buffer which IUCV is currently working on.
579 * residual_length - Contains one of the following values:
580 * If the answer buffer is the same length as the reply, this field
581 * contains zero.
582 * If the answer buffer is longer than the reply, this field contains
583 * the number of bytes remaining in the buffer.
584 * If the answer buffer is shorter than the reply, this field contains
585 *               a residual count (that is, the number of bytes remaining in the
586 *               reply that does not fit into the buffer). In this
587 * case b2f0_result = 5.
588 * Return: Return code from CP IUCV call.
589 * (-EINVAL) - Buffer address is NULL.
590*/
591int iucv_reply_array (u16 pathid,
592 u32 msgid,
593 u32 trgcls,
594 int flags1,
595 iucv_array_t * buffer,
596 ulong buflen, ulong * residual_address,
597 ulong * residual_length);
598
599/*
600 * Name: iucv_reply_prmmsg
601 * Purpose: This function responds to the two-way messages that you
602 * receive. You must identify completely the message to
603 *          which you wish to reply, i.e. pathid, msgid, and trgcls.
604 * Prmmsg signifies the data is moved into the
605 * parameter list.
606 * Input: pathid - Path identification number.
607 * msgid - Specifies the message ID.
608 * trgcls - Specifies target class.
609 * flags1 - Option for path.
610 * IPPRTY- 0x20 Specifies if you want to send priority message.
611 *          prmmsg - 8-bytes of data to be placed into the parameter
612 *                   list.
613 * Output: NA
614 * Return: Return code from CP IUCV call.
615*/
616int iucv_reply_prmmsg (u16 pathid,
617 u32 msgid, u32 trgcls, int flags1, uchar prmmsg[8]);
618
619/*
620 * Name: iucv_resume
621 * Purpose: This function restores communications over a quiesced path
622 * Input: pathid - Path identification number.
623 * user_data - 16-bytes of user data.
624 * Output: NA
625 * Return: Return code from CP IUCV call.
626*/
627int iucv_resume (u16 pathid, uchar user_data[16]);
628
629/*
630 * Name: iucv_send
631 * Purpose: This function transmits data to another application.
632 * Data to be transmitted is in a buffer and this is a
633 * one-way message and the receiver will not reply to the
634 * message.
635 * Input: pathid - Path identification number.
636 * trgcls - Specifies target class.
637 * srccls - Specifies the source message class.
638 * msgtag - Specifies a tag to be associated with the message.
639 * flags1 - Option for path.
640 * IPPRTY- 0x20 Specifies if you want to send priority message.
641 * buffer - Address of send buffer.
642 * buflen - Length of send buffer.
643 * Output: msgid - Specifies the message ID.
644 * Return: Return code from CP IUCV call.
645 * (-EINVAL) - Buffer address is NULL.
646*/
647int iucv_send (u16 pathid,
648 u32 * msgid,
649 u32 trgcls,
650 u32 srccls, u32 msgtag, int flags1, void *buffer, ulong buflen);
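/*
 * Added one-way send sketch (hypothetical names): trgcls, srccls and
 * msgtag are application-defined and simply left at zero here.
 */
static int
example_send(u16 pathid, void *data, ulong len)
{
	u32 msgid;

	return iucv_send(pathid, &msgid, 0 /* trgcls */, 0 /* srccls */,
			 0 /* msgtag */, 0 /* flags1 */, data, len);
}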
651
652/*
653 * Name: iucv_send_array
654 * Purpose: This function transmits data to another application.
655 *          Buffer holds the address of the array of
656 * addresses and lengths of discontiguous buffers that hold
657 * the message text. This is a one-way message and the
658 * receiver will not reply to the message.
659 * Input: pathid - Path identification number.
660 * trgcls - Specifies target class.
661 * srccls - Specifies the source message class.
662 *          msgtag - Specifies a tag to be associated with the message.
663 * flags1 - Option for path.
664 *                   IPPRTY- 0x20, Specifies if you want to send priority message.
665 * buffer - Address of array of send buffers.
666 * buflen - Total length of send buffers.
667 * Output: msgid - Specifies the message ID.
668 * Return: Return code from CP IUCV call.
669 * (-EINVAL) - Buffer address is NULL.
670*/
671int iucv_send_array (u16 pathid,
672 u32 * msgid,
673 u32 trgcls,
674 u32 srccls,
675 u32 msgtag,
676 int flags1, iucv_array_t * buffer, ulong buflen);
677
678/*
679 * Name: iucv_send_prmmsg
680 * Purpose: This function transmits data to another application.
681 * Prmmsg specifies that the 8-bytes of data are to be moved
682 * into the parameter list. This is a one-way message and the
683 * receiver will not reply to the message.
684 * Input: pathid - Path identification number.
685 * trgcls - Specifies target class.
686 * srccls - Specifies the source message class.
687 * msgtag - Specifies a tag to be associated with the message.
688 * flags1 - Option for path.
689 * IPPRTY- 0x20 specifies if you want to send priority message.
690 * prmmsg - 8-bytes of data to be placed into parameter list.
691 * Output: msgid - Specifies the message ID.
692 * Return: Return code from CP IUCV call.
693*/
694int iucv_send_prmmsg (u16 pathid,
695 u32 * msgid,
696 u32 trgcls,
697 u32 srccls, u32 msgtag, int flags1, uchar prmmsg[8]);
698
699/*
700 * Name: iucv_send2way
701 * Purpose: This function transmits data to another application.
702 * Data to be transmitted is in a buffer. The receiver
703 * of the send is expected to reply to the message and
704 * a buffer is provided into which IUCV moves the reply
705 * to this message.
706 * Input: pathid - Path identification number.
707 * trgcls - Specifies target class.
708 * srccls - Specifies the source message class.
709 * msgtag - Specifies a tag associated with the message.
710 * flags1 - Option for path.
711 * IPPRTY- 0x20 Specifies if you want to send priority message.
712 * buffer - Address of send buffer.
713 * buflen - Length of send buffer.
714 * ansbuf - Address of buffer into which IUCV moves the reply of
715 * this message.
716 * anslen - Address of length of buffer.
717 * Output: msgid - Specifies the message ID.
718 * Return: Return code from CP IUCV call.
719 * (-EINVAL) - Buffer or ansbuf address is NULL.
720*/
721int iucv_send2way (u16 pathid,
722 u32 * msgid,
723 u32 trgcls,
724 u32 srccls,
725 u32 msgtag,
726 int flags1,
727 void *buffer, ulong buflen, void *ansbuf, ulong anslen);
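/*
 * Added two-way send sketch (hypothetical names): IUCV moves the
 * partner's answer into ansbuf once the MessageComplete interrupt is
 * delivered for msgid.
 */
static int
example_send2way(u16 pathid, void *req, ulong req_len,
		 void *ansbuf, ulong anslen)
{
	u32 msgid;

	return iucv_send2way(pathid, &msgid, 0 /* trgcls */,
			     0 /* srccls */, 0 /* msgtag */,
			     0 /* flags1 */, req, req_len, ansbuf, anslen);
}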
728
729/*
730 * Name: iucv_send2way_array
731 * Purpose: This function transmits data to another application.
732 *          Buffer holds the address of the array of
733 * addresses and lengths of discontiguous buffers that hold
734 * the message text. The receiver of the send is expected to
735 * reply to the message and a buffer is provided into which
736 * IUCV moves the reply to this message.
737 * Input: pathid - Path identification number.
738 * trgcls - Specifies target class.
739 * srccls - Specifies the source message class.
740 * msgtag - Specifies a tag to be associated with the message.
741 * flags1 - Option for path.
742 * IPPRTY- 0x20 Specifies if you want to send priority message.
743 *          buffer - Address of array of send buffers.
744 * buflen - Total length of send buffers.
745 *          ansbuf - Address of array of buffers into which IUCV moves the reply
746 * of this message.
747 *          anslen - Address of length of reply buffers.
748 * Output: msgid - Specifies the message ID.
749 * Return: Return code from CP IUCV call.
750 * (-EINVAL) - Buffer address is NULL.
751*/
752int iucv_send2way_array (u16 pathid,
753 u32 * msgid,
754 u32 trgcls,
755 u32 srccls,
756 u32 msgtag,
757 int flags1,
758 iucv_array_t * buffer,
759 ulong buflen, iucv_array_t * ansbuf, ulong anslen);
760
761/*
762 * Name: iucv_send2way_prmmsg
763 * Purpose: This function transmits data to another application.
764 * Prmmsg specifies that the 8-bytes of data are to be moved
765 * into the parameter list. This is a two-way message and the
766 * receiver of the message is expected to reply. A buffer
767 * is provided into which IUCV moves the reply to this
768 * message.
769 * Input: pathid - Path identification number.
770 * trgcls - Specifies target class.
771 * srccls - Specifies the source message class.
772 * msgtag - Specifies a tag to be associated with the message.
773 * flags1 - Option for path.
774 * IPPRTY- 0x20 Specifies if you want to send priority message.
775 * prmmsg - 8-bytes of data to be placed in parameter list.
776 * ansbuf - Address of buffer into which IUCV moves the reply of
777 * this message.
778 * anslen - Address of length of buffer.
779 * Output: msgid - Specifies the message ID.
780 * Return: Return code from CP IUCV call.
781 * (-EINVAL) - Buffer address is NULL.
782*/
783int iucv_send2way_prmmsg (u16 pathid,
784 u32 * msgid,
785 u32 trgcls,
786 u32 srccls,
787 u32 msgtag,
788 ulong flags1,
789 uchar prmmsg[8], void *ansbuf, ulong anslen);
790
791/*
792 * Name: iucv_send2way_prmmsg_array
793 * Purpose: This function transmits data to another application.
794 * Prmmsg specifies that the 8-bytes of data are to be moved
795 * into the parameter list. This is a two-way message and the
796 * receiver of the message is expected to reply. A buffer
797 * is provided into which IUCV moves the reply to this
798 *          message. Ansbuf holds the address of the
799 * array of addresses and lengths of discontiguous buffers
800 * that contain the reply.
801 * Input: pathid - Path identification number.
802 * trgcls - Specifies target class.
803 * srccls - Specifies the source message class.
804 * msgtag - Specifies a tag to be associated with the message.
805 * flags1 - Option for path.
806 * IPPRTY- 0x20 specifies if you want to send priority message.
807 * prmmsg - 8-bytes of data to be placed into the parameter list.
808 *          ansbuf - Address of array of buffers into which IUCV moves the reply
809 * of this message.
810 * anslen - Address of length of reply buffers.
811 * Output: msgid - Specifies the message ID.
812 * Return: Return code from CP IUCV call.
813 * (-EINVAL) - Ansbuf address is NULL.
814*/
815int iucv_send2way_prmmsg_array (u16 pathid,
816 u32 * msgid,
817 u32 trgcls,
818 u32 srccls,
819 u32 msgtag,
820 int flags1,
821 uchar prmmsg[8],
822 iucv_array_t * ansbuf, ulong anslen);
823
824/*
825 * Name: iucv_setmask
826 * Purpose: This function enables or disables the following IUCV
827 * external interruptions: Nonpriority and priority message
828 * interrupts, nonpriority and priority reply interrupts.
829 * Input: SetMaskFlag - options for interrupts
830 * 0x80 - Nonpriority_MessagePendingInterruptsFlag
831 * 0x40 - Priority_MessagePendingInterruptsFlag
832 * 0x20 - Nonpriority_MessageCompletionInterruptsFlag
833 * 0x10 - Priority_MessageCompletionInterruptsFlag
834 * 0x08 - IUCVControlInterruptsFlag
835 * Output: NA
836 * Return: Return code from CP IUCV call.
837*/
838int iucv_setmask (int SetMaskFlag);
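/*
 * Added sketch: enabling all five IUCV interrupt classes with the
 * mask bits defined earlier in this header.
 */
static int
example_enable_interrupts(void)
{
	return iucv_setmask(AllInterrupts);	/* 0xf8 */
}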
839
840/*
841 * Name: iucv_sever
842 * Purpose: This function terminates an IUCV path.
843 * Input: pathid - Path identification number.
844 * user_data - 16-bytes of user data.
845 * Output: NA
846 * Return: Return code from CP IUCV call.
847 *          (-EINVAL) - Internal error, wild pointer.
848*/
849int iucv_sever (u16 pathid, uchar user_data[16]);
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
new file mode 100644
index 000000000000..0f76e945b984
--- /dev/null
+++ b/drivers/s390/net/lcs.c
@@ -0,0 +1,2347 @@
1/*
2 * linux/drivers/s390/net/lcs.c
3 *
4 * Linux for S/390 Lan Channel Station Network Driver
5 *
6 * Copyright (C) 1999-2001 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation
8 * Author(s): Original Code written by
9 * DJ Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
10 * Rewritten by
11 * Frank Pavlic (pavlic@de.ibm.com) and
12 * Martin Schwidefsky <schwidefsky@de.ibm.com>
13 *
14 * $Revision: 1.96 $ $Date: 2004/11/11 13:42:33 $
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2, or (at your option)
19 * any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29 */
30
31#include <linux/module.h>
32#include <linux/if.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/trdevice.h>
36#include <linux/fddidevice.h>
37#include <linux/inetdevice.h>
38#include <linux/in.h>
39#include <linux/igmp.h>
40#include <linux/delay.h>
41#include <net/arp.h>
42#include <net/ip.h>
43
44#include <asm/debug.h>
45#include <asm/idals.h>
46#include <asm/timex.h>
47#include <linux/device.h>
48#include <asm/ccwgroup.h>
49
50#include "lcs.h"
51#include "cu3088.h"
52
53
54#if !defined(CONFIG_NET_ETHERNET) && \
55 !defined(CONFIG_TR) && !defined(CONFIG_FDDI)
56#error Cannot compile lcs.c without some net devices switched on.
57#endif
58
59/**
60 * initialization string for output
61 */
62#define VERSION_LCS_C "$Revision: 1.96 $"
63
64static char version[] __initdata = "LCS driver (" VERSION_LCS_C "/" VERSION_LCS_H ")";
65static char debug_buffer[255];
66
67/**
68 * Some prototypes.
69 */
70static void lcs_tasklet(unsigned long);
71static void lcs_start_kernel_thread(struct lcs_card *card);
72static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
73static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
74
75/**
76 * Debug Facility Stuff
77 */
78static debug_info_t *lcs_dbf_setup;
79static debug_info_t *lcs_dbf_trace;
80
81/**
82 * LCS Debug Facility functions
83 */
84static void
85lcs_unregister_debug_facility(void)
86{
87 if (lcs_dbf_setup)
88 debug_unregister(lcs_dbf_setup);
89 if (lcs_dbf_trace)
90 debug_unregister(lcs_dbf_trace);
91}
92
93static int
94lcs_register_debug_facility(void)
95{
96 lcs_dbf_setup = debug_register("lcs_setup", 1, 1, 8);
97 lcs_dbf_trace = debug_register("lcs_trace", 1, 2, 8);
98 if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
99 PRINT_ERR("Not enough memory for debug facility.\n");
100 lcs_unregister_debug_facility();
101 return -ENOMEM;
102 }
103 debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view);
104 debug_set_level(lcs_dbf_setup, 4);
105 debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view);
106 debug_set_level(lcs_dbf_trace, 4);
107 return 0;
108}
109
110/**
111 * Allocate io buffers.
112 */
113static int
114lcs_alloc_channel(struct lcs_channel *channel)
115{
116 int cnt;
117
118 LCS_DBF_TEXT(2, setup, "ichalloc");
119 for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
120		/* allocate memory for iobuffer */
121 channel->iob[cnt].data = (void *)
122 kmalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
123 if (channel->iob[cnt].data == NULL)
124 break;
125 memset(channel->iob[cnt].data, 0, LCS_IOBUFFERSIZE);
126 channel->iob[cnt].state = BUF_STATE_EMPTY;
127 }
128 if (cnt < LCS_NUM_BUFFS) {
129 /* Not all io buffers could be allocated. */
130 LCS_DBF_TEXT(2, setup, "echalloc");
131 while (cnt-- > 0)
132 kfree(channel->iob[cnt].data);
133 return -ENOMEM;
134 }
135 return 0;
136}
137
138/**
139 * Free io buffers.
140 */
141static void
142lcs_free_channel(struct lcs_channel *channel)
143{
144 int cnt;
145
146 LCS_DBF_TEXT(2, setup, "ichfree");
147 for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
148 if (channel->iob[cnt].data != NULL)
149 kfree(channel->iob[cnt].data);
150 channel->iob[cnt].data = NULL;
151 }
152}
153
154/*
155 * Cleanup channel.
156 */
157static void
158lcs_cleanup_channel(struct lcs_channel *channel)
159{
160 LCS_DBF_TEXT(3, setup, "cleanch");
161 /* Kill write channel tasklets. */
162 tasklet_kill(&channel->irq_tasklet);
163 /* Free channel buffers. */
164 lcs_free_channel(channel);
165}
166
167/**
168 * LCS free memory for card and channels.
169 */
170static void
171lcs_free_card(struct lcs_card *card)
172{
173 LCS_DBF_TEXT(2, setup, "remcard");
174 LCS_DBF_HEX(2, setup, &card, sizeof(void*));
175 kfree(card);
176}
177
178/**
179 * LCS alloc memory for card and channels
180 */
181static struct lcs_card *
182lcs_alloc_card(void)
183{
184 struct lcs_card *card;
185 int rc;
186
187 LCS_DBF_TEXT(2, setup, "alloclcs");
188
189 card = kmalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA);
190 if (card == NULL)
191 return NULL;
192 memset(card, 0, sizeof(struct lcs_card));
193 card->lan_type = LCS_FRAME_TYPE_AUTO;
194 card->pkt_seq = 0;
195 card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT;
196 /* Allocate io buffers for the read channel. */
197 rc = lcs_alloc_channel(&card->read);
198 if (rc){
199 LCS_DBF_TEXT(2, setup, "iccwerr");
200 lcs_free_card(card);
201 return NULL;
202 }
203 /* Allocate io buffers for the write channel. */
204 rc = lcs_alloc_channel(&card->write);
205 if (rc) {
206 LCS_DBF_TEXT(2, setup, "iccwerr");
207 lcs_cleanup_channel(&card->read);
208 lcs_free_card(card);
209 return NULL;
210 }
211
212#ifdef CONFIG_IP_MULTICAST
213 INIT_LIST_HEAD(&card->ipm_list);
214#endif
215 LCS_DBF_HEX(2, setup, &card, sizeof(void*));
216 return card;
217}
218
219/*
220 * Setup read channel.
221 */
222static void
223lcs_setup_read_ccws(struct lcs_card *card)
224{
225 int cnt;
226
227 LCS_DBF_TEXT(2, setup, "ireadccw");
228 /* Setup read ccws. */
229 memset(card->read.ccws, 0, sizeof (struct ccw1) * (LCS_NUM_BUFFS + 1));
230 for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
231 card->read.ccws[cnt].cmd_code = LCS_CCW_READ;
232 card->read.ccws[cnt].count = LCS_IOBUFFERSIZE;
233 card->read.ccws[cnt].flags =
234 CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI;
235 /*
236 * Note: we have allocated the buffer with GFP_DMA, so
237 * we do not need to do set_normalized_cda.
238 */
239 card->read.ccws[cnt].cda =
240 (__u32) __pa(card->read.iob[cnt].data);
241 ((struct lcs_header *)
242 card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
243 card->read.iob[cnt].callback = lcs_get_frames_cb;
244 card->read.iob[cnt].state = BUF_STATE_READY;
245 card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
246 }
247 card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
248 card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI;
249 card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
250 /* Last ccw is a tic (transfer in channel). */
251 card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
252 card->read.ccws[LCS_NUM_BUFFS].cda =
253 (__u32) __pa(card->read.ccws);
254	/* Set initial state of the read channel. */
255 card->read.state = CH_STATE_INIT;
256
257 card->read.io_idx = 0;
258 card->read.buf_idx = 0;
259}
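/*
 * Added commentary: the read channel program built above is a ring.
 * The LCS_NUM_BUFFS read CCWs are command chained, and the trailing
 * TIC branches back to the first CCW. The PCI flag yields an
 * intermediate interrupt per filled buffer, while the suspend flag on
 * the last CCW stops the channel before it can wrap onto buffers the
 * driver has not emptied yet.
 */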
260
261static void
262lcs_setup_read(struct lcs_card *card)
263{
264 LCS_DBF_TEXT(3, setup, "initread");
265
266 lcs_setup_read_ccws(card);
267 /* Initialize read channel tasklet. */
268 card->read.irq_tasklet.data = (unsigned long) &card->read;
269 card->read.irq_tasklet.func = lcs_tasklet;
270 /* Initialize waitqueue. */
271 init_waitqueue_head(&card->read.wait_q);
272}
273
274/*
275 * Setup write channel.
276 */
277static void
278lcs_setup_write_ccws(struct lcs_card *card)
279{
280 int cnt;
281
282 LCS_DBF_TEXT(3, setup, "iwritccw");
283 /* Setup write ccws. */
284	memset(card->write.ccws, 0, sizeof (struct ccw1) * (LCS_NUM_BUFFS + 1));
285 for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
286 card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
287 card->write.ccws[cnt].count = 0;
288 card->write.ccws[cnt].flags =
289 CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI;
290 /*
291 * Note: we have allocated the buffer with GFP_DMA, so
292 * we do not need to do set_normalized_cda.
293 */
294 card->write.ccws[cnt].cda =
295 (__u32) __pa(card->write.iob[cnt].data);
296 }
297 /* Last ccw is a tic (transfer in channel). */
298 card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
299 card->write.ccws[LCS_NUM_BUFFS].cda =
300 (__u32) __pa(card->write.ccws);
301 /* Set initial state of the write channel. */
302	card->write.state = CH_STATE_INIT;
303
304 card->write.io_idx = 0;
305 card->write.buf_idx = 0;
306}
307
308static void
309lcs_setup_write(struct lcs_card *card)
310{
311 LCS_DBF_TEXT(3, setup, "initwrit");
312
313 lcs_setup_write_ccws(card);
314 /* Initialize write channel tasklet. */
315 card->write.irq_tasklet.data = (unsigned long) &card->write;
316 card->write.irq_tasklet.func = lcs_tasklet;
317 /* Initialize waitqueue. */
318 init_waitqueue_head(&card->write.wait_q);
319}
320
321static void
322lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads)
323{
324 unsigned long flags;
325
326 spin_lock_irqsave(&card->mask_lock, flags);
327 card->thread_allowed_mask = threads;
328 spin_unlock_irqrestore(&card->mask_lock, flags);
329 wake_up(&card->wait_q);
330}
331static inline int
332lcs_threads_running(struct lcs_card *card, unsigned long threads)
333{
334 unsigned long flags;
335 int rc = 0;
336
337 spin_lock_irqsave(&card->mask_lock, flags);
338 rc = (card->thread_running_mask & threads);
339 spin_unlock_irqrestore(&card->mask_lock, flags);
340 return rc;
341}
342
343static int
344lcs_wait_for_threads(struct lcs_card *card, unsigned long threads)
345{
346 return wait_event_interruptible(card->wait_q,
347 lcs_threads_running(card, threads) == 0);
348}
349
350static inline int
351lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread)
352{
353 unsigned long flags;
354
355 spin_lock_irqsave(&card->mask_lock, flags);
356 if ( !(card->thread_allowed_mask & thread) ||
357 (card->thread_start_mask & thread) ) {
358 spin_unlock_irqrestore(&card->mask_lock, flags);
359 return -EPERM;
360 }
361 card->thread_start_mask |= thread;
362 spin_unlock_irqrestore(&card->mask_lock, flags);
363 return 0;
364}
365
366static void
367lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread)
368{
369 unsigned long flags;
370
371 spin_lock_irqsave(&card->mask_lock, flags);
372 card->thread_running_mask &= ~thread;
373 spin_unlock_irqrestore(&card->mask_lock, flags);
374 wake_up(&card->wait_q);
375}
376
377static inline int
378__lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
379{
380 unsigned long flags;
381 int rc = 0;
382
383 spin_lock_irqsave(&card->mask_lock, flags);
384 if (card->thread_start_mask & thread){
385 if ((card->thread_allowed_mask & thread) &&
386 !(card->thread_running_mask & thread)){
387 rc = 1;
388 card->thread_start_mask &= ~thread;
389 card->thread_running_mask |= thread;
390 } else
391 rc = -EPERM;
392 }
393 spin_unlock_irqrestore(&card->mask_lock, flags);
394 return rc;
395}
396
397static int
398lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
399{
400 int rc = 0;
401 wait_event(card->wait_q,
402 (rc = __lcs_do_run_thread(card, thread)) >= 0);
403 return rc;
404}
405
406static int
407lcs_do_start_thread(struct lcs_card *card, unsigned long thread)
408{
409 unsigned long flags;
410 int rc = 0;
411
412 spin_lock_irqsave(&card->mask_lock, flags);
413 LCS_DBF_TEXT_(4, trace, " %02x%02x%02x",
414 (u8) card->thread_start_mask,
415 (u8) card->thread_allowed_mask,
416 (u8) card->thread_running_mask);
417 rc = (card->thread_start_mask & thread);
418 spin_unlock_irqrestore(&card->mask_lock, flags);
419 return rc;
420}
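/*
 * Added commentary: the three masks guarded by mask_lock implement a
 * small thread life cycle. A bit set in thread_start_mask requests a
 * worker, the worker may only run while the same bit is set in
 * thread_allowed_mask, and thread_running_mask tracks active workers
 * so lcs_wait_for_threads() can sleep until they have all finished.
 */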
421
422/**
423 * Initialize channels, card and state machines.
424 */
425static void
426lcs_setup_card(struct lcs_card *card)
427{
428 LCS_DBF_TEXT(2, setup, "initcard");
429 LCS_DBF_HEX(2, setup, &card, sizeof(void*));
430
431 lcs_setup_read(card);
432 lcs_setup_write(card);
433 /* Set cards initial state. */
434 card->state = DEV_STATE_DOWN;
435 card->tx_buffer = NULL;
436 card->tx_emitted = 0;
437
438 /* Initialize kernel thread task used for LGW commands. */
439 INIT_WORK(&card->kernel_thread_starter,
440 (void *)lcs_start_kernel_thread,card);
441 card->thread_start_mask = 0;
442 card->thread_allowed_mask = 0;
443 card->thread_running_mask = 0;
444 init_waitqueue_head(&card->wait_q);
445 spin_lock_init(&card->lock);
446 spin_lock_init(&card->ipm_lock);
447 spin_lock_init(&card->mask_lock);
448#ifdef CONFIG_IP_MULTICAST
449 INIT_LIST_HEAD(&card->ipm_list);
450#endif
451 INIT_LIST_HEAD(&card->lancmd_waiters);
452}
453
454static inline void
455lcs_clear_multicast_list(struct lcs_card *card)
456{
457#ifdef CONFIG_IP_MULTICAST
458 struct lcs_ipm_list *ipm;
459 unsigned long flags;
460
461 /* Free multicast list. */
462 LCS_DBF_TEXT(3, setup, "clmclist");
463 spin_lock_irqsave(&card->ipm_lock, flags);
464 while (!list_empty(&card->ipm_list)){
465 ipm = list_entry(card->ipm_list.next,
466 struct lcs_ipm_list, list);
467 list_del(&ipm->list);
468 if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED){
469 spin_unlock_irqrestore(&card->ipm_lock, flags);
470 lcs_send_delipm(card, ipm);
471 spin_lock_irqsave(&card->ipm_lock, flags);
472 }
473 kfree(ipm);
474 }
475 spin_unlock_irqrestore(&card->ipm_lock, flags);
476#endif
477}
478/**
479 * Cleanup channels, card and state machines.
480 */
481static void
482lcs_cleanup_card(struct lcs_card *card)
483{
484
485 LCS_DBF_TEXT(3, setup, "cleancrd");
486	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
487
488 if (card->dev != NULL)
489 free_netdev(card->dev);
490 /* Cleanup channels. */
491 lcs_cleanup_channel(&card->write);
492 lcs_cleanup_channel(&card->read);
493}
494
495/**
496 * Start channel.
497 */
498static int
499lcs_start_channel(struct lcs_channel *channel)
500{
501 unsigned long flags;
502 int rc;
503
504 LCS_DBF_TEXT_(4,trace,"ssch%s", channel->ccwdev->dev.bus_id);
505 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
506 rc = ccw_device_start(channel->ccwdev,
507 channel->ccws + channel->io_idx, 0, 0,
508 DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
509 if (rc == 0)
510 channel->state = CH_STATE_RUNNING;
511 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
512 if (rc) {
513 LCS_DBF_TEXT_(4,trace,"essh%s", channel->ccwdev->dev.bus_id);
514 PRINT_ERR("Error in starting channel, rc=%d!\n", rc);
515 }
516 return rc;
517}
518
519static int
520lcs_clear_channel(struct lcs_channel *channel)
521{
522 unsigned long flags;
523 int rc;
524
525 LCS_DBF_TEXT(4,trace,"clearch");
526 LCS_DBF_TEXT_(4,trace,"%s", channel->ccwdev->dev.bus_id);
527 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
528 rc = ccw_device_clear(channel->ccwdev, (addr_t) channel);
529 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
530 if (rc) {
531 LCS_DBF_TEXT_(4,trace,"ecsc%s", channel->ccwdev->dev.bus_id);
532 return rc;
533 }
534 wait_event(channel->wait_q, (channel->state == CH_STATE_CLEARED));
535 channel->state = CH_STATE_STOPPED;
536 return rc;
537}
538
539
540/**
541 * Stop channel.
542 */
543static int
544lcs_stop_channel(struct lcs_channel *channel)
545{
546 unsigned long flags;
547 int rc;
548
549 if (channel->state == CH_STATE_STOPPED)
550 return 0;
551 LCS_DBF_TEXT(4,trace,"haltsch");
552 LCS_DBF_TEXT_(4,trace,"%s", channel->ccwdev->dev.bus_id);
553 channel->state = CH_STATE_INIT;
554 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
555 rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
556 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
557 if (rc) {
558 LCS_DBF_TEXT_(4,trace,"ehsc%s", channel->ccwdev->dev.bus_id);
559 return rc;
560 }
561	/* Asynchronous halt initiated. Wait for its completion. */
562 wait_event(channel->wait_q, (channel->state == CH_STATE_HALTED));
563 lcs_clear_channel(channel);
564 return 0;
565}
566
567/**
568 * start read and write channel
569 */
570static int
571lcs_start_channels(struct lcs_card *card)
572{
573 int rc;
574
575 LCS_DBF_TEXT(2, trace, "chstart");
576 /* start read channel */
577 rc = lcs_start_channel(&card->read);
578 if (rc)
579 return rc;
580 /* start write channel */
581 rc = lcs_start_channel(&card->write);
582 if (rc)
583 lcs_stop_channel(&card->read);
584 return rc;
585}
586
587/**
588 * stop read and write channel
589 */
590static int
591lcs_stop_channels(struct lcs_card *card)
592{
593 LCS_DBF_TEXT(2, trace, "chhalt");
594 lcs_stop_channel(&card->read);
595 lcs_stop_channel(&card->write);
596 return 0;
597}
598
599/**
600 * Get empty buffer.
601 */
602static struct lcs_buffer *
603__lcs_get_buffer(struct lcs_channel *channel)
604{
605 int index;
606
607 LCS_DBF_TEXT(5, trace, "_getbuff");
608 index = channel->io_idx;
609 do {
610 if (channel->iob[index].state == BUF_STATE_EMPTY) {
611 channel->iob[index].state = BUF_STATE_LOCKED;
612 return channel->iob + index;
613 }
614 index = (index + 1) & (LCS_NUM_BUFFS - 1);
615 } while (index != channel->io_idx);
616 return NULL;
617}
618
619static struct lcs_buffer *
620lcs_get_buffer(struct lcs_channel *channel)
621{
622 struct lcs_buffer *buffer;
623 unsigned long flags;
624
625 LCS_DBF_TEXT(5, trace, "getbuff");
626 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
627 buffer = __lcs_get_buffer(channel);
628 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
629 return buffer;
630}
631
632/**
633 * Resume channel program if the channel is suspended.
634 */
635static int
636__lcs_resume_channel(struct lcs_channel *channel)
637{
638 int rc;
639
640 if (channel->state != CH_STATE_SUSPENDED)
641 return 0;
642 if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
643 return 0;
644 LCS_DBF_TEXT_(5, trace, "rsch%s", channel->ccwdev->dev.bus_id);
645 rc = ccw_device_resume(channel->ccwdev);
646 if (rc) {
647 LCS_DBF_TEXT_(4, trace, "ersc%s", channel->ccwdev->dev.bus_id);
648 PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc);
649 } else
650 channel->state = CH_STATE_RUNNING;
651 return rc;
652
653}
654
655/**
656 * Make a buffer ready for processing.
657 */
658static inline void
659__lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
660{
661 int prev, next;
662
663 LCS_DBF_TEXT(5, trace, "rdybits");
664 prev = (index - 1) & (LCS_NUM_BUFFS - 1);
665 next = (index + 1) & (LCS_NUM_BUFFS - 1);
666 /* Check if we may clear the suspend bit of this buffer. */
667 if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) {
668 /* Check if we have to set the PCI bit. */
669 if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND))
670 /* Suspend bit of the previous buffer is not set. */
671 channel->ccws[index].flags |= CCW_FLAG_PCI;
672 /* Suspend bit of the next buffer is set. */
673 channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND;
674 }
675}
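/*
 * Added commentary: together with __lcs_processed_buffer() below, the
 * helper above keeps exactly one suspended CCW trailing the buffers
 * still owned by the driver. The suspend bit of a buffer is only
 * cleared once the next buffer is suspended, and the PCI bit is set
 * when the previous buffer is no longer suspended, so the channel
 * never wraps onto unprocessed data.
 */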
676
677static int
678lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
679{
680 unsigned long flags;
681 int index, rc;
682
683 LCS_DBF_TEXT(5, trace, "rdybuff");
684 if (buffer->state != BUF_STATE_LOCKED &&
685 buffer->state != BUF_STATE_PROCESSED)
686 BUG();
687 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
688 buffer->state = BUF_STATE_READY;
689 index = buffer - channel->iob;
690 /* Set length. */
691 channel->ccws[index].count = buffer->count;
692 /* Check relevant PCI/suspend bits. */
693 __lcs_ready_buffer_bits(channel, index);
694 rc = __lcs_resume_channel(channel);
695 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
696 return rc;
697}
698
699/**
700 * Mark the buffer as processed. Take care of the suspend bit
701 * of the previous buffer. This function is called from
702 * interrupt context, so the lock must not be taken.
703 */
704static int
705__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
706{
707 int index, prev, next;
708
709 LCS_DBF_TEXT(5, trace, "prcsbuff");
710 if (buffer->state != BUF_STATE_READY)
711 BUG();
712 buffer->state = BUF_STATE_PROCESSED;
713 index = buffer - channel->iob;
714 prev = (index - 1) & (LCS_NUM_BUFFS - 1);
715 next = (index + 1) & (LCS_NUM_BUFFS - 1);
716 /* Set the suspend bit and clear the PCI bit of this buffer. */
717 channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
718 channel->ccws[index].flags &= ~CCW_FLAG_PCI;
719 /* Check the suspend bit of the previous buffer. */
720 if (channel->iob[prev].state == BUF_STATE_READY) {
721 /*
722 * Previous buffer is in state ready. It might have
723 * happened in lcs_ready_buffer that the suspend bit
724 * has not been cleared to avoid an endless loop.
725 * Do it now.
726 */
727 __lcs_ready_buffer_bits(channel, prev);
728 }
729 /* Clear PCI bit of next buffer. */
730 channel->ccws[next].flags &= ~CCW_FLAG_PCI;
731 return __lcs_resume_channel(channel);
732}
733
734/**
735 * Put a processed buffer back to state empty.
736 */
737static void
738lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
739{
740 unsigned long flags;
741
742 LCS_DBF_TEXT(5, trace, "relbuff");
743 if (buffer->state != BUF_STATE_LOCKED &&
744 buffer->state != BUF_STATE_PROCESSED)
745 BUG();
746 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
747 buffer->state = BUF_STATE_EMPTY;
748 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
749}
750
751/**
752 * Get buffer for a lan command.
753 */
754static struct lcs_buffer *
755lcs_get_lancmd(struct lcs_card *card, int count)
756{
757 struct lcs_buffer *buffer;
758 struct lcs_cmd *cmd;
759
760 LCS_DBF_TEXT(4, trace, "getlncmd");
761 /* Get buffer and wait if none is available. */
762 wait_event(card->write.wait_q,
763 ((buffer = lcs_get_buffer(&card->write)) != NULL));
764 count += sizeof(struct lcs_header);
765 *(__u16 *)(buffer->data + count) = 0;
766 buffer->count = count + sizeof(__u16);
767 buffer->callback = lcs_release_buffer;
768 cmd = (struct lcs_cmd *) buffer->data;
769 cmd->offset = count;
770 cmd->type = LCS_FRAME_TYPE_CONTROL;
771 cmd->slot = 0;
772 return buffer;
773}
774
775
776static void
777lcs_get_reply(struct lcs_reply *reply)
778{
779 WARN_ON(atomic_read(&reply->refcnt) <= 0);
780 atomic_inc(&reply->refcnt);
781}
782
783static void
784lcs_put_reply(struct lcs_reply *reply)
785{
786 WARN_ON(atomic_read(&reply->refcnt) <= 0);
787 if (atomic_dec_and_test(&reply->refcnt)) {
788 kfree(reply);
789 }
790
791}
792
793static struct lcs_reply *
794lcs_alloc_reply(struct lcs_cmd *cmd)
795{
796 struct lcs_reply *reply;
797
798 LCS_DBF_TEXT(4, trace, "getreply");
799
800 reply = kmalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
801 if (!reply)
802 return NULL;
803 memset(reply,0,sizeof(struct lcs_reply));
804 atomic_set(&reply->refcnt,1);
805 reply->sequence_no = cmd->sequence_no;
806 reply->received = 0;
807 reply->rc = 0;
808 init_waitqueue_head(&reply->wait_q);
809
810 return reply;
811}
812
813/**
814 * Notifier function for lancmd replies. Called from read irq.
815 */
816static void
817lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
818{
819 struct list_head *l, *n;
820 struct lcs_reply *reply;
821
822 LCS_DBF_TEXT(4, trace, "notiwait");
823 spin_lock(&card->lock);
824 list_for_each_safe(l, n, &card->lancmd_waiters) {
825 reply = list_entry(l, struct lcs_reply, list);
826 if (reply->sequence_no == cmd->sequence_no) {
827 lcs_get_reply(reply);
828 list_del_init(&reply->list);
829 if (reply->callback != NULL)
830 reply->callback(card, cmd);
831 reply->received = 1;
832 reply->rc = cmd->return_code;
833 wake_up(&reply->wait_q);
834 lcs_put_reply(reply);
835 break;
836 }
837 }
838 spin_unlock(&card->lock);
839}
840
841/**
842 * Timeout handler for lan commands.
843 */
844void
845lcs_lancmd_timeout(unsigned long data)
846{
847 struct lcs_reply *reply, *list_reply, *r;
848 unsigned long flags;
849
850 LCS_DBF_TEXT(4, trace, "timeout");
851 reply = (struct lcs_reply *) data;
852 spin_lock_irqsave(&reply->card->lock, flags);
853 list_for_each_entry_safe(list_reply, r,
854 &reply->card->lancmd_waiters,list) {
855 if (reply == list_reply) {
856 lcs_get_reply(reply);
857 list_del_init(&reply->list);
858 spin_unlock_irqrestore(&reply->card->lock, flags);
859 reply->received = 1;
860 reply->rc = -ETIME;
861 wake_up(&reply->wait_q);
862 lcs_put_reply(reply);
863 return;
864 }
865 }
866 spin_unlock_irqrestore(&reply->card->lock, flags);
867}
868
869static int
870lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
871 void (*reply_callback)(struct lcs_card *, struct lcs_cmd *))
872{
873 struct lcs_reply *reply;
874 struct lcs_cmd *cmd;
875 struct timer_list timer;
876 unsigned long flags;
877 int rc;
878
879 LCS_DBF_TEXT(4, trace, "sendcmd");
880 cmd = (struct lcs_cmd *) buffer->data;
881 cmd->return_code = 0;
882 cmd->sequence_no = card->sequence_no++;
883 reply = lcs_alloc_reply(cmd);
884 if (!reply)
885 return -ENOMEM;
886 reply->callback = reply_callback;
887 reply->card = card;
888 spin_lock_irqsave(&card->lock, flags);
889 list_add_tail(&reply->list, &card->lancmd_waiters);
890 spin_unlock_irqrestore(&card->lock, flags);
891
892 buffer->callback = lcs_release_buffer;
893 rc = lcs_ready_buffer(&card->write, buffer);
894 if (rc)
895 return rc;
896 init_timer(&timer);
897 timer.function = lcs_lancmd_timeout;
898 timer.data = (unsigned long) reply;
899 timer.expires = jiffies + HZ*card->lancmd_timeout;
900 add_timer(&timer);
901 wait_event(reply->wait_q, reply->received);
902 del_timer_sync(&timer);
903 LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc);
904 rc = reply->rc;
905 lcs_put_reply(reply);
906 return rc ? -EIO : 0;
907}
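/*
 * Added commentary on the lancmd flow: lcs_send_lancmd() queues a
 * refcounted reply descriptor, emits the command buffer on the write
 * channel and sleeps on reply->wait_q. Either
 * lcs_notify_lancmd_waiters() (called from the read irq when the
 * answer arrives) or lcs_lancmd_timeout() completes the descriptor;
 * the refcount keeps it alive while both paths may still touch it.
 */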
908
909/**
910 * LCS startup command
911 */
912static int
913lcs_send_startup(struct lcs_card *card, __u8 initiator)
914{
915 struct lcs_buffer *buffer;
916 struct lcs_cmd *cmd;
917
918 LCS_DBF_TEXT(2, trace, "startup");
919 buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
920 cmd = (struct lcs_cmd *) buffer->data;
921 cmd->cmd_code = LCS_CMD_STARTUP;
922 cmd->initiator = initiator;
923 cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE;
924 return lcs_send_lancmd(card, buffer, NULL);
925}
926
927/**
928 * LCS shutdown command
929 */
930static int
931lcs_send_shutdown(struct lcs_card *card)
932{
933 struct lcs_buffer *buffer;
934 struct lcs_cmd *cmd;
935
936 LCS_DBF_TEXT(2, trace, "shutdown");
937 buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
938 cmd = (struct lcs_cmd *) buffer->data;
939 cmd->cmd_code = LCS_CMD_SHUTDOWN;
940 cmd->initiator = LCS_INITIATOR_TCPIP;
941 return lcs_send_lancmd(card, buffer, NULL);
942}
943
944/**
945 * LCS lanstat command
946 */
947static void
948__lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd)
949{
950 LCS_DBF_TEXT(2, trace, "statcb");
951 memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH);
952}
953
954static int
955lcs_send_lanstat(struct lcs_card *card)
956{
957 struct lcs_buffer *buffer;
958 struct lcs_cmd *cmd;
959
960 LCS_DBF_TEXT(2,trace, "cmdstat");
961 buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
962 cmd = (struct lcs_cmd *) buffer->data;
963 /* Setup lanstat command. */
964 cmd->cmd_code = LCS_CMD_LANSTAT;
965 cmd->initiator = LCS_INITIATOR_TCPIP;
966 cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
967 cmd->cmd.lcs_std_cmd.portno = card->portno;
968 return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb);
969}
970
971/**
972 * send stoplan command
973 */
974static int
975lcs_send_stoplan(struct lcs_card *card, __u8 initiator)
976{
977 struct lcs_buffer *buffer;
978 struct lcs_cmd *cmd;
979
980 LCS_DBF_TEXT(2, trace, "cmdstpln");
981 buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
982 cmd = (struct lcs_cmd *) buffer->data;
983 cmd->cmd_code = LCS_CMD_STOPLAN;
984 cmd->initiator = initiator;
985 cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
986 cmd->cmd.lcs_std_cmd.portno = card->portno;
987 return lcs_send_lancmd(card, buffer, NULL);
988}
989
990/**
991 * send startlan command
992 */
993static void
994__lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd)
995{
996 LCS_DBF_TEXT(2, trace, "srtlancb");
997 card->lan_type = cmd->cmd.lcs_std_cmd.lan_type;
998 card->portno = cmd->cmd.lcs_std_cmd.portno;
999}
1000
1001static int
1002lcs_send_startlan(struct lcs_card *card, __u8 initiator)
1003{
1004 struct lcs_buffer *buffer;
1005 struct lcs_cmd *cmd;
1006
1007 LCS_DBF_TEXT(2, trace, "cmdstaln");
1008 buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
1009 cmd = (struct lcs_cmd *) buffer->data;
1010 cmd->cmd_code = LCS_CMD_STARTLAN;
1011 cmd->initiator = initiator;
1012 cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
1013 cmd->cmd.lcs_std_cmd.portno = card->portno;
1014 return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb);
1015}
1016
1017#ifdef CONFIG_IP_MULTICAST
1018/**
1019 * send setipm command (Multicast)
1020 */
1021static int
1022lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
1023{
1024 struct lcs_buffer *buffer;
1025 struct lcs_cmd *cmd;
1026
1027 LCS_DBF_TEXT(2, trace, "cmdsetim");
1028 buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
1029 cmd = (struct lcs_cmd *) buffer->data;
1030 cmd->cmd_code = LCS_CMD_SETIPM;
1031 cmd->initiator = LCS_INITIATOR_TCPIP;
1032 cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
1033 cmd->cmd.lcs_qipassist.portno = card->portno;
1034 cmd->cmd.lcs_qipassist.version = 4;
1035 cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
1036 memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
1037 &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
1038 LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
1039 return lcs_send_lancmd(card, buffer, NULL);
1040}
1041
1042/**
1043 * send delipm command (Multicast)
1044 */
1045static int
1046lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
1047{
1048 struct lcs_buffer *buffer;
1049 struct lcs_cmd *cmd;
1050
1051 LCS_DBF_TEXT(2, trace, "cmddelim");
1052 buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
1053 cmd = (struct lcs_cmd *) buffer->data;
1054 cmd->cmd_code = LCS_CMD_DELIPM;
1055 cmd->initiator = LCS_INITIATOR_TCPIP;
1056 cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
1057 cmd->cmd.lcs_qipassist.portno = card->portno;
1058 cmd->cmd.lcs_qipassist.version = 4;
1059 cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
1060 memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
1061 &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
1062 LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
1063 return lcs_send_lancmd(card, buffer, NULL);
1064}
1065
1066/**
1067 * check if multicast is supported by LCS
1068 */
1069static void
1070__lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd)
1071{
1072 LCS_DBF_TEXT(2, trace, "chkmccb");
1073 card->ip_assists_supported =
1074 cmd->cmd.lcs_qipassist.ip_assists_supported;
1075 card->ip_assists_enabled =
1076 cmd->cmd.lcs_qipassist.ip_assists_enabled;
1077}
1078
1079static int
1080lcs_check_multicast_support(struct lcs_card *card)
1081{
1082 struct lcs_buffer *buffer;
1083 struct lcs_cmd *cmd;
1084 int rc;
1085
1086 LCS_DBF_TEXT(2, trace, "cmdqipa");
1087 /* Send query ipassist. */
1088 buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
1089 cmd = (struct lcs_cmd *) buffer->data;
1090 cmd->cmd_code = LCS_CMD_QIPASSIST;
1091 cmd->initiator = LCS_INITIATOR_TCPIP;
1092 cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
1093 cmd->cmd.lcs_qipassist.portno = card->portno;
1094 cmd->cmd.lcs_qipassist.version = 4;
1095 cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
1096 rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb);
1097 if (rc != 0) {
1098 PRINT_ERR("Query IPAssist failed. Assuming unsupported!\n");
1099 return -EOPNOTSUPP;
1100 }
1101 /* Print out supported assists: IPv6 */
1102 PRINT_INFO("LCS device %s %s IPv6 support\n", card->dev->name,
1103 (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
1104 "with" : "without");
1105 /* Print out supported assist: Multicast */
1106 PRINT_INFO("LCS device %s %s Multicast support\n", card->dev->name,
1107 (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
1108 "with" : "without");
1109 if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT)
1110 return 0;
1111 return -EOPNOTSUPP;
1112}
1113
1114/**
1115 * set or del multicast address on LCS card
1116 */
1117static void
1118lcs_fix_multicast_list(struct lcs_card *card)
1119{
1120 struct list_head failed_list;
1121 struct lcs_ipm_list *ipm, *tmp;
1122 unsigned long flags;
1123 int rc;
1124
1125 LCS_DBF_TEXT(4,trace, "fixipm");
1126 INIT_LIST_HEAD(&failed_list);
1127 spin_lock_irqsave(&card->ipm_lock, flags);
1128list_modified:
1129 list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){
1130 switch (ipm->ipm_state) {
1131 case LCS_IPM_STATE_SET_REQUIRED:
1132			/* del from ipm_list so no one else can tamper with
1133			 * this entry */
1134 list_del_init(&ipm->list);
1135 spin_unlock_irqrestore(&card->ipm_lock, flags);
1136 rc = lcs_send_setipm(card, ipm);
1137 spin_lock_irqsave(&card->ipm_lock, flags);
1138 if (rc) {
1139				PRINT_INFO("Adding multicast address failed. "
1140 "Table possibly full!\n");
1141 /* store ipm in failed list -> will be added
1142 * to ipm_list again, so a retry will be done
1143 * during the next call of this function */
1144 list_add_tail(&ipm->list, &failed_list);
1145 } else {
1146 ipm->ipm_state = LCS_IPM_STATE_ON_CARD;
1147 /* re-insert into ipm_list */
1148 list_add_tail(&ipm->list, &card->ipm_list);
1149 }
1150 goto list_modified;
1151 case LCS_IPM_STATE_DEL_REQUIRED:
1152 list_del(&ipm->list);
1153 spin_unlock_irqrestore(&card->ipm_lock, flags);
1154 lcs_send_delipm(card, ipm);
1155 spin_lock_irqsave(&card->ipm_lock, flags);
1156 kfree(ipm);
1157 goto list_modified;
1158 case LCS_IPM_STATE_ON_CARD:
1159 break;
1160 }
1161 }
1162 /* re-insert all entries from the failed_list into ipm_list */
1163	list_for_each_entry_safe(ipm, tmp, &failed_list, list) {
1164		list_del_init(&ipm->list);
1165		list_add_tail(&ipm->list, &card->ipm_list);
1166	}
1167 spin_unlock_irqrestore(&card->ipm_lock, flags);
1168 if (card->state == DEV_STATE_UP)
1169 netif_wake_queue(card->dev);
1170}
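/*
 * Note on the list_modified pattern above: lcs_send_setipm() and
 * lcs_send_delipm() sleep while waiting for the lancmd reply, so
 * ipm_lock has to be dropped around them.  Because the list may have
 * changed while the lock was released, the walk restarts from the
 * head of ipm_list after every send.
 */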
1171
1172/**
1173 * map an IP multicast address to the corresponding MAC address
1174 */
1175static void
1176lcs_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev)
1177{
1178	LCS_DBF_TEXT(4, trace, "getmac");
1179 if (dev->type == ARPHRD_IEEE802_TR)
1180 ip_tr_mc_map(ipm, mac);
1181 else
1182 ip_eth_mc_map(ipm, mac);
1183}
1184
1185/**
1186 * flag entries in ipm_list for deletion if they are no longer in the device's multicast list
1187 */
1188static inline void
1189lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
1190{
1191 struct ip_mc_list *im4;
1192 struct list_head *l;
1193 struct lcs_ipm_list *ipm;
1194 unsigned long flags;
1195 char buf[MAX_ADDR_LEN];
1196
1197 LCS_DBF_TEXT(4, trace, "remmclst");
1198 spin_lock_irqsave(&card->ipm_lock, flags);
1199 list_for_each(l, &card->ipm_list) {
1200 ipm = list_entry(l, struct lcs_ipm_list, list);
1201 for (im4 = in4_dev->mc_list; im4 != NULL; im4 = im4->next) {
1202 lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
1203 if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
1204 (memcmp(buf, &ipm->ipm.mac_addr,
1205 LCS_MAC_LENGTH) == 0) )
1206 break;
1207 }
1208 if (im4 == NULL)
1209 ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED;
1210 }
1211 spin_unlock_irqrestore(&card->ipm_lock, flags);
1212}
1213
1214static inline struct lcs_ipm_list *
1215lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf)
1216{
1217 struct lcs_ipm_list *tmp, *ipm = NULL;
1218 struct list_head *l;
1219 unsigned long flags;
1220
1221 LCS_DBF_TEXT(4, trace, "chkmcent");
1222 spin_lock_irqsave(&card->ipm_lock, flags);
1223 list_for_each(l, &card->ipm_list) {
1224 tmp = list_entry(l, struct lcs_ipm_list, list);
1225 if ( (tmp->ipm.ip_addr == im4->multiaddr) &&
1226 (memcmp(buf, &tmp->ipm.mac_addr,
1227 LCS_MAC_LENGTH) == 0) ) {
1228 ipm = tmp;
1229 break;
1230 }
1231 }
1232 spin_unlock_irqrestore(&card->ipm_lock, flags);
1233 return ipm;
1234}
1235
1236static inline void
1237lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
1238{
1239
1240 struct ip_mc_list *im4;
1241 struct lcs_ipm_list *ipm;
1242 char buf[MAX_ADDR_LEN];
1243 unsigned long flags;
1244
1245 LCS_DBF_TEXT(4, trace, "setmclst");
1246 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
1247 lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
1248 ipm = lcs_check_addr_entry(card, im4, buf);
1249 if (ipm != NULL)
1250 continue; /* Address already in list. */
1251 ipm = (struct lcs_ipm_list *)
1252 kmalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
1253 if (ipm == NULL) {
1254 PRINT_INFO("Not enough memory to add "
1255 "new multicast entry!\n");
1256 break;
1257 }
1258 memset(ipm, 0, sizeof(struct lcs_ipm_list));
1259 memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH);
1260 ipm->ipm.ip_addr = im4->multiaddr;
1261 ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED;
1262 spin_lock_irqsave(&card->ipm_lock, flags);
1263 list_add(&ipm->list, &card->ipm_list);
1264 spin_unlock_irqrestore(&card->ipm_lock, flags);
1265 }
1266}
1267
1268static int
1269lcs_register_mc_addresses(void *data)
1270{
1271 struct lcs_card *card;
1272 struct in_device *in4_dev;
1273
1274 card = (struct lcs_card *) data;
1275 daemonize("regipm");
1276
1277 if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD))
1278 return 0;
1279 LCS_DBF_TEXT(4, trace, "regmulti");
1280
1281 in4_dev = in_dev_get(card->dev);
1282 if (in4_dev == NULL)
1283 goto out;
1284 read_lock(&in4_dev->mc_list_lock);
1285	lcs_remove_mc_addresses(card, in4_dev);
1286 lcs_set_mc_addresses(card, in4_dev);
1287 read_unlock(&in4_dev->mc_list_lock);
1288 in_dev_put(in4_dev);
1289
1290 lcs_fix_multicast_list(card);
1291out:
1292 lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD);
1293 return 0;
1294}
1295/**
1296 * net_device set_multicast_list callback: start the thread that
1297 * registers the current multicast addresses with the card
1298 */
1299static void
1300lcs_set_multicast_list(struct net_device *dev)
1301{
1302 struct lcs_card *card;
1303
1304 LCS_DBF_TEXT(4, trace, "setmulti");
1305 card = (struct lcs_card *) dev->priv;
1306
1307 if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) {
1308 schedule_work(&card->kernel_thread_starter);
1309 }
1310}
1311
1312#endif /* CONFIG_IP_MULTICAST */
1313
1314static long
1315lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
1316{
1317 if (!IS_ERR(irb))
1318 return 0;
1319
1320 switch (PTR_ERR(irb)) {
1321 case -EIO:
1322		PRINT_WARN("I/O error on device %s\n", cdev->dev.bus_id);
1323 LCS_DBF_TEXT(2, trace, "ckirberr");
1324 LCS_DBF_TEXT_(2, trace, " rc%d", -EIO);
1325 break;
1326 case -ETIMEDOUT:
1327 PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
1328 LCS_DBF_TEXT(2, trace, "ckirberr");
1329 LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT);
1330 break;
1331 default:
1332 PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
1333 cdev->dev.bus_id);
1334 LCS_DBF_TEXT(2, trace, "ckirberr");
1335 LCS_DBF_TEXT(2, trace, " rc???");
1336 }
1337 return PTR_ERR(irb);
1338}
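/*
 * Note: the common I/O layer may pass an ERR_PTR()-encoded value
 * (-EIO, -ETIMEDOUT) instead of a real irb; lcs_check_irb_error()
 * above decodes and logs that case before any irb field is touched.
 */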
1339
1340
1341/**
1342 * IRQ Handler for LCS channels
1343 */
1344static void
1345lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1346{
1347 struct lcs_card *card;
1348 struct lcs_channel *channel;
1349 int index;
1350
1351 if (lcs_check_irb_error(cdev, irb))
1352 return;
1353
1354 card = CARD_FROM_DEV(cdev);
1355 if (card->read.ccwdev == cdev)
1356 channel = &card->read;
1357 else
1358 channel = &card->write;
1359
1360 LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id);
1361 LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.cstat, irb->scsw.dstat);
1362 LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.fctl, irb->scsw.actl);
1363
1364 /* How far in the ccw chain have we processed? */
1365 if ((channel->state != CH_STATE_INIT) &&
1366 (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
1367 index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa)
1368 - channel->ccws;
1369 if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) ||
1370		    (irb->scsw.cstat & SCHN_STAT_PCI))
1371 /* Bloody io subsystem tells us lies about cpa... */
1372 index = (index - 1) & (LCS_NUM_BUFFS - 1);
1373 while (channel->io_idx != index) {
1374 __lcs_processed_buffer(channel,
1375 channel->iob + channel->io_idx);
1376 channel->io_idx =
1377 (channel->io_idx + 1) & (LCS_NUM_BUFFS - 1);
1378 }
1379 }
1380
1381 if ((irb->scsw.dstat & DEV_STAT_DEV_END) ||
1382 (irb->scsw.dstat & DEV_STAT_CHN_END) ||
1383 (irb->scsw.dstat & DEV_STAT_UNIT_CHECK))
1384 /* Mark channel as stopped. */
1385 channel->state = CH_STATE_STOPPED;
1386 else if (irb->scsw.actl & SCSW_ACTL_SUSPENDED)
1387 /* CCW execution stopped on a suspend bit. */
1388 channel->state = CH_STATE_SUSPENDED;
1389
1390 if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) {
1391 if (irb->scsw.cc != 0) {
1392 ccw_device_halt(channel->ccwdev, (addr_t) channel);
1393 return;
1394 }
1395 /* The channel has been stopped by halt_IO. */
1396 channel->state = CH_STATE_HALTED;
1397 }
1398
1399 if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
1400 channel->state = CH_STATE_CLEARED;
1401 }
1402 /* Do the rest in the tasklet. */
1403 tasklet_schedule(&channel->irq_tasklet);
1404}
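/*
 * Interrupt handling is split in two: lcs_irq() above runs in
 * interrupt context and only accounts processed CCWs and updates the
 * channel state; buffer callbacks and channel restart/resume are
 * deferred to lcs_tasklet() below.
 */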
1405
1406/**
1407 * Tasklet for IRQ handler
1408 */
1409static void
1410lcs_tasklet(unsigned long data)
1411{
1412 unsigned long flags;
1413 struct lcs_channel *channel;
1414 struct lcs_buffer *iob;
1415 int buf_idx;
1416 int rc;
1417
1418 channel = (struct lcs_channel *) data;
1419 LCS_DBF_TEXT_(5, trace, "tlet%s",channel->ccwdev->dev.bus_id);
1420
1421 /* Check for processed buffers. */
1422 iob = channel->iob;
1423 buf_idx = channel->buf_idx;
1424 while (iob[buf_idx].state == BUF_STATE_PROCESSED) {
1425 /* Do the callback thing. */
1426 if (iob[buf_idx].callback != NULL)
1427 iob[buf_idx].callback(channel, iob + buf_idx);
1428 buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1);
1429 }
1430 channel->buf_idx = buf_idx;
1431
1432 if (channel->state == CH_STATE_STOPPED)
1433 // FIXME: what if rc != 0 ??
1434 rc = lcs_start_channel(channel);
1435 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1436 if (channel->state == CH_STATE_SUSPENDED &&
1437 channel->iob[channel->io_idx].state == BUF_STATE_READY) {
1438 // FIXME: what if rc != 0 ??
1439 rc = __lcs_resume_channel(channel);
1440 }
1441 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1442
1443 /* Something happened on the channel. Wake up waiters. */
1444 wake_up(&channel->wait_q);
1445}
1446
1447/**
1448 * Finish current tx buffer and make it ready for transmit.
1449 */
1450static void
1451__lcs_emit_txbuffer(struct lcs_card *card)
1452{
1453 LCS_DBF_TEXT(5, trace, "emittx");
1454 *(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0;
1455 card->tx_buffer->count += 2;
1456 lcs_ready_buffer(&card->write, card->tx_buffer);
1457 card->tx_buffer = NULL;
1458 card->tx_emitted++;
1459}
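/*
 * The two zero bytes appended above terminate the frame chain in the
 * buffer: the receiving side (see lcs_get_frames_cb()) walks frames
 * until it finds a header whose offset field is 0.
 */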
1460
1461/**
1462 * Callback for finished tx buffers.
1463 */
1464static void
1465lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
1466{
1467 struct lcs_card *card;
1468
1469 LCS_DBF_TEXT(5, trace, "txbuffcb");
1470 /* Put buffer back to pool. */
1471 lcs_release_buffer(channel, buffer);
1472 card = (struct lcs_card *)
1473 ((char *) channel - offsetof(struct lcs_card, write));
1474 spin_lock(&card->lock);
1475 card->tx_emitted--;
1476 if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
1477 /*
1478 * Last running tx buffer has finished. Submit partially
1479 * filled current buffer.
1480 */
1481 __lcs_emit_txbuffer(card);
1482 spin_unlock(&card->lock);
1483}
1484
1485/**
1486 * Packet transmit function called by network stack
1487 */
1488static int
1489__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
1490 struct net_device *dev)
1491{
1492 struct lcs_header *header;
1493
1494 LCS_DBF_TEXT(5, trace, "hardxmit");
1495 if (skb == NULL) {
1496 card->stats.tx_dropped++;
1497 card->stats.tx_errors++;
1498 return -EIO;
1499 }
1500 if (card->state != DEV_STATE_UP) {
1501 dev_kfree_skb(skb);
1502 card->stats.tx_dropped++;
1503 card->stats.tx_errors++;
1504 card->stats.tx_carrier_errors++;
1505 return 0;
1506 }
1507	if (netif_queue_stopped(dev)) {
1508 card->stats.tx_dropped++;
1509 return -EBUSY;
1510 }
1511 if (card->tx_buffer != NULL &&
1512 card->tx_buffer->count + sizeof(struct lcs_header) +
1513 skb->len + sizeof(u16) > LCS_IOBUFFERSIZE)
1514 /* skb too big for current tx buffer. */
1515 __lcs_emit_txbuffer(card);
1516 if (card->tx_buffer == NULL) {
1517 /* Get new tx buffer */
1518 card->tx_buffer = lcs_get_buffer(&card->write);
1519 if (card->tx_buffer == NULL) {
1520 card->stats.tx_dropped++;
1521 return -EBUSY;
1522 }
1523 card->tx_buffer->callback = lcs_txbuffer_cb;
1524 card->tx_buffer->count = 0;
1525 }
1526 header = (struct lcs_header *)
1527 (card->tx_buffer->data + card->tx_buffer->count);
1528 card->tx_buffer->count += skb->len + sizeof(struct lcs_header);
1529 header->offset = card->tx_buffer->count;
1530 header->type = card->lan_type;
1531 header->slot = card->portno;
1532 memcpy(header + 1, skb->data, skb->len);
1533 card->stats.tx_bytes += skb->len;
1534 card->stats.tx_packets++;
1535 dev_kfree_skb(skb);
1536 if (card->tx_emitted <= 0)
1537		/* If this is the first tx buffer, emit it immediately. */
1538 __lcs_emit_txbuffer(card);
1539 return 0;
1540}
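/*
 * Transmit strategy: packets are copied into a shared tx buffer so
 * that several skbs can share one channel program.  The buffer is
 * emitted once the next packet would no longer fit, or immediately
 * when nothing else is in flight (tx_emitted <= 0); otherwise
 * lcs_txbuffer_cb() flushes the partially filled buffer when the
 * last in-flight buffer completes.
 */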
1541
1542static int
1543lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
1544{
1545 struct lcs_card *card;
1546 int rc;
1547
1548 LCS_DBF_TEXT(5, trace, "pktxmit");
1549 card = (struct lcs_card *) dev->priv;
1550 spin_lock(&card->lock);
1551 rc = __lcs_start_xmit(card, skb, dev);
1552 spin_unlock(&card->lock);
1553 return rc;
1554}
1555
1556/**
1557 * send startlan and lanstat command to make LCS device ready
1558 */
1559static int
1560lcs_startlan_auto(struct lcs_card *card)
1561{
1562 int rc;
1563
1564 LCS_DBF_TEXT(2, trace, "strtauto");
1565#ifdef CONFIG_NET_ETHERNET
1566 card->lan_type = LCS_FRAME_TYPE_ENET;
1567 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1568 if (rc == 0)
1569 return 0;
1570
1571#endif
1572#ifdef CONFIG_TR
1573 card->lan_type = LCS_FRAME_TYPE_TR;
1574 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1575 if (rc == 0)
1576 return 0;
1577#endif
1578#ifdef CONFIG_FDDI
1579 card->lan_type = LCS_FRAME_TYPE_FDDI;
1580 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1581 if (rc == 0)
1582 return 0;
1583#endif
1584 return -EIO;
1585}
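/*
 * Autosensing simply tries a startlan for each compiled-in frame
 * type in turn (Ethernet, Token Ring, FDDI) and keeps the first
 * lan_type the card accepts.
 */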
1586
1587static int
1588lcs_startlan(struct lcs_card *card)
1589{
1590 int rc, i;
1591
1592 LCS_DBF_TEXT(2, trace, "startlan");
1593 rc = 0;
1594 if (card->portno != LCS_INVALID_PORT_NO) {
1595 if (card->lan_type == LCS_FRAME_TYPE_AUTO)
1596 rc = lcs_startlan_auto(card);
1597 else
1598 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1599 } else {
1600 for (i = 0; i <= 16; i++) {
1601 card->portno = i;
1602 if (card->lan_type != LCS_FRAME_TYPE_AUTO)
1603 rc = lcs_send_startlan(card,
1604 LCS_INITIATOR_TCPIP);
1605 else
1606 /* autodetecting lan type */
1607 rc = lcs_startlan_auto(card);
1608 if (rc == 0)
1609 break;
1610 }
1611 }
1612 if (rc == 0)
1613 return lcs_send_lanstat(card);
1614 return rc;
1615}
1616
1617/**
1618 * LCS detect function
1619 * set up channels and make them I/O ready
1620 */
1621static int
1622lcs_detect(struct lcs_card *card)
1623{
1624 int rc = 0;
1625
1626 LCS_DBF_TEXT(2, setup, "lcsdetct");
1627 /* start/reset card */
1628 if (card->dev)
1629 netif_stop_queue(card->dev);
1630 rc = lcs_stop_channels(card);
1631 if (rc == 0) {
1632 rc = lcs_start_channels(card);
1633 if (rc == 0) {
1634 rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP);
1635 if (rc == 0)
1636 rc = lcs_startlan(card);
1637 }
1638 }
1639 if (rc == 0) {
1640 card->state = DEV_STATE_UP;
1641 } else {
1642 card->state = DEV_STATE_DOWN;
1643 card->write.state = CH_STATE_INIT;
1644 card->read.state = CH_STATE_INIT;
1645 }
1646 return rc;
1647}
1648
1649/**
1650 * reset card
1651 */
1652static int
1653lcs_resetcard(struct lcs_card *card)
1654{
1655 int retries;
1656
1657 LCS_DBF_TEXT(2, trace, "rescard");
1658 for (retries = 0; retries < 10; retries++) {
1659 if (lcs_detect(card) == 0) {
1660 netif_wake_queue(card->dev);
1661 card->state = DEV_STATE_UP;
1662 PRINT_INFO("LCS device %s successfully restarted!\n",
1663 card->dev->name);
1664 return 0;
1665 }
1666 msleep(3000);
1667 }
1668	PRINT_ERR("Error resetting LCS card!\n");
1669 return -EIO;
1670}
1671
1672
1673/**
1674 * LCS Stop card
1675 */
1676static int
1677lcs_stopcard(struct lcs_card *card)
1678{
1679 int rc;
1680
1681 LCS_DBF_TEXT(3, setup, "stopcard");
1682
1683 if (card->read.state != CH_STATE_STOPPED &&
1684 card->write.state != CH_STATE_STOPPED &&
1685 card->state == DEV_STATE_UP) {
1686 lcs_clear_multicast_list(card);
1687		rc = lcs_send_stoplan(card, LCS_INITIATOR_TCPIP);
1688 rc = lcs_send_shutdown(card);
1689 }
1690 rc = lcs_stop_channels(card);
1691 card->state = DEV_STATE_DOWN;
1692
1693 return rc;
1694}
1695
1696/**
1697 * LGW initiated commands
1698 */
1699static int
1700lcs_lgw_startlan_thread(void *data)
1701{
1702 struct lcs_card *card;
1703
1704 card = (struct lcs_card *) data;
1705 daemonize("lgwstpln");
1706
1707 if (!lcs_do_run_thread(card, LCS_STARTLAN_THREAD))
1708 return 0;
1709 LCS_DBF_TEXT(4, trace, "lgwstpln");
1710 if (card->dev)
1711 netif_stop_queue(card->dev);
1712 if (lcs_startlan(card) == 0) {
1713 netif_wake_queue(card->dev);
1714 card->state = DEV_STATE_UP;
1715 PRINT_INFO("LCS Startlan for device %s succeeded!\n",
1716 card->dev->name);
1717
1718 } else
1719 PRINT_ERR("LCS Startlan for device %s failed!\n",
1720 card->dev->name);
1721 lcs_clear_thread_running_bit(card, LCS_STARTLAN_THREAD);
1722 return 0;
1723}
1724
1725/**
1726 * Send startup command initiated by Lan Gateway
1727 */
1728static int
1729lcs_lgw_startup_thread(void *data)
1730{
1731 int rc;
1732
1733 struct lcs_card *card;
1734
1735 card = (struct lcs_card *) data;
1736 daemonize("lgwstaln");
1737
1738 if (!lcs_do_run_thread(card, LCS_STARTUP_THREAD))
1739 return 0;
1740 LCS_DBF_TEXT(4, trace, "lgwstaln");
1741 if (card->dev)
1742 netif_stop_queue(card->dev);
1743 rc = lcs_send_startup(card, LCS_INITIATOR_LGW);
1744 if (rc != 0) {
1745		PRINT_ERR("Startup for LCS device %s initiated "
1746			  "by LGW failed!\nResetting card ...\n",
1747 card->dev->name);
1748 /* do a card reset */
1749 rc = lcs_resetcard(card);
1750 if (rc == 0)
1751 goto Done;
1752 }
1753 rc = lcs_startlan(card);
1754 if (rc == 0) {
1755 netif_wake_queue(card->dev);
1756 card->state = DEV_STATE_UP;
1757 }
1758Done:
1759 if (rc == 0)
1760 PRINT_INFO("LCS Startup for device %s succeeded!\n",
1761 card->dev->name);
1762 else
1763 PRINT_ERR("LCS Startup for device %s failed!\n",
1764 card->dev->name);
1765 lcs_clear_thread_running_bit(card, LCS_STARTUP_THREAD);
1766 return 0;
1767}
1768
1769
1770/**
1771 * send stoplan command initiated by Lan Gateway
1772 */
1773static int
1774lcs_lgw_stoplan_thread(void *data)
1775{
1776 struct lcs_card *card;
1777 int rc;
1778
1779 card = (struct lcs_card *) data;
1780 daemonize("lgwstop");
1781
1782 if (!lcs_do_run_thread(card, LCS_STOPLAN_THREAD))
1783 return 0;
1784 LCS_DBF_TEXT(4, trace, "lgwstop");
1785 if (card->dev)
1786 netif_stop_queue(card->dev);
1787 if (lcs_send_stoplan(card, LCS_INITIATOR_LGW) == 0)
1788 PRINT_INFO("Stoplan for %s initiated by LGW succeeded!\n",
1789 card->dev->name);
1790 else
1791 PRINT_ERR("Stoplan %s initiated by LGW failed!\n",
1792 card->dev->name);
1793	/* Try to reset the card, stop it on failure. */
1794 rc = lcs_resetcard(card);
1795 if (rc != 0)
1796 rc = lcs_stopcard(card);
1797 lcs_clear_thread_running_bit(card, LCS_STOPLAN_THREAD);
1798 return rc;
1799}
1800
1801/**
1802 * Kernel Thread helper functions for LGW initiated commands
1803 */
1804static void
1805lcs_start_kernel_thread(struct lcs_card *card)
1806{
1807 LCS_DBF_TEXT(5, trace, "krnthrd");
1808 if (lcs_do_start_thread(card, LCS_STARTUP_THREAD))
1809 kernel_thread(lcs_lgw_startup_thread, (void *) card, SIGCHLD);
1810 if (lcs_do_start_thread(card, LCS_STARTLAN_THREAD))
1811 kernel_thread(lcs_lgw_startlan_thread, (void *) card, SIGCHLD);
1812 if (lcs_do_start_thread(card, LCS_STOPLAN_THREAD))
1813 kernel_thread(lcs_lgw_stoplan_thread, (void *) card, SIGCHLD);
1814#ifdef CONFIG_IP_MULTICAST
1815 if (lcs_do_start_thread(card, LCS_SET_MC_THREAD))
1816 kernel_thread(lcs_register_mc_addresses, (void *) card, SIGCHLD);
1817#endif
1818}
1819
1820/**
1821 * Process control frames.
1822 */
1823static void
1824lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
1825{
1826 LCS_DBF_TEXT(5, trace, "getctrl");
1827 if (cmd->initiator == LCS_INITIATOR_LGW) {
1828 switch(cmd->cmd_code) {
1829 case LCS_CMD_STARTUP:
1830 if (!lcs_set_thread_start_bit(card,
1831 LCS_STARTUP_THREAD))
1832 schedule_work(&card->kernel_thread_starter);
1833 break;
1834 case LCS_CMD_STARTLAN:
1835 if (!lcs_set_thread_start_bit(card,
1836 LCS_STARTLAN_THREAD))
1837 schedule_work(&card->kernel_thread_starter);
1838 break;
1839 case LCS_CMD_STOPLAN:
1840 if (!lcs_set_thread_start_bit(card,
1841 LCS_STOPLAN_THREAD))
1842 schedule_work(&card->kernel_thread_starter);
1843 break;
1844 default:
1845			PRINT_INFO("Unrecognized LGW command.\n");
1846 break;
1847 }
1848 } else
1849 lcs_notify_lancmd_waiters(card, cmd);
1850}
1851
1852/**
1853 * Unpack network packet.
1854 */
1855static void
1856lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
1857{
1858 struct sk_buff *skb;
1859
1860 LCS_DBF_TEXT(5, trace, "getskb");
1861 if (card->dev == NULL ||
1862 card->state != DEV_STATE_UP)
1863 /* The card isn't up. Ignore the packet. */
1864 return;
1865
1866 skb = dev_alloc_skb(skb_len);
1867 if (skb == NULL) {
1868 PRINT_ERR("LCS: alloc_skb failed for device=%s\n",
1869 card->dev->name);
1870 card->stats.rx_dropped++;
1871 return;
1872 }
1873 skb->dev = card->dev;
1874 memcpy(skb_put(skb, skb_len), skb_data, skb_len);
1875 skb->protocol = card->lan_type_trans(skb, card->dev);
1876 card->stats.rx_bytes += skb_len;
1877 card->stats.rx_packets++;
1878 *((__u32 *)skb->cb) = ++card->pkt_seq;
1879 netif_rx(skb);
1880}
1881
1882/**
1883 * LCS main routine to get packets and lancmd replies from the buffers
1884 */
1885static void
1886lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
1887{
1888 struct lcs_card *card;
1889 struct lcs_header *lcs_hdr;
1890 __u16 offset;
1891
1892 LCS_DBF_TEXT(5, trace, "lcsgtpkt");
1893 lcs_hdr = (struct lcs_header *) buffer->data;
1894 if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) {
1895 LCS_DBF_TEXT(4, trace, "-eiogpkt");
1896 return;
1897 }
1898 card = (struct lcs_card *)
1899 ((char *) channel - offsetof(struct lcs_card, read));
1900 offset = 0;
1901 while (lcs_hdr->offset != 0) {
1902 if (lcs_hdr->offset <= 0 ||
1903 lcs_hdr->offset > LCS_IOBUFFERSIZE ||
1904 lcs_hdr->offset < offset) {
1905 /* Offset invalid. */
1906 card->stats.rx_length_errors++;
1907 card->stats.rx_errors++;
1908 return;
1909 }
1910 /* What kind of frame is it? */
1911 if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL)
1912 /* Control frame. */
1913 lcs_get_control(card, (struct lcs_cmd *) lcs_hdr);
1914 else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET ||
1915 lcs_hdr->type == LCS_FRAME_TYPE_TR ||
1916 lcs_hdr->type == LCS_FRAME_TYPE_FDDI)
1917 /* Normal network packet. */
1918 lcs_get_skb(card, (char *)(lcs_hdr + 1),
1919 lcs_hdr->offset - offset -
1920 sizeof(struct lcs_header));
1921 else
1922 /* Unknown frame type. */
1923 ; // FIXME: error message ?
1924 /* Proceed to next frame. */
1925 offset = lcs_hdr->offset;
1926 lcs_hdr->offset = LCS_ILLEGAL_OFFSET;
1927 lcs_hdr = (struct lcs_header *) (buffer->data + offset);
1928 }
1929 /* The buffer is now empty. Make it ready again. */
1930 lcs_ready_buffer(&card->read, buffer);
1931}
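/*
 * Buffer layout walked above: each frame starts with an lcs_header
 * whose offset field gives the start of the next frame relative to
 * the buffer start; offsets must not decrease, must stay within
 * LCS_IOBUFFERSIZE, and an offset of 0 ends the chain.  Consumed
 * headers are overwritten with LCS_ILLEGAL_OFFSET so that stale
 * data in a reused buffer is never parsed as a frame.
 */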
1932
1933/**
1934 * get network statistics for ifconfig and other user programs
1935 */
1936static struct net_device_stats *
1937lcs_getstats(struct net_device *dev)
1938{
1939 struct lcs_card *card;
1940
1941 LCS_DBF_TEXT(4, trace, "netstats");
1942 card = (struct lcs_card *) dev->priv;
1943 return &card->stats;
1944}
1945
1946/**
1947 * stop lcs device
1948 * This function will be called by user doing ifconfig xxx down
1949 */
1950static int
1951lcs_stop_device(struct net_device *dev)
1952{
1953 struct lcs_card *card;
1954 int rc;
1955
1956 LCS_DBF_TEXT(2, trace, "stopdev");
1957 card = (struct lcs_card *) dev->priv;
1958 netif_stop_queue(dev);
1959 dev->flags &= ~IFF_UP;
1960 rc = lcs_stopcard(card);
1961 if (rc)
1962		PRINT_ERR("Stopping LCS device failed. Try again!\n");
1963 return rc;
1964}
1965
1966/**
1967 * start lcs device and make it runnable
1968 * This function will be called by user doing ifconfig xxx up
1969 */
1970static int
1971lcs_open_device(struct net_device *dev)
1972{
1973 struct lcs_card *card;
1974 int rc;
1975
1976 LCS_DBF_TEXT(2, trace, "opendev");
1977 card = (struct lcs_card *) dev->priv;
1978 /* initialize statistics */
1979 rc = lcs_detect(card);
1980 if (rc) {
1981		PRINT_ERR("LCS: Error opening device!\n");
1982
1983 } else {
1984 dev->flags |= IFF_UP;
1985 netif_wake_queue(dev);
1986 card->state = DEV_STATE_UP;
1987 }
1988 return rc;
1989}
1990
1991/**
1992 * sysfs show function for portno, called when the attribute is read
1993 */
1994static ssize_t
1995lcs_portno_show (struct device *dev, char *buf)
1996{
1997 struct lcs_card *card;
1998
1999 card = (struct lcs_card *)dev->driver_data;
2000
2001 if (!card)
2002 return 0;
2003
2004 return sprintf(buf, "%d\n", card->portno);
2005}
2006
2007/**
2008 * sysfs store function for portno, called when the attribute is written
2009 */
2010static ssize_t
2011lcs_portno_store (struct device *dev, const char *buf, size_t count)
2012{
2013 struct lcs_card *card;
2014 int value;
2015
2016 card = (struct lcs_card *)dev->driver_data;
2017
2018 if (!card)
2019 return 0;
2020
2021	sscanf(buf, "%d", &value);
2022 /* TODO: sanity checks */
2023 card->portno = value;
2024
2025 return count;
2026
2027}
2028
2029static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
2030
2031static ssize_t
2032lcs_type_show(struct device *dev, char *buf)
2033{
2034 struct ccwgroup_device *cgdev;
2035
2036 cgdev = to_ccwgroupdev(dev);
2037 if (!cgdev)
2038 return -ENODEV;
2039
2040 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
2041}
2042
2043static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
2044
2045static ssize_t
2046lcs_timeout_show(struct device *dev, char *buf)
2047{
2048 struct lcs_card *card;
2049
2050 card = (struct lcs_card *)dev->driver_data;
2051
2052	return card ? sprintf(buf, "%d\n", card->lancmd_timeout) : 0;
2053}
2054
2055static ssize_t
2056lcs_timeout_store (struct device *dev, const char *buf, size_t count)
2057{
2058 struct lcs_card *card;
2059 int value;
2060
2061 card = (struct lcs_card *)dev->driver_data;
2062
2063 if (!card)
2064 return 0;
2065
2066	sscanf(buf, "%d", &value);
2067 /* TODO: sanity checks */
2068 card->lancmd_timeout = value;
2069
2070 return count;
2071
2072}
2073
2074static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store);
2075
2076static struct attribute * lcs_attrs[] = {
2077 &dev_attr_portno.attr,
2078 &dev_attr_type.attr,
2079 &dev_attr_lancmd_timeout.attr,
2080 NULL,
2081};
2082
2083static struct attribute_group lcs_attr_group = {
2084 .attrs = lcs_attrs,
2085};
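/*
 * The attribute group is registered in lcs_probe_device() and shows
 * up below the ccwgroup device in sysfs.  Illustrative usage (the
 * exact path depends on the bus id of the device):
 *
 *   echo 1 > /sys/bus/ccwgroup/drivers/lcs/0.0.0800/portno
 *   cat /sys/bus/ccwgroup/drivers/lcs/0.0.0800/type
 */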
2086
2087/**
2088 * lcs_probe_device is called on establishing a new ccwgroup_device.
2089 */
2090static int
2091lcs_probe_device(struct ccwgroup_device *ccwgdev)
2092{
2093 struct lcs_card *card;
2094 int ret;
2095
2096 if (!get_device(&ccwgdev->dev))
2097 return -ENODEV;
2098
2099 LCS_DBF_TEXT(2, setup, "add_dev");
2100 card = lcs_alloc_card();
2101 if (!card) {
2102 PRINT_ERR("Allocation of lcs card failed\n");
2103 put_device(&ccwgdev->dev);
2104 return -ENOMEM;
2105 }
2106 ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group);
2107 if (ret) {
2108 PRINT_ERR("Creating attributes failed");
2109 lcs_free_card(card);
2110 put_device(&ccwgdev->dev);
2111 return ret;
2112 }
2113 ccwgdev->dev.driver_data = card;
2114 ccwgdev->cdev[0]->handler = lcs_irq;
2115 ccwgdev->cdev[1]->handler = lcs_irq;
2116 return 0;
2117}
2118
2119static int
2120lcs_register_netdev(struct ccwgroup_device *ccwgdev)
2121{
2122 struct lcs_card *card;
2123
2124 LCS_DBF_TEXT(2, setup, "regnetdv");
2125 card = (struct lcs_card *)ccwgdev->dev.driver_data;
2126 if (card->dev->reg_state != NETREG_UNINITIALIZED)
2127 return 0;
2128 SET_NETDEV_DEV(card->dev, &ccwgdev->dev);
2129 return register_netdev(card->dev);
2130}
2131
2132/**
2133 * lcs_new_device will be called when the group device is set online.
2134 */
2135
2136static int
2137lcs_new_device(struct ccwgroup_device *ccwgdev)
2138{
2139 struct lcs_card *card;
2140 struct net_device *dev=NULL;
2141 enum lcs_dev_states recover_state;
2142 int rc;
2143
2144 card = (struct lcs_card *)ccwgdev->dev.driver_data;
2145 if (!card)
2146 return -ENODEV;
2147
2148 LCS_DBF_TEXT(2, setup, "newdev");
2149 LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2150 card->read.ccwdev = ccwgdev->cdev[0];
2151 card->write.ccwdev = ccwgdev->cdev[1];
2152
2153 recover_state = card->state;
2154 ccw_device_set_online(card->read.ccwdev);
2155 ccw_device_set_online(card->write.ccwdev);
2156
2157 LCS_DBF_TEXT(3, setup, "lcsnewdv");
2158
2159 lcs_setup_card(card);
2160 rc = lcs_detect(card);
2161 if (rc) {
2162 LCS_DBF_TEXT(2, setup, "dtctfail");
2163 PRINT_WARN("Detection of LCS card failed with return code "
2164 "%d (0x%x)\n", rc, rc);
2165 lcs_stopcard(card);
2166 goto out;
2167 }
2168 if (card->dev) {
2169 LCS_DBF_TEXT(2, setup, "samedev");
2170 LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2171 goto netdev_out;
2172 }
2173 switch (card->lan_type) {
2174#ifdef CONFIG_NET_ETHERNET
2175 case LCS_FRAME_TYPE_ENET:
2176 card->lan_type_trans = eth_type_trans;
2177 dev = alloc_etherdev(0);
2178 break;
2179#endif
2180#ifdef CONFIG_TR
2181 case LCS_FRAME_TYPE_TR:
2182 card->lan_type_trans = tr_type_trans;
2183 dev = alloc_trdev(0);
2184 break;
2185#endif
2186#ifdef CONFIG_FDDI
2187 case LCS_FRAME_TYPE_FDDI:
2188 card->lan_type_trans = fddi_type_trans;
2189 dev = alloc_fddidev(0);
2190 break;
2191#endif
2192 default:
2193 LCS_DBF_TEXT(3, setup, "errinit");
2194 PRINT_ERR("LCS: Initialization failed\n");
2195 PRINT_ERR("LCS: No device found!\n");
2196 goto out;
2197 }
2198 if (!dev)
2199 goto out;
2200 card->dev = dev;
2201netdev_out:
2202 card->dev->priv = card;
2203 card->dev->open = lcs_open_device;
2204 card->dev->stop = lcs_stop_device;
2205 card->dev->hard_start_xmit = lcs_start_xmit;
2206 card->dev->get_stats = lcs_getstats;
2207 SET_MODULE_OWNER(dev);
2208 if (lcs_register_netdev(ccwgdev) != 0)
2209 goto out;
2210 memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
2211#ifdef CONFIG_IP_MULTICAST
2212 if (!lcs_check_multicast_support(card))
2213 card->dev->set_multicast_list = lcs_set_multicast_list;
2214#endif
2215 netif_stop_queue(card->dev);
2216	lcs_set_allowed_threads(card, 0xffffffff);
2217 if (recover_state == DEV_STATE_RECOVER) {
2218 lcs_set_multicast_list(card->dev);
2219 card->dev->flags |= IFF_UP;
2220 netif_wake_queue(card->dev);
2221 card->state = DEV_STATE_UP;
2222 } else
2223 lcs_stopcard(card);
2224
2225 return 0;
2226out:
2227
2228 ccw_device_set_offline(card->read.ccwdev);
2229 ccw_device_set_offline(card->write.ccwdev);
2230 return -ENODEV;
2231}
2232
2233/**
2234 * lcs_shutdown_device, called when setting the group device offline.
2235 */
2236static int
2237lcs_shutdown_device(struct ccwgroup_device *ccwgdev)
2238{
2239 struct lcs_card *card;
2240 enum lcs_dev_states recover_state;
2241 int ret;
2242
2243 LCS_DBF_TEXT(3, setup, "shtdndev");
2244 card = (struct lcs_card *)ccwgdev->dev.driver_data;
2245 if (!card)
2246 return -ENODEV;
2247 lcs_set_allowed_threads(card, 0);
2248 if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD))
2249 return -ERESTARTSYS;
2250 LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2251 recover_state = card->state;
2252
2253 ret = lcs_stop_device(card->dev);
2254 ret = ccw_device_set_offline(card->read.ccwdev);
2255 ret = ccw_device_set_offline(card->write.ccwdev);
2256 if (recover_state == DEV_STATE_UP) {
2257 card->state = DEV_STATE_RECOVER;
2258 }
2259 if (ret)
2260 return ret;
2261 return 0;
2262}
2263
2264/**
2265 * lcs_remove_device, free buffers and card
2266 */
2267static void
2268lcs_remove_device(struct ccwgroup_device *ccwgdev)
2269{
2270 struct lcs_card *card;
2271
2272 card = (struct lcs_card *)ccwgdev->dev.driver_data;
2273 if (!card)
2274 return;
2275
2276	PRINT_INFO("Removing LCS group device ...\n");
2277 LCS_DBF_TEXT(3, setup, "remdev");
2278 LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2279 if (ccwgdev->state == CCWGROUP_ONLINE) {
2280 lcs_shutdown_device(ccwgdev);
2281 }
2282 if (card->dev)
2283 unregister_netdev(card->dev);
2284 sysfs_remove_group(&ccwgdev->dev.kobj, &lcs_attr_group);
2285 lcs_cleanup_card(card);
2286 lcs_free_card(card);
2287 put_device(&ccwgdev->dev);
2288}
2289
2290/**
2291 * LCS ccwgroup driver registration
2292 */
2293static struct ccwgroup_driver lcs_group_driver = {
2294 .owner = THIS_MODULE,
2295 .name = "lcs",
2296 .max_slaves = 2,
2297 .driver_id = 0xD3C3E2,
2298 .probe = lcs_probe_device,
2299 .remove = lcs_remove_device,
2300 .set_online = lcs_new_device,
2301 .set_offline = lcs_shutdown_device,
2302};
2303
2304/**
2305 * LCS Module/Kernel initialization function
2306 */
2307static int
2308__init lcs_init_module(void)
2309{
2310 int rc;
2311
2312	PRINT_INFO("Loading %s\n", version);
2313 rc = lcs_register_debug_facility();
2314 LCS_DBF_TEXT(0, setup, "lcsinit");
2315 if (rc) {
2316 PRINT_ERR("Initialization failed\n");
2317 return rc;
2318 }
2319
2320 rc = register_cu3088_discipline(&lcs_group_driver);
2321 if (rc) {
2322		PRINT_ERR("Initialization failed\n");
		lcs_unregister_debug_facility();
2323		return rc;
2324 }
2325
2326 return 0;
2327}
2328
2329
2330/**
2331 * LCS module cleanup function
2332 */
2333static void
2334__exit lcs_cleanup_module(void)
2335{
2336 PRINT_INFO("Terminating lcs module.\n");
2337 LCS_DBF_TEXT(0, trace, "cleanup");
2338 unregister_cu3088_discipline(&lcs_group_driver);
2339 lcs_unregister_debug_facility();
2340}
2341
2342module_init(lcs_init_module);
2343module_exit(lcs_cleanup_module);
2344
2345MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>");
2346MODULE_LICENSE("GPL");
2347
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
new file mode 100644
index 000000000000..a7f348ef1b08
--- /dev/null
+++ b/drivers/s390/net/lcs.h
@@ -0,0 +1,321 @@
1/* lcs.h */
2
3#include <linux/interrupt.h>
4#include <linux/netdevice.h>
5#include <linux/skbuff.h>
6#include <linux/workqueue.h>
7#include <asm/ccwdev.h>
8
9#define VERSION_LCS_H "$Revision: 1.19 $"
10
11#define LCS_DBF_TEXT(level, name, text) \
12 do { \
13 debug_text_event(lcs_dbf_##name, level, text); \
14 } while (0)
15
16#define LCS_DBF_HEX(level,name,addr,len) \
17do { \
18 debug_event(lcs_dbf_##name,level,(void*)(addr),len); \
19} while (0)
20
21#define LCS_DBF_TEXT_(level,name,text...) \
22do { \
23 sprintf(debug_buffer, text); \
24 debug_text_event(lcs_dbf_##name,level, debug_buffer);\
25} while (0)
26
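/*
 * Note: LCS_DBF_TEXT_() sprintf()s into a shared debug_buffer that
 * the including .c file is expected to provide; concurrent callers
 * can interleave in that buffer unless they are serialized by the
 * caller.
 */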
27/**
28 * definitions for debugging and message output
29 */
30#define PRINTK_HEADER " lcs: "
31
32/**
33 * helper to get the lcs_card from a ccw device
34 */
35#define CARD_FROM_DEV(cdev) \
36	((struct lcs_card *) \
37	 ((struct ccwgroup_device *)cdev->dev.driver_data)->dev.driver_data)
38/**
39 * CCW commands used in this driver
40 */
41#define LCS_CCW_WRITE 0x01
42#define LCS_CCW_READ 0x02
43#define LCS_CCW_TRANSFER 0x08
44
45/**
46 * LCS device status primitives
47 */
48#define LCS_CMD_STARTLAN 0x01
49#define LCS_CMD_STOPLAN 0x02
50#define LCS_CMD_LANSTAT 0x04
51#define LCS_CMD_STARTUP 0x07
52#define LCS_CMD_SHUTDOWN 0x08
53#define LCS_CMD_QIPASSIST 0xb2
54#define LCS_CMD_SETIPM 0xb4
55#define LCS_CMD_DELIPM 0xb5
56
57#define LCS_INITIATOR_TCPIP 0x00
58#define LCS_INITIATOR_LGW 0x01
59#define LCS_STD_CMD_SIZE 16
60#define LCS_MULTICAST_CMD_SIZE 404
61
62/**
63 * LCS IPASSIST masks, only used when multicast is switched on
64 */
65/* Not supported by LCS */
66#define LCS_IPASS_ARP_PROCESSING 0x0001
67#define LCS_IPASS_IN_CHECKSUM_SUPPORT 0x0002
68#define LCS_IPASS_OUT_CHECKSUM_SUPPORT 0x0004
69#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008
70#define LCS_IPASS_IP_FILTERING 0x0010
71/* Supported by lcs 3172 */
72#define LCS_IPASS_IPV6_SUPPORT 0x0020
73#define LCS_IPASS_MULTICAST_SUPPORT 0x0040
74
75/**
76 * LCS sense byte definitions
77 */
78#define LCS_SENSE_INTERFACE_DISCONNECT 0x01
79#define LCS_SENSE_EQUIPMENT_CHECK 0x10
80#define LCS_SENSE_BUS_OUT_CHECK 0x20
81#define LCS_SENSE_INTERVENTION_REQUIRED 0x40
82#define LCS_SENSE_CMD_REJECT 0x80
83#define LCS_SENSE_RESETTING_EVENT 0x0080
84#define LCS_SENSE_DEVICE_ONLINE 0x0020
85
86/**
87 * LCS packet type definitions
88 */
89#define LCS_FRAME_TYPE_CONTROL 0
90#define LCS_FRAME_TYPE_ENET 1
91#define LCS_FRAME_TYPE_TR 2
92#define LCS_FRAME_TYPE_FDDI 7
93#define LCS_FRAME_TYPE_AUTO -1
94
95/**
96 * some more definitions, to be sorted later
97 */
98#define LCS_ILLEGAL_OFFSET 0xffff
99#define LCS_IOBUFFERSIZE 0x5000
100#define LCS_NUM_BUFFS 8 /* needs to be power of 2 */
101#define LCS_MAC_LENGTH 6
102#define LCS_INVALID_PORT_NO -1
103#define LCS_LANCMD_TIMEOUT_DEFAULT 5
104
105/**
106 * Multicast state
107 */
108#define LCS_IPM_STATE_SET_REQUIRED 0
109#define LCS_IPM_STATE_DEL_REQUIRED 1
110#define LCS_IPM_STATE_ON_CARD 2
111
112/**
113 * LCS IP Assist declarations; apparently only used for multicast.
114 * Note: several of these masks duplicate the definitions above.
115 */
116#define LCS_IPASS_ARP_PROCESSING 0x0001
117#define LCS_IPASS_INBOUND_CSUM_SUPP 0x0002
118#define LCS_IPASS_OUTBOUND_CSUM_SUPP 0x0004
119#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008
120#define LCS_IPASS_IP_FILTERING 0x0010
121#define LCS_IPASS_IPV6_SUPPORT 0x0020
122#define LCS_IPASS_MULTICAST_SUPPORT 0x0040
123
124/**
125 * LCS Buffer states
126 */
127enum lcs_buffer_states {
128 BUF_STATE_EMPTY, /* buffer is empty */
129 BUF_STATE_LOCKED, /* buffer is locked, don't touch */
130 BUF_STATE_READY, /* buffer is ready for read/write */
131 BUF_STATE_PROCESSED,
132};
133
134/**
135 * LCS Channel State Machine declarations
136 */
137enum lcs_channel_states {
138 CH_STATE_INIT,
139 CH_STATE_HALTED,
140 CH_STATE_STOPPED,
141 CH_STATE_RUNNING,
142 CH_STATE_SUSPENDED,
143 CH_STATE_CLEARED,
144};
145
146/**
147 * LCS device state machine
148 */
149enum lcs_dev_states {
150 DEV_STATE_DOWN,
151 DEV_STATE_UP,
152 DEV_STATE_RECOVER,
153};
154
155enum lcs_threads {
156 LCS_SET_MC_THREAD = 1,
157 LCS_STARTLAN_THREAD = 2,
158 LCS_STOPLAN_THREAD = 4,
159 LCS_STARTUP_THREAD = 8,
160};
161/**
162 * LCS struct declarations
163 */
164struct lcs_header {
165 __u16 offset;
166 __u8 type;
167 __u8 slot;
168} __attribute__ ((packed));
169
170struct lcs_ip_mac_pair {
171 __u32 ip_addr;
172 __u8 mac_addr[LCS_MAC_LENGTH];
173 __u8 reserved[2];
174} __attribute__ ((packed));
175
176struct lcs_ipm_list {
177 struct list_head list;
178 struct lcs_ip_mac_pair ipm;
179 __u8 ipm_state;
180};
181
182struct lcs_cmd {
183 __u16 offset;
184 __u8 type;
185 __u8 slot;
186 __u8 cmd_code;
187 __u8 initiator;
188 __u16 sequence_no;
189 __u16 return_code;
190 union {
191 struct {
192 __u8 lan_type;
193 __u8 portno;
194 __u16 parameter_count;
195 __u8 operator_flags[3];
196 __u8 reserved[3];
197 } lcs_std_cmd;
198 struct {
199 __u16 unused1;
200 __u16 buff_size;
201 __u8 unused2[6];
202 } lcs_startup;
203 struct {
204 __u8 lan_type;
205 __u8 portno;
206 __u8 unused[10];
207 __u8 mac_addr[LCS_MAC_LENGTH];
208 __u32 num_packets_deblocked;
209 __u32 num_packets_blocked;
210 __u32 num_packets_tx_on_lan;
211 __u32 num_tx_errors_detected;
212 __u32 num_tx_packets_disgarded;
213 __u32 num_packets_rx_from_lan;
214 __u32 num_rx_errors_detected;
215 __u32 num_rx_discarded_nobuffs_avail;
216 __u32 num_rx_packets_too_large;
217 } lcs_lanstat_cmd;
218#ifdef CONFIG_IP_MULTICAST
219 struct {
220 __u8 lan_type;
221 __u8 portno;
222 __u16 num_ip_pairs;
223 __u16 ip_assists_supported;
224 __u16 ip_assists_enabled;
225 __u16 version;
226 struct {
227 struct lcs_ip_mac_pair
228 ip_mac_pair[32];
229 __u32 response_data;
230			} lcs_ipass_ctlmsg __attribute__ ((packed));
231 } lcs_qipassist __attribute__ ((packed));
232#endif /*CONFIG_IP_MULTICAST */
233 } cmd __attribute__ ((packed));
234} __attribute__ ((packed));
235
236/**
237 * Forward declarations.
238 */
239struct lcs_card;
240struct lcs_channel;
241
242/**
243 * Definition of an lcs buffer.
244 */
245struct lcs_buffer {
246 enum lcs_buffer_states state;
247 void *data;
248 int count;
249 /* Callback for completion notification. */
250 void (*callback)(struct lcs_channel *, struct lcs_buffer *);
251};
252
253struct lcs_reply {
254 struct list_head list;
255 __u16 sequence_no;
256 atomic_t refcnt;
257 /* Callback for completion notification. */
258 void (*callback)(struct lcs_card *, struct lcs_cmd *);
259 wait_queue_head_t wait_q;
260 struct lcs_card *card;
261 int received;
262 int rc;
263};
264
265/**
266 * Definition of an lcs channel
267 */
268struct lcs_channel {
269 enum lcs_channel_states state;
270 struct ccw_device *ccwdev;
271 struct ccw1 ccws[LCS_NUM_BUFFS + 1];
272 wait_queue_head_t wait_q;
273 struct tasklet_struct irq_tasklet;
274 struct lcs_buffer iob[LCS_NUM_BUFFS];
275 int io_idx;
276 int buf_idx;
277};
278
279
280/**
281 * definition of the lcs card
282 */
283struct lcs_card {
284 spinlock_t lock;
285 spinlock_t ipm_lock;
286 enum lcs_dev_states state;
287 struct net_device *dev;
288 struct net_device_stats stats;
289 unsigned short (*lan_type_trans)(struct sk_buff *skb,
290 struct net_device *dev);
291 struct lcs_channel read;
292 struct lcs_channel write;
293 struct lcs_buffer *tx_buffer;
294 int tx_emitted;
295 struct list_head lancmd_waiters;
296 int lancmd_timeout;
297
298 struct work_struct kernel_thread_starter;
299 spinlock_t mask_lock;
300 unsigned long thread_start_mask;
301 unsigned long thread_running_mask;
302 unsigned long thread_allowed_mask;
303 wait_queue_head_t wait_q;
304
305#ifdef CONFIG_IP_MULTICAST
306 struct list_head ipm_list;
307#endif
308 __u8 mac[LCS_MAC_LENGTH];
309 __u16 ip_assists_supported;
310 __u16 ip_assists_enabled;
311 __s8 lan_type;
312 __u32 pkt_seq;
313 __u16 sequence_no;
314 __s16 portno;
315 /* Some info copied from probeinfo */
316 u8 device_forced;
317 u8 max_port_no;
318 u8 hint_port_no;
319 s16 port_protocol_no;
320} __attribute__ ((aligned(8)));
321
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
new file mode 100644
index 000000000000..16e8e69afb10
--- /dev/null
+++ b/drivers/s390/net/netiucv.c
@@ -0,0 +1,2149 @@
1/*
2 * $Id: netiucv.c,v 1.63 2004/07/27 13:36:05 mschwide Exp $
3 *
4 * IUCV network driver
5 *
6 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
8 *
9 * Driverfs integration and all bugs therein by Cornelia Huck(cohuck@de.ibm.com)
10 *
11 * Documentation used:
12 * the source of the original IUCV driver by:
13 * Stefan Hegewald <hegewald@de.ibm.com>
14 * Hartmut Penner <hpenner@de.ibm.com>
15 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
16 * Martin Schwidefsky (schwidefsky@de.ibm.com)
17 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
18 *
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2, or (at your option)
22 * any later version.
23 *
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
28 *
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
32 *
33 * RELEASE-TAG: IUCV network driver $Revision: 1.63 $
34 *
35 */
36
37#undef DEBUG
38
39#include <linux/module.h>
40#include <linux/init.h>
41#include <linux/kernel.h>
42#include <linux/slab.h>
43#include <linux/errno.h>
44#include <linux/types.h>
45#include <linux/interrupt.h>
46#include <linux/timer.h>
47#include <linux/sched.h>
48#include <linux/bitops.h>
49
50#include <linux/signal.h>
51#include <linux/string.h>
52#include <linux/device.h>
53
54#include <linux/ip.h>
55#include <linux/if_arp.h>
56#include <linux/tcp.h>
57#include <linux/skbuff.h>
58#include <linux/ctype.h>
59#include <net/dst.h>
60
61#include <asm/io.h>
62#include <asm/uaccess.h>
63
64#include "iucv.h"
65#include "fsm.h"
66
67MODULE_AUTHOR
68 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
69MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
70
71
72#define PRINTK_HEADER " iucv: " /* for debugging */
73
74static struct device_driver netiucv_driver = {
75 .name = "netiucv",
76 .bus = &iucv_bus,
77};
78
79/**
80 * Per connection profiling data
81 */
82struct connection_profile {
83 unsigned long maxmulti;
84 unsigned long maxcqueue;
85 unsigned long doios_single;
86 unsigned long doios_multi;
87 unsigned long txlen;
88 unsigned long tx_time;
89 struct timespec send_stamp;
90 unsigned long tx_pending;
91 unsigned long tx_max_pending;
92};
93
94/**
95 * Representation of one iucv connection
96 */
97struct iucv_connection {
98 struct iucv_connection *next;
99 iucv_handle_t handle;
100 __u16 pathid;
101 struct sk_buff *rx_buff;
102 struct sk_buff *tx_buff;
103 struct sk_buff_head collect_queue;
104 struct sk_buff_head commit_queue;
105 spinlock_t collect_lock;
106 int collect_len;
107 int max_buffsize;
108 fsm_timer timer;
109 fsm_instance *fsm;
110 struct net_device *netdev;
111 struct connection_profile prof;
112 char userid[9];
113};
114
115/**
116 * Linked list of all connection structs.
117 */
118static struct iucv_connection *iucv_connections;
119
120/**
121 * Representation of event-data for the
122 * connection state machine.
123 */
124struct iucv_event {
125 struct iucv_connection *conn;
126 void *data;
127};
128
129/**
130 * Private part of the network device structure
131 */
132struct netiucv_priv {
133 struct net_device_stats stats;
134 unsigned long tbusy;
135 fsm_instance *fsm;
136 struct iucv_connection *conn;
137 struct device *dev;
138};
139
140/**
141 * Link level header for a packet.
142 */
143typedef struct ll_header_t {
144 __u16 next;
145} ll_header;
146
147#define NETIUCV_HDRLEN (sizeof(ll_header))
148#define NETIUCV_BUFSIZE_MAX 32768
149#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
150#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
151#define NETIUCV_MTU_DEFAULT 9216
152#define NETIUCV_QUEUELEN_DEFAULT 50
153#define NETIUCV_TIMEOUT_5SEC 5000
154
155/**
156 * Compatibility macros for busy handling
157 * of network devices.
158 */
159static __inline__ void netiucv_clear_busy(struct net_device *dev)
160{
161 clear_bit(0, &(((struct netiucv_priv *)dev->priv)->tbusy));
162 netif_wake_queue(dev);
163}
164
165static __inline__ int netiucv_test_and_set_busy(struct net_device *dev)
166{
167 netif_stop_queue(dev);
168 return test_and_set_bit(0, &((struct netiucv_priv *)dev->priv)->tbusy);
169}
170
171static __u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
172static __u8 iucvMagic[16] = {
173 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
174 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
175};
176
177/**
178 * This mask means the 16-byte IUCV "magic" and the origin userid must
179 * match exactly as specified in order to give connection_pending()
180 * control.
181 */
182static __u8 netiucv_mask[] = {
183 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
184 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
185 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
186};
187
188/**
189 * Convert an iucv userId to its printable
190 * form (strip whitespace at end).
191 *
192 * @param An iucv userId
193 *
194 * @returns The printable string (static data!!)
195 */
196static __inline__ char *
197netiucv_printname(char *name)
198{
199 static char tmp[9];
200 char *p = tmp;
201 memcpy(tmp, name, 8);
202 tmp[8] = '\0';
203 while (*p && (!isspace(*p)))
204 p++;
205 *p = '\0';
206 return tmp;
207}
208
209/**
210 * States of the interface statemachine.
211 */
212enum dev_states {
213 DEV_STATE_STOPPED,
214 DEV_STATE_STARTWAIT,
215 DEV_STATE_STOPWAIT,
216 DEV_STATE_RUNNING,
217 /**
218	 * MUST always be the last element!
219 */
220 NR_DEV_STATES
221};
222
223static const char *dev_state_names[] = {
224 "Stopped",
225 "StartWait",
226 "StopWait",
227 "Running",
228};
229
230/**
231 * Events of the interface statemachine.
232 */
233enum dev_events {
234 DEV_EVENT_START,
235 DEV_EVENT_STOP,
236 DEV_EVENT_CONUP,
237 DEV_EVENT_CONDOWN,
238 /**
239	 * MUST always be the last element!
240 */
241 NR_DEV_EVENTS
242};
243
244static const char *dev_event_names[] = {
245 "Start",
246 "Stop",
247 "Connection up",
248 "Connection down",
249};
250
251/**
252 * Events of the connection statemachine
253 */
254enum conn_events {
255 /**
256 * Events representing callbacks from the
257 * lowlevel iucv layer
258 */
259 CONN_EVENT_CONN_REQ,
260 CONN_EVENT_CONN_ACK,
261 CONN_EVENT_CONN_REJ,
262 CONN_EVENT_CONN_SUS,
263 CONN_EVENT_CONN_RES,
264 CONN_EVENT_RX,
265 CONN_EVENT_TXDONE,
266
267 /**
268 * Events representing error return codes from
269 * calls to the lowlevel iucv layer
270 */
271
272 /**
273 * Event, representing timer expiry.
274 */
275 CONN_EVENT_TIMER,
276
277 /**
278 * Events, representing commands from upper levels.
279 */
280 CONN_EVENT_START,
281 CONN_EVENT_STOP,
282
283 /**
284	 * MUST always be the last element!
285 */
286 NR_CONN_EVENTS,
287};
288
289static const char *conn_event_names[] = {
290 "Remote connection request",
291 "Remote connection acknowledge",
292 "Remote connection reject",
293 "Connection suspended",
294 "Connection resumed",
295 "Data received",
296 "Data sent",
297
298 "Timer",
299
300 "Start",
301 "Stop",
302};
303
304/**
305 * States of the connection statemachine.
306 */
307enum conn_states {
308 /**
309 * Connection not assigned to any device,
310 * initial state, invalid
311 */
312 CONN_STATE_INVALID,
313
314 /**
315 * Userid assigned but not operating
316 */
317 CONN_STATE_STOPPED,
318
319 /**
320 * Connection registered,
321 * no connection request sent yet,
322 * no connection request received
323 */
324 CONN_STATE_STARTWAIT,
325
326 /**
327 * Connection registered and connection request sent,
328 * no acknowledge and no connection request received yet.
329 */
330 CONN_STATE_SETUPWAIT,
331
332 /**
333 * Connection up and running idle
334 */
335 CONN_STATE_IDLE,
336
337 /**
338 * Data sent, awaiting CONN_EVENT_TXDONE
339 */
340 CONN_STATE_TX,
341
342 /**
343 * Error during registration.
344 */
345 CONN_STATE_REGERR,
346
347 /**
348	 * Error during connection setup.
349 */
350 CONN_STATE_CONNERR,
351
352 /**
353	 * MUST always be the last element!
354 */
355 NR_CONN_STATES,
356};
357
358static const char *conn_state_names[] = {
359 "Invalid",
360 "Stopped",
361 "StartWait",
362 "SetupWait",
363 "Idle",
364 "TX",
366 "Registration error",
367 "Connect error",
368};
369
370
371/**
372 * Debug Facility Stuff
373 */
374static debug_info_t *iucv_dbf_setup = NULL;
375static debug_info_t *iucv_dbf_data = NULL;
376static debug_info_t *iucv_dbf_trace = NULL;
377
378DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
379
380static void
381iucv_unregister_dbf_views(void)
382{
383 if (iucv_dbf_setup)
384 debug_unregister(iucv_dbf_setup);
385 if (iucv_dbf_data)
386 debug_unregister(iucv_dbf_data);
387 if (iucv_dbf_trace)
388 debug_unregister(iucv_dbf_trace);
389}
390static int
391iucv_register_dbf_views(void)
392{
393 iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
394 IUCV_DBF_SETUP_INDEX,
395 IUCV_DBF_SETUP_NR_AREAS,
396 IUCV_DBF_SETUP_LEN);
397 iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
398 IUCV_DBF_DATA_INDEX,
399 IUCV_DBF_DATA_NR_AREAS,
400 IUCV_DBF_DATA_LEN);
401 iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
402 IUCV_DBF_TRACE_INDEX,
403 IUCV_DBF_TRACE_NR_AREAS,
404 IUCV_DBF_TRACE_LEN);
405
406 if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
407 (iucv_dbf_trace == NULL)) {
408 iucv_unregister_dbf_views();
409 return -ENOMEM;
410 }
411 debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
412 debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
413
414 debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
415 debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
416
417 debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
418 debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
419
420 return 0;
421}
422
423/**
424 * Callback-wrappers, called from lowlevel iucv layer.
425 *****************************************************************************/
426
427static void
428netiucv_callback_rx(iucv_MessagePending *eib, void *pgm_data)
429{
430 struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
431 struct iucv_event ev;
432
433 ev.conn = conn;
434 ev.data = (void *)eib;
435
436 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
437}
438
439static void
440netiucv_callback_txdone(iucv_MessageComplete *eib, void *pgm_data)
441{
442 struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
443 struct iucv_event ev;
444
445 ev.conn = conn;
446 ev.data = (void *)eib;
447 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
448}
449
450static void
451netiucv_callback_connack(iucv_ConnectionComplete *eib, void *pgm_data)
452{
453 struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
454 struct iucv_event ev;
455
456 ev.conn = conn;
457 ev.data = (void *)eib;
458 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, &ev);
459}
460
461static void
462netiucv_callback_connreq(iucv_ConnectionPending *eib, void *pgm_data)
463{
464 struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
465 struct iucv_event ev;
466
467 ev.conn = conn;
468 ev.data = (void *)eib;
469 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
470}
471
472static void
473netiucv_callback_connrej(iucv_ConnectionSevered *eib, void *pgm_data)
474{
475 struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
476 struct iucv_event ev;
477
478 ev.conn = conn;
479 ev.data = (void *)eib;
480 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, &ev);
481}
482
483static void
484netiucv_callback_connsusp(iucv_ConnectionQuiesced *eib, void *pgm_data)
485{
486 struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
487 struct iucv_event ev;
488
489 ev.conn = conn;
490 ev.data = (void *)eib;
491 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, &ev);
492}
493
494static void
495netiucv_callback_connres(iucv_ConnectionResumed *eib, void *pgm_data)
496{
497 struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
498 struct iucv_event ev;
499
500 ev.conn = conn;
501 ev.data = (void *)eib;
502 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, &ev);
503}
504
505static iucv_interrupt_ops_t netiucv_ops = {
506 .ConnectionPending = netiucv_callback_connreq,
507 .ConnectionComplete = netiucv_callback_connack,
508 .ConnectionSevered = netiucv_callback_connrej,
509 .ConnectionQuiesced = netiucv_callback_connsusp,
510 .ConnectionResumed = netiucv_callback_connres,
511 .MessagePending = netiucv_callback_rx,
512 .MessageComplete = netiucv_callback_txdone
513};
514
515/**
516 * Dummy NOP action for all statemachines
517 */
518static void
519fsm_action_nop(fsm_instance *fi, int event, void *arg)
520{
521}
522
523/**
524 * Actions of the connection statemachine
525 *****************************************************************************/
526
527/**
528 * Helper function for conn_action_rx()
529 * Unpack a just received skb and hand it over to
530 * upper layers.
531 *
532 * @param conn The connection where this skb has been received.
533 * @param pskb The received skb.
534 */
536static void
537netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb)
538{
539 struct net_device *dev = conn->netdev;
540 struct netiucv_priv *privptr = dev->priv;
541 __u16 offset = 0;
542
543 skb_put(pskb, NETIUCV_HDRLEN);
544 pskb->dev = dev;
545 pskb->ip_summed = CHECKSUM_NONE;
546 pskb->protocol = ntohs(ETH_P_IP);
547
548 while (1) {
549 struct sk_buff *skb;
550 ll_header *header = (ll_header *)pskb->data;
551
552 if (!header->next)
553 break;
554
555 skb_pull(pskb, NETIUCV_HDRLEN);
556 header->next -= offset;
557 offset += header->next;
558 header->next -= NETIUCV_HDRLEN;
559 if (skb_tailroom(pskb) < header->next) {
560 PRINT_WARN("%s: Illegal next field in iucv header: "
561 "%d > %d\n",
562 dev->name, header->next, skb_tailroom(pskb));
563 IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
564 header->next, skb_tailroom(pskb));
565 return;
566 }
567 skb_put(pskb, header->next);
568 pskb->mac.raw = pskb->data;
569 skb = dev_alloc_skb(pskb->len);
570 if (!skb) {
571			PRINT_WARN("%s: Out of memory in netiucv_unpack_skb\n",
572 dev->name);
573 IUCV_DBF_TEXT(data, 2,
574 "Out of memory in netiucv_unpack_skb\n");
575 privptr->stats.rx_dropped++;
576 return;
577 }
578 memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
579 skb->mac.raw = skb->data;
580 skb->dev = pskb->dev;
581 skb->protocol = pskb->protocol;
582 pskb->ip_summed = CHECKSUM_UNNECESSARY;
583 /*
584 * Since receiving is always initiated from a tasklet (in iucv.c),
585 * we must use netif_rx_ni() instead of netif_rx()
586 */
587 netif_rx_ni(skb);
588 dev->last_rx = jiffies;
589 privptr->stats.rx_packets++;
590 privptr->stats.rx_bytes += skb->len;
591 skb_pull(pskb, header->next);
592 skb_put(pskb, NETIUCV_HDRLEN);
593 }
594}
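/*
 * Receive buffer format unpacked above: each packet is preceded by a
 * 2-byte ll_header whose next field holds the offset of the following
 * header; a next field of 0 terminates the chain.  Every packet is
 * copied into a freshly allocated skb before being handed to
 * netif_rx_ni().
 */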
595
596static void
597conn_action_rx(fsm_instance *fi, int event, void *arg)
598{
599 struct iucv_event *ev = (struct iucv_event *)arg;
600 struct iucv_connection *conn = ev->conn;
601 iucv_MessagePending *eib = (iucv_MessagePending *)ev->data;
602	struct netiucv_priv *privptr = (struct netiucv_priv *)conn->netdev->priv;
603
604 __u32 msglen = eib->ln1msg2.ipbfln1f;
605 int rc;
606
607 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
608
609 if (!conn->netdev) {
610 /* FRITZ: How to tell iucv LL to drop the msg? */
611 PRINT_WARN("Received data for unlinked connection\n");
612 IUCV_DBF_TEXT(data, 2,
613 "Received data for unlinked connection\n");
614 return;
615 }
616 if (msglen > conn->max_buffsize) {
617 /* FRITZ: How to tell iucv LL to drop the msg? */
618 privptr->stats.rx_dropped++;
619 PRINT_WARN("msglen %d > max_buffsize %d\n",
620 msglen, conn->max_buffsize);
621 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
622 msglen, conn->max_buffsize);
623 return;
624 }
625 conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head;
626 conn->rx_buff->len = 0;
627 rc = iucv_receive(conn->pathid, eib->ipmsgid, eib->iptrgcls,
628 conn->rx_buff->data, msglen, NULL, NULL, NULL);
629 if (rc || msglen < 5) {
630 privptr->stats.rx_errors++;
631 PRINT_WARN("iucv_receive returned %08x\n", rc);
632 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
633 return;
634 }
635 netiucv_unpack_skb(conn, conn->rx_buff);
636}
637
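/*
 * CONN_EVENT_TXDONE handler: release the skb whose transfer has just
 * completed, then drain the collect queue into tx_buff and send all
 * collected packets as a single multi-packet transfer. If nothing is
 * queued, the connection returns to IDLE.
 */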
638static void
639conn_action_txdone(fsm_instance *fi, int event, void *arg)
640{
641 struct iucv_event *ev = (struct iucv_event *)arg;
642 struct iucv_connection *conn = ev->conn;
643 iucv_MessageComplete *eib = (iucv_MessageComplete *)ev->data;
644 struct netiucv_priv *privptr = NULL;
645	/* The message tag is 1 for single transfers, 0 for multi-packet transfers. */
646 __u32 single_flag = eib->ipmsgtag;
647 __u32 txbytes = 0;
648 __u32 txpackets = 0;
649 __u32 stat_maxcq = 0;
650 struct sk_buff *skb;
651 unsigned long saveflags;
652 ll_header header;
653
654 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
655
656 if (conn && conn->netdev && conn->netdev->priv)
657 privptr = (struct netiucv_priv *)conn->netdev->priv;
658 conn->prof.tx_pending--;
659 if (single_flag) {
660 if ((skb = skb_dequeue(&conn->commit_queue))) {
661 atomic_dec(&skb->users);
662 dev_kfree_skb_any(skb);
663 if (privptr) {
664 privptr->stats.tx_packets++;
665 privptr->stats.tx_bytes +=
666 (skb->len - NETIUCV_HDRLEN
667 - NETIUCV_HDRLEN);
668 }
669 }
670 }
671 conn->tx_buff->data = conn->tx_buff->tail = conn->tx_buff->head;
672 conn->tx_buff->len = 0;
673 spin_lock_irqsave(&conn->collect_lock, saveflags);
674 while ((skb = skb_dequeue(&conn->collect_queue))) {
675 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
676 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
677 NETIUCV_HDRLEN);
678 memcpy(skb_put(conn->tx_buff, skb->len), skb->data, skb->len);
679 txbytes += skb->len;
680 txpackets++;
681 stat_maxcq++;
682 atomic_dec(&skb->users);
683 dev_kfree_skb_any(skb);
684 }
685 if (conn->collect_len > conn->prof.maxmulti)
686 conn->prof.maxmulti = conn->collect_len;
687 conn->collect_len = 0;
688 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
689 if (conn->tx_buff->len) {
690 int rc;
691
692 header.next = 0;
693 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
694 NETIUCV_HDRLEN);
695
696 conn->prof.send_stamp = xtime;
697 rc = iucv_send(conn->pathid, NULL, 0, 0, 0, 0,
698 conn->tx_buff->data, conn->tx_buff->len);
699 conn->prof.doios_multi++;
700 conn->prof.txlen += conn->tx_buff->len;
701 conn->prof.tx_pending++;
702 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
703 conn->prof.tx_max_pending = conn->prof.tx_pending;
704 if (rc) {
705 conn->prof.tx_pending--;
706 fsm_newstate(fi, CONN_STATE_IDLE);
707 if (privptr)
708 privptr->stats.tx_errors += txpackets;
709 PRINT_WARN("iucv_send returned %08x\n", rc);
710 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
711 } else {
712 if (privptr) {
713 privptr->stats.tx_packets += txpackets;
714 privptr->stats.tx_bytes += txbytes;
715 }
716 if (stat_maxcq > conn->prof.maxcqueue)
717 conn->prof.maxcqueue = stat_maxcq;
718 }
719 } else
720 fsm_newstate(fi, CONN_STATE_IDLE);
721}
722
723static void
724conn_action_connaccept(fsm_instance *fi, int event, void *arg)
725{
726 struct iucv_event *ev = (struct iucv_event *)arg;
727 struct iucv_connection *conn = ev->conn;
728 iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
729 struct net_device *netdev = conn->netdev;
730 struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
731 int rc;
732 __u16 msglimit;
733 __u8 udata[16];
734
735 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
736
737 rc = iucv_accept(eib->ippathid, NETIUCV_QUEUELEN_DEFAULT, udata, 0,
738 conn->handle, conn, NULL, &msglimit);
739 if (rc) {
740 PRINT_WARN("%s: IUCV accept failed with error %d\n",
741 netdev->name, rc);
742 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
743 return;
744 }
745 fsm_newstate(fi, CONN_STATE_IDLE);
746 conn->pathid = eib->ippathid;
747 netdev->tx_queue_len = msglimit;
748 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
749}
750
751static void
752conn_action_connreject(fsm_instance *fi, int event, void *arg)
753{
754 struct iucv_event *ev = (struct iucv_event *)arg;
755 struct iucv_connection *conn = ev->conn;
756 struct net_device *netdev = conn->netdev;
757 iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
758 __u8 udata[16];
759
760 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
761
762 iucv_sever(eib->ippathid, udata);
763 if (eib->ippathid != conn->pathid) {
764 PRINT_INFO("%s: IR Connection Pending; "
765 "pathid %d does not match original pathid %d\n",
766 netdev->name, eib->ippathid, conn->pathid);
767 IUCV_DBF_TEXT_(data, 2,
768 "connreject: IR pathid %d, conn. pathid %d\n",
769 eib->ippathid, conn->pathid);
770 iucv_sever(conn->pathid, udata);
771 }
772}
773
774static void
775conn_action_connack(fsm_instance *fi, int event, void *arg)
776{
777 struct iucv_event *ev = (struct iucv_event *)arg;
778 struct iucv_connection *conn = ev->conn;
779 iucv_ConnectionComplete *eib = (iucv_ConnectionComplete *)ev->data;
780 struct net_device *netdev = conn->netdev;
781 struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
782
783 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
784
785 fsm_deltimer(&conn->timer);
786 fsm_newstate(fi, CONN_STATE_IDLE);
787 if (eib->ippathid != conn->pathid) {
788 PRINT_INFO("%s: IR Connection Complete; "
789 "pathid %d does not match original pathid %d\n",
790 netdev->name, eib->ippathid, conn->pathid);
791 IUCV_DBF_TEXT_(data, 2,
792 "connack: IR pathid %d, conn. pathid %d\n",
793 eib->ippathid, conn->pathid);
794 conn->pathid = eib->ippathid;
795 }
796 netdev->tx_queue_len = eib->ipmsglim;
797 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
798}
799
800static void
801conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
802{
803 struct iucv_connection *conn = (struct iucv_connection *)arg;
804 __u8 udata[16];
805
806 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
807
808 fsm_deltimer(&conn->timer);
809 iucv_sever(conn->pathid, udata);
810 fsm_newstate(fi, CONN_STATE_STARTWAIT);
811}
812
813static void
814conn_action_connsever(fsm_instance *fi, int event, void *arg)
815{
816 struct iucv_event *ev = (struct iucv_event *)arg;
817 struct iucv_connection *conn = ev->conn;
818 struct net_device *netdev = conn->netdev;
819 struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
820 __u8 udata[16];
821
822 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
823
824 fsm_deltimer(&conn->timer);
825 iucv_sever(conn->pathid, udata);
826 PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
827 IUCV_DBF_TEXT(data, 2,
828 "conn_action_connsever: Remote dropped connection\n");
829 fsm_newstate(fi, CONN_STATE_STARTWAIT);
830 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
831}
832
833static void
834conn_action_start(fsm_instance *fi, int event, void *arg)
835{
836 struct iucv_event *ev = (struct iucv_event *)arg;
837 struct iucv_connection *conn = ev->conn;
838 __u16 msglimit;
839 int rc;
840
841 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
842
843 if (!conn->handle) {
844 IUCV_DBF_TEXT(trace, 5, "calling iucv_register_program\n");
845 conn->handle =
846 iucv_register_program(iucvMagic, conn->userid,
847 netiucv_mask,
848 &netiucv_ops, conn);
849 fsm_newstate(fi, CONN_STATE_STARTWAIT);
850 if (!conn->handle) {
851 fsm_newstate(fi, CONN_STATE_REGERR);
852 conn->handle = NULL;
853 IUCV_DBF_TEXT(setup, 2,
854 "NULL from iucv_register_program\n");
855 return;
856 }
857
858 PRINT_DEBUG("%s('%s'): registered successfully\n",
859 conn->netdev->name, conn->userid);
860 }
861
862 PRINT_DEBUG("%s('%s'): connecting ...\n",
863 conn->netdev->name, conn->userid);
864
865 /* We must set the state before calling iucv_connect because the callback
866 * handler could be called at any point after the connection request is
867 * sent */
868
869 fsm_newstate(fi, CONN_STATE_SETUPWAIT);
870 rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic,
871 conn->userid, iucv_host, 0, NULL, &msglimit,
872 conn->handle, conn);
873 switch (rc) {
874 case 0:
875 conn->netdev->tx_queue_len = msglimit;
876 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
877 CONN_EVENT_TIMER, conn);
878 return;
879 case 11:
880 PRINT_INFO("%s: User %s is currently not available.\n",
881 conn->netdev->name,
882 netiucv_printname(conn->userid));
883 fsm_newstate(fi, CONN_STATE_STARTWAIT);
884 return;
885 case 12:
886 PRINT_INFO("%s: User %s is currently not ready.\n",
887 conn->netdev->name,
888 netiucv_printname(conn->userid));
889 fsm_newstate(fi, CONN_STATE_STARTWAIT);
890 return;
891 case 13:
892 PRINT_WARN("%s: Too many IUCV connections.\n",
893 conn->netdev->name);
894 fsm_newstate(fi, CONN_STATE_CONNERR);
895 break;
896 case 14:
897 PRINT_WARN(
898 "%s: User %s has too many IUCV connections.\n",
899 conn->netdev->name,
900 netiucv_printname(conn->userid));
901 fsm_newstate(fi, CONN_STATE_CONNERR);
902 break;
903 case 15:
904 PRINT_WARN(
905 "%s: No IUCV authorization in CP directory.\n",
906 conn->netdev->name);
907 fsm_newstate(fi, CONN_STATE_CONNERR);
908 break;
909 default:
910 PRINT_WARN("%s: iucv_connect returned error %d\n",
911 conn->netdev->name, rc);
912 fsm_newstate(fi, CONN_STATE_CONNERR);
913 break;
914 }
915 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
916 IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
917 iucv_unregister_program(conn->handle);
918 conn->handle = NULL;
919}
920
921static void
922netiucv_purge_skb_queue(struct sk_buff_head *q)
923{
924 struct sk_buff *skb;
925
926 while ((skb = skb_dequeue(q))) {
927 atomic_dec(&skb->users);
928 dev_kfree_skb_any(skb);
929 }
930}
931
932static void
933conn_action_stop(fsm_instance *fi, int event, void *arg)
934{
935 struct iucv_event *ev = (struct iucv_event *)arg;
936 struct iucv_connection *conn = ev->conn;
937 struct net_device *netdev = conn->netdev;
938 struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
939
940 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
941
942 fsm_deltimer(&conn->timer);
943 fsm_newstate(fi, CONN_STATE_STOPPED);
944 netiucv_purge_skb_queue(&conn->collect_queue);
945	if (conn->handle) {
946		IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
947		iucv_unregister_program(conn->handle);
948		conn->handle = NULL;
	}
949 netiucv_purge_skb_queue(&conn->commit_queue);
950 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
951}
952
953static void
954conn_action_inval(fsm_instance *fi, int event, void *arg)
955{
956 struct iucv_event *ev = (struct iucv_event *)arg;
957 struct iucv_connection *conn = ev->conn;
958 struct net_device *netdev = conn->netdev;
959
960 PRINT_WARN("%s: Cannot connect without username\n",
961 netdev->name);
962 IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
963}
964
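/*
 * Jump table of the connection statemachine: each entry maps a
 * (state, event) pair to the action to be executed.
 */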
965static const fsm_node conn_fsm[] = {
966 { CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval },
967 { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start },
968
969 { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop },
970 { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop },
971 { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop },
972 { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop },
973 { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop },
974 { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop },
975 { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop },
976
977 { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject },
978 { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
979 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
980 { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject },
981 { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject },
982
983 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack },
984 { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev },
985
986 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever },
987 { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever },
988 { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever },
989
990 { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx },
991 { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx },
992
993 { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone },
994 { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone },
995};
996
997static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
998
999
1000/**
1001 * Actions of the interface statemachine.
1002 *****************************************************************************/
1003
1004/**
1005 * Startup connection by sending CONN_EVENT_START to it.
1006 *
1007 * @param fi An instance of an interface statemachine.
1008 * @param event The event that just happened.
1009 * @param arg Generic pointer, cast from struct net_device * upon call.
1010 */
1011static void
1012dev_action_start(fsm_instance *fi, int event, void *arg)
1013{
1014 struct net_device *dev = (struct net_device *)arg;
1015 struct netiucv_priv *privptr = dev->priv;
1016 struct iucv_event ev;
1017
1018 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1019
1020 ev.conn = privptr->conn;
1021 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1022 fsm_event(privptr->conn->fsm, CONN_EVENT_START, &ev);
1023}
1024
1025/**
1026 * Shutdown connection by sending CONN_EVENT_STOP to it.
1027 *
1028 * @param fi An instance of an interface statemachine.
1029 * @param event The event that just happened.
1030 * @param arg Generic pointer, cast from struct net_device * upon call.
1031 */
1032static void
1033dev_action_stop(fsm_instance *fi, int event, void *arg)
1034{
1035 struct net_device *dev = (struct net_device *)arg;
1036 struct netiucv_priv *privptr = dev->priv;
1037 struct iucv_event ev;
1038
1039 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1040
1041 ev.conn = privptr->conn;
1042
1043 fsm_newstate(fi, DEV_STATE_STOPWAIT);
1044 fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1045}
1046
1047/**
1048 * Called from connection statemachine
1049 * when a connection is up and running.
1050 *
1051 * @param fi An instance of an interface statemachine.
1052 * @param event The event that just happened.
1053 * @param arg Generic pointer, cast from struct net_device * upon call.
1054 */
1055static void
1056dev_action_connup(fsm_instance *fi, int event, void *arg)
1057{
1058 struct net_device *dev = (struct net_device *)arg;
1059 struct netiucv_priv *privptr = dev->priv;
1060
1061 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1062
1063 switch (fsm_getstate(fi)) {
1064 case DEV_STATE_STARTWAIT:
1065 fsm_newstate(fi, DEV_STATE_RUNNING);
1066 PRINT_INFO("%s: connected with remote side %s\n",
1067 dev->name, privptr->conn->userid);
1068 IUCV_DBF_TEXT(setup, 3,
1069 "connection is up and running\n");
1070 break;
1071 case DEV_STATE_STOPWAIT:
1072 PRINT_INFO(
1073 "%s: got connection UP event during shutdown!\n",
1074 dev->name);
1075 IUCV_DBF_TEXT(data, 2,
1076 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1077 break;
1078 }
1079}
1080
1081/**
1082 * Called from connection statemachine
1083 * when a connection has been shutdown.
1084 *
1085 * @param fi An instance of an interface statemachine.
1086 * @param event The event that just happened.
1087 * @param arg Generic pointer, cast from struct net_device * upon call.
1088 */
1089static void
1090dev_action_conndown(fsm_instance *fi, int event, void *arg)
1091{
1092 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1093
1094 switch (fsm_getstate(fi)) {
1095 case DEV_STATE_RUNNING:
1096 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1097 break;
1098 case DEV_STATE_STOPWAIT:
1099 fsm_newstate(fi, DEV_STATE_STOPPED);
1100 IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1101 break;
1102 }
1103}
1104
1105static const fsm_node dev_fsm[] = {
1106 { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
1107
1108 { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start },
1109 { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
1110
1111 { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
1112 { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
1113
1114 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
1115 { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
1116 { DEV_STATE_RUNNING, DEV_EVENT_CONUP, fsm_action_nop },
1117};
1118
1119static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1120
1121/**
1122 * Transmit a packet.
1123 * This is a helper function for netiucv_tx().
1124 *
1125 * @param conn Connection to be used for sending.
1126 * @param skb Pointer to struct sk_buff of packet to send.
1127 * The linklevel header has already been set up
1128 * by netiucv_tx().
1129 *
1130 * @return 0 on success, -EBUSY or -ENOMEM on failure.
1131 */
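/*
 * Note: if the connection is not IDLE (a transfer is still in flight),
 * the skb is not sent immediately but appended to the collect queue;
 * conn_action_txdone() flushes that queue when the pending transfer
 * completes.
 */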
1132static int
1133netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
1134 unsigned long saveflags;
1135 ll_header header;
1136 int rc = 0;
1137
1138 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1139 int l = skb->len + NETIUCV_HDRLEN;
1140
1141 spin_lock_irqsave(&conn->collect_lock, saveflags);
1142 if (conn->collect_len + l >
1143 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1144 rc = -EBUSY;
1145 IUCV_DBF_TEXT(data, 2,
1146 "EBUSY from netiucv_transmit_skb\n");
1147 } else {
1148 atomic_inc(&skb->users);
1149 skb_queue_tail(&conn->collect_queue, skb);
1150 conn->collect_len += l;
1151 }
1152 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1153 } else {
1154 struct sk_buff *nskb = skb;
1155 /**
1156 * Copy the skb to a new allocated skb in lowmem only if the
1157 * data is located above 2G in memory or tailroom is < 2.
1158 */
1159 unsigned long hi =
1160 ((unsigned long)(skb->tail + NETIUCV_HDRLEN)) >> 31;
1161 int copied = 0;
1162 if (hi || (skb_tailroom(skb) < 2)) {
1163 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1164 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1165 if (!nskb) {
1166 PRINT_WARN("%s: Could not allocate tx_skb\n",
1167 conn->netdev->name);
1168 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1169 rc = -ENOMEM;
1170 return rc;
1171 } else {
1172 skb_reserve(nskb, NETIUCV_HDRLEN);
1173 memcpy(skb_put(nskb, skb->len),
1174 skb->data, skb->len);
1175 }
1176 copied = 1;
1177 }
1178 /**
1179 * skb now is below 2G and has enough room. Add headers.
1180 */
1181 header.next = nskb->len + NETIUCV_HDRLEN;
1182 memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1183 header.next = 0;
1184 memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1185
1186 fsm_newstate(conn->fsm, CONN_STATE_TX);
1187 conn->prof.send_stamp = xtime;
1188
1189 rc = iucv_send(conn->pathid, NULL, 0, 0, 1 /* single_flag */,
1190 0, nskb->data, nskb->len);
1191 /* Shut up, gcc! nskb is always below 2G. */
1192 conn->prof.doios_single++;
1193 conn->prof.txlen += skb->len;
1194 conn->prof.tx_pending++;
1195 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1196 conn->prof.tx_max_pending = conn->prof.tx_pending;
1197 if (rc) {
1198 struct netiucv_priv *privptr;
1199 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1200 conn->prof.tx_pending--;
1201 privptr = (struct netiucv_priv *)conn->netdev->priv;
1202 if (privptr)
1203 privptr->stats.tx_errors++;
1204 if (copied)
1205 dev_kfree_skb(nskb);
1206 else {
1207 /**
1208 * Remove our headers. They get added
1209 * again on retransmit.
1210 */
1211 skb_pull(skb, NETIUCV_HDRLEN);
1212 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1213 }
1214 PRINT_WARN("iucv_send returned %08x\n", rc);
1215 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1216 } else {
1217 if (copied)
1218 dev_kfree_skb(skb);
1219 atomic_inc(&nskb->users);
1220 skb_queue_tail(&conn->commit_queue, nskb);
1221 }
1222 }
1223
1224 return rc;
1225}
1226
1227/**
1228 * Interface API for upper network layers
1229 *****************************************************************************/
1230
1231/**
1232 * Open an interface.
1233 * Called from generic network layer when ifconfig up is run.
1234 *
1235 * @param dev Pointer to interface struct.
1236 *
1237 * @return 0 on success, -ERRNO on failure. (Never fails.)
1238 */
1239static int
1240netiucv_open(struct net_device *dev) {
1241 fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START,dev);
1242 return 0;
1243}
1244
1245/**
1246 * Close an interface.
1247 * Called from generic network layer when ifconfig down is run.
1248 *
1249 * @param dev Pointer to interface struct.
1250 *
1251 * @return 0 on success, -ERRNO on failure. (Never fails.)
1252 */
1253static int
1254netiucv_close(struct net_device *dev) {
1255 fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_STOP, dev);
1256 return 0;
1257}
1258
1259/**
1260 * Start transmission of a packet.
1261 * Called from generic network device layer.
1262 *
1263 * @param skb Pointer to buffer containing the packet.
1264 * @param dev Pointer to interface struct.
1265 *
1266 * @return 0 if packet consumed, !0 if packet rejected.
1267 * Note: If we return !0, then the packet is freed by
1268 * the generic network layer.
1269 */
1270static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1271{
1272 int rc = 0;
1273 struct netiucv_priv *privptr = dev->priv;
1274
1275 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1276 /**
1277 * Some sanity checks ...
1278 */
1279 if (skb == NULL) {
1280 PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
1281 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1282 privptr->stats.tx_dropped++;
1283 return 0;
1284 }
1285 if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1286 PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
1287 dev->name, NETIUCV_HDRLEN);
1288 IUCV_DBF_TEXT(data, 2,
1289 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1290 dev_kfree_skb(skb);
1291 privptr->stats.tx_dropped++;
1292 return 0;
1293 }
1294
1295 /**
1296 * If connection is not running, try to restart it
1297 * and throw away packet.
1298 */
1299 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1300 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
1301 dev_kfree_skb(skb);
1302 privptr->stats.tx_dropped++;
1303 privptr->stats.tx_errors++;
1304 privptr->stats.tx_carrier_errors++;
1305 return 0;
1306 }
1307
1308 if (netiucv_test_and_set_busy(dev)) {
1309 IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1310 return -EBUSY;
1311 }
1312 dev->trans_start = jiffies;
1313 if (netiucv_transmit_skb(privptr->conn, skb))
1314 rc = 1;
1315 netiucv_clear_busy(dev);
1316 return rc;
1317}
1318
1319/**
1320 * Returns interface statistics of a device.
1321 *
1322 * @param dev Pointer to interface struct.
1323 *
1324 * @return Pointer to stats struct of this interface.
1325 */
1326static struct net_device_stats *
1327netiucv_stats (struct net_device * dev)
1328{
1329 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1330 return &((struct netiucv_priv *)dev->priv)->stats;
1331}
1332
1333/**
1334 * Sets MTU of an interface.
1335 *
1336 * @param dev Pointer to interface struct.
1337 * @param new_mtu The new MTU to use for this interface.
1338 *
1339 * @return 0 on success, -EINVAL if MTU is out of valid range.
1340 * (valid range is 576 .. NETIUCV_MTU_MAX).
1341 */
1342static int
1343netiucv_change_mtu (struct net_device * dev, int new_mtu)
1344{
1345 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1346 if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX)) {
1347 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1348 return -EINVAL;
1349 }
1350 dev->mtu = new_mtu;
1351 return 0;
1352}
1353
1354/**
1355 * attributes in sysfs
1356 *****************************************************************************/
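/*
 * Each netiucv device exposes a writable 'user' (peer userid) and
 * 'buffer' (maximum buffer size) attribute, plus a "stats" group of
 * transfer counters; writing to a stats attribute resets its counter.
 */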
1357
1358static ssize_t
1359user_show (struct device *dev, char *buf)
1360{
1361 struct netiucv_priv *priv = dev->driver_data;
1362
1363 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1364 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1365}
1366
1367static ssize_t
1368user_write (struct device *dev, const char *buf, size_t count)
1369{
1370 struct netiucv_priv *priv = dev->driver_data;
1371 struct net_device *ndev = priv->conn->netdev;
1372 char *p;
1373 char *tmp;
1374 char username[10];
1375 int i;
1376
1377 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1378 if (count>9) {
1379 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
1380 IUCV_DBF_TEXT_(setup, 2,
1381 "%d is length of username\n", (int)count);
1382 return -EINVAL;
1383 }
1384
1385 tmp = strsep((char **) &buf, "\n");
1386 for (i=0, p=tmp; i<8 && *p; i++, p++) {
1387 if (isalnum(*p) || (*p == '$'))
1388 username[i]= *p;
1389 else if (*p == '\n') {
1390 /* trailing lf, grr */
1391 break;
1392 } else {
1393 PRINT_WARN("netiucv: Invalid char %c in username!\n",
1394 *p);
1395 IUCV_DBF_TEXT_(setup, 2,
1396 "username: invalid character %c\n",
1397 *p);
1398 return -EINVAL;
1399 }
1400 }
1401 while (i<9)
1402 username[i++] = ' ';
1403 username[9] = '\0';
1404
1405 if (memcmp(username, priv->conn->userid, 8)) {
1406 /* username changed */
1407 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
1408 PRINT_WARN(
1409 "netiucv: device %s active, connected to %s\n",
1410 dev->bus_id, priv->conn->userid);
1411 PRINT_WARN("netiucv: user cannot be updated\n");
1412 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1413 return -EBUSY;
1414 }
1415 }
1416 memcpy(priv->conn->userid, username, 9);
1417
1418 return count;
1419
1420}
1421
1422static DEVICE_ATTR(user, 0644, user_show, user_write);
1423
1424static ssize_t
1425buffer_show (struct device *dev, char *buf)
1426{
1427 struct netiucv_priv *priv = dev->driver_data;
1428
1429 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1430 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1431}
1432
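/*
 * Parse and validate a new maximum buffer size. The value must not
 * exceed NETIUCV_BUFSIZE_MAX, must leave room for an MTU of at least
 * 576 plus two ll_headers and, while the interface is running, must
 * still cover the current MTU. While the interface is down, the MTU is
 * adjusted to match the new buffer size.
 */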
1433static ssize_t
1434buffer_write (struct device *dev, const char *buf, size_t count)
1435{
1436 struct netiucv_priv *priv = dev->driver_data;
1437 struct net_device *ndev = priv->conn->netdev;
1438 char *e;
1439 int bs1;
1440
1441 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1442 if (count >= 39)
1443 return -EINVAL;
1444
1445 bs1 = simple_strtoul(buf, &e, 0);
1446
1447 if (e && (!isspace(*e))) {
1448 PRINT_WARN("netiucv: Invalid character in buffer!\n");
1449 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
1450 return -EINVAL;
1451 }
1452 if (bs1 > NETIUCV_BUFSIZE_MAX) {
1453 PRINT_WARN("netiucv: Given buffer size %d too large.\n",
1454 bs1);
1455 IUCV_DBF_TEXT_(setup, 2,
1456 "buffer_write: buffer size %d too large\n",
1457 bs1);
1458 return -EINVAL;
1459 }
1460 if ((ndev->flags & IFF_RUNNING) &&
1461 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1462 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1463 bs1);
1464 IUCV_DBF_TEXT_(setup, 2,
1465 "buffer_write: buffer size %d too small\n",
1466 bs1);
1467 return -EINVAL;
1468 }
1469 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1470 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1471 bs1);
1472 IUCV_DBF_TEXT_(setup, 2,
1473 "buffer_write: buffer size %d too small\n",
1474 bs1);
1475 return -EINVAL;
1476 }
1477
1478 priv->conn->max_buffsize = bs1;
1479 if (!(ndev->flags & IFF_RUNNING))
1480 ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1481
1482 return count;
1483
1484}
1485
1486static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1487
1488static ssize_t
1489dev_fsm_show (struct device *dev, char *buf)
1490{
1491 struct netiucv_priv *priv = dev->driver_data;
1492
1493 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1494 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1495}
1496
1497static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1498
1499static ssize_t
1500conn_fsm_show (struct device *dev, char *buf)
1501{
1502 struct netiucv_priv *priv = dev->driver_data;
1503
1504 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1505 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1506}
1507
1508static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1509
1510static ssize_t
1511maxmulti_show (struct device *dev, char *buf)
1512{
1513 struct netiucv_priv *priv = dev->driver_data;
1514
1515 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1516 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1517}
1518
1519static ssize_t
1520maxmulti_write (struct device *dev, const char *buf, size_t count)
1521{
1522 struct netiucv_priv *priv = dev->driver_data;
1523
1524 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1525 priv->conn->prof.maxmulti = 0;
1526 return count;
1527}
1528
1529static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1530
1531static ssize_t
1532maxcq_show (struct device *dev, char *buf)
1533{
1534 struct netiucv_priv *priv = dev->driver_data;
1535
1536 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1537 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1538}
1539
1540static ssize_t
1541maxcq_write (struct device *dev, const char *buf, size_t count)
1542{
1543 struct netiucv_priv *priv = dev->driver_data;
1544
1545 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1546 priv->conn->prof.maxcqueue = 0;
1547 return count;
1548}
1549
1550static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1551
1552static ssize_t
1553sdoio_show (struct device *dev, char *buf)
1554{
1555 struct netiucv_priv *priv = dev->driver_data;
1556
1557 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1558 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1559}
1560
1561static ssize_t
1562sdoio_write (struct device *dev, const char *buf, size_t count)
1563{
1564 struct netiucv_priv *priv = dev->driver_data;
1565
1566 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1567 priv->conn->prof.doios_single = 0;
1568 return count;
1569}
1570
1571static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1572
1573static ssize_t
1574mdoio_show (struct device *dev, char *buf)
1575{
1576 struct netiucv_priv *priv = dev->driver_data;
1577
1578 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1579 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1580}
1581
1582static ssize_t
1583mdoio_write (struct device *dev, const char *buf, size_t count)
1584{
1585 struct netiucv_priv *priv = dev->driver_data;
1586
1587 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1588 priv->conn->prof.doios_multi = 0;
1589 return count;
1590}
1591
1592static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1593
1594static ssize_t
1595txlen_show (struct device *dev, char *buf)
1596{
1597 struct netiucv_priv *priv = dev->driver_data;
1598
1599 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1600 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1601}
1602
1603static ssize_t
1604txlen_write (struct device *dev, const char *buf, size_t count)
1605{
1606 struct netiucv_priv *priv = dev->driver_data;
1607
1608 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1609 priv->conn->prof.txlen = 0;
1610 return count;
1611}
1612
1613static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1614
1615static ssize_t
1616txtime_show (struct device *dev, char *buf)
1617{
1618 struct netiucv_priv *priv = dev->driver_data;
1619
1620 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1621 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1622}
1623
1624static ssize_t
1625txtime_write (struct device *dev, const char *buf, size_t count)
1626{
1627 struct netiucv_priv *priv = dev->driver_data;
1628
1629 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1630 priv->conn->prof.tx_time = 0;
1631 return count;
1632}
1633
1634static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1635
1636static ssize_t
1637txpend_show (struct device *dev, char *buf)
1638{
1639 struct netiucv_priv *priv = dev->driver_data;
1640
1641 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1642 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1643}
1644
1645static ssize_t
1646txpend_write (struct device *dev, const char *buf, size_t count)
1647{
1648 struct netiucv_priv *priv = dev->driver_data;
1649
1650 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1651 priv->conn->prof.tx_pending = 0;
1652 return count;
1653}
1654
1655static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1656
1657static ssize_t
1658txmpnd_show (struct device *dev, char *buf)
1659{
1660 struct netiucv_priv *priv = dev->driver_data;
1661
1662 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1663 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1664}
1665
1666static ssize_t
1667txmpnd_write (struct device *dev, const char *buf, size_t count)
1668{
1669 struct netiucv_priv *priv = dev->driver_data;
1670
1671 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1672 priv->conn->prof.tx_max_pending = 0;
1673 return count;
1674}
1675
1676static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1677
1678static struct attribute *netiucv_attrs[] = {
1679 &dev_attr_buffer.attr,
1680 &dev_attr_user.attr,
1681 NULL,
1682};
1683
1684static struct attribute_group netiucv_attr_group = {
1685 .attrs = netiucv_attrs,
1686};
1687
1688static struct attribute *netiucv_stat_attrs[] = {
1689 &dev_attr_device_fsm_state.attr,
1690 &dev_attr_connection_fsm_state.attr,
1691 &dev_attr_max_tx_buffer_used.attr,
1692 &dev_attr_max_chained_skbs.attr,
1693 &dev_attr_tx_single_write_ops.attr,
1694 &dev_attr_tx_multi_write_ops.attr,
1695 &dev_attr_netto_bytes.attr,
1696 &dev_attr_max_tx_io_time.attr,
1697 &dev_attr_tx_pending.attr,
1698 &dev_attr_tx_max_pending.attr,
1699 NULL,
1700};
1701
1702static struct attribute_group netiucv_stat_attr_group = {
1703 .name = "stats",
1704 .attrs = netiucv_stat_attrs,
1705};
1706
1707static inline int
1708netiucv_add_files(struct device *dev)
1709{
1710 int ret;
1711
1712 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1713 ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
1714 if (ret)
1715 return ret;
1716 ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
1717 if (ret)
1718 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1719 return ret;
1720}
1721
1722static inline void
1723netiucv_remove_files(struct device *dev)
1724{
1725 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1726 sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
1727 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1728}
1729
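/*
 * Create a pseudo device on the IUCV bus so the attribute groups above
 * get a home in sysfs. The struct device is obtained via kmalloc(), so
 * its release callback can simply be kfree (see below).
 */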
1730static int
1731netiucv_register_device(struct net_device *ndev)
1732{
1733 struct netiucv_priv *priv = ndev->priv;
1734 struct device *dev = kmalloc(sizeof(struct device), GFP_KERNEL);
1735 int ret;
1736
1737
1738 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1739
1740 if (dev) {
1741 memset(dev, 0, sizeof(struct device));
1742 snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
1743 dev->bus = &iucv_bus;
1744 dev->parent = iucv_root;
1745 /*
1746 * The release function could be called after the
1747		 * module has been unloaded. Its _only_ task is to
1748		 * free the struct. Therefore, we specify kfree()
1749		 * directly here. (Probably a little obfuscated,
1750		 * but legitimate ...).
1751 */
1752 dev->release = (void (*)(struct device *))kfree;
1753 dev->driver = &netiucv_driver;
1754 } else
1755 return -ENOMEM;
1756
1757 ret = device_register(dev);
1758
1759 if (ret)
1760 return ret;
1761 ret = netiucv_add_files(dev);
1762 if (ret)
1763 goto out_unreg;
1764 priv->dev = dev;
1765 dev->driver_data = priv;
1766 return 0;
1767
1768out_unreg:
1769 device_unregister(dev);
1770 return ret;
1771}
1772
1773static void
1774netiucv_unregister_device(struct device *dev)
1775{
1776 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1777 netiucv_remove_files(dev);
1778 device_unregister(dev);
1779}
1780
1781/**
1782 * Allocate and initialize a new connection structure.
1783 * Add it to the list of netiucv connections;
1784 */
1785static struct iucv_connection *
1786netiucv_new_connection(struct net_device *dev, char *username)
1787{
1788 struct iucv_connection **clist = &iucv_connections;
1789 struct iucv_connection *conn =
1790 (struct iucv_connection *)
1791 kmalloc(sizeof(struct iucv_connection), GFP_KERNEL);
1792
1793 if (conn) {
1794 memset(conn, 0, sizeof(struct iucv_connection));
1795 skb_queue_head_init(&conn->collect_queue);
1796 skb_queue_head_init(&conn->commit_queue);
1797 conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1798 conn->netdev = dev;
1799
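		/*
		 * Both I/O buffers must be 31-bit addressable (below 2 GB)
		 * for IUCV, hence the GFP_DMA allocations.
		 */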
1800 conn->rx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
1801 GFP_KERNEL | GFP_DMA);
1802 if (!conn->rx_buff) {
1803 kfree(conn);
1804 return NULL;
1805 }
1806 conn->tx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
1807 GFP_KERNEL | GFP_DMA);
1808 if (!conn->tx_buff) {
1809 kfree_skb(conn->rx_buff);
1810 kfree(conn);
1811 return NULL;
1812 }
1813 conn->fsm = init_fsm("netiucvconn", conn_state_names,
1814 conn_event_names, NR_CONN_STATES,
1815 NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1816 GFP_KERNEL);
1817 if (!conn->fsm) {
1818 kfree_skb(conn->tx_buff);
1819 kfree_skb(conn->rx_buff);
1820 kfree(conn);
1821 return NULL;
1822 }
1823 fsm_settimer(conn->fsm, &conn->timer);
1824 fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1825
1826 if (username) {
1827 memcpy(conn->userid, username, 9);
1828 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1829 }
1830
1831 conn->next = *clist;
1832 *clist = conn;
1833 }
1834 return conn;
1835}
1836
1837/**
1838 * Release a connection structure and remove it from the
1839 * list of netiucv connections.
1840 */
1841static void
1842netiucv_remove_connection(struct iucv_connection *conn)
1843{
1844 struct iucv_connection **clist = &iucv_connections;
1845
1846 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1847 if (conn == NULL)
1848 return;
1849 while (*clist) {
1850 if (*clist == conn) {
1851 *clist = conn->next;
1852 if (conn->handle) {
1853 iucv_unregister_program(conn->handle);
1854 conn->handle = NULL;
1855 }
1856 fsm_deltimer(&conn->timer);
1857 kfree_fsm(conn->fsm);
1858 kfree_skb(conn->rx_buff);
1859 kfree_skb(conn->tx_buff);
1860 return;
1861 }
1862 clist = &((*clist)->next);
1863 }
1864}
1865
1866/**
1867 * Release everything of a net device.
1868 */
1869static void
1870netiucv_free_netdevice(struct net_device *dev)
1871{
1872 struct netiucv_priv *privptr;
1873
1874 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1875
1876 if (!dev)
1877 return;
1878
1879 privptr = (struct netiucv_priv *)dev->priv;
1880 if (privptr) {
1881 if (privptr->conn)
1882 netiucv_remove_connection(privptr->conn);
1883 if (privptr->fsm)
1884 kfree_fsm(privptr->fsm);
1885 privptr->conn = NULL; privptr->fsm = NULL;
1886 /* privptr gets freed by free_netdev() */
1887 }
1888 free_netdev(dev);
1889}
1890
1891/**
1892 * Initialize a net device. (Called from kernel in alloc_netdev())
1893 */
1894static void
1895netiucv_setup_netdevice(struct net_device *dev)
1896{
1897 memset(dev->priv, 0, sizeof(struct netiucv_priv));
1898
1899 dev->mtu = NETIUCV_MTU_DEFAULT;
1900 dev->hard_start_xmit = netiucv_tx;
1901 dev->open = netiucv_open;
1902 dev->stop = netiucv_close;
1903 dev->get_stats = netiucv_stats;
1904 dev->change_mtu = netiucv_change_mtu;
1905 dev->destructor = netiucv_free_netdevice;
1906 dev->hard_header_len = NETIUCV_HDRLEN;
1907 dev->addr_len = 0;
1908 dev->type = ARPHRD_SLIP;
1909 dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
1910 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1911 SET_MODULE_OWNER(dev);
1912}
1913
1914/**
1915 * Allocate and initialize everything of a net device.
1916 */
1917static struct net_device *
1918netiucv_init_netdevice(char *username)
1919{
1920 struct netiucv_priv *privptr;
1921 struct net_device *dev;
1922
1923 dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
1924 netiucv_setup_netdevice);
1925 if (!dev)
1926 return NULL;
1927 if (dev_alloc_name(dev, dev->name) < 0) {
1928 free_netdev(dev);
1929 return NULL;
1930 }
1931
1932 privptr = (struct netiucv_priv *)dev->priv;
1933 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1934 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1935 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
1936 if (!privptr->fsm) {
1937 free_netdev(dev);
1938 return NULL;
1939 }
1940 privptr->conn = netiucv_new_connection(dev, username);
1941 if (!privptr->conn) {
1942 kfree_fsm(privptr->fsm);
1943 free_netdev(dev);
1944 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
1945 return NULL;
1946 }
1947 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1948
1949 return dev;
1950}
1951
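/*
 * Handler for the driver attribute 'connection': writing a z/VM userid
 * (up to 8 characters, alphanumeric or '$') creates a new iucv<n>
 * interface peered with that guest.
 */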
1952static ssize_t
1953conn_write(struct device_driver *drv, const char *buf, size_t count)
1954{
1955 char *p;
1956 char username[10];
1957 int i, ret;
1958 struct net_device *dev;
1959
1960 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1961 if (count>9) {
1962 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
1963 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1964 return -EINVAL;
1965 }
1966
1967 for (i=0, p=(char *)buf; i<8 && *p; i++, p++) {
1968 if (isalnum(*p) || (*p == '$'))
1969 username[i]= *p;
1970 else if (*p == '\n') {
1971 /* trailing lf, grr */
1972 break;
1973 } else {
1974 PRINT_WARN("netiucv: Invalid character in username!\n");
1975 IUCV_DBF_TEXT_(setup, 2,
1976 "conn_write: invalid character %c\n", *p);
1977 return -EINVAL;
1978 }
1979 }
1980 while (i<9)
1981 username[i++] = ' ';
1982 username[9] = '\0';
1983 dev = netiucv_init_netdevice(username);
1984 if (!dev) {
1985 PRINT_WARN(
1986 "netiucv: Could not allocate network device structure "
1987 "for user '%s'\n", netiucv_printname(username));
1988 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
1989 return -ENODEV;
1990 }
1991
1992 if ((ret = netiucv_register_device(dev))) {
1993 IUCV_DBF_TEXT_(setup, 2,
1994 "ret %d from netiucv_register_device\n", ret);
1995 goto out_free_ndev;
1996 }
1997
1998 /* sysfs magic */
1999 SET_NETDEV_DEV(dev,
2000 (struct device*)((struct netiucv_priv*)dev->priv)->dev);
2001
2002 if ((ret = register_netdev(dev))) {
2003 netiucv_unregister_device((struct device*)
2004 ((struct netiucv_priv*)dev->priv)->dev);
2005 goto out_free_ndev;
2006 }
2007
2008 PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
2009
2010 return count;
2011
2012out_free_ndev:
2013 PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
2014 IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
2015 netiucv_free_netdevice(dev);
2016 return ret;
2017}
2018
2019DRIVER_ATTR(connection, 0200, NULL, conn_write);
2020
2021static ssize_t
2022remove_write (struct device_driver *drv, const char *buf, size_t count)
2023{
2024 struct iucv_connection **clist = &iucv_connections;
2025 struct net_device *ndev;
2026 struct netiucv_priv *priv;
2027 struct device *dev;
2028 char name[IFNAMSIZ];
2029 char *p;
2030 int i;
2031
2032 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2033
2034 if (count >= IFNAMSIZ)
2035 count = IFNAMSIZ-1;
2036
2037 for (i=0, p=(char *)buf; i<count && *p; i++, p++) {
2038		if ((*p == '\n') || (*p == ' ')) {
2039 /* trailing lf, grr */
2040 break;
2041 } else {
2042 name[i]=*p;
2043 }
2044 }
2045 name[i] = '\0';
2046
2047 while (*clist) {
2048 ndev = (*clist)->netdev;
2049 priv = (struct netiucv_priv*)ndev->priv;
2050 dev = priv->dev;
2051
2052 if (strncmp(name, ndev->name, count)) {
2053 clist = &((*clist)->next);
2054 continue;
2055 }
2056 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2057 PRINT_WARN(
2058 "netiucv: net device %s active with peer %s\n",
2059 ndev->name, priv->conn->userid);
2060 PRINT_WARN("netiucv: %s cannot be removed\n",
2061 ndev->name);
2062 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2063 return -EBUSY;
2064 }
2065 unregister_netdev(ndev);
2066 netiucv_unregister_device(dev);
2067 return count;
2068 }
2069 PRINT_WARN("netiucv: net device %s unknown\n", name);
2070 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2071 return -EINVAL;
2072}
2073
2074DRIVER_ATTR(remove, 0200, NULL, remove_write);
2075
2076static void
2077netiucv_banner(void)
2078{
2079 char vbuf[] = "$Revision: 1.63 $";
2080 char *version = vbuf;
2081
2082 if ((version = strchr(version, ':'))) {
2083 char *p = strchr(version + 1, '$');
2084 if (p)
2085 *p = '\0';
2086 } else
2087 version = " ??? ";
2088 PRINT_INFO("NETIUCV driver Version%s initialized\n", version);
2089}
2090
2091static void __exit
2092netiucv_exit(void)
2093{
2094 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2095 while (iucv_connections) {
2096 struct net_device *ndev = iucv_connections->netdev;
2097 struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv;
2098 struct device *dev = priv->dev;
2099
2100 unregister_netdev(ndev);
2101 netiucv_unregister_device(dev);
2102 }
2103
2104 driver_remove_file(&netiucv_driver, &driver_attr_connection);
2105 driver_remove_file(&netiucv_driver, &driver_attr_remove);
2106 driver_unregister(&netiucv_driver);
2107 iucv_unregister_dbf_views();
2108
2109 PRINT_INFO("NETIUCV driver unloaded\n");
2110 return;
2111}
2112
2113static int __init
2114netiucv_init(void)
2115{
2116 int ret;
2117
2118 ret = iucv_register_dbf_views();
2119 if (ret) {
2120 PRINT_WARN("netiucv_init failed, "
2121 "iucv_register_dbf_views rc = %d\n", ret);
2122 return ret;
2123 }
2124 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2125 ret = driver_register(&netiucv_driver);
2126 if (ret) {
2127 PRINT_ERR("NETIUCV: failed to register driver.\n");
2128 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", ret);
2129 iucv_unregister_dbf_views();
2130 return ret;
2131 }
2132
2133 /* Add entry for specifying connections. */
2134 ret = driver_create_file(&netiucv_driver, &driver_attr_connection);
2135 if (!ret) {
2136 ret = driver_create_file(&netiucv_driver, &driver_attr_remove);
2137 netiucv_banner();
2138 } else {
2139 PRINT_ERR("NETIUCV: failed to add driver attribute.\n");
2140 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret);
2141 driver_unregister(&netiucv_driver);
2142 iucv_unregister_dbf_views();
2143 }
2144 return ret;
2145}
2146
2147module_init(netiucv_init);
2148module_exit(netiucv_exit);
2149MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
new file mode 100644
index 000000000000..a341041a6cf7
--- /dev/null
+++ b/drivers/s390/net/qeth.h
@@ -0,0 +1,1162 @@
1#ifndef __QETH_H__
2#define __QETH_H__
3
4#include <linux/if.h>
5#include <linux/if_arp.h>
6
7#include <linux/if_tr.h>
8#include <linux/trdevice.h>
9#include <linux/etherdevice.h>
10#include <linux/if_vlan.h>
11
12#include <net/ipv6.h>
13#include <linux/in6.h>
14#include <net/if_inet6.h>
15#include <net/addrconf.h>
16
17
18#include <linux/bitops.h>
19
20#include <asm/debug.h>
21#include <asm/qdio.h>
22#include <asm/ccwdev.h>
23#include <asm/ccwgroup.h>
24
25#include "qeth_mpc.h"
26
27#define VERSION_QETH_H "$Revision: 1.135 $"
28
29#ifdef CONFIG_QETH_IPV6
30#define QETH_VERSION_IPV6 ":IPv6"
31#else
32#define QETH_VERSION_IPV6 ""
33#endif
34#ifdef CONFIG_QETH_VLAN
35#define QETH_VERSION_VLAN ":VLAN"
36#else
37#define QETH_VERSION_VLAN ""
38#endif
39
40/**
41 * Debug Facility stuff
42 */
43#define QETH_DBF_SETUP_NAME "qeth_setup"
44#define QETH_DBF_SETUP_LEN 8
45#define QETH_DBF_SETUP_INDEX 3
46#define QETH_DBF_SETUP_NR_AREAS 1
47#define QETH_DBF_SETUP_LEVEL 5
48
49#define QETH_DBF_MISC_NAME "qeth_misc"
50#define QETH_DBF_MISC_LEN 128
51#define QETH_DBF_MISC_INDEX 1
52#define QETH_DBF_MISC_NR_AREAS 1
53#define QETH_DBF_MISC_LEVEL 2
54
55#define QETH_DBF_DATA_NAME "qeth_data"
56#define QETH_DBF_DATA_LEN 96
57#define QETH_DBF_DATA_INDEX 3
58#define QETH_DBF_DATA_NR_AREAS 1
59#define QETH_DBF_DATA_LEVEL 2
60
61#define QETH_DBF_CONTROL_NAME "qeth_control"
62#define QETH_DBF_CONTROL_LEN 256
63#define QETH_DBF_CONTROL_INDEX 3
64#define QETH_DBF_CONTROL_NR_AREAS 2
65#define QETH_DBF_CONTROL_LEVEL 5
66
67#define QETH_DBF_TRACE_NAME "qeth_trace"
68#define QETH_DBF_TRACE_LEN 8
69#define QETH_DBF_TRACE_INDEX 2
70#define QETH_DBF_TRACE_NR_AREAS 2
71#define QETH_DBF_TRACE_LEVEL 3
72extern debug_info_t *qeth_dbf_trace;
73
74#define QETH_DBF_SENSE_NAME "qeth_sense"
75#define QETH_DBF_SENSE_LEN 64
76#define QETH_DBF_SENSE_INDEX 1
77#define QETH_DBF_SENSE_NR_AREAS 1
78#define QETH_DBF_SENSE_LEVEL 2
79
80#define QETH_DBF_QERR_NAME "qeth_qerr"
81#define QETH_DBF_QERR_LEN 8
82#define QETH_DBF_QERR_INDEX 1
83#define QETH_DBF_QERR_NR_AREAS 2
84#define QETH_DBF_QERR_LEVEL 2
85
86#define QETH_DBF_TEXT(name,level,text) \
87 do { \
88 debug_text_event(qeth_dbf_##name,level,text); \
89 } while (0)
90
91#define QETH_DBF_HEX(name,level,addr,len) \
92 do { \
93 debug_event(qeth_dbf_##name,level,(void*)(addr),len); \
94 } while (0)
95
96DECLARE_PER_CPU(char[256], qeth_dbf_txt_buf);
97
98#define QETH_DBF_TEXT_(name,level,text...) \
99 do { \
100 char* dbf_txt_buf = get_cpu_var(qeth_dbf_txt_buf); \
101 sprintf(dbf_txt_buf, text); \
102 debug_text_event(qeth_dbf_##name,level,dbf_txt_buf); \
103 put_cpu_var(qeth_dbf_txt_buf); \
104 } while (0)
105
106#define QETH_DBF_SPRINTF(name,level,text...) \
107 do { \
108		debug_sprintf_event(qeth_dbf_trace, level, ##text ); \
110 } while (0)
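/*
 * Usage sketch: the plain variants log a fixed string, e.g.
 * QETH_DBF_TEXT(trace, 2, "newcard"); the '_' variant formats into a
 * per-cpu buffer first, e.g. QETH_DBF_TEXT_(trace, 2, "rc=%d", rc).
 * Note that some areas (e.g. trace, 8 bytes) store only a few bytes
 * per entry, so longer strings get truncated.
 */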
111
112/**
113 * some more debug stuff
114 */
115#define PRINTK_HEADER "qeth: "
116
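/* dump the 32 bytes at ptr as two rows of 16 hex byte values */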
117#define HEXDUMP16(importance,header,ptr) \
118PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
119 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
120 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
121 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
122 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
123 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
124 *(((char*)ptr)+12),*(((char*)ptr)+13), \
125 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
126PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
127 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
128 *(((char*)ptr)+16),*(((char*)ptr)+17), \
129 *(((char*)ptr)+18),*(((char*)ptr)+19), \
130 *(((char*)ptr)+20),*(((char*)ptr)+21), \
131 *(((char*)ptr)+22),*(((char*)ptr)+23), \
132 *(((char*)ptr)+24),*(((char*)ptr)+25), \
133 *(((char*)ptr)+26),*(((char*)ptr)+27), \
134 *(((char*)ptr)+28),*(((char*)ptr)+29), \
135 *(((char*)ptr)+30),*(((char*)ptr)+31));
136
137static inline void
138qeth_hex_dump(unsigned char *buf, size_t len)
139{
140 size_t i;
141
142 for (i = 0; i < len; i++) {
143 if (i && !(i % 16))
144 printk("\n");
145 printk("%02x ", *(buf + i));
146 }
147 printk("\n");
148}
149
150#define SENSE_COMMAND_REJECT_BYTE 0
151#define SENSE_COMMAND_REJECT_FLAG 0x80
152#define SENSE_RESETTING_EVENT_BYTE 1
153#define SENSE_RESETTING_EVENT_FLAG 0x80
154
155#define atomic_swap(a,b) xchg((int *)a.counter, b)
156
157/*
158 * Common IO related definitions
159 */
160extern struct device *qeth_root_dev;
161extern struct ccw_driver qeth_ccw_driver;
162extern struct ccwgroup_driver qeth_ccwgroup_driver;
163
164#define CARD_RDEV(card) card->read.ccwdev
165#define CARD_WDEV(card) card->write.ccwdev
166#define CARD_DDEV(card) card->data.ccwdev
167#define CARD_BUS_ID(card) card->gdev->dev.bus_id
168#define CARD_RDEV_ID(card) card->read.ccwdev->dev.bus_id
169#define CARD_WDEV_ID(card) card->write.ccwdev->dev.bus_id
170#define CARD_DDEV_ID(card) card->data.ccwdev->dev.bus_id
171#define CHANNEL_ID(channel) channel->ccwdev->dev.bus_id
172
173#define CARD_FROM_CDEV(cdev) (struct qeth_card *) \
174 ((struct ccwgroup_device *)cdev->dev.driver_data)\
175 ->dev.driver_data;
176
177/**
178 * card stuff
179 */
180#ifdef CONFIG_QETH_PERF_STATS
181struct qeth_perf_stats {
182 unsigned int bufs_rec;
183 unsigned int bufs_sent;
184
185 unsigned int skbs_sent_pack;
186 unsigned int bufs_sent_pack;
187
188 unsigned int sc_dp_p;
189 unsigned int sc_p_dp;
190 /* qdio_input_handler: number of times called, time spent in */
191 __u64 inbound_start_time;
192 unsigned int inbound_cnt;
193 unsigned int inbound_time;
194 /* qeth_send_packet: number of times called, time spent in */
195 __u64 outbound_start_time;
196 unsigned int outbound_cnt;
197 unsigned int outbound_time;
198 /* qdio_output_handler: number of times called, time spent in */
199 __u64 outbound_handler_start_time;
200 unsigned int outbound_handler_cnt;
201 unsigned int outbound_handler_time;
202 /* number of calls to and time spent in do_QDIO for inbound queue */
203 __u64 inbound_do_qdio_start_time;
204 unsigned int inbound_do_qdio_cnt;
205 unsigned int inbound_do_qdio_time;
206 /* number of calls to and time spent in do_QDIO for outbound queues */
207 __u64 outbound_do_qdio_start_time;
208 unsigned int outbound_do_qdio_cnt;
209 unsigned int outbound_do_qdio_time;
210 /* eddp data */
211 unsigned int large_send_bytes;
212 unsigned int large_send_cnt;
213 unsigned int sg_skbs_sent;
214 unsigned int sg_frags_sent;
215};
216#endif /* CONFIG_QETH_PERF_STATS */
217
218/* Routing stuff */
219struct qeth_routing_info {
220 enum qeth_routing_types type;
221};
222
223/* IPA stuff */
224struct qeth_ipa_info {
225 __u32 supported_funcs;
226 __u32 enabled_funcs;
227};
228
229static inline int
230qeth_is_ipa_supported(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func)
231{
232 return (ipa->supported_funcs & func);
233}
234
235static inline int
236qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func)
237{
238 return (ipa->supported_funcs & ipa->enabled_funcs & func);
239}
240
241#define qeth_adp_supported(c,f) \
242 qeth_is_ipa_supported(&c->options.adp, f)
243#define qeth_adp_enabled(c,f) \
244 qeth_is_ipa_enabled(&c->options.adp, f)
245#define qeth_is_supported(c,f) \
246 qeth_is_ipa_supported(&c->options.ipa4, f)
247#define qeth_is_enabled(c,f) \
248 qeth_is_ipa_enabled(&c->options.ipa4, f)
249#ifdef CONFIG_QETH_IPV6
250#define qeth_is_supported6(c,f) \
251 qeth_is_ipa_supported(&c->options.ipa6, f)
252#define qeth_is_enabled6(c,f) \
253 qeth_is_ipa_enabled(&c->options.ipa6, f)
254#else /* CONFIG_QETH_IPV6 */
255#define qeth_is_supported6(c,f) 0
256#define qeth_is_enabled6(c,f) 0
257#endif /* CONFIG_QETH_IPV6 */
258#define qeth_is_ipafunc_supported(c,prot,f) \
259 (prot==QETH_PROT_IPV6)? qeth_is_supported6(c,f):qeth_is_supported(c,f)
260#define qeth_is_ipafunc_enabled(c,prot,f) \
261 (prot==QETH_PROT_IPV6)? qeth_is_enabled6(c,f):qeth_is_enabled(c,f)
262
263
264#define QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT 0x0101
265#define QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT 0x0101
266#define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108
267#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108
268
269#define QETH_MODELLIST_ARRAY \
270 {{0x1731,0x01,0x1732,0x01,QETH_CARD_TYPE_OSAE,1, \
271 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
272 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
273 QETH_MAX_QUEUES,0}, \
274 {0x1731,0x05,0x1732,0x05,QETH_CARD_TYPE_IQD,0, \
275 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \
276 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \
277 QETH_MAX_QUEUES,0x103}, \
278 {0,0,0,0,0,0,0,0,0}}
279
280#define QETH_REAL_CARD 1
281#define QETH_VLAN_CARD 2
282#define QETH_BUFSIZE 4096
283
284/**
285 * some more defs
286 */
287#define IF_NAME_LEN 16
288#define QETH_TX_TIMEOUT		(100 * HZ)
289#define QETH_HEADER_SIZE 32
290#define MAX_PORTNO 15
291#define QETH_FAKE_LL_LEN ETH_HLEN
292#define QETH_FAKE_LL_V6_ADDR_POS 24
293
294/*IPv6 address autoconfiguration stuff*/
295#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
296#define UNIQUE_ID_NOT_BY_CARD 0x10000
297
298/*****************************************************************************/
299/* QDIO queue and buffer handling */
300/*****************************************************************************/
301#define QETH_MAX_QUEUES 4
302#define QETH_IN_BUF_SIZE_DEFAULT 65536
303#define QETH_IN_BUF_COUNT_DEFAULT 16
304#define QETH_IN_BUF_COUNT_MIN 8
305#define QETH_IN_BUF_COUNT_MAX 128
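/* one queue element (SBALE) covers 4KB, so a buffer of in_buf_size
 * bytes needs in_buf_size >> 12 elements */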
306#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
307#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
308 ((card)->qdio.in_buf_pool.buf_count / 2)
309
310/* buffers we have to be behind before we get a PCI */
311#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
312/*enqueued free buffers left before we get a PCI*/
313#define QETH_PCI_THRESHOLD_B(card) 0
314/*not used unless the microcode gets patched*/
315#define QETH_PCI_TIMER_VALUE(card) 3
316
317#define QETH_MIN_INPUT_THRESHOLD 1
318#define QETH_MAX_INPUT_THRESHOLD 500
319#define QETH_MIN_OUTPUT_THRESHOLD 1
320#define QETH_MAX_OUTPUT_THRESHOLD 300
321
322/* priority queueing */
323#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
324#define QETH_DEFAULT_QUEUE 2
325#define QETH_NO_PRIO_QUEUEING 0
326#define QETH_PRIO_Q_ING_PREC 1
327#define QETH_PRIO_Q_ING_TOS 2
328#define IP_TOS_LOWDELAY 0x10
329#define IP_TOS_HIGHTHROUGHPUT 0x08
330#define IP_TOS_HIGHRELIABILITY 0x04
331#define IP_TOS_NOTIMPORTANT 0x02
332
333/* Packing */
334#define QETH_LOW_WATERMARK_PACK 2
335#define QETH_HIGH_WATERMARK_PACK 5
336#define QETH_WATERMARK_PACK_FUZZ 1
337
338#define QETH_IP_HEADER_SIZE 40
339
340struct qeth_hdr_layer3 {
341 __u8 id;
342 __u8 flags;
343 __u16 inbound_checksum; /*TSO:__u16 seqno */
344 __u32 token; /*TSO: __u32 reserved */
345 __u16 length;
346 __u8 vlan_prio;
347 __u8 ext_flags;
348 __u16 vlan_id;
349 __u16 frame_offset;
350 __u8 dest_addr[16];
351} __attribute__ ((packed));
352
353struct qeth_hdr_layer2 {
354 __u8 id;
355 __u8 flags[3];
356 __u8 port_no;
357 __u8 hdr_length;
358 __u16 pkt_length;
359 __u16 seq_no;
360 __u16 vlan_id;
361 __u32 reserved;
362 __u8 reserved2[16];
363} __attribute__ ((packed));
364
365struct qeth_hdr {
366 union {
367 struct qeth_hdr_layer2 l2;
368 struct qeth_hdr_layer3 l3;
369 } hdr;
370} __attribute__ ((packed));
371
372
373/* flags for qeth_hdr.flags */
374#define QETH_HDR_PASSTHRU 0x10
375#define QETH_HDR_IPV6 0x80
376#define QETH_HDR_CAST_MASK 0x07
377enum qeth_cast_flags {
378 QETH_CAST_UNICAST = 0x06,
379 QETH_CAST_MULTICAST = 0x04,
380 QETH_CAST_BROADCAST = 0x05,
381 QETH_CAST_ANYCAST = 0x07,
382 QETH_CAST_NOCAST = 0x00,
383};
384
385enum qeth_layer2_frame_flags {
386 QETH_LAYER2_FLAG_MULTICAST = 0x01,
387 QETH_LAYER2_FLAG_BROADCAST = 0x02,
388 QETH_LAYER2_FLAG_UNICAST = 0x04,
389 QETH_LAYER2_FLAG_VLAN = 0x10,
390};
391
392enum qeth_header_ids {
393 QETH_HEADER_TYPE_LAYER3 = 0x01,
394 QETH_HEADER_TYPE_LAYER2 = 0x02,
395 QETH_HEADER_TYPE_TSO = 0x03,
396};
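/*
 * Editorial note: both header layouts start with the same id byte, so
 * the receive path can inspect it before choosing a layout, e.g.:
 *
 *	if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
 *		(interpret hdr as struct qeth_hdr_layer2)
 */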
397/* flags for qeth_hdr.ext_flags */
398#define QETH_HDR_EXT_VLAN_FRAME 0x01
399#define QETH_HDR_EXT_TOKEN_ID 0x02
400#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04
401#define QETH_HDR_EXT_SRC_MAC_ADDR 0x08
402#define QETH_HDR_EXT_CSUM_HDR_REQ 0x10
403#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
404#define QETH_HDR_EXT_UDP_TSO 0x40 /*bit off for TCP*/
405
406static inline int
407qeth_is_last_sbale(struct qdio_buffer_element *sbale)
408{
409 return (sbale->flags & SBAL_FLAGS_LAST_ENTRY);
410}
411
412enum qeth_qdio_buffer_states {
413 /*
414 * inbound: read out by driver; owned by hardware in order to be filled
415 * outbound: owned by driver in order to be filled
416 */
417 QETH_QDIO_BUF_EMPTY,
418 /*
419 * inbound: filled by hardware; owned by driver in order to be read out
420 * outbound: filled by driver; owned by hardware in order to be sent
421 */
422 QETH_QDIO_BUF_PRIMED,
423};
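/*
 * Resulting lifecycle (editorial addition), from the comments above:
 * inbound buffers cycle EMPTY (hardware fills) -> PRIMED (driver reads
 * out) -> EMPTY; outbound buffers cycle EMPTY (driver fills) -> PRIMED
 * (hardware transmits) -> EMPTY.
 */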
424
425enum qeth_qdio_info_states {
426 QETH_QDIO_UNINITIALIZED,
427 QETH_QDIO_ALLOCATED,
428 QETH_QDIO_ESTABLISHED,
429};
430
431struct qeth_buffer_pool_entry {
432 struct list_head list;
433 struct list_head init_list;
434 void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
435};
436
437struct qeth_qdio_buffer_pool {
438 struct list_head entry_list;
439 int buf_count;
440};
441
442struct qeth_qdio_buffer {
443 struct qdio_buffer *buffer;
444 volatile enum qeth_qdio_buffer_states state;
445 /* the buffer pool entry currently associated to this buffer */
446 struct qeth_buffer_pool_entry *pool_entry;
447};
448
449struct qeth_qdio_q {
450 struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
451 struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
452 /*
453 * buf_to_init means "buffer must be initialized by driver and must
454 * be made available for hardware" -> state is set to EMPTY
455 */
456 volatile int next_buf_to_init;
457} __attribute__ ((aligned(256)));
458
459/* possible types of qeth large_send support */
460enum qeth_large_send_types {
461 QETH_LARGE_SEND_NO,
462 QETH_LARGE_SEND_EDDP,
463 QETH_LARGE_SEND_TSO,
464};
465
466struct qeth_qdio_out_buffer {
467 struct qdio_buffer *buffer;
468 atomic_t state;
469 volatile int next_element_to_fill;
470 struct sk_buff_head skb_list;
471 struct list_head ctx_list;
472};
473
474struct qeth_card;
475
476enum qeth_out_q_states {
477 QETH_OUT_Q_UNLOCKED,
478 QETH_OUT_Q_LOCKED,
479 QETH_OUT_Q_LOCKED_FLUSH,
480};
481
482struct qeth_qdio_out_q {
483 struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
484 struct qeth_qdio_out_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
485 int queue_no;
486 struct qeth_card *card;
487 atomic_t state;
488 volatile int do_pack;
489 /*
490 * index of buffer to be filled by driver; state EMPTY or PACKING
491 */
492 volatile int next_buf_to_fill;
493 /*
494 * number of buffers that are currently filled (PRIMED)
495 * -> these buffers are hardware-owned
496 */
497 atomic_t used_buffers;
498 /* indicates whether PCI flag must be set (or if one is outstanding) */
499 atomic_t set_pci_flags_count;
500} __attribute__ ((aligned(256)));
501
502struct qeth_qdio_info {
503 volatile enum qeth_qdio_info_states state;
504 /* input */
505 struct qeth_qdio_q *in_q;
506 struct qeth_qdio_buffer_pool in_buf_pool;
507 struct qeth_qdio_buffer_pool init_pool;
508 int in_buf_size;
509
510 /* output */
511 int no_out_queues;
512 struct qeth_qdio_out_q **out_qs;
513
514 /* priority queueing */
515 int do_prio_queueing;
516 int default_out_queue;
517};
518
519enum qeth_send_errors {
520 QETH_SEND_ERROR_NONE,
521 QETH_SEND_ERROR_LINK_FAILURE,
522 QETH_SEND_ERROR_RETRY,
523 QETH_SEND_ERROR_KICK_IT,
524};
525
526#define QETH_ETH_MAC_V4 0x0100 /* like v4 */
527#define QETH_ETH_MAC_V6 0x3333 /* like v6 */
528/* the token ring multicast MAC is longer, but this prefix is enough to detect multicast frames */
529#define QETH_TR_MAC_NC 0xc000 /* non-canonical */
530#define QETH_TR_MAC_C 0x0300 /* canonical */
531
532#define DEFAULT_ADD_HHLEN 0
533#define MAX_ADD_HHLEN 1024
534
535/**
536 * command buffers for the read channel
537 */
538#define QETH_CMD_BUFFER_NO 8
539
540/**
541 * channel state machine
542 */
543enum qeth_channel_states {
544 CH_STATE_UP,
545 CH_STATE_DOWN,
546 CH_STATE_ACTIVATING,
547 CH_STATE_HALTED,
548 CH_STATE_STOPPED,
549};
550/**
551 * card state machine
552 */
553enum qeth_card_states {
554 CARD_STATE_DOWN,
555 CARD_STATE_HARDSETUP,
556 CARD_STATE_SOFTSETUP,
557 CARD_STATE_UP,
558 CARD_STATE_RECOVER,
559};
560
561/**
562 * Protocol versions
563 */
564enum qeth_prot_versions {
565 QETH_PROT_SNA = 0x0001,
566 QETH_PROT_IPV4 = 0x0004,
567 QETH_PROT_IPV6 = 0x0006,
568};
569
570enum qeth_ip_types {
571 QETH_IP_TYPE_NORMAL,
572 QETH_IP_TYPE_VIPA,
573 QETH_IP_TYPE_RXIP,
574 QETH_IP_TYPE_DEL_ALL_MC,
575};
576
577enum qeth_cmd_buffer_state {
578 BUF_STATE_FREE,
579 BUF_STATE_LOCKED,
580 BUF_STATE_PROCESSED,
581};
582/**
583 * IP address and multicast list
584 */
585struct qeth_ipaddr {
586 struct list_head entry;
587 enum qeth_ip_types type;
588 enum qeth_ipa_setdelip_flags set_flags;
589 enum qeth_ipa_setdelip_flags del_flags;
590 int is_multicast;
591 volatile int users;
592 enum qeth_prot_versions proto;
593 unsigned char mac[OSA_ADDR_LEN];
594 union {
595 struct {
596 unsigned int addr;
597 unsigned int mask;
598 } a4;
599 struct {
600 struct in6_addr addr;
601 unsigned int pfxlen;
602 } a6;
603 } u;
604};
605
606struct qeth_ipato_entry {
607 struct list_head entry;
608 enum qeth_prot_versions proto;
609 char addr[16];
610 int mask_bits;
611};
612
613struct qeth_ipato {
614 int enabled;
615 int invert4;
616 int invert6;
617 struct list_head entries;
618};
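/*
 * Example (editorial addition, semantics as understood from the driver):
 * with enabled set, an IPv4 entry of addr = 192.168.0.0 and
 * mask_bits = 16 marks every address in 192.168.0.0/16 for IP address
 * takeover; invert4/invert6 invert the match for the respective
 * protocol.
 */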
619
620struct qeth_channel;
621
622struct qeth_cmd_buffer {
623 enum qeth_cmd_buffer_state state;
624 struct qeth_channel *channel;
625 unsigned char *data;
626 int rc;
627 void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
628};
629
630
631/**
632 * definition of a qeth channel, used for read and write
633 */
634struct qeth_channel {
635 enum qeth_channel_states state;
636 struct ccw1 ccw;
637 spinlock_t iob_lock;
638 wait_queue_head_t wait_q;
639 struct tasklet_struct irq_tasklet;
640 struct ccw_device *ccwdev;
641/*command buffer for control data*/
642 struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
643 atomic_t irq_pending;
644 volatile int io_buf_no;
645 volatile int buf_no;
646};
647
648/**
649 * OSA card related definitions
650 */
651struct qeth_token {
652 __u32 issuer_rm_w;
653 __u32 issuer_rm_r;
654 __u32 cm_filter_w;
655 __u32 cm_filter_r;
656 __u32 cm_connection_w;
657 __u32 cm_connection_r;
658 __u32 ulp_filter_w;
659 __u32 ulp_filter_r;
660 __u32 ulp_connection_w;
661 __u32 ulp_connection_r;
662};
663
664struct qeth_seqno {
665 __u32 trans_hdr;
666 __u32 pdu_hdr;
667 __u32 pdu_hdr_ack;
668 __u16 ipa;
669};
670
671struct qeth_reply {
672 struct list_head list;
673 wait_queue_head_t wait_q;
674 int (*callback)(struct qeth_card *,struct qeth_reply *,unsigned long);
675 u32 seqno;
676 unsigned long offset;
677 int received;
678 int rc;
679 void *param;
680 struct qeth_card *card;
681 atomic_t refcnt;
682};
683
684#define QETH_BROADCAST_WITH_ECHO 1
685#define QETH_BROADCAST_WITHOUT_ECHO 2
686
687struct qeth_card_blkt {
688 int time_total;
689 int inter_packet;
690 int inter_packet_jumbo;
691};
692
693
694
695struct qeth_card_info {
696 unsigned short unit_addr2;
697 unsigned short cula;
698 unsigned short chpid;
699 __u16 func_level;
700 char mcl_level[QETH_MCL_LENGTH + 1];
701 int guestlan;
702 int layer2_mac_registered;
703 int portname_required;
704 int portno;
705 char portname[9];
706 enum qeth_card_types type;
707 enum qeth_link_types link_type;
708 int is_multicast_different;
709 int initial_mtu;
710 int max_mtu;
711 int broadcast_capable;
712 int unique_id;
713 struct qeth_card_blkt blkt;
714 __u32 csum_mask;
715};
716
717struct qeth_card_options {
718 struct qeth_routing_info route4;
719 struct qeth_ipa_info ipa4;
720 struct qeth_ipa_info adp; /*Adapter parameters*/
721#ifdef CONFIG_QETH_IPV6
722 struct qeth_routing_info route6;
723 struct qeth_ipa_info ipa6;
724#endif /* CONFIG_QETH_IPV6 */
725 enum qeth_checksum_types checksum_type;
726 int broadcast_mode;
727 int macaddr_mode;
728 int fake_broadcast;
729 int add_hhlen;
730 int fake_ll;
731 int layer2;
732 enum qeth_large_send_types large_send;
733};
734
735/*
736 * thread bits for qeth_card thread masks
737 */
738enum qeth_threads {
739 QETH_SET_IP_THREAD = 1,
740 QETH_RECOVER_THREAD = 2,
741};
742
743struct qeth_card {
744 struct list_head list;
745 enum qeth_card_states state;
746 int lan_online;
747 spinlock_t lock;
748/*hardware and sysfs stuff*/
749 struct ccwgroup_device *gdev;
750 struct qeth_channel read;
751 struct qeth_channel write;
752 struct qeth_channel data;
753
754 struct net_device *dev;
755 struct net_device_stats stats;
756
757 struct qeth_card_info info;
758 struct qeth_token token;
759 struct qeth_seqno seqno;
760 struct qeth_card_options options;
761
762 wait_queue_head_t wait_q;
763#ifdef CONFIG_QETH_VLAN
764 spinlock_t vlanlock;
765 struct vlan_group *vlangrp;
766#endif
767 struct work_struct kernel_thread_starter;
768 spinlock_t thread_mask_lock;
769 volatile unsigned long thread_start_mask;
770 volatile unsigned long thread_allowed_mask;
771 volatile unsigned long thread_running_mask;
772 spinlock_t ip_lock;
773 struct list_head ip_list;
774 struct list_head *ip_tbd_list;
775 struct qeth_ipato ipato;
776 struct list_head cmd_waiter_list;
777 /* QDIO buffer handling */
778 struct qeth_qdio_info qdio;
779#ifdef CONFIG_QETH_PERF_STATS
780 struct qeth_perf_stats perf_stats;
781#endif /* CONFIG_QETH_PERF_STATS */
782 int use_hard_stop;
783 int (*orig_hard_header)(struct sk_buff *,struct net_device *,
784 unsigned short,void *,void *,unsigned);
785};
786
787struct qeth_card_list_struct {
788 struct list_head list;
789 rwlock_t rwlock;
790};
791
792extern struct qeth_card_list_struct qeth_card_list;
793
794/*notifier list */
795struct qeth_notify_list_struct {
796 struct list_head list;
797 struct task_struct *task;
798 int signum;
799};
800extern spinlock_t qeth_notify_lock;
801extern struct list_head qeth_notify_list;
802
803/*some helper functions*/
804
805#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
806
807static inline __u8
808qeth_get_ipa_adp_type(enum qeth_link_types link_type)
809{
810 switch (link_type) {
811 case QETH_LINK_TYPE_HSTR:
812 return 2;
813 default:
814 return 1;
815 }
816}
817
818static inline int
819qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size)
820{
821 struct sk_buff *new_skb = NULL;
822
823 if (skb_headroom(*skb) < size){
824 new_skb = skb_realloc_headroom(*skb, size);
825 if (!new_skb) {
826			PRINT_ERR("qeth_realloc_headroom: could "
827				"not realloc headroom for qeth_hdr "
828				"on interface %s\n", QETH_CARD_IFNAME(card));
829 return -ENOMEM;
830 }
831 *skb = new_skb;
832 }
833 return 0;
834}
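/*
 * Editorial note on the helper below: like skb_unshare(), it replaces a
 * cloned skb with a private full copy via skb_copy() and drops the
 * shared reference. On allocation failure it returns NULL with the
 * original skb already freed, so callers must not reuse the old pointer.
 */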
835static inline struct sk_buff *
836qeth_pskb_unshare(struct sk_buff *skb, int pri)
837{
838 struct sk_buff *nskb;
839 if (!skb_cloned(skb))
840 return skb;
841 nskb = skb_copy(skb, pri);
842 kfree_skb(skb); /* free our shared copy */
843 return nskb;
844}
845
846
847static inline void *
848qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size)
849{
850 void *hdr;
851
852 hdr = (void *) skb_push(*skb, size);
853 /*
854	 * sanity check: the Linux memory allocation scheme should
855	 * never present us with cases like this one (the qdio header size
856	 * plus the first 40 bytes of the packet crossing a 4k boundary)
857 */
858 if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) !=
859 (((unsigned long) hdr + size +
860 QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) {
861		PRINT_ERR("qeth_push_skb: misaligned "
862			"packet on interface %s. Discarded.\n",
863			QETH_CARD_IFNAME(card));
864 return NULL;
865 }
866 return hdr;
867}
868
869static inline int
870qeth_get_hlen(__u8 link_type)
871{
872#ifdef CONFIG_QETH_IPV6
873 switch (link_type) {
874 case QETH_LINK_TYPE_HSTR:
875 case QETH_LINK_TYPE_LANE_TR:
876 return sizeof(struct qeth_hdr) + TR_HLEN;
877 default:
878#ifdef CONFIG_QETH_VLAN
879 return sizeof(struct qeth_hdr) + VLAN_ETH_HLEN;
880#else
881 return sizeof(struct qeth_hdr) + ETH_HLEN;
882#endif
883 }
884#else /* CONFIG_QETH_IPV6 */
885#ifdef CONFIG_QETH_VLAN
886 return sizeof(struct qeth_hdr) + VLAN_HLEN;
887#else
888 return sizeof(struct qeth_hdr);
889#endif
890#endif /* CONFIG_QETH_IPV6 */
891}
892
893static inline unsigned short
894qeth_get_netdev_flags(struct qeth_card *card)
895{
896 if (card->options.layer2)
897 return 0;
898 switch (card->info.type) {
899 case QETH_CARD_TYPE_IQD:
900 return IFF_NOARP;
901#ifdef CONFIG_QETH_IPV6
902 default:
903 return 0;
904#else
905 default:
906 return IFF_NOARP;
907#endif
908 }
909}
910
911static inline int
912qeth_get_initial_mtu_for_card(struct qeth_card *card)
913{
914 switch (card->info.type) {
915 case QETH_CARD_TYPE_UNKNOWN:
916 return 1500;
917 case QETH_CARD_TYPE_IQD:
918 return card->info.max_mtu;
919 case QETH_CARD_TYPE_OSAE:
920 switch (card->info.link_type) {
921 case QETH_LINK_TYPE_HSTR:
922 case QETH_LINK_TYPE_LANE_TR:
923 return 2000;
924 default:
925 return 1492;
926 }
927 default:
928 return 1500;
929 }
930}
931
932static inline int
933qeth_get_max_mtu_for_card(int cardtype)
934{
935 switch (cardtype) {
936 case QETH_CARD_TYPE_UNKNOWN:
937 return 61440;
938 case QETH_CARD_TYPE_OSAE:
939 return 61440;
940 case QETH_CARD_TYPE_IQD:
941 return 57344;
942 default:
943 return 1500;
944 }
945}
946
947static inline int
948qeth_get_mtu_out_of_mpc(int cardtype)
949{
950 switch (cardtype) {
951 case QETH_CARD_TYPE_IQD:
952 return 1;
953 default:
954 return 0;
955 }
956}
957
958static inline int
959qeth_get_mtu_outof_framesize(int framesize)
960{
961 switch (framesize) {
962 case 0x4000:
963 return 8192;
964 case 0x6000:
965 return 16384;
966 case 0xa000:
967 return 32768;
968 case 0xffff:
969 return 57344;
970 default:
971 return 0;
972 }
973}
974
975static inline int
976qeth_mtu_is_valid(struct qeth_card *card, int mtu)
977{
978 switch (card->info.type) {
979 case QETH_CARD_TYPE_OSAE:
980 return ((mtu >= 576) && (mtu <= 61440));
981 case QETH_CARD_TYPE_IQD:
982 return ((mtu >= 576) &&
983 (mtu <= card->info.max_mtu + 4096 - 32));
984 case QETH_CARD_TYPE_UNKNOWN:
985 default:
986 return 1;
987 }
988}
989
990static inline int
991qeth_get_arphdr_type(int cardtype, int linktype)
992{
993 switch (cardtype) {
994 case QETH_CARD_TYPE_OSAE:
995 switch (linktype) {
996 case QETH_LINK_TYPE_LANE_TR:
997 case QETH_LINK_TYPE_HSTR:
998 return ARPHRD_IEEE802_TR;
999 default:
1000 return ARPHRD_ETHER;
1001 }
1002 case QETH_CARD_TYPE_IQD:
1003 default:
1004 return ARPHRD_ETHER;
1005 }
1006}
1007
1008#ifdef CONFIG_QETH_PERF_STATS
1009static inline int
1010qeth_get_micros(void)
1011{
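	/* TOD clock format: bit 51 equals 1 microsecond, so shifting the
	 * 64-bit clock value right by 12 yields microseconds. */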
1012 return (int) (get_clock() >> 12);
1013}
1014#endif
1015
1016static inline int
1017qeth_get_qdio_q_format(struct qeth_card *card)
1018{
1019 switch (card->info.type) {
1020 case QETH_CARD_TYPE_IQD:
1021 return 2;
1022 default:
1023 return 0;
1024 }
1025}
1026
1027static inline void
1028qeth_ipaddr4_to_string(const __u8 *addr, char *buf)
1029{
1030 sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]);
1031}
1032
1033static inline int
1034qeth_string_to_ipaddr4(const char *buf, __u8 *addr)
1035{
1036 const char *start, *end;
1037 char abuf[4];
1038 char *tmp;
1039 int len;
1040 int i;
1041
1042 start = buf;
1043 for (i = 0; i < 3; i++) {
1044 if (!(end = strchr(start, '.')))
1045 return -EINVAL;
1046 len = end - start;
1047 memset(abuf, 0, 4);
1048		strncpy(abuf, start, len < 3 ? len : 3); /* cap at 3 digits; abuf[3] stays 0 */
1049 addr[i] = simple_strtoul(abuf, &tmp, 10);
1050 start = end + 1;
1051 }
1052 memset(abuf, 0, 4);
1053	strncpy(abuf, start, 3);	/* abuf[3] stays 0 from memset */
1054 addr[3] = simple_strtoul(abuf, &tmp, 10);
1055 return 0;
1056}
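/*
 * Worked example (editorial addition): qeth_string_to_ipaddr4() applied
 * to "10.1.2.3" fills addr[] with { 10, 1, 2, 3 }; a string without
 * three dots is rejected with -EINVAL. qeth_ipaddr4_to_string() is the
 * inverse and needs a buffer of at least 16 bytes:
 *
 *	char buf[16];
 *	qeth_ipaddr4_to_string(addr, buf);
 */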
1057
1058static inline void
1059qeth_ipaddr6_to_string(const __u8 *addr, char *buf)
1060{
1061 sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
1062 ":%02x%02x:%02x%02x:%02x%02x:%02x%02x",
1063 addr[0], addr[1], addr[2], addr[3],
1064 addr[4], addr[5], addr[6], addr[7],
1065 addr[8], addr[9], addr[10], addr[11],
1066 addr[12], addr[13], addr[14], addr[15]);
1067}
1068
1069static inline int
1070qeth_string_to_ipaddr6(const char *buf, __u8 *addr)
1071{
1072 const char *start, *end;
1073 u16 *tmp_addr;
1074 char abuf[5];
1075 char *tmp;
1076 int len;
1077 int i;
1078
1079 tmp_addr = (u16 *)addr;
1080 start = buf;
1081 for (i = 0; i < 7; i++) {
1082 if (!(end = strchr(start, ':')))
1083 return -EINVAL;
1084 len = end - start;
1085 memset(abuf, 0, 5);
1086		strncpy(abuf, start, len < 4 ? len : 4); /* cap at 4 hex digits; abuf[4] stays 0 */
1087 tmp_addr[i] = simple_strtoul(abuf, &tmp, 16);
1088 start = end + 1;
1089 }
1090 memset(abuf, 0, 5);
1091	strncpy(abuf, start, 4);	/* abuf[4] stays 0 from memset */
1092 tmp_addr[7] = simple_strtoul(abuf, &tmp, 16);
1093 return 0;
1094}
1095
1096static inline void
1097qeth_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
1098 char *buf)
1099{
1100 if (proto == QETH_PROT_IPV4)
1101 return qeth_ipaddr4_to_string(addr, buf);
1102 else if (proto == QETH_PROT_IPV6)
1103 return qeth_ipaddr6_to_string(addr, buf);
1104}
1105
1106static inline int
1107qeth_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
1108 __u8 *addr)
1109{
1110 if (proto == QETH_PROT_IPV4)
1111 return qeth_string_to_ipaddr4(buf, addr);
1112 else if (proto == QETH_PROT_IPV6)
1113 return qeth_string_to_ipaddr6(buf, addr);
1114 else
1115 return -EINVAL;
1116}
1117
1118extern int
1119qeth_setrouting_v4(struct qeth_card *);
1120extern int
1121qeth_setrouting_v6(struct qeth_card *);
1122
1123extern int
1124qeth_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
1125
1126extern void
1127qeth_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions, u8 *, int);
1128
1129extern int
1130qeth_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1131
1132extern void
1133qeth_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1134
1135extern int
1136qeth_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1137
1138extern void
1139qeth_del_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1140
1141extern int
1142qeth_notifier_register(struct task_struct *, int );
1143
1144extern int
1145qeth_notifier_unregister(struct task_struct * );
1146
1147extern void
1148qeth_schedule_recovery(struct qeth_card *);
1149
1150extern int
1151qeth_realloc_buffer_pool(struct qeth_card *, int);
1152
1153extern int
1154qeth_set_large_send(struct qeth_card *);
1155
1156extern void
1157qeth_fill_header(struct qeth_card *, struct qeth_hdr *,
1158 struct sk_buff *, int, int);
1159extern void
1160qeth_flush_buffers(struct qeth_qdio_out_q *, int, int, int);
1161
1162#endif /* __QETH_H__ */
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
new file mode 100644
index 000000000000..7ee1c06ed68a
--- /dev/null
+++ b/drivers/s390/net/qeth_eddp.c
@@ -0,0 +1,643 @@
1/*
2 *
3 * linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.11 $)
4 *
5 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
6 *
7 * Copyright 2004 IBM Corporation
8 *
9 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
10 *
11 * $Revision: 1.11 $ $Date: 2005/03/24 09:04:18 $
12 *
13 */
14#include <linux/config.h>
15#include <linux/errno.h>
16#include <linux/ip.h>
17#include <linux/inetdevice.h>
18#include <linux/netdevice.h>
19#include <linux/kernel.h>
20#include <linux/tcp.h>
21#include <net/tcp.h>
22#include <linux/skbuff.h>
23
24#include <net/ip.h>
25
26#include "qeth.h"
27#include "qeth_mpc.h"
28#include "qeth_eddp.h"
29
30int
31qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
32 struct qeth_eddp_context *ctx)
33{
34 int index = queue->next_buf_to_fill;
35 int elements_needed = ctx->num_elements;
36 int elements_in_buffer;
37 int skbs_in_buffer;
38 int buffers_needed = 0;
39
40 QETH_DBF_TEXT(trace, 5, "eddpcbfc");
41 while(elements_needed > 0) {
42 buffers_needed++;
43 if (atomic_read(&queue->bufs[index].state) !=
44 QETH_QDIO_BUF_EMPTY)
45 return -EBUSY;
46
47 elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
48 queue->bufs[index].next_element_to_fill;
49 skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
50 elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
51 index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
52 }
53 return buffers_needed;
54}
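/*
 * Caller pattern (editorial addition): a return value >= 0 is the number
 * of queue buffers the context will occupy, -EBUSY means a required
 * buffer is not yet EMPTY. A sender might check:
 *
 *	int needed = qeth_eddp_check_buffers_for_context(queue, ctx);
 *	if (needed < 0)
 *		return needed;	(requeue or drop the skb)
 */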
55
56static inline void
57qeth_eddp_free_context(struct qeth_eddp_context *ctx)
58{
59 int i;
60
61 QETH_DBF_TEXT(trace, 5, "eddpfctx");
62 for (i = 0; i < ctx->num_pages; ++i)
63 free_page((unsigned long)ctx->pages[i]);
64 kfree(ctx->pages);
65 if (ctx->elements != NULL)
66 kfree(ctx->elements);
67 kfree(ctx);
68}
69
70
71static inline void
72qeth_eddp_get_context(struct qeth_eddp_context *ctx)
73{
74 atomic_inc(&ctx->refcnt);
75}
76
77void
78qeth_eddp_put_context(struct qeth_eddp_context *ctx)
79{
80 if (atomic_dec_return(&ctx->refcnt) == 0)
81 qeth_eddp_free_context(ctx);
82}
83
84void
85qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
86{
87 struct qeth_eddp_context_reference *ref;
88
89 QETH_DBF_TEXT(trace, 6, "eddprctx");
90 while (!list_empty(&buf->ctx_list)){
91 ref = list_entry(buf->ctx_list.next,
92 struct qeth_eddp_context_reference, list);
93 qeth_eddp_put_context(ref->ctx);
94 list_del(&ref->list);
95 kfree(ref);
96 }
97}
98
99static inline int
100qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
101 struct qeth_eddp_context *ctx)
102{
103 struct qeth_eddp_context_reference *ref;
104
105 QETH_DBF_TEXT(trace, 6, "eddprfcx");
106 ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
107 if (ref == NULL)
108 return -ENOMEM;
109 qeth_eddp_get_context(ctx);
110 ref->ctx = ctx;
111 list_add_tail(&ref->list, &buf->ctx_list);
112 return 0;
113}
114
115int
116qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
117 struct qeth_eddp_context *ctx,
118 int index)
119{
120 struct qeth_qdio_out_buffer *buf = NULL;
121 struct qdio_buffer *buffer;
122 int elements = ctx->num_elements;
123 int element = 0;
124 int flush_cnt = 0;
125 int must_refcnt = 1;
126 int i;
127
128 QETH_DBF_TEXT(trace, 5, "eddpfibu");
129 while (elements > 0) {
130 buf = &queue->bufs[index];
131 if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY){
132			/* normally this should not happen since we checked for
133			 * available elements in qeth_eddp_check_buffers_for_context
134 */
135 if (element == 0)
136 return -EBUSY;
137 else {
138 PRINT_WARN("could only partially fill eddp "
139 "buffer!\n");
140 goto out;
141 }
142 }
143 /* check if the whole next skb fits into current buffer */
144 if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
145 buf->next_element_to_fill)
146 < ctx->elements_per_skb){
147 /* no -> go to next buffer */
148 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
149 index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
150 flush_cnt++;
151			/* new buffer, so we have to add ctx to the buffer's ctx_list
152 * and increment ctx's refcnt */
153 must_refcnt = 1;
154 continue;
155 }
156 if (must_refcnt){
157 must_refcnt = 0;
158 if (qeth_eddp_buf_ref_context(buf, ctx)){
159 PRINT_WARN("no memory to create eddp context "
160 "reference\n");
161 goto out_check;
162 }
163 }
164 buffer = buf->buffer;
165 /* fill one skb into buffer */
166 for (i = 0; i < ctx->elements_per_skb; ++i){
167 buffer->element[buf->next_element_to_fill].addr =
168 ctx->elements[element].addr;
169 buffer->element[buf->next_element_to_fill].length =
170 ctx->elements[element].length;
171 buffer->element[buf->next_element_to_fill].flags =
172 ctx->elements[element].flags;
173 buf->next_element_to_fill++;
174 element++;
175 elements--;
176 }
177 }
178out_check:
179 if (!queue->do_pack) {
180 QETH_DBF_TEXT(trace, 6, "fillbfnp");
181 /* set state to PRIMED -> will be flushed */
182 if (buf->next_element_to_fill > 0){
183 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
184 flush_cnt++;
185 }
186 } else {
187#ifdef CONFIG_QETH_PERF_STATS
188 queue->card->perf_stats.skbs_sent_pack++;
189#endif
190 QETH_DBF_TEXT(trace, 6, "fillbfpa");
191 if (buf->next_element_to_fill >=
192 QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
193 /*
194			 * packed buffer is full -> set state PRIMED
195 * -> will be flushed
196 */
197 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
198 flush_cnt++;
199 }
200 }
201out:
202 return flush_cnt;
203}
204
205static inline int
206qeth_get_skb_data_len(struct sk_buff *skb)
207{
208 int len = skb->len;
209 int i;
210
211 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i)
212 len -= skb_shinfo(skb)->frags[i].size;
213 return len;
214}
215
216static inline void
217qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
218 struct qeth_eddp_data *eddp)
219{
220 u8 *page;
221 int page_remainder;
222 int page_offset;
223 int hdr_len;
224 struct qeth_eddp_element *element;
225
226 QETH_DBF_TEXT(trace, 5, "eddpcrsh");
227 page = ctx->pages[ctx->offset >> PAGE_SHIFT];
228 page_offset = ctx->offset % PAGE_SIZE;
229 element = &ctx->elements[ctx->num_elements];
230 hdr_len = eddp->nhl + eddp->thl;
231 /* FIXME: layer2 and VLAN !!! */
232 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
233 hdr_len += ETH_HLEN;
234 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
235 hdr_len += VLAN_HLEN;
236 /* does complete header fit in current page ? */
237 page_remainder = PAGE_SIZE - page_offset;
238 if (page_remainder < (sizeof(struct qeth_hdr) + hdr_len)){
239 /* no -> go to start of next page */
240 ctx->offset += page_remainder;
241 page = ctx->pages[ctx->offset >> PAGE_SHIFT];
242 page_offset = 0;
243 }
244 memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
245 element->addr = page + page_offset;
246 element->length = sizeof(struct qeth_hdr);
247 ctx->offset += sizeof(struct qeth_hdr);
248 page_offset += sizeof(struct qeth_hdr);
249 /* add mac header (?) */
250 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
251 memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
252 element->length += ETH_HLEN;
253 ctx->offset += ETH_HLEN;
254 page_offset += ETH_HLEN;
255 }
256 /* add VLAN tag */
257 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)){
258 memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
259 element->length += VLAN_HLEN;
260 ctx->offset += VLAN_HLEN;
261 page_offset += VLAN_HLEN;
262 }
263 /* add network header */
264 memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
265 element->length += eddp->nhl;
266 eddp->nh_in_ctx = page + page_offset;
267 ctx->offset += eddp->nhl;
268 page_offset += eddp->nhl;
269 /* add transport header */
270 memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
271 element->length += eddp->thl;
272 eddp->th_in_ctx = page + page_offset;
273 ctx->offset += eddp->thl;
274}
275
276static inline void
277qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
278 u32 *hcsum)
279{
280 struct skb_frag_struct *frag;
281 int left_in_frag;
282 int copy_len;
283 u8 *src;
284
285 QETH_DBF_TEXT(trace, 5, "eddpcdtc");
286 if (skb_shinfo(eddp->skb)->nr_frags == 0) {
287 memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
288 *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
289 *hcsum);
290 eddp->skb_offset += len;
291 } else {
292 while (len > 0) {
293 if (eddp->frag < 0) {
294 /* we're in skb->data */
295 left_in_frag = qeth_get_skb_data_len(eddp->skb)
296 - eddp->skb_offset;
297 src = eddp->skb->data + eddp->skb_offset;
298 } else {
299 frag = &skb_shinfo(eddp->skb)->
300 frags[eddp->frag];
301 left_in_frag = frag->size - eddp->frag_offset;
302 src = (u8 *)(
303 (page_to_pfn(frag->page) << PAGE_SHIFT)+
304 frag->page_offset + eddp->frag_offset);
305 }
306 if (left_in_frag <= 0) {
307 eddp->frag++;
308 eddp->frag_offset = 0;
309 continue;
310 }
311 copy_len = min(left_in_frag, len);
312 memcpy(dst, src, copy_len);
313 *hcsum = csum_partial(src, copy_len, *hcsum);
314 dst += copy_len;
315 eddp->frag_offset += copy_len;
316 eddp->skb_offset += copy_len;
317 len -= copy_len;
318 }
319 }
320}
321
322static inline void
323qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
324 struct qeth_eddp_data *eddp, int data_len,
325 u32 hcsum)
326{
327 u8 *page;
328 int page_remainder;
329 int page_offset;
330 struct qeth_eddp_element *element;
331 int first_lap = 1;
332
333 QETH_DBF_TEXT(trace, 5, "eddpcsdt");
334 page = ctx->pages[ctx->offset >> PAGE_SHIFT];
335 page_offset = ctx->offset % PAGE_SIZE;
336 element = &ctx->elements[ctx->num_elements];
337 while (data_len){
338 page_remainder = PAGE_SIZE - page_offset;
339 if (page_remainder < data_len){
340 qeth_eddp_copy_data_tcp(page + page_offset, eddp,
341 page_remainder, &hcsum);
342 element->length += page_remainder;
343 if (first_lap)
344 element->flags = SBAL_FLAGS_FIRST_FRAG;
345 else
346 element->flags = SBAL_FLAGS_MIDDLE_FRAG;
347 ctx->num_elements++;
348 element++;
349 data_len -= page_remainder;
350 ctx->offset += page_remainder;
351 page = ctx->pages[ctx->offset >> PAGE_SHIFT];
352 page_offset = 0;
353 element->addr = page + page_offset;
354 } else {
355 qeth_eddp_copy_data_tcp(page + page_offset, eddp,
356 data_len, &hcsum);
357 element->length += data_len;
358 if (!first_lap)
359 element->flags = SBAL_FLAGS_LAST_FRAG;
360 ctx->num_elements++;
361 ctx->offset += data_len;
362 data_len = 0;
363 }
364 first_lap = 0;
365 }
366 ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
367}
368
369static inline u32
370qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
371{
372 u32 phcsum; /* pseudo header checksum */
373
374 QETH_DBF_TEXT(trace, 5, "eddpckt4");
375 eddp->th.tcp.h.check = 0;
376 /* compute pseudo header checksum */
377 phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
378 eddp->thl + data_len, IPPROTO_TCP, 0);
379 /* compute checksum of tcp header */
380 return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
381}
382
383static inline u32
384qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
385{
386 u32 proto;
387 u32 phcsum; /* pseudo header checksum */
388
389 QETH_DBF_TEXT(trace, 5, "eddpckt6");
390 eddp->th.tcp.h.check = 0;
391 /* compute pseudo header checksum */
392 phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
393 sizeof(struct in6_addr), 0);
394 phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
395 sizeof(struct in6_addr), phcsum);
396 proto = htonl(IPPROTO_TCP);
397 phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
398 return phcsum;
399}
400
401static inline struct qeth_eddp_data *
402qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
403{
404 struct qeth_eddp_data *eddp;
405
406 QETH_DBF_TEXT(trace, 5, "eddpcrda");
407 eddp = kmalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
408 if (eddp){
409 memset(eddp, 0, sizeof(struct qeth_eddp_data));
410 eddp->nhl = nhl;
411 eddp->thl = thl;
412 memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
413 memcpy(&eddp->nh, nh, nhl);
414 memcpy(&eddp->th, th, thl);
415 eddp->frag = -1; /* initially we're in skb->data */
416 }
417 return eddp;
418}
419
420static inline void
421__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
422 struct qeth_eddp_data *eddp)
423{
424 struct tcphdr *tcph;
425 int data_len;
426 u32 hcsum;
427
428 QETH_DBF_TEXT(trace, 5, "eddpftcp");
429 eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
430 tcph = eddp->skb->h.th;
431 while (eddp->skb_offset < eddp->skb->len) {
432 data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
433 (int)(eddp->skb->len - eddp->skb_offset));
434 /* prepare qdio hdr */
435 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
436 eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
437 eddp->nhl + eddp->thl -
438 sizeof(struct qeth_hdr);
439#ifdef CONFIG_QETH_VLAN
440 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
441 eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
442#endif /* CONFIG_QETH_VLAN */
443 } else
444 eddp->qh.hdr.l3.length = data_len + eddp->nhl +
445 eddp->thl;
446 /* prepare ip hdr */
447 if (eddp->skb->protocol == ETH_P_IP){
448 eddp->nh.ip4.h.tot_len = data_len + eddp->nhl +
449 eddp->thl;
450 eddp->nh.ip4.h.check = 0;
451 eddp->nh.ip4.h.check =
452 ip_fast_csum((u8 *)&eddp->nh.ip4.h,
453 eddp->nh.ip4.h.ihl);
454 } else
455 eddp->nh.ip6.h.payload_len = data_len + eddp->thl;
456 /* prepare tcp hdr */
457 if (data_len == (eddp->skb->len - eddp->skb_offset)){
458 /* last segment -> set FIN and PSH flags */
459 eddp->th.tcp.h.fin = tcph->fin;
460 eddp->th.tcp.h.psh = tcph->psh;
461 }
462 if (eddp->skb->protocol == ETH_P_IP)
463 hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
464 else
465 hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
466 /* fill the next segment into the context */
467 qeth_eddp_create_segment_hdrs(ctx, eddp);
468 qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
469 if (eddp->skb_offset >= eddp->skb->len)
470 break;
471 /* prepare headers for next round */
472 if (eddp->skb->protocol == ETH_P_IP)
473 eddp->nh.ip4.h.id++;
474 eddp->th.tcp.h.seq += data_len;
475 }
476}
477
478static inline int
479qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
480 struct sk_buff *skb, struct qeth_hdr *qhdr)
481{
482 struct qeth_eddp_data *eddp = NULL;
483
484 QETH_DBF_TEXT(trace, 5, "eddpficx");
485 /* create our segmentation headers and copy original headers */
486 if (skb->protocol == ETH_P_IP)
487 eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
488 skb->nh.iph->ihl*4,
489 (u8 *)skb->h.th, skb->h.th->doff*4);
490 else
491 eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h,
492 sizeof(struct ipv6hdr),
493 (u8 *)skb->h.th, skb->h.th->doff*4);
494
495 if (eddp == NULL) {
496 QETH_DBF_TEXT(trace, 2, "eddpfcnm");
497 return -ENOMEM;
498 }
499 if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
500 memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
501#ifdef CONFIG_QETH_VLAN
502 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
503 eddp->vlan[0] = __constant_htons(skb->protocol);
504 eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
505 }
506#endif /* CONFIG_QETH_VLAN */
507 }
508 /* the next flags will only be set on the last segment */
509 eddp->th.tcp.h.fin = 0;
510 eddp->th.tcp.h.psh = 0;
511 eddp->skb = skb;
512 /* begin segmentation and fill context */
513 __qeth_eddp_fill_context_tcp(ctx, eddp);
514 kfree(eddp);
515 return 0;
516}
517
518static inline void
519qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
520 int hdr_len)
521{
522 int skbs_per_page;
523
524 QETH_DBF_TEXT(trace, 5, "eddpcanp");
525 /* can we put multiple skbs in one page? */
526 skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
527 if (skbs_per_page > 1){
528 ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
529 skbs_per_page + 1;
530 ctx->elements_per_skb = 1;
531 } else {
532 /* no -> how many elements per skb? */
533 ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
534 PAGE_SIZE) >> PAGE_SHIFT;
535 ctx->num_pages = ctx->elements_per_skb *
536 (skb_shinfo(skb)->tso_segs + 1);
537 }
538 ctx->num_elements = ctx->elements_per_skb *
539 (skb_shinfo(skb)->tso_segs + 1);
540}
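/*
 * Worked example (editorial addition): for tso_size = 1448 (standard
 * Ethernet TCP payload) and hdr_len = 66, one 4KB page holds
 * 4096 / (1448 + 66) = 2 segments, so elements_per_skb = 1 and
 * num_pages = (tso_segs + 1) / 2 + 1.
 */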
541
542static inline struct qeth_eddp_context *
543qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
544 int hdr_len)
545{
546 struct qeth_eddp_context *ctx = NULL;
547 u8 *addr;
548 int i;
549
550 QETH_DBF_TEXT(trace, 5, "creddpcg");
551 /* create the context and allocate pages */
552 ctx = kmalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
553 if (ctx == NULL){
554 QETH_DBF_TEXT(trace, 2, "ceddpcn1");
555 return NULL;
556 }
557 memset(ctx, 0, sizeof(struct qeth_eddp_context));
558 ctx->type = QETH_LARGE_SEND_EDDP;
559 qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
560 if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)){
561 QETH_DBF_TEXT(trace, 2, "ceddpcis");
562 kfree(ctx);
563 return NULL;
564 }
565 ctx->pages = kmalloc(ctx->num_pages * sizeof(u8 *), GFP_ATOMIC);
566 if (ctx->pages == NULL){
567 QETH_DBF_TEXT(trace, 2, "ceddpcn2");
568 kfree(ctx);
569 return NULL;
570 }
571 memset(ctx->pages, 0, ctx->num_pages * sizeof(u8 *));
572 for (i = 0; i < ctx->num_pages; ++i){
573 addr = (u8 *)__get_free_page(GFP_ATOMIC);
574 if (addr == NULL){
575 QETH_DBF_TEXT(trace, 2, "ceddpcn3");
576 ctx->num_pages = i;
577 qeth_eddp_free_context(ctx);
578 return NULL;
579 }
580 memset(addr, 0, PAGE_SIZE);
581 ctx->pages[i] = addr;
582 }
583 ctx->elements = kmalloc(ctx->num_elements *
584 sizeof(struct qeth_eddp_element), GFP_ATOMIC);
585 if (ctx->elements == NULL){
586 QETH_DBF_TEXT(trace, 2, "ceddpcn4");
587 qeth_eddp_free_context(ctx);
588 return NULL;
589 }
590 memset(ctx->elements, 0,
591 ctx->num_elements * sizeof(struct qeth_eddp_element));
592 /* reset num_elements; will be incremented again in fill_buffer to
593 * reflect number of actually used elements */
594 ctx->num_elements = 0;
595 return ctx;
596}
597
598static inline struct qeth_eddp_context *
599qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
600 struct qeth_hdr *qhdr)
601{
602 struct qeth_eddp_context *ctx = NULL;
603
604 QETH_DBF_TEXT(trace, 5, "creddpct");
605 if (skb->protocol == ETH_P_IP)
606 ctx = qeth_eddp_create_context_generic(card, skb,
607 sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
608 skb->h.th->doff*4);
609 else if (skb->protocol == ETH_P_IPV6)
610 ctx = qeth_eddp_create_context_generic(card, skb,
611 sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
612 skb->h.th->doff*4);
613 else
614 QETH_DBF_TEXT(trace, 2, "cetcpinv");
615
616 if (ctx == NULL) {
617 QETH_DBF_TEXT(trace, 2, "creddpnl");
618 return NULL;
619 }
620 if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)){
621 QETH_DBF_TEXT(trace, 2, "ceddptfe");
622 qeth_eddp_free_context(ctx);
623 return NULL;
624 }
625 atomic_set(&ctx->refcnt, 1);
626 return ctx;
627}
628
629struct qeth_eddp_context *
630qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
631 struct qeth_hdr *qhdr)
632{
633 QETH_DBF_TEXT(trace, 5, "creddpc");
634 switch (skb->sk->sk_protocol){
635 case IPPROTO_TCP:
636 return qeth_eddp_create_context_tcp(card, skb, qhdr);
637 default:
638 QETH_DBF_TEXT(trace, 2, "eddpinvp");
639 }
640 return NULL;
641}
642
643
diff --git a/drivers/s390/net/qeth_eddp.h b/drivers/s390/net/qeth_eddp.h
new file mode 100644
index 000000000000..e1b51860bc57
--- /dev/null
+++ b/drivers/s390/net/qeth_eddp.h
@@ -0,0 +1,85 @@
1/*
2 * linux/drivers/s390/net/qeth_eddp.h ($Revision: 1.5 $)
3 *
4 * Header file for qeth enhanced device driver packing.
5 *
6 * Copyright 2004 IBM Corporation
7 *
8 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
9 *
10 * $Revision: 1.5 $ $Date: 2005/03/24 09:04:18 $
11 *
12 */
13#ifndef __QETH_EDDP_H__
14#define __QETH_EDDP_H__
15
16struct qeth_eddp_element {
17 u32 flags;
18 u32 length;
19 void *addr;
20};
21
22struct qeth_eddp_context {
23 atomic_t refcnt;
24 enum qeth_large_send_types type;
25 int num_pages; /* # of allocated pages */
26 u8 **pages; /* pointers to pages */
27 int offset; /* offset in ctx during creation */
28 int num_elements; /* # of required 'SBALEs' */
29 struct qeth_eddp_element *elements; /* array of 'SBALEs' */
30	int elements_per_skb; /* # of 'SBALEs' per skb */
31};
32
33struct qeth_eddp_context_reference {
34 struct list_head list;
35 struct qeth_eddp_context *ctx;
36};
37
38extern struct qeth_eddp_context *
39qeth_eddp_create_context(struct qeth_card *,struct sk_buff *,struct qeth_hdr *);
40
41extern void
42qeth_eddp_put_context(struct qeth_eddp_context *);
43
44extern int
45qeth_eddp_fill_buffer(struct qeth_qdio_out_q *,struct qeth_eddp_context *,int);
46
47extern void
48qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *);
49
50extern int
51qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *,
52 struct qeth_eddp_context *);
53/*
54 * Data used for fragmenting an IP packet.
55 */
56struct qeth_eddp_data {
57 struct qeth_hdr qh;
58 struct ethhdr mac;
59 u16 vlan[2];
60 union {
61 struct {
62 struct iphdr h;
63 u8 options[40];
64 } ip4;
65 struct {
66 struct ipv6hdr h;
67 } ip6;
68 } nh;
69 u8 nhl;
70 void *nh_in_ctx; /* address of nh within the ctx */
71 union {
72 struct {
73 struct tcphdr h;
74 u8 options[40];
75 } tcp;
76 } th;
77 u8 thl;
78 void *th_in_ctx; /* address of th within the ctx */
79 struct sk_buff *skb;
80 int skb_offset;
81 int frag;
82 int frag_offset;
83} __attribute__ ((packed));
84
85#endif /* __QETH_EDDP_H__ */
diff --git a/drivers/s390/net/qeth_fs.h b/drivers/s390/net/qeth_fs.h
new file mode 100644
index 000000000000..5c9a51ce91b6
--- /dev/null
+++ b/drivers/s390/net/qeth_fs.h
@@ -0,0 +1,163 @@
1/*
2 * linux/drivers/s390/net/qeth_fs.h
3 *
4 * Linux on zSeries OSA Express and HiperSockets support.
5 *
6 * This header file contains definitions related to sysfs and procfs.
7 *
8 * Copyright 2000,2003 IBM Corporation
9 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
10 *
11 */
12#ifndef __QETH_FS_H__
13#define __QETH_FS_H__
14
15#define VERSION_QETH_FS_H "$Revision: 1.9 $"
16
17extern const char *VERSION_QETH_PROC_C;
18extern const char *VERSION_QETH_SYS_C;
19
20#ifdef CONFIG_PROC_FS
21extern int
22qeth_create_procfs_entries(void);
23
24extern void
25qeth_remove_procfs_entries(void);
26#else
27static inline int
28qeth_create_procfs_entries(void)
29{
30 return 0;
31}
32
33static inline void
34qeth_remove_procfs_entries(void)
35{
36}
37#endif /* CONFIG_PROC_FS */
38
39extern int
40qeth_create_device_attributes(struct device *dev);
41
42extern void
43qeth_remove_device_attributes(struct device *dev);
44
45extern int
46qeth_create_driver_attributes(void);
47
48extern void
49qeth_remove_driver_attributes(void);
50
51/*
52 * utility functions used in qeth_proc.c and qeth_sys.c
53 */
54
55static inline const char *
56qeth_get_checksum_str(struct qeth_card *card)
57{
58 if (card->options.checksum_type == SW_CHECKSUMMING)
59 return "sw";
60 else if (card->options.checksum_type == HW_CHECKSUMMING)
61 return "hw";
62 else
63 return "no";
64}
65
66static inline const char *
67qeth_get_prioq_str(struct qeth_card *card, char *buf)
68{
69 if (card->qdio.do_prio_queueing == QETH_NO_PRIO_QUEUEING)
70 sprintf(buf, "always_q_%i", card->qdio.default_out_queue);
71 else
72 strcpy(buf, (card->qdio.do_prio_queueing ==
73 QETH_PRIO_Q_ING_PREC)?
74 "by_prec." : "by_ToS");
75 return buf;
76}
77
78static inline const char *
79qeth_get_bufsize_str(struct qeth_card *card)
80{
81 if (card->qdio.in_buf_size == 16384)
82 return "16k";
83 else if (card->qdio.in_buf_size == 24576)
84 return "24k";
85 else if (card->qdio.in_buf_size == 32768)
86 return "32k";
87 else if (card->qdio.in_buf_size == 40960)
88 return "40k";
89 else
90 return "64k";
91}
92
93static inline const char *
94qeth_get_cardname(struct qeth_card *card)
95{
96 if (card->info.guestlan) {
97 switch (card->info.type) {
98 case QETH_CARD_TYPE_OSAE:
99 return " Guest LAN QDIO";
100 case QETH_CARD_TYPE_IQD:
101 return " Guest LAN Hiper";
102 default:
103 return " unknown";
104 }
105 } else {
106 switch (card->info.type) {
107 case QETH_CARD_TYPE_OSAE:
108 return " OSD Express";
109 case QETH_CARD_TYPE_IQD:
110 return " HiperSockets";
111 default:
112 return " unknown";
113 }
114 }
115 return " n/a";
116}
117
118/* max length to be returned: 14 */
119static inline const char *
120qeth_get_cardname_short(struct qeth_card *card)
121{
122 if (card->info.guestlan){
123 switch (card->info.type){
124 case QETH_CARD_TYPE_OSAE:
125 return "GuestLAN QDIO";
126 case QETH_CARD_TYPE_IQD:
127 return "GuestLAN Hiper";
128 default:
129 return "unknown";
130 }
131 } else {
132 switch (card->info.type) {
133 case QETH_CARD_TYPE_OSAE:
134 switch (card->info.link_type) {
135 case QETH_LINK_TYPE_FAST_ETH:
136 return "OSD_100";
137 case QETH_LINK_TYPE_HSTR:
138 return "HSTR";
139 case QETH_LINK_TYPE_GBIT_ETH:
140 return "OSD_1000";
141 case QETH_LINK_TYPE_10GBIT_ETH:
142 return "OSD_10GIG";
143 case QETH_LINK_TYPE_LANE_ETH100:
144 return "OSD_FE_LANE";
145 case QETH_LINK_TYPE_LANE_TR:
146 return "OSD_TR_LANE";
147 case QETH_LINK_TYPE_LANE_ETH1000:
148 return "OSD_GbE_LANE";
149 case QETH_LINK_TYPE_LANE:
150 return "OSD_ATM_LANE";
151 default:
152 return "OSD_Express";
153 }
154 case QETH_CARD_TYPE_IQD:
155 return "HiperSockets";
156 default:
157 return "unknown";
158 }
159 }
160 return "n/a";
161}
162
163#endif /* __QETH_FS_H__ */
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
new file mode 100644
index 000000000000..607b92542df6
--- /dev/null
+++ b/drivers/s390/net/qeth_main.c
@@ -0,0 +1,8236 @@
1/*
2 *
3 * linux/drivers/s390/net/qeth_main.c ($Revision: 1.206 $)
4 *
5 * Linux on zSeries OSA Express and HiperSockets support
6 *
7 * Copyright 2000,2003 IBM Corporation
8 *
9 * Author(s): Original Code written by
10 * Utz Bacher (utz.bacher@de.ibm.com)
11 * Rewritten by
12 * Frank Pavlic (pavlic@de.ibm.com) and
13 * Thomas Spatzier <tspat@de.ibm.com>
14 *
15 * $Revision: 1.206 $ $Date: 2005/03/24 09:04:18 $
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
20 * any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 */
31
32/***
33 * eye catcher; just for debugging purposes
34 */
35void volatile
36qeth_eyecatcher(void)
37{
38 return;
39}
40
41#include <linux/config.h>
42#include <linux/module.h>
43#include <linux/moduleparam.h>
44#include <linux/string.h>
45#include <linux/errno.h>
46#include <linux/mm.h>
47#include <linux/ip.h>
48#include <linux/inetdevice.h>
49#include <linux/netdevice.h>
50#include <linux/sched.h>
51#include <linux/workqueue.h>
52#include <linux/kernel.h>
53#include <linux/slab.h>
54#include <linux/interrupt.h>
55#include <linux/tcp.h>
56#include <linux/icmp.h>
57#include <linux/skbuff.h>
58#include <linux/in.h>
59#include <linux/igmp.h>
60#include <linux/init.h>
61#include <linux/reboot.h>
62#include <linux/mii.h>
63#include <linux/rcupdate.h>
64#include <linux/ethtool.h>
65
66#include <net/arp.h>
67#include <net/ip.h>
68#include <net/route.h>
69
70#include <asm/ebcdic.h>
71#include <asm/io.h>
72#include <asm/qeth.h>
73#include <asm/timex.h>
74#include <asm/semaphore.h>
75#include <asm/uaccess.h>
76
77#include "qeth.h"
78#include "qeth_mpc.h"
79#include "qeth_fs.h"
80#include "qeth_eddp.h"
81#include "qeth_tso.h"
82
83#define VERSION_QETH_C "$Revision: 1.206 $"
84static const char *version = "qeth S/390 OSA-Express driver";
85
86/**
87 * s390 debug facility (s390dbf) areas
88 */
89static debug_info_t *qeth_dbf_setup = NULL;
90static debug_info_t *qeth_dbf_data = NULL;
91static debug_info_t *qeth_dbf_misc = NULL;
92static debug_info_t *qeth_dbf_control = NULL;
93debug_info_t *qeth_dbf_trace = NULL;
94static debug_info_t *qeth_dbf_sense = NULL;
95static debug_info_t *qeth_dbf_qerr = NULL;
96
97DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf);
98
99/**
100 * module-wide data and declarations
101 */
102static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;
103
104/* list of our cards */
105struct qeth_card_list_struct qeth_card_list;
106/* list of processes that want to be notified */
107spinlock_t qeth_notify_lock;
108struct list_head qeth_notify_list;
109
110static void qeth_send_control_data_cb(struct qeth_channel *,
111 struct qeth_cmd_buffer *);
112
113/**
114 * forward declarations of internal functions
115 */
116static void
117qeth_init_qdio_info(struct qeth_card *card);
118
119static int
120qeth_init_qdio_queues(struct qeth_card *card);
121
122static int
123qeth_alloc_qdio_buffers(struct qeth_card *card);
124
125static void
126qeth_free_qdio_buffers(struct qeth_card *);
127
128static void
129qeth_clear_qdio_buffers(struct qeth_card *);
130
131static void
132qeth_clear_ip_list(struct qeth_card *, int, int);
133
134static void
135qeth_clear_ipacmd_list(struct qeth_card *);
136
137static int
138qeth_qdio_clear_card(struct qeth_card *, int);
139
140static void
141qeth_clear_working_pool_list(struct qeth_card *);
142
143static void
144qeth_clear_cmd_buffers(struct qeth_channel *);
145
146static int
147qeth_stop(struct net_device *);
148
149static void
150qeth_clear_ipato_list(struct qeth_card *);
151
152static int
153qeth_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
154
155static void
156qeth_irq_tasklet(unsigned long);
157
158static int
159qeth_set_online(struct ccwgroup_device *);
160
161static struct qeth_ipaddr *
162qeth_get_addr_buffer(enum qeth_prot_versions);
163
164static void
165qeth_set_multicast_list(struct net_device *);
166
167static void
168qeth_notify_processes(void)
169{
170 /*notify all registered processes */
171 struct qeth_notify_list_struct *n_entry;
172
173 QETH_DBF_TEXT(trace,3,"procnoti");
174 spin_lock(&qeth_notify_lock);
175 list_for_each_entry(n_entry, &qeth_notify_list, list) {
176 send_sig(n_entry->signum, n_entry->task, 1);
177 }
178 spin_unlock(&qeth_notify_lock);
179
180}
181int
182qeth_notifier_unregister(struct task_struct *p)
183{
184 struct qeth_notify_list_struct *n_entry, *tmp;
185
186 QETH_DBF_TEXT(trace, 2, "notunreg");
187 spin_lock(&qeth_notify_lock);
188 list_for_each_entry_safe(n_entry, tmp, &qeth_notify_list, list) {
189 if (n_entry->task == p) {
190 list_del(&n_entry->list);
191 kfree(n_entry);
192 goto out;
193 }
194 }
195out:
196 spin_unlock(&qeth_notify_lock);
197 return 0;
198}
199int
200qeth_notifier_register(struct task_struct *p, int signum)
201{
202 struct qeth_notify_list_struct *n_entry;
203
204
205 /*check first if entry already exists*/
206 spin_lock(&qeth_notify_lock);
207 list_for_each_entry(n_entry, &qeth_notify_list, list) {
208 if (n_entry->task == p) {
209 n_entry->signum = signum;
210 spin_unlock(&qeth_notify_lock);
211 return 0;
212 }
213 }
214 spin_unlock(&qeth_notify_lock);
215
216	n_entry = kmalloc(sizeof(struct qeth_notify_list_struct),
217			  GFP_KERNEL);
218 if (!n_entry)
219 return -ENOMEM;
220 n_entry->task = p;
221 n_entry->signum = signum;
222 spin_lock(&qeth_notify_lock);
223 list_add(&n_entry->list,&qeth_notify_list);
224 spin_unlock(&qeth_notify_lock);
225 return 0;
226}
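/*
 * Usage sketch (editorial addition): a process that wants to hear about
 * card state changes registers itself and is later signalled from
 * qeth_notify_processes(); the signal number is the caller's choice:
 *
 *	qeth_notifier_register(current, SIGUSR1);
 */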
227
228
229/**
230 * free channel command buffers
231 */
232static void
233qeth_clean_channel(struct qeth_channel *channel)
234{
235 int cnt;
236
237 QETH_DBF_TEXT(setup, 2, "freech");
238 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
239 kfree(channel->iob[cnt].data);
240}
241
242/**
243 * free card
244 */
245static void
246qeth_free_card(struct qeth_card *card)
247{
248
249 QETH_DBF_TEXT(setup, 2, "freecrd");
250 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
251 qeth_clean_channel(&card->read);
252 qeth_clean_channel(&card->write);
253 if (card->dev)
254 free_netdev(card->dev);
255 qeth_clear_ip_list(card, 0, 0);
256 qeth_clear_ipato_list(card);
257 kfree(card->ip_tbd_list);
258 qeth_free_qdio_buffers(card);
259 kfree(card);
260}
261
262/**
263 * alloc memory for command buffer per channel
264 */
265static int
266qeth_setup_channel(struct qeth_channel *channel)
267{
268 int cnt;
269
270 QETH_DBF_TEXT(setup, 2, "setupch");
271 for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
272		channel->iob[cnt].data =
273			kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
274 if (channel->iob[cnt].data == NULL)
275 break;
276 channel->iob[cnt].state = BUF_STATE_FREE;
277 channel->iob[cnt].channel = channel;
278 channel->iob[cnt].callback = qeth_send_control_data_cb;
279 channel->iob[cnt].rc = 0;
280 }
281 if (cnt < QETH_CMD_BUFFER_NO) {
282 while (cnt-- > 0)
283 kfree(channel->iob[cnt].data);
284 return -ENOMEM;
285 }
286 channel->buf_no = 0;
287 channel->io_buf_no = 0;
288 atomic_set(&channel->irq_pending, 0);
289 spin_lock_init(&channel->iob_lock);
290
291 init_waitqueue_head(&channel->wait_q);
292 channel->irq_tasklet.data = (unsigned long) channel;
293 channel->irq_tasklet.func = qeth_irq_tasklet;
294 return 0;
295}
296
297/**
298 * alloc memory for card structure
299 */
300static struct qeth_card *
301qeth_alloc_card(void)
302{
303 struct qeth_card *card;
304
305 QETH_DBF_TEXT(setup, 2, "alloccrd");
306	card = kmalloc(sizeof(struct qeth_card),
307		       GFP_DMA|GFP_KERNEL);
308 if (!card)
309 return NULL;
310 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
311 memset(card, 0, sizeof(struct qeth_card));
312 if (qeth_setup_channel(&card->read)) {
313 kfree(card);
314 return NULL;
315 }
316 if (qeth_setup_channel(&card->write)) {
317 qeth_clean_channel(&card->read);
318 kfree(card);
319 return NULL;
320 }
321 return card;
322}
323
324static long
325__qeth_check_irb_error(struct ccw_device *cdev, struct irb *irb)
326{
327 if (!IS_ERR(irb))
328 return 0;
329
330 switch (PTR_ERR(irb)) {
331 case -EIO:
332 PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
333 QETH_DBF_TEXT(trace, 2, "ckirberr");
334 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
335 break;
336 case -ETIMEDOUT:
337 PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
338 QETH_DBF_TEXT(trace, 2, "ckirberr");
339 QETH_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
340 break;
341 default:
342 PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
343 cdev->dev.bus_id);
344 QETH_DBF_TEXT(trace, 2, "ckirberr");
345 QETH_DBF_TEXT(trace, 2, " rc???");
346 }
347 return PTR_ERR(irb);
348}
349
350static int
351qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
352{
353 int dstat,cstat;
354 char *sense;
355
356 sense = (char *) irb->ecw;
357 cstat = irb->scsw.cstat;
358 dstat = irb->scsw.dstat;
359
360 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
361 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
362 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
363 QETH_DBF_TEXT(trace,2, "CGENCHK");
364		PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x\n",
365 cdev->dev.bus_id, dstat, cstat);
366 HEXDUMP16(WARN, "irb: ", irb);
367 HEXDUMP16(WARN, "irb: ", ((char *) irb) + 32);
368 return 1;
369 }
370
371 if (dstat & DEV_STAT_UNIT_CHECK) {
372 if (sense[SENSE_RESETTING_EVENT_BYTE] &
373 SENSE_RESETTING_EVENT_FLAG) {
374 QETH_DBF_TEXT(trace,2,"REVIND");
375 return 1;
376 }
377 if (sense[SENSE_COMMAND_REJECT_BYTE] &
378 SENSE_COMMAND_REJECT_FLAG) {
379 QETH_DBF_TEXT(trace,2,"CMDREJi");
380 return 0;
381 }
382 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
383 QETH_DBF_TEXT(trace,2,"AFFE");
384 return 1;
385 }
386 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
387 QETH_DBF_TEXT(trace,2,"ZEROSEN");
388 return 0;
389 }
390 QETH_DBF_TEXT(trace,2,"DGENCHK");
391 return 1;
392 }
393 return 0;
394}
395static int qeth_issue_next_read(struct qeth_card *);
396
397/**
398 * interrupt handler
399 */
400static void
401qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
402{
403 int rc;
404 int cstat,dstat;
405 struct qeth_cmd_buffer *buffer;
406 struct qeth_channel *channel;
407 struct qeth_card *card;
408
409 QETH_DBF_TEXT(trace,5,"irq");
410
411 if (__qeth_check_irb_error(cdev, irb))
412 return;
413 cstat = irb->scsw.cstat;
414 dstat = irb->scsw.dstat;
415
416 card = CARD_FROM_CDEV(cdev);
417 if (!card)
418 return;
419
420 if (card->read.ccwdev == cdev){
421 channel = &card->read;
422 QETH_DBF_TEXT(trace,5,"read");
423 } else if (card->write.ccwdev == cdev) {
424 channel = &card->write;
425 QETH_DBF_TEXT(trace,5,"write");
426 } else {
427 channel = &card->data;
428 QETH_DBF_TEXT(trace,5,"data");
429 }
430 atomic_set(&channel->irq_pending, 0);
431
432 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC))
433 channel->state = CH_STATE_STOPPED;
434
435 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC))
436 channel->state = CH_STATE_HALTED;
437
438	/* let's wake up immediately on the data channel */
439 if ((channel == &card->data) && (intparm != 0))
440 goto out;
441
442 if (intparm == QETH_CLEAR_CHANNEL_PARM) {
443 QETH_DBF_TEXT(trace, 6, "clrchpar");
444 /* we don't have to handle this further */
445 intparm = 0;
446 }
447 if (intparm == QETH_HALT_CHANNEL_PARM) {
448 QETH_DBF_TEXT(trace, 6, "hltchpar");
449 /* we don't have to handle this further */
450 intparm = 0;
451 }
452 if ((dstat & DEV_STAT_UNIT_EXCEP) ||
453 (dstat & DEV_STAT_UNIT_CHECK) ||
454 (cstat)) {
455 if (irb->esw.esw0.erw.cons) {
456 /* TODO: we should make this s390dbf */
457 PRINT_WARN("sense data available on channel %s.\n",
458 CHANNEL_ID(channel));
459 PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat);
460 HEXDUMP16(WARN,"irb: ",irb);
461 HEXDUMP16(WARN,"sense data: ",irb->ecw);
462 }
463 rc = qeth_get_problem(cdev,irb);
464 if (rc) {
465 qeth_schedule_recovery(card);
466 goto out;
467 }
468 }
469
470 if (intparm) {
471 buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
472 buffer->state = BUF_STATE_PROCESSED;
473 }
474 if (channel == &card->data)
475 return;
476
477 if (channel == &card->read &&
478 channel->state == CH_STATE_UP)
479 qeth_issue_next_read(card);
480
481 tasklet_schedule(&channel->irq_tasklet);
482 return;
483out:
484 wake_up(&card->wait_q);
485}
486
487/**
488 * tasklet function scheduled from irq handler
489 */
490static void
491qeth_irq_tasklet(unsigned long data)
492{
493 struct qeth_card *card;
494 struct qeth_channel *channel;
495 struct qeth_cmd_buffer *iob;
496 __u8 index;
497
498 QETH_DBF_TEXT(trace,5,"irqtlet");
499 channel = (struct qeth_channel *) data;
500 iob = channel->iob;
501 index = channel->buf_no;
502 card = CARD_FROM_CDEV(channel->ccwdev);
503 while (iob[index].state == BUF_STATE_PROCESSED) {
504 if (iob[index].callback !=NULL) {
505 iob[index].callback(channel,iob + index);
506 }
507 index = (index + 1) % QETH_CMD_BUFFER_NO;
508 }
509 channel->buf_no = index;
510 wake_up(&card->wait_q);
511}
512
513static int qeth_stop_card(struct qeth_card *);
514
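/*
 * qeth_set_offline - stop the card and take its three subchannels
 * (data, write, read) offline; if the card was up, flag it so that a
 * later recovery can restore the previous state.
 */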
515static int
516qeth_set_offline(struct ccwgroup_device *cgdev)
517{
518 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
519 int rc = 0;
520 enum qeth_card_states recover_flag;
521
522 QETH_DBF_TEXT(setup, 3, "setoffl");
523 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
524
525 recover_flag = card->state;
526 if (qeth_stop_card(card) == -ERESTARTSYS){
527 PRINT_WARN("Stopping card %s interrupted by user!\n",
528 CARD_BUS_ID(card));
529 return -ERESTARTSYS;
530 }
531 if ((rc = ccw_device_set_offline(CARD_DDEV(card))) ||
532 (rc = ccw_device_set_offline(CARD_WDEV(card))) ||
533 (rc = ccw_device_set_offline(CARD_RDEV(card)))) {
534 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
535 }
536 if (recover_flag == CARD_STATE_UP)
537 card->state = CARD_STATE_RECOVER;
538 qeth_notify_processes();
539 return 0;
540}
541
542static int
543qeth_wait_for_threads(struct qeth_card *card, unsigned long threads);
544
545
546static void
547qeth_remove_device(struct ccwgroup_device *cgdev)
548{
549 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
550 unsigned long flags;
551
552 QETH_DBF_TEXT(setup, 3, "rmdev");
553 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
554
555 if (!card)
556 return;
557
558 if (qeth_wait_for_threads(card, 0xffffffff))
559 return;
560
561 if (cgdev->state == CCWGROUP_ONLINE){
562 card->use_hard_stop = 1;
563 qeth_set_offline(cgdev);
564 }
565	/* remove from our internal list */
566 write_lock_irqsave(&qeth_card_list.rwlock, flags);
567 list_del(&card->list);
568 write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
569 if (card->dev)
570 unregister_netdev(card->dev);
571 qeth_remove_device_attributes(&cgdev->dev);
572 qeth_free_card(card);
573 cgdev->dev.driver_data = NULL;
574 put_device(&cgdev->dev);
575}
576
577static int
578qeth_register_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
579static int
580qeth_deregister_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
581
582/**
583 * Add/remove address to/from card's ip list, i.e. try to add or remove
584 * reference to/from an IP address that is already registered on the card.
585 * Returns:
586 * 0 address was on card and its reference count has been adjusted,
587 * but is still > 0, so nothing has to be done
588 *	   also returns 0 if the address was not on card and the todo was to delete
589 * the address -> there is also nothing to be done
590 * 1 address was not on card and the todo is to add it to the card's ip
591 * list
592 * -1 address was on card and its reference count has been decremented
593 * to <= 0 by the todo -> address must be removed from card
594 */
595static int
596__qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
597 struct qeth_ipaddr **__addr)
598{
599 struct qeth_ipaddr *addr;
600 int found = 0;
601
602 list_for_each_entry(addr, &card->ip_list, entry) {
603 if ((addr->proto == QETH_PROT_IPV4) &&
604 (todo->proto == QETH_PROT_IPV4) &&
605 (addr->type == todo->type) &&
606 (addr->u.a4.addr == todo->u.a4.addr) &&
607 (addr->u.a4.mask == todo->u.a4.mask) ){
608 found = 1;
609 break;
610 }
611 if ((addr->proto == QETH_PROT_IPV6) &&
612 (todo->proto == QETH_PROT_IPV6) &&
613 (addr->type == todo->type) &&
614 (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
615 (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
616 sizeof(struct in6_addr)) == 0)) {
617 found = 1;
618 break;
619 }
620 }
621 if (found){
622 addr->users += todo->users;
623 if (addr->users <= 0){
624 *__addr = addr;
625 return -1;
626 } else {
627 /* for VIPA and RXIP limit refcount to 1 */
628 if (addr->type != QETH_IP_TYPE_NORMAL)
629 addr->users = 1;
630 return 0;
631 }
632 }
633 if (todo->users > 0){
634 /* for VIPA and RXIP limit refcount to 1 */
635 if (todo->type != QETH_IP_TYPE_NORMAL)
636 todo->users = 1;
637 return 1;
638 } else
639 return 0;
640}
641
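/*
 * __qeth_address_exists_in_list - check whether an equal address is
 * already present in the given list; with same_type set, only entries
 * of the same address type match, otherwise only entries of a
 * different type.
 */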
642static inline int
643__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
644 int same_type)
645{
646 struct qeth_ipaddr *tmp;
647
648 list_for_each_entry(tmp, list, entry) {
649 if ((tmp->proto == QETH_PROT_IPV4) &&
650 (addr->proto == QETH_PROT_IPV4) &&
651 ((same_type && (tmp->type == addr->type)) ||
652 (!same_type && (tmp->type != addr->type)) ) &&
653 (tmp->u.a4.addr == addr->u.a4.addr) ){
654 return 1;
655 }
656 if ((tmp->proto == QETH_PROT_IPV6) &&
657 (addr->proto == QETH_PROT_IPV6) &&
658 ((same_type && (tmp->type == addr->type)) ||
659 (!same_type && (tmp->type != addr->type)) ) &&
660 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
661 sizeof(struct in6_addr)) == 0) ) {
662 return 1;
663 }
664 }
665 return 0;
666}
667
668/*
669 * Add an IP address to the todo list. If there is already an "add todo"
670 * entry in this list, we just increment its reference count.
671 * Returns 0 if we only incremented the reference count.
672 */
673static int
674__qeth_insert_ip_todo(struct qeth_card *card, struct qeth_ipaddr *addr, int add)
675{
676 struct qeth_ipaddr *tmp, *t;
677 int found = 0;
678
679 list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
680 if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
681 (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
682 return 0;
683 if ((tmp->proto == QETH_PROT_IPV4) &&
684 (addr->proto == QETH_PROT_IPV4) &&
685 (tmp->type == addr->type) &&
686 (tmp->is_multicast == addr->is_multicast) &&
687 (tmp->u.a4.addr == addr->u.a4.addr) &&
688 (tmp->u.a4.mask == addr->u.a4.mask) ){
689 found = 1;
690 break;
691 }
692 if ((tmp->proto == QETH_PROT_IPV6) &&
693 (addr->proto == QETH_PROT_IPV6) &&
694 (tmp->type == addr->type) &&
695 (tmp->is_multicast == addr->is_multicast) &&
696 (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
697 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
698 sizeof(struct in6_addr)) == 0) ){
699 found = 1;
700 break;
701 }
702 }
703 if (found){
704 if (addr->users != 0)
705 tmp->users += addr->users;
706 else
707 tmp->users += add? 1:-1;
708 if (tmp->users == 0){
709 list_del(&tmp->entry);
710 kfree(tmp);
711 }
712 return 0;
713 } else {
714 if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
715 list_add(&addr->entry, card->ip_tbd_list);
716 else {
717 if (addr->users == 0)
718 addr->users += add? 1:-1;
719 if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
720 qeth_is_addr_covered_by_ipato(card, addr)){
721 QETH_DBF_TEXT(trace, 2, "tkovaddr");
722 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
723 }
724 list_add_tail(&addr->entry, card->ip_tbd_list);
725 }
726 return 1;
727 }
728}
729
730/**
731 * Queue removal of an IP address via the todo list.
732 */
733static int
734qeth_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
735{
736 unsigned long flags;
737 int rc = 0;
738
739 QETH_DBF_TEXT(trace,4,"delip");
740 if (addr->proto == QETH_PROT_IPV4)
741 QETH_DBF_HEX(trace,4,&addr->u.a4.addr,4);
742 else {
743 QETH_DBF_HEX(trace,4,&addr->u.a6.addr,8);
744 QETH_DBF_HEX(trace,4,((char *)&addr->u.a6.addr)+8,8);
745 }
746 spin_lock_irqsave(&card->ip_lock, flags);
747 rc = __qeth_insert_ip_todo(card, addr, 0);
748 spin_unlock_irqrestore(&card->ip_lock, flags);
749 return rc;
750}
751
752static int
753qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
754{
755 unsigned long flags;
756 int rc = 0;
757
758 QETH_DBF_TEXT(trace,4,"addip");
759 if (addr->proto == QETH_PROT_IPV4)
760 QETH_DBF_HEX(trace,4,&addr->u.a4.addr,4);
761 else {
762 QETH_DBF_HEX(trace,4,&addr->u.a6.addr,8);
763 QETH_DBF_HEX(trace,4,((char *)&addr->u.a6.addr)+8,8);
764 }
765 spin_lock_irqsave(&card->ip_lock, flags);
766 rc = __qeth_insert_ip_todo(card, addr, 1);
767 spin_unlock_irqrestore(&card->ip_lock, flags);
768 return rc;
769}
770
771static inline void
772__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags)
773{
774 struct qeth_ipaddr *addr, *tmp;
775 int rc;
776
777 list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
778 if (addr->is_multicast) {
779 spin_unlock_irqrestore(&card->ip_lock, *flags);
780 rc = qeth_deregister_addr_entry(card, addr);
781 spin_lock_irqsave(&card->ip_lock, *flags);
782 if (!rc) {
783 list_del(&addr->entry);
784 kfree(addr);
785 }
786 }
787 }
788}
789
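/*
 * qeth_set_ip_addr_list - drain the todo list: swap in a fresh, empty
 * todo list, then walk the old one and register/deregister the
 * addresses on the card. The ip_lock is dropped around the
 * register/deregister calls since they issue IPA commands and may
 * sleep.
 */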
790static void
791qeth_set_ip_addr_list(struct qeth_card *card)
792{
793 struct list_head *tbd_list;
794 struct qeth_ipaddr *todo, *addr;
795 unsigned long flags;
796 int rc;
797
798 QETH_DBF_TEXT(trace, 2, "sdiplist");
799 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
800
801 spin_lock_irqsave(&card->ip_lock, flags);
802 tbd_list = card->ip_tbd_list;
803 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
804 if (!card->ip_tbd_list) {
805 QETH_DBF_TEXT(trace, 0, "silnomem");
806 card->ip_tbd_list = tbd_list;
807 spin_unlock_irqrestore(&card->ip_lock, flags);
808 return;
809 } else
810 INIT_LIST_HEAD(card->ip_tbd_list);
811
812 while (!list_empty(tbd_list)){
813 todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
814 list_del(&todo->entry);
815 if (todo->type == QETH_IP_TYPE_DEL_ALL_MC){
816 __qeth_delete_all_mc(card, &flags);
817 kfree(todo);
818 continue;
819 }
820 rc = __qeth_ref_ip_on_card(card, todo, &addr);
821 if (rc == 0) {
822 /* nothing to be done; only adjusted refcount */
823 kfree(todo);
824 } else if (rc == 1) {
825 /* new entry to be added to on-card list */
826 spin_unlock_irqrestore(&card->ip_lock, flags);
827 rc = qeth_register_addr_entry(card, todo);
828 spin_lock_irqsave(&card->ip_lock, flags);
829 if (!rc)
830 list_add_tail(&todo->entry, &card->ip_list);
831 else
832 kfree(todo);
833 } else if (rc == -1) {
834 /* on-card entry to be removed */
835 list_del_init(&addr->entry);
836 spin_unlock_irqrestore(&card->ip_lock, flags);
837 rc = qeth_deregister_addr_entry(card, addr);
838 spin_lock_irqsave(&card->ip_lock, flags);
839 if (!rc)
840 kfree(addr);
841 else
842 list_add_tail(&addr->entry, &card->ip_list);
843 kfree(todo);
844 }
845 }
846 spin_unlock_irqrestore(&card->ip_lock, flags);
847 kfree(tbd_list);
848}
849
850static void qeth_delete_mc_addresses(struct qeth_card *);
851static void qeth_add_multicast_ipv4(struct qeth_card *);
852#ifdef CONFIG_QETH_IPV6
853static void qeth_add_multicast_ipv6(struct qeth_card *);
854#endif
855
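/*
 * Thread bookkeeping: thread_start_mask holds requested threads,
 * thread_allowed_mask gates which threads may run at all, and
 * thread_running_mask marks the ones currently executing. All three
 * masks are protected by thread_mask_lock.
 */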
856static inline int
857qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread)
858{
859 unsigned long flags;
860
861 spin_lock_irqsave(&card->thread_mask_lock, flags);
862 if ( !(card->thread_allowed_mask & thread) ||
863 (card->thread_start_mask & thread) ) {
864 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
865 return -EPERM;
866 }
867 card->thread_start_mask |= thread;
868 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
869 return 0;
870}
871
872static void
873qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
874{
875 unsigned long flags;
876
877 spin_lock_irqsave(&card->thread_mask_lock, flags);
878 card->thread_start_mask &= ~thread;
879 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
880 wake_up(&card->wait_q);
881}
882
883static void
884qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
885{
886 unsigned long flags;
887
888 spin_lock_irqsave(&card->thread_mask_lock, flags);
889 card->thread_running_mask &= ~thread;
890 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
891 wake_up(&card->wait_q);
892}
893
894static inline int
895__qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
896{
897 unsigned long flags;
898 int rc = 0;
899
900 spin_lock_irqsave(&card->thread_mask_lock, flags);
901 if (card->thread_start_mask & thread){
902 if ((card->thread_allowed_mask & thread) &&
903 !(card->thread_running_mask & thread)){
904 rc = 1;
905 card->thread_start_mask &= ~thread;
906 card->thread_running_mask |= thread;
907 } else
908 rc = -EPERM;
909 }
910 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
911 return rc;
912}
913
914static int
915qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
916{
917 int rc = 0;
918
919 wait_event(card->wait_q,
920 (rc = __qeth_do_run_thread(card, thread)) >= 0);
921 return rc;
922}
923
924static int
925qeth_register_ip_addresses(void *ptr)
926{
927 struct qeth_card *card;
928
929 card = (struct qeth_card *) ptr;
930 daemonize("qeth_reg_ip");
931 QETH_DBF_TEXT(trace,4,"regipth1");
932 if (!qeth_do_run_thread(card, QETH_SET_IP_THREAD))
933 return 0;
934 QETH_DBF_TEXT(trace,4,"regipth2");
935 qeth_set_ip_addr_list(card);
936 qeth_clear_thread_running_bit(card, QETH_SET_IP_THREAD);
937 return 0;
938}
939
940static int
941qeth_recover(void *ptr)
942{
943 struct qeth_card *card;
944 int rc = 0;
945
946 card = (struct qeth_card *) ptr;
947 daemonize("qeth_recover");
948 QETH_DBF_TEXT(trace,2,"recover1");
949 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
950 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
951 return 0;
952 QETH_DBF_TEXT(trace,2,"recover2");
953 PRINT_WARN("Recovery of device %s started ...\n",
954 CARD_BUS_ID(card));
955 card->use_hard_stop = 1;
956 qeth_set_offline(card->gdev);
957 rc = qeth_set_online(card->gdev);
958 if (!rc)
959 PRINT_INFO("Device %s successfully recovered!\n",
960 CARD_BUS_ID(card));
961 else
962 PRINT_INFO("Device %s could not be recovered!\n",
963 CARD_BUS_ID(card));
964 /* don't run another scheduled recovery */
965 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
966 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
967 return 0;
968}
969
970void
971qeth_schedule_recovery(struct qeth_card *card)
972{
973 QETH_DBF_TEXT(trace,2,"startrec");
974
975 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
976 schedule_work(&card->kernel_thread_starter);
977}
978
979static int
980qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
981{
982 unsigned long flags;
983 int rc = 0;
984
985 spin_lock_irqsave(&card->thread_mask_lock, flags);
986 QETH_DBF_TEXT_(trace, 4, " %02x%02x%02x",
987 (u8) card->thread_start_mask,
988 (u8) card->thread_allowed_mask,
989 (u8) card->thread_running_mask);
990 rc = (card->thread_start_mask & thread);
991 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
992 return rc;
993}
994
995static void
996qeth_start_kernel_thread(struct qeth_card *card)
997{
998	QETH_DBF_TEXT(trace, 2, "strthrd");
999
1000 if (card->read.state != CH_STATE_UP &&
1001 card->write.state != CH_STATE_UP)
1002 return;
1003
1004 if (qeth_do_start_thread(card, QETH_SET_IP_THREAD))
1005 kernel_thread(qeth_register_ip_addresses, (void *)card,SIGCHLD);
1006 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
1007 kernel_thread(qeth_recover, (void *) card, SIGCHLD);
1008}
1009
1010
1011static void
1012qeth_set_initial_options(struct qeth_card *card)
1013{
1014 card->options.route4.type = NO_ROUTER;
1015#ifdef CONFIG_QETH_IPV6
1016 card->options.route6.type = NO_ROUTER;
1017#endif /* CONFIG_QETH_IPV6 */
1018 card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
1019 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
1020 card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
1021 card->options.fake_broadcast = 0;
1022 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1023 card->options.fake_ll = 0;
1024 card->options.layer2 = 0;
1025}
1026
1027/**
1028 * Initialize channels, card and all state machines.
1029 */
1030static int
1031qeth_setup_card(struct qeth_card *card)
1032{
1033
1034 QETH_DBF_TEXT(setup, 2, "setupcrd");
1035 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
1036
1037 card->read.state = CH_STATE_DOWN;
1038 card->write.state = CH_STATE_DOWN;
1039 card->data.state = CH_STATE_DOWN;
1040 card->state = CARD_STATE_DOWN;
1041 card->lan_online = 0;
1042 card->use_hard_stop = 0;
1043 card->dev = NULL;
1044#ifdef CONFIG_QETH_VLAN
1045 spin_lock_init(&card->vlanlock);
1046 card->vlangrp = NULL;
1047#endif
1048 spin_lock_init(&card->ip_lock);
1049 spin_lock_init(&card->thread_mask_lock);
1050 card->thread_start_mask = 0;
1051 card->thread_allowed_mask = 0;
1052 card->thread_running_mask = 0;
1053 INIT_WORK(&card->kernel_thread_starter,
1054 (void *)qeth_start_kernel_thread,card);
1055 INIT_LIST_HEAD(&card->ip_list);
1056 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1057 if (!card->ip_tbd_list) {
1058 QETH_DBF_TEXT(setup, 0, "iptbdnom");
1059 return -ENOMEM;
1060 }
1061 INIT_LIST_HEAD(card->ip_tbd_list);
1062 INIT_LIST_HEAD(&card->cmd_waiter_list);
1063 init_waitqueue_head(&card->wait_q);
1064	/* initial options */
1065	qeth_set_initial_options(card);
1066 /* IP address takeover */
1067 INIT_LIST_HEAD(&card->ipato.entries);
1068 card->ipato.enabled = 0;
1069 card->ipato.invert4 = 0;
1070 card->ipato.invert6 = 0;
1071 /* init QDIO stuff */
1072 qeth_init_qdio_info(card);
1073 return 0;
1074}
1075
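/*
 * is_1920_device - inspect the channel-path descriptor of the data
 * device to detect paths that support only a single output queue
 * (CHPP field bit 6 set).
 */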
1076static int
1077is_1920_device (struct qeth_card *card)
1078{
1079 int single_queue = 0;
1080 struct ccw_device *ccwdev;
1081 struct channelPath_dsc {
1082 u8 flags;
1083 u8 lsn;
1084 u8 desc;
1085 u8 chpid;
1086 u8 swla;
1087 u8 zeroes;
1088 u8 chla;
1089 u8 chpp;
1090 } *chp_dsc;
1091
1092 QETH_DBF_TEXT(setup, 2, "chk_1920");
1093
1094 ccwdev = card->data.ccwdev;
1095 chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
1096 if (chp_dsc != NULL) {
1097 /* CHPP field bit 6 == 1 -> single queue */
1098 single_queue = ((chp_dsc->chpp & 0x02) == 0x02);
1099 kfree(chp_dsc);
1100 }
1101 QETH_DBF_TEXT_(setup, 2, "rc:%x", single_queue);
1102 return single_queue;
1103}
1104
1105static int
1106qeth_determine_card_type(struct qeth_card *card)
1107{
1108 int i = 0;
1109
1110 QETH_DBF_TEXT(setup, 2, "detcdtyp");
1111
1112 while (known_devices[i][4]) {
1113 if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
1114 (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
1115 card->info.type = known_devices[i][4];
1116 if (is_1920_device(card)) {
1117			PRINT_INFO("Priority Queueing not supported "
1118				   "due to hardware limitations!\n");
1119 card->qdio.no_out_queues = 1;
1120 card->qdio.default_out_queue = 0;
1121 } else {
1122 card->qdio.no_out_queues = known_devices[i][8];
1123 }
1124 card->info.is_multicast_different = known_devices[i][9];
1125 return 0;
1126 }
1127 i++;
1128 }
1129 card->info.type = QETH_CARD_TYPE_UNKNOWN;
1130 PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card));
1131 return -ENOENT;
1132}
1133
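/*
 * qeth_probe_device - allocate and set up a qeth_card for a new
 * ccwgroup device, attach the interrupt handler to its three
 * subchannels, create the sysfs attributes and determine the card
 * type.
 */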
1134static int
1135qeth_probe_device(struct ccwgroup_device *gdev)
1136{
1137 struct qeth_card *card;
1138 struct device *dev;
1139 unsigned long flags;
1140 int rc;
1141
1142 QETH_DBF_TEXT(setup, 2, "probedev");
1143
1144 dev = &gdev->dev;
1145 if (!get_device(dev))
1146 return -ENODEV;
1147
1148 card = qeth_alloc_card();
1149 if (!card) {
1150 put_device(dev);
1151 QETH_DBF_TEXT_(setup, 2, "1err%d", -ENOMEM);
1152 return -ENOMEM;
1153 }
1154 card->read.ccwdev = gdev->cdev[0];
1155 card->write.ccwdev = gdev->cdev[1];
1156 card->data.ccwdev = gdev->cdev[2];
1157
1158 if ((rc = qeth_setup_card(card))){
1159 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1160 put_device(dev);
1161 qeth_free_card(card);
1162 return rc;
1163 }
1164 gdev->dev.driver_data = card;
1165 card->gdev = gdev;
1166 gdev->cdev[0]->handler = qeth_irq;
1167 gdev->cdev[1]->handler = qeth_irq;
1168 gdev->cdev[2]->handler = qeth_irq;
1169
1170 rc = qeth_create_device_attributes(dev);
1171 if (rc) {
1172 put_device(dev);
1173 qeth_free_card(card);
1174 return rc;
1175 }
1176 if ((rc = qeth_determine_card_type(card))){
1177 PRINT_WARN("%s: not a valid card type\n", __func__);
1178 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
1179 put_device(dev);
1180 qeth_free_card(card);
1181 return rc;
1182 }
1183 /* insert into our internal list */
1184 write_lock_irqsave(&qeth_card_list.rwlock, flags);
1185 list_add_tail(&card->list, &qeth_card_list.list);
1186 write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
1187 return rc;
1188}
1189
1190
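/*
 * qeth_get_unitaddr - read the device's configuration data record to
 * obtain CHPID, unit address and CULA, and detect whether the card is
 * a VM GuestLAN device (configuration data starting with EBCDIC "VM").
 */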
1191static int
1192qeth_get_unitaddr(struct qeth_card *card)
1193{
1194 int length;
1195 char *prcd;
1196 int rc;
1197
1198 QETH_DBF_TEXT(setup, 2, "getunit");
1199 rc = read_conf_data(CARD_DDEV(card), (void **) &prcd, &length);
1200 if (rc) {
1201 PRINT_ERR("read_conf_data for device %s returned %i\n",
1202 CARD_DDEV_ID(card), rc);
1203 return rc;
1204 }
1205 card->info.chpid = prcd[30];
1206 card->info.unit_addr2 = prcd[31];
1207 card->info.cula = prcd[63];
1208 card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
1209 (prcd[0x11] == _ascebc['M']));
1210 return 0;
1211}
1212
1213static void
1214qeth_init_tokens(struct qeth_card *card)
1215{
1216 card->token.issuer_rm_w = 0x00010103UL;
1217 card->token.cm_filter_w = 0x00010108UL;
1218 card->token.cm_connection_w = 0x0001010aUL;
1219 card->token.ulp_filter_w = 0x0001010bUL;
1220 card->token.ulp_connection_w = 0x0001010dUL;
1221}
1222
1223static inline __u16
1224raw_devno_from_bus_id(char *id)
1225{
1226 id += (strlen(id) - 4);
1227 return (__u16) simple_strtoul(id, &id, 16);
1228}
1229/**
1230 * Set up the channel's CCW for the given I/O buffer.
1231 */
1232static void
1233qeth_setup_ccw(struct qeth_channel *channel,unsigned char *iob, __u32 len)
1234{
1235 struct qeth_card *card;
1236
1237 QETH_DBF_TEXT(trace, 4, "setupccw");
1238 card = CARD_FROM_CDEV(channel->ccwdev);
1239 if (channel == &card->read)
1240 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1241 else
1242 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1243 channel->ccw.count = len;
1244 channel->ccw.cda = (__u32) __pa(iob);
1245}
1246
1247/**
1248 * get a free buffer for ccws (IDX activation, lancmds, ipassists, ...)
1249 */
1250static struct qeth_cmd_buffer *
1251__qeth_get_buffer(struct qeth_channel *channel)
1252{
1253 __u8 index;
1254
1255 QETH_DBF_TEXT(trace, 6, "getbuff");
1256 index = channel->io_buf_no;
1257 do {
1258 if (channel->iob[index].state == BUF_STATE_FREE) {
1259 channel->iob[index].state = BUF_STATE_LOCKED;
1260 channel->io_buf_no = (channel->io_buf_no + 1) %
1261 QETH_CMD_BUFFER_NO;
1262 memset(channel->iob[index].data, 0, QETH_BUFSIZE);
1263 return channel->iob + index;
1264 }
1265 index = (index + 1) % QETH_CMD_BUFFER_NO;
1266 } while(index != channel->io_buf_no);
1267
1268 return NULL;
1269}
1270
1271/**
1272 * release command buffer
1273 */
1274static void
1275qeth_release_buffer(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1276{
1277 unsigned long flags;
1278
1279 QETH_DBF_TEXT(trace, 6, "relbuff");
1280 spin_lock_irqsave(&channel->iob_lock, flags);
1281 memset(iob->data, 0, QETH_BUFSIZE);
1282 iob->state = BUF_STATE_FREE;
1283 iob->callback = qeth_send_control_data_cb;
1284 iob->rc = 0;
1285 spin_unlock_irqrestore(&channel->iob_lock, flags);
1286}
1287
1288static struct qeth_cmd_buffer *
1289qeth_get_buffer(struct qeth_channel *channel)
1290{
1291 struct qeth_cmd_buffer *buffer = NULL;
1292 unsigned long flags;
1293
1294 spin_lock_irqsave(&channel->iob_lock, flags);
1295 buffer = __qeth_get_buffer(channel);
1296 spin_unlock_irqrestore(&channel->iob_lock, flags);
1297 return buffer;
1298}
1299
1300static struct qeth_cmd_buffer *
1301qeth_wait_for_buffer(struct qeth_channel *channel)
1302{
1303 struct qeth_cmd_buffer *buffer;
1304 wait_event(channel->wait_q,
1305 ((buffer = qeth_get_buffer(channel)) != NULL));
1306 return buffer;
1307}
1308
1309static void
1310qeth_clear_cmd_buffers(struct qeth_channel *channel)
1311{
1312 int cnt = 0;
1313
1314 for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1315 qeth_release_buffer(channel,&channel->iob[cnt]);
1316 channel->buf_no = 0;
1317 channel->io_buf_no = 0;
1318}
1319
1320/**
1321 * start IDX for read and write channel
1322 */
1323static int
1324qeth_idx_activate_get_answer(struct qeth_channel *channel,
1325 void (*idx_reply_cb)(struct qeth_channel *,
1326 struct qeth_cmd_buffer *))
1327{
1328 struct qeth_cmd_buffer *iob;
1329 unsigned long flags;
1330 int rc;
1331 struct qeth_card *card;
1332
1333 QETH_DBF_TEXT(setup, 2, "idxanswr");
1334 card = CARD_FROM_CDEV(channel->ccwdev);
1335 iob = qeth_get_buffer(channel);
1336 iob->callback = idx_reply_cb;
1337 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1338 channel->ccw.count = QETH_BUFSIZE;
1339 channel->ccw.cda = (__u32) __pa(iob->data);
1340
1341 wait_event(card->wait_q,
1342 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
1343 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1344 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1345 rc = ccw_device_start(channel->ccwdev,
1346 &channel->ccw,(addr_t) iob, 0, 0);
1347 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1348
1349 if (rc) {
1350		PRINT_ERR("qeth: Error2 in activating channel. rc=%d\n", rc);
1351 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1352 atomic_set(&channel->irq_pending, 0);
1353 wake_up(&card->wait_q);
1354 return rc;
1355 }
1356 rc = wait_event_interruptible_timeout(card->wait_q,
1357 channel->state == CH_STATE_UP, QETH_TIMEOUT);
1358 if (rc == -ERESTARTSYS)
1359 return rc;
1360 if (channel->state != CH_STATE_UP){
1361 rc = -ETIME;
1362 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
1363 qeth_clear_cmd_buffers(channel);
1364 } else
1365 rc = 0;
1366 return rc;
1367}
1368
1369static int
1370qeth_idx_activate_channel(struct qeth_channel *channel,
1371 void (*idx_reply_cb)(struct qeth_channel *,
1372 struct qeth_cmd_buffer *))
1373{
1374 struct qeth_card *card;
1375 struct qeth_cmd_buffer *iob;
1376 unsigned long flags;
1377 __u16 temp;
1378 int rc;
1379
1380 card = CARD_FROM_CDEV(channel->ccwdev);
1381
1382 QETH_DBF_TEXT(setup, 2, "idxactch");
1383
1384 iob = qeth_get_buffer(channel);
1385 iob->callback = idx_reply_cb;
1386 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1387 channel->ccw.count = IDX_ACTIVATE_SIZE;
1388 channel->ccw.cda = (__u32) __pa(iob->data);
1389 if (channel == &card->write) {
1390 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
1391 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1392 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1393 card->seqno.trans_hdr++;
1394 } else {
1395 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
1396 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1397 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1398 }
1399 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1400 &card->token.issuer_rm_w,QETH_MPC_TOKEN_LENGTH);
1401 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
1402 &card->info.func_level,sizeof(__u16));
1403 temp = raw_devno_from_bus_id(CARD_DDEV_ID(card));
1404 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2);
1405 temp = (card->info.cula << 8) + card->info.unit_addr2;
1406 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
1407
1408 wait_event(card->wait_q,
1409 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
1410 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1411 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1412 rc = ccw_device_start(channel->ccwdev,
1413 &channel->ccw,(addr_t) iob, 0, 0);
1414 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1415
1416 if (rc) {
1417 PRINT_ERR("qeth: Error1 in activating channel. rc=%d\n",rc);
1418 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
1419 atomic_set(&channel->irq_pending, 0);
1420 wake_up(&card->wait_q);
1421 return rc;
1422 }
1423 rc = wait_event_interruptible_timeout(card->wait_q,
1424 channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
1425 if (rc == -ERESTARTSYS)
1426 return rc;
1427 if (channel->state != CH_STATE_ACTIVATING) {
1428 PRINT_WARN("qeth: IDX activate timed out!\n");
1429 QETH_DBF_TEXT_(setup, 2, "2err%d", -ETIME);
1430 qeth_clear_cmd_buffers(channel);
1431 return -ETIME;
1432 }
1433 return qeth_idx_activate_get_answer(channel,idx_reply_cb);
1434}
1435
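/*
 * qeth_peer_func_level - translate the local function level into the
 * value expected from the peer; used by the IDX reply callbacks for
 * the function-level compatibility check.
 */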
1436static int
1437qeth_peer_func_level(int level)
1438{
1439 if ((level & 0xff) == 8)
1440 return (level & 0xff) + 0x400;
1441 if (((level >> 8) & 3) == 1)
1442 return (level & 0xff) + 0x200;
1443 return level;
1444}
1445
1446static void
1447qeth_idx_write_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1448{
1449 struct qeth_card *card;
1450 __u16 temp;
1451
1452 QETH_DBF_TEXT(setup ,2, "idxwrcb");
1453
1454 if (channel->state == CH_STATE_DOWN) {
1455 channel->state = CH_STATE_ACTIVATING;
1456 goto out;
1457 }
1458 card = CARD_FROM_CDEV(channel->ccwdev);
1459
1460 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1461 PRINT_ERR("IDX_ACTIVATE on write channel device %s: negative "
1462 "reply\n", CARD_WDEV_ID(card));
1463 goto out;
1464 }
1465 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1466 if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
1467 PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
1468 "function level mismatch "
1469 "(sent: 0x%x, received: 0x%x)\n",
1470 CARD_WDEV_ID(card), card->info.func_level, temp);
1471 goto out;
1472 }
1473 channel->state = CH_STATE_UP;
1474out:
1475 qeth_release_buffer(channel, iob);
1476}
1477
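/*
 * qeth_check_idx_response - scan a received buffer for an IDX
 * TERMINATE indication; returns -EIO if the adapter terminated the
 * connection, 0 otherwise.
 */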
1478static int
1479qeth_check_idx_response(unsigned char *buffer)
1480{
1481 if (!buffer)
1482 return 0;
1483
1484 QETH_DBF_HEX(control, 2, buffer, QETH_DBF_CONTROL_LEN);
1485 if ((buffer[2] & 0xc0) == 0xc0) {
1486 PRINT_WARN("received an IDX TERMINATE "
1487 "with cause code 0x%02x%s\n",
1488 buffer[4],
1489 ((buffer[4] == 0x22) ?
1490 " -- try another portname" : ""));
1491 QETH_DBF_TEXT(trace, 2, "ckidxres");
1492 QETH_DBF_TEXT(trace, 2, " idxterm");
1493 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
1494 return -EIO;
1495 }
1496 return 0;
1497}
1498
1499static void
1500qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1501{
1502 struct qeth_card *card;
1503 __u16 temp;
1504
1505 QETH_DBF_TEXT(setup , 2, "idxrdcb");
1506 if (channel->state == CH_STATE_DOWN) {
1507 channel->state = CH_STATE_ACTIVATING;
1508 goto out;
1509 }
1510
1511 card = CARD_FROM_CDEV(channel->ccwdev);
1512 if (qeth_check_idx_response(iob->data)) {
1513 goto out;
1514 }
1515 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1516 PRINT_ERR("IDX_ACTIVATE on read channel device %s: negative "
1517 "reply\n", CARD_RDEV_ID(card));
1518 goto out;
1519 }
1520
1521/**
1522 * temporary fix for a microcode bug;
1523 * to revert it, replace OR by AND
1524 */
1525 if ( (!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
1526 (card->info.type == QETH_CARD_TYPE_OSAE) )
1527 card->info.portname_required = 1;
1528
1529 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1530 if (temp != qeth_peer_func_level(card->info.func_level)) {
1531 PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
1532 "level mismatch (sent: 0x%x, received: 0x%x)\n",
1533 CARD_RDEV_ID(card), card->info.func_level, temp);
1534 goto out;
1535 }
1536 memcpy(&card->token.issuer_rm_r,
1537 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1538 QETH_MPC_TOKEN_LENGTH);
1539 memcpy(&card->info.mcl_level[0],
1540 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
1541 channel->state = CH_STATE_UP;
1542out:
1543 qeth_release_buffer(channel,iob);
1544}
1545
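/*
 * qeth_issue_next_read - post the next read CCW on the read channel;
 * called from the interrupt handler after a read completes so that a
 * read request is always outstanding for incoming control data.
 */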
1546static int
1547qeth_issue_next_read(struct qeth_card *card)
1548{
1549 int rc;
1550 struct qeth_cmd_buffer *iob;
1551
1552 QETH_DBF_TEXT(trace,5,"issnxrd");
1553 if (card->read.state != CH_STATE_UP)
1554 return -EIO;
1555 iob = qeth_get_buffer(&card->read);
1556 if (!iob) {
1557 PRINT_WARN("issue_next_read failed: no iob available!\n");
1558 return -ENOMEM;
1559 }
1560 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
1561 wait_event(card->wait_q,
1562 atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0);
1563 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1564 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
1565 (addr_t) iob, 0, 0);
1566 if (rc) {
1567 PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc);
1568 atomic_set(&card->read.irq_pending, 0);
1569 qeth_schedule_recovery(card);
1570 wake_up(&card->wait_q);
1571 }
1572 return rc;
1573}
1574
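/*
 * Reply objects are reference counted: qeth_alloc_reply returns a
 * reply with refcount 1, each additional user takes a reference via
 * qeth_get_reply, and qeth_put_reply frees the object once the count
 * drops to zero.
 */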
1575static struct qeth_reply *
1576qeth_alloc_reply(struct qeth_card *card)
1577{
1578 struct qeth_reply *reply;
1579
1580 reply = kmalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
1581 if (reply){
1582 memset(reply, 0, sizeof(struct qeth_reply));
1583 atomic_set(&reply->refcnt, 1);
1584 reply->card = card;
1585	}
1586 return reply;
1587}
1588
1589static void
1590qeth_get_reply(struct qeth_reply *reply)
1591{
1592 WARN_ON(atomic_read(&reply->refcnt) <= 0);
1593 atomic_inc(&reply->refcnt);
1594}
1595
1596static void
1597qeth_put_reply(struct qeth_reply *reply)
1598{
1599 WARN_ON(atomic_read(&reply->refcnt) <= 0);
1600 if (atomic_dec_and_test(&reply->refcnt))
1601 kfree(reply);
1602}
1603
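/*
 * qeth_cmd_timeout - timer callback: if the reply is still on the
 * cmd_waiter_list when the timeout fires, take it off the list and
 * complete it with -ETIME.
 */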
1604static void
1605qeth_cmd_timeout(unsigned long data)
1606{
1607 struct qeth_reply *reply, *list_reply, *r;
1608 unsigned long flags;
1609
1610 reply = (struct qeth_reply *) data;
1611 spin_lock_irqsave(&reply->card->lock, flags);
1612 list_for_each_entry_safe(list_reply, r,
1613 &reply->card->cmd_waiter_list, list) {
1614 if (reply == list_reply){
1615 qeth_get_reply(reply);
1616 list_del_init(&reply->list);
1617 spin_unlock_irqrestore(&reply->card->lock, flags);
1618 reply->rc = -ETIME;
1619 reply->received = 1;
1620 wake_up(&reply->wait_q);
1621 qeth_put_reply(reply);
1622 return;
1623 }
1624 }
1625 spin_unlock_irqrestore(&reply->card->lock, flags);
1626}
1627
1628static void
1629qeth_reset_ip_addresses(struct qeth_card *card)
1630{
1631 QETH_DBF_TEXT(trace, 2, "rstipadd");
1632
1633 qeth_clear_ip_list(card, 0, 1);
1634 /* this function will also schedule the SET_IP_THREAD */
1635 qeth_set_multicast_list(card->dev);
1636}
1637
1638static struct qeth_ipa_cmd *
1639qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1640{
1641 struct qeth_ipa_cmd *cmd = NULL;
1642
1643 QETH_DBF_TEXT(trace,5,"chkipad");
1644 if (IS_IPA(iob->data)){
1645 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
1646 if (IS_IPA_REPLY(cmd))
1647 return cmd;
1648 else {
1649 switch (cmd->hdr.command) {
1650 case IPA_CMD_STOPLAN:
1651 PRINT_WARN("Link failure on %s (CHPID 0x%X) - "
1652 "there is a network problem or "
1653 "someone pulled the cable or "
1654 "disabled the port.\n",
1655 QETH_CARD_IFNAME(card),
1656 card->info.chpid);
1657 card->lan_online = 0;
1658 netif_carrier_off(card->dev);
1659 return NULL;
1660 case IPA_CMD_STARTLAN:
1661 PRINT_INFO("Link reestablished on %s "
1662 "(CHPID 0x%X). Scheduling "
1663 "IP address reset.\n",
1664 QETH_CARD_IFNAME(card),
1665 card->info.chpid);
1666 card->lan_online = 1;
1667 netif_carrier_on(card->dev);
1668 qeth_reset_ip_addresses(card);
1669 return NULL;
1670 case IPA_CMD_REGISTER_LOCAL_ADDR:
1671 QETH_DBF_TEXT(trace,3, "irla");
1672 break;
1673 case IPA_CMD_UNREGISTER_LOCAL_ADDR:
1674 QETH_DBF_TEXT(trace,3, "urla");
1675 break;
1676 default:
1677 PRINT_WARN("Received data is IPA "
1678 "but not a reply!\n");
1679 break;
1680 }
1681 }
1682 }
1683 return cmd;
1684}
1685
1686/**
1687 * wake all waiting ipa commands
1688 */
1689static void
1690qeth_clear_ipacmd_list(struct qeth_card *card)
1691{
1692 struct qeth_reply *reply, *r;
1693 unsigned long flags;
1694
1695 QETH_DBF_TEXT(trace, 4, "clipalst");
1696
1697 spin_lock_irqsave(&card->lock, flags);
1698 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1699 qeth_get_reply(reply);
1700 reply->rc = -EIO;
1701 reply->received = 1;
1702 list_del_init(&reply->list);
1703 wake_up(&reply->wait_q);
1704 qeth_put_reply(reply);
1705 }
1706 spin_unlock_irqrestore(&card->lock, flags);
1707}
1708
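/*
 * qeth_send_control_data_cb - completion callback for control
 * commands: match the incoming PDU against the waiting replies by
 * sequence number (or QETH_IDX_COMMAND_SEQNO during setup), run the
 * reply callback and either requeue the reply or wake up its waiter.
 */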
1709static void
1710qeth_send_control_data_cb(struct qeth_channel *channel,
1711 struct qeth_cmd_buffer *iob)
1712{
1713 struct qeth_card *card;
1714 struct qeth_reply *reply, *r;
1715 struct qeth_ipa_cmd *cmd;
1716 unsigned long flags;
1717 int keep_reply;
1718
1719 QETH_DBF_TEXT(trace,4,"sndctlcb");
1720
1721 card = CARD_FROM_CDEV(channel->ccwdev);
1722 if (qeth_check_idx_response(iob->data)) {
1723 qeth_clear_ipacmd_list(card);
1724 qeth_schedule_recovery(card);
1725 goto out;
1726 }
1727
1728 cmd = qeth_check_ipa_data(card, iob);
1729 if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
1730 goto out;
1731
1732 spin_lock_irqsave(&card->lock, flags);
1733 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1734 if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
1735 ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
1736 qeth_get_reply(reply);
1737 list_del_init(&reply->list);
1738 spin_unlock_irqrestore(&card->lock, flags);
1739 keep_reply = 0;
1740 if (reply->callback != NULL) {
1741 if (cmd) {
1742 reply->offset = (__u16)((char*)cmd -
1743 (char *)iob->data);
1744 keep_reply = reply->callback(card,
1745 reply,
1746 (unsigned long)cmd);
1747 }
1748 else
1749 keep_reply = reply->callback(card,
1750 reply,
1751 (unsigned long)iob);
1752 }
1753 if (cmd)
1754 reply->rc = (u16) cmd->hdr.return_code;
1755 else if (iob->rc)
1756 reply->rc = iob->rc;
1757 if (keep_reply) {
1758 spin_lock_irqsave(&card->lock, flags);
1759 list_add_tail(&reply->list,
1760 &card->cmd_waiter_list);
1761 spin_unlock_irqrestore(&card->lock, flags);
1762 } else {
1763 reply->received = 1;
1764 wake_up(&reply->wait_q);
1765 }
1766 qeth_put_reply(reply);
1767 goto out;
1768 }
1769 }
1770 spin_unlock_irqrestore(&card->lock, flags);
1771out:
1772 memcpy(&card->seqno.pdu_hdr_ack,
1773 QETH_PDU_HEADER_SEQ_NO(iob->data),
1774 QETH_SEQ_NO_LENGTH);
1775 qeth_release_buffer(channel,iob);
1776}
1777
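/*
 * qeth_send_control_data - fill in the transport and PDU sequence
 * numbers, queue a reply object on cmd_waiter_list, start the write
 * CCW and sleep until the reply arrives or the command timer expires.
 */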
1778static int
1779qeth_send_control_data(struct qeth_card *card, int len,
1780 struct qeth_cmd_buffer *iob,
1781 int (*reply_cb)
1782 (struct qeth_card *, struct qeth_reply*, unsigned long),
1783 void *reply_param)
1784
1785{
1786 int rc;
1787 unsigned long flags;
1788 struct qeth_reply *reply;
1789 struct timer_list timer;
1790
1791 QETH_DBF_TEXT(trace, 2, "sendctl");
1792
1793 qeth_setup_ccw(&card->write,iob->data,len);
1794
1795 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1796 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1797 card->seqno.trans_hdr++;
1798
1799 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
1800 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
1801 card->seqno.pdu_hdr++;
1802 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
1803 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
1804 iob->callback = qeth_release_buffer;
1805
1806 reply = qeth_alloc_reply(card);
1807 if (!reply) {
1808		PRINT_WARN("Could not allocate qeth_reply!\n");
1809 return -ENOMEM;
1810 }
1811 reply->callback = reply_cb;
1812 reply->param = reply_param;
1813 if (card->state == CARD_STATE_DOWN)
1814 reply->seqno = QETH_IDX_COMMAND_SEQNO;
1815 else
1816 reply->seqno = card->seqno.ipa++;
1817 init_timer(&timer);
1818 timer.function = qeth_cmd_timeout;
1819 timer.data = (unsigned long) reply;
1820 if (IS_IPA(iob->data))
1821 timer.expires = jiffies + QETH_IPA_TIMEOUT;
1822 else
1823 timer.expires = jiffies + QETH_TIMEOUT;
1824 init_waitqueue_head(&reply->wait_q);
1825 spin_lock_irqsave(&card->lock, flags);
1826 list_add_tail(&reply->list, &card->cmd_waiter_list);
1827 spin_unlock_irqrestore(&card->lock, flags);
1828 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1829 wait_event(card->wait_q,
1830 atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
1831 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1832 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1833 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1834 (addr_t) iob, 0, 0);
1835 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1836 if (rc){
1837 PRINT_WARN("qeth_send_control_data: "
1838 "ccw_device_start rc = %i\n", rc);
1839 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
1840 spin_lock_irqsave(&card->lock, flags);
1841 list_del_init(&reply->list);
1842 qeth_put_reply(reply);
1843 spin_unlock_irqrestore(&card->lock, flags);
1844 qeth_release_buffer(iob->channel, iob);
1845 atomic_set(&card->write.irq_pending, 0);
1846 wake_up(&card->wait_q);
1847 return rc;
1848 }
1849 add_timer(&timer);
1850 wait_event(reply->wait_q, reply->received);
1851 del_timer_sync(&timer);
1852 rc = reply->rc;
1853 qeth_put_reply(reply);
1854 return rc;
1855}
1856
1857static int
1858qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
1859 int (*reply_cb)
1860 (struct qeth_card *,struct qeth_reply*, unsigned long),
1861 void *reply_param)
1862{
1863 int rc;
1864 char prot_type;
1865
1866 QETH_DBF_TEXT(trace,4,"sendipa");
1867
1868 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
1869
1870 if (card->options.layer2)
1871 prot_type = QETH_PROT_LAYER2;
1872 else
1873 prot_type = QETH_PROT_TCPIP;
1874
1875 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data),&prot_type,1);
1876 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
1877 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
1878
1879 rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob,
1880 reply_cb, reply_param);
1881 return rc;
1882}
1883
1884
1885static int
1886qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1887 unsigned long data)
1888{
1889 struct qeth_cmd_buffer *iob;
1890
1891 QETH_DBF_TEXT(setup, 2, "cmenblcb");
1892
1893 iob = (struct qeth_cmd_buffer *) data;
1894 memcpy(&card->token.cm_filter_r,
1895 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
1896 QETH_MPC_TOKEN_LENGTH);
1897 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
1898 return 0;
1899}
1900
1901static int
1902qeth_cm_enable(struct qeth_card *card)
1903{
1904 int rc;
1905 struct qeth_cmd_buffer *iob;
1906
1907 QETH_DBF_TEXT(setup,2,"cmenable");
1908
1909 iob = qeth_wait_for_buffer(&card->write);
1910 memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
1911 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
1912 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1913 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
1914 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
1915
1916 rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
1917 qeth_cm_enable_cb, NULL);
1918 return rc;
1919}
1920
1921static int
1922qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
1923 unsigned long data)
1924{
1925
1926 struct qeth_cmd_buffer *iob;
1927
1928 QETH_DBF_TEXT(setup, 2, "cmsetpcb");
1929
1930 iob = (struct qeth_cmd_buffer *) data;
1931 memcpy(&card->token.cm_connection_r,
1932 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
1933 QETH_MPC_TOKEN_LENGTH);
1934 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
1935 return 0;
1936}
1937
1938static int
1939qeth_cm_setup(struct qeth_card *card)
1940{
1941 int rc;
1942 struct qeth_cmd_buffer *iob;
1943
1944 QETH_DBF_TEXT(setup,2,"cmsetup");
1945
1946 iob = qeth_wait_for_buffer(&card->write);
1947 memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
1948 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
1949 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1950 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
1951 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
1952 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
1953 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
1954 rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
1955 qeth_cm_setup_cb, NULL);
1956 return rc;
1957
1958}
1959
1960static int
1961qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1962 unsigned long data)
1963{
1964
1965 __u16 mtu, framesize;
1966 __u16 len;
1967 __u8 link_type;
1968 struct qeth_cmd_buffer *iob;
1969
1970 QETH_DBF_TEXT(setup, 2, "ulpenacb");
1971
1972 iob = (struct qeth_cmd_buffer *) data;
1973 memcpy(&card->token.ulp_filter_r,
1974 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
1975 QETH_MPC_TOKEN_LENGTH);
1976 if (qeth_get_mtu_out_of_mpc(card->info.type)) {
1977 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
1978 mtu = qeth_get_mtu_outof_framesize(framesize);
1979 if (!mtu) {
1980 iob->rc = -EINVAL;
1981 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
1982 return 0;
1983 }
1984 card->info.max_mtu = mtu;
1985 card->info.initial_mtu = mtu;
1986 card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
1987 } else {
1988 card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
1989 card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
1990 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1991 }
1992
1993 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
1994 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
1995 memcpy(&link_type,
1996 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
1997 card->info.link_type = link_type;
1998 } else
1999 card->info.link_type = 0;
2000 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2001 return 0;
2002}
2003
2004static int
2005qeth_ulp_enable(struct qeth_card *card)
2006{
2007 int rc;
2008 char prot_type;
2009 struct qeth_cmd_buffer *iob;
2010
2011	/* FIXME: trace view callbacks */
2012 QETH_DBF_TEXT(setup,2,"ulpenabl");
2013
2014 iob = qeth_wait_for_buffer(&card->write);
2015 memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
2016
2017 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
2018 (__u8) card->info.portno;
2019 if (card->options.layer2)
2020 prot_type = QETH_PROT_LAYER2;
2021 else
2022 prot_type = QETH_PROT_TCPIP;
2023
2024 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data),&prot_type,1);
2025 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2026 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2027 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2028 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2029 memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
2030 card->info.portname, 9);
2031 rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
2032 qeth_ulp_enable_cb, NULL);
2033 return rc;
2034
2035}
2036
2037static inline __u16
2038__raw_devno_from_bus_id(char *id)
2039{
2040 id += (strlen(id) - 4);
2041 return (__u16) simple_strtoul(id, &id, 16);
2042}
2043
2044static int
2045qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2046 unsigned long data)
2047{
2048 struct qeth_cmd_buffer *iob;
2049
2050 QETH_DBF_TEXT(setup, 2, "ulpstpcb");
2051
2052 iob = (struct qeth_cmd_buffer *) data;
2053 memcpy(&card->token.ulp_connection_r,
2054 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2055 QETH_MPC_TOKEN_LENGTH);
2056 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2057 return 0;
2058}
2059
2060static int
2061qeth_ulp_setup(struct qeth_card *card)
2062{
2063 int rc;
2064 __u16 temp;
2065 struct qeth_cmd_buffer *iob;
2066
2067 QETH_DBF_TEXT(setup,2,"ulpsetup");
2068
2069 iob = qeth_wait_for_buffer(&card->write);
2070 memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
2071
2072 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2073 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2074 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2075 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2076 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2077 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2078
2079 temp = __raw_devno_from_bus_id(CARD_DDEV_ID(card));
2080 memcpy(QETH_ULP_SETUP_CUA(iob->data), &temp, 2);
2081 temp = (card->info.cula << 8) + card->info.unit_addr2;
2082 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2083 rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
2084 qeth_ulp_setup_cb, NULL);
2085 return rc;
2086}
2087
2088static inline int
2089qeth_check_for_inbound_error(struct qeth_qdio_buffer *buf,
2090 unsigned int qdio_error,
2091 unsigned int siga_error)
2092{
2093 int rc = 0;
2094
2095 if (qdio_error || siga_error) {
2096 QETH_DBF_TEXT(trace, 2, "qdinerr");
2097 QETH_DBF_TEXT(qerr, 2, "qdinerr");
2098 QETH_DBF_TEXT_(qerr, 2, " F15=%02X",
2099 buf->buffer->element[15].flags & 0xff);
2100 QETH_DBF_TEXT_(qerr, 2, " F14=%02X",
2101 buf->buffer->element[14].flags & 0xff);
2102 QETH_DBF_TEXT_(qerr, 2, " qerr=%X", qdio_error);
2103 QETH_DBF_TEXT_(qerr, 2, " serr=%X", siga_error);
2104 rc = 1;
2105 }
2106 return rc;
2107}
2108
2109static inline struct sk_buff *
2110qeth_get_skb(unsigned int length)
2111{
2112 struct sk_buff* skb;
2113#ifdef CONFIG_QETH_VLAN
2114 if ((skb = dev_alloc_skb(length + VLAN_HLEN)))
2115 skb_reserve(skb, VLAN_HLEN);
2116#else
2117 skb = dev_alloc_skb(length);
2118#endif
2119 return skb;
2120}
2121
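/*
 * qeth_get_next_skb - extract the next packet from a QDIO input
 * buffer. The qeth_hdr must fit within a single buffer element, while
 * the payload may span several; data is copied element by element into
 * a freshly allocated skb. *__element and *__offset track the current
 * position across calls.
 */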
2122static inline struct sk_buff *
2123qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
2124 struct qdio_buffer_element **__element, int *__offset,
2125 struct qeth_hdr **hdr)
2126{
2127 struct qdio_buffer_element *element = *__element;
2128 int offset = *__offset;
2129 struct sk_buff *skb = NULL;
2130 int skb_len;
2131 void *data_ptr;
2132 int data_len;
2133
2134 QETH_DBF_TEXT(trace,6,"nextskb");
2135 /* qeth_hdr must not cross element boundaries */
2136 if (element->length < offset + sizeof(struct qeth_hdr)){
2137 if (qeth_is_last_sbale(element))
2138 return NULL;
2139 element++;
2140 offset = 0;
2141 if (element->length < sizeof(struct qeth_hdr))
2142 return NULL;
2143 }
2144 *hdr = element->addr + offset;
2145
2146 offset += sizeof(struct qeth_hdr);
2147 if (card->options.layer2)
2148 skb_len = (*hdr)->hdr.l2.pkt_length;
2149 else
2150 skb_len = (*hdr)->hdr.l3.length;
2151
2152 if (!skb_len)
2153 return NULL;
2154 if (card->options.fake_ll){
2155 if (!(skb = qeth_get_skb(skb_len + QETH_FAKE_LL_LEN)))
2156 goto no_mem;
2157 skb_pull(skb, QETH_FAKE_LL_LEN);
2158 } else if (!(skb = qeth_get_skb(skb_len)))
2159 goto no_mem;
2160 data_ptr = element->addr + offset;
2161 while (skb_len) {
2162 data_len = min(skb_len, (int)(element->length - offset));
2163 if (data_len)
2164 memcpy(skb_put(skb, data_len), data_ptr, data_len);
2165 skb_len -= data_len;
2166 if (skb_len){
2167 if (qeth_is_last_sbale(element)){
2168 QETH_DBF_TEXT(trace,4,"unexeob");
2169 QETH_DBF_TEXT_(trace,4,"%s",CARD_BUS_ID(card));
2170 QETH_DBF_TEXT(qerr,2,"unexeob");
2171 QETH_DBF_TEXT_(qerr,2,"%s",CARD_BUS_ID(card));
2172 QETH_DBF_HEX(misc,4,buffer,sizeof(*buffer));
2173 dev_kfree_skb_any(skb);
2174 card->stats.rx_errors++;
2175 return NULL;
2176 }
2177 element++;
2178 offset = 0;
2179 data_ptr = element->addr;
2180 } else {
2181 offset += data_len;
2182 }
2183 }
2184 *__element = element;
2185 *__offset = offset;
2186 return skb;
2187no_mem:
2188 if (net_ratelimit()){
2189 PRINT_WARN("No memory for packet received on %s.\n",
2190 QETH_CARD_IFNAME(card));
2191 QETH_DBF_TEXT(trace,2,"noskbmem");
2192 QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2193 }
2194 card->stats.rx_dropped++;
2195 return NULL;
2196}
2197
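/*
 * qeth_type_trans - variant of eth_type_trans: derive pkt_type and
 * protocol from the (possibly faked) Ethernet header; Token Ring
 * links are handed to tr_type_trans instead.
 */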
2198static inline unsigned short
2199qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2200{
2201 struct qeth_card *card;
2202 struct ethhdr *eth;
2203
2204 QETH_DBF_TEXT(trace,6,"typtrans");
2205
2206 card = (struct qeth_card *)dev->priv;
2207#ifdef CONFIG_TR
2208 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
2209 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
2210 return tr_type_trans(skb,dev);
2211#endif /* CONFIG_TR */
2212 skb->mac.raw = skb->data;
2213 skb_pull(skb, ETH_HLEN );
2214 eth = eth_hdr(skb);
2215
2216 if (*eth->h_dest & 1) {
2217 if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
2218 skb->pkt_type = PACKET_BROADCAST;
2219 else
2220 skb->pkt_type = PACKET_MULTICAST;
2221 } else if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
2222 skb->pkt_type = PACKET_OTHERHOST;
2223
2224 if (ntohs(eth->h_proto) >= 1536)
2225 return eth->h_proto;
2226 if (*(unsigned short *) (skb->data) == 0xFFFF)
2227 return htons(ETH_P_802_3);
2228 return htons(ETH_P_802_2);
2229}
2230
2231static inline void
2232qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
2233 struct qeth_hdr *hdr)
2234{
2235 struct ethhdr *fake_hdr;
2236 struct iphdr *ip_hdr;
2237
2238 QETH_DBF_TEXT(trace,5,"skbfake");
2239 skb->mac.raw = skb->data - QETH_FAKE_LL_LEN;
2240 /* this is a fake ethernet header */
2241 fake_hdr = (struct ethhdr *) skb->mac.raw;
2242
2243 /* the destination MAC address */
2244 switch (skb->pkt_type){
2245 case PACKET_MULTICAST:
2246 switch (skb->protocol){
2247#ifdef CONFIG_QETH_IPV6
2248 case __constant_htons(ETH_P_IPV6):
2249 ndisc_mc_map((struct in6_addr *)
2250 skb->data + QETH_FAKE_LL_V6_ADDR_POS,
2251 fake_hdr->h_dest, card->dev, 0);
2252 break;
2253#endif /* CONFIG_QETH_IPV6 */
2254 case __constant_htons(ETH_P_IP):
2255 ip_hdr = (struct iphdr *)skb->data;
2256 if (card->dev->type == ARPHRD_IEEE802_TR)
2257 ip_tr_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
2258 else
2259 ip_eth_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
2260 break;
2261 default:
2262 memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
2263 }
2264 break;
2265 case PACKET_BROADCAST:
2266 memset(fake_hdr->h_dest, 0xff, ETH_ALEN);
2267 break;
2268 default:
2269 memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
2270 }
2271 /* the source MAC address */
2272 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
2273 memcpy(fake_hdr->h_source, &hdr->hdr.l3.dest_addr[2], ETH_ALEN);
2274 else
2275 memset(fake_hdr->h_source, 0, ETH_ALEN);
2276 /* the protocol */
2277 fake_hdr->h_proto = skb->protocol;
2278}
2279
2280static inline void
2281qeth_rebuild_skb_vlan(struct qeth_card *card, struct sk_buff *skb,
2282 struct qeth_hdr *hdr)
2283{
2284#ifdef CONFIG_QETH_VLAN
2285 u16 *vlan_tag;
2286
2287 if (hdr->hdr.l3.ext_flags &
2288 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
2289 vlan_tag = (u16 *) skb_push(skb, VLAN_HLEN);
2290 *vlan_tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
2291 hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
2292 *(vlan_tag + 1) = skb->protocol;
2293 skb->protocol = __constant_htons(ETH_P_8021Q);
2294 }
2295#endif /* CONFIG_QETH_VLAN */
2296}
2297
2298static inline __u16
2299qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2300 struct qeth_hdr *hdr)
2301{
2302 unsigned short vlan_id = 0;
2303#ifdef CONFIG_QETH_VLAN
2304 struct vlan_hdr *vhdr;
2305#endif
2306
2307 skb->pkt_type = PACKET_HOST;
2308 skb->protocol = qeth_type_trans(skb, skb->dev);
2309 if (card->options.checksum_type == NO_CHECKSUMMING)
2310 skb->ip_summed = CHECKSUM_UNNECESSARY;
2311 else
2312 skb->ip_summed = CHECKSUM_NONE;
2313#ifdef CONFIG_QETH_VLAN
2314 if (hdr->hdr.l2.flags[2] & (QETH_LAYER2_FLAG_VLAN)) {
2315 vhdr = (struct vlan_hdr *) skb->data;
2316 skb->protocol =
2317 __constant_htons(vhdr->h_vlan_encapsulated_proto);
2318 vlan_id = hdr->hdr.l2.vlan_id;
2319 skb_pull(skb, VLAN_HLEN);
2320 }
2321#endif
2322 return vlan_id;
2323}
2324
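/*
 * qeth_rebuild_skb - reconstruct link-layer information for a packet
 * received in layer-3 mode: set protocol and pkt_type from the qeth
 * header flags, handle VLAN tags and the optional fake link-layer
 * header, and derive the checksum status.
 */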
2325static inline void
2326qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2327 struct qeth_hdr *hdr)
2328{
2329#ifdef CONFIG_QETH_IPV6
2330 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
2331 skb->pkt_type = PACKET_HOST;
2332 skb->protocol = qeth_type_trans(skb, card->dev);
2333 return;
2334 }
2335#endif /* CONFIG_QETH_IPV6 */
2336 skb->protocol = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
2337 ETH_P_IP);
2338 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK){
2339 case QETH_CAST_UNICAST:
2340 skb->pkt_type = PACKET_HOST;
2341 break;
2342 case QETH_CAST_MULTICAST:
2343 skb->pkt_type = PACKET_MULTICAST;
2344 card->stats.multicast++;
2345 break;
2346 case QETH_CAST_BROADCAST:
2347 skb->pkt_type = PACKET_BROADCAST;
2348 card->stats.multicast++;
2349 break;
2350 case QETH_CAST_ANYCAST:
2351 case QETH_CAST_NOCAST:
2352 default:
2353 skb->pkt_type = PACKET_HOST;
2354 }
2355 qeth_rebuild_skb_vlan(card, skb, hdr);
2356 if (card->options.fake_ll)
2357 qeth_rebuild_skb_fake_ll(card, skb, hdr);
2358 else
2359 skb->mac.raw = skb->data;
2360 skb->ip_summed = card->options.checksum_type;
2361 if (card->options.checksum_type == HW_CHECKSUMMING){
2362 if ( (hdr->hdr.l3.ext_flags &
2363 (QETH_HDR_EXT_CSUM_HDR_REQ |
2364 QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
2365 (QETH_HDR_EXT_CSUM_HDR_REQ |
2366 QETH_HDR_EXT_CSUM_TRANSP_REQ) )
2367 skb->ip_summed = CHECKSUM_UNNECESSARY;
2368 else
2369 skb->ip_summed = SW_CHECKSUMMING;
2370 }
2371}
2372
2373static inline void
2374qeth_process_inbound_buffer(struct qeth_card *card,
2375 struct qeth_qdio_buffer *buf, int index)
2376{
2377 struct qdio_buffer_element *element;
2378 struct sk_buff *skb;
2379 struct qeth_hdr *hdr;
2380 int offset;
2381 int rxrc;
2382 __u16 vlan_tag = 0;
2383
2384 /* get first element of current buffer */
2385 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
2386 offset = 0;
2387#ifdef CONFIG_QETH_PERF_STATS
2388 card->perf_stats.bufs_rec++;
2389#endif
2390 while((skb = qeth_get_next_skb(card, buf->buffer, &element,
2391 &offset, &hdr))) {
2392 skb->dev = card->dev;
2393 if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
2394 vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr);
2395 else
2396 qeth_rebuild_skb(card, skb, hdr);
2397 		/* is device UP? */
2398 if (!(card->dev->flags & IFF_UP)){
2399 dev_kfree_skb_any(skb);
2400 continue;
2401 }
2402#ifdef CONFIG_QETH_VLAN
2403 if (vlan_tag)
2404 vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag);
2405 else
2406#endif
2407 rxrc = netif_rx(skb);
2408 card->dev->last_rx = jiffies;
2409 card->stats.rx_packets++;
2410 card->stats.rx_bytes += skb->len;
2411 }
2412}
2413
2414static inline struct qeth_buffer_pool_entry *
2415qeth_get_buffer_pool_entry(struct qeth_card *card)
2416{
2417 struct qeth_buffer_pool_entry *entry;
2418
2419 QETH_DBF_TEXT(trace, 6, "gtbfplen");
2420 if (!list_empty(&card->qdio.in_buf_pool.entry_list)) {
2421 entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2422 struct qeth_buffer_pool_entry, list);
2423 list_del_init(&entry->list);
2424 return entry;
2425 }
2426 return NULL;
2427}
2428
2429static inline void
2430qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2431{
2432 struct qeth_buffer_pool_entry *pool_entry;
2433 int i;
2434
2435 pool_entry = qeth_get_buffer_pool_entry(card);
2436 /*
2437 * since the buffer is accessed only from the input_tasklet
2438 * there shouldn't be a need to synchronize; also, since we use
2439 	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2440 * buffers
2441 */
2442 BUG_ON(!pool_entry);
2443
2444 buf->pool_entry = pool_entry;
2445 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
2446 buf->buffer->element[i].length = PAGE_SIZE;
2447 buf->buffer->element[i].addr = pool_entry->elements[i];
2448 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2449 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
2450 else
2451 buf->buffer->element[i].flags = 0;
2452 }
2453 buf->state = QETH_QDIO_BUF_EMPTY;
2454}
2455
2456static inline void
2457qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2458 struct qeth_qdio_out_buffer *buf)
2459{
2460 int i;
2461 struct sk_buff *skb;
2462
2463 /* is PCI flag set on buffer? */
2464 if (buf->buffer->element[0].flags & 0x40)
2465 atomic_dec(&queue->set_pci_flags_count);
2466
2467 while ((skb = skb_dequeue(&buf->skb_list))){
2468 atomic_dec(&skb->users);
2469 dev_kfree_skb_any(skb);
2470 }
2471 qeth_eddp_buf_release_contexts(buf);
2472 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i){
2473 buf->buffer->element[i].length = 0;
2474 buf->buffer->element[i].addr = NULL;
2475 buf->buffer->element[i].flags = 0;
2476 }
2477 buf->next_element_to_fill = 0;
2478 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
2479}
2480
2481static inline void
2482qeth_queue_input_buffer(struct qeth_card *card, int index)
2483{
2484 struct qeth_qdio_q *queue = card->qdio.in_q;
2485 int count;
2486 int i;
2487 int rc;
2488
2489 QETH_DBF_TEXT(trace,6,"queinbuf");
2490 count = (index < queue->next_buf_to_init)?
2491 card->qdio.in_buf_pool.buf_count -
2492 (queue->next_buf_to_init - index) :
2493 card->qdio.in_buf_pool.buf_count -
2494 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
2495 /* only requeue at a certain threshold to avoid SIGAs */
2496 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)){
2497 for (i = queue->next_buf_to_init;
2498 i < queue->next_buf_to_init + count; ++i)
2499 qeth_init_input_buffer(card,
2500 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]);
2501 /*
2502 	 * according to old code, requeueing all 128 buffers at once should
2503 	 * be avoided in order to benefit from PCI avoidance.
2504 * this function keeps at least one buffer (the buffer at
2505 * 'index') un-requeued -> this buffer is the first buffer that
2506 * will be requeued the next time
2507 */
2508#ifdef CONFIG_QETH_PERF_STATS
2509 card->perf_stats.inbound_do_qdio_cnt++;
2510 card->perf_stats.inbound_do_qdio_start_time = qeth_get_micros();
2511#endif
2512 rc = do_QDIO(CARD_DDEV(card),
2513 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
2514 0, queue->next_buf_to_init, count, NULL);
2515#ifdef CONFIG_QETH_PERF_STATS
2516 card->perf_stats.inbound_do_qdio_time += qeth_get_micros() -
2517 card->perf_stats.inbound_do_qdio_start_time;
2518#endif
2519 if (rc){
2520 			PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
2521 				   "returned %i (device %s).\n",
2522 rc, CARD_DDEV_ID(card));
2523 QETH_DBF_TEXT(trace,2,"qinberr");
2524 QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2525 }
2526 queue->next_buf_to_init = (queue->next_buf_to_init + count) %
2527 QDIO_MAX_BUFFERS_PER_Q;
2528 }
2529}
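
/*
 * [Editor's note -- illustrative sketch, not part of the driver.]  The
 * count computation in qeth_queue_input_buffer() above measures how many
 * of the pool's buffers the hardware has handed back on the 128-slot
 * ring and can therefore be re-initialized and requeued.  A stand-alone
 * model of the same arithmetic (RING_SIZE stands in for
 * QDIO_MAX_BUFFERS_PER_Q; the pool size of 16 is just a demo value):
 */
#include <assert.h>

#define RING_SIZE 128

static int requeue_count(int index, int next_buf_to_init, int buf_count)
{
	if (index < next_buf_to_init)
		return buf_count - (next_buf_to_init - index);
	return buf_count - (next_buf_to_init + RING_SIZE - index);
}

int main(void)
{
	/* pool of 16 buffers; 15 were queued at init, next_buf_to_init=15 */
	assert(requeue_count(0, 15, 16) == 1);   /* one buffer came back */
	assert(requeue_count(14, 15, 16) == 15); /* almost all came back */
	assert(requeue_count(15, 16, 16) == 15); /* same, shifted by one */
	return 0;
}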
2530
2531static inline void
2532qeth_put_buffer_pool_entry(struct qeth_card *card,
2533 struct qeth_buffer_pool_entry *entry)
2534{
2535 QETH_DBF_TEXT(trace, 6, "ptbfplen");
2536 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
2537}
2538
2539static void
2540qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
2541 unsigned int qdio_err, unsigned int siga_err,
2542 unsigned int queue, int first_element, int count,
2543 unsigned long card_ptr)
2544{
2545 struct net_device *net_dev;
2546 struct qeth_card *card;
2547 struct qeth_qdio_buffer *buffer;
2548 int index;
2549 int i;
2550
2551 QETH_DBF_TEXT(trace, 6, "qdinput");
2552 card = (struct qeth_card *) card_ptr;
2553 net_dev = card->dev;
2554#ifdef CONFIG_QETH_PERF_STATS
2555 card->perf_stats.inbound_cnt++;
2556 card->perf_stats.inbound_start_time = qeth_get_micros();
2557#endif
2558 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
2559 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
2560 QETH_DBF_TEXT(trace, 1,"qdinchk");
2561 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2562 QETH_DBF_TEXT_(trace,1,"%04X%04X",first_element,count);
2563 QETH_DBF_TEXT_(trace,1,"%04X%04X", queue, status);
2564 qeth_schedule_recovery(card);
2565 return;
2566 }
2567 }
2568 for (i = first_element; i < (first_element + count); ++i) {
2569 index = i % QDIO_MAX_BUFFERS_PER_Q;
2570 buffer = &card->qdio.in_q->bufs[index];
2571 if (!((status == QDIO_STATUS_LOOK_FOR_ERROR) &&
2572 qeth_check_for_inbound_error(buffer, qdio_err, siga_err)))
2573 qeth_process_inbound_buffer(card, buffer, index);
2574 /* clear buffer and give back to hardware */
2575 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
2576 qeth_queue_input_buffer(card, index);
2577 }
2578#ifdef CONFIG_QETH_PERF_STATS
2579 card->perf_stats.inbound_time += qeth_get_micros() -
2580 card->perf_stats.inbound_start_time;
2581#endif
2582}
2583
2584static inline int
2585qeth_handle_send_error(struct qeth_card *card,
2586 struct qeth_qdio_out_buffer *buffer,
2587 int qdio_err, int siga_err)
2588{
2589 int sbalf15 = buffer->buffer->element[15].flags & 0xff;
2590 int cc = siga_err & 3;
2591
2592 QETH_DBF_TEXT(trace, 6, "hdsnderr");
2593 switch (cc) {
2594 case 0:
2595 if (qdio_err){
2596 QETH_DBF_TEXT(trace, 1,"lnkfail");
2597 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2598 QETH_DBF_TEXT_(trace,1,"%04x %02x",
2599 (u16)qdio_err, (u8)sbalf15);
2600 return QETH_SEND_ERROR_LINK_FAILURE;
2601 }
2602 return QETH_SEND_ERROR_NONE;
2603 case 2:
2604 if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
2605 QETH_DBF_TEXT(trace, 1, "SIGAcc2B");
2606 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2607 return QETH_SEND_ERROR_KICK_IT;
2608 }
2609 if ((sbalf15 >= 15) && (sbalf15 <= 31))
2610 return QETH_SEND_ERROR_RETRY;
2611 return QETH_SEND_ERROR_LINK_FAILURE;
2612 /* look at qdio_error and sbalf 15 */
2613 case 1:
2614 QETH_DBF_TEXT(trace, 1, "SIGAcc1");
2615 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2616 return QETH_SEND_ERROR_LINK_FAILURE;
2617 case 3:
2618 QETH_DBF_TEXT(trace, 1, "SIGAcc3");
2619 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2620 return QETH_SEND_ERROR_KICK_IT;
2621 }
2622 return QETH_SEND_ERROR_LINK_FAILURE;
2623}
2624
2625void
2626qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2627 int index, int count)
2628{
2629 struct qeth_qdio_out_buffer *buf;
2630 int rc;
2631 int i;
2632
2633 QETH_DBF_TEXT(trace, 6, "flushbuf");
2634
2635 for (i = index; i < index + count; ++i) {
2636 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2637 buf->buffer->element[buf->next_element_to_fill - 1].flags |=
2638 SBAL_FLAGS_LAST_ENTRY;
2639
2640 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
2641 continue;
2642
2643 if (!queue->do_pack){
2644 if ((atomic_read(&queue->used_buffers) >=
2645 (QETH_HIGH_WATERMARK_PACK -
2646 QETH_WATERMARK_PACK_FUZZ)) &&
2647 !atomic_read(&queue->set_pci_flags_count)){
2648 /* it's likely that we'll go to packing
2649 * mode soon */
2650 atomic_inc(&queue->set_pci_flags_count);
2651 buf->buffer->element[0].flags |= 0x40;
2652 }
2653 } else {
2654 if (!atomic_read(&queue->set_pci_flags_count)){
2655 /*
2656 * there's no outstanding PCI any more, so we
2657 			 * have to request a PCI to be sure that the PCI
2658 			 * will be raised at some time in the future; then we
2659 * can flush packed buffers that might still be
2660 * hanging around, which can happen if no
2661 * further send was requested by the stack
2662 */
2663 atomic_inc(&queue->set_pci_flags_count);
2664 buf->buffer->element[0].flags |= 0x40;
2665 }
2666 }
2667 }
2668
2669 queue->card->dev->trans_start = jiffies;
2670#ifdef CONFIG_QETH_PERF_STATS
2671 queue->card->perf_stats.outbound_do_qdio_cnt++;
2672 queue->card->perf_stats.outbound_do_qdio_start_time = qeth_get_micros();
2673#endif
2674 if (under_int)
2675 rc = do_QDIO(CARD_DDEV(queue->card),
2676 QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_UNDER_INTERRUPT,
2677 queue->queue_no, index, count, NULL);
2678 else
2679 rc = do_QDIO(CARD_DDEV(queue->card), QDIO_FLAG_SYNC_OUTPUT,
2680 queue->queue_no, index, count, NULL);
2681#ifdef CONFIG_QETH_PERF_STATS
2682 queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() -
2683 queue->card->perf_stats.outbound_do_qdio_start_time;
2684#endif
2685 if (rc){
2686 QETH_DBF_SPRINTF(trace, 0, "qeth_flush_buffers: do_QDIO "
2687 "returned error (%i) on device %s.",
2688 rc, CARD_DDEV_ID(queue->card));
2689 QETH_DBF_TEXT(trace, 2, "flushbuf");
2690 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
2691 queue->card->stats.tx_errors += count;
2692 		/* this must not happen under normal circumstances. If it
2693 		 * happens, something is really wrong -> recover */
2694 qeth_schedule_recovery(queue->card);
2695 return;
2696 }
2697 atomic_add(count, &queue->used_buffers);
2698#ifdef CONFIG_QETH_PERF_STATS
2699 queue->card->perf_stats.bufs_sent += count;
2700#endif
2701}
2702
2703/*
2704 * Switch to packing state if the number of used buffers on a queue
2705 * reaches a certain limit.
2706 */
2707static inline void
2708qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2709{
2710 if (!queue->do_pack) {
2711 if (atomic_read(&queue->used_buffers)
2712 >= QETH_HIGH_WATERMARK_PACK){
2713 /* switch non-PACKING -> PACKING */
2714 QETH_DBF_TEXT(trace, 6, "np->pack");
2715#ifdef CONFIG_QETH_PERF_STATS
2716 queue->card->perf_stats.sc_dp_p++;
2717#endif
2718 queue->do_pack = 1;
2719 }
2720 }
2721}
2722
2723/*
2724 * Switches from packing to non-packing mode. If there is a packing
2725 * buffer on the queue this buffer will be prepared to be flushed.
2726 * In that case 1 is returned to inform the caller. If no buffer
2727 * has to be flushed, zero is returned.
2728 */
2729static inline int
2730qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2731{
2732 struct qeth_qdio_out_buffer *buffer;
2733 int flush_count = 0;
2734
2735 if (queue->do_pack) {
2736 if (atomic_read(&queue->used_buffers)
2737 <= QETH_LOW_WATERMARK_PACK) {
2738 /* switch PACKING -> non-PACKING */
2739 QETH_DBF_TEXT(trace, 6, "pack->np");
2740#ifdef CONFIG_QETH_PERF_STATS
2741 queue->card->perf_stats.sc_p_dp++;
2742#endif
2743 queue->do_pack = 0;
2744 /* flush packing buffers */
2745 buffer = &queue->bufs[queue->next_buf_to_fill];
2746 if ((atomic_read(&buffer->state) ==
2747 QETH_QDIO_BUF_EMPTY) &&
2748 (buffer->next_element_to_fill > 0)) {
2749 atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
2750 flush_count++;
2751 queue->next_buf_to_fill =
2752 (queue->next_buf_to_fill + 1) %
2753 QDIO_MAX_BUFFERS_PER_Q;
2754 }
2755 }
2756 }
2757 return flush_count;
2758}
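
/*
 * [Editor's note -- illustrative sketch, not part of the driver.]  The two
 * helpers above implement a classic hysteresis: packing starts once the
 * high watermark is crossed and only stops again below the (lower) low
 * watermark, so the queue does not flip modes on every packet.  The
 * watermark values below are made up for the demo; the real ones are the
 * QETH_HIGH_WATERMARK_PACK/QETH_LOW_WATERMARK_PACK constants in qeth.h.
 */
#include <stdio.h>

#define HIGH_WATERMARK 45	/* assumed demo value */
#define LOW_WATERMARK  10	/* assumed demo value */

struct queue_model { int used_buffers; int do_pack; };

static void update_packing(struct queue_model *q)
{
	if (!q->do_pack && q->used_buffers >= HIGH_WATERMARK)
		q->do_pack = 1;		/* heavy load -> start packing */
	else if (q->do_pack && q->used_buffers <= LOW_WATERMARK)
		q->do_pack = 0;		/* load dropped -> stop packing */
	/* between the watermarks the current mode is kept */
}

int main(void)
{
	struct queue_model q = { 0, 0 };
	int load[] = { 5, 50, 30, 11, 10, 5 };
	int i;

	for (i = 0; i < 6; i++) {
		q.used_buffers = load[i];
		update_packing(&q);
		printf("used=%2d do_pack=%d\n", q.used_buffers, q.do_pack);
	}
	return 0;	/* do_pack goes 0,1,1,1,0,0 */
}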
2759
2760/*
2761 * Called to flush a packing buffer if no more pci flags are on the queue.
2762 * Checks if there is a packing buffer and prepares it to be flushed.
2763 * In that case returns 1, otherwise zero.
2764 */
2765static inline int
2766qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2767{
2768 struct qeth_qdio_out_buffer *buffer;
2769
2770 buffer = &queue->bufs[queue->next_buf_to_fill];
2771 if((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
2772 (buffer->next_element_to_fill > 0)){
2773 /* it's a packing buffer */
2774 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
2775 queue->next_buf_to_fill =
2776 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
2777 return 1;
2778 }
2779 return 0;
2780}
2781
2782static inline void
2783qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2784{
2785 int index;
2786 int flush_cnt = 0;
2787 int q_was_packing = 0;
2788
2789 /*
2790 	 * check if we have to switch to non-packing mode or if
2791 * we have to get a pci flag out on the queue
2792 */
2793 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
2794 !atomic_read(&queue->set_pci_flags_count)){
2795 if (atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
2796 QETH_OUT_Q_UNLOCKED) {
2797 /*
2798 * If we get in here, there was no action in
2799 * do_send_packet. So, we check if there is a
2800 * packing buffer to be flushed here.
2801 */
2802 netif_stop_queue(queue->card->dev);
2803 index = queue->next_buf_to_fill;
2804 q_was_packing = queue->do_pack;
2805 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
2806 if (!flush_cnt &&
2807 !atomic_read(&queue->set_pci_flags_count))
2808 flush_cnt +=
2809 qeth_flush_buffers_on_no_pci(queue);
2810#ifdef CONFIG_QETH_PERF_STATS
2811 if (q_was_packing)
2812 queue->card->perf_stats.bufs_sent_pack +=
2813 flush_cnt;
2814#endif
2815 if (flush_cnt)
2816 qeth_flush_buffers(queue, 1, index, flush_cnt);
2817 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
2818 }
2819 }
2820}
2821
2822static void
2823qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
2824 unsigned int qdio_error, unsigned int siga_error,
2825 unsigned int __queue, int first_element, int count,
2826 unsigned long card_ptr)
2827{
2828 struct qeth_card *card = (struct qeth_card *) card_ptr;
2829 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
2830 struct qeth_qdio_out_buffer *buffer;
2831 int i;
2832
2833 QETH_DBF_TEXT(trace, 6, "qdouhdl");
2834 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
2835 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
2836 QETH_DBF_SPRINTF(trace, 2, "On device %s: "
2837 "received active check "
2838 "condition (0x%08x).",
2839 CARD_BUS_ID(card), status);
2840 QETH_DBF_TEXT(trace, 2, "chkcond");
2841 QETH_DBF_TEXT_(trace, 2, "%08x", status);
2842 netif_stop_queue(card->dev);
2843 qeth_schedule_recovery(card);
2844 return;
2845 }
2846 }
2847#ifdef CONFIG_QETH_PERF_STATS
2848 card->perf_stats.outbound_handler_cnt++;
2849 card->perf_stats.outbound_handler_start_time = qeth_get_micros();
2850#endif
2851 for(i = first_element; i < (first_element + count); ++i){
2852 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2853 		/* we only handle the KICK_IT error by doing a recovery */
2854 if (qeth_handle_send_error(card, buffer, qdio_error, siga_error)
2855 == QETH_SEND_ERROR_KICK_IT){
2856 netif_stop_queue(card->dev);
2857 qeth_schedule_recovery(card);
2858 return;
2859 }
2860 qeth_clear_output_buffer(queue, buffer);
2861 }
2862 atomic_sub(count, &queue->used_buffers);
2863 /* check if we need to do something on this outbound queue */
2864 if (card->info.type != QETH_CARD_TYPE_IQD)
2865 qeth_check_outbound_queue(queue);
2866
2867 netif_wake_queue(queue->card->dev);
2868#ifdef CONFIG_QETH_PERF_STATS
2869 card->perf_stats.outbound_handler_time += qeth_get_micros() -
2870 card->perf_stats.outbound_handler_start_time;
2871#endif
2872}
2873
2874static void
2875qeth_create_qib_param_field(struct qeth_card *card, char *param_field)
2876{
2877
2878 param_field[0] = _ascebc['P'];
2879 param_field[1] = _ascebc['C'];
2880 param_field[2] = _ascebc['I'];
2881 param_field[3] = _ascebc['T'];
2882 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2883 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2884 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
2885}
2886
2887static void
2888qeth_create_qib_param_field_blkt(struct qeth_card *card, char *param_field)
2889{
2890 param_field[16] = _ascebc['B'];
2891 param_field[17] = _ascebc['L'];
2892 param_field[18] = _ascebc['K'];
2893 param_field[19] = _ascebc['T'];
2894 *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
2895 *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
2896 *((unsigned int *) (&param_field[28])) = card->info.blkt.inter_packet_jumbo;
2897}
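
/*
 * [Editor's note -- illustrative sketch, not part of the driver.]  The two
 * helpers above serialize tunables into the QIB parameter field as an
 * EBCDIC tag followed by three 32-bit values each (the tags are translated
 * via the kernel's _ascebc table).  Assuming a 4-byte unsigned int and no
 * padding (true on s390), the resulting layout is:
 */
struct qib_parm_layout {
	unsigned char pcit_tag[4];	  /* off  0: 'P','C','I','T' (EBCDIC) */
	unsigned int  pci_threshold_a;	  /* off  4: QETH_PCI_THRESHOLD_A */
	unsigned int  pci_threshold_b;	  /* off  8: QETH_PCI_THRESHOLD_B */
	unsigned int  pci_timer_value;	  /* off 12: QETH_PCI_TIMER_VALUE */
	unsigned char blkt_tag[4];	  /* off 16: 'B','L','K','T' (EBCDIC) */
	unsigned int  time_total;	  /* off 20 */
	unsigned int  inter_packet;	  /* off 24 */
	unsigned int  inter_packet_jumbo; /* off 28 */
};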
2898
2899static void
2900qeth_initialize_working_pool_list(struct qeth_card *card)
2901{
2902 struct qeth_buffer_pool_entry *entry;
2903
2904 QETH_DBF_TEXT(trace,5,"inwrklst");
2905
2906 list_for_each_entry(entry,
2907 &card->qdio.init_pool.entry_list, init_list) {
2908 qeth_put_buffer_pool_entry(card,entry);
2909 }
2910}
2911
2912static void
2913qeth_clear_working_pool_list(struct qeth_card *card)
2914{
2915 struct qeth_buffer_pool_entry *pool_entry, *tmp;
2916
2917 QETH_DBF_TEXT(trace,5,"clwrklst");
2918 list_for_each_entry_safe(pool_entry, tmp,
2919 &card->qdio.in_buf_pool.entry_list, list){
2920 list_del(&pool_entry->list);
2921 }
2922}
2923
2924static void
2925qeth_free_buffer_pool(struct qeth_card *card)
2926{
2927 struct qeth_buffer_pool_entry *pool_entry, *tmp;
2928 int i=0;
2929 QETH_DBF_TEXT(trace,5,"freepool");
2930 list_for_each_entry_safe(pool_entry, tmp,
2931 &card->qdio.init_pool.entry_list, init_list){
2932 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
2933 free_page((unsigned long)pool_entry->elements[i]);
2934 list_del(&pool_entry->init_list);
2935 kfree(pool_entry);
2936 }
2937}
2938
2939static int
2940qeth_alloc_buffer_pool(struct qeth_card *card)
2941{
2942 struct qeth_buffer_pool_entry *pool_entry;
2943 void *ptr;
2944 int i, j;
2945
2946 QETH_DBF_TEXT(trace,5,"alocpool");
2947 for (i = 0; i < card->qdio.init_pool.buf_count; ++i){
2948 pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
2949 if (!pool_entry){
2950 qeth_free_buffer_pool(card);
2951 return -ENOMEM;
2952 }
2953 for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
2954 ptr = (void *) __get_free_page(GFP_KERNEL);
2955 if (!ptr) {
2956 while (j > 0)
2957 free_page((unsigned long)
2958 pool_entry->elements[--j]);
2959 kfree(pool_entry);
2960 qeth_free_buffer_pool(card);
2961 return -ENOMEM;
2962 }
2963 pool_entry->elements[j] = ptr;
2964 }
2965 list_add(&pool_entry->init_list,
2966 &card->qdio.init_pool.entry_list);
2967 }
2968 return 0;
2969}
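
/*
 * [Editor's note -- illustrative sketch, not part of the driver.]
 * qeth_alloc_buffer_pool() above uses the usual unwind idiom: when a page
 * allocation fails mid-entry, the pages of the half-built entry are freed
 * in reverse, the entry itself is freed, and qeth_free_buffer_pool()
 * releases all entries completed so far.  The same pattern, distilled:
 */
#include <stdlib.h>

static void **alloc_array(int n, size_t sz)
{
	void **v = calloc(n, sizeof(*v));
	int i;

	if (!v)
		return NULL;
	for (i = 0; i < n; i++) {
		v[i] = malloc(sz);
		if (!v[i]) {
			while (i > 0)	/* unwind the partial work */
				free(v[--i]);
			free(v);
			return NULL;
		}
	}
	return v;
}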
2970
2971int
2972qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
2973{
2974 QETH_DBF_TEXT(trace, 2, "realcbp");
2975
2976 if ((card->state != CARD_STATE_DOWN) &&
2977 (card->state != CARD_STATE_RECOVER))
2978 return -EPERM;
2979
2980 	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
2981 qeth_clear_working_pool_list(card);
2982 qeth_free_buffer_pool(card);
2983 card->qdio.in_buf_pool.buf_count = bufcnt;
2984 card->qdio.init_pool.buf_count = bufcnt;
2985 return qeth_alloc_buffer_pool(card);
2986}
2987
2988static int
2989qeth_alloc_qdio_buffers(struct qeth_card *card)
2990{
2991 int i, j;
2992
2993 QETH_DBF_TEXT(setup, 2, "allcqdbf");
2994
2995 if (card->qdio.state == QETH_QDIO_ALLOCATED)
2996 return 0;
2997
2998 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL);
2999 if (!card->qdio.in_q)
3000 		return -ENOMEM;
3001 QETH_DBF_TEXT(setup, 2, "inq");
3002 QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *));
3003 memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
3004 /* give inbound qeth_qdio_buffers their qdio_buffers */
3005 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3006 card->qdio.in_q->bufs[i].buffer =
3007 &card->qdio.in_q->qdio_bufs[i];
3008 /* inbound buffer pool */
3009 if (qeth_alloc_buffer_pool(card)){
3010 kfree(card->qdio.in_q);
3011 return -ENOMEM;
3012 }
3013 /* outbound */
3014 card->qdio.out_qs =
3015 kmalloc(card->qdio.no_out_queues *
3016 sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
3017 if (!card->qdio.out_qs){
3018 qeth_free_buffer_pool(card);
3019 return -ENOMEM;
3020 }
3021 for (i = 0; i < card->qdio.no_out_queues; ++i){
3022 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
3023 GFP_KERNEL);
3024 if (!card->qdio.out_qs[i]){
3025 while (i > 0)
3026 kfree(card->qdio.out_qs[--i]);
3027 kfree(card->qdio.out_qs);
3028 return -ENOMEM;
3029 }
3030 QETH_DBF_TEXT_(setup, 2, "outq %i", i);
3031 QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
3032 memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
3033 card->qdio.out_qs[i]->queue_no = i;
3034 /* give outbound qeth_qdio_buffers their qdio_buffers */
3035 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
3036 card->qdio.out_qs[i]->bufs[j].buffer =
3037 &card->qdio.out_qs[i]->qdio_bufs[j];
3038 skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
3039 skb_list);
3040 INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
3041 }
3042 }
3043 card->qdio.state = QETH_QDIO_ALLOCATED;
3044 return 0;
3045}
3046
3047static void
3048qeth_free_qdio_buffers(struct qeth_card *card)
3049{
3050 int i, j;
3051
3052 QETH_DBF_TEXT(trace, 2, "freeqdbf");
3053 if (card->qdio.state == QETH_QDIO_UNINITIALIZED)
3054 return;
3055 kfree(card->qdio.in_q);
3056 /* inbound buffer pool */
3057 qeth_free_buffer_pool(card);
3058 /* free outbound qdio_qs */
3059 for (i = 0; i < card->qdio.no_out_queues; ++i){
3060 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
3061 qeth_clear_output_buffer(card->qdio.out_qs[i],
3062 &card->qdio.out_qs[i]->bufs[j]);
3063 kfree(card->qdio.out_qs[i]);
3064 }
3065 kfree(card->qdio.out_qs);
3066 card->qdio.state = QETH_QDIO_UNINITIALIZED;
3067}
3068
3069static void
3070qeth_clear_qdio_buffers(struct qeth_card *card)
3071{
3072 int i, j;
3073
3074 QETH_DBF_TEXT(trace, 2, "clearqdbf");
3075 /* clear outbound buffers to free skbs */
3076 for (i = 0; i < card->qdio.no_out_queues; ++i)
3077 if (card->qdio.out_qs[i]){
3078 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
3079 qeth_clear_output_buffer(card->qdio.out_qs[i],
3080 &card->qdio.out_qs[i]->bufs[j]);
3081 }
3082}
3083
3084static void
3085qeth_init_qdio_info(struct qeth_card *card)
3086{
3087 QETH_DBF_TEXT(setup, 4, "intqdinf");
3088 card->qdio.state = QETH_QDIO_UNINITIALIZED;
3089 /* inbound */
3090 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
3091 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
3092 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
3093 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
3094 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
3095 /* outbound */
3096 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
3097 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
3098}
3099
3100static int
3101qeth_init_qdio_queues(struct qeth_card *card)
3102{
3103 int i, j;
3104 int rc;
3105
3106 QETH_DBF_TEXT(setup, 2, "initqdqs");
3107
3108 /* inbound queue */
3109 memset(card->qdio.in_q->qdio_bufs, 0,
3110 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
3111 qeth_initialize_working_pool_list(card);
3112 	/* give only as many buffers to hardware as we have buffer pool entries */
3113 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
3114 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3115 card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1;
3116 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
3117 card->qdio.in_buf_pool.buf_count - 1, NULL);
3118 if (rc) {
3119 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3120 return rc;
3121 }
3122 rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
3123 if (rc) {
3124 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3125 return rc;
3126 }
3127 /* outbound queue */
3128 for (i = 0; i < card->qdio.no_out_queues; ++i){
3129 memset(card->qdio.out_qs[i]->qdio_bufs, 0,
3130 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
3131 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
3132 qeth_clear_output_buffer(card->qdio.out_qs[i],
3133 &card->qdio.out_qs[i]->bufs[j]);
3134 }
3135 card->qdio.out_qs[i]->card = card;
3136 card->qdio.out_qs[i]->next_buf_to_fill = 0;
3137 card->qdio.out_qs[i]->do_pack = 0;
3138 atomic_set(&card->qdio.out_qs[i]->used_buffers,0);
3139 atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
3140 atomic_set(&card->qdio.out_qs[i]->state,
3141 QETH_OUT_Q_UNLOCKED);
3142 }
3143 return 0;
3144}
3145
3146static int
3147qeth_qdio_establish(struct qeth_card *card)
3148{
3149 struct qdio_initialize init_data;
3150 char *qib_param_field;
3151 struct qdio_buffer **in_sbal_ptrs;
3152 struct qdio_buffer **out_sbal_ptrs;
3153 int i, j, k;
3154 int rc;
3155
3156 QETH_DBF_TEXT(setup, 2, "qdioest");
3157
3158 qib_param_field = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
3159 GFP_KERNEL);
3160 if (!qib_param_field)
3161 return -ENOMEM;
3162
3163 memset(qib_param_field, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(char));
3164
3165 qeth_create_qib_param_field(card, qib_param_field);
3166 qeth_create_qib_param_field_blkt(card, qib_param_field);
3167
3168 in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
3169 GFP_KERNEL);
3170 if (!in_sbal_ptrs) {
3171 kfree(qib_param_field);
3172 return -ENOMEM;
3173 }
3174 for(i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3175 in_sbal_ptrs[i] = (struct qdio_buffer *)
3176 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
3177
3178 out_sbal_ptrs =
3179 kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
3180 sizeof(void *), GFP_KERNEL);
3181 if (!out_sbal_ptrs) {
3182 kfree(in_sbal_ptrs);
3183 kfree(qib_param_field);
3184 return -ENOMEM;
3185 }
3186 for(i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
3187 for(j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k){
3188 out_sbal_ptrs[k] = (struct qdio_buffer *)
3189 virt_to_phys(card->qdio.out_qs[i]->
3190 bufs[j].buffer);
3191 }
3192
3193 memset(&init_data, 0, sizeof(struct qdio_initialize));
3194 init_data.cdev = CARD_DDEV(card);
3195 init_data.q_format = qeth_get_qdio_q_format(card);
3196 init_data.qib_param_field_format = 0;
3197 init_data.qib_param_field = qib_param_field;
3198 init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
3199 init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
3200 init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
3201 init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
3202 init_data.no_input_qs = 1;
3203 init_data.no_output_qs = card->qdio.no_out_queues;
3204 init_data.input_handler = (qdio_handler_t *)
3205 qeth_qdio_input_handler;
3206 init_data.output_handler = (qdio_handler_t *)
3207 qeth_qdio_output_handler;
3208 init_data.int_parm = (unsigned long) card;
3209 init_data.flags = QDIO_INBOUND_0COPY_SBALS |
3210 QDIO_OUTBOUND_0COPY_SBALS |
3211 QDIO_USE_OUTBOUND_PCIS;
3212 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3213 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3214
3215 if (!(rc = qdio_initialize(&init_data)))
3216 card->qdio.state = QETH_QDIO_ESTABLISHED;
3217
3218 kfree(out_sbal_ptrs);
3219 kfree(in_sbal_ptrs);
3220 kfree(qib_param_field);
3221 return rc;
3222}
3223
3224static int
3225qeth_qdio_activate(struct qeth_card *card)
3226{
3227 QETH_DBF_TEXT(setup,3,"qdioact");
3228 return qdio_activate(CARD_DDEV(card), 0);
3229}
3230
3231static int
3232qeth_clear_channel(struct qeth_channel *channel)
3233{
3234 unsigned long flags;
3235 struct qeth_card *card;
3236 int rc;
3237
3238 QETH_DBF_TEXT(trace,3,"clearch");
3239 card = CARD_FROM_CDEV(channel->ccwdev);
3240 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3241 rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
3242 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3243
3244 if (rc)
3245 return rc;
3246 rc = wait_event_interruptible_timeout(card->wait_q,
3247 channel->state==CH_STATE_STOPPED, QETH_TIMEOUT);
3248 if (rc == -ERESTARTSYS)
3249 return rc;
3250 if (channel->state != CH_STATE_STOPPED)
3251 return -ETIME;
3252 channel->state = CH_STATE_DOWN;
3253 return 0;
3254}
3255
3256static int
3257qeth_halt_channel(struct qeth_channel *channel)
3258{
3259 unsigned long flags;
3260 struct qeth_card *card;
3261 int rc;
3262
3263 QETH_DBF_TEXT(trace,3,"haltch");
3264 card = CARD_FROM_CDEV(channel->ccwdev);
3265 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3266 rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
3267 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3268
3269 if (rc)
3270 return rc;
3271 rc = wait_event_interruptible_timeout(card->wait_q,
3272 channel->state==CH_STATE_HALTED, QETH_TIMEOUT);
3273 if (rc == -ERESTARTSYS)
3274 return rc;
3275 if (channel->state != CH_STATE_HALTED)
3276 return -ETIME;
3277 return 0;
3278}
3279
3280static int
3281qeth_halt_channels(struct qeth_card *card)
3282{
3283 int rc = 0;
3284
3285 QETH_DBF_TEXT(trace,3,"haltchs");
3286 if ((rc = qeth_halt_channel(&card->read)))
3287 return rc;
3288 if ((rc = qeth_halt_channel(&card->write)))
3289 return rc;
3290 return qeth_halt_channel(&card->data);
3291}
3292static int
3293qeth_clear_channels(struct qeth_card *card)
3294{
3295 int rc = 0;
3296
3297 QETH_DBF_TEXT(trace,3,"clearchs");
3298 if ((rc = qeth_clear_channel(&card->read)))
3299 return rc;
3300 if ((rc = qeth_clear_channel(&card->write)))
3301 return rc;
3302 return qeth_clear_channel(&card->data);
3303}
3304
3305static int
3306qeth_clear_halt_card(struct qeth_card *card, int halt)
3307{
3308 int rc = 0;
3309
3310 QETH_DBF_TEXT(trace,3,"clhacrd");
3311 QETH_DBF_HEX(trace, 3, &card, sizeof(void *));
3312
3313 if (halt)
3314 rc = qeth_halt_channels(card);
3315 if (rc)
3316 return rc;
3317 return qeth_clear_channels(card);
3318}
3319
3320static int
3321qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
3322{
3323 int rc = 0;
3324
3325 QETH_DBF_TEXT(trace,3,"qdioclr");
3326 if (card->qdio.state == QETH_QDIO_ESTABLISHED){
3327 if ((rc = qdio_cleanup(CARD_DDEV(card),
3328 (card->info.type == QETH_CARD_TYPE_IQD) ?
3329 QDIO_FLAG_CLEANUP_USING_HALT :
3330 QDIO_FLAG_CLEANUP_USING_CLEAR)))
3331 QETH_DBF_TEXT_(trace, 3, "1err%d", rc);
3332 card->qdio.state = QETH_QDIO_ALLOCATED;
3333 }
3334 if ((rc = qeth_clear_halt_card(card, use_halt)))
3335 QETH_DBF_TEXT_(trace, 3, "2err%d", rc);
3336 card->state = CARD_STATE_DOWN;
3337 return rc;
3338}
3339
3340static int
3341qeth_dm_act(struct qeth_card *card)
3342{
3343 int rc;
3344 struct qeth_cmd_buffer *iob;
3345
3346 QETH_DBF_TEXT(setup,2,"dmact");
3347
3348 iob = qeth_wait_for_buffer(&card->write);
3349 memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
3350
3351 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
3352 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
3353 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
3354 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3355 rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
3356 return rc;
3357}
3358
3359static int
3360qeth_mpc_initialize(struct qeth_card *card)
3361{
3362 int rc;
3363
3364 QETH_DBF_TEXT(setup,2,"mpcinit");
3365
3366 if ((rc = qeth_issue_next_read(card))){
3367 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3368 return rc;
3369 }
3370 if ((rc = qeth_cm_enable(card))){
3371 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3372 return rc;
3373 }
3374 if ((rc = qeth_cm_setup(card))){
3375 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
3376 return rc;
3377 }
3378 if ((rc = qeth_ulp_enable(card))){
3379 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
3380 return rc;
3381 }
3382 if ((rc = qeth_ulp_setup(card))){
3383 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3384 return rc;
3385 }
3386 if ((rc = qeth_alloc_qdio_buffers(card))){
3387 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3388 return rc;
3389 }
3390 if ((rc = qeth_qdio_establish(card))){
3391 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
3392 qeth_free_qdio_buffers(card);
3393 goto out_qdio;
3394 }
3395 if ((rc = qeth_qdio_activate(card))){
3396 QETH_DBF_TEXT_(setup, 2, "7err%d", rc);
3397 goto out_qdio;
3398 }
3399 if ((rc = qeth_dm_act(card))){
3400 QETH_DBF_TEXT_(setup, 2, "8err%d", rc);
3401 goto out_qdio;
3402 }
3403
3404 return 0;
3405out_qdio:
3406 qeth_qdio_clear_card(card, card->info.type==QETH_CARD_TYPE_OSAE);
3407 return rc;
3408}
3409
3410static struct net_device *
3411qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype)
3412{
3413 struct net_device *dev = NULL;
3414
3415 switch (type) {
3416 case QETH_CARD_TYPE_OSAE:
3417 switch (linktype) {
3418 case QETH_LINK_TYPE_LANE_TR:
3419 case QETH_LINK_TYPE_HSTR:
3420#ifdef CONFIG_TR
3421 dev = alloc_trdev(0);
3422#endif /* CONFIG_TR */
3423 break;
3424 default:
3425 dev = alloc_etherdev(0);
3426 }
3427 break;
3428 case QETH_CARD_TYPE_IQD:
3429 dev = alloc_netdev(0, "hsi%d", ether_setup);
3430 break;
3431 default:
3432 dev = alloc_etherdev(0);
3433 }
3434 return dev;
3435}
3436
3437/* hard_header fake function; used in case fake_ll is set */
3438static int
3439qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
3440 unsigned short type, void *daddr, void *saddr,
3441 unsigned len)
3442{
3443 struct ethhdr *hdr;
3444
3445 hdr = (struct ethhdr *)skb_push(skb, QETH_FAKE_LL_LEN);
3446 memcpy(hdr->h_source, dev->dev_addr, ETH_ALEN);
3447 memcpy(hdr->h_dest, "FAKELL", ETH_ALEN);
3448 if (type != ETH_P_802_3)
3449 hdr->h_proto = htons(type);
3450 else
3451 hdr->h_proto = htons(len);
3452 return QETH_FAKE_LL_LEN;
3453}
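
/*
 * [Editor's note -- illustrative sketch, not part of the driver.]  The
 * h_proto assignment above follows the Ethernet II / 802.3 convention:
 * for ETH_P_802_3 the field carries the frame length, otherwise the
 * protocol type.  On the wire the two are distinguished by value alone:
 * EtherTypes start at 0x0600 (1536), valid 802.3 lengths end at 1500.
 */
static int h_proto_is_ethertype(unsigned short h_proto_host_order)
{
	return h_proto_host_order >= 0x0600;	/* else: 802.3 length */
}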
3454
3455static inline int
3456qeth_send_packet(struct qeth_card *, struct sk_buff *);
3457
3458static int
3459qeth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3460{
3461 int rc;
3462 struct qeth_card *card;
3463
3464 QETH_DBF_TEXT(trace, 6, "hrdstxmi");
3465 card = (struct qeth_card *)dev->priv;
3466 if (skb==NULL) {
3467 card->stats.tx_dropped++;
3468 card->stats.tx_errors++;
3469 /* return OK; otherwise ksoftirqd goes to 100% */
3470 return NETDEV_TX_OK;
3471 }
3472 if ((card->state != CARD_STATE_UP) || !card->lan_online) {
3473 card->stats.tx_dropped++;
3474 card->stats.tx_errors++;
3475 card->stats.tx_carrier_errors++;
3476 dev_kfree_skb_any(skb);
3477 /* return OK; otherwise ksoftirqd goes to 100% */
3478 return NETDEV_TX_OK;
3479 }
3480#ifdef CONFIG_QETH_PERF_STATS
3481 card->perf_stats.outbound_cnt++;
3482 card->perf_stats.outbound_start_time = qeth_get_micros();
3483#endif
3484 netif_stop_queue(dev);
3485 if ((rc = qeth_send_packet(card, skb))) {
3486 if (rc == -EBUSY) {
3487 return NETDEV_TX_BUSY;
3488 } else {
3489 card->stats.tx_errors++;
3490 card->stats.tx_dropped++;
3491 dev_kfree_skb_any(skb);
3492 			/* set to OK; otherwise ksoftirqd goes to 100% */
3493 rc = NETDEV_TX_OK;
3494 }
3495 }
3496 netif_wake_queue(dev);
3497#ifdef CONFIG_QETH_PERF_STATS
3498 card->perf_stats.outbound_time += qeth_get_micros() -
3499 card->perf_stats.outbound_start_time;
3500#endif
3501 return rc;
3502}
3503
3504static int
3505qeth_verify_vlan_dev(struct net_device *dev, struct qeth_card *card)
3506{
3507 int rc = 0;
3508#ifdef CONFIG_QETH_VLAN
3509 struct vlan_group *vg;
3510 int i;
3511
3512 if (!(vg = card->vlangrp))
3513 return rc;
3514
3515 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){
3516 if (vg->vlan_devices[i] == dev){
3517 rc = QETH_VLAN_CARD;
3518 break;
3519 }
3520 }
3521#endif
3522 return rc;
3523}
3524
3525static int
3526qeth_verify_dev(struct net_device *dev)
3527{
3528 struct qeth_card *card;
3529 unsigned long flags;
3530 int rc = 0;
3531
3532 read_lock_irqsave(&qeth_card_list.rwlock, flags);
3533 list_for_each_entry(card, &qeth_card_list.list, list){
3534 if (card->dev == dev){
3535 rc = QETH_REAL_CARD;
3536 break;
3537 }
3538 rc = qeth_verify_vlan_dev(dev, card);
3539 if (rc)
3540 break;
3541 }
3542 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
3543
3544 return rc;
3545}
3546
3547static struct qeth_card *
3548qeth_get_card_from_dev(struct net_device *dev)
3549{
3550 struct qeth_card *card = NULL;
3551 int rc;
3552
3553 rc = qeth_verify_dev(dev);
3554 if (rc == QETH_REAL_CARD)
3555 card = (struct qeth_card *)dev->priv;
3556 else if (rc == QETH_VLAN_CARD)
3557 card = (struct qeth_card *)
3558 VLAN_DEV_INFO(dev)->real_dev->priv;
3559
3560 QETH_DBF_TEXT_(trace, 4, "%d", rc);
3561 	return card;
3562}
3563
3564static void
3565qeth_tx_timeout(struct net_device *dev)
3566{
3567 struct qeth_card *card;
3568
3569 card = (struct qeth_card *) dev->priv;
3570 card->stats.tx_errors++;
3571 qeth_schedule_recovery(card);
3572}
3573
3574static int
3575qeth_open(struct net_device *dev)
3576{
3577 struct qeth_card *card;
3578
3579 QETH_DBF_TEXT(trace, 4, "qethopen");
3580
3581 card = (struct qeth_card *) dev->priv;
3582
3583 if (card->state != CARD_STATE_SOFTSETUP)
3584 return -ENODEV;
3585
3586 if ( (card->options.layer2) &&
3587 (!card->info.layer2_mac_registered)) {
3588 QETH_DBF_TEXT(trace,4,"nomacadr");
3589 return -EPERM;
3590 }
3591 card->dev->flags |= IFF_UP;
3592 netif_start_queue(dev);
3593 card->data.state = CH_STATE_UP;
3594 card->state = CARD_STATE_UP;
3595
3596 if (!card->lan_online){
3597 if (netif_carrier_ok(dev))
3598 netif_carrier_off(dev);
3599 }
3600 return 0;
3601}
3602
3603static int
3604qeth_stop(struct net_device *dev)
3605{
3606 struct qeth_card *card;
3607
3608 QETH_DBF_TEXT(trace, 4, "qethstop");
3609
3610 card = (struct qeth_card *) dev->priv;
3611
3612 netif_stop_queue(dev);
3613 card->dev->flags &= ~IFF_UP;
3614 if (card->state == CARD_STATE_UP)
3615 card->state = CARD_STATE_SOFTSETUP;
3616 return 0;
3617}
3618
3619static inline int
3620qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3621{
3622 int cast_type = RTN_UNSPEC;
3623
3624 if (skb->dst && skb->dst->neighbour){
3625 cast_type = skb->dst->neighbour->type;
3626 if ((cast_type == RTN_BROADCAST) ||
3627 (cast_type == RTN_MULTICAST) ||
3628 (cast_type == RTN_ANYCAST))
3629 return cast_type;
3630 else
3631 return RTN_UNSPEC;
3632 }
3633 /* try something else */
3634 if (skb->protocol == ETH_P_IPV6)
3635 return (skb->nh.raw[24] == 0xff) ? RTN_MULTICAST : 0;
3636 else if (skb->protocol == ETH_P_IP)
3637 return ((skb->nh.raw[16] & 0xf0) == 0xe0) ? RTN_MULTICAST : 0;
3638 /* ... */
3639 if (!memcmp(skb->data, skb->dev->broadcast, 6))
3640 return RTN_BROADCAST;
3641 else {
3642 u16 hdr_mac;
3643
3644 hdr_mac = *((u16 *)skb->data);
3645 /* tr multicast? */
3646 switch (card->info.link_type) {
3647 case QETH_LINK_TYPE_HSTR:
3648 case QETH_LINK_TYPE_LANE_TR:
3649 if ((hdr_mac == QETH_TR_MAC_NC) ||
3650 (hdr_mac == QETH_TR_MAC_C))
3651 return RTN_MULTICAST;
3652 		/* eth or the like: multicast? */
3653 default:
3654 if ((hdr_mac == QETH_ETH_MAC_V4) ||
3655 (hdr_mac == QETH_ETH_MAC_V6))
3656 return RTN_MULTICAST;
3657 }
3658 }
3659 return cast_type;
3660}
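
/*
 * [Editor's note -- illustrative sketch, not part of the driver.]  The
 * header peeks above classify multicast by destination address alone:
 * IPv4 multicast is 224.0.0.0/4 (top nibble 0xe of the daddr, found at
 * offset 16 of the IP header), IPv6 multicast is ff00::/8 (the first
 * daddr byte, at offset 24 of the IPv6 header, equals 0xff).
 */
#include <assert.h>

static int ipv4_hdr_daddr_is_mcast(const unsigned char *iph)
{
	return (iph[16] & 0xf0) == 0xe0;	/* daddr at offset 16 */
}

static int ipv6_hdr_daddr_is_mcast(const unsigned char *ip6h)
{
	return ip6h[24] == 0xff;		/* daddr at offset 24 */
}

int main(void)
{
	unsigned char iph[20] = { 0 };
	unsigned char ip6h[40] = { 0 };

	iph[16] = 224; iph[19] = 1;		/* daddr 224.0.0.1 */
	assert(ipv4_hdr_daddr_is_mcast(iph));
	iph[16] = 192;				/* daddr 192.x.x.x: unicast */
	assert(!ipv4_hdr_daddr_is_mcast(iph));
	ip6h[24] = 0xff;			/* daddr ff02::... */
	assert(ipv6_hdr_daddr_is_mcast(ip6h));
	return 0;
}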
3661
3662static inline int
3663qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3664 int ipv, int cast_type)
3665{
3666 if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
3667 return card->qdio.default_out_queue;
3668 switch (card->qdio.no_out_queues) {
3669 case 4:
3670 if (cast_type && card->info.is_multicast_different)
3671 return card->info.is_multicast_different &
3672 (card->qdio.no_out_queues - 1);
3673 if (card->qdio.do_prio_queueing && (ipv == 4)) {
3674 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){
3675 if (skb->nh.iph->tos & IP_TOS_NOTIMPORTANT)
3676 return 3;
3677 if (skb->nh.iph->tos & IP_TOS_HIGHRELIABILITY)
3678 return 2;
3679 if (skb->nh.iph->tos & IP_TOS_HIGHTHROUGHPUT)
3680 return 1;
3681 if (skb->nh.iph->tos & IP_TOS_LOWDELAY)
3682 return 0;
3683 }
3684 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC)
3685 return 3 - (skb->nh.iph->tos >> 6);
3686 } else if (card->qdio.do_prio_queueing && (ipv == 6)) {
3687 /* TODO: IPv6!!! */
3688 }
3689 return card->qdio.default_out_queue;
3690 case 1: /* fallthrough for single-out-queue 1920-device */
3691 default:
3692 return card->qdio.default_out_queue;
3693 }
3694}
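
/*
 * [Editor's note -- illustrative sketch, not part of the driver.]  A
 * compact model of the four-queue IPv4 selection above.  The bit values
 * are the RFC 1349 TOS bits (0x10 low delay, 0x08 high throughput, 0x04
 * high reliability, 0x02 minimize monetary cost -- the driver's IP_TOS_*
 * constants, including IP_TOS_NOTIMPORTANT for 0x02, are assumed to
 * match); queue 0 has the highest priority.
 */
enum { PRIO_BY_TOS, PRIO_BY_PREC, PRIO_NONE };

#define DEMO_DEFAULT_QUEUE 2	/* assumed demo value */

static int queue_for_ipv4(unsigned char tos, int mode)
{
	if (mode == PRIO_BY_TOS) {
		if (tos & 0x02) return 3;	/* minimize cost -> lowest */
		if (tos & 0x04) return 2;	/* high reliability */
		if (tos & 0x08) return 1;	/* high throughput */
		if (tos & 0x10) return 0;	/* low delay -> highest */
	}
	if (mode == PRIO_BY_PREC)
		return 3 - (tos >> 6);	/* top two precedence bits */
	return DEMO_DEFAULT_QUEUE;
}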
3695
3696static inline int
3697qeth_get_ip_version(struct sk_buff *skb)
3698{
3699 switch (skb->protocol) {
3700 case ETH_P_IPV6:
3701 return 6;
3702 case ETH_P_IP:
3703 return 4;
3704 default:
3705 return 0;
3706 }
3707}
3708
3709static inline int
3710qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
3711 struct qeth_hdr **hdr, int ipv)
3712{
3713 int rc = 0;
3714#ifdef CONFIG_QETH_VLAN
3715 u16 *tag;
3716#endif
3717
3718 QETH_DBF_TEXT(trace, 6, "prepskb");
3719
3720 rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
3721 if (rc)
3722 return rc;
3723#ifdef CONFIG_QETH_VLAN
3724 if (card->vlangrp && vlan_tx_tag_present(*skb) &&
3725 ((ipv == 6) || card->options.layer2) ) {
3726 /*
3727 		 * Move the mac addresses (6 bytes dest, 6 bytes src)
3728 * to the beginning of the new header. We are using three
3729 * memcpys instead of one memmove to save cycles.
3730 */
3731 skb_push(*skb, VLAN_HLEN);
3732 memcpy((*skb)->data, (*skb)->data + 4, 4);
3733 memcpy((*skb)->data + 4, (*skb)->data + 8, 4);
3734 memcpy((*skb)->data + 8, (*skb)->data + 12, 4);
3735 tag = (u16 *)((*skb)->data + 12);
3736 /*
3737 * first two bytes = ETH_P_8021Q (0x8100)
3738 * second two bytes = VLANID
3739 */
3740 *tag = __constant_htons(ETH_P_8021Q);
3741 *(tag + 1) = htons(vlan_tx_tag_get(*skb));
3742 }
3743#endif
3744 *hdr = (struct qeth_hdr *)
3745 qeth_push_skb(card, skb, sizeof(struct qeth_hdr));
3746 	if (*hdr == NULL)
3747 return -EINVAL;
3748 return 0;
3749}
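
/*
 * [Editor's note -- illustrative sketch, not part of the driver.]  A
 * user-space rendering of the VLAN insertion in qeth_prepare_skb() above:
 * grow the header by 4 bytes, slide the two 6-byte MAC addresses up, and
 * write TPID 0x8100 plus the tag into the gap.  The driver uses three
 * 4-byte memcpys instead of one memmove to save cycles; plain memmove is
 * used here for clarity.
 */
#include <string.h>

/* 'frame' must have at least 4 bytes of headroom in front of it */
static unsigned char *insert_vlan_tag(unsigned char *frame, unsigned short tci)
{
	unsigned char *p = frame - 4;		/* the skb_push(VLAN_HLEN) */

	memmove(p, frame, 12);			/* dest MAC + src MAC */
	p[12] = 0x81;				/* TPID = ETH_P_8021Q ... */
	p[13] = 0x00;
	p[14] = tci >> 8;			/* ... then the 16-bit tag */
	p[15] = tci & 0xff;
	return p;
}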
3750
3751static inline u8
3752qeth_get_qeth_hdr_flags4(int cast_type)
3753{
3754 if (cast_type == RTN_MULTICAST)
3755 return QETH_CAST_MULTICAST;
3756 if (cast_type == RTN_BROADCAST)
3757 return QETH_CAST_BROADCAST;
3758 return QETH_CAST_UNICAST;
3759}
3760
3761static inline u8
3762qeth_get_qeth_hdr_flags6(int cast_type)
3763{
3764 u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
3765 if (cast_type == RTN_MULTICAST)
3766 return ct | QETH_CAST_MULTICAST;
3767 if (cast_type == RTN_ANYCAST)
3768 return ct | QETH_CAST_ANYCAST;
3769 if (cast_type == RTN_BROADCAST)
3770 return ct | QETH_CAST_BROADCAST;
3771 return ct | QETH_CAST_UNICAST;
3772}
3773
3774static inline void
3775qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
3776 struct sk_buff *skb)
3777{
3778 __u16 hdr_mac;
3779
3780 if (!memcmp(skb->data+QETH_HEADER_SIZE,
3781 skb->dev->broadcast,6)) { /* broadcast? */
3782 *(__u32 *)hdr->hdr.l2.flags |=
3783 QETH_LAYER2_FLAG_BROADCAST << 8;
3784 return;
3785 }
3786 hdr_mac=*((__u16*)skb->data);
3787 /* tr multicast? */
3788 switch (card->info.link_type) {
3789 case QETH_LINK_TYPE_HSTR:
3790 case QETH_LINK_TYPE_LANE_TR:
3791 if ((hdr_mac == QETH_TR_MAC_NC) ||
3792 (hdr_mac == QETH_TR_MAC_C) )
3793 *(__u32 *)hdr->hdr.l2.flags |=
3794 QETH_LAYER2_FLAG_MULTICAST << 8;
3795 else
3796 *(__u32 *)hdr->hdr.l2.flags |=
3797 QETH_LAYER2_FLAG_UNICAST << 8;
3798 break;
3799 	/* eth or the like: multicast? */
3800 default:
3801 if ( (hdr_mac==QETH_ETH_MAC_V4) ||
3802 (hdr_mac==QETH_ETH_MAC_V6) )
3803 *(__u32 *)hdr->hdr.l2.flags |=
3804 QETH_LAYER2_FLAG_MULTICAST << 8;
3805 else
3806 *(__u32 *)hdr->hdr.l2.flags |=
3807 QETH_LAYER2_FLAG_UNICAST << 8;
3808 }
3809}
3810
3811static inline void
3812qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
3813 struct sk_buff *skb, int cast_type)
3814{
3815 memset(hdr, 0, sizeof(struct qeth_hdr));
3816 hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
3817
3818 /* set byte 0 to "0x02" and byte 3 to casting flags */
3819 if (cast_type==RTN_MULTICAST)
3820 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_MULTICAST << 8;
3821 else if (cast_type==RTN_BROADCAST)
3822 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_BROADCAST << 8;
3823 else
3824 qeth_layer2_get_packet_type(card, hdr, skb);
3825
3826 hdr->hdr.l2.pkt_length = skb->len-QETH_HEADER_SIZE;
3827#ifdef CONFIG_QETH_VLAN
3828 /* VSWITCH relies on the VLAN
3829 * information to be present in
3830 * the QDIO header */
3831 if ((card->vlangrp != NULL) &&
3832 vlan_tx_tag_present(skb)) {
3833 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_VLAN << 8;
3834 hdr->hdr.l2.vlan_id = vlan_tx_tag_get(skb);
3835 }
3836#endif
3837}
3838
3839void
3840qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
3841 struct sk_buff *skb, int ipv, int cast_type)
3842{
3843 QETH_DBF_TEXT(trace, 6, "fillhdr");
3844
3845 memset(hdr, 0, sizeof(struct qeth_hdr));
3846 if (card->options.layer2) {
3847 qeth_layer2_fill_header(card, hdr, skb, cast_type);
3848 return;
3849 }
3850 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
3851 hdr->hdr.l3.ext_flags = 0;
3852#ifdef CONFIG_QETH_VLAN
3853 /*
3854 	 * before we're going to overwrite this location with the next hop ip;
3855 * v6 uses passthrough, v4 sets the tag in the QDIO header.
3856 */
3857 if (card->vlangrp && vlan_tx_tag_present(skb)) {
3858 hdr->hdr.l3.ext_flags = (ipv == 4) ?
3859 QETH_HDR_EXT_VLAN_FRAME :
3860 QETH_HDR_EXT_INCLUDE_VLAN_TAG;
3861 hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
3862 }
3863#endif /* CONFIG_QETH_VLAN */
3864 hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
3865 if (ipv == 4) { /* IPv4 */
3866 hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags4(cast_type);
3867 memset(hdr->hdr.l3.dest_addr, 0, 12);
3868 if ((skb->dst) && (skb->dst->neighbour)) {
3869 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
3870 *((u32 *) skb->dst->neighbour->primary_key);
3871 } else {
3872 /* fill in destination address used in ip header */
3873 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) = skb->nh.iph->daddr;
3874 }
3875 } else if (ipv == 6) { /* IPv6 or passthru */
3876 hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type);
3877 if ((skb->dst) && (skb->dst->neighbour)) {
3878 memcpy(hdr->hdr.l3.dest_addr,
3879 skb->dst->neighbour->primary_key, 16);
3880 } else {
3881 /* fill in destination address used in ip header */
3882 memcpy(hdr->hdr.l3.dest_addr, &skb->nh.ipv6h->daddr, 16);
3883 }
3884 } else { /* passthrough */
3885 if (!memcmp(skb->data + sizeof(struct qeth_hdr),
3886 skb->dev->broadcast, 6)) { /* broadcast? */
3887 hdr->hdr.l3.flags = QETH_CAST_BROADCAST | QETH_HDR_PASSTHRU;
3888 } else {
3889 hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
3890 QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
3891 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
3892 }
3893 }
3894}
3895
3896static inline void
3897__qeth_fill_buffer_frag(struct sk_buff *skb, struct qdio_buffer *buffer,
3898 int *next_element_to_fill)
3899{
3900 int length = skb->len;
3901 struct skb_frag_struct *frag;
3902 int fragno;
3903 unsigned long addr;
3904 int element;
3905 int first_lap = 1;
3906
3907 fragno = skb_shinfo(skb)->nr_frags; /* start with last frag */
3908 element = *next_element_to_fill + fragno;
3909 while (length > 0) {
3910 if (fragno > 0) {
3911 frag = &skb_shinfo(skb)->frags[fragno - 1];
3912 addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
3913 frag->page_offset;
3914 buffer->element[element].addr = (char *)addr;
3915 buffer->element[element].length = frag->size;
3916 length -= frag->size;
3917 if (first_lap)
3918 buffer->element[element].flags =
3919 SBAL_FLAGS_LAST_FRAG;
3920 else
3921 buffer->element[element].flags =
3922 SBAL_FLAGS_MIDDLE_FRAG;
3923 } else {
3924 buffer->element[element].addr = skb->data;
3925 buffer->element[element].length = length;
3926 length = 0;
3927 buffer->element[element].flags =
3928 SBAL_FLAGS_FIRST_FRAG;
3929 }
3930 element--;
3931 fragno--;
3932 first_lap = 0;
3933 }
3934 *next_element_to_fill += skb_shinfo(skb)->nr_frags + 1;
3935}
3936
3937static inline void
3938__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
3939 int *next_element_to_fill)
3940{
3941 int length = skb->len;
3942 int length_here;
3943 int element;
3944 char *data;
3945 int first_lap = 1;
3946
3947 element = *next_element_to_fill;
3948 data = skb->data;
3949 while (length > 0) {
3950 /* length_here is the remaining amount of data in this page */
3951 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
3952 if (length < length_here)
3953 length_here = length;
3954 buffer->element[element].addr = data;
3955 buffer->element[element].length = length_here;
3956 length -= length_here;
3957 if (!length){
3958 if (first_lap)
3959 buffer->element[element].flags = 0;
3960 else
3961 buffer->element[element].flags =
3962 SBAL_FLAGS_LAST_FRAG;
3963 } else {
3964 if (first_lap)
3965 buffer->element[element].flags =
3966 SBAL_FLAGS_FIRST_FRAG;
3967 else
3968 buffer->element[element].flags =
3969 SBAL_FLAGS_MIDDLE_FRAG;
3970 }
3971 data += length_here;
3972 element++;
3973 first_lap = 0;
3974 }
3975 *next_element_to_fill = element;
3976}
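
/*
 * [Editor's note -- illustrative sketch, not part of the driver.]  The
 * loop above chops a linear skb into QDIO elements such that no element
 * crosses a page boundary.  A stand-alone version of the split, counting
 * how many elements a buffer needs:
 */
#include <stdio.h>

#define PAGE_SZ 4096UL

static int elements_needed(unsigned long data, unsigned long length)
{
	int elements = 0;

	while (length > 0) {
		/* room left in the current page */
		unsigned long chunk = PAGE_SZ - (data % PAGE_SZ);

		if (chunk > length)
			chunk = length;
		data += chunk;
		length -= chunk;
		elements++;
	}
	return elements;
}

int main(void)
{
	printf("%d\n", elements_needed(4096 - 100, 100));	/* 1 */
	printf("%d\n", elements_needed(4096 - 50, 100));	/* 2 */
	printf("%d\n", elements_needed(100, 9000));		/* 3 */
	return 0;
}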
3977
3978static inline int
3979qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3980 struct qeth_qdio_out_buffer *buf,
3981 struct sk_buff *skb)
3982{
3983 struct qdio_buffer *buffer;
3984 int flush_cnt = 0;
3985
3986 QETH_DBF_TEXT(trace, 6, "qdfillbf");
3987 buffer = buf->buffer;
3988 atomic_inc(&skb->users);
3989 skb_queue_tail(&buf->skb_list, skb);
3990 if (skb_shinfo(skb)->nr_frags == 0)
3991 __qeth_fill_buffer(skb, buffer,
3992 (int *)&buf->next_element_to_fill);
3993 else
3994 __qeth_fill_buffer_frag(skb, buffer,
3995 (int *)&buf->next_element_to_fill);
3996
3997 if (!queue->do_pack) {
3998 QETH_DBF_TEXT(trace, 6, "fillbfnp");
3999 /* set state to PRIMED -> will be flushed */
4000 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
4001 flush_cnt = 1;
4002 } else {
4003 QETH_DBF_TEXT(trace, 6, "fillbfpa");
4004#ifdef CONFIG_QETH_PERF_STATS
4005 queue->card->perf_stats.skbs_sent_pack++;
4006#endif
4007 if (buf->next_element_to_fill >=
4008 QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
4009 /*
4010 			 * packed buffer is full -> set state PRIMED
4011 * -> will be flushed
4012 */
4013 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
4014 flush_cnt = 1;
4015 }
4016 }
4017 return flush_cnt;
4018}
4019
4020static inline int
4021qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4022 struct sk_buff *skb, struct qeth_hdr *hdr,
4023 int elements_needed,
4024 struct qeth_eddp_context *ctx)
4025{
4026 struct qeth_qdio_out_buffer *buffer;
4027 int buffers_needed = 0;
4028 int flush_cnt = 0;
4029 int index;
4030
4031 QETH_DBF_TEXT(trace, 6, "dosndpfa");
4032
4033 /* spin until we get the queue ... */
4034 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
4035 QETH_OUT_Q_LOCKED,
4036 &queue->state));
4037 /* ... now we've got the queue */
4038 index = queue->next_buf_to_fill;
4039 buffer = &queue->bufs[queue->next_buf_to_fill];
4040 /*
4041 * check if buffer is empty to make sure that we do not 'overtake'
4042 * ourselves and try to fill a buffer that is already primed
4043 */
4044 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
4045 card->stats.tx_dropped++;
4046 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4047 return -EBUSY;
4048 }
4049 if (ctx == NULL)
4050 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
4051 QDIO_MAX_BUFFERS_PER_Q;
4052 else {
4053 buffers_needed = qeth_eddp_check_buffers_for_context(queue,ctx);
4054 if (buffers_needed < 0) {
4055 card->stats.tx_dropped++;
4056 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4057 return -EBUSY;
4058 }
4059 queue->next_buf_to_fill =
4060 (queue->next_buf_to_fill + buffers_needed) %
4061 QDIO_MAX_BUFFERS_PER_Q;
4062 }
4063 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4064 if (ctx == NULL) {
4065 qeth_fill_buffer(queue, buffer, skb);
4066 qeth_flush_buffers(queue, 0, index, 1);
4067 } else {
4068 flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
4069 WARN_ON(buffers_needed != flush_cnt);
4070 qeth_flush_buffers(queue, 0, index, flush_cnt);
4071 }
4072 return 0;
4073}
4074
4075static inline int
4076qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4077 struct sk_buff *skb, struct qeth_hdr *hdr,
4078 int elements_needed, struct qeth_eddp_context *ctx)
4079{
4080 struct qeth_qdio_out_buffer *buffer;
4081 int start_index;
4082 int flush_count = 0;
4083 int do_pack = 0;
4084 int tmp;
4085 int rc = 0;
4086
4087 QETH_DBF_TEXT(trace, 6, "dosndpkt");
4088
4089 /* spin until we get the queue ... */
4090 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
4091 QETH_OUT_Q_LOCKED,
4092 &queue->state));
4093 start_index = queue->next_buf_to_fill;
4094 buffer = &queue->bufs[queue->next_buf_to_fill];
4095 /*
4096 * check if buffer is empty to make sure that we do not 'overtake'
4097 * ourselves and try to fill a buffer that is already primed
4098 */
4099 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
4100 card->stats.tx_dropped++;
4101 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4102 return -EBUSY;
4103 }
4104 /* check if we need to switch packing state of this queue */
4105 qeth_switch_to_packing_if_needed(queue);
4106 if (queue->do_pack){
4107 do_pack = 1;
4108 if (ctx == NULL) {
4109 /* does packet fit in current buffer? */
4110 if((QETH_MAX_BUFFER_ELEMENTS(card) -
4111 buffer->next_element_to_fill) < elements_needed){
4112 /* ... no -> set state PRIMED */
4113 atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
4114 flush_count++;
4115 queue->next_buf_to_fill =
4116 (queue->next_buf_to_fill + 1) %
4117 QDIO_MAX_BUFFERS_PER_Q;
4118 buffer = &queue->bufs[queue->next_buf_to_fill];
4119 /* we did a step forward, so check buffer state
4120 * again */
4121 if (atomic_read(&buffer->state) !=
4122 QETH_QDIO_BUF_EMPTY){
4123 card->stats.tx_dropped++;
4124 qeth_flush_buffers(queue, 0, start_index, flush_count);
4125 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4126 return -EBUSY;
4127 }
4128 }
4129 } else {
4130 /* check if we have enough elements (including following
4131 * free buffers) to handle eddp context */
4132 if (qeth_eddp_check_buffers_for_context(queue,ctx) < 0){
4133 printk("eddp tx_dropped 1\n");
4134 card->stats.tx_dropped++;
4135 rc = -EBUSY;
4136 goto out;
4137 }
4138 }
4139 }
4140 if (ctx == NULL)
4141 tmp = qeth_fill_buffer(queue, buffer, skb);
4142 else {
4143 tmp = qeth_eddp_fill_buffer(queue,ctx,queue->next_buf_to_fill);
4144 if (tmp < 0) {
4145 printk("eddp tx_dropped 2\n");
4146 card->stats.tx_dropped++;
4147 			rc = -EBUSY;
4148 goto out;
4149 }
4150 }
4151 queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
4152 QDIO_MAX_BUFFERS_PER_Q;
4153 flush_count += tmp;
4154out:
4155 if (flush_count)
4156 qeth_flush_buffers(queue, 0, start_index, flush_count);
4157 /*
4158 * queue->state will go from LOCKED -> UNLOCKED or from
4159 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
4160 * (switch packing state or flush buffer to get another pci flag out).
4161 * In that case we will enter this loop
4162 */
4163 while (atomic_dec_return(&queue->state)){
4164 flush_count = 0;
4165 start_index = queue->next_buf_to_fill;
4166 /* check if we can go back to non-packing state */
4167 flush_count += qeth_switch_to_nonpacking_if_needed(queue);
4168 /*
4169 * check if we need to flush a packing buffer to get a pci
4170 * flag out on the queue
4171 */
4172 if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
4173 flush_count += qeth_flush_buffers_on_no_pci(queue);
4174 if (flush_count)
4175 qeth_flush_buffers(queue, 0, start_index, flush_count);
4176 }
4177 /* at this point the queue is UNLOCKED again */
4178#ifdef CONFIG_QETH_PERF_STATS
4179 if (do_pack)
4180 queue->card->perf_stats.bufs_sent_pack += flush_count;
4181#endif /* CONFIG_QETH_PERF_STATS */
4182
4183 return rc;
4184}
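
/*
 * [Editor's note -- illustrative sketch, not part of the driver.]  The
 * unlock loop above is a lock hand-off on an atomic counter (UNLOCKED=0,
 * LOCKED=1, LOCKED_FLUSH=2 are assumed values consistent with the
 * comments): instead of blocking, the interrupt handler raises the state
 * to LOCKED_FLUSH, which makes the current owner service the queue once
 * more before it finally drops the count back to UNLOCKED.  A C11-atomics
 * rendering of the atomic_dec_return() loop:
 */
#include <stdatomic.h>

enum { Q_UNLOCKED = 0, Q_LOCKED = 1, Q_LOCKED_FLUSH = 2 };

static void sender_unlock(atomic_int *state)
{
	/* keep working as long as somebody bumped the state up behind
	 * our back; the final decrement releases the queue */
	while (atomic_fetch_sub(state, 1) - 1 != Q_UNLOCKED) {
		/* ... switch packing state / flush a buffer here ... */
	}
}

static int handler_try_notify(atomic_int *state)
{
	int expected = Q_LOCKED;

	/* ask the owner to re-check; fails if the queue is not locked */
	return atomic_compare_exchange_strong(state, &expected,
					      Q_LOCKED_FLUSH);
}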
4185
4186static inline int
4187qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
4188{
4189 int ipv = 0;
4190 int cast_type;
4191 struct qeth_qdio_out_q *queue;
4192 struct qeth_hdr *hdr;
4193 int elements_needed = 0;
4194 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
4195 struct qeth_eddp_context *ctx = NULL;
4196 int rc;
4197
4198 QETH_DBF_TEXT(trace, 6, "sendpkt");
4199
4200 if (!card->options.layer2) {
4201 ipv = qeth_get_ip_version(skb);
4202 if ((card->dev->hard_header == qeth_fake_header) && ipv) {
4203 if ((skb = qeth_pskb_unshare(skb,GFP_ATOMIC)) == NULL) {
4204 card->stats.tx_dropped++;
4205				/* skb is already NULL here; freeing it
				 * could only oops, nothing left to free */
4206 return 0;
4207 }
4208 skb_pull(skb, QETH_FAKE_LL_LEN);
4209 }
4210 }
4211 cast_type = qeth_get_cast_type(card, skb);
4212 if ((cast_type == RTN_BROADCAST) && (card->info.broadcast_capable == 0)){
4213 card->stats.tx_dropped++;
4214 card->stats.tx_errors++;
4215 dev_kfree_skb_any(skb);
4216 return NETDEV_TX_OK;
4217 }
4218 queue = card->qdio.out_qs
4219 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
4220
4221 if (skb_shinfo(skb)->tso_size)
4222 large_send = card->options.large_send;
4223
4224 if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))){
4225 QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc);
4226 return rc;
4227 }
4228	/* are we able to do TSO? If so, prepare and send it from here */
4229 if ((large_send == QETH_LARGE_SEND_TSO) &&
4230 (cast_type == RTN_UNSPEC)) {
4231 rc = qeth_tso_send_packet(card, skb, queue,
4232 ipv, cast_type);
4233 goto do_statistics;
4234 }
4235
4236 qeth_fill_header(card, hdr, skb, ipv, cast_type);
4237 if (large_send == QETH_LARGE_SEND_EDDP) {
4238 ctx = qeth_eddp_create_context(card, skb, hdr);
4239 if (ctx == NULL) {
4240 PRINT_WARN("could not create eddp context\n");
4241 return -EINVAL;
4242 }
4243 } else {
4244 elements_needed = qeth_get_elements_no(card,(void*) hdr, skb);
4245 if (!elements_needed)
4246 return -EINVAL;
4247 }
4248
4249 if (card->info.type != QETH_CARD_TYPE_IQD)
4250 rc = qeth_do_send_packet(card, queue, skb, hdr,
4251 elements_needed, ctx);
4252 else
4253 rc = qeth_do_send_packet_fast(card, queue, skb, hdr,
4254 elements_needed, ctx);
4255do_statistics:
4256 if (!rc){
4257 card->stats.tx_packets++;
4258 card->stats.tx_bytes += skb->len;
4259#ifdef CONFIG_QETH_PERF_STATS
4260 if (skb_shinfo(skb)->tso_size) {
4261 card->perf_stats.large_send_bytes += skb->len;
4262 card->perf_stats.large_send_cnt++;
4263 }
4264 if (skb_shinfo(skb)->nr_frags > 0){
4265 card->perf_stats.sg_skbs_sent++;
4266 /* nr_frags + skb->data */
4267 card->perf_stats.sg_frags_sent +=
4268 skb_shinfo(skb)->nr_frags + 1;
4269 }
4270#endif /* CONFIG_QETH_PERF_STATS */
4271 }
4272 if (ctx != NULL) {
4273 /* drop creator's reference */
4274 qeth_eddp_put_context(ctx);
4275 /* free skb; it's not referenced by a buffer */
4276 if (rc == 0)
4277 dev_kfree_skb_any(skb);
4278
4279 }
4280 return rc;
4281}
4282
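/*
 * There is no real PHY behind a qeth interface, so the two helpers
 * below emulate just enough of the MII register set for tools coming
 * in through SIOCGMIIPHY/SIOCGMIIREG: link is always reported up and
 * full duplex, speed is derived from the link type, and the "PHY ID"
 * is synthesized from the burned-in MAC address; writes are ignored.
 */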
4283static int
4284qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4285{
4286 struct qeth_card *card = (struct qeth_card *) dev->priv;
4287 int rc = 0;
4288
4289 switch(regnum){
4290 case MII_BMCR: /* Basic mode control register */
4291 rc = BMCR_FULLDPLX;
4292 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&&
4293 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
4294 rc |= BMCR_SPEED100;
4295 break;
4296 case MII_BMSR: /* Basic mode status register */
4297 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4298 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4299 BMSR_100BASE4;
4300 break;
4301 case MII_PHYSID1: /* PHYS ID 1 */
4302 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4303 dev->dev_addr[2];
4304 rc = (rc >> 5) & 0xFFFF;
4305 break;
4306 case MII_PHYSID2: /* PHYS ID 2 */
4307 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4308 break;
4309 case MII_ADVERTISE: /* Advertisement control reg */
4310 rc = ADVERTISE_ALL;
4311 break;
4312 case MII_LPA: /* Link partner ability reg */
4313 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4314 LPA_100BASE4 | LPA_LPACK;
4315 break;
4316 case MII_EXPANSION: /* Expansion register */
4317 break;
4318 case MII_DCOUNTER: /* disconnect counter */
4319 break;
4320 case MII_FCSCOUNTER: /* false carrier counter */
4321 break;
4322 case MII_NWAYTEST: /* N-way auto-neg test register */
4323 break;
4324 case MII_RERRCOUNTER: /* rx error counter */
4325 rc = card->stats.rx_errors;
4326 break;
4327 case MII_SREVISION: /* silicon revision */
4328 break;
4329 case MII_RESV1: /* reserved 1 */
4330 break;
4331 case MII_LBRERROR: /* loopback, rx, bypass error */
4332 break;
4333 case MII_PHYADDR: /* physical address */
4334 break;
4335 case MII_RESV2: /* reserved 2 */
4336 break;
4337 case MII_TPISTATUS: /* TPI status for 10mbps */
4338 break;
4339 case MII_NCONFIG: /* network interface config */
4340 break;
4341 default:
4342 rc = 0;
4343 break;
4344 }
4345 return rc;
4346}
4347
4348static void
4349qeth_mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
4350{
4351 switch(regnum){
4352 case MII_BMCR: /* Basic mode control register */
4353 case MII_BMSR: /* Basic mode status register */
4354 case MII_PHYSID1: /* PHYS ID 1 */
4355 case MII_PHYSID2: /* PHYS ID 2 */
4356 case MII_ADVERTISE: /* Advertisement control reg */
4357 case MII_LPA: /* Link partner ability reg */
4358 case MII_EXPANSION: /* Expansion register */
4359 case MII_DCOUNTER: /* disconnect counter */
4360 case MII_FCSCOUNTER: /* false carrier counter */
4361 case MII_NWAYTEST: /* N-way auto-neg test register */
4362 case MII_RERRCOUNTER: /* rx error counter */
4363 case MII_SREVISION: /* silicon revision */
4364 case MII_RESV1: /* reserved 1 */
4365 case MII_LBRERROR: /* loopback, rx, bypass error */
4366 case MII_PHYADDR: /* physical address */
4367 case MII_RESV2: /* reserved 2 */
4368 case MII_TPISTATUS: /* TPI status for 10mbps */
4369 case MII_NCONFIG: /* network interface config */
4370 default:
4371 break;
4372 }
4373}
4374
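/*
 * Translate an IPA ARP return code into a -errno in place and hand
 * back a human-readable cause; callers keep a copy of the raw code
 * (their 'tmp') so warnings can show both forms.
 */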
4375static inline const char *
4376qeth_arp_get_error_cause(int *rc)
4377{
4378 switch (*rc) {
4379 case QETH_IPA_ARP_RC_FAILED:
4380 *rc = -EIO;
4381 return "operation failed";
4382 case QETH_IPA_ARP_RC_NOTSUPP:
4383 *rc = -EOPNOTSUPP;
4384 return "operation not supported";
4385 case QETH_IPA_ARP_RC_OUT_OF_RANGE:
4386 *rc = -EINVAL;
4387 return "argument out of range";
4388 case QETH_IPA_ARP_RC_Q_NOTSUPP:
4389 *rc = -EOPNOTSUPP;
4390 return "query operation not supported";
4391 case QETH_IPA_ARP_RC_Q_NO_DATA:
4392 *rc = -ENOENT;
4393 return "no query data available";
4394 default:
4395 return "unknown error";
4396 }
4397}
4398
4399static int
4400qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs,
4401 __u16, long);
4402
4403static int
4404qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
4405{
4406 int tmp;
4407 int rc;
4408
4409 QETH_DBF_TEXT(trace,3,"arpstnoe");
4410
4411 /* TODO: really not supported by GuestLAN? */
4412 if (card->info.guestlan)
4413 return -EOPNOTSUPP;
4414 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4415 PRINT_WARN("ARP processing not supported "
4416 "on %s!\n", QETH_CARD_IFNAME(card));
4417 return -EOPNOTSUPP;
4418 }
4419 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
4420 IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
4421 no_entries);
4422 if (rc) {
4423 tmp = rc;
4424 PRINT_WARN("Could not set number of ARP entries on %s: "
4425 "%s (0x%x/%d)\n",
4426 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
4427 tmp, tmp);
4428 }
4429 return rc;
4430}
4431
4432static inline void
4433qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
4434 struct qeth_arp_query_data *qdata,
4435 int entry_size, int uentry_size)
4436{
4437 char *entry_ptr;
4438 char *uentry_ptr;
4439 int i;
4440
4441 entry_ptr = (char *)&qdata->data;
4442 uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
4443 for (i = 0; i < qdata->no_entries; ++i){
4444		/* strip off the 32 bytes of "media specific information" */
4445 memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
4446 entry_ptr += entry_size;
4447 uentry_ptr += uentry_size;
4448 }
4449}
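/*
 * Layout of the user buffer filled by qeth_arp_query_cb() below: the
 * first four bytes receive the total entry count, two bytes at
 * QETH_QARP_MASK_OFFSET receive the reply_bits mask, and the entries
 * themselves start at QETH_QARP_ENTRIES_OFFSET (set up in
 * qeth_arp_query()).
 */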
4450
4451static int
4452qeth_arp_query_cb(struct qeth_card *card, struct qeth_reply *reply,
4453 unsigned long data)
4454{
4455 struct qeth_ipa_cmd *cmd;
4456 struct qeth_arp_query_data *qdata;
4457 struct qeth_arp_query_info *qinfo;
4458 int entry_size;
4459 int uentry_size;
4460 int i;
4461
4462 QETH_DBF_TEXT(trace,4,"arpquecb");
4463
4464 qinfo = (struct qeth_arp_query_info *) reply->param;
4465 cmd = (struct qeth_ipa_cmd *) data;
4466 if (cmd->hdr.return_code) {
4467 QETH_DBF_TEXT_(trace,4,"qaer1%i", cmd->hdr.return_code);
4468 return 0;
4469 }
4470 if (cmd->data.setassparms.hdr.return_code) {
4471 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
4472 QETH_DBF_TEXT_(trace,4,"qaer2%i", cmd->hdr.return_code);
4473 return 0;
4474 }
4475 qdata = &cmd->data.setassparms.data.query_arp;
4476 switch(qdata->reply_bits){
4477 case 5:
4478 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
4479 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4480 uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
4481 break;
4482 case 7:
4483 /* fall through to default */
4484 default:
4485 /* tr is the same as eth -> entry7 */
4486 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
4487 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4488 uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
4489 break;
4490 }
4491 /* check if there is enough room in userspace */
4492 if ((qinfo->udata_len - qinfo->udata_offset) <
4493 qdata->no_entries * uentry_size){
4494 QETH_DBF_TEXT_(trace, 4, "qaer3%i", -ENOMEM);
4495 cmd->hdr.return_code = -ENOMEM;
4496 PRINT_WARN("query ARP user space buffer is too small for "
4497 "the returned number of ARP entries. "
4498 "Aborting query!\n");
4499 goto out_error;
4500 }
4501 QETH_DBF_TEXT_(trace, 4, "anore%i",
4502 cmd->data.setassparms.hdr.number_of_replies);
4503 QETH_DBF_TEXT_(trace, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
4504 QETH_DBF_TEXT_(trace, 4, "anoen%i", qdata->no_entries);
4505
4506 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
4507 /* strip off "media specific information" */
4508 qeth_copy_arp_entries_stripped(qinfo, qdata, entry_size,
4509 uentry_size);
4510 } else
4511 /*copy entries to user buffer*/
4512 memcpy(qinfo->udata + qinfo->udata_offset,
4513 (char *)&qdata->data, qdata->no_entries*uentry_size);
4514
4515 qinfo->no_entries += qdata->no_entries;
4516 qinfo->udata_offset += (qdata->no_entries*uentry_size);
4517 /* check if all replies received ... */
4518 if (cmd->data.setassparms.hdr.seq_no <
4519 cmd->data.setassparms.hdr.number_of_replies)
4520 return 1;
4521 memcpy(qinfo->udata, &qinfo->no_entries, 4);
4522 /* keep STRIP_ENTRIES flag so the user program can distinguish
4523 * stripped entries from normal ones */
4524 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4525 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
4526 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET,&qdata->reply_bits,2);
4527 return 0;
4528out_error:
4529 i = 0;
4530 memcpy(qinfo->udata, &i, 4);
4531 return 0;
4532}
4533
4534static int
4535qeth_send_ipa_arp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
4536 int len, int (*reply_cb)(struct qeth_card *,
4537 struct qeth_reply *,
4538 unsigned long),
4539 void *reply_param)
4540{
4541 QETH_DBF_TEXT(trace,4,"sendarp");
4542
4543 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
4544 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
4545 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
4546 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4547 reply_cb, reply_param);
4548}
4549
4550static int
4551qeth_send_ipa_snmp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
4552 int len, int (*reply_cb)(struct qeth_card *,
4553 struct qeth_reply *,
4554 unsigned long),
4555 void *reply_param)
4556{
4557 u16 s1, s2;
4558
4559 QETH_DBF_TEXT(trace,4,"sendsnmp");
4560
4561 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
4562 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
4563 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
4564 /* adjust PDU length fields in IPA_PDU_HEADER */
4565	s1 = (u16) (IPA_PDU_HEADER_SIZE + len);
4566	s2 = (u16) len;
4567 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
4568 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
4569 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
4570 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
4571 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
4572 reply_cb, reply_param);
4573}
4574
4575static struct qeth_cmd_buffer *
4576qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs,
4577 __u16, __u16, enum qeth_prot_versions);
4578static int
4579qeth_arp_query(struct qeth_card *card, char *udata)
4580{
4581 struct qeth_cmd_buffer *iob;
4582 struct qeth_arp_query_info qinfo = {0, };
4583 int tmp;
4584 int rc;
4585
4586 QETH_DBF_TEXT(trace,3,"arpquery");
4587
4588 /*
4589	 * currently GuestLAN delivers only all-zero data on query ARP,
4590	 * even though ARP processing is supported (according to the IPA
4591	 * supported-functions flags); since all zeros carries no valuable
4592	 * information, we return EOPNOTSUPP for all ARP functions
4593 */
4594 /*if (card->info.guestlan)
4595 return -EOPNOTSUPP; */
4596 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
4597 IPA_ARP_PROCESSING)) {
4598 PRINT_WARN("ARP processing not supported "
4599 "on %s!\n", QETH_CARD_IFNAME(card));
4600 return -EOPNOTSUPP;
4601 }
4602 /* get size of userspace buffer and mask_bits -> 6 bytes */
4603 if (copy_from_user(&qinfo, udata, 6))
4604 return -EFAULT;
4605 if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL)))
4606 return -ENOMEM;
4607 memset(qinfo.udata, 0, qinfo.udata_len);
4608 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
4609 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4610 IPA_CMD_ASS_ARP_QUERY_INFO,
4611 sizeof(int),QETH_PROT_IPV4);
4612
4613 rc = qeth_send_ipa_arp_cmd(card, iob,
4614 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
4615 qeth_arp_query_cb, (void *)&qinfo);
4616 if (rc) {
4617 tmp = rc;
4618 PRINT_WARN("Error while querying ARP cache on %s: %s "
4619 "(0x%x/%d)\n",
4620 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
4621 tmp, tmp);
4622		if (copy_to_user(udata, qinfo.udata, 4))
			rc = -EFAULT;
4623	} else {
4624		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
			rc = -EFAULT;
4625 }
4626 kfree(qinfo.udata);
4627 return rc;
4628}
4629
4630/**
4631 * SNMP command callback
4632 */
4633static int
4634qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply,
4635 unsigned long sdata)
4636{
4637 struct qeth_ipa_cmd *cmd;
4638 struct qeth_arp_query_info *qinfo;
4639 struct qeth_snmp_cmd *snmp;
4640 unsigned char *data;
4641 __u16 data_len;
4642
4643 QETH_DBF_TEXT(trace,3,"snpcmdcb");
4644
4645 cmd = (struct qeth_ipa_cmd *) sdata;
4646 data = (unsigned char *)((char *)cmd - reply->offset);
4647 qinfo = (struct qeth_arp_query_info *) reply->param;
4648 snmp = &cmd->data.setadapterparms.data.snmp;
4649
4650 if (cmd->hdr.return_code) {
4651 QETH_DBF_TEXT_(trace,4,"scer1%i", cmd->hdr.return_code);
4652 return 0;
4653 }
4654 if (cmd->data.setadapterparms.hdr.return_code) {
4655 cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
4656 QETH_DBF_TEXT_(trace,4,"scer2%i", cmd->hdr.return_code);
4657 return 0;
4658 }
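	/*
	 * An SNMP reply may span several IPA fragments; only the first
	 * fragment (seq_no 1) carries the full qeth_snmp_cmd header.  The
	 * length and copy logic below therefore starts at snmp->data for
	 * the first fragment and at snmp->request for all later ones.
	 */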
4659 data_len = *((__u16*)QETH_IPA_PDU_LEN_PDU1(data));
4660 if (cmd->data.setadapterparms.hdr.seq_no == 1)
4661 data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
4662 else
4663 data_len -= (__u16)((char*)&snmp->request - (char *)cmd);
4664
4665 /* check if there is enough room in userspace */
4666 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4667 QETH_DBF_TEXT_(trace, 4, "scer3%i", -ENOMEM);
4668 cmd->hdr.return_code = -ENOMEM;
4669 return 0;
4670 }
4671 QETH_DBF_TEXT_(trace, 4, "snore%i",
4672 cmd->data.setadapterparms.hdr.used_total);
4673 QETH_DBF_TEXT_(trace, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no);
4674 /*copy entries to user buffer*/
4675 if (cmd->data.setadapterparms.hdr.seq_no == 1) {
4676 memcpy(qinfo->udata + qinfo->udata_offset,
4677 (char *)snmp,
4678 data_len + offsetof(struct qeth_snmp_cmd,data));
4679 qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
4680 } else {
4681 memcpy(qinfo->udata + qinfo->udata_offset,
4682 (char *)&snmp->request, data_len);
4683 }
4684 qinfo->udata_offset += data_len;
4685 /* check if all replies received ... */
4686 QETH_DBF_TEXT_(trace, 4, "srtot%i",
4687 cmd->data.setadapterparms.hdr.used_total);
4688 QETH_DBF_TEXT_(trace, 4, "srseq%i",
4689 cmd->data.setadapterparms.hdr.seq_no);
4690 if (cmd->data.setadapterparms.hdr.seq_no <
4691 cmd->data.setadapterparms.hdr.used_total)
4692 return 1;
4693 return 0;
4694}
4695
4696static struct qeth_cmd_buffer *
4697qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds,
4698 enum qeth_prot_versions );
4699
4700static struct qeth_cmd_buffer *
4701qeth_get_adapter_cmd(struct qeth_card *card, __u32 command, __u32 cmdlen)
4702{
4703 struct qeth_cmd_buffer *iob;
4704 struct qeth_ipa_cmd *cmd;
4705
4706 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETADAPTERPARMS,
4707 QETH_PROT_IPV4);
4708 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4709 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
4710 cmd->data.setadapterparms.hdr.command_code = command;
4711 cmd->data.setadapterparms.hdr.used_total = 1;
4712 cmd->data.setadapterparms.hdr.seq_no = 1;
4713
4714 return iob;
4715}
4716
4717/**
4718 * function to send SNMP commands to OSA-E card
4719 */
4720static int
4721qeth_snmp_command(struct qeth_card *card, char *udata)
4722{
4723 struct qeth_cmd_buffer *iob;
4724 struct qeth_ipa_cmd *cmd;
4725 struct qeth_snmp_ureq *ureq;
4726 int req_len;
4727 struct qeth_arp_query_info qinfo = {0, };
4728 int rc = 0;
4729
4730 QETH_DBF_TEXT(trace,3,"snmpcmd");
4731
4732 if (card->info.guestlan)
4733 return -EOPNOTSUPP;
4734
4735 if ((!qeth_adp_supported(card,IPA_SETADP_SET_SNMP_CONTROL)) &&
4736 (!card->options.layer2) ) {
4737 PRINT_WARN("SNMP Query MIBS not supported "
4738 "on %s!\n", QETH_CARD_IFNAME(card));
4739 return -EOPNOTSUPP;
4740 }
4741 /* skip 4 bytes (data_len struct member) to get req_len */
4742 if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
4743 return -EFAULT;
4744 ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
4745 if (!ureq) {
4746 QETH_DBF_TEXT(trace, 2, "snmpnome");
4747 return -ENOMEM;
4748 }
4749 if (copy_from_user(ureq, udata,
4750 req_len+sizeof(struct qeth_snmp_ureq_hdr))){
4751 kfree(ureq);
4752 return -EFAULT;
4753 }
4754 qinfo.udata_len = ureq->hdr.data_len;
4755 if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL))){
4756 kfree(ureq);
4757 return -ENOMEM;
4758 }
4759 memset(qinfo.udata, 0, qinfo.udata_len);
4760 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4761
4762 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
4763 QETH_SNMP_SETADP_CMDLENGTH + req_len);
4764 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
4765 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
4766 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
4767 qeth_snmp_command_cb, (void *)&qinfo);
4768 if (rc)
4769 PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
4770 QETH_CARD_IFNAME(card), rc);
4771	else if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4772		rc = -EFAULT;
4773
4774 kfree(ureq);
4775 kfree(qinfo.udata);
4776 return rc;
4777}
4778
4779static int
4780qeth_default_setassparms_cb(struct qeth_card *, struct qeth_reply *,
4781 unsigned long);
4782
4783static int
4784qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *,
4785 __u16, long,
4786 int (*reply_cb)
4787 (struct qeth_card *, struct qeth_reply *, unsigned long),
4788 void *reply_param);
4789
4790static int
4791qeth_arp_add_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
4792{
4793 struct qeth_cmd_buffer *iob;
4794 char buf[16];
4795 int tmp;
4796 int rc;
4797
4798 QETH_DBF_TEXT(trace,3,"arpadent");
4799
4800 /*
4801	 * currently GuestLAN delivers only all-zero data on query ARP,
4802	 * even though ARP processing is supported (according to the IPA
4803	 * supported-functions flags); since all zeros carries no valuable
4804	 * information, we return EOPNOTSUPP for all ARP functions
4805 */
4806 if (card->info.guestlan)
4807 return -EOPNOTSUPP;
4808 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4809 PRINT_WARN("ARP processing not supported "
4810 "on %s!\n", QETH_CARD_IFNAME(card));
4811 return -EOPNOTSUPP;
4812 }
4813
4814 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4815 IPA_CMD_ASS_ARP_ADD_ENTRY,
4816 sizeof(struct qeth_arp_cache_entry),
4817 QETH_PROT_IPV4);
4818 rc = qeth_send_setassparms(card, iob,
4819 sizeof(struct qeth_arp_cache_entry),
4820 (unsigned long) entry,
4821 qeth_default_setassparms_cb, NULL);
4822 if (rc) {
4823 tmp = rc;
4824 qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
4825 PRINT_WARN("Could not add ARP entry for address %s on %s: "
4826 "%s (0x%x/%d)\n",
4827 buf, QETH_CARD_IFNAME(card),
4828 qeth_arp_get_error_cause(&rc), tmp, tmp);
4829 }
4830 return rc;
4831}
4832
4833static int
4834qeth_arp_remove_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
4835{
4836 struct qeth_cmd_buffer *iob;
4837 char buf[16] = {0, };
4838 int tmp;
4839 int rc;
4840
4841 QETH_DBF_TEXT(trace,3,"arprment");
4842
4843 /*
4844	 * currently GuestLAN delivers only all-zero data on query ARP,
4845	 * even though ARP processing is supported (according to the IPA
4846	 * supported-functions flags); since all zeros carries no valuable
4847	 * information, we return EOPNOTSUPP for all ARP functions
4848 */
4849 if (card->info.guestlan)
4850 return -EOPNOTSUPP;
4851 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4852 PRINT_WARN("ARP processing not supported "
4853 "on %s!\n", QETH_CARD_IFNAME(card));
4854 return -EOPNOTSUPP;
4855 }
4856 memcpy(buf, entry, 12);
4857 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
4858 IPA_CMD_ASS_ARP_REMOVE_ENTRY,
4859 12,
4860 QETH_PROT_IPV4);
4861 rc = qeth_send_setassparms(card, iob,
4862 12, (unsigned long)buf,
4863 qeth_default_setassparms_cb, NULL);
4864 if (rc) {
4865 tmp = rc;
4866 memset(buf, 0, 16);
4867 qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
4868 PRINT_WARN("Could not delete ARP entry for address %s on %s: "
4869 "%s (0x%x/%d)\n",
4870 buf, QETH_CARD_IFNAME(card),
4871 qeth_arp_get_error_cause(&rc), tmp, tmp);
4872 }
4873 return rc;
4874}
4875
4876static int
4877qeth_arp_flush_cache(struct qeth_card *card)
4878{
4879 int rc;
4880 int tmp;
4881
4882 QETH_DBF_TEXT(trace,3,"arpflush");
4883
4884 /*
4885	 * currently GuestLAN delivers only all-zero data on query ARP,
4886	 * even though ARP processing is supported (according to the IPA
4887	 * supported-functions flags); since all zeros carries no valuable
4888	 * information, we return EOPNOTSUPP for all ARP functions
4889 */
4890 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
4891 return -EOPNOTSUPP;
4892 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4893 PRINT_WARN("ARP processing not supported "
4894 "on %s!\n", QETH_CARD_IFNAME(card));
4895 return -EOPNOTSUPP;
4896 }
4897 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
4898 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
4899 if (rc){
4900 tmp = rc;
4901 PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n",
4902 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
4903 tmp, tmp);
4904 }
4905 return rc;
4906}
4907
4908static int
4909qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4910{
4911 struct qeth_card *card = (struct qeth_card *)dev->priv;
4912 struct qeth_arp_cache_entry arp_entry;
4913 struct mii_ioctl_data *mii_data;
4914 int rc = 0;
4915
4916 if (!card)
4917 return -ENODEV;
4918
4919 if ((card->state != CARD_STATE_UP) &&
4920 (card->state != CARD_STATE_SOFTSETUP))
4921 return -ENODEV;
4922
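	/*
	 * The private SIOC_QETH ARP ioctls below all require CAP_NET_ADMIN
	 * and are refused in layer-2 mode; the MII ioctls are answered
	 * from the emulated PHY registers in qeth_mdio_read() above.
	 */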
4923 switch (cmd){
4924 case SIOC_QETH_ARP_SET_NO_ENTRIES:
4925 if ( !capable(CAP_NET_ADMIN) ||
4926 (card->options.layer2) ) {
4927 rc = -EPERM;
4928 break;
4929 }
4930 rc = qeth_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
4931 break;
4932 case SIOC_QETH_ARP_QUERY_INFO:
4933 if ( !capable(CAP_NET_ADMIN) ||
4934 (card->options.layer2) ) {
4935 rc = -EPERM;
4936 break;
4937 }
4938 rc = qeth_arp_query(card, rq->ifr_ifru.ifru_data);
4939 break;
4940 case SIOC_QETH_ARP_ADD_ENTRY:
4941 if ( !capable(CAP_NET_ADMIN) ||
4942 (card->options.layer2) ) {
4943 rc = -EPERM;
4944 break;
4945 }
4946 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
4947 sizeof(struct qeth_arp_cache_entry)))
4948 rc = -EFAULT;
4949 else
4950 rc = qeth_arp_add_entry(card, &arp_entry);
4951 break;
4952 case SIOC_QETH_ARP_REMOVE_ENTRY:
4953 if ( !capable(CAP_NET_ADMIN) ||
4954 (card->options.layer2) ) {
4955 rc = -EPERM;
4956 break;
4957 }
4958 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
4959 sizeof(struct qeth_arp_cache_entry)))
4960 rc = -EFAULT;
4961 else
4962 rc = qeth_arp_remove_entry(card, &arp_entry);
4963 break;
4964 case SIOC_QETH_ARP_FLUSH_CACHE:
4965 if ( !capable(CAP_NET_ADMIN) ||
4966 (card->options.layer2) ) {
4967 rc = -EPERM;
4968 break;
4969 }
4970 rc = qeth_arp_flush_cache(card);
4971 break;
4972 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
4973 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
4974 break;
4975 case SIOC_QETH_GET_CARD_TYPE:
4976 if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
4977 !card->info.guestlan)
4978 return 1;
4979 return 0;
4980 break;
4981 case SIOCGMIIPHY:
4982 mii_data = if_mii(rq);
4983 mii_data->phy_id = 0;
4984 break;
4985 case SIOCGMIIREG:
4986 mii_data = if_mii(rq);
4987 if (mii_data->phy_id != 0)
4988 rc = -EINVAL;
4989 else
4990 mii_data->val_out = qeth_mdio_read(dev,mii_data->phy_id,
4991 mii_data->reg_num);
4992 break;
4993 case SIOCSMIIREG:
4994 rc = -EOPNOTSUPP;
4995 break;
4996		/* TODO: remove the break above once qeth_mdio_write does something */
4997 if (!capable(CAP_NET_ADMIN)){
4998 rc = -EPERM;
4999 break;
5000 }
5001 mii_data = if_mii(rq);
5002 if (mii_data->phy_id != 0)
5003 rc = -EINVAL;
5004 else
5005 qeth_mdio_write(dev, mii_data->phy_id, mii_data->reg_num,
5006 mii_data->val_in);
5007 break;
5008 default:
5009 rc = -EOPNOTSUPP;
5010 }
5011 if (rc)
5012 QETH_DBF_TEXT_(trace, 2, "ioce%d", rc);
5013 return rc;
5014}
5015
5016static struct net_device_stats *
5017qeth_get_stats(struct net_device *dev)
5018{
5019 struct qeth_card *card;
5020
5021 card = (struct qeth_card *) (dev->priv);
5022
5023 QETH_DBF_TEXT(trace,5,"getstat");
5024
5025 return &card->stats;
5026}
5027
5028static int
5029qeth_change_mtu(struct net_device *dev, int new_mtu)
5030{
5031 struct qeth_card *card;
5032 char dbf_text[15];
5033
5034 card = (struct qeth_card *) (dev->priv);
5035
5036 QETH_DBF_TEXT(trace,4,"chgmtu");
5037 sprintf(dbf_text, "%8x", new_mtu);
5038 QETH_DBF_TEXT(trace,4,dbf_text);
5039
5040 if (new_mtu < 64)
5041 return -EINVAL;
5042 if (new_mtu > 65535)
5043 return -EINVAL;
5044 if ((!qeth_is_supported(card,IPA_IP_FRAGMENTATION)) &&
5045 (!qeth_mtu_is_valid(card, new_mtu)))
5046 return -EINVAL;
5047 dev->mtu = new_mtu;
5048 return 0;
5049}
5050
5051#ifdef CONFIG_QETH_VLAN
5052static void
5053qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5054{
5055 struct qeth_card *card;
5056 unsigned long flags;
5057
5058 QETH_DBF_TEXT(trace,4,"vlanreg");
5059
5060 card = (struct qeth_card *) dev->priv;
5061 spin_lock_irqsave(&card->vlanlock, flags);
5062 card->vlangrp = grp;
5063 spin_unlock_irqrestore(&card->vlanlock, flags);
5064}
5065
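/*
 * Drain buf->skb_list, freeing the skbs tagged for the dying VLAN id
 * and re-queueing all others in their original order via tmp_list.
 * The atomic_dec of skb->users presumably drops the extra reference
 * the buffer took when the skb was queued for transmission.
 */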
5066static inline void
5067qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf,
5068 unsigned short vid)
5069{
5070 int i;
5071 struct sk_buff *skb;
5072 struct sk_buff_head tmp_list;
5073
5074 skb_queue_head_init(&tmp_list);
5075 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
5076 while ((skb = skb_dequeue(&buf->skb_list))){
5077 if (vlan_tx_tag_present(skb) &&
5078 (vlan_tx_tag_get(skb) == vid)) {
5079 atomic_dec(&skb->users);
5080 dev_kfree_skb(skb);
5081 } else
5082 skb_queue_tail(&tmp_list, skb);
5083 }
5084 }
5085 while ((skb = skb_dequeue(&tmp_list)))
5086 skb_queue_tail(&buf->skb_list, skb);
5087}
5088
5089static void
5090qeth_free_vlan_skbs(struct qeth_card *card, unsigned short vid)
5091{
5092 int i, j;
5093
5094 QETH_DBF_TEXT(trace, 4, "frvlskbs");
5095 for (i = 0; i < card->qdio.no_out_queues; ++i){
5096 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
5097 qeth_free_vlan_buffer(card, &card->qdio.
5098 out_qs[i]->bufs[j], vid);
5099 }
5100}
5101
5102static void
5103qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
5104{
5105 struct in_device *in_dev;
5106 struct in_ifaddr *ifa;
5107 struct qeth_ipaddr *addr;
5108
5109 QETH_DBF_TEXT(trace, 4, "frvaddr4");
5110 if (!card->vlangrp)
5111 return;
5112 rcu_read_lock();
5113 in_dev = __in_dev_get(card->vlangrp->vlan_devices[vid]);
5114 if (!in_dev)
5115 goto out;
5116 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
5117 addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
5118 if (addr){
5119 addr->u.a4.addr = ifa->ifa_address;
5120 addr->u.a4.mask = ifa->ifa_mask;
5121 addr->type = QETH_IP_TYPE_NORMAL;
5122 if (!qeth_delete_ip(card, addr))
5123 kfree(addr);
5124 }
5125 }
5126out:
5127 rcu_read_unlock();
5128}
5129
5130static void
5131qeth_free_vlan_addresses6(struct qeth_card *card, unsigned short vid)
5132{
5133#ifdef CONFIG_QETH_IPV6
5134 struct inet6_dev *in6_dev;
5135 struct inet6_ifaddr *ifa;
5136 struct qeth_ipaddr *addr;
5137
5138 QETH_DBF_TEXT(trace, 4, "frvaddr6");
5139 if (!card->vlangrp)
5140 return;
5141 in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]);
5142 if (!in6_dev)
5143 return;
5144 for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){
5145 addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
5146 if (addr){
5147 memcpy(&addr->u.a6.addr, &ifa->addr,
5148 sizeof(struct in6_addr));
5149 addr->u.a6.pfxlen = ifa->prefix_len;
5150 addr->type = QETH_IP_TYPE_NORMAL;
5151 if (!qeth_delete_ip(card, addr))
5152 kfree(addr);
5153 }
5154 }
5155 in6_dev_put(in6_dev);
5156#endif /* CONFIG_QETH_IPV6 */
5157}
5158
5159static void
5160qeth_layer2_send_setdelvlan(struct qeth_card *card, __u16 i,
5161 enum qeth_ipa_cmds ipacmd)
5162{
5163 int rc;
5164 struct qeth_ipa_cmd *cmd;
5165 struct qeth_cmd_buffer *iob;
5166
5167 QETH_DBF_TEXT_(trace, 4, "L2sdv%x",ipacmd);
5168 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
5169 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5170 cmd->data.setdelvlan.vlan_id = i;
5171
5172 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
5173 if (rc) {
5174 PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. "
5175 "Continuing\n",i, QETH_CARD_IFNAME(card), rc);
5176 QETH_DBF_TEXT_(trace, 2, "L2VL%4x", ipacmd);
5177 QETH_DBF_TEXT_(trace, 2, "L2%s", CARD_BUS_ID(card));
5178 QETH_DBF_TEXT_(trace, 2, "err%d", rc);
5179 }
5180}
5181
5182static void
5183qeth_layer2_process_vlans(struct qeth_card *card, int clear)
5184{
5185 unsigned short i;
5186
5187 QETH_DBF_TEXT(trace, 3, "L2prcvln");
5188
5189 if (!card->vlangrp)
5190 return;
5191 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5192 if (card->vlangrp->vlan_devices[i] == NULL)
5193 continue;
5194 if (clear)
5195 qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN);
5196 else
5197 qeth_layer2_send_setdelvlan(card, i, IPA_CMD_SETVLAN);
5198 }
5199}
5200
5201/* add_vid is used in layer 2 mode only ... */
5202static void
5203qeth_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
5204{
5205 struct qeth_card *card;
5206
5207 QETH_DBF_TEXT_(trace, 4, "aid:%d", vid);
5208
5209 card = (struct qeth_card *) dev->priv;
5210 if (!card->options.layer2)
5211 return;
5212 qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
5213}
5214
5215/* ... kill_vid is used in both modes */
5216static void
5217qeth_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
5218{
5219 struct qeth_card *card;
5220 unsigned long flags;
5221
5222 QETH_DBF_TEXT_(trace, 4, "kid:%d", vid);
5223
5224 card = (struct qeth_card *) dev->priv;
5225 /* free all skbs for the vlan device */
5226 qeth_free_vlan_skbs(card, vid);
5227 spin_lock_irqsave(&card->vlanlock, flags);
5228 /* unregister IP addresses of vlan device */
5229 qeth_free_vlan_addresses4(card, vid);
5230 qeth_free_vlan_addresses6(card, vid);
5231 if (card->vlangrp)
5232 card->vlangrp->vlan_devices[vid] = NULL;
5233 spin_unlock_irqrestore(&card->vlanlock, flags);
5234 if (card->options.layer2)
5235 qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
5236 qeth_set_multicast_list(card->dev);
5237}
5238#endif
5239
5240/**
5241 * set multicast address on card
5242 */
5243static void
5244qeth_set_multicast_list(struct net_device *dev)
5245{
5246 struct qeth_card *card = (struct qeth_card *) dev->priv;
5247
5248 QETH_DBF_TEXT(trace,3,"setmulti");
5249 qeth_delete_mc_addresses(card);
5250 qeth_add_multicast_ipv4(card);
5251#ifdef CONFIG_QETH_IPV6
5252 qeth_add_multicast_ipv6(card);
5253#endif
5254 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
5255 schedule_work(&card->kernel_thread_starter);
5256}
5257
5258static int
5259qeth_neigh_setup(struct net_device *dev, struct neigh_parms *np)
5260{
5261 return 0;
5262}
5263
5264static void
5265qeth_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev)
5266{
5267 if (dev->type == ARPHRD_IEEE802_TR)
5268 ip_tr_mc_map(ipm, mac);
5269 else
5270 ip_eth_mc_map(ipm, mac);
5271}
5272
5273static struct qeth_ipaddr *
5274qeth_get_addr_buffer(enum qeth_prot_versions prot)
5275{
5276 struct qeth_ipaddr *addr;
5277
5278 addr = kmalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
5279 if (addr == NULL) {
5280 PRINT_WARN("Not enough memory to add address\n");
5281 return NULL;
5282 }
5283 memset(addr,0,sizeof(struct qeth_ipaddr));
5284 addr->type = QETH_IP_TYPE_NORMAL;
5285 addr->proto = prot;
5286 return addr;
5287}
5288
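/*
 * Multicast updates are applied asynchronously: this helper only
 * queues a single DEL_ALL_MC todo entry (standing for "flush all old
 * groups"), and the QETH_SET_IP_THREAD kicked off from
 * qeth_set_multicast_list() above issues the actual IPA commands.
 */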
5289static void
5290qeth_delete_mc_addresses(struct qeth_card *card)
5291{
5292 struct qeth_ipaddr *iptodo;
5293 unsigned long flags;
5294
5295 QETH_DBF_TEXT(trace,4,"delmc");
5296 iptodo = qeth_get_addr_buffer(QETH_PROT_IPV4);
5297 if (!iptodo) {
5298 QETH_DBF_TEXT(trace, 2, "dmcnomem");
5299 return;
5300 }
5301 iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
5302 spin_lock_irqsave(&card->ip_lock, flags);
5303 if (!__qeth_insert_ip_todo(card, iptodo, 0))
5304 kfree(iptodo);
5305 spin_unlock_irqrestore(&card->ip_lock, flags);
5306}
5307
5308static inline void
5309qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev)
5310{
5311 struct qeth_ipaddr *ipm;
5312 struct ip_mc_list *im4;
5313 char buf[MAX_ADDR_LEN];
5314
5315 QETH_DBF_TEXT(trace,4,"addmc");
5316 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
5317 qeth_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
5318 ipm = qeth_get_addr_buffer(QETH_PROT_IPV4);
5319 if (!ipm)
5320 continue;
5321 ipm->u.a4.addr = im4->multiaddr;
5322 memcpy(ipm->mac,buf,OSA_ADDR_LEN);
5323 ipm->is_multicast = 1;
5324 if (!qeth_add_ip(card,ipm))
5325 kfree(ipm);
5326 }
5327}
5328
5329static inline void
5330qeth_add_vlan_mc(struct qeth_card *card)
5331{
5332#ifdef CONFIG_QETH_VLAN
5333 struct in_device *in_dev;
5334 struct vlan_group *vg;
5335 int i;
5336
5337 QETH_DBF_TEXT(trace,4,"addmcvl");
5338 if ( ((card->options.layer2 == 0) &&
5339 (!qeth_is_supported(card,IPA_FULL_VLAN))) ||
5340 (card->vlangrp == NULL) )
5341 return ;
5342
5343 vg = card->vlangrp;
5344 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5345 if (vg->vlan_devices[i] == NULL ||
5346 !(vg->vlan_devices[i]->flags & IFF_UP))
5347 continue;
5348 in_dev = in_dev_get(vg->vlan_devices[i]);
5349 if (!in_dev)
5350 continue;
5351 read_lock(&in_dev->mc_list_lock);
5352 qeth_add_mc(card,in_dev);
5353 read_unlock(&in_dev->mc_list_lock);
5354 in_dev_put(in_dev);
5355 }
5356#endif
5357}
5358
5359static void
5360qeth_add_multicast_ipv4(struct qeth_card *card)
5361{
5362 struct in_device *in4_dev;
5363
5364 QETH_DBF_TEXT(trace,4,"chkmcv4");
5365 in4_dev = in_dev_get(card->dev);
5366 if (in4_dev == NULL)
5367 return;
5368 read_lock(&in4_dev->mc_list_lock);
5369 qeth_add_mc(card, in4_dev);
5370 qeth_add_vlan_mc(card);
5371 read_unlock(&in4_dev->mc_list_lock);
5372 in_dev_put(in4_dev);
5373}
5374
5375#ifdef CONFIG_QETH_IPV6
5376static inline void
5377qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
5378{
5379 struct qeth_ipaddr *ipm;
5380 struct ifmcaddr6 *im6;
5381 char buf[MAX_ADDR_LEN];
5382
5383 QETH_DBF_TEXT(trace,4,"addmc6");
5384 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
5385 ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
5386 ipm = qeth_get_addr_buffer(QETH_PROT_IPV6);
5387 if (!ipm)
5388 continue;
5389 ipm->is_multicast = 1;
5390 memcpy(ipm->mac,buf,OSA_ADDR_LEN);
5391 memcpy(&ipm->u.a6.addr,&im6->mca_addr.s6_addr,
5392 sizeof(struct in6_addr));
5393 if (!qeth_add_ip(card,ipm))
5394 kfree(ipm);
5395 }
5396}
5397
5398static inline void
5399qeth_add_vlan_mc6(struct qeth_card *card)
5400{
5401#ifdef CONFIG_QETH_VLAN
5402 struct inet6_dev *in_dev;
5403 struct vlan_group *vg;
5404 int i;
5405
5406 QETH_DBF_TEXT(trace,4,"admc6vl");
5407 if ( ((card->options.layer2 == 0) &&
5408 (!qeth_is_supported(card,IPA_FULL_VLAN))) ||
5409 (card->vlangrp == NULL))
5410 return ;
5411
5412 vg = card->vlangrp;
5413 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5414 if (vg->vlan_devices[i] == NULL ||
5415 !(vg->vlan_devices[i]->flags & IFF_UP))
5416 continue;
5417 in_dev = in6_dev_get(vg->vlan_devices[i]);
5418 if (!in_dev)
5419 continue;
5420 read_lock(&in_dev->lock);
5421 qeth_add_mc6(card,in_dev);
5422 read_unlock(&in_dev->lock);
5423 in6_dev_put(in_dev);
5424 }
5425#endif /* CONFIG_QETH_VLAN */
5426}
5427
5428static void
5429qeth_add_multicast_ipv6(struct qeth_card *card)
5430{
5431 struct inet6_dev *in6_dev;
5432
5433 QETH_DBF_TEXT(trace,4,"chkmcv6");
5434 if ((card->options.layer2 == 0) &&
5435 (!qeth_is_supported(card, IPA_IPV6)) )
5436 return ;
5437
5438 in6_dev = in6_dev_get(card->dev);
5439 if (in6_dev == NULL)
5440 return;
5441 read_lock(&in6_dev->lock);
5442 qeth_add_mc6(card, in6_dev);
5443 qeth_add_vlan_mc6(card);
5444 read_unlock(&in6_dev->lock);
5445 in6_dev_put(in6_dev);
5446}
5447#endif /* CONFIG_QETH_IPV6 */
5448
5449static int
5450qeth_layer2_send_setdelmac(struct qeth_card *card, __u8 *mac,
5451 enum qeth_ipa_cmds ipacmd,
5452 int (*reply_cb) (struct qeth_card *,
5453 struct qeth_reply*,
5454 unsigned long))
5455{
5456 struct qeth_ipa_cmd *cmd;
5457 struct qeth_cmd_buffer *iob;
5458
5459 QETH_DBF_TEXT(trace, 2, "L2sdmac");
5460 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
5461 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5462 cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
5463 memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
5464 return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
5465}
5466
5467static int
5468qeth_layer2_send_setgroupmac_cb(struct qeth_card *card,
5469 struct qeth_reply *reply,
5470 unsigned long data)
5471{
5472 struct qeth_ipa_cmd *cmd;
5473 __u8 *mac;
5474
5475 QETH_DBF_TEXT(trace, 2, "L2Sgmacb");
5476 cmd = (struct qeth_ipa_cmd *) data;
5477 mac = &cmd->data.setdelmac.mac[0];
5478 /* MAC already registered, needed in couple/uncouple case */
5479 if (cmd->hdr.return_code == 0x2005) {
5480 PRINT_WARN("Group MAC %02x:%02x:%02x:%02x:%02x:%02x " \
5481			   "already exists on %s\n",
5482 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
5483 QETH_CARD_IFNAME(card));
5484 cmd->hdr.return_code = 0;
5485 }
5486 if (cmd->hdr.return_code)
5487 PRINT_ERR("Could not set group MAC " \
5488 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
5489 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
5490 QETH_CARD_IFNAME(card),cmd->hdr.return_code);
5491 return 0;
5492}
5493
5494static int
5495qeth_layer2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
5496{
5497 QETH_DBF_TEXT(trace, 2, "L2Sgmac");
5498 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
5499 qeth_layer2_send_setgroupmac_cb);
5500}
5501
5502static int
5503qeth_layer2_send_delgroupmac_cb(struct qeth_card *card,
5504 struct qeth_reply *reply,
5505 unsigned long data)
5506{
5507 struct qeth_ipa_cmd *cmd;
5508 __u8 *mac;
5509
5510 QETH_DBF_TEXT(trace, 2, "L2Dgmacb");
5511 cmd = (struct qeth_ipa_cmd *) data;
5512 mac = &cmd->data.setdelmac.mac[0];
5513 if (cmd->hdr.return_code)
5514 PRINT_ERR("Could not delete group MAC " \
5515 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
5516 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
5517 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
5518 return 0;
5519}
5520
5521static int
5522qeth_layer2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
5523{
5524 QETH_DBF_TEXT(trace, 2, "L2Dgmac");
5525 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
5526 qeth_layer2_send_delgroupmac_cb);
5527}
5528
5529static int
5530qeth_layer2_send_setmac_cb(struct qeth_card *card,
5531 struct qeth_reply *reply,
5532 unsigned long data)
5533{
5534 struct qeth_ipa_cmd *cmd;
5535
5536 QETH_DBF_TEXT(trace, 2, "L2Smaccb");
5537 cmd = (struct qeth_ipa_cmd *) data;
5538 if (cmd->hdr.return_code) {
5539 QETH_DBF_TEXT_(trace, 2, "L2er%x", cmd->hdr.return_code);
5540 PRINT_WARN("Error in registering MAC address on " \
5541 "device %s: x%x\n", CARD_BUS_ID(card),
5542 cmd->hdr.return_code);
5543 card->info.layer2_mac_registered = 0;
5544 cmd->hdr.return_code = -EIO;
5545 } else {
5546 card->info.layer2_mac_registered = 1;
5547 memcpy(card->dev->dev_addr,cmd->data.setdelmac.mac,
5548 OSA_ADDR_LEN);
5549 PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
5550 "successfully registered on device %s\n",
5551 card->dev->dev_addr[0], card->dev->dev_addr[1],
5552 card->dev->dev_addr[2], card->dev->dev_addr[3],
5553 card->dev->dev_addr[4], card->dev->dev_addr[5],
5554 card->dev->name);
5555 }
5556 return 0;
5557}
5558
5559static int
5560qeth_layer2_send_setmac(struct qeth_card *card, __u8 *mac)
5561{
5562 QETH_DBF_TEXT(trace, 2, "L2Setmac");
5563 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
5564 qeth_layer2_send_setmac_cb);
5565}
5566
5567static int
5568qeth_layer2_send_delmac_cb(struct qeth_card *card,
5569 struct qeth_reply *reply,
5570 unsigned long data)
5571{
5572 struct qeth_ipa_cmd *cmd;
5573
5574 QETH_DBF_TEXT(trace, 2, "L2Dmaccb");
5575 cmd = (struct qeth_ipa_cmd *) data;
5576 if (cmd->hdr.return_code) {
5577 PRINT_WARN("Error in deregistering MAC address on " \
5578 "device %s: x%x\n", CARD_BUS_ID(card),
5579 cmd->hdr.return_code);
5580 QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code);
5581 cmd->hdr.return_code = -EIO;
5582 return 0;
5583 }
5584 card->info.layer2_mac_registered = 0;
5585
5586 return 0;
5587}
5588static int
5589qeth_layer2_send_delmac(struct qeth_card *card, __u8 *mac)
5590{
5591 QETH_DBF_TEXT(trace, 2, "L2Delmac");
5592 if (!card->info.layer2_mac_registered)
5593 return 0;
5594 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
5595 qeth_layer2_send_delmac_cb);
5596}
5597
5598static int
5599qeth_layer2_set_mac_address(struct net_device *dev, void *p)
5600{
5601 struct sockaddr *addr = p;
5602 struct qeth_card *card;
5603 int rc = 0;
5604
5605 QETH_DBF_TEXT(trace, 3, "setmac");
5606
5607 if (qeth_verify_dev(dev) != QETH_REAL_CARD) {
5608 QETH_DBF_TEXT(trace, 3, "setmcINV");
5609 return -EOPNOTSUPP;
5610 }
5611 card = (struct qeth_card *) dev->priv;
5612
5613 if (!card->options.layer2) {
5614		PRINT_WARN("Setting MAC address on %s is not supported "
5615			   "in Layer 3 mode.\n", dev->name);
5616 QETH_DBF_TEXT(trace, 3, "setmcLY3");
5617 return -EOPNOTSUPP;
5618 }
5619 QETH_DBF_TEXT_(trace, 3, "%s", CARD_BUS_ID(card));
5620 QETH_DBF_HEX(trace, 3, addr->sa_data, OSA_ADDR_LEN);
5621 rc = qeth_layer2_send_delmac(card, &card->dev->dev_addr[0]);
5622 if (!rc)
5623 rc = qeth_layer2_send_setmac(card, addr->sa_data);
5624 return rc;
5625}
5626
5627static void
5628qeth_fill_ipacmd_header(struct qeth_card *card, struct qeth_ipa_cmd *cmd,
5629 __u8 command, enum qeth_prot_versions prot)
5630{
5631 memset(cmd, 0, sizeof (struct qeth_ipa_cmd));
5632 cmd->hdr.command = command;
5633 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
5634 cmd->hdr.seqno = card->seqno.ipa;
5635 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
5636 cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
5637 if (card->options.layer2)
5638 cmd->hdr.prim_version_no = 2;
5639 else
5640 cmd->hdr.prim_version_no = 1;
5641 cmd->hdr.param_count = 1;
5642 cmd->hdr.prot_version = prot;
5643 cmd->hdr.ipa_supported = 0;
5644 cmd->hdr.ipa_enabled = 0;
5645}
5646
5647static struct qeth_cmd_buffer *
5648qeth_get_ipacmd_buffer(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
5649 enum qeth_prot_versions prot)
5650{
5651 struct qeth_cmd_buffer *iob;
5652 struct qeth_ipa_cmd *cmd;
5653
5654 iob = qeth_wait_for_buffer(&card->write);
5655 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5656 qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
5657
5658 return iob;
5659}
5660
5661static int
5662qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd)
5663{
5664 int rc;
5665 struct qeth_cmd_buffer *iob;
5666 struct qeth_ipa_cmd *cmd;
5667
5668 QETH_DBF_TEXT(trace,4,"setdelmc");
5669
5670 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
5671 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5672 memcpy(&cmd->data.setdelipm.mac,addr->mac, OSA_ADDR_LEN);
5673 if (addr->proto == QETH_PROT_IPV6)
5674 memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
5675 sizeof(struct in6_addr));
5676 else
5677 memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr,4);
5678
5679 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
5680
5681 return rc;
5682}
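
/*
 * Expand a prefix length into a 16-byte (IPv6-sized) netmask.  A
 * worked example: len == 20 yields ff:ff:f0:00:...:00 -- the first two
 * bytes see j >= 8, the third sees j == 4 ((u8)(0xFF00 >> 4) == 0xf0),
 * and the remaining bytes see j <= 0.
 */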
5683static inline void
5684qeth_fill_netmask(u8 *netmask, unsigned int len)
5685{
5686 int i,j;
5687 for (i=0;i<16;i++) {
5688 j=(len)-(i*8);
5689 if (j >= 8)
5690 netmask[i] = 0xff;
5691 else if (j > 0)
5692 netmask[i] = (u8)(0xFF00>>j);
5693 else
5694 netmask[i] = 0;
5695 }
5696}
5697
5698static int
5699qeth_send_setdelip(struct qeth_card *card, struct qeth_ipaddr *addr,
5700 int ipacmd, unsigned int flags)
5701{
5702 int rc;
5703 struct qeth_cmd_buffer *iob;
5704 struct qeth_ipa_cmd *cmd;
5705 __u8 netmask[16];
5706
5707 QETH_DBF_TEXT(trace,4,"setdelip");
5708 QETH_DBF_TEXT_(trace,4,"flags%02X", flags);
5709
5710 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
5711 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5712 if (addr->proto == QETH_PROT_IPV6) {
5713 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
5714 sizeof(struct in6_addr));
5715 qeth_fill_netmask(netmask,addr->u.a6.pfxlen);
5716 memcpy(cmd->data.setdelip6.mask, netmask,
5717 sizeof(struct in6_addr));
5718 cmd->data.setdelip6.flags = flags;
5719 } else {
5720 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
5721 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
5722 cmd->data.setdelip4.flags = flags;
5723 }
5724
5725 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
5726
5727 return rc;
5728}
5729
5730static int
5731qeth_layer2_register_addr_entry(struct qeth_card *card,
5732 struct qeth_ipaddr *addr)
5733{
5734 if (!addr->is_multicast)
5735 return 0;
5736 QETH_DBF_TEXT(trace, 2, "setgmac");
5737 QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
5738 return qeth_layer2_send_setgroupmac(card, &addr->mac[0]);
5739}
5740
5741static int
5742qeth_layer2_deregister_addr_entry(struct qeth_card *card,
5743 struct qeth_ipaddr *addr)
5744{
5745 if (!addr->is_multicast)
5746 return 0;
5747 QETH_DBF_TEXT(trace, 2, "delgmac");
5748 QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
5749 return qeth_layer2_send_delgroupmac(card, &addr->mac[0]);
5750}
5751
5752static int
5753qeth_layer3_register_addr_entry(struct qeth_card *card,
5754 struct qeth_ipaddr *addr)
5755{
5756 char buf[50];
5757 int rc;
5758 int cnt = 3;
5759
5760 if (addr->proto == QETH_PROT_IPV4) {
5761 QETH_DBF_TEXT(trace, 2,"setaddr4");
5762 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
5763 } else if (addr->proto == QETH_PROT_IPV6) {
5764 QETH_DBF_TEXT(trace, 2, "setaddr6");
5765 QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
5766 QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
5767 } else {
5768 QETH_DBF_TEXT(trace, 2, "setaddr?");
5769 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
5770 }
5771 do {
5772 if (addr->is_multicast)
5773 rc = qeth_send_setdelmc(card, addr, IPA_CMD_SETIPM);
5774 else
5775 rc = qeth_send_setdelip(card, addr, IPA_CMD_SETIP,
5776 addr->set_flags);
5777 if (rc)
5778 QETH_DBF_TEXT(trace, 2, "failed");
5779 } while ((--cnt > 0) && rc);
5780 if (rc){
5781 QETH_DBF_TEXT(trace, 2, "FAILED");
5782 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
5783 PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n",
5784 buf, rc, rc);
5785 }
5786 return rc;
5787}
5788
5789static int
5790qeth_layer3_deregister_addr_entry(struct qeth_card *card,
5791 struct qeth_ipaddr *addr)
5792{
5793	/* char buf[50];  (used by the disabled warning below) */
5794 int rc;
5795
5796 if (addr->proto == QETH_PROT_IPV4) {
5797 QETH_DBF_TEXT(trace, 2,"deladdr4");
5798 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
5799 } else if (addr->proto == QETH_PROT_IPV6) {
5800 QETH_DBF_TEXT(trace, 2, "deladdr6");
5801 QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
5802 QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
5803 } else {
5804 QETH_DBF_TEXT(trace, 2, "deladdr?");
5805 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
5806 }
5807 if (addr->is_multicast)
5808 rc = qeth_send_setdelmc(card, addr, IPA_CMD_DELIPM);
5809 else
5810 rc = qeth_send_setdelip(card, addr, IPA_CMD_DELIP,
5811 addr->del_flags);
5812 if (rc) {
5813 QETH_DBF_TEXT(trace, 2, "failed");
5814 /* TODO: re-activate this warning as soon as we have a
5815		 * clean microcode
5816 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
5817 PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
5818 buf, rc);
5819 */
5820 }
5821 return rc;
5822}
5823
5824static int
5825qeth_register_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
5826{
5827 if (card->options.layer2)
5828 return qeth_layer2_register_addr_entry(card, addr);
5829
5830 return qeth_layer3_register_addr_entry(card, addr);
5831}
5832
5833static int
5834qeth_deregister_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
5835{
5836 if (card->options.layer2)
5837 return qeth_layer2_deregister_addr_entry(card, addr);
5838
5839 return qeth_layer3_deregister_addr_entry(card, addr);
5840}
5841
5842static u32
5843qeth_ethtool_get_tx_csum(struct net_device *dev)
5844{
5845 /* We may need to say that we support tx csum offload if
5846 * we do EDDP or TSO. There are discussions going on to
5847 * enforce rules in the stack and in ethtool that make
5848 * SG and TSO depend on HW_CSUM. At the moment there are
5849 * no such rules....
5850	 * If we say yes here, we would have to checksum all outbound
5851	 * packets ourselves. */
5852 return 0;
5853}
5854
5855static int
5856qeth_ethtool_set_tx_csum(struct net_device *dev, u32 data)
5857{
5858 return -EINVAL;
5859}
5860
5861static u32
5862qeth_ethtool_get_rx_csum(struct net_device *dev)
5863{
5864 struct qeth_card *card = (struct qeth_card *)dev->priv;
5865
5866 return (card->options.checksum_type == HW_CHECKSUMMING);
5867}
5868
5869static int
5870qeth_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5871{
5872 struct qeth_card *card = (struct qeth_card *)dev->priv;
5873
5874 if ((card->state != CARD_STATE_DOWN) &&
5875 (card->state != CARD_STATE_RECOVER))
5876 return -EPERM;
5877 if (data)
5878 card->options.checksum_type = HW_CHECKSUMMING;
5879 else
5880 card->options.checksum_type = SW_CHECKSUMMING;
5881 return 0;
5882}
5883
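/*
 * SG and TSO are only reported/accepted while a large_send mode is
 * configured -- presumably because only the EDDP/TSO paths can cope
 * with scattered skbs; the set_* handlers below enforce that coupling.
 */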
5884static u32
5885qeth_ethtool_get_sg(struct net_device *dev)
5886{
5887 struct qeth_card *card = (struct qeth_card *)dev->priv;
5888
5889 return ((card->options.large_send != QETH_LARGE_SEND_NO) &&
5890 (dev->features & NETIF_F_SG));
5891}
5892
5893static int
5894qeth_ethtool_set_sg(struct net_device *dev, u32 data)
5895{
5896 struct qeth_card *card = (struct qeth_card *)dev->priv;
5897
5898 if (data) {
5899 if (card->options.large_send != QETH_LARGE_SEND_NO)
5900 dev->features |= NETIF_F_SG;
5901 else {
5902 dev->features &= ~NETIF_F_SG;
5903 return -EINVAL;
5904 }
5905 } else
5906 dev->features &= ~NETIF_F_SG;
5907 return 0;
5908}
5909
5910static u32
5911qeth_ethtool_get_tso(struct net_device *dev)
5912{
5913 struct qeth_card *card = (struct qeth_card *)dev->priv;
5914
5915 return ((card->options.large_send != QETH_LARGE_SEND_NO) &&
5916 (dev->features & NETIF_F_TSO));
5917}
5918
5919static int
5920qeth_ethtool_set_tso(struct net_device *dev, u32 data)
5921{
5922 struct qeth_card *card = (struct qeth_card *)dev->priv;
5923
5924 if (data) {
5925 if (card->options.large_send != QETH_LARGE_SEND_NO)
5926 dev->features |= NETIF_F_TSO;
5927 else {
5928 dev->features &= ~NETIF_F_TSO;
5929 return -EINVAL;
5930 }
5931 } else
5932 dev->features &= ~NETIF_F_TSO;
5933 return 0;
5934}
5935
5936static struct ethtool_ops qeth_ethtool_ops = {
5937 .get_tx_csum = qeth_ethtool_get_tx_csum,
5938 .set_tx_csum = qeth_ethtool_set_tx_csum,
5939 .get_rx_csum = qeth_ethtool_get_rx_csum,
5940 .set_rx_csum = qeth_ethtool_set_rx_csum,
5941 .get_sg = qeth_ethtool_get_sg,
5942 .set_sg = qeth_ethtool_set_sg,
5943 .get_tso = qeth_ethtool_get_tso,
5944 .set_tso = qeth_ethtool_set_tso,
5945};
5946
5947static int
5948qeth_netdev_init(struct net_device *dev)
5949{
5950 struct qeth_card *card;
5951
5952 card = (struct qeth_card *) dev->priv;
5953
5954 QETH_DBF_TEXT(trace,3,"initdev");
5955
5956 dev->tx_timeout = &qeth_tx_timeout;
5957 dev->watchdog_timeo = QETH_TX_TIMEOUT;
5958 dev->open = qeth_open;
5959 dev->stop = qeth_stop;
5960 dev->hard_start_xmit = qeth_hard_start_xmit;
5961 dev->do_ioctl = qeth_do_ioctl;
5962 dev->get_stats = qeth_get_stats;
5963 dev->change_mtu = qeth_change_mtu;
5964 dev->neigh_setup = qeth_neigh_setup;
5965 dev->set_multicast_list = qeth_set_multicast_list;
5966#ifdef CONFIG_QETH_VLAN
5967 dev->vlan_rx_register = qeth_vlan_rx_register;
5968 dev->vlan_rx_kill_vid = qeth_vlan_rx_kill_vid;
5969 dev->vlan_rx_add_vid = qeth_vlan_rx_add_vid;
5970#endif
5971 dev->hard_header = card->orig_hard_header;
5972 if (qeth_get_netdev_flags(card) & IFF_NOARP) {
5973 dev->rebuild_header = NULL;
5974 dev->hard_header = NULL;
5975 if (card->options.fake_ll)
5976 dev->hard_header = qeth_fake_header;
5977 dev->header_cache_update = NULL;
5978 dev->hard_header_cache = NULL;
5979 }
5980#ifdef CONFIG_QETH_IPV6
5981 /*IPv6 address autoconfiguration stuff*/
5982 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
5983 card->dev->dev_id = card->info.unique_id & 0xffff;
5984#endif
5985 dev->hard_header_parse = NULL;
5986 dev->set_mac_address = qeth_layer2_set_mac_address;
5987 dev->flags |= qeth_get_netdev_flags(card);
5988 if ((card->options.fake_broadcast) ||
5989 (card->info.broadcast_capable))
5990 dev->flags |= IFF_BROADCAST;
5991 dev->hard_header_len =
5992 qeth_get_hlen(card->info.link_type) + card->options.add_hhlen;
5993 dev->addr_len = OSA_ADDR_LEN;
5994 dev->mtu = card->info.initial_mtu;
5995
5996 SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops);
5997
5998 SET_MODULE_OWNER(dev);
5999 return 0;
6000}
6001
6002static void
6003qeth_init_func_level(struct qeth_card *card)
6004{
6005 if (card->ipato.enabled) {
6006 if (card->info.type == QETH_CARD_TYPE_IQD)
6007 card->info.func_level =
6008 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
6009 else
6010 card->info.func_level =
6011 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
6012 } else {
6013 if (card->info.type == QETH_CARD_TYPE_IQD)
6014 card->info.func_level =
6015 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
6016 else
6017 card->info.func_level =
6018 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
6019 }
6020}
6021
6022/**
6023 * hardsetup card, initialize MPC and QDIO stuff
6024 */
6025static int
6026qeth_hardsetup_card(struct qeth_card *card)
6027{
6028 int retries = 3;
6029 int rc;
6030
6031 QETH_DBF_TEXT(setup, 2, "hrdsetup");
6032
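	/*
	 * Up to three attempts: on each retry the three ccw subchannels
	 * are bounced offline and back online before the QDIO clear and
	 * the IDX activation handshake are redone from scratch.
	 */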
6033retry:
6034 if (retries < 3){
6035 PRINT_WARN("Retrying to do IDX activates.\n");
6036 ccw_device_set_offline(CARD_DDEV(card));
6037 ccw_device_set_offline(CARD_WDEV(card));
6038 ccw_device_set_offline(CARD_RDEV(card));
6039 ccw_device_set_online(CARD_RDEV(card));
6040 ccw_device_set_online(CARD_WDEV(card));
6041 ccw_device_set_online(CARD_DDEV(card));
6042 }
6043 rc = qeth_qdio_clear_card(card,card->info.type==QETH_CARD_TYPE_OSAE);
6044 if (rc == -ERESTARTSYS) {
6045 QETH_DBF_TEXT(setup, 2, "break1");
6046 return rc;
6047 } else if (rc) {
6048 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6049 if (--retries < 0)
6050 goto out;
6051 else
6052 goto retry;
6053 }
6054 if ((rc = qeth_get_unitaddr(card))){
6055 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
6056 return rc;
6057 }
6058 qeth_init_tokens(card);
6059 qeth_init_func_level(card);
6060 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
6061 if (rc == -ERESTARTSYS) {
6062 QETH_DBF_TEXT(setup, 2, "break2");
6063 return rc;
6064 } else if (rc) {
6065 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
6066 if (--retries < 0)
6067 goto out;
6068 else
6069 goto retry;
6070 }
6071 rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
6072 if (rc == -ERESTARTSYS) {
6073 QETH_DBF_TEXT(setup, 2, "break3");
6074 return rc;
6075 } else if (rc) {
6076 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
6077 if (--retries < 0)
6078 goto out;
6079 else
6080 goto retry;
6081 }
6082 if ((rc = qeth_mpc_initialize(card))){
6083 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
6084 goto out;
6085 }
6086 /*network device will be recovered*/
6087 if (card->dev) {
6088 card->dev->hard_header = card->orig_hard_header;
6089 return 0;
6090 }
 6091 /* the first set_online allocates the netdev */
6092 card->dev = qeth_get_netdevice(card->info.type,
6093 card->info.link_type);
6094 if (!card->dev){
6095 qeth_qdio_clear_card(card, card->info.type ==
6096 QETH_CARD_TYPE_OSAE);
6097 rc = -ENODEV;
6098 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
6099 goto out;
6100 }
6101 card->dev->priv = card;
6102 card->orig_hard_header = card->dev->hard_header;
6103 card->dev->type = qeth_get_arphdr_type(card->info.type,
6104 card->info.link_type);
6105 card->dev->init = qeth_netdev_init;
6106 return 0;
6107out:
6108 PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc);
6109 return rc;
6110}
6111
6112static int
6113qeth_default_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply,
6114 unsigned long data)
6115{
6116 struct qeth_ipa_cmd *cmd;
6117
6118 QETH_DBF_TEXT(trace,4,"defadpcb");
6119
6120 cmd = (struct qeth_ipa_cmd *) data;
6121 if (cmd->hdr.return_code == 0){
6122 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
6123 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
6124 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
6125#ifdef CONFIG_QETH_IPV6
6126 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
6127 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
6128#endif
6129 }
6130 if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
6131 cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
6132 card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
6133 QETH_DBF_TEXT_(trace, 3, "csum:%d", card->info.csum_mask);
6134 }
6135 return 0;
6136}
6137
6138static int
6139qeth_default_setadapterparms_cb(struct qeth_card *card,
6140 struct qeth_reply *reply,
6141 unsigned long data)
6142{
6143 struct qeth_ipa_cmd *cmd;
6144
6145 QETH_DBF_TEXT(trace,4,"defadpcb");
6146
6147 cmd = (struct qeth_ipa_cmd *) data;
6148 if (cmd->hdr.return_code == 0)
6149 cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
6150 return 0;
6151}
6152
6153static int
6154qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply,
6155 unsigned long data)
6156{
6157 struct qeth_ipa_cmd *cmd;
6158
6159 QETH_DBF_TEXT(trace,3,"quyadpcb");
6160
6161 cmd = (struct qeth_ipa_cmd *) data;
6162 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f)
6163 card->info.link_type =
6164 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
6165 card->options.adp.supported_funcs =
6166 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
6167 return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
6168}
6169
6170static int
6171qeth_query_setadapterparms(struct qeth_card *card)
6172{
6173 int rc;
6174 struct qeth_cmd_buffer *iob;
6175
6176 QETH_DBF_TEXT(trace,3,"queryadp");
6177 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
6178 sizeof(struct qeth_ipacmd_setadpparms));
6179 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
6180 return rc;
6181}
6182
6183static int
6184qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
6185 struct qeth_reply *reply,
6186 unsigned long data)
6187{
6188 struct qeth_ipa_cmd *cmd;
6189
6190 QETH_DBF_TEXT(trace,4,"chgmaccb");
6191
6192 cmd = (struct qeth_ipa_cmd *) data;
6193 memcpy(card->dev->dev_addr,
6194 &cmd->data.setadapterparms.data.change_addr.addr,OSA_ADDR_LEN);
6195 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
6196 return 0;
6197}
6198
6199static int
6200qeth_setadpparms_change_macaddr(struct qeth_card *card)
6201{
6202 int rc;
6203 struct qeth_cmd_buffer *iob;
6204 struct qeth_ipa_cmd *cmd;
6205
6206 QETH_DBF_TEXT(trace,4,"chgmac");
6207
6208 iob = qeth_get_adapter_cmd(card,IPA_SETADP_ALTER_MAC_ADDRESS,
6209 sizeof(struct qeth_ipacmd_setadpparms));
6210 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6211 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
6212 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
6213 memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
6214 card->dev->dev_addr, OSA_ADDR_LEN);
6215 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
6216 NULL);
6217 return rc;
6218}
6219
6220static int
6221qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode)
6222{
6223 int rc;
6224 struct qeth_cmd_buffer *iob;
6225 struct qeth_ipa_cmd *cmd;
6226
6227 QETH_DBF_TEXT(trace,4,"adpmode");
6228
6229 iob = qeth_get_adapter_cmd(card, command,
6230 sizeof(struct qeth_ipacmd_setadpparms));
6231 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6232 cmd->data.setadapterparms.data.mode = mode;
6233 rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
6234 NULL);
6235 return rc;
6236}
6237
6238static inline int
6239qeth_setadapter_hstr(struct qeth_card *card)
6240{
6241 int rc;
6242
6243 QETH_DBF_TEXT(trace,4,"adphstr");
6244
6245 if (qeth_adp_supported(card,IPA_SETADP_SET_BROADCAST_MODE)) {
6246 rc = qeth_send_setadp_mode(card, IPA_SETADP_SET_BROADCAST_MODE,
6247 card->options.broadcast_mode);
6248 if (rc)
6249 PRINT_WARN("couldn't set broadcast mode on "
6250 "device %s: x%x\n",
6251 CARD_BUS_ID(card), rc);
6252 rc = qeth_send_setadp_mode(card, IPA_SETADP_ALTER_MAC_ADDRESS,
6253 card->options.macaddr_mode);
6254 if (rc)
6255 PRINT_WARN("couldn't set macaddr mode on "
6256 "device %s: x%x\n", CARD_BUS_ID(card), rc);
6257 return rc;
6258 }
6259 if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
 6260 PRINT_WARN("set adapter parameters not available "
 6261 "to set broadcast mode, using ALLRINGS "
 6262 "on device %s.\n", CARD_BUS_ID(card));
 6263 if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
 6264 PRINT_WARN("set adapter parameters not available "
 6265 "to set macaddr mode, using NONCANONICAL "
 6266 "on device %s.\n", CARD_BUS_ID(card));
6267 return 0;
6268}
6269
6270static int
6271qeth_setadapter_parms(struct qeth_card *card)
6272{
6273 int rc;
6274
6275 QETH_DBF_TEXT(setup, 2, "setadprm");
6276
6277 if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)){
6278 PRINT_WARN("set adapter parameters not supported "
6279 "on device %s.\n",
6280 CARD_BUS_ID(card));
6281 QETH_DBF_TEXT(setup, 2, " notsupp");
6282 return 0;
6283 }
6284 rc = qeth_query_setadapterparms(card);
6285 if (rc) {
6286 PRINT_WARN("couldn't set adapter parameters on device %s: "
6287 "x%x\n", CARD_BUS_ID(card), rc);
6288 return rc;
6289 }
6290 if (qeth_adp_supported(card,IPA_SETADP_ALTER_MAC_ADDRESS)) {
6291 rc = qeth_setadpparms_change_macaddr(card);
6292 if (rc)
6293 PRINT_WARN("couldn't get MAC address on "
6294 "device %s: x%x\n",
6295 CARD_BUS_ID(card), rc);
6296 }
6297
6298 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
6299 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
6300 rc = qeth_setadapter_hstr(card);
6301
6302 return rc;
6303}
6304
6305static int
6306qeth_layer2_initialize(struct qeth_card *card)
6307{
6308 int rc = 0;
6309
6310
6311 QETH_DBF_TEXT(setup, 2, "doL2init");
6312 QETH_DBF_TEXT_(setup, 2, "doL2%s", CARD_BUS_ID(card));
6313
6314 rc = qeth_setadpparms_change_macaddr(card);
6315 if (rc) {
6316 PRINT_WARN("couldn't get MAC address on "
6317 "device %s: x%x\n",
6318 CARD_BUS_ID(card), rc);
6319 QETH_DBF_TEXT_(setup, 2,"1err%d",rc);
6320 return rc;
6321 }
6322 QETH_DBF_HEX(setup,2, card->dev->dev_addr, OSA_ADDR_LEN);
6323
6324 rc = qeth_layer2_send_setmac(card, &card->dev->dev_addr[0]);
6325 if (rc)
6326 QETH_DBF_TEXT_(setup, 2,"2err%d",rc);
6327 return 0;
6328}
6329
6330
6331static int
6332qeth_send_startstoplan(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
6333 enum qeth_prot_versions prot)
6334{
6335 int rc;
6336 struct qeth_cmd_buffer *iob;
6337
6338 iob = qeth_get_ipacmd_buffer(card,ipacmd,prot);
6339 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6340
6341 return rc;
6342}
6343
6344static int
6345qeth_send_startlan(struct qeth_card *card, enum qeth_prot_versions prot)
6346{
6347 int rc;
6348
6349 QETH_DBF_TEXT_(setup, 2, "strtlan%i", prot);
6350
6351 rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, prot);
6352 return rc;
6353}
6354
6355static int
6356qeth_send_stoplan(struct qeth_card *card)
6357{
6358 int rc = 0;
6359
 6360 /*
 6361 * TODO: according to the IPA format document, page 14,
 6362 * TCP/IP (i.e. we) never issues a STOPLAN;
 6363 * is this correct?
 6364 */
6365 QETH_DBF_TEXT(trace, 2, "stoplan");
6366
6367 rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, QETH_PROT_IPV4);
6368 return rc;
6369}
6370
6371static int
6372qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply,
6373 unsigned long data)
6374{
6375 struct qeth_ipa_cmd *cmd;
6376
6377 QETH_DBF_TEXT(setup, 2, "qipasscb");
6378
6379 cmd = (struct qeth_ipa_cmd *) data;
6380 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
6381 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
6382 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
6383 } else {
6384#ifdef CONFIG_QETH_IPV6
6385 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
6386 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
6387#endif
6388 }
6389 QETH_DBF_TEXT(setup, 2, "suppenbl");
6390 QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_supported);
6391 QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_enabled);
6392 return 0;
6393}
6394
6395static int
6396qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
6397{
6398 int rc;
6399 struct qeth_cmd_buffer *iob;
6400
6401 QETH_DBF_TEXT_(setup, 2, "qipassi%i", prot);
6402 if (card->options.layer2) {
6403 QETH_DBF_TEXT(setup, 2, "noprmly2");
6404 return -EPERM;
6405 }
6406
6407 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_QIPASSIST,prot);
6408 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
6409 return rc;
6410}
6411
6412static struct qeth_cmd_buffer *
6413qeth_get_setassparms_cmd(struct qeth_card *card, enum qeth_ipa_funcs ipa_func,
6414 __u16 cmd_code, __u16 len,
6415 enum qeth_prot_versions prot)
6416{
6417 struct qeth_cmd_buffer *iob;
6418 struct qeth_ipa_cmd *cmd;
6419
6420 QETH_DBF_TEXT(trace,4,"getasscm");
6421 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETASSPARMS,prot);
6422
6423 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6424 cmd->data.setassparms.hdr.assist_no = ipa_func;
6425 cmd->data.setassparms.hdr.length = 8 + len;
6426 cmd->data.setassparms.hdr.command_code = cmd_code;
6427 cmd->data.setassparms.hdr.return_code = 0;
6428 cmd->data.setassparms.hdr.seq_no = 0;
6429
6430 return iob;
6431}
6432
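/*
 * Note that the 'data' argument of qeth_send_setassparms() doubles
 * as an immediate value or a pointer: up to four bytes are sent
 * inline as flags_32bit, anything larger is copied from the memory
 * 'data' points to.
 */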
6433static int
6434qeth_send_setassparms(struct qeth_card *card, struct qeth_cmd_buffer *iob,
6435 __u16 len, long data,
6436 int (*reply_cb)
6437 (struct qeth_card *,struct qeth_reply *,unsigned long),
6438 void *reply_param)
6439{
6440 int rc;
6441 struct qeth_ipa_cmd *cmd;
6442
6443 QETH_DBF_TEXT(trace,4,"sendassp");
6444
6445 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6446 if (len <= sizeof(__u32))
6447 cmd->data.setassparms.data.flags_32bit = (__u32) data;
6448 else if (len > sizeof(__u32))
6449 memcpy(&cmd->data.setassparms.data, (void *) data, len);
6450
6451 rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
6452 return rc;
6453}
6454
6455#ifdef CONFIG_QETH_IPV6
6456static int
6457qeth_send_simple_setassparms_ipv6(struct qeth_card *card,
6458 enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
6459
6460{
6461 int rc;
6462 struct qeth_cmd_buffer *iob;
6463
6464 QETH_DBF_TEXT(trace,4,"simassp6");
6465 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
6466 0, QETH_PROT_IPV6);
6467 rc = qeth_send_setassparms(card, iob, 0, 0,
6468 qeth_default_setassparms_cb, NULL);
6469 return rc;
6470}
6471#endif
6472
6473static int
6474qeth_send_simple_setassparms(struct qeth_card *card,
6475 enum qeth_ipa_funcs ipa_func,
6476 __u16 cmd_code, long data)
6477{
6478 int rc;
6479 int length = 0;
6480 struct qeth_cmd_buffer *iob;
6481
6482 QETH_DBF_TEXT(trace,4,"simassp4");
6483 if (data)
6484 length = sizeof(__u32);
6485 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
6486 length, QETH_PROT_IPV4);
6487 rc = qeth_send_setassparms(card, iob, length, data,
6488 qeth_default_setassparms_cb, NULL);
6489 return rc;
6490}
6491
6492static inline int
6493qeth_start_ipa_arp_processing(struct qeth_card *card)
6494{
6495 int rc;
6496
6497 QETH_DBF_TEXT(trace,3,"ipaarp");
6498
6499 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
6500 PRINT_WARN("ARP processing not supported "
6501 "on %s!\n", QETH_CARD_IFNAME(card));
6502 return 0;
6503 }
6504 rc = qeth_send_simple_setassparms(card,IPA_ARP_PROCESSING,
6505 IPA_CMD_ASS_START, 0);
6506 if (rc) {
6507 PRINT_WARN("Could not start ARP processing "
6508 "assist on %s: 0x%x\n",
6509 QETH_CARD_IFNAME(card), rc);
6510 }
6511 return rc;
6512}
6513
6514static int
6515qeth_start_ipa_ip_fragmentation(struct qeth_card *card)
6516{
6517 int rc;
6518
6519 QETH_DBF_TEXT(trace,3,"ipaipfrg");
6520
6521 if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
6522 PRINT_INFO("Hardware IP fragmentation not supported on %s\n",
6523 QETH_CARD_IFNAME(card));
6524 return -EOPNOTSUPP;
6525 }
6526
6527 rc = qeth_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
6528 IPA_CMD_ASS_START, 0);
6529 if (rc) {
6530 PRINT_WARN("Could not start Hardware IP fragmentation "
6531 "assist on %s: 0x%x\n",
6532 QETH_CARD_IFNAME(card), rc);
6533 } else
 6534 PRINT_INFO("Hardware IP fragmentation enabled\n");
6535 return rc;
6536}
6537
6538static int
6539qeth_start_ipa_source_mac(struct qeth_card *card)
6540{
6541 int rc;
6542
6543 QETH_DBF_TEXT(trace,3,"stsrcmac");
6544
6545 if (!card->options.fake_ll)
6546 return -EOPNOTSUPP;
6547
6548 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
6549 PRINT_INFO("Inbound source address not "
6550 "supported on %s\n", QETH_CARD_IFNAME(card));
6551 return -EOPNOTSUPP;
6552 }
6553
6554 rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
6555 IPA_CMD_ASS_START, 0);
6556 if (rc)
6557 PRINT_WARN("Could not start inbound source "
6558 "assist on %s: 0x%x\n",
6559 QETH_CARD_IFNAME(card), rc);
6560 return rc;
6561}
6562
6563static int
6564qeth_start_ipa_vlan(struct qeth_card *card)
6565{
6566 int rc = 0;
6567
6568 QETH_DBF_TEXT(trace,3,"strtvlan");
6569
6570#ifdef CONFIG_QETH_VLAN
6571 if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
6572 PRINT_WARN("VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
6573 return -EOPNOTSUPP;
6574 }
6575
6576 rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
6577 IPA_CMD_ASS_START,0);
6578 if (rc) {
6579 PRINT_WARN("Could not start vlan "
6580 "assist on %s: 0x%x\n",
6581 QETH_CARD_IFNAME(card), rc);
6582 } else {
 6583 PRINT_INFO("VLAN enabled\n");
6584 card->dev->features |=
6585 NETIF_F_HW_VLAN_FILTER |
6586 NETIF_F_HW_VLAN_TX |
6587 NETIF_F_HW_VLAN_RX;
6588 }
6589#endif /* QETH_VLAN */
6590 return rc;
6591}
6592
6593static int
6594qeth_start_ipa_multicast(struct qeth_card *card)
6595{
6596 int rc;
6597
6598 QETH_DBF_TEXT(trace,3,"stmcast");
6599
6600 if (!qeth_is_supported(card, IPA_MULTICASTING)) {
6601 PRINT_WARN("Multicast not supported on %s\n",
6602 QETH_CARD_IFNAME(card));
6603 return -EOPNOTSUPP;
6604 }
6605
6606 rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
6607 IPA_CMD_ASS_START,0);
6608 if (rc) {
6609 PRINT_WARN("Could not start multicast "
6610 "assist on %s: rc=%i\n",
6611 QETH_CARD_IFNAME(card), rc);
6612 } else {
6613 PRINT_INFO("Multicast enabled\n");
6614 card->dev->flags |= IFF_MULTICAST;
6615 }
6616 return rc;
6617}
6618
6619#ifdef CONFIG_QETH_IPV6
6620static int
6621qeth_softsetup_ipv6(struct qeth_card *card)
6622{
6623 int rc;
6624
6625 QETH_DBF_TEXT(trace,3,"softipv6");
6626
6627 netif_stop_queue(card->dev);
6628 rc = qeth_send_startlan(card, QETH_PROT_IPV6);
6629 if (rc) {
6630 PRINT_ERR("IPv6 startlan failed on %s\n",
6631 QETH_CARD_IFNAME(card));
6632 return rc;
6633 }
6634 netif_wake_queue(card->dev);
6635 rc = qeth_query_ipassists(card,QETH_PROT_IPV6);
6636 if (rc) {
6637 PRINT_ERR("IPv6 query ipassist failed on %s\n",
6638 QETH_CARD_IFNAME(card));
6639 return rc;
6640 }
6641 rc = qeth_send_simple_setassparms(card, IPA_IPV6,
6642 IPA_CMD_ASS_START, 3);
6643 if (rc) {
6644 PRINT_WARN("IPv6 start assist (version 4) failed "
6645 "on %s: 0x%x\n",
6646 QETH_CARD_IFNAME(card), rc);
6647 return rc;
6648 }
6649 rc = qeth_send_simple_setassparms_ipv6(card, IPA_IPV6,
6650 IPA_CMD_ASS_START);
6651 if (rc) {
6652 PRINT_WARN("IPV6 start assist (version 6) failed "
6653 "on %s: 0x%x\n",
6654 QETH_CARD_IFNAME(card), rc);
6655 return rc;
6656 }
6657 rc = qeth_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
6658 IPA_CMD_ASS_START);
6659 if (rc) {
6660 PRINT_WARN("Could not enable passthrough "
6661 "on %s: 0x%x\n",
6662 QETH_CARD_IFNAME(card), rc);
6663 return rc;
6664 }
6665 PRINT_INFO("IPV6 enabled \n");
6666 return 0;
6667}
6668
6669#endif
6670
6671static int
6672qeth_start_ipa_ipv6(struct qeth_card *card)
6673{
6674 int rc = 0;
6675#ifdef CONFIG_QETH_IPV6
6676 QETH_DBF_TEXT(trace,3,"strtipv6");
6677
6678 if (!qeth_is_supported(card, IPA_IPV6)) {
6679 PRINT_WARN("IPv6 not supported on %s\n",
6680 QETH_CARD_IFNAME(card));
6681 return 0;
6682 }
6683 rc = qeth_softsetup_ipv6(card);
6684#endif
 6685 return rc;
6686}
6687
6688static int
6689qeth_start_ipa_broadcast(struct qeth_card *card)
6690{
6691 int rc;
6692
6693 QETH_DBF_TEXT(trace,3,"stbrdcst");
6694 card->info.broadcast_capable = 0;
6695 if (!qeth_is_supported(card, IPA_FILTERING)) {
6696 PRINT_WARN("Broadcast not supported on %s\n",
6697 QETH_CARD_IFNAME(card));
6698 rc = -EOPNOTSUPP;
6699 goto out;
6700 }
6701 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
6702 IPA_CMD_ASS_START, 0);
6703 if (rc) {
6704 PRINT_WARN("Could not enable broadcasting filtering "
6705 "on %s: 0x%x\n",
6706 QETH_CARD_IFNAME(card), rc);
6707 goto out;
6708 }
6709
6710 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
6711 IPA_CMD_ASS_CONFIGURE, 1);
6712 if (rc) {
6713 PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n",
6714 QETH_CARD_IFNAME(card), rc);
6715 goto out;
6716 }
6717 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
6718 PRINT_INFO("Broadcast enabled \n");
6719 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
6720 IPA_CMD_ASS_ENABLE, 1);
6721 if (rc) {
6722 PRINT_WARN("Could not set up broadcast echo filtering on "
6723 "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc);
6724 goto out;
6725 }
6726 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
6727out:
6728 if (card->info.broadcast_capable)
6729 card->dev->flags |= IFF_BROADCAST;
6730 else
6731 card->dev->flags &= ~IFF_BROADCAST;
6732 return rc;
6733}
6734
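/*
 * Inbound HW checksumming is enabled in two steps: ASS_START makes
 * the card report its checksum mask (saved by the setassparms
 * callback into info.csum_mask), ASS_ENABLE then activates exactly
 * those checks.
 */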
6735static int
6736qeth_send_checksum_command(struct qeth_card *card)
6737{
6738 int rc;
6739
6740 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
6741 IPA_CMD_ASS_START, 0);
6742 if (rc) {
6743 PRINT_WARN("Starting Inbound HW Checksumming failed on %s: "
6744 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
6745 QETH_CARD_IFNAME(card), rc);
6746 return rc;
6747 }
6748 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
6749 IPA_CMD_ASS_ENABLE,
6750 card->info.csum_mask);
6751 if (rc) {
6752 PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: "
6753 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
6754 QETH_CARD_IFNAME(card), rc);
6755 return rc;
6756 }
6757 return 0;
6758}
6759
6760static int
6761qeth_start_ipa_checksum(struct qeth_card *card)
6762{
6763 int rc = 0;
6764
6765 QETH_DBF_TEXT(trace,3,"strtcsum");
6766
6767 if (card->options.checksum_type == NO_CHECKSUMMING) {
6768 PRINT_WARN("Using no checksumming on %s.\n",
6769 QETH_CARD_IFNAME(card));
6770 return 0;
6771 }
6772 if (card->options.checksum_type == SW_CHECKSUMMING) {
6773 PRINT_WARN("Using SW checksumming on %s.\n",
6774 QETH_CARD_IFNAME(card));
6775 return 0;
6776 }
6777 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
6778 PRINT_WARN("Inbound HW Checksumming not "
6779 "supported on %s,\ncontinuing "
6780 "using Inbound SW Checksumming\n",
6781 QETH_CARD_IFNAME(card));
6782 card->options.checksum_type = SW_CHECKSUMMING;
6783 return 0;
6784 }
6785 rc = qeth_send_checksum_command(card);
6786 if (!rc) {
6787 PRINT_INFO("HW Checksumming (inbound) enabled \n");
6788 }
6789 return rc;
6790}
6791
6792static int
6793qeth_start_ipa_tso(struct qeth_card *card)
6794{
6795 int rc;
6796
6797 QETH_DBF_TEXT(trace,3,"sttso");
6798
6799 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
6800 PRINT_WARN("Outbound TSO not supported on %s\n",
6801 QETH_CARD_IFNAME(card));
6802 rc = -EOPNOTSUPP;
6803 } else {
6804 rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
6805 IPA_CMD_ASS_START,0);
6806 if (rc)
6807 PRINT_WARN("Could not start outbound TSO "
6808 "assist on %s: rc=%i\n",
6809 QETH_CARD_IFNAME(card), rc);
6810 else
6811 PRINT_INFO("Outbound TSO enabled\n");
6812 }
6813 if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)){
6814 card->options.large_send = QETH_LARGE_SEND_NO;
6815 card->dev->features &= ~ (NETIF_F_TSO | NETIF_F_SG);
6816 }
6817 return rc;
6818}
6819
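/*
 * Start every supported IPA assist. Failures are deliberately not
 * fatal ("go on"): each assist is optional and the interface stays
 * usable without it.
 */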
6820static int
6821qeth_start_ipassists(struct qeth_card *card)
6822{
6823 QETH_DBF_TEXT(trace,3,"strtipas");
6824 qeth_start_ipa_arp_processing(card); /* go on*/
6825 qeth_start_ipa_ip_fragmentation(card); /* go on*/
6826 qeth_start_ipa_source_mac(card); /* go on*/
6827 qeth_start_ipa_vlan(card); /* go on*/
6828 qeth_start_ipa_multicast(card); /* go on*/
6829 qeth_start_ipa_ipv6(card); /* go on*/
6830 qeth_start_ipa_broadcast(card); /* go on*/
6831 qeth_start_ipa_checksum(card); /* go on*/
6832 qeth_start_ipa_tso(card); /* go on*/
6833 return 0;
6834}
6835
6836static int
6837qeth_send_setrouting(struct qeth_card *card, enum qeth_routing_types type,
6838 enum qeth_prot_versions prot)
6839{
6840 int rc;
6841 struct qeth_ipa_cmd *cmd;
6842 struct qeth_cmd_buffer *iob;
6843
6844 QETH_DBF_TEXT(trace,4,"setroutg");
6845 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
6846 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
 6847 cmd->data.setrtg.type = type;
6848 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6849
6850 return rc;
6851
6852}
6853
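/*
 * HiperSockets (IQD) devices accept the connector roles and the
 * multicast router role; OSA devices accept the router roles, the
 * multicast router role only if the IPA function is available.
 * Any other role is reset to NO_ROUTER with a warning.
 */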
6854static void
6855qeth_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type,
6856 enum qeth_prot_versions prot)
6857{
6858 if (card->info.type == QETH_CARD_TYPE_IQD) {
6859 switch (*type) {
6860 case NO_ROUTER:
6861 case PRIMARY_CONNECTOR:
6862 case SECONDARY_CONNECTOR:
6863 case MULTICAST_ROUTER:
6864 return;
6865 default:
6866 goto out_inval;
6867 }
6868 } else {
6869 switch (*type) {
6870 case NO_ROUTER:
6871 case PRIMARY_ROUTER:
6872 case SECONDARY_ROUTER:
6873 return;
6874 case MULTICAST_ROUTER:
6875 if (qeth_is_ipafunc_supported(card, prot,
6876 IPA_OSA_MC_ROUTER))
6877 return;
6878 default:
6879 goto out_inval;
6880 }
6881 }
6882out_inval:
6883 PRINT_WARN("Routing type '%s' not supported for interface %s.\n"
6884 "Router status set to 'no router'.\n",
6885 ((*type == PRIMARY_ROUTER)? "primary router" :
6886 (*type == SECONDARY_ROUTER)? "secondary router" :
6887 (*type == PRIMARY_CONNECTOR)? "primary connector" :
6888 (*type == SECONDARY_CONNECTOR)? "secondary connector" :
6889 (*type == MULTICAST_ROUTER)? "multicast router" :
6890 "unknown"),
6891 card->dev->name);
6892 *type = NO_ROUTER;
6893}
6894
6895int
6896qeth_setrouting_v4(struct qeth_card *card)
6897{
6898 int rc;
6899
6900 QETH_DBF_TEXT(trace,3,"setrtg4");
6901
6902 qeth_correct_routing_type(card, &card->options.route4.type,
6903 QETH_PROT_IPV4);
6904
6905 rc = qeth_send_setrouting(card, card->options.route4.type,
6906 QETH_PROT_IPV4);
6907 if (rc) {
6908 card->options.route4.type = NO_ROUTER;
6909 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
6910 "Type set to 'no router'.\n",
6911 rc, QETH_CARD_IFNAME(card));
6912 }
6913 return rc;
6914}
6915
6916int
6917qeth_setrouting_v6(struct qeth_card *card)
6918{
6919 int rc = 0;
6920
6921 QETH_DBF_TEXT(trace,3,"setrtg6");
6922#ifdef CONFIG_QETH_IPV6
6923
6924 qeth_correct_routing_type(card, &card->options.route6.type,
6925 QETH_PROT_IPV6);
6926
6927 if ((card->options.route6.type == NO_ROUTER) ||
6928 ((card->info.type == QETH_CARD_TYPE_OSAE) &&
6929 (card->options.route6.type == MULTICAST_ROUTER) &&
6930 !qeth_is_supported6(card,IPA_OSA_MC_ROUTER)))
6931 return 0;
6932 rc = qeth_send_setrouting(card, card->options.route6.type,
6933 QETH_PROT_IPV6);
6934 if (rc) {
6935 card->options.route6.type = NO_ROUTER;
6936 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
6937 "Type set to 'no router'.\n",
6938 rc, QETH_CARD_IFNAME(card));
6939 }
6940#endif
6941 return rc;
6942}
6943
6944int
6945qeth_set_large_send(struct qeth_card *card)
6946{
6947 int rc = 0;
6948
6949 if (card->dev == NULL)
6950 return 0;
6951
6952 netif_stop_queue(card->dev);
6953 switch (card->options.large_send) {
6954 case QETH_LARGE_SEND_EDDP:
6955 card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
6956 break;
6957 case QETH_LARGE_SEND_TSO:
6958 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)){
6959 card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
6960 } else {
6961 PRINT_WARN("TSO not supported on %s. "
6962 "large_send set to 'no'.\n",
6963 card->dev->name);
6964 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
6965 card->options.large_send = QETH_LARGE_SEND_NO;
6966 rc = -EOPNOTSUPP;
6967 }
6968 break;
6969 default: /* includes QETH_LARGE_SEND_NO */
6970 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
6971 break;
6972 }
6973
6974 netif_wake_queue(card->dev);
6975 return rc;
6976}
6977
 6978/*
 6979 * Soft-setup card: start the LAN, enable IPA assists and set up routing.
 6980 */
6981static int
6982qeth_softsetup_card(struct qeth_card *card)
6983{
6984 int rc;
6985
6986 QETH_DBF_TEXT(setup, 2, "softsetp");
6987
6988 if ((rc = qeth_send_startlan(card, QETH_PROT_IPV4))){
6989 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6990 if (rc == 0xe080){
6991 PRINT_WARN("LAN on card %s if offline! "
6992 "Continuing softsetup.\n",
6993 CARD_BUS_ID(card));
6994 card->lan_online = 0;
6995 } else
6996 return rc;
6997 } else
6998 card->lan_online = 1;
6999 if (card->options.layer2) {
7000 card->dev->features |=
7001 NETIF_F_HW_VLAN_FILTER |
7002 NETIF_F_HW_VLAN_TX |
7003 NETIF_F_HW_VLAN_RX;
 7004 card->dev->flags |= IFF_MULTICAST | IFF_BROADCAST;
 7005 card->info.broadcast_capable = 1;
7006 if ((rc = qeth_layer2_initialize(card))) {
7007 QETH_DBF_TEXT_(setup, 2, "L2err%d", rc);
7008 return rc;
7009 }
7010#ifdef CONFIG_QETH_VLAN
7011 qeth_layer2_process_vlans(card, 0);
7012#endif
7013 goto out;
7014 }
7015 if ((card->options.large_send == QETH_LARGE_SEND_EDDP) ||
7016 (card->options.large_send == QETH_LARGE_SEND_TSO))
7017 card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
7018 else
7019 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
7020
7021 if ((rc = qeth_setadapter_parms(card)))
7022 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7023 if ((rc = qeth_start_ipassists(card)))
7024 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
7025 if ((rc = qeth_setrouting_v4(card)))
7026 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
7027 if ((rc = qeth_setrouting_v6(card)))
7028 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
7029out:
7030 netif_stop_queue(card->dev);
7031 return 0;
7032}
7033
7034#ifdef CONFIG_QETH_IPV6
7035static int
7036qeth_get_unique_id_cb(struct qeth_card *card, struct qeth_reply *reply,
7037 unsigned long data)
7038{
7039 struct qeth_ipa_cmd *cmd;
7040
7041 cmd = (struct qeth_ipa_cmd *) data;
7042 if (cmd->hdr.return_code == 0)
7043 card->info.unique_id = *((__u16 *)
7044 &cmd->data.create_destroy_addr.unique_id[6]);
7045 else {
7046 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7047 UNIQUE_ID_NOT_BY_CARD;
7048 PRINT_WARN("couldn't get a unique id from the card on device "
7049 "%s (result=x%x), using default id. ipv6 "
7050 "autoconfig on other lpars may lead to duplicate "
7051 "ip addresses. please use manually "
7052 "configured ones.\n",
7053 CARD_BUS_ID(card), cmd->hdr.return_code);
7054 }
7055 return 0;
7056}
7057#endif
7058
7059static int
7060qeth_put_unique_id(struct qeth_card *card)
7061{
7062
7063 int rc = 0;
7064#ifdef CONFIG_QETH_IPV6
7065 struct qeth_cmd_buffer *iob;
7066 struct qeth_ipa_cmd *cmd;
7067
7068 QETH_DBF_TEXT(trace,2,"puniqeid");
7069
7070 if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
7071 UNIQUE_ID_NOT_BY_CARD)
7072 return -1;
7073 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
7074 QETH_PROT_IPV6);
7075 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7076 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
7077 card->info.unique_id;
7078 memcpy(&cmd->data.create_destroy_addr.unique_id[0],
7079 card->dev->dev_addr, OSA_ADDR_LEN);
7080 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
7081#else
7082 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7083 UNIQUE_ID_NOT_BY_CARD;
7084#endif
7085 return rc;
7086}
7087
7088/**
7089 * Clear IP List
7090 */
7091static void
7092qeth_clear_ip_list(struct qeth_card *card, int clean, int recover)
7093{
7094 struct qeth_ipaddr *addr, *tmp;
7095 unsigned long flags;
7096
7097 QETH_DBF_TEXT(trace,4,"clearip");
7098 spin_lock_irqsave(&card->ip_lock, flags);
7099 /* clear todo list */
7100 list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry){
7101 list_del(&addr->entry);
7102 kfree(addr);
7103 }
7104
7105 while (!list_empty(&card->ip_list)) {
7106 addr = list_entry(card->ip_list.next,
7107 struct qeth_ipaddr, entry);
7108 list_del_init(&addr->entry);
7109 if (clean) {
7110 spin_unlock_irqrestore(&card->ip_lock, flags);
7111 qeth_deregister_addr_entry(card, addr);
7112 spin_lock_irqsave(&card->ip_lock, flags);
7113 }
7114 if (!recover || addr->is_multicast) {
7115 kfree(addr);
7116 continue;
7117 }
7118 list_add_tail(&addr->entry, card->ip_tbd_list);
7119 }
7120 spin_unlock_irqrestore(&card->ip_lock, flags);
7121}
7122
7123static void
7124qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
7125 int clear_start_mask)
7126{
7127 unsigned long flags;
7128
7129 spin_lock_irqsave(&card->thread_mask_lock, flags);
7130 card->thread_allowed_mask = threads;
7131 if (clear_start_mask)
7132 card->thread_start_mask &= threads;
7133 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
7134 wake_up(&card->wait_q);
7135}
7136
7137static inline int
7138qeth_threads_running(struct qeth_card *card, unsigned long threads)
7139{
7140 unsigned long flags;
7141 int rc = 0;
7142
7143 spin_lock_irqsave(&card->thread_mask_lock, flags);
7144 rc = (card->thread_running_mask & threads);
7145 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
7146 return rc;
7147}
7148
7149static int
7150qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
7151{
7152 return wait_event_interruptible(card->wait_q,
7153 qeth_threads_running(card, threads) == 0);
7154}
7155
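/*
 * Tear the card down step by step: each state block below falls
 * through to the next lower state (UP -> SOFTSETUP -> HARDSETUP ->
 * DOWN), so a card can be stopped from any intermediate state.
 */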
7156static int
7157qeth_stop_card(struct qeth_card *card)
7158{
7159 int rc = 0;
7160
 7161 QETH_DBF_TEXT(setup, 2, "stopcard");
7162 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
7163
7164 qeth_set_allowed_threads(card, 0, 1);
7165 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
7166 return -ERESTARTSYS;
7167 if (card->read.state == CH_STATE_UP &&
7168 card->write.state == CH_STATE_UP &&
7169 (card->state == CARD_STATE_UP)) {
7170 rtnl_lock();
7171 dev_close(card->dev);
7172 rtnl_unlock();
7173 if (!card->use_hard_stop) {
7174 __u8 *mac = &card->dev->dev_addr[0];
7175 rc = qeth_layer2_send_delmac(card, mac);
7176 QETH_DBF_TEXT_(setup, 2, "Lerr%d", rc);
7177 if ((rc = qeth_send_stoplan(card)))
7178 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
7179 }
7180 card->state = CARD_STATE_SOFTSETUP;
7181 }
7182 if (card->state == CARD_STATE_SOFTSETUP) {
7183#ifdef CONFIG_QETH_VLAN
7184 if (card->options.layer2)
7185 qeth_layer2_process_vlans(card, 1);
7186#endif
7187 qeth_clear_ip_list(card, !card->use_hard_stop, 1);
7188 qeth_clear_ipacmd_list(card);
7189 card->state = CARD_STATE_HARDSETUP;
7190 }
7191 if (card->state == CARD_STATE_HARDSETUP) {
7192 if ((!card->use_hard_stop) &&
7193 (!card->options.layer2))
7194 if ((rc = qeth_put_unique_id(card)))
7195 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7196 qeth_qdio_clear_card(card, 0);
7197 qeth_clear_qdio_buffers(card);
7198 qeth_clear_working_pool_list(card);
7199 card->state = CARD_STATE_DOWN;
7200 }
7201 if (card->state == CARD_STATE_DOWN) {
7202 qeth_clear_cmd_buffers(&card->read);
7203 qeth_clear_cmd_buffers(&card->write);
7204 }
7205 card->use_hard_stop = 0;
7206 return rc;
7207}
7208
7209
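/*
 * Ask the card for a unique id; it later seeds dev->dev_id (see
 * qeth_netdev_init) so that IPv6 address autoconfiguration yields
 * different addresses on LPARs sharing a MAC address.
 */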
7210static int
7211qeth_get_unique_id(struct qeth_card *card)
7212{
7213 int rc = 0;
7214#ifdef CONFIG_QETH_IPV6
7215 struct qeth_cmd_buffer *iob;
7216 struct qeth_ipa_cmd *cmd;
7217
7218 QETH_DBF_TEXT(setup, 2, "guniqeid");
7219
7220 if (!qeth_is_supported(card,IPA_IPV6)) {
7221 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7222 UNIQUE_ID_NOT_BY_CARD;
7223 return 0;
7224 }
7225
7226 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
7227 QETH_PROT_IPV6);
7228 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7229 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
7230 card->info.unique_id;
7231
7232 rc = qeth_send_ipa_cmd(card, iob, qeth_get_unique_id_cb, NULL);
7233#else
7234 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7235 UNIQUE_ID_NOT_BY_CARD;
7236#endif
7237 return rc;
7238}
7239static void
7240qeth_print_status_with_portname(struct qeth_card *card)
7241{
7242 char dbf_text[15];
7243 int i;
7244
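 /* portname[0] holds the length; the name itself is EBCDIC */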
7245 sprintf(dbf_text, "%s", card->info.portname + 1);
7246 for (i = 0; i < 8; i++)
7247 dbf_text[i] =
7248 (char) _ebcasc[(__u8) dbf_text[i]];
7249 dbf_text[8] = 0;
7250 printk("qeth: Device %s/%s/%s is a%s card%s%s%s\n"
7251 "with link type %s (portname: %s)\n",
7252 CARD_RDEV_ID(card),
7253 CARD_WDEV_ID(card),
7254 CARD_DDEV_ID(card),
7255 qeth_get_cardname(card),
7256 (card->info.mcl_level[0]) ? " (level: " : "",
7257 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7258 (card->info.mcl_level[0]) ? ")" : "",
7259 qeth_get_cardname_short(card),
7260 dbf_text);
7261
7262}
7263
7264static void
7265qeth_print_status_no_portname(struct qeth_card *card)
7266{
7267 if (card->info.portname[0])
7268 printk("qeth: Device %s/%s/%s is a%s "
7269 "card%s%s%s\nwith link type %s "
7270 "(no portname needed by interface).\n",
7271 CARD_RDEV_ID(card),
7272 CARD_WDEV_ID(card),
7273 CARD_DDEV_ID(card),
7274 qeth_get_cardname(card),
7275 (card->info.mcl_level[0]) ? " (level: " : "",
7276 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7277 (card->info.mcl_level[0]) ? ")" : "",
7278 qeth_get_cardname_short(card));
7279 else
7280 printk("qeth: Device %s/%s/%s is a%s "
7281 "card%s%s%s\nwith link type %s.\n",
7282 CARD_RDEV_ID(card),
7283 CARD_WDEV_ID(card),
7284 CARD_DDEV_ID(card),
7285 qeth_get_cardname(card),
7286 (card->info.mcl_level[0]) ? " (level: " : "",
7287 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7288 (card->info.mcl_level[0]) ? ")" : "",
7289 qeth_get_cardname_short(card));
7290}
7291
7292static void
7293qeth_print_status_message(struct qeth_card *card)
7294{
7295 switch (card->info.type) {
7296 case QETH_CARD_TYPE_OSAE:
 7297 /* VM will use a non-zero first character to
 7298 * indicate a HiperSockets-like reporting of the
 7299 * level; OSA sets the first character to zero.
 7300 */
7301 if (!card->info.mcl_level[0]) {
7302 sprintf(card->info.mcl_level,"%02x%02x",
7303 card->info.mcl_level[2],
7304 card->info.mcl_level[3]);
7305
7306 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
7307 break;
7308 }
7309 /* fallthrough */
7310 case QETH_CARD_TYPE_IQD:
7311 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
7312 card->info.mcl_level[0]];
7313 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
7314 card->info.mcl_level[1]];
7315 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
7316 card->info.mcl_level[2]];
7317 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
7318 card->info.mcl_level[3]];
7319 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
7320 break;
7321 default:
7322 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
7323 }
7324 if (card->info.portname_required)
7325 qeth_print_status_with_portname(card);
7326 else
7327 qeth_print_status_no_portname(card);
7328}
7329
7330static int
7331qeth_register_netdev(struct qeth_card *card)
7332{
7333 QETH_DBF_TEXT(setup, 3, "regnetd");
7334 if (card->dev->reg_state != NETREG_UNINITIALIZED) {
7335 qeth_netdev_init(card->dev);
7336 return 0;
7337 }
7338 /* sysfs magic */
7339 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
7340 return register_netdev(card->dev);
7341}
7342
7343static void
7344qeth_start_again(struct qeth_card *card)
7345{
 7346 QETH_DBF_TEXT(setup, 2, "startag");
7347
7348 rtnl_lock();
7349 dev_open(card->dev);
7350 rtnl_unlock();
7351 /* this also sets saved unicast addresses */
7352 qeth_set_multicast_list(card->dev);
7353}
7354
7355
7356/* Layer 2 specific stuff */
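/*
 * These helpers warn about and reset any option that is meaningless
 * with layer-2 functionality; the device keeps running with the
 * reset value.
 */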
7357#define IGNORE_PARAM_EQ(option,value,reset_value,msg) \
7358 if (card->options.option == value) { \
 7359 PRINT_ERR("%s not supported with layer 2 " \
 7360 "functionality, ignoring option on read " \
 7361 "channel device %s.\n", msg, CARD_RDEV_ID(card)); \
7362 card->options.option = reset_value; \
7363 }
7364#define IGNORE_PARAM_NEQ(option,value,reset_value,msg) \
7365 if (card->options.option != value) { \
 7366 PRINT_ERR("%s not supported with layer 2 " \
 7367 "functionality, ignoring option on read " \
 7368 "channel device %s.\n", msg, CARD_RDEV_ID(card)); \
7369 card->options.option = reset_value; \
7370 }
7371
7372
7373static void qeth_make_parameters_consistent(struct qeth_card *card)
7374{
7375
7376 if (card->options.layer2) {
7377 if (card->info.type == QETH_CARD_TYPE_IQD) {
7378 PRINT_ERR("Device %s does not support " \
7379 "layer 2 functionality. " \
7380 "Ignoring layer2 option.\n",CARD_BUS_ID(card));
7381 }
7382 IGNORE_PARAM_NEQ(route4.type, NO_ROUTER, NO_ROUTER,
7383 "Routing options are");
7384#ifdef CONFIG_QETH_IPV6
7385 IGNORE_PARAM_NEQ(route6.type, NO_ROUTER, NO_ROUTER,
7386 "Routing options are");
7387#endif
7388 IGNORE_PARAM_EQ(checksum_type, HW_CHECKSUMMING,
7389 QETH_CHECKSUM_DEFAULT,
7390 "Checksumming options are");
7391 IGNORE_PARAM_NEQ(broadcast_mode, QETH_TR_BROADCAST_ALLRINGS,
7392 QETH_TR_BROADCAST_ALLRINGS,
7393 "Broadcast mode options are");
7394 IGNORE_PARAM_NEQ(macaddr_mode, QETH_TR_MACADDR_NONCANONICAL,
7395 QETH_TR_MACADDR_NONCANONICAL,
7396 "Canonical MAC addr options are");
7397 IGNORE_PARAM_NEQ(fake_broadcast, 0, 0,
7398 "Broadcast faking options are");
7399 IGNORE_PARAM_NEQ(add_hhlen, DEFAULT_ADD_HHLEN,
7400 DEFAULT_ADD_HHLEN,"Option add_hhlen is");
7401 IGNORE_PARAM_NEQ(fake_ll, 0, 0,"Option fake_ll is");
7402 }
7403}
7404
7405
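/*
 * Bring the card online: hardsetup (IDX activation, MPC, QDIO),
 * query IPA assists, register the netdev, softsetup (assists,
 * routing) and initialize the QDIO queues. Any failure rolls the
 * devices back offline.
 */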
7406static int
7407qeth_set_online(struct ccwgroup_device *gdev)
7408{
7409 struct qeth_card *card = gdev->dev.driver_data;
7410 int rc = 0;
7411 enum qeth_card_states recover_flag;
7412
7413 BUG_ON(!card);
 7414 QETH_DBF_TEXT(setup, 2, "setonlin");
7415 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
7416
7417 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
7418 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)){
7419 PRINT_WARN("set_online of card %s interrupted by user!\n",
7420 CARD_BUS_ID(card));
7421 return -ERESTARTSYS;
7422 }
7423
7424 recover_flag = card->state;
7425 if ((rc = ccw_device_set_online(CARD_RDEV(card))) ||
7426 (rc = ccw_device_set_online(CARD_WDEV(card))) ||
7427 (rc = ccw_device_set_online(CARD_DDEV(card)))){
7428 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
7429 return -EIO;
7430 }
7431
7432 if (card->options.layer2)
7433 qeth_make_parameters_consistent(card);
7434
7435 if ((rc = qeth_hardsetup_card(card))){
7436 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7437 goto out_remove;
7438 }
7439 card->state = CARD_STATE_HARDSETUP;
7440
7441 if (!(rc = qeth_query_ipassists(card,QETH_PROT_IPV4)))
7442 rc = qeth_get_unique_id(card);
7443
7444 if (rc && card->options.layer2 == 0) {
7445 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
7446 goto out_remove;
7447 }
7448 qeth_print_status_message(card);
7449 if ((rc = qeth_register_netdev(card))){
7450 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
7451 goto out_remove;
7452 }
7453 if ((rc = qeth_softsetup_card(card))){
7454 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
7455 goto out_remove;
7456 }
7457 card->state = CARD_STATE_SOFTSETUP;
7458
7459 if ((rc = qeth_init_qdio_queues(card))){
7460 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
7461 goto out_remove;
7462 }
7463/*maybe it was set offline without ifconfig down
7464 * we can also use this state for recovery purposes*/
7465 qeth_set_allowed_threads(card, 0xffffffff, 0);
7466 if (recover_flag == CARD_STATE_RECOVER)
7467 qeth_start_again(card);
7468 qeth_notify_processes();
7469 return 0;
7470out_remove:
7471 card->use_hard_stop = 1;
7472 qeth_stop_card(card);
7473 ccw_device_set_offline(CARD_DDEV(card));
7474 ccw_device_set_offline(CARD_WDEV(card));
7475 ccw_device_set_offline(CARD_RDEV(card));
7476 if (recover_flag == CARD_STATE_RECOVER)
7477 card->state = CARD_STATE_RECOVER;
7478 else
7479 card->state = CARD_STATE_DOWN;
7480 return -ENODEV;
7481}
7482
7483static struct ccw_device_id qeth_ids[] = {
 7484 {CCW_DEVICE(0x1731, 0x01), .driver_info = QETH_CARD_TYPE_OSAE},
 7485 {CCW_DEVICE(0x1731, 0x05), .driver_info = QETH_CARD_TYPE_IQD},
7486 {},
7487};
7488MODULE_DEVICE_TABLE(ccw, qeth_ids);
7489
7490struct device *qeth_root_dev = NULL;
7491
7492struct ccwgroup_driver qeth_ccwgroup_driver = {
7493 .owner = THIS_MODULE,
7494 .name = "qeth",
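 /* 0xD8C5E3C8 is "QETH" in EBCDIC */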
7495 .driver_id = 0xD8C5E3C8,
7496 .probe = qeth_probe_device,
7497 .remove = qeth_remove_device,
7498 .set_online = qeth_set_online,
7499 .set_offline = qeth_set_offline,
7500};
7501
7502struct ccw_driver qeth_ccw_driver = {
7503 .name = "qeth",
7504 .ids = qeth_ids,
7505 .probe = ccwgroup_probe_ccwdev,
7506 .remove = ccwgroup_remove_ccwdev,
7507};
7508
7509
7510static void
7511qeth_unregister_dbf_views(void)
7512{
7513 if (qeth_dbf_setup)
7514 debug_unregister(qeth_dbf_setup);
7515 if (qeth_dbf_qerr)
7516 debug_unregister(qeth_dbf_qerr);
7517 if (qeth_dbf_sense)
7518 debug_unregister(qeth_dbf_sense);
7519 if (qeth_dbf_misc)
7520 debug_unregister(qeth_dbf_misc);
7521 if (qeth_dbf_data)
7522 debug_unregister(qeth_dbf_data);
7523 if (qeth_dbf_control)
7524 debug_unregister(qeth_dbf_control);
7525 if (qeth_dbf_trace)
7526 debug_unregister(qeth_dbf_trace);
7527}
7528static int
7529qeth_register_dbf_views(void)
7530{
7531 qeth_dbf_setup = debug_register(QETH_DBF_SETUP_NAME,
7532 QETH_DBF_SETUP_INDEX,
7533 QETH_DBF_SETUP_NR_AREAS,
7534 QETH_DBF_SETUP_LEN);
7535 qeth_dbf_misc = debug_register(QETH_DBF_MISC_NAME,
7536 QETH_DBF_MISC_INDEX,
7537 QETH_DBF_MISC_NR_AREAS,
7538 QETH_DBF_MISC_LEN);
7539 qeth_dbf_data = debug_register(QETH_DBF_DATA_NAME,
7540 QETH_DBF_DATA_INDEX,
7541 QETH_DBF_DATA_NR_AREAS,
7542 QETH_DBF_DATA_LEN);
7543 qeth_dbf_control = debug_register(QETH_DBF_CONTROL_NAME,
7544 QETH_DBF_CONTROL_INDEX,
7545 QETH_DBF_CONTROL_NR_AREAS,
7546 QETH_DBF_CONTROL_LEN);
7547 qeth_dbf_sense = debug_register(QETH_DBF_SENSE_NAME,
7548 QETH_DBF_SENSE_INDEX,
7549 QETH_DBF_SENSE_NR_AREAS,
7550 QETH_DBF_SENSE_LEN);
7551 qeth_dbf_qerr = debug_register(QETH_DBF_QERR_NAME,
7552 QETH_DBF_QERR_INDEX,
7553 QETH_DBF_QERR_NR_AREAS,
7554 QETH_DBF_QERR_LEN);
7555 qeth_dbf_trace = debug_register(QETH_DBF_TRACE_NAME,
7556 QETH_DBF_TRACE_INDEX,
7557 QETH_DBF_TRACE_NR_AREAS,
7558 QETH_DBF_TRACE_LEN);
7559
7560 if ((qeth_dbf_setup == NULL) || (qeth_dbf_misc == NULL) ||
7561 (qeth_dbf_data == NULL) || (qeth_dbf_control == NULL) ||
7562 (qeth_dbf_sense == NULL) || (qeth_dbf_qerr == NULL) ||
7563 (qeth_dbf_trace == NULL)) {
7564 qeth_unregister_dbf_views();
7565 return -ENOMEM;
7566 }
7567 debug_register_view(qeth_dbf_setup, &debug_hex_ascii_view);
7568 debug_set_level(qeth_dbf_setup, QETH_DBF_SETUP_LEVEL);
7569
7570 debug_register_view(qeth_dbf_misc, &debug_hex_ascii_view);
7571 debug_set_level(qeth_dbf_misc, QETH_DBF_MISC_LEVEL);
7572
7573 debug_register_view(qeth_dbf_data, &debug_hex_ascii_view);
7574 debug_set_level(qeth_dbf_data, QETH_DBF_DATA_LEVEL);
7575
7576 debug_register_view(qeth_dbf_control, &debug_hex_ascii_view);
7577 debug_set_level(qeth_dbf_control, QETH_DBF_CONTROL_LEVEL);
7578
7579 debug_register_view(qeth_dbf_sense, &debug_hex_ascii_view);
7580 debug_set_level(qeth_dbf_sense, QETH_DBF_SENSE_LEVEL);
7581
7582 debug_register_view(qeth_dbf_qerr, &debug_hex_ascii_view);
7583 debug_set_level(qeth_dbf_qerr, QETH_DBF_QERR_LEVEL);
7584
7585 debug_register_view(qeth_dbf_trace, &debug_hex_ascii_view);
7586 debug_set_level(qeth_dbf_trace, QETH_DBF_TRACE_LEVEL);
7587
7588 return 0;
7589}
7590
7591#ifdef CONFIG_QETH_IPV6
7592extern struct neigh_table arp_tbl;
7593static struct neigh_ops *arp_direct_ops;
7594static int (*qeth_old_arp_constructor) (struct neighbour *);
7595
7596static struct neigh_ops arp_direct_ops_template = {
7597 .family = AF_INET,
7598 .destructor = NULL,
7599 .solicit = NULL,
7600 .error_report = NULL,
7601 .output = dev_queue_xmit,
7602 .connected_output = dev_queue_xmit,
7603 .hh_output = dev_queue_xmit,
7604 .queue_xmit = dev_queue_xmit
7605};
7606
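/*
 * Replacement constructor for arp_tbl: on layer-3 qeth interfaces
 * without a (fake) link layer header there is nothing to resolve,
 * so neighbours are marked NUD_NOARP and output goes straight to
 * dev_queue_xmit(); all other devices fall back to the original
 * constructor.
 */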
7607static int
7608qeth_arp_constructor(struct neighbour *neigh)
7609{
7610 struct net_device *dev = neigh->dev;
7611 struct in_device *in_dev;
7612 struct neigh_parms *parms;
7613 struct qeth_card *card;
7614
7615 card = qeth_get_card_from_dev(dev);
7616 if (card == NULL)
7617 goto out;
7618 if((card->options.layer2) ||
7619 (card->dev->hard_header == qeth_fake_header))
7620 goto out;
7621
7622 rcu_read_lock();
7623 in_dev = rcu_dereference(__in_dev_get(dev));
7624 if (in_dev == NULL) {
7625 rcu_read_unlock();
7626 return -EINVAL;
7627 }
7628
7629 parms = in_dev->arp_parms;
7630 __neigh_parms_put(neigh->parms);
7631 neigh->parms = neigh_parms_clone(parms);
7632 rcu_read_unlock();
7633
7634 neigh->type = inet_addr_type(*(u32 *) neigh->primary_key);
7635 neigh->nud_state = NUD_NOARP;
7636 neigh->ops = arp_direct_ops;
7637 neigh->output = neigh->ops->queue_xmit;
7638 return 0;
7639out:
7640 return qeth_old_arp_constructor(neigh);
7641}
7642#endif /*CONFIG_QETH_IPV6*/
7643
7644/*
7645 * IP address takeover related functions
7646 */
7647static void
7648qeth_clear_ipato_list(struct qeth_card *card)
7649{
7650 struct qeth_ipato_entry *ipatoe, *tmp;
7651 unsigned long flags;
7652
7653 spin_lock_irqsave(&card->ip_lock, flags);
7654 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
7655 list_del(&ipatoe->entry);
7656 kfree(ipatoe);
7657 }
7658 spin_unlock_irqrestore(&card->ip_lock, flags);
7659}
7660
7661int
7662qeth_add_ipato_entry(struct qeth_card *card, struct qeth_ipato_entry *new)
7663{
7664 struct qeth_ipato_entry *ipatoe;
7665 unsigned long flags;
7666 int rc = 0;
7667
7668 QETH_DBF_TEXT(trace, 2, "addipato");
7669 spin_lock_irqsave(&card->ip_lock, flags);
7670 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
7671 if (ipatoe->proto != new->proto)
7672 continue;
7673 if (!memcmp(ipatoe->addr, new->addr,
7674 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
7675 (ipatoe->mask_bits == new->mask_bits)){
7676 PRINT_WARN("ipato entry already exists!\n");
7677 rc = -EEXIST;
7678 break;
7679 }
7680 }
7681 if (!rc) {
7682 list_add_tail(&new->entry, &card->ipato.entries);
7683 }
7684 spin_unlock_irqrestore(&card->ip_lock, flags);
7685 return rc;
7686}
7687
7688void
7689qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto,
7690 u8 *addr, int mask_bits)
7691{
7692 struct qeth_ipato_entry *ipatoe, *tmp;
7693 unsigned long flags;
7694
7695 QETH_DBF_TEXT(trace, 2, "delipato");
7696 spin_lock_irqsave(&card->ip_lock, flags);
7697 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry){
7698 if (ipatoe->proto != proto)
7699 continue;
7700 if (!memcmp(ipatoe->addr, addr,
7701 (proto == QETH_PROT_IPV4)? 4:16) &&
7702 (ipatoe->mask_bits == mask_bits)){
7703 list_del(&ipatoe->entry);
7704 kfree(ipatoe);
7705 }
7706 }
7707 spin_unlock_irqrestore(&card->ip_lock, flags);
7708}
7709
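/*
 * Expand each address byte into eight 0/1 bytes, most significant
 * bit first, so that prefix matches can be done with memcmp() on
 * the first mask_bits entries (e.g. 0xC0 becomes 1,1,0,0,0,0,0,0).
 */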
7710static inline void
7711qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
7712{
7713 int i, j;
7714 u8 octet;
7715
7716 for (i = 0; i < len; ++i){
7717 octet = addr[i];
7718 for (j = 7; j >= 0; --j){
7719 bits[i*8 + j] = octet & 1;
7720 octet >>= 1;
7721 }
7722 }
7723}
7724
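/*
 * An address is subject to takeover if its first mask_bits bits
 * match an IPATO entry of the same protocol; the invert4/invert6
 * flags turn the entry list into an exclusion list.
 */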
7725static int
7726qeth_is_addr_covered_by_ipato(struct qeth_card *card, struct qeth_ipaddr *addr)
7727{
7728 struct qeth_ipato_entry *ipatoe;
7729 u8 addr_bits[128] = {0, };
7730 u8 ipatoe_bits[128] = {0, };
7731 int rc = 0;
7732
7733 if (!card->ipato.enabled)
7734 return 0;
7735
7736 qeth_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
7737 (addr->proto == QETH_PROT_IPV4)? 4:16);
7738 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
7739 if (addr->proto != ipatoe->proto)
7740 continue;
7741 qeth_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
7742 (ipatoe->proto==QETH_PROT_IPV4) ?
7743 4:16);
7744 if (addr->proto == QETH_PROT_IPV4)
7745 rc = !memcmp(addr_bits, ipatoe_bits,
7746 min(32, ipatoe->mask_bits));
7747 else
7748 rc = !memcmp(addr_bits, ipatoe_bits,
7749 min(128, ipatoe->mask_bits));
7750 if (rc)
7751 break;
7752 }
7753 /* invert? */
7754 if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
7755 rc = !rc;
7756 else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
7757 rc = !rc;
7758
7759 return rc;
7760}
7761
7762/*
7763 * VIPA related functions
7764 */
7765int
7766qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
7767 const u8 *addr)
7768{
7769 struct qeth_ipaddr *ipaddr;
7770 unsigned long flags;
7771 int rc = 0;
7772
7773 ipaddr = qeth_get_addr_buffer(proto);
7774 if (ipaddr){
7775 if (proto == QETH_PROT_IPV4){
7776 QETH_DBF_TEXT(trace, 2, "addvipa4");
7777 memcpy(&ipaddr->u.a4.addr, addr, 4);
7778 ipaddr->u.a4.mask = 0;
7779#ifdef CONFIG_QETH_IPV6
7780 } else if (proto == QETH_PROT_IPV6){
7781 QETH_DBF_TEXT(trace, 2, "addvipa6");
7782 memcpy(&ipaddr->u.a6.addr, addr, 16);
7783 ipaddr->u.a6.pfxlen = 0;
7784#endif
7785 }
7786 ipaddr->type = QETH_IP_TYPE_VIPA;
7787 ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
7788 ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
7789 } else
7790 return -ENOMEM;
7791 spin_lock_irqsave(&card->ip_lock, flags);
7792 if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
7793 __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
7794 rc = -EEXIST;
7795 spin_unlock_irqrestore(&card->ip_lock, flags);
7796 if (rc){
7797 PRINT_WARN("Cannot add VIPA. Address already exists!\n");
7798 return rc;
7799 }
7800 if (!qeth_add_ip(card, ipaddr))
7801 kfree(ipaddr);
7802 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
7803 schedule_work(&card->kernel_thread_starter);
7804 return rc;
7805}
7806
7807void
7808qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
7809 const u8 *addr)
7810{
7811 struct qeth_ipaddr *ipaddr;
7812
7813 ipaddr = qeth_get_addr_buffer(proto);
7814 if (ipaddr){
7815 if (proto == QETH_PROT_IPV4){
7816 QETH_DBF_TEXT(trace, 2, "delvipa4");
7817 memcpy(&ipaddr->u.a4.addr, addr, 4);
7818 ipaddr->u.a4.mask = 0;
7819#ifdef CONFIG_QETH_IPV6
7820 } else if (proto == QETH_PROT_IPV6){
7821 QETH_DBF_TEXT(trace, 2, "delvipa6");
7822 memcpy(&ipaddr->u.a6.addr, addr, 16);
7823 ipaddr->u.a6.pfxlen = 0;
7824#endif
7825 }
7826 ipaddr->type = QETH_IP_TYPE_VIPA;
7827 } else
7828 return;
7829 if (!qeth_delete_ip(card, ipaddr))
7830 kfree(ipaddr);
7831 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
7832 schedule_work(&card->kernel_thread_starter);
7833}
7834
7835/*
7836 * proxy ARP related functions
7837 */
7838int
7839qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
7840 const u8 *addr)
7841{
7842 struct qeth_ipaddr *ipaddr;
7843 unsigned long flags;
7844 int rc = 0;
7845
7846 ipaddr = qeth_get_addr_buffer(proto);
7847 if (ipaddr){
7848 if (proto == QETH_PROT_IPV4){
7849 QETH_DBF_TEXT(trace, 2, "addrxip4");
7850 memcpy(&ipaddr->u.a4.addr, addr, 4);
7851 ipaddr->u.a4.mask = 0;
7852#ifdef CONFIG_QETH_IPV6
7853 } else if (proto == QETH_PROT_IPV6){
7854 QETH_DBF_TEXT(trace, 2, "addrxip6");
7855 memcpy(&ipaddr->u.a6.addr, addr, 16);
7856 ipaddr->u.a6.pfxlen = 0;
7857#endif
7858 }
7859 ipaddr->type = QETH_IP_TYPE_RXIP;
7860 ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
7861 ipaddr->del_flags = 0;
7862 } else
7863 return -ENOMEM;
7864 spin_lock_irqsave(&card->ip_lock, flags);
7865 if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
7866 __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
7867 rc = -EEXIST;
7868 spin_unlock_irqrestore(&card->ip_lock, flags);
7869 if (rc){
7870 PRINT_WARN("Cannot add RXIP. Address already exists!\n");
7871 return rc;
7872 }
7873 if (!qeth_add_ip(card, ipaddr))
7874 kfree(ipaddr);
7875 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
7876 schedule_work(&card->kernel_thread_starter);
7877 return 0;
7878}
7879
7880void
7881qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
7882 const u8 *addr)
7883{
7884 struct qeth_ipaddr *ipaddr;
7885
7886 ipaddr = qeth_get_addr_buffer(proto);
7887 if (ipaddr){
7888 if (proto == QETH_PROT_IPV4){
7889 QETH_DBF_TEXT(trace, 2, "addrxip4");
7890 memcpy(&ipaddr->u.a4.addr, addr, 4);
7891 ipaddr->u.a4.mask = 0;
7892#ifdef CONFIG_QETH_IPV6
7893 } else if (proto == QETH_PROT_IPV6){
7894 QETH_DBF_TEXT(trace, 2, "addrxip6");
7895 memcpy(&ipaddr->u.a6.addr, addr, 16);
7896 ipaddr->u.a6.pfxlen = 0;
7897#endif
7898 }
7899 ipaddr->type = QETH_IP_TYPE_RXIP;
7900 } else
7901 return;
7902 if (!qeth_delete_ip(card, ipaddr))
7903 kfree(ipaddr);
7904 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
7905 schedule_work(&card->kernel_thread_starter);
7906}
7907
7908/**
7909 * IP event handler
7910 */
7911static int
7912qeth_ip_event(struct notifier_block *this,
7913 unsigned long event,void *ptr)
7914{
7915 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
7916 struct net_device *dev =(struct net_device *) ifa->ifa_dev->dev;
7917 struct qeth_ipaddr *addr;
7918 struct qeth_card *card;
7919
7920 QETH_DBF_TEXT(trace,3,"ipevent");
7921 card = qeth_get_card_from_dev(dev);
7922 if (!card)
7923 return NOTIFY_DONE;
7924 if (card->options.layer2)
7925 return NOTIFY_DONE;
7926
7927 addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
7928 if (addr != NULL) {
7929 addr->u.a4.addr = ifa->ifa_address;
7930 addr->u.a4.mask = ifa->ifa_mask;
7931 addr->type = QETH_IP_TYPE_NORMAL;
7932 } else
7933 goto out;
7934
7935 switch(event) {
7936 case NETDEV_UP:
7937 if (!qeth_add_ip(card, addr))
7938 kfree(addr);
7939 break;
7940 case NETDEV_DOWN:
7941 if (!qeth_delete_ip(card, addr))
7942 kfree(addr);
7943 break;
7944 default:
7945 break;
7946 }
7947 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
7948 schedule_work(&card->kernel_thread_starter);
7949out:
7950 return NOTIFY_DONE;
7951}
7952
7953static struct notifier_block qeth_ip_notifier = {
7954 qeth_ip_event,
7955 0
7956};
7957
7958#ifdef CONFIG_QETH_IPV6
7959/**
7960 * IPv6 event handler
7961 */
7962static int
7963qeth_ip6_event(struct notifier_block *this,
7964 unsigned long event,void *ptr)
7965{
7966
7967 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
7968 struct net_device *dev = (struct net_device *)ifa->idev->dev;
7969 struct qeth_ipaddr *addr;
7970 struct qeth_card *card;
7971
7972 QETH_DBF_TEXT(trace,3,"ip6event");
7973
7974 card = qeth_get_card_from_dev(dev);
7975 if (!card)
7976 return NOTIFY_DONE;
7977 if (!qeth_is_supported(card, IPA_IPV6))
7978 return NOTIFY_DONE;
7979
7980 addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
7981 if (addr != NULL) {
7982 memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
7983 addr->u.a6.pfxlen = ifa->prefix_len;
7984 addr->type = QETH_IP_TYPE_NORMAL;
7985 } else
7986 goto out;
7987
7988 switch(event) {
7989 case NETDEV_UP:
7990 if (!qeth_add_ip(card, addr))
7991 kfree(addr);
7992 break;
7993 case NETDEV_DOWN:
7994 if (!qeth_delete_ip(card, addr))
7995 kfree(addr);
7996 break;
7997 default:
7998 break;
7999 }
8000 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
8001 schedule_work(&card->kernel_thread_starter);
8002out:
8003 return NOTIFY_DONE;
8004}
8005
8006static struct notifier_block qeth_ip6_notifier = {
8007 qeth_ip6_event,
8008 0
8009};
8010#endif
8011
8012static int
8013qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
8014{
8015
8016 struct device *entry;
8017 struct qeth_card *card;
8018
8019 down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
8020 list_for_each_entry(entry, &qeth_ccwgroup_driver.driver.devices,
8021 driver_list) {
8022 card = (struct qeth_card *) entry->driver_data;
8023 qeth_clear_ip_list(card, 0, 0);
8024 qeth_qdio_clear_card(card, 0);
8025 }
8026 up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
8027 return NOTIFY_DONE;
8028}
8029
8030
8031static struct notifier_block qeth_reboot_notifier = {
8032 qeth_reboot_event,
8033 0
8034};
8035
8036static int
8037qeth_register_notifiers(void)
8038{
8039 int r;
8040
8041 QETH_DBF_TEXT(trace,5,"regnotif");
8042 if ((r = register_reboot_notifier(&qeth_reboot_notifier)))
8043 return r;
8044 if ((r = register_inetaddr_notifier(&qeth_ip_notifier)))
8045 goto out_reboot;
8046#ifdef CONFIG_QETH_IPV6
8047 if ((r = register_inet6addr_notifier(&qeth_ip6_notifier)))
8048 goto out_ipv4;
8049#endif
8050 return 0;
8051
8052#ifdef CONFIG_QETH_IPV6
8053out_ipv4:
8054 unregister_inetaddr_notifier(&qeth_ip_notifier);
8055#endif
8056out_reboot:
8057 unregister_reboot_notifier(&qeth_reboot_notifier);
8058 return r;
8059}
8060
8061/**
8062 * unregister all event notifiers
8063 */
8064static void
8065qeth_unregister_notifiers(void)
8066{
8067
8068 QETH_DBF_TEXT(trace,5,"unregnot");
8069 BUG_ON(unregister_reboot_notifier(&qeth_reboot_notifier));
8070 BUG_ON(unregister_inetaddr_notifier(&qeth_ip_notifier));
8071#ifdef CONFIG_QETH_IPV6
8072 BUG_ON(unregister_inet6addr_notifier(&qeth_ip6_notifier));
8073#endif /* QETH_IPV6 */
8074
8075}
8076
8077#ifdef CONFIG_QETH_IPV6
8078static int
8079qeth_ipv6_init(void)
8080{
8081 qeth_old_arp_constructor = arp_tbl.constructor;
8082 write_lock(&arp_tbl.lock);
8083 arp_tbl.constructor = qeth_arp_constructor;
8084 write_unlock(&arp_tbl.lock);
8085
8086 arp_direct_ops = (struct neigh_ops*)
8087 kmalloc(sizeof(struct neigh_ops), GFP_KERNEL);
8088 if (!arp_direct_ops)
8089 return -ENOMEM;
8090
8091 memcpy(arp_direct_ops, &arp_direct_ops_template,
8092 sizeof(struct neigh_ops));
8093
8094 return 0;
8095}
8096
8097static void
8098qeth_ipv6_uninit(void)
8099{
8100 write_lock(&arp_tbl.lock);
8101 arp_tbl.constructor = qeth_old_arp_constructor;
8102 write_unlock(&arp_tbl.lock);
8103 kfree(arp_direct_ops);
8104}
8105#endif /* CONFIG_QETH_IPV6 */
8106
8107static void
8108qeth_sysfs_unregister(void)
8109{
8110 qeth_remove_driver_attributes();
8111 ccw_driver_unregister(&qeth_ccw_driver);
8112 ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
8113 s390_root_dev_unregister(qeth_root_dev);
8114}
8115/**
8116 * register qeth with sysfs
8117 */
8118static int
8119qeth_sysfs_register(void)
8120{
8121 int rc=0;
8122
8123 rc = ccwgroup_driver_register(&qeth_ccwgroup_driver);
8124 if (rc)
8125 return rc;
8126 rc = ccw_driver_register(&qeth_ccw_driver);
8127 if (rc)
8128 return rc;
8129 rc = qeth_create_driver_attributes();
8130 if (rc)
8131 return rc;
8132 qeth_root_dev = s390_root_dev_register("qeth");
8133 if (IS_ERR(qeth_root_dev)) {
8134 rc = PTR_ERR(qeth_root_dev);
8135 return rc;
8136 }
8137 return 0;
8138}
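
Note that qeth_sysfs_register() returns on the first failure without unwinding what it already registered, and qeth_init() below jumps to out_sysfs (which calls qeth_sysfs_unregister() for everything) even when registration itself failed part-way. A hedged sketch of a leak-free variant, using the same calls in the same order:

	/* Sketch: unwind partial registration on failure. */
	static int
	qeth_sysfs_register(void)
	{
		int rc;

		rc = ccwgroup_driver_register(&qeth_ccwgroup_driver);
		if (rc)
			return rc;
		rc = ccw_driver_register(&qeth_ccw_driver);
		if (rc)
			goto out_ccwgroup;
		rc = qeth_create_driver_attributes();
		if (rc)
			goto out_ccw;
		qeth_root_dev = s390_root_dev_register("qeth");
		if (IS_ERR(qeth_root_dev)) {
			rc = PTR_ERR(qeth_root_dev);
			goto out_attributes;
		}
		return 0;

	out_attributes:
		qeth_remove_driver_attributes();
	out_ccw:
		ccw_driver_unregister(&qeth_ccw_driver);
	out_ccwgroup:
		ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
		return rc;
	}
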
8139
8140/***
8141 * init function
8142 */
8143static int __init
8144qeth_init(void)
8145{
8146 int rc=0;
8147
8148 qeth_eyecatcher();
8149 PRINT_INFO("loading %s (%s/%s/%s/%s/%s/%s/%s %s %s)\n",
8150 version, VERSION_QETH_C, VERSION_QETH_H,
8151 VERSION_QETH_MPC_H, VERSION_QETH_MPC_C,
8152 VERSION_QETH_FS_H, VERSION_QETH_PROC_C,
8153 VERSION_QETH_SYS_C, QETH_VERSION_IPV6,
8154 QETH_VERSION_VLAN);
8155
8156 INIT_LIST_HEAD(&qeth_card_list.list);
8157 INIT_LIST_HEAD(&qeth_notify_list);
8158 spin_lock_init(&qeth_notify_lock);
8159 rwlock_init(&qeth_card_list.rwlock);
8160
8161 if ((rc = qeth_register_dbf_views()))
8162 goto out_err;
8163 if ((rc = qeth_sysfs_register()))
8164 goto out_sysfs;
8165
8166#ifdef CONFIG_QETH_IPV6
8167 if ((rc = qeth_ipv6_init())) {
8168 PRINT_ERR("Out of memory during ipv6 init.\n");
8169 goto out_sysfs;
8170 }
8171#endif /* QETH_IPV6 */
8172 if ((rc = qeth_register_notifiers()))
8173 goto out_ipv6;
8174 if ((rc = qeth_create_procfs_entries()))
8175 goto out_notifiers;
8176
8177 return rc;
8178
8179out_notifiers:
8180 qeth_unregister_notifiers();
8181out_ipv6:
8182#ifdef CONFIG_QETH_IPV6
8183 qeth_ipv6_uninit();
8184#endif /* QETH_IPV6 */
8185out_sysfs:
8186 qeth_sysfs_unregister();
8187 qeth_unregister_dbf_views();
8188out_err:
8189 PRINT_ERR("Initialization failed\n");
8190 return rc;
8191}
8192
8193static void
8194__exit qeth_exit(void)
8195{
8196 struct qeth_card *card, *tmp;
8197 unsigned long flags;
8198
8199 QETH_DBF_TEXT(trace,1, "cleanup.");
8200
8201 /*
8202 * We would not need to clean up our devices here, because the
8203 * common device layer calls qeth_remove_device for each device
8204 * as soon as we unregister our driver (done in qeth_sysfs_unregister).
8205 * But we do cleanup here so we can do a "soft" shutdown of our cards.
8206 * qeth_remove_device called by the common device layer would otherwise
8207 * do a "hard" shutdown (card->use_hard_stop is set to one in
8208 * qeth_remove_device).
8209 */
8210again:
8211 read_lock_irqsave(&qeth_card_list.rwlock, flags);
8212 list_for_each_entry_safe(card, tmp, &qeth_card_list.list, list){
8213 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
8214 qeth_set_offline(card->gdev);
8215 qeth_remove_device(card->gdev);
8216 goto again;
8217 }
8218 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
8219#ifdef CONFIG_QETH_IPV6
8220 qeth_ipv6_uninit();
8221#endif
8222 qeth_unregister_notifiers();
8223 qeth_remove_procfs_entries();
8224 qeth_sysfs_unregister();
8225 qeth_unregister_dbf_views();
8226 printk(KERN_INFO "qeth: removed\n");
8227}
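
The goto-again loop above drops the read lock before offlining a card, because the teardown must not run under qeth_card_list.rwlock, and restarting the walk from scratch is what keeps the iteration valid after the list changes. An equivalent idiom, assuming (as the loop above already does) that qeth_remove_device() unlinks the card from qeth_card_list, pops one entry at a time:

	/* Sketch: detach-one-then-work loop equivalent to the goto-again walk. */
	for (;;) {
		read_lock_irqsave(&qeth_card_list.rwlock, flags);
		if (list_empty(&qeth_card_list.list)) {
			read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
			break;
		}
		card = list_entry(qeth_card_list.list.next,
				  struct qeth_card, list);
		read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
		qeth_set_offline(card->gdev);
		qeth_remove_device(card->gdev); /* unlinks card from the list */
	}
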
8228
8229EXPORT_SYMBOL(qeth_eyecatcher);
8230module_init(qeth_init);
8231module_exit(qeth_exit);
8232MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>");
8233MODULE_DESCRIPTION("Linux on zSeries OSA Express and HiperSockets support\n" \
8234 "Copyright 2000,2003 IBM Corporation\n");
8235
8236MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_mpc.c b/drivers/s390/net/qeth_mpc.c
new file mode 100644
index 000000000000..f685ecc7da99
--- /dev/null
+++ b/drivers/s390/net/qeth_mpc.c
@@ -0,0 +1,168 @@
1/*
2 * linux/drivers/s390/net/qeth_mpc.c
3 *
4 * Linux on zSeries OSA Express and HiperSockets support
5 *
6 * Copyright 2000,2003 IBM Corporation
7 * Author(s): Frank Pavlic <pavlic@de.ibm.com>
8 * Thomas Spatzier <tspat@de.ibm.com>
9 *
10 */
11#include <asm/cio.h>
12#include "qeth_mpc.h"
13
14const char *VERSION_QETH_MPC_C = "$Revision: 1.11 $";
15
16unsigned char IDX_ACTIVATE_READ[]={
17 0x00,0x00,0x80,0x00, 0x00,0x00,0x00,0x00,
18 0x19,0x01,0x01,0x80, 0x00,0x00,0x00,0x00,
19 0x00,0x00,0x00,0x00, 0x00,0x00,0xc8,0xc1,
20 0xd3,0xd3,0xd6,0xd3, 0xc5,0x40,0x00,0x00,
21 0x00,0x00
22};
23
24unsigned char IDX_ACTIVATE_WRITE[]={
25 0x00,0x00,0x80,0x00, 0x00,0x00,0x00,0x00,
26 0x15,0x01,0x01,0x80, 0x00,0x00,0x00,0x00,
27 0xff,0xff,0x00,0x00, 0x00,0x00,0xc8,0xc1,
28 0xd3,0xd3,0xd6,0xd3, 0xc5,0x40,0x00,0x00,
29 0x00,0x00
30};
31
32unsigned char CM_ENABLE[]={
33 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x01,
34 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x63,
35 0x10,0x00,0x00,0x01,
36 0x00,0x00,0x00,0x00,
37 0x81,0x7e,0x00,0x01, 0x00,0x00,0x00,0x00,
38 0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x23,
39 0x00,0x00,0x23,0x05, 0x00,0x00,0x00,0x00,
40 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
41 0x01,0x00,0x00,0x23, 0x00,0x00,0x00,0x40,
42 0x00,0x0c,0x41,0x02, 0x00,0x17,0x00,0x00,
43 0x00,0x00,0x00,0x00,
44 0x00,0x0b,0x04,0x01,
45 0x7e,0x04,0x05,0x00, 0x01,0x01,0x0f,
46 0x00,
47 0x0c,0x04,0x02,0xff, 0xff,0xff,0xff,0xff,
48 0xff,0xff,0xff
49};
50
51unsigned char CM_SETUP[]={
52 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x02,
53 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x64,
54 0x10,0x00,0x00,0x01,
55 0x00,0x00,0x00,0x00,
56 0x81,0x7e,0x00,0x01, 0x00,0x00,0x00,0x00,
57 0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x24,
58 0x00,0x00,0x24,0x05, 0x00,0x00,0x00,0x00,
59 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
60 0x01,0x00,0x00,0x24, 0x00,0x00,0x00,0x40,
61 0x00,0x0c,0x41,0x04, 0x00,0x18,0x00,0x00,
62 0x00,0x00,0x00,0x00,
63 0x00,0x09,0x04,0x04,
64 0x05,0x00,0x01,0x01, 0x11,
65 0x00,0x09,0x04,
66 0x05,0x05,0x00,0x00, 0x00,0x00,
67 0x00,0x06,
68 0x04,0x06,0xc8,0x00
69};
70
71unsigned char ULP_ENABLE[]={
72 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x03,
73 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x6b,
74 0x10,0x00,0x00,0x01,
75 0x00,0x00,0x00,0x00,
76 0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x01,
77 0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x2b,
78 0x00,0x00,0x2b,0x05, 0x20,0x01,0x00,0x00,
79 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
80 0x01,0x00,0x00,0x2b, 0x00,0x00,0x00,0x40,
81 0x00,0x0c,0x41,0x02, 0x00,0x1f,0x00,0x00,
82 0x00,0x00,0x00,0x00,
83 0x00,0x0b,0x04,0x01,
84 0x03,0x04,0x05,0x00, 0x01,0x01,0x12,
85 0x00,
86 0x14,0x04,0x0a,0x00, 0x20,0x00,0x00,0xff,
87 0xff,0x00,0x08,0xc8, 0xe8,0xc4,0xf1,0xc7,
88 0xf1,0x00,0x00
89};
90
91unsigned char ULP_SETUP[]={
92 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x04,
93 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x6c,
94 0x10,0x00,0x00,0x01,
95 0x00,0x00,0x00,0x00,
96 0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x02,
97 0x00,0x00,0x00,0x01, 0x00,0x24,0x00,0x2c,
98 0x00,0x00,0x2c,0x05, 0x20,0x01,0x00,0x00,
99 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
100 0x01,0x00,0x00,0x2c, 0x00,0x00,0x00,0x40,
101 0x00,0x0c,0x41,0x04, 0x00,0x20,0x00,0x00,
102 0x00,0x00,0x00,0x00,
103 0x00,0x09,0x04,0x04,
104 0x05,0x00,0x01,0x01, 0x14,
105 0x00,0x09,0x04,
106 0x05,0x05,0x30,0x01, 0x00,0x00,
107 0x00,0x06,
108 0x04,0x06,0x40,0x00,
109 0x00,0x08,0x04,0x0b,
110 0x00,0x00,0x00,0x00
111};
112
113unsigned char DM_ACT[]={
114 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x05,
115 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x55,
116 0x10,0x00,0x00,0x01,
117 0x00,0x00,0x00,0x00,
118 0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x03,
119 0x00,0x00,0x00,0x02, 0x00,0x24,0x00,0x15,
120 0x00,0x00,0x2c,0x05, 0x20,0x01,0x00,0x00,
121 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
122 0x01,0x00,0x00,0x15, 0x00,0x00,0x00,0x40,
123 0x00,0x0c,0x43,0x60, 0x00,0x09,0x00,0x00,
124 0x00,0x00,0x00,0x00,
125 0x00,0x09,0x04,0x04,
126 0x05,0x40,0x01,0x01, 0x00
127};
128
129unsigned char IPA_PDU_HEADER[]={
130 0x00,0xe0,0x00,0x00, 0x77,0x77,0x77,0x77,
131 0x00,0x00,0x00,0x14, 0x00,0x00,
132 (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd))/256,
133 (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd))%256,
134 0x10,0x00,0x00,0x01, 0x00,0x00,0x00,0x00,
135 0xc1,0x03,0x00,0x01, 0x00,0x00,0x00,0x00,
136 0x00,0x00,0x00,0x00, 0x00,0x24,
137 sizeof(struct qeth_ipa_cmd)/256,
138 sizeof(struct qeth_ipa_cmd)%256,
139 0x00,
140 sizeof(struct qeth_ipa_cmd)/256,
141 sizeof(struct qeth_ipa_cmd)%256,0x05, 0x77,0x77,0x77,0x77,
142 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
143 0x01,0x00,
144 sizeof(struct qeth_ipa_cmd)/256,
145 sizeof(struct qeth_ipa_cmd)%256,
146 0x00,0x00,0x00,0x40,
147};
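
The X/256, X%256 pairs scattered through IPA_PDU_HEADER are 16-bit big-endian length fields computed at compile time; the running driver patches the same fields in copies of the template. A hypothetical helper (not part of the driver, shown only to make the byte layout explicit) that stores such a length:

	/* Hypothetical illustration: store a 16-bit big-endian length,
	 * matching the X/256, X%256 byte pairs in the templates above. */
	static inline void qeth_put_be16(unsigned char *p, unsigned int len)
	{
		p[0] = (len >> 8) & 0xff;	/* len / 256 */
		p[1] = len & 0xff;		/* len % 256 */
	}
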
148
149unsigned char WRITE_CCW[]={
150 0x01,CCW_FLAG_SLI,0,0,
151 0,0,0,0
152};
153
154unsigned char READ_CCW[]={
155 0x02,CCW_FLAG_SLI,0,0,
156 0,0,0,0
157};
158
159
160
161
162
163
164
165
166
167
168
diff --git a/drivers/s390/net/qeth_mpc.h b/drivers/s390/net/qeth_mpc.h
new file mode 100644
index 000000000000..3d916b5c5d09
--- /dev/null
+++ b/drivers/s390/net/qeth_mpc.h
@@ -0,0 +1,538 @@
1/*
2 * linux/drivers/s390/net/qeth_mpc.h
3 *
4 * Linux on zSeries OSA Express and HiperSockets support
5 *
6 * Copyright 2000,2003 IBM Corporation
7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8 * Thomas Spatzier <tspat@de.ibm.com>
9 * Frank Pavlic <pavlic@de.ibm.com>
10 *
11 */
12#ifndef __QETH_MPC_H__
13#define __QETH_MPC_H__
14
15#include <asm/qeth.h>
16
17#define VERSION_QETH_MPC_H "$Revision: 1.43 $"
18
19extern const char *VERSION_QETH_MPC_C;
20
21#define IPA_PDU_HEADER_SIZE 0x40
22#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer+0x0e)
23#define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer+0x26)
24#define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer+0x2a)
25#define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer+0x3a)
26
27extern unsigned char IPA_PDU_HEADER[];
28#define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer+0x2c)
29
30#define IPA_CMD_LENGTH (IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd))
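
The QETH_IPA_PDU_LEN_* and QETH_IPA_CMD_DEST_ADDR macros evaluate to pointers into a raw command buffer, not to values; callers read or patch the bytes at those offsets. A hedged usage sketch, assuming buffer points at an IPA_PDU_HEADER-formatted area and total_len is the length to store:

	/* Sketch: patch the 2-byte big-endian total-length field in place. */
	unsigned char *len = QETH_IPA_PDU_LEN_TOTAL(buffer);

	len[0] = total_len / 256;
	len[1] = total_len % 256;
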
31
32#define QETH_SEQ_NO_LENGTH 4
33#define QETH_MPC_TOKEN_LENGTH 4
34#define QETH_MCL_LENGTH 4
35#define OSA_ADDR_LEN 6
36
37#define QETH_TIMEOUT (10 * HZ)
38#define QETH_IPA_TIMEOUT (45 * HZ)
39#define QETH_IDX_COMMAND_SEQNO 0xffff0000
40#define SR_INFO_LEN 16
41
42#define QETH_CLEAR_CHANNEL_PARM -10
43#define QETH_HALT_CHANNEL_PARM -11
44
45/*****************************************************************************/
46/* IP Assist related definitions */
47/*****************************************************************************/
48#define IPA_CMD_INITIATOR_HOST 0x00
49#define IPA_CMD_INITIATOR_HYDRA 0x01
50#define IPA_CMD_PRIM_VERSION_NO 0x01
51
52enum qeth_card_types {
53 QETH_CARD_TYPE_UNKNOWN = 0,
54 QETH_CARD_TYPE_OSAE = 10,
55 QETH_CARD_TYPE_IQD = 1234,
56};
57
58#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
59/* only the first two bytes are looked at in qeth_get_cardname_short */
60enum qeth_link_types {
61 QETH_LINK_TYPE_FAST_ETH = 0x01,
62 QETH_LINK_TYPE_HSTR = 0x02,
63 QETH_LINK_TYPE_GBIT_ETH = 0x03,
64 QETH_LINK_TYPE_10GBIT_ETH = 0x10,
65 QETH_LINK_TYPE_LANE_ETH100 = 0x81,
66 QETH_LINK_TYPE_LANE_TR = 0x82,
67 QETH_LINK_TYPE_LANE_ETH1000 = 0x83,
68 QETH_LINK_TYPE_LANE = 0x88,
69 QETH_LINK_TYPE_ATM_NATIVE = 0x90,
70};
71
72enum qeth_tr_macaddr_modes {
73 QETH_TR_MACADDR_NONCANONICAL = 0,
74 QETH_TR_MACADDR_CANONICAL = 1,
75};
76
77enum qeth_tr_broadcast_modes {
78 QETH_TR_BROADCAST_ALLRINGS = 0,
79 QETH_TR_BROADCAST_LOCAL = 1,
80};
81
82/* these values match CHECKSUM_* in include/linux/skbuff.h */
83enum qeth_checksum_types {
84 SW_CHECKSUMMING = 0, /* TODO: set to bit flag used in IPA Command */
85 HW_CHECKSUMMING = 1,
86 NO_CHECKSUMMING = 2,
87};
88#define QETH_CHECKSUM_DEFAULT SW_CHECKSUMMING
89
90/*
91 * Routing stuff
92 */
93#define RESET_ROUTING_FLAG 0x10 /* indicate that routing type shall be set */
94enum qeth_routing_types {
95 NO_ROUTER = 0, /* TODO: set to bit flag used in IPA Command */
96 PRIMARY_ROUTER = 1,
97 SECONDARY_ROUTER = 2,
98 MULTICAST_ROUTER = 3,
99 PRIMARY_CONNECTOR = 4,
100 SECONDARY_CONNECTOR = 5,
101};
102
103
104/* IPA Commands */
105enum qeth_ipa_cmds {
106 IPA_CMD_STARTLAN = 0x01,
107 IPA_CMD_STOPLAN = 0x02,
108 IPA_CMD_SETVMAC = 0x21,
109 IPA_CMD_DELVMAC = 0x22,
110 IPA_CMD_SETGMAC = 0x23,
111 IPA_CMD_DELGMAC = 0x24,
112 IPA_CMD_SETVLAN = 0x25,
113 IPA_CMD_DELVLAN = 0x26,
114 IPA_CMD_SETIP = 0xb1,
115 IPA_CMD_DELIP = 0xb7,
116 IPA_CMD_QIPASSIST = 0xb2,
117 IPA_CMD_SETASSPARMS = 0xb3,
118 IPA_CMD_SETIPM = 0xb4,
119 IPA_CMD_DELIPM = 0xb5,
120 IPA_CMD_SETRTG = 0xb6,
121 IPA_CMD_SETADAPTERPARMS = 0xb8,
122 IPA_CMD_IPFRAME = 0xb9,
123 IPA_CMD_ADD_ADDR_ENTRY = 0xc1,
124 IPA_CMD_DELETE_ADDR_ENTRY = 0xc2,
125 IPA_CMD_CREATE_ADDR = 0xc3,
126 IPA_CMD_DESTROY_ADDR = 0xc4,
127 IPA_CMD_REGISTER_LOCAL_ADDR = 0xd1,
128 IPA_CMD_UNREGISTER_LOCAL_ADDR = 0xd2,
129};
130
131enum qeth_ip_ass_cmds {
132 IPA_CMD_ASS_START = 0x0001,
133 IPA_CMD_ASS_STOP = 0x0002,
134 IPA_CMD_ASS_CONFIGURE = 0x0003,
135 IPA_CMD_ASS_ENABLE = 0x0004,
136};
137
138enum qeth_arp_process_subcmds {
139 IPA_CMD_ASS_ARP_SET_NO_ENTRIES = 0x0003,
140 IPA_CMD_ASS_ARP_QUERY_CACHE = 0x0004,
141 IPA_CMD_ASS_ARP_ADD_ENTRY = 0x0005,
142 IPA_CMD_ASS_ARP_REMOVE_ENTRY = 0x0006,
143 IPA_CMD_ASS_ARP_FLUSH_CACHE = 0x0007,
144 IPA_CMD_ASS_ARP_QUERY_INFO = 0x0104,
145 IPA_CMD_ASS_ARP_QUERY_STATS = 0x0204,
146};
147
148/* Return Codes for IPA Commands */
149enum qeth_ipa_return_codes {
150 IPA_RC_SUCCESS = 0x0000,
151 IPA_RC_NOTSUPP = 0x0001,
152 IPA_RC_NO_ACCESS = 0x0002,
153 IPA_RC_FAILED = 0x0003,
154 IPA_RC_DATA_MISMATCH = 0xe001,
155 IPA_RC_INVALID_LAN_TYPE = 0xe003,
156 IPA_RC_INVALID_LAN_NO = 0xe004,
157 IPA_RC_IPADDR_ALREADY_REG = 0xe005,
158 IPA_RC_IPADDR_TABLE_FULL = 0xe006,
159 IPA_RC_IPADDR_ALREADY_USED = 0xe00a,
160 IPA_RC_ASSNO_NOT_SUPP = 0xe00d,
161 IPA_RC_ASSCMD_START_FAILED = 0xe00e,
162 IPA_RC_ASSCMD_PART_SUCCESS = 0xe00f,
163 IPA_RC_IPADDR_NOT_DEFINED = 0xe010,
164 IPA_RC_LAN_OFFLINE = 0xe080,
165};
166
167/* IPA function flags; each flag marks availability of respective function */
168enum qeth_ipa_funcs {
169 IPA_ARP_PROCESSING = 0x00000001L,
170 IPA_INBOUND_CHECKSUM = 0x00000002L,
171 IPA_OUTBOUND_CHECKSUM = 0x00000004L,
172 IPA_IP_FRAGMENTATION = 0x00000008L,
173 IPA_FILTERING = 0x00000010L,
174 IPA_IPV6 = 0x00000020L,
175 IPA_MULTICASTING = 0x00000040L,
176 IPA_IP_REASSEMBLY = 0x00000080L,
177 IPA_QUERY_ARP_COUNTERS = 0x00000100L,
178 IPA_QUERY_ARP_ADDR_INFO = 0x00000200L,
179 IPA_SETADAPTERPARMS = 0x00000400L,
180 IPA_VLAN_PRIO = 0x00000800L,
181 IPA_PASSTHRU = 0x00001000L,
182 IPA_FULL_VLAN = 0x00004000L,
183 IPA_SOURCE_MAC = 0x00010000L,
184 IPA_OSA_MC_ROUTER = 0x00020000L,
185 IPA_QUERY_ARP_ASSIST = 0x00040000L,
186 IPA_INBOUND_TSO = 0x00080000L,
187 IPA_OUTBOUND_TSO = 0x00100000L,
188};
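
Each qeth_ipa_funcs value is a single bit, so a card's supported and enabled masks are tested by AND-ing. A minimal sketch of such a test (the driver's real helpers, e.g. qeth_is_supported(), live in qeth.h):

	/* Sketch: test one IPA function bit in a support mask. */
	static inline int ipa_func_in_mask(__u32 mask, enum qeth_ipa_funcs func)
	{
		return (mask & func) != 0;
	}
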
189
190/* SETIP/DELIP IPA Command: ***************************************************/
191enum qeth_ipa_setdelip_flags {
192 QETH_IPA_SETDELIP_DEFAULT = 0x00L, /* default */
193 QETH_IPA_SETIP_VIPA_FLAG = 0x01L, /* no grat. ARP */
194 QETH_IPA_SETIP_TAKEOVER_FLAG = 0x02L, /* nofail on grat. ARP */
195 QETH_IPA_DELIP_ADDR_2_B_TAKEN_OVER = 0x20L,
196 QETH_IPA_DELIP_VIPA_FLAG = 0x40L,
197 QETH_IPA_DELIP_ADDR_NEEDS_SETIP = 0x80L,
198};
199
200/* SETADAPTER IPA Command: ****************************************************/
201enum qeth_ipa_setadp_cmd {
202 IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x01,
203 IPA_SETADP_ALTER_MAC_ADDRESS = 0x02,
204 IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x04,
205 IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x08,
206 IPA_SETADP_SET_ADDRESSING_MODE = 0x10,
207 IPA_SETADP_SET_CONFIG_PARMS = 0x20,
208 IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x40,
209 IPA_SETADP_SET_BROADCAST_MODE = 0x80,
210 IPA_SETADP_SEND_OSA_MESSAGE = 0x0100,
211 IPA_SETADP_SET_SNMP_CONTROL = 0x0200,
212 IPA_SETADP_READ_SNMP_PARMS = 0x0400,
213 IPA_SETADP_WRITE_SNMP_PARMS = 0x0800,
214 IPA_SETADP_QUERY_CARD_INFO = 0x1000,
215};
216enum qeth_ipa_mac_ops {
217 CHANGE_ADDR_READ_MAC = 0,
218 CHANGE_ADDR_REPLACE_MAC = 1,
219 CHANGE_ADDR_ADD_MAC = 2,
220 CHANGE_ADDR_DEL_MAC = 4,
221 CHANGE_ADDR_RESET_MAC = 8,
222};
223enum qeth_ipa_addr_ops {
224 CHANGE_ADDR_READ_ADDR = 0,
225 CHANGE_ADDR_ADD_ADDR = 1,
226 CHANGE_ADDR_DEL_ADDR = 2,
227 CHANGE_ADDR_FLUSH_ADDR_TABLE = 4,
228
229
230};
231/* (SET)DELIP(M) IPA stuff ***************************************************/
232struct qeth_ipacmd_setdelip4 {
233 __u8 ip_addr[4];
234 __u8 mask[4];
235 __u32 flags;
236} __attribute__ ((packed));
237
238struct qeth_ipacmd_setdelip6 {
239 __u8 ip_addr[16];
240 __u8 mask[16];
241 __u32 flags;
242} __attribute__ ((packed));
243
244struct qeth_ipacmd_setdelipm {
245 __u8 mac[6];
246 __u8 padding[2];
247 __u8 ip6[12];
248 __u8 ip4[4];
249} __attribute__ ((packed));
250
251struct qeth_ipacmd_layer2setdelmac {
252 __u32 mac_length;
253 __u8 mac[6];
254} __attribute__ ((packed));
255
256struct qeth_ipacmd_layer2setdelvlan {
257 __u16 vlan_id;
258} __attribute__ ((packed));
259
260
261struct qeth_ipacmd_setassparms_hdr {
262 __u32 assist_no;
263 __u16 length;
264 __u16 command_code;
265 __u16 return_code;
266 __u8 number_of_replies;
267 __u8 seq_no;
268} __attribute__((packed));
269
270struct qeth_arp_query_data {
271 __u16 request_bits;
272 __u16 reply_bits;
273 __u32 no_entries;
274 char data;
275} __attribute__((packed));
276
277/* used as parameter for arp_query reply */
278struct qeth_arp_query_info {
279 __u32 udata_len;
280 __u16 mask_bits;
281 __u32 udata_offset;
282 __u32 no_entries;
283 char *udata;
284};
285
286/* SETASSPARMS IPA Command: */
287struct qeth_ipacmd_setassparms {
288 struct qeth_ipacmd_setassparms_hdr hdr;
289 union {
290 __u32 flags_32bit;
291 struct qeth_arp_cache_entry add_arp_entry;
292 struct qeth_arp_query_data query_arp;
293 __u8 ip[16];
294 } data;
295} __attribute__ ((packed));
296
297
298/* SETRTG IPA Command: ****************************************************/
299struct qeth_set_routing {
300 __u8 type;
301};
302
303/* SETADAPTERPARMS IPA Command: *******************************************/
304struct qeth_query_cmds_supp {
305 __u32 no_lantypes_supp;
306 __u8 lan_type;
307 __u8 reserved1[3];
308 __u32 supported_cmds;
309 __u8 reserved2[8];
310} __attribute__ ((packed));
311
312struct qeth_change_addr {
313 __u32 cmd;
314 __u32 addr_size;
315 __u32 no_macs;
316 __u8 addr[OSA_ADDR_LEN];
317} __attribute__ ((packed));
318
319
320struct qeth_snmp_cmd {
321 __u8 token[16];
322 __u32 request;
323 __u32 interface;
324 __u32 returncode;
325 __u32 firmwarelevel;
326 __u32 seqno;
327 __u8 data;
328} __attribute__ ((packed));
329
330struct qeth_snmp_ureq_hdr {
331 __u32 data_len;
332 __u32 req_len;
333 __u32 reserved1;
334 __u32 reserved2;
335} __attribute__ ((packed));
336
337struct qeth_snmp_ureq {
338 struct qeth_snmp_ureq_hdr hdr;
339 struct qeth_snmp_cmd cmd;
340} __attribute__((packed));
341
342struct qeth_ipacmd_setadpparms_hdr {
343 __u32 supp_hw_cmds;
344 __u32 reserved1;
345 __u16 cmdlength;
346 __u16 reserved2;
347 __u32 command_code;
348 __u16 return_code;
349 __u8 used_total;
350 __u8 seq_no;
351 __u32 reserved3;
352} __attribute__ ((packed));
353
354struct qeth_ipacmd_setadpparms {
355 struct qeth_ipacmd_setadpparms_hdr hdr;
356 union {
357 struct qeth_query_cmds_supp query_cmds_supp;
358 struct qeth_change_addr change_addr;
359 struct qeth_snmp_cmd snmp;
360 __u32 mode;
361 } data;
362} __attribute__ ((packed));
363
364/* IPFRAME IPA Command: ***************************************************/
365/* TODO: define in analogy to commands define above */
366
367/* ADD_ADDR_ENTRY IPA Command: ********************************************/
368/* TODO: define in analogy to commands define above */
369
370/* DELETE_ADDR_ENTRY IPA Command: *****************************************/
371/* TODO: define in analogy to commands define above */
372
373/* CREATE_ADDR IPA Command: ***********************************************/
374struct qeth_create_destroy_address {
375 __u8 unique_id[8];
376} __attribute__ ((packed));
377
378/* REGISTER_LOCAL_ADDR IPA Command: ***************************************/
379/* TODO: define in analogy to commands define above */
380
381/* UNREGISTER_LOCAL_ADDR IPA Command: *************************************/
382/* TODO: define in analogy to commands define above */
383
384/* Header for each IPA command */
385struct qeth_ipacmd_hdr {
386 __u8 command;
387 __u8 initiator;
388 __u16 seqno;
389 __u16 return_code;
390 __u8 adapter_type;
391 __u8 rel_adapter_no;
392 __u8 prim_version_no;
393 __u8 param_count;
394 __u16 prot_version;
395 __u32 ipa_supported;
396 __u32 ipa_enabled;
397} __attribute__ ((packed));
398
399/* The IPA command itself */
400struct qeth_ipa_cmd {
401 struct qeth_ipacmd_hdr hdr;
402 union {
403 struct qeth_ipacmd_setdelip4 setdelip4;
404 struct qeth_ipacmd_setdelip6 setdelip6;
405 struct qeth_ipacmd_setdelipm setdelipm;
406 struct qeth_ipacmd_setassparms setassparms;
407 struct qeth_ipacmd_layer2setdelmac setdelmac;
408 struct qeth_ipacmd_layer2setdelvlan setdelvlan;
409 struct qeth_create_destroy_address create_destroy_addr;
410 struct qeth_ipacmd_setadpparms setadapterparms;
411 struct qeth_set_routing setrtg;
412 } data;
413} __attribute__ ((packed));
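
Every IPA command is the fixed qeth_ipacmd_hdr followed by one member of the union, so the wire size is constant regardless of the sub-command. A hedged sketch of building a SETIP command with these structures (only fields defined above are touched; the enqueue path lives elsewhere in the driver):

	/* Sketch: fill an IPA SETIP command for an IPv4 address/mask,
	 * given 'ip' and 'mask' as 4-byte arrays. */
	struct qeth_ipa_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.hdr.command = IPA_CMD_SETIP;
	cmd.hdr.initiator = IPA_CMD_INITIATOR_HOST;
	cmd.hdr.prim_version_no = IPA_CMD_PRIM_VERSION_NO;
	memcpy(cmd.data.setdelip4.ip_addr, ip, 4);
	memcpy(cmd.data.setdelip4.mask, mask, 4);
	cmd.data.setdelip4.flags = QETH_IPA_SETDELIP_DEFAULT;
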
414
415/*
416 * special command for ARP processing.
417 * this is not included in setassparms command before, because we get
418 * problem with the size of struct qeth_ipacmd_setassparms otherwise
419 */
420enum qeth_ipa_arp_return_codes {
421 QETH_IPA_ARP_RC_SUCCESS = 0x0000,
422 QETH_IPA_ARP_RC_FAILED = 0x0001,
423 QETH_IPA_ARP_RC_NOTSUPP = 0x0002,
424 QETH_IPA_ARP_RC_OUT_OF_RANGE = 0x0003,
425 QETH_IPA_ARP_RC_Q_NOTSUPP = 0x0004,
426 QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
427};
428
429#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
430 sizeof(struct qeth_ipacmd_setassparms_hdr))
431#define QETH_IPA_ARP_DATA_POS(buffer) (buffer + IPA_PDU_HEADER_SIZE + \
432 QETH_SETASS_BASE_LEN)
433#define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
434 sizeof(struct qeth_ipacmd_setadpparms_hdr))
435#define QETH_SNMP_SETADP_CMDLENGTH 16
436
437#define QETH_ARP_DATA_SIZE 3968
438#define QETH_ARP_CMD_LEN (QETH_ARP_DATA_SIZE + 8)
439/* Helper functions */
440#define IS_IPA_REPLY(cmd) ((cmd)->hdr.initiator == IPA_CMD_INITIATOR_HOST)
441
442/*****************************************************************************/
443/* END OF IP Assist related definitions */
444/*****************************************************************************/
445
446
447extern unsigned char WRITE_CCW[];
448extern unsigned char READ_CCW[];
449
450extern unsigned char CM_ENABLE[];
451#define CM_ENABLE_SIZE 0x63
452#define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer+0x2c)
453#define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer+0x53)
454#define QETH_CM_ENABLE_USER_DATA(buffer) (buffer+0x5b)
455
456#define QETH_CM_ENABLE_RESP_FILTER_TOKEN(buffer) \
457 (PDU_ENCAPSULATION(buffer)+ 0x13)
458
459
460extern unsigned char CM_SETUP[];
461#define CM_SETUP_SIZE 0x64
462#define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer+0x2c)
463#define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer+0x51)
464#define QETH_CM_SETUP_FILTER_TOKEN(buffer) (buffer+0x5a)
465
466#define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \
467 (PDU_ENCAPSULATION(buffer) + 0x1a)
468
469extern unsigned char ULP_ENABLE[];
470#define ULP_ENABLE_SIZE 0x6b
471#define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer+0x61)
472#define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer+0x2c)
473#define QETH_ULP_ENABLE_FILTER_TOKEN(buffer) (buffer+0x53)
474#define QETH_ULP_ENABLE_PORTNAME_AND_LL(buffer) (buffer+0x62)
475#define QETH_ULP_ENABLE_RESP_FILTER_TOKEN(buffer) \
476 (PDU_ENCAPSULATION(buffer) + 0x13)
477#define QETH_ULP_ENABLE_RESP_MAX_MTU(buffer) \
478 (PDU_ENCAPSULATION(buffer)+ 0x1f)
479#define QETH_ULP_ENABLE_RESP_DIFINFO_LEN(buffer) \
480 (PDU_ENCAPSULATION(buffer) + 0x17)
481#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \
482 (PDU_ENCAPSULATION(buffer)+ 0x2b)
483/* Layer 2 definitions */
484#define QETH_PROT_LAYER2 0x08
485#define QETH_PROT_TCPIP 0x03
486#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer+0x50)
487#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer+0x19)
488
489extern unsigned char ULP_SETUP[];
490#define ULP_SETUP_SIZE 0x6c
491#define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer+0x2c)
492#define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer+0x51)
493#define QETH_ULP_SETUP_FILTER_TOKEN(buffer) (buffer+0x5a)
494#define QETH_ULP_SETUP_CUA(buffer) (buffer+0x68)
495#define QETH_ULP_SETUP_REAL_DEVADDR(buffer) (buffer+0x6a)
496
497#define QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(buffer) \
498 (PDU_ENCAPSULATION(buffer)+0x1a)
499
500
501extern unsigned char DM_ACT[];
502#define DM_ACT_SIZE 0x55
503#define QETH_DM_ACT_DEST_ADDR(buffer) (buffer+0x2c)
504#define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer+0x51)
505
506
507
508#define QETH_TRANSPORT_HEADER_SEQ_NO(buffer) (buffer+4)
509#define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer+0x1c)
510#define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer+0x20)
511
512extern unsigned char IDX_ACTIVATE_READ[];
513extern unsigned char IDX_ACTIVATE_WRITE[];
514
515#define IDX_ACTIVATE_SIZE 0x22
516#define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer+0x0c)
517#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b]&0x80)
518#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer+0x10)
519#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer+0x16)
520#define QETH_IDX_ACT_QDIO_DEV_CUA(buffer) (buffer+0x1e)
521#define QETH_IDX_ACT_QDIO_DEV_REALADDR(buffer) (buffer+0x20)
522#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08]&3)==2)
523#define QETH_IDX_REPLY_LEVEL(buffer) (buffer+0x12)
524
525#define PDU_ENCAPSULATION(buffer) \
526 (buffer + *(buffer + (*(buffer+0x0b)) + \
527 *(buffer + *(buffer+0x0b)+0x11) +0x07))
528
529#define IS_IPA(buffer) \
530 ((buffer) && \
531 ( *(buffer + ((*(buffer+0x0b))+4) )==0xc1) )
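
PDU_ENCAPSULATION and IS_IPA chase length bytes embedded in the buffer itself. Unrolled, the pointer arithmetic of PDU_ENCAPSULATION reads as follows (offsets are exactly those hard-coded in the macro; their on-the-wire meaning is not documented here):

	/* Sketch: PDU_ENCAPSULATION unrolled into named steps. */
	static inline unsigned char *pdu_encapsulation(unsigned char *buffer)
	{
		unsigned char a = buffer[0x0b];
		unsigned char b = buffer[a + 0x11];

		return buffer + buffer[a + b + 0x07];
	}
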
532
533#define ADDR_FRAME_TYPE_DIX 1
534#define ADDR_FRAME_TYPE_802_3 2
535#define ADDR_FRAME_TYPE_TR_WITHOUT_SR 0x10
536#define ADDR_FRAME_TYPE_TR_WITH_SR 0x20
537
538#endif
diff --git a/drivers/s390/net/qeth_proc.c b/drivers/s390/net/qeth_proc.c
new file mode 100644
index 000000000000..04719196fd20
--- /dev/null
+++ b/drivers/s390/net/qeth_proc.c
@@ -0,0 +1,495 @@
1/*
2 *
3 * linux/drivers/s390/net/qeth_fs.c ($Revision: 1.13 $)
4 *
5 * Linux on zSeries OSA Express and HiperSockets support
6 * This file contains code related to procfs.
7 *
8 * Copyright 2000,2003 IBM Corporation
9 *
10 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
11 *
12 */
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/proc_fs.h>
16#include <linux/seq_file.h>
17#include <linux/list.h>
18#include <linux/rwsem.h>
19
20#include "qeth.h"
21#include "qeth_mpc.h"
22#include "qeth_fs.h"
23
24const char *VERSION_QETH_PROC_C = "$Revision: 1.13 $";
25
26/***** /proc/qeth *****/
27#define QETH_PROCFILE_NAME "qeth"
28static struct proc_dir_entry *qeth_procfile;
29
30static void *
31qeth_procfile_seq_start(struct seq_file *s, loff_t *offset)
32{
33 struct list_head *next_card = NULL;
34 int i = 0;
35
36 down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
37
38 if (*offset == 0)
39 return SEQ_START_TOKEN;
40
41 /* get card at pos *offset */
42 list_for_each(next_card, &qeth_ccwgroup_driver.driver.devices)
43 if (++i == *offset)
44 return next_card;
45
46 return NULL;
47}
48
49static void
50qeth_procfile_seq_stop(struct seq_file *s, void* it)
51{
52 up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
53}
54
55static void *
56qeth_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset)
57{
58 struct list_head *next_card = NULL;
59 struct list_head *current_card;
60
61 if (it == SEQ_START_TOKEN) {
62 next_card = qeth_ccwgroup_driver.driver.devices.next;
63 if (next_card->next == next_card) /* list empty */
64 return NULL;
65 (*offset)++;
66 } else {
67 current_card = (struct list_head *)it;
68 if (current_card->next == &qeth_ccwgroup_driver.driver.devices)
69 return NULL; /* end of list reached */
70 next_card = current_card->next;
71 (*offset)++;
72 }
73
74 return next_card;
75}
76
77static inline const char *
78qeth_get_router_str(struct qeth_card *card, int ipv)
79{
80 int routing_type = 0;
81
82 if (ipv == 4){
83 routing_type = card->options.route4.type;
84 } else {
85#ifdef CONFIG_QETH_IPV6
86 routing_type = card->options.route6.type;
87#else
88 return "n/a";
89#endif /* CONFIG_QETH_IPV6 */
90 }
91
92 if (routing_type == PRIMARY_ROUTER)
93 return "pri";
94 else if (routing_type == SECONDARY_ROUTER)
95 return "sec";
96 else if (routing_type == MULTICAST_ROUTER) {
97 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
98 return "mc+";
99 return "mc";
100 } else if (routing_type == PRIMARY_CONNECTOR) {
101 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
102 return "p+c";
103 return "p.c";
104 } else if (routing_type == SECONDARY_CONNECTOR) {
105 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
106 return "s+c";
107 return "s.c";
108 } else if (routing_type == NO_ROUTER)
109 return "no";
110 else
111 return "unk";
112}
113
114static int
115qeth_procfile_seq_show(struct seq_file *s, void *it)
116{
117 struct device *device;
118 struct qeth_card *card;
119 char tmp[12]; /* for qeth_get_prioq_str */
120
121 if (it == SEQ_START_TOKEN){
122 seq_printf(s, "devices CHPID interface "
123 "cardtype port chksum prio-q'ing rtr4 "
124 "rtr6 fsz cnt\n");
125 seq_printf(s, "-------------------------- ----- ---------- "
126 "-------------- ---- ------ ---------- ---- "
127 "---- ----- -----\n");
128 } else {
129 device = list_entry(it, struct device, driver_list);
130 card = device->driver_data;
131 seq_printf(s, "%s/%s/%s x%02X %-10s %-14s %-4i ",
132 CARD_RDEV_ID(card),
133 CARD_WDEV_ID(card),
134 CARD_DDEV_ID(card),
135 card->info.chpid,
136 QETH_CARD_IFNAME(card),
137 qeth_get_cardname_short(card),
138 card->info.portno);
139 if (card->lan_online)
140 seq_printf(s, "%-6s %-10s %-4s %-4s %-5s %-5i\n",
141 qeth_get_checksum_str(card),
142 qeth_get_prioq_str(card, tmp),
143 qeth_get_router_str(card, 4),
144 qeth_get_router_str(card, 6),
145 qeth_get_bufsize_str(card),
146 card->qdio.in_buf_pool.buf_count);
147 else
148 seq_printf(s, " +++ LAN OFFLINE +++\n");
149 }
150 return 0;
151}
152
153static struct seq_operations qeth_procfile_seq_ops = {
154 .start = qeth_procfile_seq_start,
155 .stop = qeth_procfile_seq_stop,
156 .next = qeth_procfile_seq_next,
157 .show = qeth_procfile_seq_show,
158};
159
160static int
161qeth_procfile_open(struct inode *inode, struct file *file)
162{
163 return seq_open(file, &qeth_procfile_seq_ops);
164}
165
166static struct file_operations qeth_procfile_fops = {
167 .owner = THIS_MODULE,
168 .open = qeth_procfile_open,
169 .read = seq_read,
170 .llseek = seq_lseek,
171 .release = seq_release,
172};
173
174/***** /proc/qeth_perf *****/
175#define QETH_PERF_PROCFILE_NAME "qeth_perf"
176static struct proc_dir_entry *qeth_perf_procfile;
177
178#ifdef CONFIG_QETH_PERF_STATS
179
180static void *
181qeth_perf_procfile_seq_start(struct seq_file *s, loff_t *offset)
182{
183 struct list_head *next_card = NULL;
184 int i = 0;
185
186 down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
187 /* get card at pos *offset */
188 list_for_each(next_card, &qeth_ccwgroup_driver.driver.devices){
189 if (i == *offset)
190 return next_card;
191 i++;
192 }
193 return NULL;
194}
195
196static void
197qeth_perf_procfile_seq_stop(struct seq_file *s, void* it)
198{
199 up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
200}
201
202static void *
203qeth_perf_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset)
204{
205 struct list_head *current_card = (struct list_head *)it;
206
207 if (current_card->next == &qeth_ccwgroup_driver.driver.devices)
208 return NULL; /* end of list reached */
209 (*offset)++;
210 return current_card->next;
211}
212
213static int
214qeth_perf_procfile_seq_show(struct seq_file *s, void *it)
215{
216 struct device *device;
217 struct qeth_card *card;
218
219 device = list_entry(it, struct device, driver_list);
220 card = device->driver_data;
221 seq_printf(s, "For card with devnos %s/%s/%s (%s):\n",
222 CARD_RDEV_ID(card),
223 CARD_WDEV_ID(card),
224 CARD_DDEV_ID(card),
225 QETH_CARD_IFNAME(card)
226 );
227 seq_printf(s, " Skb's/buffers received : %li/%i\n"
228 " Skb's/buffers sent : %li/%i\n\n",
229 card->stats.rx_packets, card->perf_stats.bufs_rec,
230 card->stats.tx_packets, card->perf_stats.bufs_sent
231 );
232 seq_printf(s, " Skb's/buffers sent without packing : %li/%i\n"
233 " Skb's/buffers sent with packing : %i/%i\n\n",
234 card->stats.tx_packets - card->perf_stats.skbs_sent_pack,
235 card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack,
236 card->perf_stats.skbs_sent_pack,
237 card->perf_stats.bufs_sent_pack
238 );
239 seq_printf(s, " Skbs sent in SG mode : %i\n"
240 " Skb fragments sent in SG mode : %i\n\n",
241 card->perf_stats.sg_skbs_sent,
242 card->perf_stats.sg_frags_sent);
243 seq_printf(s, " large_send tx (in Kbytes) : %i\n"
244 " large_send count : %i\n\n",
245 card->perf_stats.large_send_bytes >> 10,
246 card->perf_stats.large_send_cnt);
247 seq_printf(s, " Packing state changes no pkg.->packing : %i/%i\n"
248 " Watermarks L/H : %i/%i\n"
249 " Current buffer usage (outbound q's) : "
250 "%i/%i/%i/%i\n\n",
251 card->perf_stats.sc_dp_p, card->perf_stats.sc_p_dp,
252 QETH_LOW_WATERMARK_PACK, QETH_HIGH_WATERMARK_PACK,
253 atomic_read(&card->qdio.out_qs[0]->used_buffers),
254 (card->qdio.no_out_queues > 1)?
255 atomic_read(&card->qdio.out_qs[1]->used_buffers)
256 : 0,
257 (card->qdio.no_out_queues > 2)?
258 atomic_read(&card->qdio.out_qs[2]->used_buffers)
259 : 0,
260 (card->qdio.no_out_queues > 3)?
261 atomic_read(&card->qdio.out_qs[3]->used_buffers)
262 : 0
263 );
264 seq_printf(s, " Inbound handler time (in us) : %i\n"
265 " Inbound handler count : %i\n"
266 " Inbound do_QDIO time (in us) : %i\n"
267 " Inbound do_QDIO count : %i\n\n"
268 " Outbound handler time (in us) : %i\n"
269 " Outbound handler count : %i\n\n"
270 " Outbound time (in us, incl QDIO) : %i\n"
271 " Outbound count : %i\n"
272 " Outbound do_QDIO time (in us) : %i\n"
273 " Outbound do_QDIO count : %i\n\n",
274 card->perf_stats.inbound_time,
275 card->perf_stats.inbound_cnt,
276 card->perf_stats.inbound_do_qdio_time,
277 card->perf_stats.inbound_do_qdio_cnt,
278 card->perf_stats.outbound_handler_time,
279 card->perf_stats.outbound_handler_cnt,
280 card->perf_stats.outbound_time,
281 card->perf_stats.outbound_cnt,
282 card->perf_stats.outbound_do_qdio_time,
283 card->perf_stats.outbound_do_qdio_cnt
284 );
285 return 0;
286}
287
288static struct seq_operations qeth_perf_procfile_seq_ops = {
289 .start = qeth_perf_procfile_seq_start,
290 .stop = qeth_perf_procfile_seq_stop,
291 .next = qeth_perf_procfile_seq_next,
292 .show = qeth_perf_procfile_seq_show,
293};
294
295static int
296qeth_perf_procfile_open(struct inode *inode, struct file *file)
297{
298 return seq_open(file, &qeth_perf_procfile_seq_ops);
299}
300
301static struct file_operations qeth_perf_procfile_fops = {
302 .owner = THIS_MODULE,
303 .open = qeth_perf_procfile_open,
304 .read = seq_read,
305 .llseek = seq_lseek,
306 .release = seq_release,
307};
308
309#define qeth_perf_procfile_created qeth_perf_procfile
310#else
311#define qeth_perf_procfile_created 1
312#endif /* CONFIG_QETH_PERF_STATS */
313
314/***** /proc/qeth_ipa_takeover *****/
315#define QETH_IPATO_PROCFILE_NAME "qeth_ipa_takeover"
316static struct proc_dir_entry *qeth_ipato_procfile;
317
318static void *
319qeth_ipato_procfile_seq_start(struct seq_file *s, loff_t *offset)
320{
321 struct list_head *next_card = NULL;
322 int i = 0;
323
324 down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
325 /* TODO: finish this */
326 /*
327 * maybe SEQ_START_TOKEN can be returned for offset 0,
328 * outputting the driver settings then;
329 * else output the settings for the respective card
330 */
331 /* get card at pos *offset */
332 list_for_each(next_card, &qeth_ccwgroup_driver.driver.devices){
333 if (i == *offset)
334 return next_card;
335 i++;
336 }
337 return NULL;
338}
339
340static void
341qeth_ipato_procfile_seq_stop(struct seq_file *s, void* it)
342{
343 up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
344}
345
346static void *
347qeth_ipato_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset)
348{
349 struct list_head *current_card = (struct list_head *)it;
350
351 /* TODO: finish this */
352 /*
353 * maybe SEQ_START_TOKEN can be returned for offset 0,
354 * outputting the driver settings then;
355 * else output the settings for the respective card
356 */
357 if (current_card->next == &qeth_ccwgroup_driver.driver.devices)
358 return NULL; /* end of list reached */
359 (*offset)++;
360 return current_card->next;
361}
362
363static int
364qeth_ipato_procfile_seq_show(struct seq_file *s, void *it)
365{
366 struct device *device;
367 struct qeth_card *card;
368
369 /* TODO: finish this */
370 /*
371 * maybe SEQ_START_TOKEN can be returned for offset 0,
372 * outputting the driver settings then;
373 * else output the settings for the respective card
374 */
375 device = list_entry(it, struct device, driver_list);
376 card = device->driver_data;
377
378 return 0;
379}
380
381static struct seq_operations qeth_ipato_procfile_seq_ops = {
382 .start = qeth_ipato_procfile_seq_start,
383 .stop = qeth_ipato_procfile_seq_stop,
384 .next = qeth_ipato_procfile_seq_next,
385 .show = qeth_ipato_procfile_seq_show,
386};
387
388static int
389qeth_ipato_procfile_open(struct inode *inode, struct file *file)
390{
391 return seq_open(file, &qeth_ipato_procfile_seq_ops);
392}
393
394static struct file_operations qeth_ipato_procfile_fops = {
395 .owner = THIS_MODULE,
396 .open = qeth_ipato_procfile_open,
397 .read = seq_read,
398 .llseek = seq_lseek,
399 .release = seq_release,
400};
401
402int __init
403qeth_create_procfs_entries(void)
404{
405 qeth_procfile = create_proc_entry(QETH_PROCFILE_NAME,
406 S_IFREG | 0444, NULL);
407 if (qeth_procfile)
408 qeth_procfile->proc_fops = &qeth_procfile_fops;
409
410#ifdef CONFIG_QETH_PERF_STATS
411 qeth_perf_procfile = create_proc_entry(QETH_PERF_PROCFILE_NAME,
412 S_IFREG | 0444, NULL);
413 if (qeth_perf_procfile)
414 qeth_perf_procfile->proc_fops = &qeth_perf_procfile_fops;
415#endif /* CONFIG_QETH_PERF_STATS */
416
417 qeth_ipato_procfile = create_proc_entry(QETH_IPATO_PROCFILE_NAME,
418 S_IFREG | 0444, NULL);
419 if (qeth_ipato_procfile)
420 qeth_ipato_procfile->proc_fops = &qeth_ipato_procfile_fops;
421
422 if (qeth_procfile &&
423 qeth_ipato_procfile &&
424 qeth_perf_procfile_created)
425 return 0;
426 else
427 return -ENOMEM;
428}
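
create_proc_entry() publishes the file before proc_fops is assigned, so a reader racing with module load could briefly see an entry without operations. Later kernels close this gap with proc_create(), which does not exist in 2.6.12; purely as a forward-looking sketch:

	/* Sketch (later-kernel API): entry and fops published atomically. */
	qeth_procfile = proc_create(QETH_PROCFILE_NAME, S_IFREG | 0444,
				    NULL, &qeth_procfile_fops);
	if (!qeth_procfile)
		return -ENOMEM;
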
429
430void __exit
431qeth_remove_procfs_entries(void)
432{
433 if (qeth_procfile)
434 remove_proc_entry(QETH_PROCFILE_NAME, NULL);
435 if (qeth_perf_procfile)
436 remove_proc_entry(QETH_PERF_PROCFILE_NAME, NULL);
437 if (qeth_ipato_procfile)
438 remove_proc_entry(QETH_IPATO_PROCFILE_NAME, NULL);
439}
440
441
442/* ONLY FOR DEVELOPMENT! -> build it as a module */
443/*
444static void
445qeth_create_sysfs_entries(void)
446{
447 struct device *dev;
448
449 down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
450
451 list_for_each_entry(dev, &qeth_ccwgroup_driver.driver.devices,
452 driver_list)
453 qeth_create_device_attributes(dev);
454
455 up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
456}
457
458static void
459qeth_remove_sysfs_entries(void)
460{
461 struct device *dev;
462
463 down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
464
465 list_for_each_entry(dev, &qeth_ccwgroup_driver.driver.devices,
466 driver_list)
467 qeth_remove_device_attributes(dev);
468
469 up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
470}
471
472static int __init
473qeth_fs_init(void)
474{
475 printk(KERN_INFO "qeth_fs_init\n");
476 qeth_create_procfs_entries();
477 qeth_create_sysfs_entries();
478
479 return 0;
480}
481
482static void __exit
483qeth_fs_exit(void)
484{
485 printk(KERN_INFO "qeth_fs_exit\n");
486 qeth_remove_procfs_entries();
487 qeth_remove_sysfs_entries();
488}
489
490
491module_init(qeth_fs_init);
492module_exit(qeth_fs_exit);
493
494MODULE_LICENSE("GPL");
495*/
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
new file mode 100644
index 000000000000..240348398211
--- /dev/null
+++ b/drivers/s390/net/qeth_sys.c
@@ -0,0 +1,1788 @@
1/*
2 *
3 * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.51 $)
4 *
5 * Linux on zSeries OSA Express and HiperSockets support
6 * This file contains code related to sysfs.
7 *
8 * Copyright 2000,2003 IBM Corporation
9 *
10 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
11 * Frank Pavlic <pavlic@de.ibm.com>
12 *
13 */
14#include <linux/list.h>
15#include <linux/rwsem.h>
16
17#include <asm/ebcdic.h>
18
19#include "qeth.h"
20#include "qeth_mpc.h"
21#include "qeth_fs.h"
22
23const char *VERSION_QETH_SYS_C = "$Revision: 1.51 $";
24
25/*****************************************************************************/
26/* */
27/* /sys-fs stuff UNDER DEVELOPMENT !!! */
28/* */
29/*****************************************************************************/
30//low/high watermark
31
32static ssize_t
33qeth_dev_state_show(struct device *dev, char *buf)
34{
35 struct qeth_card *card = dev->driver_data;
36 if (!card)
37 return -EINVAL;
38
39 switch (card->state) {
40 case CARD_STATE_DOWN:
41 return sprintf(buf, "DOWN\n");
42 case CARD_STATE_HARDSETUP:
43 return sprintf(buf, "HARDSETUP\n");
44 case CARD_STATE_SOFTSETUP:
45 return sprintf(buf, "SOFTSETUP\n");
46 case CARD_STATE_UP:
47 if (card->lan_online)
48 return sprintf(buf, "UP (LAN ONLINE)\n");
49 else
50 return sprintf(buf, "UP (LAN OFFLINE)\n");
51 case CARD_STATE_RECOVER:
52 return sprintf(buf, "RECOVER\n");
53 default:
54 return sprintf(buf, "UNKNOWN\n");
55 }
56}
57
58static DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL);
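
DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL) expands to a struct device_attribute named dev_attr_state, which is what the attribute-group code later registers. Roughly (the 2.6-era macro also sets .attr.owner = THIS_MODULE):

	/* Approximate expansion of the DEVICE_ATTR() line above. */
	static struct device_attribute dev_attr_state = {
		.attr	= { .name = "state", .mode = 0444 },
		.show	= qeth_dev_state_show,
		.store	= NULL,
	};
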
59
60static ssize_t
61qeth_dev_chpid_show(struct device *dev, char *buf)
62{
63 struct qeth_card *card = dev->driver_data;
64 if (!card)
65 return -EINVAL;
66
67 return sprintf(buf, "%02X\n", card->info.chpid);
68}
69
70static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL);
71
72static ssize_t
73qeth_dev_if_name_show(struct device *dev, char *buf)
74{
75 struct qeth_card *card = dev->driver_data;
76 if (!card)
77 return -EINVAL;
78 return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
79}
80
81static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
82
83static ssize_t
84qeth_dev_card_type_show(struct device *dev, char *buf)
85{
86 struct qeth_card *card = dev->driver_data;
87 if (!card)
88 return -EINVAL;
89
90 return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
91}
92
93static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL);
94
95static ssize_t
96qeth_dev_portno_show(struct device *dev, char *buf)
97{
98 struct qeth_card *card = dev->driver_data;
99 if (!card)
100 return -EINVAL;
101
102 return sprintf(buf, "%i\n", card->info.portno);
103}
104
105static ssize_t
106qeth_dev_portno_store(struct device *dev, const char *buf, size_t count)
107{
108 struct qeth_card *card = dev->driver_data;
109 char *tmp;
110 unsigned int portno;
111
112 if (!card)
113 return -EINVAL;
114
115 if ((card->state != CARD_STATE_DOWN) &&
116 (card->state != CARD_STATE_RECOVER))
117 return -EPERM;
118
119 portno = simple_strtoul(buf, &tmp, 16);
120 if (portno > MAX_PORTNO){ /* portno is unsigned; < 0 can never hold */
121 PRINT_WARN("portno 0x%X is out of range\n", portno);
122 return -EINVAL;
123 }
124
125 card->info.portno = portno;
126 return count;
127}
128
129static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
130
131static ssize_t
132qeth_dev_portname_show(struct device *dev, char *buf)
133{
134 struct qeth_card *card = dev->driver_data;
135 char portname[9] = {0, };
136
137 if (!card)
138 return -EINVAL;
139
140 if (card->info.portname_required) {
141 memcpy(portname, card->info.portname + 1, 8);
142 EBCASC(portname, 8);
143 return sprintf(buf, "%s\n", portname);
144 } else
145 return sprintf(buf, "no portname required\n");
146}
147
148static ssize_t
149qeth_dev_portname_store(struct device *dev, const char *buf, size_t count)
150{
151 struct qeth_card *card = dev->driver_data;
152 char *tmp;
153 int i;
154
155 if (!card)
156 return -EINVAL;
157
158 if ((card->state != CARD_STATE_DOWN) &&
159 (card->state != CARD_STATE_RECOVER))
160 return -EPERM;
161
162 tmp = strsep((char **) &buf, "\n");
163 if ((strlen(tmp) > 8) || (strlen(tmp) < 2))
164 return -EINVAL;
165
166 card->info.portname[0] = strlen(tmp);
167 /* pad with blanks for neat formatting */
168 for (i = 1; i < 9; i++)
169 card->info.portname[i] = ' ';
170 strcpy(card->info.portname + 1, tmp);
171 ASCEBC(card->info.portname + 1, 8);
172
173 return count;
174}
175
176static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show,
177 qeth_dev_portname_store);
178
179static ssize_t
180qeth_dev_checksum_show(struct device *dev, char *buf)
181{
182 struct qeth_card *card = dev->driver_data;
183
184 if (!card)
185 return -EINVAL;
186
187 return sprintf(buf, "%s checksumming\n", qeth_get_checksum_str(card));
188}
189
190static ssize_t
191qeth_dev_checksum_store(struct device *dev, const char *buf, size_t count)
192{
193 struct qeth_card *card = dev->driver_data;
194 char *tmp;
195
196 if (!card)
197 return -EINVAL;
198
199 if ((card->state != CARD_STATE_DOWN) &&
200 (card->state != CARD_STATE_RECOVER))
201 return -EPERM;
202
203 tmp = strsep((char **) &buf, "\n");
204 if (!strcmp(tmp, "sw_checksumming"))
205 card->options.checksum_type = SW_CHECKSUMMING;
206 else if (!strcmp(tmp, "hw_checksumming"))
207 card->options.checksum_type = HW_CHECKSUMMING;
208 else if (!strcmp(tmp, "no_checksumming"))
209 card->options.checksum_type = NO_CHECKSUMMING;
210 else {
211 PRINT_WARN("Unknown checksumming type '%s'\n", tmp);
212 return -EINVAL;
213 }
214 return count;
215}
216
217static DEVICE_ATTR(checksumming, 0644, qeth_dev_checksum_show,
218 qeth_dev_checksum_store);
219
220static ssize_t
221qeth_dev_prioqing_show(struct device *dev, char *buf)
222{
223 struct qeth_card *card = dev->driver_data;
224
225 if (!card)
226 return -EINVAL;
227
228 switch (card->qdio.do_prio_queueing) {
229 case QETH_PRIO_Q_ING_PREC:
230 return sprintf(buf, "%s\n", "by precedence");
231 case QETH_PRIO_Q_ING_TOS:
232 return sprintf(buf, "%s\n", "by type of service");
233 default:
234 return sprintf(buf, "always queue %i\n",
235 card->qdio.default_out_queue);
236 }
237}
238
239static ssize_t
240qeth_dev_prioqing_store(struct device *dev, const char *buf, size_t count)
241{
242 struct qeth_card *card = dev->driver_data;
243 char *tmp;
244
245 if (!card)
246 return -EINVAL;
247
248 if ((card->state != CARD_STATE_DOWN) &&
249 (card->state != CARD_STATE_RECOVER))
250 return -EPERM;
251
252 /* check whether the device supports multiple outbound queues;
253 * if not (e.g. on 1920 devices), priority queueing must not be permitted
254 */
255 if (card->qdio.no_out_queues == 1) {
256 PRINT_WARN("Priority queueing disabled due "
257 "to hardware limitations!\n");
258 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
259 return -EPERM;
260 }
261
262 tmp = strsep((char **) &buf, "\n");
263 if (!strcmp(tmp, "prio_queueing_prec"))
264 card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
265 else if (!strcmp(tmp, "prio_queueing_tos"))
266 card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
267 else if (!strcmp(tmp, "no_prio_queueing:0")) {
268 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
269 card->qdio.default_out_queue = 0;
270 } else if (!strcmp(tmp, "no_prio_queueing:1")) {
271 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
272 card->qdio.default_out_queue = 1;
273 } else if (!strcmp(tmp, "no_prio_queueing:2")) {
274 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
275 card->qdio.default_out_queue = 2;
276 } else if (!strcmp(tmp, "no_prio_queueing:3")) {
277 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
278 card->qdio.default_out_queue = 3;
279 } else if (!strcmp(tmp, "no_prio_queueing")) {
280 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
281 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
282 } else {
283 PRINT_WARN("Unknown queueing type '%s'\n", tmp);
284 return -EINVAL;
285 }
286 return count;
287}
288
289static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show,
290 qeth_dev_prioqing_store);
291
292static ssize_t
293qeth_dev_bufcnt_show(struct device *dev, char *buf)
294{
295 struct qeth_card *card = dev->driver_data;
296
297 if (!card)
298 return -EINVAL;
299
300 return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
301}
302
303static ssize_t
304qeth_dev_bufcnt_store(struct device *dev, const char *buf, size_t count)
305{
306 struct qeth_card *card = dev->driver_data;
307 char *tmp;
308 int cnt, old_cnt;
309 int rc;
310
311 if (!card)
312 return -EINVAL;
313
314 if ((card->state != CARD_STATE_DOWN) &&
315 (card->state != CARD_STATE_RECOVER))
316 return -EPERM;
317
318 old_cnt = card->qdio.in_buf_pool.buf_count;
319 cnt = simple_strtoul(buf, &tmp, 10);
320 cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
321 ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
322 if (old_cnt != cnt) {
323 if ((rc = qeth_realloc_buffer_pool(card, cnt)))
324 PRINT_WARN("Error (%d) while setting "
325 "buffer count.\n", rc);
326 }
327 return count;
328}
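
The nested conditional expression above clamps cnt into the range [QETH_IN_BUF_COUNT_MIN, QETH_IN_BUF_COUNT_MAX]. With the kernel's type-checked helpers from linux/kernel.h the same clamp reads:

	/* Equivalent clamp using min_t/max_t. */
	cnt = max_t(int, QETH_IN_BUF_COUNT_MIN,
		    min_t(int, cnt, QETH_IN_BUF_COUNT_MAX));
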
329
330static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
331 qeth_dev_bufcnt_store);
332
333static inline ssize_t
334qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route,
335 char *buf)
336{
337 switch (route->type) {
338 case PRIMARY_ROUTER:
339 return sprintf(buf, "%s\n", "primary router");
340 case SECONDARY_ROUTER:
341 return sprintf(buf, "%s\n", "secondary router");
342 case MULTICAST_ROUTER:
343 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
344 return sprintf(buf, "%s\n", "multicast router+");
345 else
346 return sprintf(buf, "%s\n", "multicast router");
347 case PRIMARY_CONNECTOR:
348 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
349 return sprintf(buf, "%s\n", "primary connector+");
350 else
351 return sprintf(buf, "%s\n", "primary connector");
352 case SECONDARY_CONNECTOR:
353 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
354 return sprintf(buf, "%s\n", "secondary connector+");
355 else
356 return sprintf(buf, "%s\n", "secondary connector");
357 default:
358 return sprintf(buf, "%s\n", "no");
359 }
360}
361
362static ssize_t
363qeth_dev_route4_show(struct device *dev, char *buf)
364{
365 struct qeth_card *card = dev->driver_data;
366
367 if (!card)
368 return -EINVAL;
369
370 return qeth_dev_route_show(card, &card->options.route4, buf);
371}
372
373static inline ssize_t
374qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route,
375 enum qeth_prot_versions prot, const char *buf, size_t count)
376{
377 enum qeth_routing_types old_route_type = route->type;
378 char *tmp;
379 int rc;
380
381 tmp = strsep((char **) &buf, "\n");
382
383 if (!strcmp(tmp, "no_router")){
384 route->type = NO_ROUTER;
385 } else if (!strcmp(tmp, "primary_connector")) {
386 route->type = PRIMARY_CONNECTOR;
387 } else if (!strcmp(tmp, "secondary_connector")) {
388 route->type = SECONDARY_CONNECTOR;
389 } else if (!strcmp(tmp, "multicast_router")) {
390 route->type = MULTICAST_ROUTER;
391 } else if (!strcmp(tmp, "primary_router")) {
392 route->type = PRIMARY_ROUTER;
393 } else if (!strcmp(tmp, "secondary_router")) {
394 route->type = SECONDARY_ROUTER;
395 } else if (!strcmp(tmp, "multicast_router")) {
396 route->type = MULTICAST_ROUTER;
397 } else {
398 PRINT_WARN("Invalid routing type '%s'.\n", tmp);
399 return -EINVAL;
400 }
401 if (((card->state == CARD_STATE_SOFTSETUP) ||
402 (card->state == CARD_STATE_UP)) &&
403 (old_route_type != route->type)){
404 if (prot == QETH_PROT_IPV4)
405 rc = qeth_setrouting_v4(card);
406 else if (prot == QETH_PROT_IPV6)
407 rc = qeth_setrouting_v6(card);
408 }
409 return count;
410}
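
Note that rc in qeth_dev_route_store() is assigned but never examined, so a failed qeth_setrouting_v4/v6() call is silently reported as success to the writer. A stricter ending for the function, assuming rc is initialized to 0, would be:

	/* Sketch: propagate a routing-setup failure instead of dropping rc. */
	if (rc)
		return rc;
	return count;
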
411
412static ssize_t
413qeth_dev_route4_store(struct device *dev, const char *buf, size_t count)
414{
415 struct qeth_card *card = dev->driver_data;
416
417 if (!card)
418 return -EINVAL;
419
420 return qeth_dev_route_store(card, &card->options.route4,
421 QETH_PROT_IPV4, buf, count);
422}
423
424static DEVICE_ATTR(route4, 0644, qeth_dev_route4_show, qeth_dev_route4_store);
425
426#ifdef CONFIG_QETH_IPV6
427static ssize_t
428qeth_dev_route6_show(struct device *dev, char *buf)
429{
430 struct qeth_card *card = dev->driver_data;
431
432 if (!card)
433 return -EINVAL;
434
435 if (!qeth_is_supported(card, IPA_IPV6))
436 return sprintf(buf, "%s\n", "n/a");
437
438 return qeth_dev_route_show(card, &card->options.route6, buf);
439}
440
441static ssize_t
442qeth_dev_route6_store(struct device *dev, const char *buf, size_t count)
443{
444 struct qeth_card *card = dev->driver_data;
445
446 if (!card)
447 return -EINVAL;
448
449 if (!qeth_is_supported(card, IPA_IPV6)){
450 PRINT_WARN("IPv6 not supported for interface %s.\n"
451 "Routing status no changed.\n",
452 QETH_CARD_IFNAME(card));
453 return -ENOTSUPP;
454 }
455
456 return qeth_dev_route_store(card, &card->options.route6,
457 QETH_PROT_IPV6, buf, count);
458}
459
460static DEVICE_ATTR(route6, 0644, qeth_dev_route6_show, qeth_dev_route6_store);
461#endif
462
463static ssize_t
464qeth_dev_add_hhlen_show(struct device *dev, char *buf)
465{
466 struct qeth_card *card = dev->driver_data;
467
468 if (!card)
469 return -EINVAL;
470
471 return sprintf(buf, "%i\n", card->options.add_hhlen);
472}
473
474static ssize_t
475qeth_dev_add_hhlen_store(struct device *dev, const char *buf, size_t count)
476{
477 struct qeth_card *card = dev->driver_data;
478 char *tmp;
479 int i;
480
481 if (!card)
482 return -EINVAL;
483
484 if ((card->state != CARD_STATE_DOWN) &&
485 (card->state != CARD_STATE_RECOVER))
486 return -EPERM;
487
488 i = simple_strtoul(buf, &tmp, 10);
489 if ((i < 0) || (i > MAX_ADD_HHLEN)) {
490 PRINT_WARN("add_hhlen out of range\n");
491 return -EINVAL;
492 }
493 card->options.add_hhlen = i;
494
495 return count;
496}
497
498static DEVICE_ATTR(add_hhlen, 0644, qeth_dev_add_hhlen_show,
499 qeth_dev_add_hhlen_store);
500
501static ssize_t
502qeth_dev_fake_ll_show(struct device *dev, char *buf)
503{
504 struct qeth_card *card = dev->driver_data;
505
506 if (!card)
507 return -EINVAL;
508
509 return sprintf(buf, "%i\n", card->options.fake_ll? 1:0);
510}
511
512static ssize_t
513qeth_dev_fake_ll_store(struct device *dev, const char *buf, size_t count)
514{
515 struct qeth_card *card = dev->driver_data;
516 char *tmp;
517 int i;
518
519 if (!card)
520 return -EINVAL;
521
522 if ((card->state != CARD_STATE_DOWN) &&
523 (card->state != CARD_STATE_RECOVER))
524 return -EPERM;
525
526 i = simple_strtoul(buf, &tmp, 16);
527 if ((i != 0) && (i != 1)) {
528 PRINT_WARN("fake_ll: write 0 or 1 to this file!\n");
529 return -EINVAL;
530 }
531 card->options.fake_ll = i;
532 return count;
533}
534
535static DEVICE_ATTR(fake_ll, 0644, qeth_dev_fake_ll_show,
536 qeth_dev_fake_ll_store);
537
538static ssize_t
539qeth_dev_fake_broadcast_show(struct device *dev, char *buf)
540{
541 struct qeth_card *card = dev->driver_data;
542
543 if (!card)
544 return -EINVAL;
545
546 return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0);
547}
548
549static ssize_t
550qeth_dev_fake_broadcast_store(struct device *dev, const char *buf, size_t count)
551{
552 struct qeth_card *card = dev->driver_data;
553 char *tmp;
554 int i;
555
556 if (!card)
557 return -EINVAL;
558
559 if ((card->state != CARD_STATE_DOWN) &&
560 (card->state != CARD_STATE_RECOVER))
561 return -EPERM;
562
563 i = simple_strtoul(buf, &tmp, 16);
564 if ((i == 0) || (i == 1))
565 card->options.fake_broadcast = i;
566 else {
567 PRINT_WARN("fake_broadcast: write 0 or 1 to this file!\n");
568 return -EINVAL;
569 }
570 return count;
571}
572
573static DEVICE_ATTR(fake_broadcast, 0644, qeth_dev_fake_broadcast_show,
574 qeth_dev_fake_broadcast_store);
575
576static ssize_t
577qeth_dev_recover_store(struct device *dev, const char *buf, size_t count)
578{
579 struct qeth_card *card = dev->driver_data;
580 char *tmp;
581 int i;
582
583 if (!card)
584 return -EINVAL;
585
586 if (card->state != CARD_STATE_UP)
587 return -EPERM;
588
589 i = simple_strtoul(buf, &tmp, 16);
590 if (i == 1)
591 qeth_schedule_recovery(card);
592
593 return count;
594}
595
596static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
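/*
 * A usage sketch: with the interface up, recovery can be triggered from
 * user space through the write-only attribute above. The sysfs path and
 * bus ID below are placeholders; the actual location depends on where
 * the ccwgroup device is attached.
 *
 *	echo 1 > /sys/devices/qeth/0.0.4711/recover
 */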
597
598static ssize_t
599qeth_dev_broadcast_mode_show(struct device *dev, char *buf)
600{
601 struct qeth_card *card = dev->driver_data;
602
603 if (!card)
604 return -EINVAL;
605
606 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
607 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
608 return sprintf(buf, "n/a\n");
609
610 return sprintf(buf, "%s\n", (card->options.broadcast_mode ==
611 QETH_TR_BROADCAST_ALLRINGS)?
612 "all rings":"local");
613}
614
615static ssize_t
616qeth_dev_broadcast_mode_store(struct device *dev, const char *buf, size_t count)
617{
618 struct qeth_card *card = dev->driver_data;
619 char *tmp;
620
621 if (!card)
622 return -EINVAL;
623
624 if ((card->state != CARD_STATE_DOWN) &&
625 (card->state != CARD_STATE_RECOVER))
626 return -EPERM;
627
628 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
629 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))){
630		PRINT_WARN("Device is not a token ring device!\n");
631 return -EINVAL;
632 }
633
634 tmp = strsep((char **) &buf, "\n");
635
636 if (!strcmp(tmp, "local")){
637 card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL;
638 return count;
639 } else if (!strcmp(tmp, "all_rings")) {
640 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
641 return count;
642 } else {
643 PRINT_WARN("broadcast_mode: invalid mode %s!\n",
644 tmp);
645 return -EINVAL;
646 }
647 return count;
648}
649
650static DEVICE_ATTR(broadcast_mode, 0644, qeth_dev_broadcast_mode_show,
651 qeth_dev_broadcast_mode_store);
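/*
 * A usage sketch (placeholder path): on a token ring link the broadcast
 * mode can be switched while the device is offline:
 *
 *	echo all_rings > /sys/devices/qeth/0.0.4711/broadcast_mode
 *	echo local > /sys/devices/qeth/0.0.4711/broadcast_mode
 */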
652
653static ssize_t
654qeth_dev_canonical_macaddr_show(struct device *dev, char *buf)
655{
656 struct qeth_card *card = dev->driver_data;
657
658 if (!card)
659 return -EINVAL;
660
661 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
662 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
663 return sprintf(buf, "n/a\n");
664
665 return sprintf(buf, "%i\n", (card->options.macaddr_mode ==
666 QETH_TR_MACADDR_CANONICAL)? 1:0);
667}
668
669static ssize_t
670qeth_dev_canonical_macaddr_store(struct device *dev, const char *buf,
671 size_t count)
672{
673 struct qeth_card *card = dev->driver_data;
674 char *tmp;
675 int i;
676
677 if (!card)
678 return -EINVAL;
679
680 if ((card->state != CARD_STATE_DOWN) &&
681 (card->state != CARD_STATE_RECOVER))
682 return -EPERM;
683
684 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
685 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))){
686		PRINT_WARN("Device is not a token ring device!\n");
687 return -EINVAL;
688 }
689
690 i = simple_strtoul(buf, &tmp, 16);
691 if ((i == 0) || (i == 1))
692 card->options.macaddr_mode = i?
693 QETH_TR_MACADDR_CANONICAL :
694 QETH_TR_MACADDR_NONCANONICAL;
695 else {
696 PRINT_WARN("canonical_macaddr: write 0 or 1 to this file!\n");
697 return -EINVAL;
698 }
699 return count;
700}
701
702static DEVICE_ATTR(canonical_macaddr, 0644, qeth_dev_canonical_macaddr_show,
703 qeth_dev_canonical_macaddr_store);
704
705static ssize_t
706qeth_dev_layer2_show(struct device *dev, char *buf)
707{
708 struct qeth_card *card = dev->driver_data;
709
710 if (!card)
711 return -EINVAL;
712
713 return sprintf(buf, "%i\n", card->options.layer2 ? 1:0);
714}
715
716static ssize_t
717qeth_dev_layer2_store(struct device *dev, const char *buf, size_t count)
718{
719 struct qeth_card *card = dev->driver_data;
720 char *tmp;
721 int i;
722
723 if (!card)
724 return -EINVAL;
725
726 if (((card->state != CARD_STATE_DOWN) &&
727 (card->state != CARD_STATE_RECOVER)) ||
728 (card->info.type != QETH_CARD_TYPE_OSAE))
729 return -EPERM;
730
731 i = simple_strtoul(buf, &tmp, 16);
732 if ((i == 0) || (i == 1))
733 card->options.layer2 = i;
734 else {
735 PRINT_WARN("layer2: write 0 or 1 to this file!\n");
736 return -EINVAL;
737 }
738 return count;
739}
740
741static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
742 qeth_dev_layer2_store);
743
744static ssize_t
745qeth_dev_large_send_show(struct device *dev, char *buf)
746{
747 struct qeth_card *card = dev->driver_data;
748
749 if (!card)
750 return -EINVAL;
751
752 switch (card->options.large_send) {
753 case QETH_LARGE_SEND_NO:
754 return sprintf(buf, "%s\n", "no");
755 case QETH_LARGE_SEND_EDDP:
756 return sprintf(buf, "%s\n", "EDDP");
757 case QETH_LARGE_SEND_TSO:
758 return sprintf(buf, "%s\n", "TSO");
759 default:
760 return sprintf(buf, "%s\n", "N/A");
761 }
762}
763
764static ssize_t
765qeth_dev_large_send_store(struct device *dev, const char *buf, size_t count)
766{
767 struct qeth_card *card = dev->driver_data;
768 enum qeth_large_send_types type;
769 int rc = 0;
770 char *tmp;
771
772 if (!card)
773 return -EINVAL;
774
775 tmp = strsep((char **) &buf, "\n");
776
777 if (!strcmp(tmp, "no")){
778 type = QETH_LARGE_SEND_NO;
779 } else if (!strcmp(tmp, "EDDP")) {
780 type = QETH_LARGE_SEND_EDDP;
781 } else if (!strcmp(tmp, "TSO")) {
782 type = QETH_LARGE_SEND_TSO;
783 } else {
784 PRINT_WARN("large_send: invalid mode %s!\n", tmp);
785 return -EINVAL;
786 }
787 if (card->options.large_send == type)
788 return count;
789 card->options.large_send = type;
790 if ((rc = qeth_set_large_send(card)))
791 return rc;
792
793 return count;
794}
795
796static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show,
797 qeth_dev_large_send_store);
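/*
 * A usage sketch (placeholder path): the large send mode is selected by
 * writing one of the literal names accepted by the store function above:
 *
 *	echo TSO > /sys/devices/qeth/0.0.4711/large_send
 *	echo no > /sys/devices/qeth/0.0.4711/large_send
 */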
798
799static ssize_t
800qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
801{
802
803 if (!card)
804 return -EINVAL;
805
806 return sprintf(buf, "%i\n", value);
807}
808
809static ssize_t
810qeth_dev_blkt_store(struct qeth_card *card, const char *buf, size_t count,
811 int *value, int max_value)
812{
813 char *tmp;
814 int i;
815
816 if (!card)
817 return -EINVAL;
818
819 if ((card->state != CARD_STATE_DOWN) &&
820 (card->state != CARD_STATE_RECOVER))
821 return -EPERM;
822
823 i = simple_strtoul(buf, &tmp, 10);
824 if (i <= max_value) {
825 *value = i;
826 } else {
827		PRINT_WARN("blkt: write values between"
828			   " 0 and %d to this file!\n", max_value);
829 return -EINVAL;
830 }
831 return count;
832}
833
834static ssize_t
835qeth_dev_blkt_total_show(struct device *dev, char *buf)
836{
837 struct qeth_card *card = dev->driver_data;
838
839 return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total);
840}
841
842
843static ssize_t
844qeth_dev_blkt_total_store(struct device *dev, const char *buf, size_t count)
845{
846 struct qeth_card *card = dev->driver_data;
847
848 return qeth_dev_blkt_store(card, buf, count,
849				&card->info.blkt.time_total, 1000);
850}
851
852
853
854static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show,
855 qeth_dev_blkt_total_store);
856
857static ssize_t
858qeth_dev_blkt_inter_show(struct device *dev, char *buf)
859{
860 struct qeth_card *card = dev->driver_data;
861
862 return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet);
863}
864
865
866static ssize_t
867qeth_dev_blkt_inter_store(struct device *dev, const char *buf, size_t count)
868{
869 struct qeth_card *card = dev->driver_data;
870
871 return qeth_dev_blkt_store(card, buf, count,
872				&card->info.blkt.inter_packet, 100);
873}
874
875static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
876 qeth_dev_blkt_inter_store);
877
878static ssize_t
879qeth_dev_blkt_inter_jumbo_show(struct device *dev, char *buf)
880{
881 struct qeth_card *card = dev->driver_data;
882
883 return qeth_dev_blkt_show(buf, card,
884 card->info.blkt.inter_packet_jumbo);
885}
886
887
888static ssize_t
889qeth_dev_blkt_inter_jumbo_store(struct device *dev, const char *buf, size_t count)
890{
891 struct qeth_card *card = dev->driver_data;
892
893 return qeth_dev_blkt_store(card, buf, count,
894				&card->info.blkt.inter_packet_jumbo, 100);
895}
896
897static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
898 qeth_dev_blkt_inter_jumbo_store);
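/*
 * A tuning sketch (placeholder path): the blkt group exposes the three
 * blocking times set up above; total accepts 0..1000, the inter-packet
 * values accept 0..100:
 *
 *	echo 250 > /sys/devices/qeth/0.0.4711/blkt/total
 *	echo 5 > /sys/devices/qeth/0.0.4711/blkt/inter
 */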
899
900static struct device_attribute * qeth_blkt_device_attrs[] = {
901 &dev_attr_total,
902 &dev_attr_inter,
903 &dev_attr_inter_jumbo,
904 NULL,
905};
906
907static struct attribute_group qeth_device_blkt_group = {
908 .name = "blkt",
909 .attrs = (struct attribute **)qeth_blkt_device_attrs,
910};
911
912static struct device_attribute * qeth_device_attrs[] = {
913 &dev_attr_state,
914 &dev_attr_chpid,
915 &dev_attr_if_name,
916 &dev_attr_card_type,
917 &dev_attr_portno,
918 &dev_attr_portname,
919 &dev_attr_checksumming,
920 &dev_attr_priority_queueing,
921 &dev_attr_buffer_count,
922 &dev_attr_route4,
923#ifdef CONFIG_QETH_IPV6
924 &dev_attr_route6,
925#endif
926 &dev_attr_add_hhlen,
927 &dev_attr_fake_ll,
928 &dev_attr_fake_broadcast,
929 &dev_attr_recover,
930 &dev_attr_broadcast_mode,
931 &dev_attr_canonical_macaddr,
932 &dev_attr_layer2,
933 &dev_attr_large_send,
934 NULL,
935};
936
937static struct attribute_group qeth_device_attr_group = {
938 .attrs = (struct attribute **)qeth_device_attrs,
939};
940
941
942#define QETH_DEVICE_ATTR(_id,_name,_mode,_show,_store) \
943struct device_attribute dev_attr_##_id = { \
944 .attr = {.name=__stringify(_name), .mode=_mode, .owner=THIS_MODULE },\
945 .show = _show, \
946 .store = _store, \
947};
948
949int
950qeth_check_layer2(struct qeth_card *card)
951{
952 if (card->options.layer2)
953 return -EPERM;
954 return 0;
955}
956
957
958static ssize_t
959qeth_dev_ipato_enable_show(struct device *dev, char *buf)
960{
961 struct qeth_card *card = dev->driver_data;
962
963 if (!card)
964 return -EINVAL;
965
966 if (qeth_check_layer2(card))
967 return -EPERM;
968 return sprintf(buf, "%i\n", card->ipato.enabled? 1:0);
969}
970
971static ssize_t
972qeth_dev_ipato_enable_store(struct device *dev, const char *buf, size_t count)
973{
974 struct qeth_card *card = dev->driver_data;
975 char *tmp;
976
977 if (!card)
978 return -EINVAL;
979
980 if ((card->state != CARD_STATE_DOWN) &&
981 (card->state != CARD_STATE_RECOVER))
982 return -EPERM;
983
984 if (qeth_check_layer2(card))
985 return -EPERM;
986
987 tmp = strsep((char **) &buf, "\n");
988 if (!strcmp(tmp, "toggle")){
989 card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
990 } else if (!strcmp(tmp, "1")){
991 card->ipato.enabled = 1;
992 } else if (!strcmp(tmp, "0")){
993 card->ipato.enabled = 0;
994 } else {
995 PRINT_WARN("ipato_enable: write 0, 1 or 'toggle' to "
996 "this file\n");
997 return -EINVAL;
998 }
999 return count;
1000}
1001
1002static QETH_DEVICE_ATTR(ipato_enable, enable, 0644,
1003 qeth_dev_ipato_enable_show,
1004 qeth_dev_ipato_enable_store);
1005
1006static ssize_t
1007qeth_dev_ipato_invert4_show(struct device *dev, char *buf)
1008{
1009 struct qeth_card *card = dev->driver_data;
1010
1011 if (!card)
1012 return -EINVAL;
1013
1014 if (qeth_check_layer2(card))
1015 return -EPERM;
1016
1017 return sprintf(buf, "%i\n", card->ipato.invert4? 1:0);
1018}
1019
1020static ssize_t
1021qeth_dev_ipato_invert4_store(struct device *dev, const char *buf, size_t count)
1022{
1023 struct qeth_card *card = dev->driver_data;
1024 char *tmp;
1025
1026 if (!card)
1027 return -EINVAL;
1028
1029 if (qeth_check_layer2(card))
1030 return -EPERM;
1031
1032 tmp = strsep((char **) &buf, "\n");
1033 if (!strcmp(tmp, "toggle")){
1034 card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
1035 } else if (!strcmp(tmp, "1")){
1036 card->ipato.invert4 = 1;
1037 } else if (!strcmp(tmp, "0")){
1038 card->ipato.invert4 = 0;
1039 } else {
1040 PRINT_WARN("ipato_invert4: write 0, 1 or 'toggle' to "
1041 "this file\n");
1042 return -EINVAL;
1043 }
1044 return count;
1045}
1046
1047static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
1048 qeth_dev_ipato_invert4_show,
1049 qeth_dev_ipato_invert4_store);
1050
1051static inline ssize_t
1052qeth_dev_ipato_add_show(char *buf, struct qeth_card *card,
1053 enum qeth_prot_versions proto)
1054{
1055 struct qeth_ipato_entry *ipatoe;
1056 unsigned long flags;
1057 char addr_str[40];
1058 int entry_len; /* length of 1 entry string, differs between v4 and v6 */
1059 int i = 0;
1060
1061 if (qeth_check_layer2(card))
1062 return -EPERM;
1063
1064 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
1065 /* add strlen for "/<mask>\n" */
1066 entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
1067 spin_lock_irqsave(&card->ip_lock, flags);
1068 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
1069 if (ipatoe->proto != proto)
1070 continue;
1071 /* String must not be longer than PAGE_SIZE. So we check if
1072	 * string length gets near PAGE_SIZE. Then we can safely display
1073 * the next IPv6 address (worst case, compared to IPv4) */
1074 if ((PAGE_SIZE - i) <= entry_len)
1075 break;
1076 qeth_ipaddr_to_string(proto, ipatoe->addr, addr_str);
1077 i += snprintf(buf + i, PAGE_SIZE - i,
1078 "%s/%i\n", addr_str, ipatoe->mask_bits);
1079 }
1080 spin_unlock_irqrestore(&card->ip_lock, flags);
1081 i += snprintf(buf + i, PAGE_SIZE - i, "\n");
1082
1083 return i;
1084}
1085
1086static ssize_t
1087qeth_dev_ipato_add4_show(struct device *dev, char *buf)
1088{
1089 struct qeth_card *card = dev->driver_data;
1090
1091 if (!card)
1092 return -EINVAL;
1093
1094 return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
1095}
1096
1097static inline int
1098qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto,
1099 u8 *addr, int *mask_bits)
1100{
1101 const char *start, *end;
1102 char *tmp;
1103 char buffer[49] = {0, };
1104
1105 start = buf;
1106 /* get address string */
1107 end = strchr(start, '/');
1108 if (!end){
1109 PRINT_WARN("Invalid format for ipato_addx/delx. "
1110 "Use <ip addr>/<mask bits>\n");
1111 return -EINVAL;
1112 }
1113	strncpy(buffer, start, min_t(ptrdiff_t, end - start, sizeof(buffer) - 1));
1114 if (qeth_string_to_ipaddr(buffer, proto, addr)){
1115 PRINT_WARN("Invalid IP address format!\n");
1116 return -EINVAL;
1117 }
1118 start = end + 1;
1119 *mask_bits = simple_strtoul(start, &tmp, 10);
1120
1121 return 0;
1122}
1123
1124static inline ssize_t
1125qeth_dev_ipato_add_store(const char *buf, size_t count,
1126 struct qeth_card *card, enum qeth_prot_versions proto)
1127{
1128 struct qeth_ipato_entry *ipatoe;
1129 u8 addr[16];
1130 int mask_bits;
1131 int rc;
1132
1133 if (qeth_check_layer2(card))
1134 return -EPERM;
1135 if ((rc = qeth_parse_ipatoe(buf, proto, addr, &mask_bits)))
1136 return rc;
1137
1138 if (!(ipatoe = kmalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL))){
1139 PRINT_WARN("No memory to allocate ipato entry\n");
1140 return -ENOMEM;
1141 }
1142 memset(ipatoe, 0, sizeof(struct qeth_ipato_entry));
1143 ipatoe->proto = proto;
1144 memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
1145 ipatoe->mask_bits = mask_bits;
1146
1147 if ((rc = qeth_add_ipato_entry(card, ipatoe))){
1148 kfree(ipatoe);
1149 return rc;
1150 }
1151
1152 return count;
1153}
1154
1155static ssize_t
1156qeth_dev_ipato_add4_store(struct device *dev, const char *buf, size_t count)
1157{
1158 struct qeth_card *card = dev->driver_data;
1159
1160 if (!card)
1161 return -EINVAL;
1162
1163 return qeth_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4);
1164}
1165
1166static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
1167 qeth_dev_ipato_add4_show,
1168 qeth_dev_ipato_add4_store);
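/*
 * A usage sketch (placeholder path): takeover entries are written in the
 * <ip addr>/<mask bits> form parsed by qeth_parse_ipatoe() above:
 *
 *	echo 1 > /sys/devices/qeth/0.0.4711/ipa_takeover/enable
 *	echo 192.168.10.0/24 > /sys/devices/qeth/0.0.4711/ipa_takeover/add4
 */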
1169
1170static inline ssize_t
1171qeth_dev_ipato_del_store(const char *buf, size_t count,
1172 struct qeth_card *card, enum qeth_prot_versions proto)
1173{
1174 u8 addr[16];
1175 int mask_bits;
1176 int rc;
1177
1178 if (qeth_check_layer2(card))
1179 return -EPERM;
1180 if ((rc = qeth_parse_ipatoe(buf, proto, addr, &mask_bits)))
1181 return rc;
1182
1183 qeth_del_ipato_entry(card, proto, addr, mask_bits);
1184
1185 return count;
1186}
1187
1188static ssize_t
1189qeth_dev_ipato_del4_store(struct device *dev, const char *buf, size_t count)
1190{
1191 struct qeth_card *card = dev->driver_data;
1192
1193 if (!card)
1194 return -EINVAL;
1195
1196 return qeth_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4);
1197}
1198
1199static QETH_DEVICE_ATTR(ipato_del4, del4, 0200, NULL,
1200 qeth_dev_ipato_del4_store);
1201
1202#ifdef CONFIG_QETH_IPV6
1203static ssize_t
1204qeth_dev_ipato_invert6_show(struct device *dev, char *buf)
1205{
1206 struct qeth_card *card = dev->driver_data;
1207
1208 if (!card)
1209 return -EINVAL;
1210
1211 if (qeth_check_layer2(card))
1212 return -EPERM;
1213
1214 return sprintf(buf, "%i\n", card->ipato.invert6? 1:0);
1215}
1216
1217static ssize_t
1218qeth_dev_ipato_invert6_store(struct device *dev, const char *buf, size_t count)
1219{
1220 struct qeth_card *card = dev->driver_data;
1221 char *tmp;
1222
1223 if (!card)
1224 return -EINVAL;
1225
1226 if (qeth_check_layer2(card))
1227 return -EPERM;
1228
1229 tmp = strsep((char **) &buf, "\n");
1230 if (!strcmp(tmp, "toggle")){
1231 card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
1232 } else if (!strcmp(tmp, "1")){
1233 card->ipato.invert6 = 1;
1234 } else if (!strcmp(tmp, "0")){
1235 card->ipato.invert6 = 0;
1236 } else {
1237 PRINT_WARN("ipato_invert6: write 0, 1 or 'toggle' to "
1238 "this file\n");
1239 return -EINVAL;
1240 }
1241 return count;
1242}
1243
1244static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644,
1245 qeth_dev_ipato_invert6_show,
1246 qeth_dev_ipato_invert6_store);
1247
1248
1249static ssize_t
1250qeth_dev_ipato_add6_show(struct device *dev, char *buf)
1251{
1252 struct qeth_card *card = dev->driver_data;
1253
1254 if (!card)
1255 return -EINVAL;
1256
1257 return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV6);
1258}
1259
1260static ssize_t
1261qeth_dev_ipato_add6_store(struct device *dev, const char *buf, size_t count)
1262{
1263 struct qeth_card *card = dev->driver_data;
1264
1265 if (!card)
1266 return -EINVAL;
1267
1268 return qeth_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6);
1269}
1270
1271static QETH_DEVICE_ATTR(ipato_add6, add6, 0644,
1272 qeth_dev_ipato_add6_show,
1273 qeth_dev_ipato_add6_store);
1274
1275static ssize_t
1276qeth_dev_ipato_del6_store(struct device *dev, const char *buf, size_t count)
1277{
1278 struct qeth_card *card = dev->driver_data;
1279
1280 if (!card)
1281 return -EINVAL;
1282
1283 return qeth_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6);
1284}
1285
1286static QETH_DEVICE_ATTR(ipato_del6, del6, 0200, NULL,
1287 qeth_dev_ipato_del6_store);
1288#endif /* CONFIG_QETH_IPV6 */
1289
1290static struct device_attribute * qeth_ipato_device_attrs[] = {
1291 &dev_attr_ipato_enable,
1292 &dev_attr_ipato_invert4,
1293 &dev_attr_ipato_add4,
1294 &dev_attr_ipato_del4,
1295#ifdef CONFIG_QETH_IPV6
1296 &dev_attr_ipato_invert6,
1297 &dev_attr_ipato_add6,
1298 &dev_attr_ipato_del6,
1299#endif
1300 NULL,
1301};
1302
1303static struct attribute_group qeth_device_ipato_group = {
1304 .name = "ipa_takeover",
1305 .attrs = (struct attribute **)qeth_ipato_device_attrs,
1306};
1307
1308static inline ssize_t
1309qeth_dev_vipa_add_show(char *buf, struct qeth_card *card,
1310 enum qeth_prot_versions proto)
1311{
1312 struct qeth_ipaddr *ipaddr;
1313 char addr_str[40];
1314 int entry_len; /* length of 1 entry string, differs between v4 and v6 */
1315 unsigned long flags;
1316 int i = 0;
1317
1318 if (qeth_check_layer2(card))
1319 return -EPERM;
1320
1321 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
1322 entry_len += 2; /* \n + terminator */
1323 spin_lock_irqsave(&card->ip_lock, flags);
1324 list_for_each_entry(ipaddr, &card->ip_list, entry){
1325 if (ipaddr->proto != proto)
1326 continue;
1327 if (ipaddr->type != QETH_IP_TYPE_VIPA)
1328 continue;
1329 /* String must not be longer than PAGE_SIZE. So we check if
1330		 * string length gets near PAGE_SIZE. Then we can safely display
1331 * the next IPv6 address (worst case, compared to IPv4) */
1332 if ((PAGE_SIZE - i) <= entry_len)
1333 break;
1334 qeth_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str);
1335 i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
1336 }
1337 spin_unlock_irqrestore(&card->ip_lock, flags);
1338 i += snprintf(buf + i, PAGE_SIZE - i, "\n");
1339
1340 return i;
1341}
1342
1343static ssize_t
1344qeth_dev_vipa_add4_show(struct device *dev, char *buf)
1345{
1346 struct qeth_card *card = dev->driver_data;
1347
1348 if (!card)
1349 return -EINVAL;
1350
1351 return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
1352}
1353
1354static inline int
1355qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto,
1356 u8 *addr)
1357{
1358 if (qeth_string_to_ipaddr(buf, proto, addr)){
1359 PRINT_WARN("Invalid IP address format!\n");
1360 return -EINVAL;
1361 }
1362 return 0;
1363}
1364
1365static inline ssize_t
1366qeth_dev_vipa_add_store(const char *buf, size_t count,
1367 struct qeth_card *card, enum qeth_prot_versions proto)
1368{
1369 u8 addr[16] = {0, };
1370 int rc;
1371
1372 if (qeth_check_layer2(card))
1373 return -EPERM;
1374 if ((rc = qeth_parse_vipae(buf, proto, addr)))
1375 return rc;
1376
1377 if ((rc = qeth_add_vipa(card, proto, addr)))
1378 return rc;
1379
1380 return count;
1381}
1382
1383static ssize_t
1384qeth_dev_vipa_add4_store(struct device *dev, const char *buf, size_t count)
1385{
1386 struct qeth_card *card = dev->driver_data;
1387
1388 if (!card)
1389 return -EINVAL;
1390
1391 return qeth_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4);
1392}
1393
1394static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
1395 qeth_dev_vipa_add4_show,
1396 qeth_dev_vipa_add4_store);
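/*
 * A usage sketch (placeholder path): VIPA entries take a bare IP address
 * without a mask; the rxip attributes further below work the same way
 * under the rxip group:
 *
 *	echo 10.0.0.99 > /sys/devices/qeth/0.0.4711/vipa/add4
 */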
1397
1398static inline ssize_t
1399qeth_dev_vipa_del_store(const char *buf, size_t count,
1400 struct qeth_card *card, enum qeth_prot_versions proto)
1401{
1402 u8 addr[16];
1403 int rc;
1404
1405 if (qeth_check_layer2(card))
1406 return -EPERM;
1407 if ((rc = qeth_parse_vipae(buf, proto, addr)))
1408 return rc;
1409
1410 qeth_del_vipa(card, proto, addr);
1411
1412 return count;
1413}
1414
1415static ssize_t
1416qeth_dev_vipa_del4_store(struct device *dev, const char *buf, size_t count)
1417{
1418 struct qeth_card *card = dev->driver_data;
1419
1420 if (!card)
1421 return -EINVAL;
1422
1423 return qeth_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
1424}
1425
1426static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
1427 qeth_dev_vipa_del4_store);
1428
1429#ifdef CONFIG_QETH_IPV6
1430static ssize_t
1431qeth_dev_vipa_add6_show(struct device *dev, char *buf)
1432{
1433 struct qeth_card *card = dev->driver_data;
1434
1435 if (!card)
1436 return -EINVAL;
1437
1438 return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV6);
1439}
1440
1441static ssize_t
1442qeth_dev_vipa_add6_store(struct device *dev, const char *buf, size_t count)
1443{
1444 struct qeth_card *card = dev->driver_data;
1445
1446 if (!card)
1447 return -EINVAL;
1448
1449 return qeth_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
1450}
1451
1452static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
1453 qeth_dev_vipa_add6_show,
1454 qeth_dev_vipa_add6_store);
1455
1456static ssize_t
1457qeth_dev_vipa_del6_store(struct device *dev, const char *buf, size_t count)
1458{
1459 struct qeth_card *card = dev->driver_data;
1460
1461 if (!card)
1462 return -EINVAL;
1463
1464 if (qeth_check_layer2(card))
1465 return -EPERM;
1466
1467 return qeth_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
1468}
1469
1470static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL,
1471 qeth_dev_vipa_del6_store);
1472#endif /* CONFIG_QETH_IPV6 */
1473
1474static struct device_attribute * qeth_vipa_device_attrs[] = {
1475 &dev_attr_vipa_add4,
1476 &dev_attr_vipa_del4,
1477#ifdef CONFIG_QETH_IPV6
1478 &dev_attr_vipa_add6,
1479 &dev_attr_vipa_del6,
1480#endif
1481 NULL,
1482};
1483
1484static struct attribute_group qeth_device_vipa_group = {
1485 .name = "vipa",
1486 .attrs = (struct attribute **)qeth_vipa_device_attrs,
1487};
1488
1489static inline ssize_t
1490qeth_dev_rxip_add_show(char *buf, struct qeth_card *card,
1491 enum qeth_prot_versions proto)
1492{
1493 struct qeth_ipaddr *ipaddr;
1494 char addr_str[40];
1495 int entry_len; /* length of 1 entry string, differs between v4 and v6 */
1496 unsigned long flags;
1497 int i = 0;
1498
1499 if (qeth_check_layer2(card))
1500 return -EPERM;
1501
1502 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
1503 entry_len += 2; /* \n + terminator */
1504 spin_lock_irqsave(&card->ip_lock, flags);
1505 list_for_each_entry(ipaddr, &card->ip_list, entry){
1506 if (ipaddr->proto != proto)
1507 continue;
1508 if (ipaddr->type != QETH_IP_TYPE_RXIP)
1509 continue;
1510 /* String must not be longer than PAGE_SIZE. So we check if
1511		 * string length gets near PAGE_SIZE. Then we can safely display
1512 * the next IPv6 address (worst case, compared to IPv4) */
1513 if ((PAGE_SIZE - i) <= entry_len)
1514 break;
1515 qeth_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str);
1516 i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
1517 }
1518 spin_unlock_irqrestore(&card->ip_lock, flags);
1519 i += snprintf(buf + i, PAGE_SIZE - i, "\n");
1520
1521 return i;
1522}
1523
1524static ssize_t
1525qeth_dev_rxip_add4_show(struct device *dev, char *buf)
1526{
1527 struct qeth_card *card = dev->driver_data;
1528
1529 if (!card)
1530 return -EINVAL;
1531
1532 return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
1533}
1534
1535static inline int
1536qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto,
1537 u8 *addr)
1538{
1539 if (qeth_string_to_ipaddr(buf, proto, addr)){
1540 PRINT_WARN("Invalid IP address format!\n");
1541 return -EINVAL;
1542 }
1543 return 0;
1544}
1545
1546static inline ssize_t
1547qeth_dev_rxip_add_store(const char *buf, size_t count,
1548 struct qeth_card *card, enum qeth_prot_versions proto)
1549{
1550 u8 addr[16] = {0, };
1551 int rc;
1552
1553 if (qeth_check_layer2(card))
1554 return -EPERM;
1555 if ((rc = qeth_parse_rxipe(buf, proto, addr)))
1556 return rc;
1557
1558 if ((rc = qeth_add_rxip(card, proto, addr)))
1559 return rc;
1560
1561 return count;
1562}
1563
1564static ssize_t
1565qeth_dev_rxip_add4_store(struct device *dev, const char *buf, size_t count)
1566{
1567 struct qeth_card *card = dev->driver_data;
1568
1569 if (!card)
1570 return -EINVAL;
1571
1572 return qeth_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
1573}
1574
1575static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
1576 qeth_dev_rxip_add4_show,
1577 qeth_dev_rxip_add4_store);
1578
1579static inline ssize_t
1580qeth_dev_rxip_del_store(const char *buf, size_t count,
1581 struct qeth_card *card, enum qeth_prot_versions proto)
1582{
1583 u8 addr[16];
1584 int rc;
1585
1586 if (qeth_check_layer2(card))
1587 return -EPERM;
1588 if ((rc = qeth_parse_rxipe(buf, proto, addr)))
1589 return rc;
1590
1591 qeth_del_rxip(card, proto, addr);
1592
1593 return count;
1594}
1595
1596static ssize_t
1597qeth_dev_rxip_del4_store(struct device *dev, const char *buf, size_t count)
1598{
1599 struct qeth_card *card = dev->driver_data;
1600
1601 if (!card)
1602 return -EINVAL;
1603
1604 return qeth_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4);
1605}
1606
1607static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
1608 qeth_dev_rxip_del4_store);
1609
1610#ifdef CONFIG_QETH_IPV6
1611static ssize_t
1612qeth_dev_rxip_add6_show(struct device *dev, char *buf)
1613{
1614 struct qeth_card *card = dev->driver_data;
1615
1616 if (!card)
1617 return -EINVAL;
1618
1619 return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV6);
1620}
1621
1622static ssize_t
1623qeth_dev_rxip_add6_store(struct device *dev, const char *buf, size_t count)
1624{
1625 struct qeth_card *card = dev->driver_data;
1626
1627 if (!card)
1628 return -EINVAL;
1629
1630 return qeth_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6);
1631}
1632
1633static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
1634 qeth_dev_rxip_add6_show,
1635 qeth_dev_rxip_add6_store);
1636
1637static ssize_t
1638qeth_dev_rxip_del6_store(struct device *dev, const char *buf, size_t count)
1639{
1640 struct qeth_card *card = dev->driver_data;
1641
1642 if (!card)
1643 return -EINVAL;
1644
1645 return qeth_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6);
1646}
1647
1648static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL,
1649 qeth_dev_rxip_del6_store);
1650#endif /* CONFIG_QETH_IPV6 */
1651
1652static struct device_attribute * qeth_rxip_device_attrs[] = {
1653 &dev_attr_rxip_add4,
1654 &dev_attr_rxip_del4,
1655#ifdef CONFIG_QETH_IPV6
1656 &dev_attr_rxip_add6,
1657 &dev_attr_rxip_del6,
1658#endif
1659 NULL,
1660};
1661
1662static struct attribute_group qeth_device_rxip_group = {
1663 .name = "rxip",
1664 .attrs = (struct attribute **)qeth_rxip_device_attrs,
1665};
1666
1667int
1668qeth_create_device_attributes(struct device *dev)
1669{
1670 int ret;
1671
1672 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group)))
1673 return ret;
1674 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group))){
1675 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
1676 return ret;
1677 }
1678 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_vipa_group))){
1679 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
1680 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1681 return ret;
1682 }
1683	if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_rxip_group))){
1684		sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
1685		sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1686		sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
		return ret;
1687	}
1688 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group)))
1689 return ret;
1690
1691 return ret;
1692}
1693
1694void
1695qeth_remove_device_attributes(struct device *dev)
1696{
1697 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
1698 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1699 sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
1700 sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
1701 sysfs_remove_group(&dev->kobj, &qeth_device_blkt_group);
1702}
1703
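/*
 * A rough sketch of the resulting sysfs layout (group names are taken
 * from the attribute_group definitions above; the device path is a
 * placeholder):
 *
 *	/sys/devices/qeth/0.0.4711/
 *		route4, add_hhlen, fake_ll, recover, large_send, ...
 *		blkt/{total,inter,inter_jumbo}
 *		ipa_takeover/{enable,invert4,add4,del4,...}
 *		vipa/{add4,del4,...}
 *		rxip/{add4,del4,...}
 */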
1704/**********************/
1705/* DRIVER ATTRIBUTES */
1706/**********************/
1707static ssize_t
1708qeth_driver_group_store(struct device_driver *ddrv, const char *buf,
1709 size_t count)
1710{
1711 const char *start, *end;
1712 char bus_ids[3][BUS_ID_SIZE], *argv[3];
1713 int i;
1714 int err;
1715
1716 start = buf;
1717 for (i = 0; i < 3; i++) {
1718 static const char delim[] = { ',', ',', '\n' };
1719 int len;
1720
1721 if (!(end = strchr(start, delim[i])))
1722 return -EINVAL;
1723 len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start);
1724 strncpy(bus_ids[i], start, len);
1725 bus_ids[i][len] = '\0';
1726 start = end + 1;
1727 argv[i] = bus_ids[i];
1728 }
1729 err = ccwgroup_create(qeth_root_dev, qeth_ccwgroup_driver.driver_id,
1730 &qeth_ccw_driver, 3, argv);
1731 if (err)
1732 return err;
1733 else
1734 return count;
1735}
1736
1737
1738static DRIVER_ATTR(group, 0200, 0, qeth_driver_group_store);
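/*
 * A usage sketch: a qeth ccwgroup device is created by writing the three
 * subchannel bus IDs, comma separated, to the driver's group attribute
 * (the bus IDs below are placeholders):
 *
 *	echo 0.0.f500,0.0.f501,0.0.f502 > /sys/bus/ccwgroup/drivers/qeth/group
 */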
1739
1740static ssize_t
1741qeth_driver_notifier_register_store(struct device_driver *ddrv, const char *buf,
1742 size_t count)
1743{
1744 int rc;
1745 int signum;
1746 char *tmp, *tmp2;
1747
1748 tmp = strsep((char **) &buf, "\n");
1749 if (!strncmp(tmp, "unregister", 10)){
1750 if ((rc = qeth_notifier_unregister(current)))
1751 return rc;
1752 return count;
1753 }
1754
1755 signum = simple_strtoul(tmp, &tmp2, 10);
1756 if ((signum < 0) || (signum > 32)){
1757 PRINT_WARN("Signal number %d is out of range\n", signum);
1758 return -EINVAL;
1759 }
1760 if ((rc = qeth_notifier_register(current, signum)))
1761 return rc;
1762
1763 return count;
1764}
1765
1766static DRIVER_ATTR(notifier_register, 0200, 0,
1767 qeth_driver_notifier_register_store);
1768
1769int
1770qeth_create_driver_attributes(void)
1771{
1772 int rc;
1773
1774 if ((rc = driver_create_file(&qeth_ccwgroup_driver.driver,
1775 &driver_attr_group)))
1776 return rc;
1777 return driver_create_file(&qeth_ccwgroup_driver.driver,
1778 &driver_attr_notifier_register);
1779}
1780
1781void
1782qeth_remove_driver_attributes(void)
1783{
1784 driver_remove_file(&qeth_ccwgroup_driver.driver,
1785 &driver_attr_group);
1786 driver_remove_file(&qeth_ccwgroup_driver.driver,
1787 &driver_attr_notifier_register);
1788}
diff --git a/drivers/s390/net/qeth_tso.c b/drivers/s390/net/qeth_tso.c
new file mode 100644
index 000000000000..c91976274e7b
--- /dev/null
+++ b/drivers/s390/net/qeth_tso.c
@@ -0,0 +1,285 @@
1/*
2 * linux/drivers/s390/net/qeth_tso.c ($Revision: 1.6 $)
3 *
4 * qeth TCP Segmentation Offload support.
5 *
6 * Copyright 2004 IBM Corporation
7 *
8 * Author(s): Frank Pavlic <pavlic@de.ibm.com>
9 *
10 * $Revision: 1.6 $ $Date: 2005/03/24 09:04:18 $
11 *
12 */
13
14#include <linux/skbuff.h>
15#include <linux/tcp.h>
16#include <linux/ip.h>
17#include <linux/ipv6.h>
18#include <net/ip6_checksum.h>
19#include "qeth.h"
20#include "qeth_mpc.h"
21#include "qeth_tso.h"
22
23/*
24 * skb already partially prepared,
25 * classic qdio header in skb->data
26 */
27static inline struct qeth_hdr_tso *
28qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
29{
30 int rc = 0;
31
32 QETH_DBF_TEXT(trace, 5, "tsoprsk");
33	rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr_ext_tso));
34 if (rc)
35 return NULL;
36
37 return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_ext_tso));
38}
39
40/**
41 * fill header for a TSO packet
42 */
43static inline void
44qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
45{
46 struct qeth_hdr_tso *hdr;
47 struct tcphdr *tcph;
48 struct iphdr *iph;
49
50 QETH_DBF_TEXT(trace, 5, "tsofhdr");
51
52 hdr = (struct qeth_hdr_tso *) skb->data;
53 iph = skb->nh.iph;
54 tcph = skb->h.th;
55 /*fix header to TSO values ...*/
56 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
57 /*set values which are fix for the first approach ...*/
58 hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
59 hdr->ext.imb_hdr_no = 1;
60 hdr->ext.hdr_type = 1;
61 hdr->ext.hdr_version = 1;
62 hdr->ext.hdr_len = 28;
63 /*insert non-fix values */
64 hdr->ext.mss = skb_shinfo(skb)->tso_size;
65 hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
66 hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
67 sizeof(struct qeth_hdr_tso));
68}
69
70/**
71 * change some header values as requested by hardware
72 */
73static inline void
74qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
75{
76 struct iphdr *iph;
77 struct ipv6hdr *ip6h;
78 struct tcphdr *tcph;
79
80 iph = skb->nh.iph;
81 ip6h = skb->nh.ipv6h;
82 tcph = skb->h.th;
83
84 tcph->check = 0;
85 if (skb->protocol == ETH_P_IPV6) {
86 ip6h->payload_len = 0;
87 tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
88 0, IPPROTO_TCP, 0);
89 return;
90 }
91	/* OSA wants us to set these values ... */
92 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
93 0, IPPROTO_TCP, 0);
94 iph->tot_len = 0;
95 iph->check = 0;
96}
97
98static inline struct qeth_hdr_tso *
99qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
100 int ipv, int cast_type)
101{
102 struct qeth_hdr_tso *hdr;
103 int rc = 0;
104
105 QETH_DBF_TEXT(trace, 5, "tsoprep");
106
107 /*get headroom for tso qdio header */
108 hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
109 if (hdr == NULL) {
110 QETH_DBF_TEXT_(trace, 4, "2err%d", rc);
111 return NULL;
112 }
113 memset(hdr, 0, sizeof(struct qeth_hdr_tso));
114	/* fill first 32 bytes of qdio header as used.
115	 * FIXME: TSO has two struct members
116	 * with different names but same size
117	 */
118 qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
119 qeth_tso_fill_header(card, skb);
120 qeth_tso_set_tcpip_header(card, skb);
121 return hdr;
122}
123
124static inline int
125qeth_tso_get_queue_buffer(struct qeth_qdio_out_q *queue)
126{
127 struct qeth_qdio_out_buffer *buffer;
128 int flush_cnt = 0;
129
130 QETH_DBF_TEXT(trace, 5, "tsobuf");
131
132 /* force to non-packing*/
133 if (queue->do_pack)
134 queue->do_pack = 0;
135 buffer = &queue->bufs[queue->next_buf_to_fill];
136	/* advance to a fresh buffer if the current one is partially filled */
137 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
138 (buffer->next_element_to_fill > 0)) {
139 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
140 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
141 QDIO_MAX_BUFFERS_PER_Q;
142 flush_cnt++;
143 }
144 return flush_cnt;
145}
146
147static inline void
148__qeth_tso_fill_buffer_frag(struct qeth_qdio_out_buffer *buf,
149 struct sk_buff *skb)
150{
151 struct skb_frag_struct *frag;
152 struct qdio_buffer *buffer;
153 int fragno, cnt, element;
154 unsigned long addr;
155
156 QETH_DBF_TEXT(trace, 6, "tsfilfrg");
157
158 /*initialize variables ...*/
159 fragno = skb_shinfo(skb)->nr_frags;
160 buffer = buf->buffer;
161 element = buf->next_element_to_fill;
162 /*fill buffer elements .....*/
163 for (cnt = 0; cnt < fragno; cnt++) {
164 frag = &skb_shinfo(skb)->frags[cnt];
165 addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
166 frag->page_offset;
167 buffer->element[element].addr = (char *)addr;
168 buffer->element[element].length = frag->size;
169 if (cnt < (fragno - 1))
170 buffer->element[element].flags =
171 SBAL_FLAGS_MIDDLE_FRAG;
172 else
173 buffer->element[element].flags =
174 SBAL_FLAGS_LAST_FRAG;
175 element++;
176 }
177 buf->next_element_to_fill = element;
178}
179
180static inline int
181qeth_tso_fill_buffer(struct qeth_qdio_out_buffer *buf,
182 struct sk_buff *skb)
183{
184 int length, length_here, element;
185 int hdr_len;
186 struct qdio_buffer *buffer;
187 struct qeth_hdr_tso *hdr;
188 char *data;
189
190 QETH_DBF_TEXT(trace, 3, "tsfilbuf");
191
192 /*increment user count and queue skb ...*/
193 atomic_inc(&skb->users);
194 skb_queue_tail(&buf->skb_list, skb);
195
196 /*initialize all variables...*/
197 buffer = buf->buffer;
198 hdr = (struct qeth_hdr_tso *)skb->data;
199 hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
200 data = skb->data + hdr_len;
201 length = skb->len - hdr_len;
202 element = buf->next_element_to_fill;
203 /*fill first buffer entry only with header information */
204 buffer->element[element].addr = skb->data;
205 buffer->element[element].length = hdr_len;
206 buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
207 buf->next_element_to_fill++;
208
209 if (skb_shinfo(skb)->nr_frags > 0) {
210 __qeth_tso_fill_buffer_frag(buf, skb);
211 goto out;
212 }
213
214 /*start filling buffer entries ...*/
215 element++;
216 while (length > 0) {
217 /* length_here is the remaining amount of data in this page */
218 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
219 if (length < length_here)
220 length_here = length;
221 buffer->element[element].addr = data;
222 buffer->element[element].length = length_here;
223 length -= length_here;
224 if (!length)
225 buffer->element[element].flags =
226 SBAL_FLAGS_LAST_FRAG;
227 else
228 buffer->element[element].flags =
229 SBAL_FLAGS_MIDDLE_FRAG;
230 data += length_here;
231 element++;
232 }
233 /*set the buffer to primed ...*/
234 buf->next_element_to_fill = element;
235out:
236 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
237 return 1;
238}
239
240int
241qeth_tso_send_packet(struct qeth_card *card, struct sk_buff *skb,
242 struct qeth_qdio_out_q *queue, int ipv, int cast_type)
243{
244 int flush_cnt = 0;
245 struct qeth_hdr_tso *hdr;
246 struct qeth_qdio_out_buffer *buffer;
247 int start_index;
248
249 QETH_DBF_TEXT(trace, 3, "tsosend");
250
251 if (!(hdr = qeth_tso_prepare_packet(card, skb, ipv, cast_type)))
252 return -ENOMEM;
253 /*check if skb fits in one SBAL ...*/
254 if (!(qeth_get_elements_no(card, (void*)hdr, skb)))
255 return -EINVAL;
256 /*lock queue, force switching to non-packing and send it ...*/
257 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
258 QETH_OUT_Q_LOCKED,
259 &queue->state));
260 start_index = queue->next_buf_to_fill;
261 buffer = &queue->bufs[queue->next_buf_to_fill];
262 /*check if card is too busy ...*/
263 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
264 card->stats.tx_dropped++;
265 goto out;
266 }
267 /*let's force to non-packing and get a new SBAL*/
268 flush_cnt += qeth_tso_get_queue_buffer(queue);
269 buffer = &queue->bufs[queue->next_buf_to_fill];
270 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
271 card->stats.tx_dropped++;
272 goto out;
273 }
274 flush_cnt += qeth_tso_fill_buffer(buffer, skb);
275 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
276 QDIO_MAX_BUFFERS_PER_Q;
277out:
278 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
279 if (flush_cnt)
280 qeth_flush_buffers(queue, 0, start_index, flush_cnt);
281 /*do some statistics */
282 card->stats.tx_packets++;
283 card->stats.tx_bytes += skb->len;
284 return 0;
285}
diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h
new file mode 100644
index 000000000000..83504dee3f57
--- /dev/null
+++ b/drivers/s390/net/qeth_tso.h
@@ -0,0 +1,58 @@
1/*
2 * linux/drivers/s390/net/qeth_tso.h ($Revision: 1.4 $)
3 *
4 * Header file for qeth TCP Segmentation Offload support.
5 *
6 * Copyright 2004 IBM Corporation
7 *
8 * Author(s): Frank Pavlic <pavlic@de.ibm.com>
9 *
10 * $Revision: 1.4 $ $Date: 2005/03/24 09:04:18 $
11 *
12 */
13#ifndef __QETH_TSO_H__
14#define __QETH_TSO_H__
15
16
17extern int
18qeth_tso_send_packet(struct qeth_card *, struct sk_buff *,
19 struct qeth_qdio_out_q *, int , int);
20
21struct qeth_hdr_ext_tso {
22 __u16 hdr_tot_len;
23 __u8 imb_hdr_no;
24 __u8 reserved;
25 __u8 hdr_type;
26 __u8 hdr_version;
27 __u16 hdr_len;
28 __u32 payload_len;
29 __u16 mss;
30 __u16 dg_hdr_len;
31 __u8 padding[16];
32} __attribute__ ((packed));
33
34struct qeth_hdr_tso {
35 struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/
36 struct qeth_hdr_ext_tso ext;
37} __attribute__ ((packed));
38
39/*some helper functions*/
40
41static inline int
42qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb)
43{
44 int elements_needed = 0;
45
46 if (skb_shinfo(skb)->nr_frags > 0)
47 elements_needed = (skb_shinfo(skb)->nr_frags + 1);
48	if (elements_needed == 0)
49 elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
50 + skb->len) >> PAGE_SHIFT);
51 if (elements_needed > QETH_MAX_BUFFER_ELEMENTS(card)){
52		PRINT_ERR("qeth_get_elements_no: invalid size of "
53			  "IP packet. Discarded.\n");
54 return 0;
55 }
56 return elements_needed;
57}
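/*
 * A worked sketch of the count above, assuming 4 KiB pages: a linear skb
 * of 3000 bytes whose header starts 100 bytes into a page needs
 * 1 + ((100 + 3000) >> PAGE_SHIFT) = 1 element, while the same data
 * starting at offset 3000 crosses a page boundary and needs
 * 1 + ((3000 + 3000) >> PAGE_SHIFT) = 2 elements.
 */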
58#endif /* __QETH_TSO_H__ */
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
new file mode 100644
index 000000000000..a3d285859564
--- /dev/null
+++ b/drivers/s390/net/smsgiucv.c
@@ -0,0 +1,180 @@
1/*
2 * IUCV special message driver
3 *
4 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/errno.h>
25#include <linux/device.h>
26#include <asm/cpcmd.h>
27#include <asm/ebcdic.h>
28
29#include "iucv.h"
30
31struct smsg_callback {
32 struct list_head list;
33 char *prefix;
34 int len;
35 void (*callback)(char *str);
36};
37
38MODULE_AUTHOR
39 ("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)");
40MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver");
41
42static iucv_handle_t smsg_handle;
43static unsigned short smsg_pathid;
44static DEFINE_SPINLOCK(smsg_list_lock);
45static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list);
46
47static void
48smsg_connection_complete(iucv_ConnectionComplete *eib, void *pgm_data)
49{
50}
51
52
53static void
54smsg_message_pending(iucv_MessagePending *eib, void *pgm_data)
55{
56 struct smsg_callback *cb;
57 unsigned char *msg;
58 unsigned short len;
59 int rc;
60
61 len = eib->ln1msg2.ipbfln1f;
62 msg = kmalloc(len + 1, GFP_ATOMIC|GFP_DMA);
63 if (!msg) {
64 iucv_reject(eib->ippathid, eib->ipmsgid, eib->iptrgcls);
65 return;
66 }
67 rc = iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls,
68 msg, len, 0, 0, 0);
69 if (rc == 0) {
70 msg[len] = 0;
71 EBCASC(msg, len);
72 spin_lock(&smsg_list_lock);
73 list_for_each_entry(cb, &smsg_list, list)
74 if (strncmp(msg + 8, cb->prefix, cb->len) == 0) {
75 cb->callback(msg + 8);
76 break;
77 }
78 spin_unlock(&smsg_list_lock);
79 }
80 kfree(msg);
81}
82
83static iucv_interrupt_ops_t smsg_ops = {
84 .ConnectionComplete = smsg_connection_complete,
85 .MessagePending = smsg_message_pending,
86};
87
88static struct device_driver smsg_driver = {
89 .name = "SMSGIUCV",
90 .bus = &iucv_bus,
91};
92
93int
94smsg_register_callback(char *prefix, void (*callback)(char *str))
95{
96 struct smsg_callback *cb;
97
98 cb = kmalloc(sizeof(struct smsg_callback), GFP_KERNEL);
99 if (!cb)
100 return -ENOMEM;
101 cb->prefix = prefix;
102 cb->len = strlen(prefix);
103 cb->callback = callback;
104 spin_lock(&smsg_list_lock);
105 list_add_tail(&cb->list, &smsg_list);
106 spin_unlock(&smsg_list_lock);
107 return 0;
108}
109
110void
111smsg_unregister_callback(char *prefix, void (*callback)(char *str))
112{
113 struct smsg_callback *cb, *tmp;
114
115 spin_lock(&smsg_list_lock);
116	cb = NULL;
117 list_for_each_entry(tmp, &smsg_list, list)
118 if (tmp->callback == callback &&
119 strcmp(tmp->prefix, prefix) == 0) {
120 cb = tmp;
121 list_del(&cb->list);
122 break;
123 }
124 spin_unlock(&smsg_list_lock);
125 kfree(cb);
126}
127
128static void __exit
129smsg_exit(void)
130{
131	if (smsg_handle) {
132 cpcmd("SET SMSG OFF", 0, 0);
133 iucv_sever(smsg_pathid, 0);
134 iucv_unregister_program(smsg_handle);
135 driver_unregister(&smsg_driver);
136 }
137 return;
138}
139
140static int __init
141smsg_init(void)
142{
143 static unsigned char pgmmask[24] = {
144 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
145 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
146 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
147 };
148 int rc;
149
150 rc = driver_register(&smsg_driver);
151 if (rc != 0) {
152 printk(KERN_ERR "SMSGIUCV: failed to register driver.\n");
153 return rc;
154 }
155 smsg_handle = iucv_register_program("SMSGIUCV ", "*MSG ",
156 pgmmask, &smsg_ops, 0);
157 if (!smsg_handle) {
158		printk(KERN_ERR "SMSGIUCV: failed to register with IUCV.\n");
159 driver_unregister(&smsg_driver);
160 return -EIO; /* better errno ? */
161 }
162 rc = iucv_connect (&smsg_pathid, 1, 0, "*MSG ", 0, 0, 0, 0,
163 smsg_handle, 0);
164 if (rc) {
165		printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG.\n");
166 iucv_unregister_program(smsg_handle);
167 driver_unregister(&smsg_driver);
168 smsg_handle = 0;
169 return -EIO;
170 }
171 cpcmd("SET SMSG IUCV", 0, 0);
172 return 0;
173}
174
175module_init(smsg_init);
176module_exit(smsg_exit);
177MODULE_LICENSE("GPL");
178
179EXPORT_SYMBOL(smsg_register_callback);
180EXPORT_SYMBOL(smsg_unregister_callback);
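/*
 * A minimal consumer sketch (hypothetical client code): register a
 * prefix and get called for matching CP special messages; after
 * "SMSG <userid> MYPFX hello" the callback sees "MYPFX hello", since
 * the first 8 bytes of the received message carry the sender ID.
 *
 *	static void my_smsg_handler(char *str)
 *	{
 *		printk(KERN_INFO "smsg: \"%s\"\n", str);
 *	}
 *
 *	rc = smsg_register_callback("MYPFX", my_smsg_handler);
 *	...
 *	smsg_unregister_callback("MYPFX", my_smsg_handler);
 */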
diff --git a/drivers/s390/net/smsgiucv.h b/drivers/s390/net/smsgiucv.h
new file mode 100644
index 000000000000..04cd87152964
--- /dev/null
+++ b/drivers/s390/net/smsgiucv.h
@@ -0,0 +1,10 @@
1/*
2 * IUCV special message driver
3 *
4 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
6 */
7
8int smsg_register_callback(char *, void (*)(char *));
9void smsg_unregister_callback(char *, void (*)(char *));
10