path: root/drivers/s390/net
author    Linus Torvalds <torvalds@linux-foundation.org>    2015-04-15 12:00:47 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2015-04-15 12:00:47 -0400
commit    6c373ca89399c5a3f7ef210ad8f63dc3437da345 (patch)
tree      74d1ec65087df1da1021b43ac51acc1ee8601809 /drivers/s390/net
parent    bb0fd7ab0986105765d11baa82e619c618a235aa (diff)
parent    9f9151412dd7aae0e3f51a89ae4a1f8755fdb4d0 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Add BQL support to via-rhine, from Tino Reichardt.

 2) Integrate SWITCHDEV layer support into the DSA layer, so DSA
    drivers can support hw switch offloading.  From Florian Fainelli.

 3) Allow 'ip address' commands to initiate multicast group join/leave,
    from Madhu Challa.

 4) Many ipv4 FIB lookup optimizations from Alexander Duyck.

 5) Support EBPF in cls_bpf classifier and act_bpf action, from Daniel
    Borkmann.

 6) Remove the ugly compat support in ARP for ugly layers like ax25,
    rose, etc.  And use this to clean up the neigh layer, then use it
    to implement MPLS support.  All from Eric Biederman.

 7) Support L3 forwarding offloading in switches, from Scott Feldman.

 8) Collapse the LOCAL and MAIN ipv4 FIB tables when possible, to speed
    up route lookups even further.  From Alexander Duyck.

 9) Many improvements and bug fixes to the rhashtable implementation,
    from Herbert Xu and Thomas Graf.  In particular, in the case where
    an rhashtable user bulk adds a large number of items into an empty
    table, we expand the table much more sanely.

10) Don't make the tcp_metrics hash table per-namespace, from Eric
    Biederman.

11) Extend EBPF to access SKB fields, from Alexei Starovoitov.

12) Split out new connection request sockets so that they can be
    established in the main hash table.  Much less false sharing since
    hash lookups go direct to the request sockets instead of having to
    go first to the listener then to the request socks hashed
    underneath.  From Eric Dumazet.

13) Add async I/O support for crypto AF_ALG sockets, from Tadeusz
    Struk.

14) Support stable privacy address generation for RFC7217 in IPV6.
    From Hannes Frederic Sowa.

15) Hash network namespace into IP frag IDs, also from Hannes Frederic
    Sowa.

16) Convert PTP get/set methods to use 64-bit time, from Richard
    Cochran.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1816 commits)
  fm10k: Bump driver version to 0.15.2
  fm10k: corrected VF multicast update
  fm10k: mbx_update_max_size does not drop all oversized messages
  fm10k: reset head instead of calling update_max_size
  fm10k: renamed mbx_tx_dropped to mbx_tx_oversized
  fm10k: update xcast mode before synchronizing multicast addresses
  fm10k: start service timer on probe
  fm10k: fix function header comment
  fm10k: comment next_vf_mbx flow
  fm10k: don't handle mailbox events in iov_event path and always process mailbox
  fm10k: use separate workqueue for fm10k driver
  fm10k: Set PF queues to unlimited bandwidth during virtualization
  fm10k: expose tx_timeout_count as an ethtool stat
  fm10k: only increment tx_timeout_count in Tx hang path
  fm10k: remove extraneous "Reset interface" message
  fm10k: separate PF only stats so that VF does not display them
  fm10k: use hw->mac.max_queues for stats
  fm10k: only show actual queues, not the maximum in hardware
  fm10k: allow creation of VLAN on default vid
  fm10k: fix unused warnings
  ...
Diffstat (limited to 'drivers/s390/net')
-rw-r--r--   drivers/s390/net/Kconfig            |   13
-rw-r--r--   drivers/s390/net/Makefile           |    1
-rw-r--r--   drivers/s390/net/claw.c             | 3377
-rw-r--r--   drivers/s390/net/claw.h             |  348
-rw-r--r--   drivers/s390/net/qeth_core_main.c   |    5
5 files changed, 4 insertions, 3740 deletions
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index f1b5111bbaba..b2837b1c70b7 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -57,17 +57,6 @@ config SMSGIUCV_EVENT
 
 	  To compile as a module, choose M. The module name is "smsgiucv_app".
 
-config CLAW
-	def_tristate m
-	prompt "CLAW device support"
-	depends on CCW && NETDEVICES
-	help
-	  This driver supports channel attached CLAW devices.
-	  CLAW is Common Link Access for Workstation.  Common devices
-	  that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices.
-	  To compile as a module, choose M. The module name is claw.
-	  To compile into the kernel, choose Y.
-
 config QETH
 	def_tristate y
 	prompt "Gigabit Ethernet device support"
@@ -106,6 +95,6 @@ config QETH_IPV6
 
 config CCWGROUP
 	tristate
-	default (LCS || CTCM || QETH || CLAW)
+	default (LCS || CTCM || QETH)
 
 endmenu
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index d28f05d0c75a..c351b07603e0 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
 obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
 obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o
 obj-$(CONFIG_LCS) += lcs.o
-obj-$(CONFIG_CLAW) += claw.o
 qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
 obj-$(CONFIG_QETH) += qeth.o
 qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
deleted file mode 100644
index d609ca09aa94..000000000000
--- a/drivers/s390/net/claw.c
+++ /dev/null
@@ -1,3377 +0,0 @@
1/*
2 * ESCON CLAW network driver
3 *
4 * Linux for zSeries version
5 * Copyright IBM Corp. 2002, 2009
6 * Author(s) Original code written by:
7 * Kazuo Iimura <iimura@jp.ibm.com>
8 * Rewritten by
9 * Andy Richter <richtera@us.ibm.com>
10 * Marc Price <mwprice@us.ibm.com>
11 *
12 * sysfs parms:
13 * group x.x.rrrr,x.x.wwww
14 * read_buffer nnnnnnn
15 * write_buffer nnnnnn
16 * host_name aaaaaaaa
17 * adapter_name aaaaaaaa
18 * api_type aaaaaaaa
19 *
20 * eg.
21 * group 0.0.0200 0.0.0201
22 * read_buffer 25
23 * write_buffer 20
24 * host_name LINUX390
25 * adapter_name RS6K
26 * api_type TCPIP
27 *
28 * where
29 *
30 * The device id is decided by the order entries
31 * are added to the group the first is claw0 the second claw1
32 * up to CLAW_MAX_DEV
33 *
34 * rrrr - the first of 2 consecutive device addresses used for the
35 * CLAW protocol.
36 * The specified address is always used as the input (Read)
37 * channel and the next address is used as the output channel.
38 *
39 * wwww - the second of 2 consecutive device addresses used for
40 * the CLAW protocol.
41 * The specified address is always used as the output
42 * channel and the previous address is used as the input channel.
43 *
44 * read_buffer - specifies number of input buffers to allocate.
45 * write_buffer - specifies number of output buffers to allocate.
46 * host_name - host name
47 * adaptor_name - adaptor name
48 * api_type - API type TCPIP or API will be sent and expected
49 * as ws_name
50 *
51 * Note the following requirements:
52 * 1) host_name must match the configured adapter_name on the remote side
53 * 2) adaptor_name must match the configured host name on the remote side
54 *
55 * Change History
56 * 1.00 Initial release shipped
57 * 1.10 Changes for Buffer allocation
58 * 1.15 Changed for 2.6 Kernel No longer compiles on 2.4 or lower
59 * 1.25 Added Packing support
60 * 1.5
61 */
62
63#define KMSG_COMPONENT "claw"
64
65#include <asm/ccwdev.h>
66#include <asm/ccwgroup.h>
67#include <asm/debug.h>
68#include <asm/idals.h>
69#include <asm/io.h>
70#include <linux/bitops.h>
71#include <linux/ctype.h>
72#include <linux/delay.h>
73#include <linux/errno.h>
74#include <linux/if_arp.h>
75#include <linux/init.h>
76#include <linux/interrupt.h>
77#include <linux/ip.h>
78#include <linux/kernel.h>
79#include <linux/module.h>
80#include <linux/netdevice.h>
81#include <linux/etherdevice.h>
82#include <linux/proc_fs.h>
83#include <linux/sched.h>
84#include <linux/signal.h>
85#include <linux/skbuff.h>
86#include <linux/slab.h>
87#include <linux/string.h>
88#include <linux/tcp.h>
89#include <linux/timer.h>
90#include <linux/types.h>
91
92#include "claw.h"
93
94/*
95 CLAW uses the s390dbf file system see claw_trace and claw_setup
96*/
97
98static char version[] __initdata = "CLAW driver";
99static char debug_buffer[255];
100/**
101 * Debug Facility Stuff
102 */
103static debug_info_t *claw_dbf_setup;
104static debug_info_t *claw_dbf_trace;
105
106/**
107 * CLAW Debug Facility functions
108 */
109static void
110claw_unregister_debug_facility(void)
111{
112 debug_unregister(claw_dbf_setup);
113 debug_unregister(claw_dbf_trace);
114}
115
116static int
117claw_register_debug_facility(void)
118{
119 claw_dbf_setup = debug_register("claw_setup", 2, 1, 8);
120 claw_dbf_trace = debug_register("claw_trace", 2, 2, 8);
121 if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
122 claw_unregister_debug_facility();
123 return -ENOMEM;
124 }
125 debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
126 debug_set_level(claw_dbf_setup, 2);
127 debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
128 debug_set_level(claw_dbf_trace, 2);
129 return 0;
130}
131
132static inline void
133claw_set_busy(struct net_device *dev)
134{
135 ((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
136}
137
138static inline void
139claw_clear_busy(struct net_device *dev)
140{
141 clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
142 netif_wake_queue(dev);
143}
144
145static inline int
146claw_check_busy(struct net_device *dev)
147{
148 return ((struct claw_privbk *) dev->ml_priv)->tbusy;
149}
150
151static inline void
152claw_setbit_busy(int nr,struct net_device *dev)
153{
154 netif_stop_queue(dev);
155 set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
156}
157
158static inline void
159claw_clearbit_busy(int nr,struct net_device *dev)
160{
161 clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
162 netif_wake_queue(dev);
163}
164
165static inline int
166claw_test_and_setbit_busy(int nr,struct net_device *dev)
167{
168 netif_stop_queue(dev);
169 return test_and_set_bit(nr,
170 (void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy));
171}
172
173
174/* Functions for the DEV methods */
175
176static int claw_probe(struct ccwgroup_device *cgdev);
177static void claw_remove_device(struct ccwgroup_device *cgdev);
178static void claw_purge_skb_queue(struct sk_buff_head *q);
179static int claw_new_device(struct ccwgroup_device *cgdev);
180static int claw_shutdown_device(struct ccwgroup_device *cgdev);
181static int claw_tx(struct sk_buff *skb, struct net_device *dev);
182static int claw_change_mtu( struct net_device *dev, int new_mtu);
183static int claw_open(struct net_device *dev);
184static void claw_irq_handler(struct ccw_device *cdev,
185 unsigned long intparm, struct irb *irb);
186static void claw_irq_tasklet ( unsigned long data );
187static int claw_release(struct net_device *dev);
188static void claw_write_retry ( struct chbk * p_ch );
189static void claw_write_next ( struct chbk * p_ch );
190static void claw_timer ( struct chbk * p_ch );
191
192/* Functions */
193static int add_claw_reads(struct net_device *dev,
194 struct ccwbk* p_first, struct ccwbk* p_last);
195static void ccw_check_return_code (struct ccw_device *cdev, int return_code);
196static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense );
197static int find_link(struct net_device *dev, char *host_name, char *ws_name );
198static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
199static int init_ccw_bk(struct net_device *dev);
200static void probe_error( struct ccwgroup_device *cgdev);
201static struct net_device_stats *claw_stats(struct net_device *dev);
202static int pages_to_order_of_mag(int num_of_pages);
203static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
204/* sysfs Functions */
205static ssize_t claw_hname_show(struct device *dev,
206 struct device_attribute *attr, char *buf);
207static ssize_t claw_hname_write(struct device *dev,
208 struct device_attribute *attr,
209 const char *buf, size_t count);
210static ssize_t claw_adname_show(struct device *dev,
211 struct device_attribute *attr, char *buf);
212static ssize_t claw_adname_write(struct device *dev,
213 struct device_attribute *attr,
214 const char *buf, size_t count);
215static ssize_t claw_apname_show(struct device *dev,
216 struct device_attribute *attr, char *buf);
217static ssize_t claw_apname_write(struct device *dev,
218 struct device_attribute *attr,
219 const char *buf, size_t count);
220static ssize_t claw_wbuff_show(struct device *dev,
221 struct device_attribute *attr, char *buf);
222static ssize_t claw_wbuff_write(struct device *dev,
223 struct device_attribute *attr,
224 const char *buf, size_t count);
225static ssize_t claw_rbuff_show(struct device *dev,
226 struct device_attribute *attr, char *buf);
227static ssize_t claw_rbuff_write(struct device *dev,
228 struct device_attribute *attr,
229 const char *buf, size_t count);
230
231/* Functions for System Validate */
232static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw);
233static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
234 __u8 correlator, __u8 rc , char *local_name, char *remote_name);
235static int claw_snd_conn_req(struct net_device *dev, __u8 link);
236static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl);
237static int claw_snd_sys_validate_rsp(struct net_device *dev,
238 struct clawctl * p_ctl, __u32 return_code);
239static int claw_strt_conn_req(struct net_device *dev );
240static void claw_strt_read(struct net_device *dev, int lock);
241static void claw_strt_out_IO(struct net_device *dev);
242static void claw_free_wrt_buf(struct net_device *dev);
243
244/* Functions for unpack reads */
245static void unpack_read(struct net_device *dev);
246
247static int claw_pm_prepare(struct ccwgroup_device *gdev)
248{
249 return -EPERM;
250}
251
252/* the root device for claw group devices */
253static struct device *claw_root_dev;
254
255/* ccwgroup table */
256
257static struct ccwgroup_driver claw_group_driver = {
258 .driver = {
259 .owner = THIS_MODULE,
260 .name = "claw",
261 },
262 .setup = claw_probe,
263 .remove = claw_remove_device,
264 .set_online = claw_new_device,
265 .set_offline = claw_shutdown_device,
266 .prepare = claw_pm_prepare,
267};
268
269static struct ccw_device_id claw_ids[] = {
270 {CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
271 {},
272};
273MODULE_DEVICE_TABLE(ccw, claw_ids);
274
275static struct ccw_driver claw_ccw_driver = {
276 .driver = {
277 .owner = THIS_MODULE,
278 .name = "claw",
279 },
280 .ids = claw_ids,
281 .probe = ccwgroup_probe_ccwdev,
282 .remove = ccwgroup_remove_ccwdev,
283 .int_class = IRQIO_CLW,
284};
285
286static ssize_t claw_driver_group_store(struct device_driver *ddrv,
287 const char *buf, size_t count)
288{
289 int err;
290 err = ccwgroup_create_dev(claw_root_dev, &claw_group_driver, 2, buf);
291 return err ? err : count;
292}
293static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
294
295static struct attribute *claw_drv_attrs[] = {
296 &driver_attr_group.attr,
297 NULL,
298};
299static struct attribute_group claw_drv_attr_group = {
300 .attrs = claw_drv_attrs,
301};
302static const struct attribute_group *claw_drv_attr_groups[] = {
303 &claw_drv_attr_group,
304 NULL,
305};
306
307/*
308* Key functions
309*/
310
311/*-------------------------------------------------------------------*
312 * claw_tx *
313 *-------------------------------------------------------------------*/
314
315static int
316claw_tx(struct sk_buff *skb, struct net_device *dev)
317{
318 int rc;
319 struct claw_privbk *privptr = dev->ml_priv;
320 unsigned long saveflags;
321 struct chbk *p_ch;
322
323 CLAW_DBF_TEXT(4, trace, "claw_tx");
324 p_ch = &privptr->channel[WRITE_CHANNEL];
325 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
326 rc=claw_hw_tx( skb, dev, 1 );
327 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
328 CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc);
329 if (rc)
330 rc = NETDEV_TX_BUSY;
331 else
332 rc = NETDEV_TX_OK;
333 return rc;
334} /* end of claw_tx */
335
336/*------------------------------------------------------------------*
337 * pack the collect queue into an skb and return it *
338 * If not packing just return the top skb from the queue *
339 *------------------------------------------------------------------*/
340
341static struct sk_buff *
342claw_pack_skb(struct claw_privbk *privptr)
343{
344 struct sk_buff *new_skb,*held_skb;
345 struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
346 struct claw_env *p_env = privptr->p_env;
347 int pkt_cnt,pk_ind,so_far;
348
349 new_skb = NULL; /* assume no dice */
350 pkt_cnt = 0;
351 CLAW_DBF_TEXT(4, trace, "PackSKBe");
352 if (!skb_queue_empty(&p_ch->collect_queue)) {
353 /* some data */
354 held_skb = skb_dequeue(&p_ch->collect_queue);
355 if (held_skb)
356 dev_kfree_skb_any(held_skb);
357 else
358 return NULL;
359 if (p_env->packing != DO_PACKED)
360 return held_skb;
361 /* get a new SKB we will pack at least one */
362 new_skb = dev_alloc_skb(p_env->write_size);
363 if (new_skb == NULL) {
364 atomic_inc(&held_skb->users);
365 skb_queue_head(&p_ch->collect_queue,held_skb);
366 return NULL;
367 }
368 /* we have packed packet and a place to put it */
369 pk_ind = 1;
370 so_far = 0;
371 new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
372 while ((pk_ind) && (held_skb != NULL)) {
373 if (held_skb->len+so_far <= p_env->write_size-8) {
374 memcpy(skb_put(new_skb,held_skb->len),
375 held_skb->data,held_skb->len);
376 privptr->stats.tx_packets++;
377 so_far += held_skb->len;
378 pkt_cnt++;
379 dev_kfree_skb_any(held_skb);
380 held_skb = skb_dequeue(&p_ch->collect_queue);
381 if (held_skb)
382 atomic_dec(&held_skb->users);
383 } else {
384 pk_ind = 0;
385 atomic_inc(&held_skb->users);
386 skb_queue_head(&p_ch->collect_queue,held_skb);
387 }
388 }
389 }
390 CLAW_DBF_TEXT(4, trace, "PackSKBx");
391 return new_skb;
392}
393
394/*-------------------------------------------------------------------*
395 * claw_change_mtu *
396 * *
397 *-------------------------------------------------------------------*/
398
399static int
400claw_change_mtu(struct net_device *dev, int new_mtu)
401{
402 struct claw_privbk *privptr = dev->ml_priv;
403 int buff_size;
404 CLAW_DBF_TEXT(4, trace, "setmtu");
405 buff_size = privptr->p_env->write_size;
406 if ((new_mtu < 60) || (new_mtu > buff_size)) {
407 return -EINVAL;
408 }
409 dev->mtu = new_mtu;
410 return 0;
411} /* end of claw_change_mtu */
412
413
414/*-------------------------------------------------------------------*
415 * claw_open *
416 * *
417 *-------------------------------------------------------------------*/
418static int
419claw_open(struct net_device *dev)
420{
421
422 int rc;
423 int i;
424 unsigned long saveflags=0;
425 unsigned long parm;
426 struct claw_privbk *privptr;
427 DECLARE_WAITQUEUE(wait, current);
428 struct timer_list timer;
429 struct ccwbk *p_buf;
430
431 CLAW_DBF_TEXT(4, trace, "open");
432 privptr = (struct claw_privbk *)dev->ml_priv;
433 /* allocate and initialize CCW blocks */
434 if (privptr->buffs_alloc == 0) {
435 rc=init_ccw_bk(dev);
436 if (rc) {
437 CLAW_DBF_TEXT(2, trace, "openmem");
438 return -ENOMEM;
439 }
440 }
441 privptr->system_validate_comp=0;
442 privptr->release_pend=0;
443 if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
444 privptr->p_env->read_size=DEF_PACK_BUFSIZE;
445 privptr->p_env->write_size=DEF_PACK_BUFSIZE;
446 privptr->p_env->packing=PACKING_ASK;
447 } else {
448 privptr->p_env->packing=0;
449 privptr->p_env->read_size=CLAW_FRAME_SIZE;
450 privptr->p_env->write_size=CLAW_FRAME_SIZE;
451 }
452 claw_set_busy(dev);
453 tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
454 (unsigned long) &privptr->channel[READ_CHANNEL]);
455 for ( i = 0; i < 2; i++) {
456 CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
457 init_waitqueue_head(&privptr->channel[i].wait);
458 /* skb_queue_head_init(&p_ch->io_queue); */
459 if (i == WRITE_CHANNEL)
460 skb_queue_head_init(
461 &privptr->channel[WRITE_CHANNEL].collect_queue);
462 privptr->channel[i].flag_a = 0;
463 privptr->channel[i].IO_active = 0;
464 privptr->channel[i].flag &= ~CLAW_TIMER;
465 init_timer(&timer);
466 timer.function = (void *)claw_timer;
467 timer.data = (unsigned long)(&privptr->channel[i]);
468 timer.expires = jiffies + 15*HZ;
469 add_timer(&timer);
470 spin_lock_irqsave(get_ccwdev_lock(
471 privptr->channel[i].cdev), saveflags);
472 parm = (unsigned long) &privptr->channel[i];
473 privptr->channel[i].claw_state = CLAW_START_HALT_IO;
474 rc = 0;
475 add_wait_queue(&privptr->channel[i].wait, &wait);
476 rc = ccw_device_halt(
477 (struct ccw_device *)privptr->channel[i].cdev,parm);
478 set_current_state(TASK_INTERRUPTIBLE);
479 spin_unlock_irqrestore(
480 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
481 schedule();
482 remove_wait_queue(&privptr->channel[i].wait, &wait);
483 if(rc != 0)
484 ccw_check_return_code(privptr->channel[i].cdev, rc);
485 if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
486 del_timer(&timer);
487 }
488 if ((((privptr->channel[READ_CHANNEL].last_dstat |
489 privptr->channel[WRITE_CHANNEL].last_dstat) &
490 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
491 (((privptr->channel[READ_CHANNEL].flag |
492 privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
493 dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
494 "%s: remote side is not ready\n", dev->name);
495 CLAW_DBF_TEXT(2, trace, "notrdy");
496
497 for ( i = 0; i < 2; i++) {
498 spin_lock_irqsave(
499 get_ccwdev_lock(privptr->channel[i].cdev),
500 saveflags);
501 parm = (unsigned long) &privptr->channel[i];
502 privptr->channel[i].claw_state = CLAW_STOP;
503 rc = ccw_device_halt(
504 (struct ccw_device *)&privptr->channel[i].cdev,
505 parm);
506 spin_unlock_irqrestore(
507 get_ccwdev_lock(privptr->channel[i].cdev),
508 saveflags);
509 if (rc != 0) {
510 ccw_check_return_code(
511 privptr->channel[i].cdev, rc);
512 }
513 }
514 free_pages((unsigned long)privptr->p_buff_ccw,
515 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
516 if (privptr->p_env->read_size < PAGE_SIZE) {
517 free_pages((unsigned long)privptr->p_buff_read,
518 (int)pages_to_order_of_mag(
519 privptr->p_buff_read_num));
520 }
521 else {
522 p_buf=privptr->p_read_active_first;
523 while (p_buf!=NULL) {
524 free_pages((unsigned long)p_buf->p_buffer,
525 (int)pages_to_order_of_mag(
526 privptr->p_buff_pages_perread ));
527 p_buf=p_buf->next;
528 }
529 }
530 if (privptr->p_env->write_size < PAGE_SIZE ) {
531 free_pages((unsigned long)privptr->p_buff_write,
532 (int)pages_to_order_of_mag(
533 privptr->p_buff_write_num));
534 }
535 else {
536 p_buf=privptr->p_write_active_first;
537 while (p_buf!=NULL) {
538 free_pages((unsigned long)p_buf->p_buffer,
539 (int)pages_to_order_of_mag(
540 privptr->p_buff_pages_perwrite ));
541 p_buf=p_buf->next;
542 }
543 }
544 privptr->buffs_alloc = 0;
545 privptr->channel[READ_CHANNEL].flag = 0x00;
546 privptr->channel[WRITE_CHANNEL].flag = 0x00;
547 privptr->p_buff_ccw=NULL;
548 privptr->p_buff_read=NULL;
549 privptr->p_buff_write=NULL;
550 claw_clear_busy(dev);
551 CLAW_DBF_TEXT(2, trace, "open EIO");
552 return -EIO;
553 }
554
555 /* Send SystemValidate command */
556
557 claw_clear_busy(dev);
558 CLAW_DBF_TEXT(4, trace, "openok");
559 return 0;
560} /* end of claw_open */
561
562/*-------------------------------------------------------------------*
563* *
564* claw_irq_handler *
565* *
566*--------------------------------------------------------------------*/
567static void
568claw_irq_handler(struct ccw_device *cdev,
569 unsigned long intparm, struct irb *irb)
570{
571 struct chbk *p_ch = NULL;
572 struct claw_privbk *privptr = NULL;
573 struct net_device *dev = NULL;
574 struct claw_env *p_env;
575 struct chbk *p_ch_r=NULL;
576
577 CLAW_DBF_TEXT(4, trace, "clawirq");
578 /* Bypass all 'unsolicited interrupts' */
579 privptr = dev_get_drvdata(&cdev->dev);
580 if (!privptr) {
581 dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
582 " IRQ, c-%02x d-%02x\n",
583 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
584 CLAW_DBF_TEXT(2, trace, "badirq");
585 return;
586 }
587
588 /* Try to extract channel from driver data. */
589 if (privptr->channel[READ_CHANNEL].cdev == cdev)
590 p_ch = &privptr->channel[READ_CHANNEL];
591 else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
592 p_ch = &privptr->channel[WRITE_CHANNEL];
593 else {
594 dev_warn(&cdev->dev, "The device is not a CLAW device\n");
595 CLAW_DBF_TEXT(2, trace, "badchan");
596 return;
597 }
598 CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag);
599
600 dev = (struct net_device *) (p_ch->ndev);
601 p_env=privptr->p_env;
602
603 /* Copy interruption response block. */
604 memcpy(p_ch->irb, irb, sizeof(struct irb));
605
606 /* Check for good subchannel return code, otherwise info message */
607 if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
608 dev_info(&cdev->dev,
609 "%s: subchannel check for device: %04x -"
610 " Sch Stat %02x Dev Stat %02x CPA - %04x\n",
611 dev->name, p_ch->devno,
612 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
613 irb->scsw.cmd.cpa);
614 CLAW_DBF_TEXT(2, trace, "chanchk");
615 /* return; */
616 }
617
618 /* Check the reason-code of a unit check */
619 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
620 ccw_check_unit_check(p_ch, irb->ecw[0]);
621
622 /* State machine to bring the connection up, down and to restart */
623 p_ch->last_dstat = irb->scsw.cmd.dstat;
624
625 switch (p_ch->claw_state) {
626 case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
627 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
628 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
629 (p_ch->irb->scsw.cmd.stctl ==
630 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))))
631 return;
632 wake_up(&p_ch->wait); /* wake up claw_release */
633 CLAW_DBF_TEXT(4, trace, "stop");
634 return;
635 case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */
636 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
637 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
638 (p_ch->irb->scsw.cmd.stctl ==
639 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
640 CLAW_DBF_TEXT(4, trace, "haltio");
641 return;
642 }
643 if (p_ch->flag == CLAW_READ) {
644 p_ch->claw_state = CLAW_START_READ;
645 wake_up(&p_ch->wait); /* wake claw_open (READ)*/
646 } else if (p_ch->flag == CLAW_WRITE) {
647 p_ch->claw_state = CLAW_START_WRITE;
648 /* send SYSTEM_VALIDATE */
649 claw_strt_read(dev, LOCK_NO);
650 claw_send_control(dev,
651 SYSTEM_VALIDATE_REQUEST,
652 0, 0, 0,
653 p_env->host_name,
654 p_env->adapter_name);
655 } else {
656 dev_warn(&cdev->dev, "The CLAW device received"
657 " an unexpected IRQ, "
658 "c-%02x d-%02x\n",
659 irb->scsw.cmd.cstat,
660 irb->scsw.cmd.dstat);
661 return;
662 }
663 CLAW_DBF_TEXT(4, trace, "haltio");
664 return;
665 case CLAW_START_READ:
666 CLAW_DBF_TEXT(4, trace, "ReadIRQ");
667 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
668 clear_bit(0, (void *)&p_ch->IO_active);
669 if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
670 (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
671 (p_ch->irb->ecw[0]) == 0) {
672 privptr->stats.rx_errors++;
673 dev_info(&cdev->dev,
674 "%s: Restart is required after remote "
675 "side recovers \n",
676 dev->name);
677 }
678 CLAW_DBF_TEXT(4, trace, "notrdy");
679 return;
680 }
681 if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
682 (p_ch->irb->scsw.cmd.dstat == 0)) {
683 if (test_and_set_bit(CLAW_BH_ACTIVE,
684 (void *)&p_ch->flag_a) == 0)
685 tasklet_schedule(&p_ch->tasklet);
686 else
687 CLAW_DBF_TEXT(4, trace, "PCINoBH");
688 CLAW_DBF_TEXT(4, trace, "PCI_read");
689 return;
690 }
691 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
692 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
693 (p_ch->irb->scsw.cmd.stctl ==
694 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
695 CLAW_DBF_TEXT(4, trace, "SPend_rd");
696 return;
697 }
698 clear_bit(0, (void *)&p_ch->IO_active);
699 claw_clearbit_busy(TB_RETRY, dev);
700 if (test_and_set_bit(CLAW_BH_ACTIVE,
701 (void *)&p_ch->flag_a) == 0)
702 tasklet_schedule(&p_ch->tasklet);
703 else
704 CLAW_DBF_TEXT(4, trace, "RdBHAct");
705 CLAW_DBF_TEXT(4, trace, "RdIRQXit");
706 return;
707 case CLAW_START_WRITE:
708 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
709 dev_info(&cdev->dev,
710 "%s: Unit Check Occurred in "
711 "write channel\n", dev->name);
712 clear_bit(0, (void *)&p_ch->IO_active);
713 if (p_ch->irb->ecw[0] & 0x80) {
714 dev_info(&cdev->dev,
715 "%s: Resetting Event "
716 "occurred:\n", dev->name);
717 init_timer(&p_ch->timer);
718 p_ch->timer.function =
719 (void *)claw_write_retry;
720 p_ch->timer.data = (unsigned long)p_ch;
721 p_ch->timer.expires = jiffies + 10*HZ;
722 add_timer(&p_ch->timer);
723 dev_info(&cdev->dev,
724 "%s: write connection "
725 "restarting\n", dev->name);
726 }
727 CLAW_DBF_TEXT(4, trace, "rstrtwrt");
728 return;
729 }
730 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
731 clear_bit(0, (void *)&p_ch->IO_active);
732 dev_info(&cdev->dev,
733 "%s: Unit Exception "
734 "occurred in write channel\n",
735 dev->name);
736 }
737 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
738 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
739 (p_ch->irb->scsw.cmd.stctl ==
740 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
741 CLAW_DBF_TEXT(4, trace, "writeUE");
742 return;
743 }
744 clear_bit(0, (void *)&p_ch->IO_active);
745 if (claw_test_and_setbit_busy(TB_TX, dev) == 0) {
746 claw_write_next(p_ch);
747 claw_clearbit_busy(TB_TX, dev);
748 claw_clear_busy(dev);
749 }
750 p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
751 if (test_and_set_bit(CLAW_BH_ACTIVE,
752 (void *)&p_ch_r->flag_a) == 0)
753 tasklet_schedule(&p_ch_r->tasklet);
754 CLAW_DBF_TEXT(4, trace, "StWtExit");
755 return;
756 default:
757 dev_warn(&cdev->dev,
758 "The CLAW device for %s received an unexpected IRQ\n",
759 dev->name);
760 CLAW_DBF_TEXT(2, trace, "badIRQ");
761 return;
762 }
763
764} /* end of claw_irq_handler */
765
766
767/*-------------------------------------------------------------------*
768* claw_irq_tasklet *
769* *
770*--------------------------------------------------------------------*/
771static void
772claw_irq_tasklet ( unsigned long data )
773{
774 struct chbk * p_ch;
775 struct net_device *dev;
776
777 p_ch = (struct chbk *) data;
778 dev = (struct net_device *)p_ch->ndev;
779 CLAW_DBF_TEXT(4, trace, "IRQtask");
780 unpack_read(dev);
781 clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
782 CLAW_DBF_TEXT(4, trace, "TskletXt");
783 return;
784} /* end of claw_irq_bh */
785
786/*-------------------------------------------------------------------*
787* claw_release *
788* *
789*--------------------------------------------------------------------*/
790static int
791claw_release(struct net_device *dev)
792{
793 int rc;
794 int i;
795 unsigned long saveflags;
796 unsigned long parm;
797 struct claw_privbk *privptr;
798 DECLARE_WAITQUEUE(wait, current);
799 struct ccwbk* p_this_ccw;
800 struct ccwbk* p_buf;
801
802 if (!dev)
803 return 0;
804 privptr = (struct claw_privbk *)dev->ml_priv;
805 if (!privptr)
806 return 0;
807 CLAW_DBF_TEXT(4, trace, "release");
808 privptr->release_pend=1;
809 claw_setbit_busy(TB_STOP,dev);
810 for ( i = 1; i >=0 ; i--) {
811 spin_lock_irqsave(
812 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
813 /* del_timer(&privptr->channel[READ_CHANNEL].timer); */
814 privptr->channel[i].claw_state = CLAW_STOP;
815 privptr->channel[i].IO_active = 0;
816 parm = (unsigned long) &privptr->channel[i];
817 if (i == WRITE_CHANNEL)
818 claw_purge_skb_queue(
819 &privptr->channel[WRITE_CHANNEL].collect_queue);
820 rc = ccw_device_halt (privptr->channel[i].cdev, parm);
821 if (privptr->system_validate_comp==0x00) /* never opened? */
822 init_waitqueue_head(&privptr->channel[i].wait);
823 add_wait_queue(&privptr->channel[i].wait, &wait);
824 set_current_state(TASK_INTERRUPTIBLE);
825 spin_unlock_irqrestore(
826 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
827 schedule();
828 remove_wait_queue(&privptr->channel[i].wait, &wait);
829 if (rc != 0) {
830 ccw_check_return_code(privptr->channel[i].cdev, rc);
831 }
832 }
833 if (privptr->pk_skb != NULL) {
834 dev_kfree_skb_any(privptr->pk_skb);
835 privptr->pk_skb = NULL;
836 }
837 if(privptr->buffs_alloc != 1) {
838 CLAW_DBF_TEXT(4, trace, "none2fre");
839 return 0;
840 }
841 CLAW_DBF_TEXT(4, trace, "freebufs");
842 if (privptr->p_buff_ccw != NULL) {
843 free_pages((unsigned long)privptr->p_buff_ccw,
844 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
845 }
846 CLAW_DBF_TEXT(4, trace, "freeread");
847 if (privptr->p_env->read_size < PAGE_SIZE) {
848 if (privptr->p_buff_read != NULL) {
849 free_pages((unsigned long)privptr->p_buff_read,
850 (int)pages_to_order_of_mag(privptr->p_buff_read_num));
851 }
852 }
853 else {
854 p_buf=privptr->p_read_active_first;
855 while (p_buf!=NULL) {
856 free_pages((unsigned long)p_buf->p_buffer,
857 (int)pages_to_order_of_mag(
858 privptr->p_buff_pages_perread ));
859 p_buf=p_buf->next;
860 }
861 }
862 CLAW_DBF_TEXT(4, trace, "freewrit");
863 if (privptr->p_env->write_size < PAGE_SIZE ) {
864 free_pages((unsigned long)privptr->p_buff_write,
865 (int)pages_to_order_of_mag(privptr->p_buff_write_num));
866 }
867 else {
868 p_buf=privptr->p_write_active_first;
869 while (p_buf!=NULL) {
870 free_pages((unsigned long)p_buf->p_buffer,
871 (int)pages_to_order_of_mag(
872 privptr->p_buff_pages_perwrite ));
873 p_buf=p_buf->next;
874 }
875 }
876 CLAW_DBF_TEXT(4, trace, "clearptr");
877 privptr->buffs_alloc = 0;
878 privptr->p_buff_ccw=NULL;
879 privptr->p_buff_read=NULL;
880 privptr->p_buff_write=NULL;
881 privptr->system_validate_comp=0;
882 privptr->release_pend=0;
883 /* Remove any writes that were pending and reset all reads */
884 p_this_ccw=privptr->p_read_active_first;
885 while (p_this_ccw!=NULL) {
886 p_this_ccw->header.length=0xffff;
887 p_this_ccw->header.opcode=0xff;
888 p_this_ccw->header.flag=0x00;
889 p_this_ccw=p_this_ccw->next;
890 }
891
892 while (privptr->p_write_active_first!=NULL) {
893 p_this_ccw=privptr->p_write_active_first;
894 p_this_ccw->header.flag=CLAW_PENDING;
895 privptr->p_write_active_first=p_this_ccw->next;
896 p_this_ccw->next=privptr->p_write_free_chain;
897 privptr->p_write_free_chain=p_this_ccw;
898 ++privptr->write_free_count;
899 }
900 privptr->p_write_active_last=NULL;
901 privptr->mtc_logical_link = -1;
902 privptr->mtc_skipping = 1;
903 privptr->mtc_offset=0;
904
905 if (((privptr->channel[READ_CHANNEL].last_dstat |
906 privptr->channel[WRITE_CHANNEL].last_dstat) &
907 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
908 dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
909 "Deactivating %s completed with incorrect"
910 " subchannel status "
911 "(read %02x, write %02x)\n",
912 dev->name,
913 privptr->channel[READ_CHANNEL].last_dstat,
914 privptr->channel[WRITE_CHANNEL].last_dstat);
915 CLAW_DBF_TEXT(2, trace, "badclose");
916 }
917 CLAW_DBF_TEXT(4, trace, "rlsexit");
918 return 0;
919} /* end of claw_release */
920
921/*-------------------------------------------------------------------*
922* claw_write_retry *
923* *
924*--------------------------------------------------------------------*/
925
926static void
927claw_write_retry ( struct chbk *p_ch )
928{
929
930 struct net_device *dev=p_ch->ndev;
931
932 CLAW_DBF_TEXT(4, trace, "w_retry");
933 if (p_ch->claw_state == CLAW_STOP) {
934 return;
935 }
936 claw_strt_out_IO( dev );
937 CLAW_DBF_TEXT(4, trace, "rtry_xit");
938 return;
939} /* end of claw_write_retry */
940
941
942/*-------------------------------------------------------------------*
943* claw_write_next *
944* *
945*--------------------------------------------------------------------*/
946
947static void
948claw_write_next ( struct chbk * p_ch )
949{
950
951 struct net_device *dev;
952 struct claw_privbk *privptr=NULL;
953 struct sk_buff *pk_skb;
954
955 CLAW_DBF_TEXT(4, trace, "claw_wrt");
956 if (p_ch->claw_state == CLAW_STOP)
957 return;
958 dev = (struct net_device *) p_ch->ndev;
959 privptr = (struct claw_privbk *) dev->ml_priv;
960 claw_free_wrt_buf( dev );
961 if ((privptr->write_free_count > 0) &&
962 !skb_queue_empty(&p_ch->collect_queue)) {
963 pk_skb = claw_pack_skb(privptr);
964 while (pk_skb != NULL) {
965 claw_hw_tx(pk_skb, dev, 1);
966 if (privptr->write_free_count > 0) {
967 pk_skb = claw_pack_skb(privptr);
968 } else
969 pk_skb = NULL;
970 }
971 }
972 if (privptr->p_write_active_first!=NULL) {
973 claw_strt_out_IO(dev);
974 }
975 return;
976} /* end of claw_write_next */
977
978/*-------------------------------------------------------------------*
979* *
980* claw_timer *
981*--------------------------------------------------------------------*/
982
983static void
984claw_timer ( struct chbk * p_ch )
985{
986 CLAW_DBF_TEXT(4, trace, "timer");
987 p_ch->flag |= CLAW_TIMER;
988 wake_up(&p_ch->wait);
989 return;
990} /* end of claw_timer */
991
992/*
993*
994* functions
995*/
996
997
998/*-------------------------------------------------------------------*
999* *
1000* pages_to_order_of_mag *
1001* *
1002* takes a number of pages from 1 to 512 and returns the *
1003* log(num_pages)/log(2) get_free_pages() needs a base 2 order *
1004* of magnitude get_free_pages() has an upper order of 9 *
1005*--------------------------------------------------------------------*/
1006
1007static int
1008pages_to_order_of_mag(int num_of_pages)
1009{
1010 int order_of_mag=1; /* assume 2 pages */
1011 int nump;
1012
1013 CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages);
1014 if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */
1015 /* 512 pages = 2Meg on 4k page systems */
1016 if (num_of_pages >= 512) {return 9; }
1017 /* we have two or more pages order is at least 1 */
1018 for (nump=2 ;nump <= 512;nump*=2) {
1019 if (num_of_pages <= nump)
1020 break;
1021 order_of_mag +=1;
1022 }
1023 if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */
1024 CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag);
1025 return order_of_mag;
1026}
1027
1028/*-------------------------------------------------------------------*
1029* *
1030* add_claw_reads *
1031* *
1032*--------------------------------------------------------------------*/
1033static int
1034add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1035 struct ccwbk* p_last)
1036{
1037 struct claw_privbk *privptr;
1038 struct ccw1 temp_ccw;
1039 struct endccw * p_end;
1040 CLAW_DBF_TEXT(4, trace, "addreads");
1041 privptr = dev->ml_priv;
1042 p_end = privptr->p_end_ccw;
1043
1044 /* first CCW and last CCW contains a new set of read channel programs
1045 * to apend the running channel programs
1046 */
1047 if ( p_first==NULL) {
1048 CLAW_DBF_TEXT(4, trace, "addexit");
1049 return 0;
1050 }
1051
1052 /* set up ending CCW sequence for this segment */
1053 if (p_end->read1) {
1054 p_end->read1=0x00; /* second ending CCW is now active */
1055 /* reset ending CCWs and setup TIC CCWs */
1056 p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1057 p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1058 p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1);
1059 p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1);
1060 p_end->read2_nop2.cda=0;
1061 p_end->read2_nop2.count=1;
1062 }
1063 else {
1064 p_end->read1=0x01; /* first ending CCW is now active */
1065 /* reset ending CCWs and setup TIC CCWs */
1066 p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1067 p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1068 p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
1069 p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
1070 p_end->read1_nop2.cda=0;
1071 p_end->read1_nop2.count=1;
1072 }
1073
1074 if ( privptr-> p_read_active_first ==NULL ) {
1075 privptr->p_read_active_first = p_first; /* set new first */
1076 privptr->p_read_active_last = p_last; /* set new last */
1077 }
1078 else {
1079
1080 /* set up TIC ccw */
1081 temp_ccw.cda= (__u32)__pa(&p_first->read);
1082 temp_ccw.count=0;
1083 temp_ccw.flags=0;
1084 temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;
1085
1086
1087 if (p_end->read1) {
1088
1089 /* first set of CCW's is chained to the new read */
1090 /* chain, so the second set is chained to the active chain. */
1091 /* Therefore modify the second set to point to the new */
1092 /* read chain set up TIC CCWs */
1093 /* make sure we update the CCW so channel doesn't fetch it */
1094 /* when it's only half done */
1095 memcpy( &p_end->read2_nop2, &temp_ccw ,
1096 sizeof(struct ccw1));
1097 privptr->p_read_active_last->r_TIC_1.cda=
1098 (__u32)__pa(&p_first->read);
1099 privptr->p_read_active_last->r_TIC_2.cda=
1100 (__u32)__pa(&p_first->read);
1101 }
1102 else {
1103 /* make sure we update the CCW so channel doesn't */
1104 /* fetch it when it is only half done */
1105 memcpy( &p_end->read1_nop2, &temp_ccw ,
1106 sizeof(struct ccw1));
1107 privptr->p_read_active_last->r_TIC_1.cda=
1108 (__u32)__pa(&p_first->read);
1109 privptr->p_read_active_last->r_TIC_2.cda=
1110 (__u32)__pa(&p_first->read);
1111 }
1112 /* chain in new set of blocks */
1113 privptr->p_read_active_last->next = p_first;
1114 privptr->p_read_active_last=p_last;
1115 } /* end of if ( privptr-> p_read_active_first ==NULL) */
1116 CLAW_DBF_TEXT(4, trace, "addexit");
1117 return 0;
1118} /* end of add_claw_reads */
1119
1120/*-------------------------------------------------------------------*
1121 * ccw_check_return_code *
1122 * *
1123 *-------------------------------------------------------------------*/
1124
1125static void
1126ccw_check_return_code(struct ccw_device *cdev, int return_code)
1127{
1128 CLAW_DBF_TEXT(4, trace, "ccwret");
1129 if (return_code != 0) {
1130 switch (return_code) {
1131 case -EBUSY: /* BUSY is a transient state no action needed */
1132 break;
1133 case -ENODEV:
1134 dev_err(&cdev->dev, "The remote channel adapter is not"
1135 " available\n");
1136 break;
1137 case -EINVAL:
1138 dev_err(&cdev->dev,
1139 "The status of the remote channel adapter"
1140 " is not valid\n");
1141 break;
1142 default:
1143 dev_err(&cdev->dev, "The common device layer"
1144 " returned error code %d\n",
1145 return_code);
1146 }
1147 }
1148 CLAW_DBF_TEXT(4, trace, "ccwret");
1149} /* end of ccw_check_return_code */
1150
1151/*-------------------------------------------------------------------*
1152* ccw_check_unit_check *
1153*--------------------------------------------------------------------*/
1154
1155static void
1156ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1157{
1158 struct net_device *ndev = p_ch->ndev;
1159 struct device *dev = &p_ch->cdev->dev;
1160
1161 CLAW_DBF_TEXT(4, trace, "unitchek");
1162 dev_warn(dev, "The communication peer of %s disconnected\n",
1163 ndev->name);
1164
1165 if (sense & 0x40) {
1166 if (sense & 0x01) {
1167 dev_warn(dev, "The remote channel adapter for"
1168 " %s has been reset\n",
1169 ndev->name);
1170 }
1171 } else if (sense & 0x20) {
1172 if (sense & 0x04) {
1173 dev_warn(dev, "A data streaming timeout occurred"
1174 " for %s\n",
1175 ndev->name);
1176 } else if (sense & 0x10) {
1177 dev_warn(dev, "The remote channel adapter for %s"
1178 " is faulty\n",
1179 ndev->name);
1180 } else {
1181 dev_warn(dev, "A data transfer parity error occurred"
1182 " for %s\n",
1183 ndev->name);
1184 }
1185 } else if (sense & 0x10) {
1186 dev_warn(dev, "A read data parity error occurred"
1187 " for %s\n",
1188 ndev->name);
1189 }
1190
1191} /* end of ccw_check_unit_check */
1192
1193/*-------------------------------------------------------------------*
1194* find_link *
1195*--------------------------------------------------------------------*/
1196static int
1197find_link(struct net_device *dev, char *host_name, char *ws_name )
1198{
1199 struct claw_privbk *privptr;
1200 struct claw_env *p_env;
1201 int rc=0;
1202
1203 CLAW_DBF_TEXT(2, setup, "findlink");
1204 privptr = dev->ml_priv;
1205 p_env=privptr->p_env;
1206 switch (p_env->packing)
1207 {
1208 case PACKING_ASK:
1209 if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) ||
1210 (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 ))
1211 rc = EINVAL;
1212 break;
1213 case DO_PACKED:
1214 case PACK_SEND:
1215 if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) ||
1216 (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 ))
1217 rc = EINVAL;
1218 break;
1219 default:
1220 if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) ||
1221 (memcmp(p_env->api_type , ws_name, 8)!=0))
1222 rc = EINVAL;
1223 break;
1224 }
1225
1226 return rc;
1227} /* end of find_link */
1228
1229/*-------------------------------------------------------------------*
1230 * claw_hw_tx *
1231 * *
1232 * *
1233 *-------------------------------------------------------------------*/
1234
1235static int
1236claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1237{
1238 int rc=0;
1239 struct claw_privbk *privptr;
1240 struct ccwbk *p_this_ccw;
1241 struct ccwbk *p_first_ccw;
1242 struct ccwbk *p_last_ccw;
1243 __u32 numBuffers;
1244 signed long len_of_data;
1245 unsigned long bytesInThisBuffer;
1246 unsigned char *pDataAddress;
1247 struct endccw *pEnd;
1248 struct ccw1 tempCCW;
1249 struct claw_env *p_env;
1250 struct clawph *pk_head;
1251 struct chbk *ch;
1252
1253 CLAW_DBF_TEXT(4, trace, "hw_tx");
1254 privptr = (struct claw_privbk *)(dev->ml_priv);
1255 p_env =privptr->p_env;
1256 claw_free_wrt_buf(dev); /* Clean up free chain if posible */
1257 /* scan the write queue to free any completed write packets */
1258 p_first_ccw=NULL;
1259 p_last_ccw=NULL;
1260 if ((p_env->packing >= PACK_SEND) &&
1261 (skb->cb[1] != 'P')) {
1262 skb_push(skb,sizeof(struct clawph));
1263 pk_head=(struct clawph *)skb->data;
1264 pk_head->len=skb->len-sizeof(struct clawph);
1265 if (pk_head->len%4) {
1266 pk_head->len+= 4-(pk_head->len%4);
1267 skb_pad(skb,4-(pk_head->len%4));
1268 skb_put(skb,4-(pk_head->len%4));
1269 }
1270 if (p_env->packing == DO_PACKED)
1271 pk_head->link_num = linkid;
1272 else
1273 pk_head->link_num = 0;
1274 pk_head->flag = 0x00;
1275 skb_pad(skb,4);
1276 skb->cb[1] = 'P';
1277 }
1278 if (linkid == 0) {
1279 if (claw_check_busy(dev)) {
1280 if (privptr->write_free_count!=0) {
1281 claw_clear_busy(dev);
1282 }
1283 else {
1284 claw_strt_out_IO(dev );
1285 claw_free_wrt_buf( dev );
1286 if (privptr->write_free_count==0) {
1287 ch = &privptr->channel[WRITE_CHANNEL];
1288 atomic_inc(&skb->users);
1289 skb_queue_tail(&ch->collect_queue, skb);
1290 goto Done;
1291 }
1292 else {
1293 claw_clear_busy(dev);
1294 }
1295 }
1296 }
1297 /* tx lock */
1298 if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
1299 ch = &privptr->channel[WRITE_CHANNEL];
1300 atomic_inc(&skb->users);
1301 skb_queue_tail(&ch->collect_queue, skb);
1302 claw_strt_out_IO(dev );
1303 rc=-EBUSY;
1304 goto Done2;
1305 }
1306 }
1307 /* See how many write buffers are required to hold this data */
1308 numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);
1309
1310 /* If that number of buffers isn't available, give up for now */
1311 if (privptr->write_free_count < numBuffers ||
1312 privptr->p_write_free_chain == NULL ) {
1313
1314 claw_setbit_busy(TB_NOBUFFER,dev);
1315 ch = &privptr->channel[WRITE_CHANNEL];
1316 atomic_inc(&skb->users);
1317 skb_queue_tail(&ch->collect_queue, skb);
1318 CLAW_DBF_TEXT(2, trace, "clawbusy");
1319 goto Done2;
1320 }
1321 pDataAddress=skb->data;
1322 len_of_data=skb->len;
1323
1324 while (len_of_data > 0) {
1325 p_this_ccw=privptr->p_write_free_chain; /* get a block */
1326 if (p_this_ccw == NULL) { /* lost the race */
1327 ch = &privptr->channel[WRITE_CHANNEL];
1328 atomic_inc(&skb->users);
1329 skb_queue_tail(&ch->collect_queue, skb);
1330 goto Done2;
1331 }
1332 privptr->p_write_free_chain=p_this_ccw->next;
1333 p_this_ccw->next=NULL;
1334 --privptr->write_free_count; /* -1 */
1335 if (len_of_data >= privptr->p_env->write_size)
1336 bytesInThisBuffer = privptr->p_env->write_size;
1337 else
1338 bytesInThisBuffer = len_of_data;
1339 memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer);
1340 len_of_data-=bytesInThisBuffer;
1341 pDataAddress+=(unsigned long)bytesInThisBuffer;
1342 /* setup write CCW */
1343 p_this_ccw->write.cmd_code = (linkid * 8) +1;
1344 if (len_of_data>0) {
1345 p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG;
1346 }
1347 p_this_ccw->write.count=bytesInThisBuffer;
1348 /* now add to end of this chain */
1349 if (p_first_ccw==NULL) {
1350 p_first_ccw=p_this_ccw;
1351 }
1352 if (p_last_ccw!=NULL) {
1353 p_last_ccw->next=p_this_ccw;
1354 /* set up TIC ccws */
1355 p_last_ccw->w_TIC_1.cda=
1356 (__u32)__pa(&p_this_ccw->write);
1357 }
1358 p_last_ccw=p_this_ccw; /* save new last block */
1359 }
1360
1361 /* FirstCCW and LastCCW now contain a new set of write channel
1362 * programs to append to the running channel program
1363 */
1364
1365 if (p_first_ccw!=NULL) {
1366 /* setup ending ccw sequence for this segment */
1367 pEnd=privptr->p_end_ccw;
1368 if (pEnd->write1) {
1369 pEnd->write1=0x00; /* second end ccw is now active */
1370 /* set up Tic CCWs */
1371 p_last_ccw->w_TIC_1.cda=
1372 (__u32)__pa(&pEnd->write2_nop1);
1373 pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1374 pEnd->write2_nop2.flags =
1375 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1376 pEnd->write2_nop2.cda=0;
1377 pEnd->write2_nop2.count=1;
1378 }
1379 else { /* end of if (pEnd->write1)*/
1380 pEnd->write1=0x01; /* first end ccw is now active */
1381 /* set up Tic CCWs */
1382 p_last_ccw->w_TIC_1.cda=
1383 (__u32)__pa(&pEnd->write1_nop1);
1384 pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1385 pEnd->write1_nop2.flags =
1386 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1387 pEnd->write1_nop2.cda=0;
1388 pEnd->write1_nop2.count=1;
1389 } /* end if if (pEnd->write1) */
1390
1391 if (privptr->p_write_active_first==NULL ) {
1392 privptr->p_write_active_first=p_first_ccw;
1393 privptr->p_write_active_last=p_last_ccw;
1394 }
1395 else {
1396 /* set up Tic CCWs */
1397
1398 tempCCW.cda=(__u32)__pa(&p_first_ccw->write);
1399 tempCCW.count=0;
1400 tempCCW.flags=0;
1401 tempCCW.cmd_code=CCW_CLAW_CMD_TIC;
1402
1403 if (pEnd->write1) {
1404
1405 /*
1406 * first set of ending CCW's is chained to the new write
1407 * chain, so the second set is chained to the active chain
1408 * Therefore modify the second set to point the new write chain.
1409 * make sure we update the CCW atomically
1410 * so channel does not fetch it when it's only half done
1411 */
1412 memcpy( &pEnd->write2_nop2, &tempCCW ,
1413 sizeof(struct ccw1));
1414 privptr->p_write_active_last->w_TIC_1.cda=
1415 (__u32)__pa(&p_first_ccw->write);
1416 }
1417 else {
1418
1419 /*make sure we update the CCW atomically
1420 *so channel does not fetch it when it's only half done
1421 */
1422 memcpy(&pEnd->write1_nop2, &tempCCW ,
1423 sizeof(struct ccw1));
1424 privptr->p_write_active_last->w_TIC_1.cda=
1425 (__u32)__pa(&p_first_ccw->write);
1426
1427 } /* end if if (pEnd->write1) */
1428
1429 privptr->p_write_active_last->next=p_first_ccw;
1430 privptr->p_write_active_last=p_last_ccw;
1431 }
1432
1433 } /* endif (p_first_ccw!=NULL) */
1434 dev_kfree_skb_any(skb);
1435 claw_strt_out_IO(dev );
1436 /* if write free count is zero , set NOBUFFER */
1437 if (privptr->write_free_count==0) {
1438 claw_setbit_busy(TB_NOBUFFER,dev);
1439 }
1440Done2:
1441 claw_clearbit_busy(TB_TX,dev);
1442Done:
1443 return(rc);
1444} /* end of claw_hw_tx */
1445
1446/*-------------------------------------------------------------------*
1447* *
1448* init_ccw_bk *
1449* *
1450*--------------------------------------------------------------------*/
1451
1452static int
1453init_ccw_bk(struct net_device *dev)
1454{
1455
1456 __u32 ccw_blocks_required;
1457 __u32 ccw_blocks_perpage;
1458 __u32 ccw_pages_required;
1459 __u32 claw_reads_perpage=1;
1460 __u32 claw_read_pages;
1461 __u32 claw_writes_perpage=1;
1462 __u32 claw_write_pages;
1463 void *p_buff=NULL;
1464 struct ccwbk*p_free_chain;
1465 struct ccwbk*p_buf;
1466 struct ccwbk*p_last_CCWB;
1467 struct ccwbk*p_first_CCWB;
1468 struct endccw *p_endccw=NULL;
1469 addr_t real_address;
1470 struct claw_privbk *privptr = dev->ml_priv;
1471 struct clawh *pClawH=NULL;
1472 addr_t real_TIC_address;
1473 int i,j;
1474 CLAW_DBF_TEXT(4, trace, "init_ccw");
1475
1476 /* initialize statistics field */
1477 privptr->active_link_ID=0;
1478 /* initialize ccwbk pointers */
1479 privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/
1480 privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/
1481 privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/
1482 privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/
1483 privptr->p_read_active_last=NULL; /* pointer to the last read ccw */
1484 privptr->p_end_ccw=NULL; /* pointer to ending ccw */
1485 privptr->p_claw_signal_blk=NULL; /* pointer to signal block */
1486 privptr->buffs_alloc = 0;
1487 memset(&privptr->end_ccw, 0x00, sizeof(struct endccw));
1488 memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl));
1489 /* initialize free write ccwbk counter */
1490 privptr->write_free_count=0; /* number of free bufs on write chain */
1491 p_last_CCWB = NULL;
1492 p_first_CCWB= NULL;
1493 /*
1494 * We need 1 CCW block for each read buffer, 1 for each
1495 * write buffer, plus 1 for ClawSignalBlock
1496 */
1497 ccw_blocks_required =
1498 privptr->p_env->read_buffers+privptr->p_env->write_buffers+1;
1499 /*
1500 * compute number of CCW blocks that will fit in a page
1501 */
1502 ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE;
1503 ccw_pages_required=
1504 DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage);
1505
1506 /*
1507 * read and write sizes are set by 2 constants in claw.h
1508 * 4k and 32k. Unpacked values other than 4k are not going to
1509 * provide good performance. With packing buffers support 32k
1510 * buffers are used.
1511 */
1512 if (privptr->p_env->read_size < PAGE_SIZE) {
1513 claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size;
1514 claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers,
1515 claw_reads_perpage);
1516 }
1517 else { /* > or equal */
1518 privptr->p_buff_pages_perread =
1519 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
1520 claw_read_pages = privptr->p_env->read_buffers *
1521 privptr->p_buff_pages_perread;
1522 }
1523 if (privptr->p_env->write_size < PAGE_SIZE) {
1524 claw_writes_perpage =
1525 PAGE_SIZE / privptr->p_env->write_size;
1526 claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers,
1527 claw_writes_perpage);
1528
1529 }
1530 else { /* > or equal */
1531 privptr->p_buff_pages_perwrite =
1532 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
1533 claw_write_pages = privptr->p_env->write_buffers *
1534 privptr->p_buff_pages_perwrite;
1535 }
1536 /*
1537 * allocate ccw_pages_required
1538 */
1539 if (privptr->p_buff_ccw==NULL) {
1540 privptr->p_buff_ccw=
1541 (void *)__get_free_pages(__GFP_DMA,
1542 (int)pages_to_order_of_mag(ccw_pages_required ));
1543 if (privptr->p_buff_ccw==NULL) {
1544 return -ENOMEM;
1545 }
1546 privptr->p_buff_ccw_num=ccw_pages_required;
1547 }
1548 memset(privptr->p_buff_ccw, 0x00,
1549 privptr->p_buff_ccw_num * PAGE_SIZE);
1550
1551 /*
1552 * obtain ending ccw block address
1553 *
1554 */
1555 privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw;
1556 real_address = (__u32)__pa(privptr->p_end_ccw);
1557 /* Initialize ending CCW block */
1558 p_endccw=privptr->p_end_ccw;
1559 p_endccw->real=real_address;
1560 p_endccw->write1=0x00;
1561 p_endccw->read1=0x00;
1562
1563 /* write1_nop1 */
1564 p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1565 p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1566 p_endccw->write1_nop1.count = 1;
1567 p_endccw->write1_nop1.cda = 0;
1568
1569 /* write1_nop2 */
1570 p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1571 p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1572 p_endccw->write1_nop2.count = 1;
1573 p_endccw->write1_nop2.cda = 0;
1574
1575 /* write2_nop1 */
1576 p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1577 p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1578 p_endccw->write2_nop1.count = 1;
1579 p_endccw->write2_nop1.cda = 0;
1580
1581 /* write2_nop2 */
1582 p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1583 p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1584 p_endccw->write2_nop2.count = 1;
1585 p_endccw->write2_nop2.cda = 0;
1586
1587 /* read1_nop1 */
1588 p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1589 p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1590 p_endccw->read1_nop1.count = 1;
1591 p_endccw->read1_nop1.cda = 0;
1592
1593 /* read1_nop2 */
1594 p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1595 p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1596 p_endccw->read1_nop2.count = 1;
1597 p_endccw->read1_nop2.cda = 0;
1598
1599 /* read2_nop1 */
1600 p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1601 p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1602 p_endccw->read2_nop1.count = 1;
1603 p_endccw->read2_nop1.cda = 0;
1604
1605 /* read2_nop2 */
1606 p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1607 p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1608 p_endccw->read2_nop2.count = 1;
1609 p_endccw->read2_nop2.cda = 0;
1610
1611 /*
1612 * Build a chain of CCWs
1613 *
1614 */
1615 p_buff=privptr->p_buff_ccw;
1616
1617 p_free_chain=NULL;
1618 for (i=0 ; i < ccw_pages_required; i++ ) {
1619 real_address = (__u32)__pa(p_buff);
1620 p_buf=p_buff;
1621 for (j=0 ; j < ccw_blocks_perpage ; j++) {
1622 p_buf->next = p_free_chain;
1623 p_free_chain = p_buf;
1624 p_buf->real=(__u32)__pa(p_buf);
1625 ++p_buf;
1626 }
1627 p_buff+=PAGE_SIZE;
1628 }
1629 /*
1630 * Initialize ClawSignalBlock
1631 *
1632 */
1633 if (privptr->p_claw_signal_blk==NULL) {
1634 privptr->p_claw_signal_blk=p_free_chain;
1635 p_free_chain=p_free_chain->next;
1636 pClawH=(struct clawh *)privptr->p_claw_signal_blk;
1637 pClawH->length=0xffff;
1638 pClawH->opcode=0xff;
1639 pClawH->flag=CLAW_BUSY;
1640 }
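	/*
	 * Editor's note: the signal block is one CCW block taken off the free
	 * chain and reused as a shared struct clawh; the signal and r_read_FF
	 * CCWs built below point their data addresses at its flag byte, which
	 * claw_strt_read() toggles between CLAW_IDLE and CLAW_BUSY.
	 */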
1641
1642 /*
1643 * allocate write_pages_required and add to free chain
1644 */
1645 if (privptr->p_buff_write==NULL) {
1646 if (privptr->p_env->write_size < PAGE_SIZE) {
1647 privptr->p_buff_write=
1648 (void *)__get_free_pages(__GFP_DMA,
1649 (int)pages_to_order_of_mag(claw_write_pages ));
1650 if (privptr->p_buff_write==NULL) {
1651 privptr->p_buff_ccw=NULL;
1652 return -ENOMEM;
1653 }
1654 /*
1655 * Build CLAW write free chain
1656 *
1657 */
1658
1659 memset(privptr->p_buff_write, 0x00,
1660				claw_write_pages * PAGE_SIZE);
1661 privptr->p_write_free_chain=NULL;
1662
1663 p_buff=privptr->p_buff_write;
1664
1665 for (i=0 ; i< privptr->p_env->write_buffers ; i++) {
1666 p_buf = p_free_chain; /* get a CCW */
1667 p_free_chain = p_buf->next;
1668 p_buf->next =privptr->p_write_free_chain;
1669 privptr->p_write_free_chain = p_buf;
1670 p_buf-> p_buffer = (struct clawbuf *)p_buff;
1671 p_buf-> write.cda = (__u32)__pa(p_buff);
1672 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1673 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1674 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1675 p_buf-> w_read_FF.count = 1;
1676 p_buf-> w_read_FF.cda =
1677 (__u32)__pa(&p_buf-> header.flag);
1678 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1679 p_buf-> w_TIC_1.flags = 0;
1680 p_buf-> w_TIC_1.count = 0;
1681
1682 if (((unsigned long)p_buff +
1683 privptr->p_env->write_size) >=
1684 ((unsigned long)(p_buff+2*
1685 (privptr->p_env->write_size) - 1) & PAGE_MASK)) {
1686 p_buff = p_buff+privptr->p_env->write_size;
1687 }
1688 }
1689 }
1690	   else	/* Buffers are >= PAGE_SIZE. 1 buff per get_free_pages */
1691 {
1692 privptr->p_write_free_chain=NULL;
1693 for (i = 0; i< privptr->p_env->write_buffers ; i++) {
1694 p_buff=(void *)__get_free_pages(__GFP_DMA,
1695 (int)pages_to_order_of_mag(
1696 privptr->p_buff_pages_perwrite) );
1697 if (p_buff==NULL) {
1698 free_pages((unsigned long)privptr->p_buff_ccw,
1699 (int)pages_to_order_of_mag(
1700 privptr->p_buff_ccw_num));
1701 privptr->p_buff_ccw=NULL;
1702 p_buf=privptr->p_buff_write;
1703 while (p_buf!=NULL) {
1704 free_pages((unsigned long)
1705 p_buf->p_buffer,
1706 (int)pages_to_order_of_mag(
1707 privptr->p_buff_pages_perwrite));
1708 p_buf=p_buf->next;
1709 }
1710 return -ENOMEM;
1711 } /* Error on get_pages */
1712 memset(p_buff, 0x00, privptr->p_env->write_size );
1713 p_buf = p_free_chain;
1714 p_free_chain = p_buf->next;
1715 p_buf->next = privptr->p_write_free_chain;
1716 privptr->p_write_free_chain = p_buf;
1717 privptr->p_buff_write = p_buf;
1718 p_buf->p_buffer=(struct clawbuf *)p_buff;
1719 p_buf-> write.cda = (__u32)__pa(p_buff);
1720 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1721 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1722 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1723 p_buf-> w_read_FF.count = 1;
1724 p_buf-> w_read_FF.cda =
1725 (__u32)__pa(&p_buf-> header.flag);
1726 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1727 p_buf-> w_TIC_1.flags = 0;
1728 p_buf-> w_TIC_1.count = 0;
1729 } /* for all write_buffers */
1730
1731 } /* else buffers are PAGE_SIZE or bigger */
1732
1733 }
1734 privptr->p_buff_write_num=claw_write_pages;
1735 privptr->write_free_count=privptr->p_env->write_buffers;
1736
1737
1738 /*
1739 * allocate read_pages_required and chain to free chain
1740 */
1741 if (privptr->p_buff_read==NULL) {
1742 if (privptr->p_env->read_size < PAGE_SIZE) {
1743 privptr->p_buff_read=
1744 (void *)__get_free_pages(__GFP_DMA,
1745 (int)pages_to_order_of_mag(claw_read_pages) );
1746 if (privptr->p_buff_read==NULL) {
1747 free_pages((unsigned long)privptr->p_buff_ccw,
1748 (int)pages_to_order_of_mag(
1749 privptr->p_buff_ccw_num));
1750			/* free the write pages; write size is < page size */
1751 free_pages((unsigned long)privptr->p_buff_write,
1752 (int)pages_to_order_of_mag(
1753 privptr->p_buff_write_num));
1754 privptr->p_buff_ccw=NULL;
1755 privptr->p_buff_write=NULL;
1756 return -ENOMEM;
1757 }
1758 memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE);
1759 privptr->p_buff_read_num=claw_read_pages;
1760 /*
1761 * Build CLAW read free chain
1762 *
1763 */
1764 p_buff=privptr->p_buff_read;
1765 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1766 p_buf = p_free_chain;
1767 p_free_chain = p_buf->next;
1768
1769 if (p_last_CCWB==NULL) {
1770 p_buf->next=NULL;
1771 real_TIC_address=0;
1772 p_last_CCWB=p_buf;
1773 }
1774 else {
1775 p_buf->next=p_first_CCWB;
1776 real_TIC_address=
1777 (__u32)__pa(&p_first_CCWB -> read );
1778 }
1779
1780 p_first_CCWB=p_buf;
1781
1782 p_buf->p_buffer=(struct clawbuf *)p_buff;
1783 /* initialize read command */
1784 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1785 p_buf-> read.cda = (__u32)__pa(p_buff);
1786 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1787 p_buf-> read.count = privptr->p_env->read_size;
1788
1789 /* initialize read_h command */
1790 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1791 p_buf-> read_h.cda =
1792 (__u32)__pa(&(p_buf->header));
1793 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1794 p_buf-> read_h.count = sizeof(struct clawh);
1795
1796 /* initialize Signal command */
1797 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1798 p_buf-> signal.cda =
1799 (__u32)__pa(&(pClawH->flag));
1800 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1801 p_buf-> signal.count = 1;
1802
1803 /* initialize r_TIC_1 command */
1804 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1805 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1806 p_buf-> r_TIC_1.flags = 0;
1807 p_buf-> r_TIC_1.count = 0;
1808
1809 /* initialize r_read_FF command */
1810 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1811 p_buf-> r_read_FF.cda =
1812 (__u32)__pa(&(pClawH->flag));
1813 p_buf-> r_read_FF.flags =
1814 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1815 p_buf-> r_read_FF.count = 1;
1816
1817 /* initialize r_TIC_2 */
1818 memcpy(&p_buf->r_TIC_2,
1819 &p_buf->r_TIC_1, sizeof(struct ccw1));
1820
1821 /* initialize Header */
1822 p_buf->header.length=0xffff;
1823 p_buf->header.opcode=0xff;
1824 p_buf->header.flag=CLAW_PENDING;
1825
1826 if (((unsigned long)p_buff+privptr->p_env->read_size) >=
1827 ((unsigned long)(p_buff+2*(privptr->p_env->read_size)
1828 -1)
1829 & PAGE_MASK)) {
1830 p_buff= p_buff+privptr->p_env->read_size;
1831 }
1832 else {
1833 p_buff=
1834 (void *)((unsigned long)
1835 (p_buff+2*(privptr->p_env->read_size)-1)
1836 & PAGE_MASK) ;
1837 }
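			/*
			 * Editor's note: the test above advances within the
			 * current page while another read_size buffer still
			 * fits before the next page boundary; otherwise p_buff
			 * is rounded up to the start of the next page so that
			 * no read buffer straddles a page boundary.
			 */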
1838 } /* for read_buffers */
1839 } /* read_size < PAGE_SIZE */
1840 else { /* read Size >= PAGE_SIZE */
1841 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1842 p_buff = (void *)__get_free_pages(__GFP_DMA,
1843 (int)pages_to_order_of_mag(
1844 privptr->p_buff_pages_perread));
1845 if (p_buff==NULL) {
1846 free_pages((unsigned long)privptr->p_buff_ccw,
1847 (int)pages_to_order_of_mag(privptr->
1848 p_buff_ccw_num));
1849 /* free the write pages */
1850 p_buf=privptr->p_buff_write;
1851 while (p_buf!=NULL) {
1852 free_pages(
1853 (unsigned long)p_buf->p_buffer,
1854 (int)pages_to_order_of_mag(
1855 privptr->p_buff_pages_perwrite));
1856 p_buf=p_buf->next;
1857 }
1858 /* free any read pages already alloc */
1859 p_buf=privptr->p_buff_read;
1860 while (p_buf!=NULL) {
1861 free_pages(
1862 (unsigned long)p_buf->p_buffer,
1863 (int)pages_to_order_of_mag(
1864 privptr->p_buff_pages_perread));
1865 p_buf=p_buf->next;
1866 }
1867 privptr->p_buff_ccw=NULL;
1868 privptr->p_buff_write=NULL;
1869 return -ENOMEM;
1870 }
1871 memset(p_buff, 0x00, privptr->p_env->read_size);
1872 p_buf = p_free_chain;
1873 privptr->p_buff_read = p_buf;
1874 p_free_chain = p_buf->next;
1875
1876 if (p_last_CCWB==NULL) {
1877 p_buf->next=NULL;
1878 real_TIC_address=0;
1879 p_last_CCWB=p_buf;
1880 }
1881 else {
1882 p_buf->next=p_first_CCWB;
1883 real_TIC_address=
1884 (addr_t)__pa(
1885 &p_first_CCWB -> read );
1886 }
1887
1888 p_first_CCWB=p_buf;
1889 /* save buff address */
1890 p_buf->p_buffer=(struct clawbuf *)p_buff;
1891 /* initialize read command */
1892 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1893 p_buf-> read.cda = (__u32)__pa(p_buff);
1894 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1895 p_buf-> read.count = privptr->p_env->read_size;
1896
1897 /* initialize read_h command */
1898 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1899 p_buf-> read_h.cda =
1900 (__u32)__pa(&(p_buf->header));
1901 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1902 p_buf-> read_h.count = sizeof(struct clawh);
1903
1904 /* initialize Signal command */
1905 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1906 p_buf-> signal.cda =
1907 (__u32)__pa(&(pClawH->flag));
1908 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1909 p_buf-> signal.count = 1;
1910
1911 /* initialize r_TIC_1 command */
1912 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1913 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1914 p_buf-> r_TIC_1.flags = 0;
1915 p_buf-> r_TIC_1.count = 0;
1916
1917 /* initialize r_read_FF command */
1918 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1919 p_buf-> r_read_FF.cda =
1920 (__u32)__pa(&(pClawH->flag));
1921 p_buf-> r_read_FF.flags =
1922 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1923 p_buf-> r_read_FF.count = 1;
1924
1925 /* initialize r_TIC_2 */
1926 memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
1927 sizeof(struct ccw1));
1928
1929 /* initialize Header */
1930 p_buf->header.length=0xffff;
1931 p_buf->header.opcode=0xff;
1932 p_buf->header.flag=CLAW_PENDING;
1933
1934 } /* For read_buffers */
1935 } /* read_size >= PAGE_SIZE */
1936 } /* pBuffread = NULL */
1937 add_claw_reads( dev ,p_first_CCWB , p_last_CCWB);
1938 privptr->buffs_alloc = 1;
1939
1940 return 0;
1941} /* end of init_ccw_bk */
1942
1943/*-------------------------------------------------------------------*
1944* *
1945* probe_error *
1946* *
1947*--------------------------------------------------------------------*/
1948
1949static void
1950probe_error( struct ccwgroup_device *cgdev)
1951{
1952 struct claw_privbk *privptr;
1953
1954 CLAW_DBF_TEXT(4, trace, "proberr");
1955 privptr = dev_get_drvdata(&cgdev->dev);
1956 if (privptr != NULL) {
1957 dev_set_drvdata(&cgdev->dev, NULL);
1958 kfree(privptr->p_env);
1959 kfree(privptr->p_mtc_envelope);
1960 kfree(privptr);
1961 }
1962} /* probe_error */
1963
1964/*-------------------------------------------------------------------*
1965* claw_process_control *
1966* *
1967* *
1968*--------------------------------------------------------------------*/
1969
1970static int
1971claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
1972{
1973
1974 struct clawbuf *p_buf;
1975 struct clawctl ctlbk;
1976 struct clawctl *p_ctlbk;
1977 char temp_host_name[8];
1978 char temp_ws_name[8];
1979 struct claw_privbk *privptr;
1980 struct claw_env *p_env;
1981 struct sysval *p_sysval;
1982 struct conncmd *p_connect=NULL;
1983 int rc;
1984 struct chbk *p_ch = NULL;
1985 struct device *tdev;
1986 CLAW_DBF_TEXT(2, setup, "clw_cntl");
1987	udelay(1000);  /* Wait a ms for the control packets to
1988			* catch up to each other */
1989 privptr = dev->ml_priv;
1990 p_env=privptr->p_env;
1991 tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
1992 memcpy( &temp_host_name, p_env->host_name, 8);
1993 memcpy( &temp_ws_name, p_env->adapter_name , 8);
1994 dev_info(tdev, "%s: CLAW device %.8s: "
1995 "Received Control Packet\n",
1996 dev->name, temp_ws_name);
1997 if (privptr->release_pend==1) {
1998 return 0;
1999 }
2000 p_buf=p_ccw->p_buffer;
2001 p_ctlbk=&ctlbk;
2002 if (p_env->packing == DO_PACKED) { /* packing in progress?*/
2003 memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
2004 } else {
2005 memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
2006 }
2007 switch (p_ctlbk->command)
2008 {
2009 case SYSTEM_VALIDATE_REQUEST:
2010 if (p_ctlbk->version != CLAW_VERSION_ID) {
2011 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2012 CLAW_RC_WRONG_VERSION);
2013 dev_warn(tdev, "The communication peer of %s"
2014 " uses an incorrect API version %d\n",
2015 dev->name, p_ctlbk->version);
2016 }
2017 p_sysval = (struct sysval *)&(p_ctlbk->data);
2018 dev_info(tdev, "%s: Recv Sys Validate Request: "
2019 "Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,"
2020 "Host name=%.8s\n",
2021 dev->name, p_ctlbk->version,
2022 p_ctlbk->linkid,
2023 p_ctlbk->correlator,
2024 p_sysval->WS_name,
2025 p_sysval->host_name);
2026 if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
2027 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2028 CLAW_RC_NAME_MISMATCH);
2029 CLAW_DBF_TEXT(2, setup, "HSTBAD");
2030 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
2031 CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
2032 dev_warn(tdev,
2033 "Host name %s for %s does not match the"
2034 " remote adapter name %s\n",
2035 p_sysval->host_name,
2036 dev->name,
2037 temp_host_name);
2038 }
2039 if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
2040 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2041 CLAW_RC_NAME_MISMATCH);
2042 CLAW_DBF_TEXT(2, setup, "WSNBAD");
2043 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
2044 CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
2045 dev_warn(tdev, "Adapter name %s for %s does not match"
2046 " the remote host name %s\n",
2047 p_sysval->WS_name,
2048 dev->name,
2049 temp_ws_name);
2050 }
2051 if ((p_sysval->write_frame_size < p_env->write_size) &&
2052 (p_env->packing == 0)) {
2053 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2054 CLAW_RC_HOST_RCV_TOO_SMALL);
2055 dev_warn(tdev,
2056 "The local write buffer is smaller than the"
2057 " remote read buffer\n");
2058 CLAW_DBF_TEXT(2, setup, "wrtszbad");
2059 }
2060 if ((p_sysval->read_frame_size < p_env->read_size) &&
2061 (p_env->packing == 0)) {
2062 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2063 CLAW_RC_HOST_RCV_TOO_SMALL);
2064 dev_warn(tdev,
2065 "The local read buffer is smaller than the"
2066 " remote write buffer\n");
2067 CLAW_DBF_TEXT(2, setup, "rdsizbad");
2068 }
2069 claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
2070 dev_info(tdev,
2071 "CLAW device %.8s: System validate"
2072 " completed.\n", temp_ws_name);
2073 dev_info(tdev,
2074 "%s: sys Validate Rsize:%d Wsize:%d\n",
2075 dev->name, p_sysval->read_frame_size,
2076 p_sysval->write_frame_size);
2077 privptr->system_validate_comp = 1;
2078 if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
2079 p_env->packing = PACKING_ASK;
2080 claw_strt_conn_req(dev);
2081 break;
2082 case SYSTEM_VALIDATE_RESPONSE:
2083 p_sysval = (struct sysval *)&(p_ctlbk->data);
2084 dev_info(tdev,
2085 "Settings for %s validated (version=%d, "
2086 "remote device=%d, rc=%d, adapter name=%.8s, "
2087 "host name=%.8s)\n",
2088 dev->name,
2089 p_ctlbk->version,
2090 p_ctlbk->correlator,
2091 p_ctlbk->rc,
2092 p_sysval->WS_name,
2093 p_sysval->host_name);
2094 switch (p_ctlbk->rc) {
2095 case 0:
2096 dev_info(tdev, "%s: CLAW device "
2097 "%.8s: System validate completed.\n",
2098 dev->name, temp_ws_name);
2099 if (privptr->system_validate_comp == 0)
2100 claw_strt_conn_req(dev);
2101 privptr->system_validate_comp = 1;
2102 break;
2103 case CLAW_RC_NAME_MISMATCH:
2104 dev_warn(tdev, "Validating %s failed because of"
2105 " a host or adapter name mismatch\n",
2106 dev->name);
2107 break;
2108 case CLAW_RC_WRONG_VERSION:
2109 dev_warn(tdev, "Validating %s failed because of a"
2110 " version conflict\n",
2111 dev->name);
2112 break;
2113 case CLAW_RC_HOST_RCV_TOO_SMALL:
2114 dev_warn(tdev, "Validating %s failed because of a"
2115 " frame size conflict\n",
2116 dev->name);
2117 break;
2118 default:
2119 dev_warn(tdev, "The communication peer of %s rejected"
2120 " the connection\n",
2121 dev->name);
2122 break;
2123 }
2124 break;
2125
2126 case CONNECTION_REQUEST:
2127 p_connect = (struct conncmd *)&(p_ctlbk->data);
2128 dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d,"
2129 "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
2130 dev->name,
2131 p_ctlbk->version,
2132 p_ctlbk->linkid,
2133 p_ctlbk->correlator,
2134 p_connect->host_name,
2135 p_connect->WS_name);
2136 if (privptr->active_link_ID != 0) {
2137 claw_snd_disc(dev, p_ctlbk);
2138 dev_info(tdev, "%s rejected a connection request"
2139 " because it is already active\n",
2140 dev->name);
2141 }
2142 if (p_ctlbk->linkid != 1) {
2143 claw_snd_disc(dev, p_ctlbk);
2144 dev_info(tdev, "%s rejected a request to open multiple"
2145 " connections\n",
2146 dev->name);
2147 }
2148 rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
2149 if (rc != 0) {
2150 claw_snd_disc(dev, p_ctlbk);
2151 dev_info(tdev, "%s rejected a connection request"
2152 " because of a type mismatch\n",
2153 dev->name);
2154 }
2155 claw_send_control(dev,
2156 CONNECTION_CONFIRM, p_ctlbk->linkid,
2157 p_ctlbk->correlator,
2158 0, p_connect->host_name,
2159 p_connect->WS_name);
2160 if (p_env->packing == PACKING_ASK) {
2161 p_env->packing = PACK_SEND;
2162 claw_snd_conn_req(dev, 0);
2163 }
2164 dev_info(tdev, "%s: CLAW device %.8s: Connection "
2165 "completed link_id=%d.\n",
2166 dev->name, temp_ws_name,
2167 p_ctlbk->linkid);
2168 privptr->active_link_ID = p_ctlbk->linkid;
2169 p_ch = &privptr->channel[WRITE_CHANNEL];
2170 wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
2171 break;
2172 case CONNECTION_RESPONSE:
2173 p_connect = (struct conncmd *)&(p_ctlbk->data);
2174 dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
2175 "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
2176 dev->name,
2177 p_ctlbk->version,
2178 p_ctlbk->linkid,
2179 p_ctlbk->correlator,
2180 p_ctlbk->rc,
2181 p_connect->host_name,
2182 p_connect->WS_name);
2183
2184 if (p_ctlbk->rc != 0) {
2185 dev_warn(tdev, "The communication peer of %s rejected"
2186 " a connection request\n",
2187 dev->name);
2188 return 1;
2189 }
2190 rc = find_link(dev,
2191 p_connect->host_name, p_connect->WS_name);
2192 if (rc != 0) {
2193 claw_snd_disc(dev, p_ctlbk);
2194 dev_warn(tdev, "The communication peer of %s"
2195 " rejected a connection "
2196 "request because of a type mismatch\n",
2197 dev->name);
2198 }
2199		/* negative link ID marks the link pending until CONNECTION_CONFIRM */
2200 privptr->active_link_ID = -(p_ctlbk->linkid);
2201 break;
2202 case CONNECTION_CONFIRM:
2203 p_connect = (struct conncmd *)&(p_ctlbk->data);
2204 dev_info(tdev,
2205 "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
2206 "Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
2207 dev->name,
2208 p_ctlbk->version,
2209 p_ctlbk->linkid,
2210 p_ctlbk->correlator,
2211 p_connect->host_name,
2212 p_connect->WS_name);
2213 if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
2214 privptr->active_link_ID = p_ctlbk->linkid;
2215 if (p_env->packing > PACKING_ASK) {
2216 dev_info(tdev,
2217 "%s: Confirmed Now packing\n", dev->name);
2218 p_env->packing = DO_PACKED;
2219 }
2220 p_ch = &privptr->channel[WRITE_CHANNEL];
2221 wake_up(&p_ch->wait);
2222 } else {
2223 dev_warn(tdev, "Activating %s failed because of"
2224 " an incorrect link ID=%d\n",
2225 dev->name, p_ctlbk->linkid);
2226 claw_snd_disc(dev, p_ctlbk);
2227 }
2228 break;
2229 case DISCONNECT:
2230 dev_info(tdev, "%s: Disconnect: "
2231 "Vers=%d,link_id=%d,Corr=%d\n",
2232 dev->name, p_ctlbk->version,
2233 p_ctlbk->linkid, p_ctlbk->correlator);
2234 if ((p_ctlbk->linkid == 2) &&
2235 (p_env->packing == PACK_SEND)) {
2236 privptr->active_link_ID = 1;
2237 p_env->packing = DO_PACKED;
2238 } else
2239 privptr->active_link_ID = 0;
2240 break;
2241 case CLAW_ERROR:
2242 dev_warn(tdev, "The communication peer of %s failed\n",
2243 dev->name);
2244 break;
2245 default:
2246 dev_warn(tdev, "The communication peer of %s sent"
2247 " an unknown command code\n",
2248 dev->name);
2249 break;
2250 }
2251
2252 return 0;
2253} /* end of claw_process_control */
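/*
 * Editor's summary of the control flow handled above (derived from the cases
 * in claw_process_control): the local side and its peer first exchange
 * SYSTEM_VALIDATE_REQUEST/RESPONSE to agree on names, version and frame
 * sizes; a successful validation triggers claw_strt_conn_req(), which sends
 * CONNECTION_REQUEST; the peer answers with CONNECTION_RESPONSE and the link
 * becomes active once CONNECTION_CONFIRM arrives (or a DISCONNECT tears it
 * down).  When the PACKED application name is in use the same exchange also
 * negotiates packing (PACKING_ASK -> PACK_SEND -> DO_PACKED).
 */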
2254
2255
2256/*-------------------------------------------------------------------*
2257* claw_send_control *
2258* *
2259*--------------------------------------------------------------------*/
2260
2261static int
2262claw_send_control(struct net_device *dev, __u8 type, __u8 link,
2263 __u8 correlator, __u8 rc, char *local_name, char *remote_name)
2264{
2265 struct claw_privbk *privptr;
2266 struct clawctl *p_ctl;
2267 struct sysval *p_sysval;
2268 struct conncmd *p_connect;
2269 struct sk_buff *skb;
2270
2271 CLAW_DBF_TEXT(2, setup, "sndcntl");
2272 privptr = dev->ml_priv;
2273 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2274
2275 p_ctl->command=type;
2276 p_ctl->version=CLAW_VERSION_ID;
2277 p_ctl->linkid=link;
2278 p_ctl->correlator=correlator;
2279 p_ctl->rc=rc;
2280
2281 p_sysval=(struct sysval *)&p_ctl->data;
2282 p_connect=(struct conncmd *)&p_ctl->data;
2283
2284 switch (p_ctl->command) {
2285 case SYSTEM_VALIDATE_REQUEST:
2286 case SYSTEM_VALIDATE_RESPONSE:
2287 memcpy(&p_sysval->host_name, local_name, 8);
2288 memcpy(&p_sysval->WS_name, remote_name, 8);
2289 if (privptr->p_env->packing > 0) {
2290 p_sysval->read_frame_size = DEF_PACK_BUFSIZE;
2291 p_sysval->write_frame_size = DEF_PACK_BUFSIZE;
2292 } else {
2293 /* how big is the biggest group of packets */
2294 p_sysval->read_frame_size =
2295 privptr->p_env->read_size;
2296 p_sysval->write_frame_size =
2297 privptr->p_env->write_size;
2298 }
2299 memset(&p_sysval->reserved, 0x00, 4);
2300 break;
2301 case CONNECTION_REQUEST:
2302 case CONNECTION_RESPONSE:
2303 case CONNECTION_CONFIRM:
2304 case DISCONNECT:
2305 memcpy(&p_sysval->host_name, local_name, 8);
2306 memcpy(&p_sysval->WS_name, remote_name, 8);
2307 if (privptr->p_env->packing > 0) {
2308 /* How big is the biggest packet */
2309 p_connect->reserved1[0]=CLAW_FRAME_SIZE;
2310 p_connect->reserved1[1]=CLAW_FRAME_SIZE;
2311 } else {
2312 memset(&p_connect->reserved1, 0x00, 4);
2313 memset(&p_connect->reserved2, 0x00, 4);
2314 }
2315 break;
2316 default:
2317 break;
2318 }
2319
2320 /* write Control Record to the device */
2321
2322
2323 skb = dev_alloc_skb(sizeof(struct clawctl));
2324 if (!skb) {
2325 return -ENOMEM;
2326 }
2327 memcpy(skb_put(skb, sizeof(struct clawctl)),
2328 p_ctl, sizeof(struct clawctl));
2329 if (privptr->p_env->packing >= PACK_SEND)
2330 claw_hw_tx(skb, dev, 1);
2331 else
2332 claw_hw_tx(skb, dev, 0);
2333 return 0;
2334} /* end of claw_send_control */
2335
2336/*-------------------------------------------------------------------*
2337* claw_snd_conn_req *
2338* *
2339*--------------------------------------------------------------------*/
2340static int
2341claw_snd_conn_req(struct net_device *dev, __u8 link)
2342{
2343 int rc;
2344 struct claw_privbk *privptr = dev->ml_priv;
2345 struct clawctl *p_ctl;
2346
2347 CLAW_DBF_TEXT(2, setup, "snd_conn");
2348 rc = 1;
2349 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2350 p_ctl->linkid = link;
2351 if ( privptr->system_validate_comp==0x00 ) {
2352 return rc;
2353 }
2354 if (privptr->p_env->packing == PACKING_ASK )
2355 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2356 WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
2357 if (privptr->p_env->packing == PACK_SEND) {
2358 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2359 WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
2360 }
2361 if (privptr->p_env->packing == 0)
2362 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2363 HOST_APPL_NAME, privptr->p_env->api_type);
2364 return rc;
2365
2366} /* end of claw_snd_conn_req */
2367
2368
2369/*-------------------------------------------------------------------*
2370* claw_snd_disc *
2371* *
2372*--------------------------------------------------------------------*/
2373
2374static int
2375claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
2376{
2377 int rc;
2378 struct conncmd * p_connect;
2379
2380 CLAW_DBF_TEXT(2, setup, "snd_dsc");
2381 p_connect=(struct conncmd *)&p_ctl->data;
2382
2383 rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid,
2384 p_ctl->correlator, 0,
2385 p_connect->host_name, p_connect->WS_name);
2386 return rc;
2387} /* end of claw_snd_disc */
2388
2389
2390/*-------------------------------------------------------------------*
2391* claw_snd_sys_validate_rsp *
2392* *
2393*--------------------------------------------------------------------*/
2394
2395static int
2396claw_snd_sys_validate_rsp(struct net_device *dev,
2397 struct clawctl *p_ctl, __u32 return_code)
2398{
2399 struct claw_env * p_env;
2400 struct claw_privbk *privptr;
2401 int rc;
2402
2403 CLAW_DBF_TEXT(2, setup, "chkresp");
2404 privptr = dev->ml_priv;
2405 p_env=privptr->p_env;
2406 rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
2407 p_ctl->linkid,
2408 p_ctl->correlator,
2409 return_code,
2410 p_env->host_name,
2411 p_env->adapter_name );
2412 return rc;
2413} /* end of claw_snd_sys_validate_rsp */
2414
2415/*-------------------------------------------------------------------*
2416* claw_strt_conn_req *
2417* *
2418*--------------------------------------------------------------------*/
2419
2420static int
2421claw_strt_conn_req(struct net_device *dev )
2422{
2423 int rc;
2424
2425 CLAW_DBF_TEXT(2, setup, "conn_req");
2426 rc=claw_snd_conn_req(dev, 1);
2427 return rc;
2428} /* end of claw_strt_conn_req */
2429
2430
2431
2432/*-------------------------------------------------------------------*
2433 * claw_stats *
2434 *-------------------------------------------------------------------*/
2435
2436static struct
2437net_device_stats *claw_stats(struct net_device *dev)
2438{
2439 struct claw_privbk *privptr;
2440
2441 CLAW_DBF_TEXT(4, trace, "stats");
2442 privptr = dev->ml_priv;
2443 return &privptr->stats;
2444} /* end of claw_stats */
2445
2446
2447/*-------------------------------------------------------------------*
2448* unpack_read *
2449* *
2450*--------------------------------------------------------------------*/
2451static void
2452unpack_read(struct net_device *dev )
2453{
2454 struct sk_buff *skb;
2455 struct claw_privbk *privptr;
2456 struct claw_env *p_env;
2457 struct ccwbk *p_this_ccw;
2458 struct ccwbk *p_first_ccw;
2459 struct ccwbk *p_last_ccw;
2460 struct clawph *p_packh;
2461 void *p_packd;
2462 struct clawctl *p_ctlrec=NULL;
2463 struct device *p_dev;
2464
2465 __u32 len_of_data;
2466 __u32 pack_off;
2467 __u8 link_num;
2468 __u8 mtc_this_frm=0;
2469 __u32 bytes_to_mov;
2470 int i=0;
2471 int p=0;
2472
2473 CLAW_DBF_TEXT(4, trace, "unpkread");
2474 p_first_ccw=NULL;
2475 p_last_ccw=NULL;
2476 p_packh=NULL;
2477 p_packd=NULL;
2478 privptr = dev->ml_priv;
2479
2480 p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
2481 p_env = privptr->p_env;
2482 p_this_ccw=privptr->p_read_active_first;
2483 while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
2484 pack_off = 0;
2485 p = 0;
2486 p_this_ccw->header.flag=CLAW_PENDING;
2487 privptr->p_read_active_first=p_this_ccw->next;
2488 p_this_ccw->next=NULL;
2489 p_packh = (struct clawph *)p_this_ccw->p_buffer;
2490 if ((p_env->packing == PACK_SEND) &&
2491 (p_packh->len == 32) &&
2492 (p_packh->link_num == 0)) { /* is it a packed ctl rec? */
2493 p_packh++; /* peek past pack header */
2494 p_ctlrec = (struct clawctl *)p_packh;
2495 p_packh--; /* un peek */
2496 if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
2497 (p_ctlrec->command == CONNECTION_CONFIRM))
2498 p_env->packing = DO_PACKED;
2499 }
2500 if (p_env->packing == DO_PACKED)
2501 link_num=p_packh->link_num;
2502 else
2503 link_num=p_this_ccw->header.opcode / 8;
2504 if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
2505 mtc_this_frm=1;
2506 if (p_this_ccw->header.length!=
2507 privptr->p_env->read_size ) {
2508 dev_warn(p_dev,
2509 "The communication peer of %s"
2510 " sent a faulty"
2511 " frame of length %02x\n",
2512 dev->name, p_this_ccw->header.length);
2513 }
2514 }
2515
2516 if (privptr->mtc_skipping) {
2517 /*
2518 * We're in the mode of skipping past a
2519 * multi-frame message
2520 * that we can't process for some reason or other.
2521 * The first frame without the More-To-Come flag is
2522 * the last frame of the skipped message.
2523 */
2524 /* in case of More-To-Come not set in this frame */
2525 if (mtc_this_frm==0) {
2526 privptr->mtc_skipping=0; /* Ok, the end */
2527 privptr->mtc_logical_link=-1;
2528 }
2529 goto NextFrame;
2530 }
2531
2532 if (link_num==0) {
2533 claw_process_control(dev, p_this_ccw);
2534 CLAW_DBF_TEXT(4, trace, "UnpkCntl");
2535 goto NextFrame;
2536 }
2537unpack_next:
2538 if (p_env->packing == DO_PACKED) {
2539 if (pack_off > p_env->read_size)
2540 goto NextFrame;
2541 p_packd = p_this_ccw->p_buffer+pack_off;
2542 p_packh = (struct clawph *) p_packd;
2543 if ((p_packh->len == 0) || /* done with this frame? */
2544 (p_packh->flag != 0))
2545 goto NextFrame;
2546 bytes_to_mov = p_packh->len;
2547 pack_off += bytes_to_mov+sizeof(struct clawph);
2548 p++;
2549 } else {
2550 bytes_to_mov=p_this_ccw->header.length;
2551 }
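		/*
		 * Editor's note: in packed mode each sub-frame inside the read
		 * buffer is a 4-byte struct clawph followed by p_packh->len
		 * payload bytes; pack_off has already been advanced past both,
		 * and a clawph with len == 0 (checked above) marks the end of
		 * the packed frame.  In unpacked mode bytes_to_mov is simply
		 * the length from the CLAW header.
		 */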
2552 if (privptr->mtc_logical_link<0) {
2553
2554 /*
2555		 * If More-To-Come is set in this frame then we don't know the
2556		 * length of the entire message and hence have to allocate a
2557		 * large buffer. */
2558
2559 /* We are starting a new envelope */
2560 privptr->mtc_offset=0;
2561 privptr->mtc_logical_link=link_num;
2562 }
2563
2564 if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) {
2565 /* error */
2566 privptr->stats.rx_frame_errors++;
2567 goto NextFrame;
2568 }
2569 if (p_env->packing == DO_PACKED) {
2570 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2571 p_packd+sizeof(struct clawph), bytes_to_mov);
2572
2573 } else {
2574 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2575 p_this_ccw->p_buffer, bytes_to_mov);
2576 }
2577 if (mtc_this_frm==0) {
2578 len_of_data=privptr->mtc_offset+bytes_to_mov;
2579 skb=dev_alloc_skb(len_of_data);
2580 if (skb) {
2581 memcpy(skb_put(skb,len_of_data),
2582 privptr->p_mtc_envelope,
2583 len_of_data);
2584 skb->dev=dev;
2585 skb_reset_mac_header(skb);
2586 skb->protocol=htons(ETH_P_IP);
2587 skb->ip_summed=CHECKSUM_UNNECESSARY;
2588 privptr->stats.rx_packets++;
2589 privptr->stats.rx_bytes+=len_of_data;
2590 netif_rx(skb);
2591 }
2592 else {
2593 dev_info(p_dev, "Allocating a buffer for"
2594 " incoming data failed\n");
2595 privptr->stats.rx_dropped++;
2596 }
2597 privptr->mtc_offset=0;
2598 privptr->mtc_logical_link=-1;
2599 }
2600 else {
2601 privptr->mtc_offset+=bytes_to_mov;
2602 }
2603 if (p_env->packing == DO_PACKED)
2604 goto unpack_next;
2605NextFrame:
2606 /*
2607 * Remove ThisCCWblock from active read queue, and add it
2608 * to queue of free blocks to be reused.
2609 */
2610 i++;
2611 p_this_ccw->header.length=0xffff;
2612 p_this_ccw->header.opcode=0xff;
2613 /*
2614 * add this one to the free queue for later reuse
2615 */
2616 if (p_first_ccw==NULL) {
2617 p_first_ccw = p_this_ccw;
2618 }
2619 else {
2620 p_last_ccw->next = p_this_ccw;
2621 }
2622 p_last_ccw = p_this_ccw;
2623 /*
2624 * chain to next block on active read queue
2625 */
2626 p_this_ccw = privptr->p_read_active_first;
2627 CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p);
2628 } /* end of while */
2629
2630 /* check validity */
2631
2632 CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i);
2633 add_claw_reads(dev, p_first_ccw, p_last_ccw);
2634 claw_strt_read(dev, LOCK_YES);
2635 return;
2636} /* end of unpack_read */
2637
2638/*-------------------------------------------------------------------*
2639* claw_strt_read *
2640* *
2641*--------------------------------------------------------------------*/
2642static void
2643claw_strt_read (struct net_device *dev, int lock )
2644{
2645 int rc = 0;
2646 __u32 parm;
2647 unsigned long saveflags = 0;
2648 struct claw_privbk *privptr = dev->ml_priv;
2649 struct ccwbk*p_ccwbk;
2650 struct chbk *p_ch;
2651 struct clawh *p_clawh;
2652 p_ch = &privptr->channel[READ_CHANNEL];
2653
2654 CLAW_DBF_TEXT(4, trace, "StRdNter");
2655 p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
2656 p_clawh->flag=CLAW_IDLE; /* 0x00 */
2657
2658 if ((privptr->p_write_active_first!=NULL &&
2659 privptr->p_write_active_first->header.flag!=CLAW_PENDING) ||
2660 (privptr->p_read_active_first!=NULL &&
2661 privptr->p_read_active_first->header.flag!=CLAW_PENDING )) {
2662 p_clawh->flag=CLAW_BUSY; /* 0xff */
2663 }
2664 if (lock==LOCK_YES) {
2665 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
2666 }
2667 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2668 CLAW_DBF_TEXT(4, trace, "HotRead");
2669 p_ccwbk=privptr->p_read_active_first;
2670 parm = (unsigned long) p_ch;
2671 rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
2672 0xff, 0);
2673 if (rc != 0) {
2674 ccw_check_return_code(p_ch->cdev, rc);
2675 }
2676 }
2677 else {
2678 CLAW_DBF_TEXT(2, trace, "ReadAct");
2679 }
2680
2681 if (lock==LOCK_YES) {
2682 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
2683 }
2684 CLAW_DBF_TEXT(4, trace, "StRdExit");
2685 return;
2686} /* end of claw_strt_read */
2687
2688/*-------------------------------------------------------------------*
2689* claw_strt_out_IO *
2690* *
2691*--------------------------------------------------------------------*/
2692
2693static void
2694claw_strt_out_IO( struct net_device *dev )
2695{
2696 int rc = 0;
2697 unsigned long parm;
2698 struct claw_privbk *privptr;
2699 struct chbk *p_ch;
2700 struct ccwbk *p_first_ccw;
2701
2702 if (!dev) {
2703 return;
2704 }
2705 privptr = (struct claw_privbk *)dev->ml_priv;
2706 p_ch = &privptr->channel[WRITE_CHANNEL];
2707
2708 CLAW_DBF_TEXT(4, trace, "strt_io");
2709 p_first_ccw=privptr->p_write_active_first;
2710
2711 if (p_ch->claw_state == CLAW_STOP)
2712 return;
2713 if (p_first_ccw == NULL) {
2714 return;
2715 }
2716 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2717 parm = (unsigned long) p_ch;
2718 CLAW_DBF_TEXT(2, trace, "StWrtIO");
2719 rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm,
2720 0xff, 0);
2721 if (rc != 0) {
2722 ccw_check_return_code(p_ch->cdev, rc);
2723 }
2724 }
2725 dev->trans_start = jiffies;
2726 return;
2727} /* end of claw_strt_out_IO */
2728
2729/*-------------------------------------------------------------------*
2730* Free write buffers *
2731* *
2732*--------------------------------------------------------------------*/
2733
2734static void
2735claw_free_wrt_buf( struct net_device *dev )
2736{
2737
2738 struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
2739 struct ccwbk*p_this_ccw;
2740 struct ccwbk*p_next_ccw;
2741
2742 CLAW_DBF_TEXT(4, trace, "freewrtb");
2743 /* scan the write queue to free any completed write packets */
2744 p_this_ccw=privptr->p_write_active_first;
2745 while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
2746 {
2747 p_next_ccw = p_this_ccw->next;
2748 if (((p_next_ccw!=NULL) &&
2749 (p_next_ccw->header.flag!=CLAW_PENDING)) ||
2750 ((p_this_ccw == privptr->p_write_active_last) &&
2751 (p_this_ccw->header.flag!=CLAW_PENDING))) {
2752 /* The next CCW is OK or this is */
2753 /* the last CCW...free it @A1A */
2754 privptr->p_write_active_first=p_this_ccw->next;
2755 p_this_ccw->header.flag=CLAW_PENDING;
2756 p_this_ccw->next=privptr->p_write_free_chain;
2757 privptr->p_write_free_chain=p_this_ccw;
2758 ++privptr->write_free_count;
2759 privptr->stats.tx_bytes+= p_this_ccw->write.count;
2760 p_this_ccw=privptr->p_write_active_first;
2761 privptr->stats.tx_packets++;
2762 }
2763 else {
2764 break;
2765 }
2766 }
2767 if (privptr->write_free_count!=0) {
2768 claw_clearbit_busy(TB_NOBUFFER,dev);
2769 }
2770 /* whole chain removed? */
2771 if (privptr->p_write_active_first==NULL) {
2772 privptr->p_write_active_last=NULL;
2773 }
2774 CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count);
2775 return;
2776}
2777
2778/*-------------------------------------------------------------------*
2779* claw free netdevice *
2780* *
2781*--------------------------------------------------------------------*/
2782static void
2783claw_free_netdevice(struct net_device * dev, int free_dev)
2784{
2785 struct claw_privbk *privptr;
2786
2787 CLAW_DBF_TEXT(2, setup, "free_dev");
2788 if (!dev)
2789 return;
2790 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2791 privptr = dev->ml_priv;
2792 if (dev->flags & IFF_RUNNING)
2793 claw_release(dev);
2794 if (privptr) {
2795 privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */
2796 }
2797 dev->ml_priv = NULL;
2798#ifdef MODULE
2799 if (free_dev) {
2800 free_netdev(dev);
2801 }
2802#endif
2803 CLAW_DBF_TEXT(2, setup, "free_ok");
2804}
2805
2806/**
2807 * Claw init netdevice
2808 * Initialize all fields of the net device except the name and the
2809 * channel structs.
2810 */
2811static const struct net_device_ops claw_netdev_ops = {
2812 .ndo_open = claw_open,
2813 .ndo_stop = claw_release,
2814 .ndo_get_stats = claw_stats,
2815 .ndo_start_xmit = claw_tx,
2816 .ndo_change_mtu = claw_change_mtu,
2817};
2818
2819static void
2820claw_init_netdevice(struct net_device * dev)
2821{
2822 CLAW_DBF_TEXT(2, setup, "init_dev");
2823 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2824 dev->mtu = CLAW_DEFAULT_MTU_SIZE;
2825 dev->hard_header_len = 0;
2826 dev->addr_len = 0;
2827 dev->type = ARPHRD_SLIP;
2828 dev->tx_queue_len = 1300;
2829 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2830 dev->netdev_ops = &claw_netdev_ops;
2831 CLAW_DBF_TEXT(2, setup, "initok");
2832 return;
2833}
2834
2835/**
2836 * Init a new channel in the privptr->channel[i].
2837 *
2838 * @param cdev The ccw_device to be added.
2839 *
2840 * @return 0 on success, !0 on error.
2841 */
2842static int
2843add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
2844{
2845 struct chbk *p_ch;
2846 struct ccw_dev_id dev_id;
2847
2848 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev));
2849 privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
2850 p_ch = &privptr->channel[i];
2851 p_ch->cdev = cdev;
2852 snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev));
2853 ccw_device_get_id(cdev, &dev_id);
2854 p_ch->devno = dev_id.devno;
2855 if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
2856 return -ENOMEM;
2857 }
2858 return 0;
2859}
2860
2861
2862/**
2863 *
2864 * Setup an interface.
2865 *
2866 * @param cgdev Device to be setup.
2867 *
2868 * @returns 0 on success, !0 on failure.
2869 */
2870static int
2871claw_new_device(struct ccwgroup_device *cgdev)
2872{
2873 struct claw_privbk *privptr;
2874 struct claw_env *p_env;
2875 struct net_device *dev;
2876 int ret;
2877 struct ccw_dev_id dev_id;
2878
2879 dev_info(&cgdev->dev, "add for %s\n",
2880 dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
2881 CLAW_DBF_TEXT(2, setup, "new_dev");
2882 privptr = dev_get_drvdata(&cgdev->dev);
2883 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
2884 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
2885 if (!privptr)
2886 return -ENODEV;
2887 p_env = privptr->p_env;
2888 ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
2889 p_env->devno[READ_CHANNEL] = dev_id.devno;
2890 ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
2891 p_env->devno[WRITE_CHANNEL] = dev_id.devno;
2892 ret = add_channel(cgdev->cdev[0],0,privptr);
2893 if (ret == 0)
2894 ret = add_channel(cgdev->cdev[1],1,privptr);
2895 if (ret != 0) {
2896 dev_warn(&cgdev->dev, "Creating a CLAW group device"
2897 " failed with error code %d\n", ret);
2898 goto out;
2899 }
2900 ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
2901 if (ret != 0) {
2902 dev_warn(&cgdev->dev,
2903 "Setting the read subchannel online"
2904 " failed with error code %d\n", ret);
2905 goto out;
2906 }
2907 ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
2908 if (ret != 0) {
2909 dev_warn(&cgdev->dev,
2910 "Setting the write subchannel online "
2911 "failed with error code %d\n", ret);
2912 goto out;
2913 }
2914 dev = alloc_netdev(0, "claw%d", NET_NAME_UNKNOWN, claw_init_netdevice);
2915 if (!dev) {
2916 dev_warn(&cgdev->dev,
2917 "Activating the CLAW device failed\n");
2918 goto out;
2919 }
2920 dev->ml_priv = privptr;
2921 dev_set_drvdata(&cgdev->dev, privptr);
2922 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
2923 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
2924 /* sysfs magic */
2925 SET_NETDEV_DEV(dev, &cgdev->dev);
2926 if (register_netdev(dev) != 0) {
2927 claw_free_netdevice(dev, 1);
2928 CLAW_DBF_TEXT(2, trace, "regfail");
2929 goto out;
2930 }
2931 dev->flags &=~IFF_RUNNING;
2932 if (privptr->buffs_alloc == 0) {
2933 ret=init_ccw_bk(dev);
2934 if (ret !=0) {
2935 unregister_netdev(dev);
2936 claw_free_netdevice(dev,1);
2937 CLAW_DBF_TEXT(2, trace, "ccwmem");
2938 goto out;
2939 }
2940 }
2941 privptr->channel[READ_CHANNEL].ndev = dev;
2942 privptr->channel[WRITE_CHANNEL].ndev = dev;
2943 privptr->p_env->ndev = dev;
2944
2945 dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
2946 "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
2947 dev->name, p_env->read_size,
2948 p_env->write_size, p_env->read_buffers,
2949 p_env->write_buffers, p_env->devno[READ_CHANNEL],
2950 p_env->devno[WRITE_CHANNEL]);
2951 dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
2952 ":%.8s api_type: %.8s\n",
2953 dev->name, p_env->host_name,
2954 p_env->adapter_name , p_env->api_type);
2955 return 0;
2956out:
2957 ccw_device_set_offline(cgdev->cdev[1]);
2958 ccw_device_set_offline(cgdev->cdev[0]);
2959 return -ENODEV;
2960}
2961
2962static void
2963claw_purge_skb_queue(struct sk_buff_head *q)
2964{
2965 struct sk_buff *skb;
2966
2967 CLAW_DBF_TEXT(4, trace, "purgque");
2968 while ((skb = skb_dequeue(q))) {
2969 atomic_dec(&skb->users);
2970 dev_kfree_skb_any(skb);
2971 }
2972}
2973
2974/**
2975 * Shutdown an interface.
2976 *
2977 * @param cgdev Device to be shut down.
2978 *
2979 * @returns 0 on success, !0 on failure.
2980 */
2981static int
2982claw_shutdown_device(struct ccwgroup_device *cgdev)
2983{
2984 struct claw_privbk *priv;
2985 struct net_device *ndev;
2986 int ret = 0;
2987
2988 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
2989 priv = dev_get_drvdata(&cgdev->dev);
2990 if (!priv)
2991 return -ENODEV;
2992 ndev = priv->channel[READ_CHANNEL].ndev;
2993 if (ndev) {
2994 /* Close the device */
2995 dev_info(&cgdev->dev, "%s: shutting down\n",
2996 ndev->name);
2997 if (ndev->flags & IFF_RUNNING)
2998 ret = claw_release(ndev);
2999 ndev->flags &=~IFF_RUNNING;
3000 unregister_netdev(ndev);
3001 ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */
3002 claw_free_netdevice(ndev, 1);
3003 priv->channel[READ_CHANNEL].ndev = NULL;
3004 priv->channel[WRITE_CHANNEL].ndev = NULL;
3005 priv->p_env->ndev = NULL;
3006 }
3007 ccw_device_set_offline(cgdev->cdev[1]);
3008 ccw_device_set_offline(cgdev->cdev[0]);
3009 return ret;
3010}
3011
3012static void
3013claw_remove_device(struct ccwgroup_device *cgdev)
3014{
3015 struct claw_privbk *priv;
3016
3017 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3018 priv = dev_get_drvdata(&cgdev->dev);
3019 dev_info(&cgdev->dev, " will be removed.\n");
3020 if (cgdev->state == CCWGROUP_ONLINE)
3021 claw_shutdown_device(cgdev);
3022 kfree(priv->p_mtc_envelope);
3023 priv->p_mtc_envelope=NULL;
3024 kfree(priv->p_env);
3025 priv->p_env=NULL;
3026 kfree(priv->channel[0].irb);
3027 priv->channel[0].irb=NULL;
3028 kfree(priv->channel[1].irb);
3029 priv->channel[1].irb=NULL;
3030 kfree(priv);
3031 dev_set_drvdata(&cgdev->dev, NULL);
3032 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
3033 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
3034 put_device(&cgdev->dev);
3035
3036 return;
3037}
3038
3039
3040/*
3041 * sysfs attributes
3042 */
3043static ssize_t
3044claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
3045{
3046 struct claw_privbk *priv;
3047 struct claw_env * p_env;
3048
3049 priv = dev_get_drvdata(dev);
3050 if (!priv)
3051 return -ENODEV;
3052 p_env = priv->p_env;
3053 return sprintf(buf, "%s\n",p_env->host_name);
3054}
3055
3056static ssize_t
3057claw_hname_write(struct device *dev, struct device_attribute *attr,
3058 const char *buf, size_t count)
3059{
3060 struct claw_privbk *priv;
3061 struct claw_env * p_env;
3062
3063 priv = dev_get_drvdata(dev);
3064 if (!priv)
3065 return -ENODEV;
3066 p_env = priv->p_env;
3067 if (count > MAX_NAME_LEN+1)
3068 return -EINVAL;
3069 memset(p_env->host_name, 0x20, MAX_NAME_LEN);
3070 strncpy(p_env->host_name,buf, count);
3071 p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */
3072 p_env->host_name[MAX_NAME_LEN] = 0x00;
3073 CLAW_DBF_TEXT(2, setup, "HstnSet");
3074 CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name);
3075
3076 return count;
3077}
3078
3079static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);
3080
3081static ssize_t
3082claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
3083{
3084 struct claw_privbk *priv;
3085 struct claw_env * p_env;
3086
3087 priv = dev_get_drvdata(dev);
3088 if (!priv)
3089 return -ENODEV;
3090 p_env = priv->p_env;
3091 return sprintf(buf, "%s\n", p_env->adapter_name);
3092}
3093
3094static ssize_t
3095claw_adname_write(struct device *dev, struct device_attribute *attr,
3096 const char *buf, size_t count)
3097{
3098 struct claw_privbk *priv;
3099 struct claw_env * p_env;
3100
3101 priv = dev_get_drvdata(dev);
3102 if (!priv)
3103 return -ENODEV;
3104 p_env = priv->p_env;
3105 if (count > MAX_NAME_LEN+1)
3106 return -EINVAL;
3107 memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
3108 strncpy(p_env->adapter_name,buf, count);
3109 p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */
3110 p_env->adapter_name[MAX_NAME_LEN] = 0x00;
3111 CLAW_DBF_TEXT(2, setup, "AdnSet");
3112 CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name);
3113
3114 return count;
3115}
3116
3117static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);
3118
3119static ssize_t
3120claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
3121{
3122 struct claw_privbk *priv;
3123 struct claw_env * p_env;
3124
3125 priv = dev_get_drvdata(dev);
3126 if (!priv)
3127 return -ENODEV;
3128 p_env = priv->p_env;
3129 return sprintf(buf, "%s\n",
3130 p_env->api_type);
3131}
3132
3133static ssize_t
3134claw_apname_write(struct device *dev, struct device_attribute *attr,
3135 const char *buf, size_t count)
3136{
3137 struct claw_privbk *priv;
3138 struct claw_env * p_env;
3139
3140 priv = dev_get_drvdata(dev);
3141 if (!priv)
3142 return -ENODEV;
3143 p_env = priv->p_env;
3144 if (count > MAX_NAME_LEN+1)
3145 return -EINVAL;
3146 memset(p_env->api_type, 0x20, MAX_NAME_LEN);
3147 strncpy(p_env->api_type,buf, count);
3148	p_env->api_type[count-1] = 0x20;	/* clear extra 0x0a */
3149 p_env->api_type[MAX_NAME_LEN] = 0x00;
3150 if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
3151 p_env->read_size=DEF_PACK_BUFSIZE;
3152 p_env->write_size=DEF_PACK_BUFSIZE;
3153 p_env->packing=PACKING_ASK;
3154 CLAW_DBF_TEXT(2, setup, "PACKING");
3155 }
3156 else {
3157 p_env->packing=0;
3158 p_env->read_size=CLAW_FRAME_SIZE;
3159 p_env->write_size=CLAW_FRAME_SIZE;
3160 CLAW_DBF_TEXT(2, setup, "ApiSet");
3161 }
3162 CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type);
3163 return count;
3164}
3165
3166static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
3167
3168static ssize_t
3169claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3170{
3171 struct claw_privbk *priv;
3172 struct claw_env * p_env;
3173
3174 priv = dev_get_drvdata(dev);
3175 if (!priv)
3176 return -ENODEV;
3177 p_env = priv->p_env;
3178 return sprintf(buf, "%d\n", p_env->write_buffers);
3179}
3180
3181static ssize_t
3182claw_wbuff_write(struct device *dev, struct device_attribute *attr,
3183 const char *buf, size_t count)
3184{
3185 struct claw_privbk *priv;
3186 struct claw_env * p_env;
3187 int nnn,max;
3188
3189 priv = dev_get_drvdata(dev);
3190 if (!priv)
3191 return -ENODEV;
3192 p_env = priv->p_env;
3193 sscanf(buf, "%i", &nnn);
3194 if (p_env->packing) {
3195 max = 64;
3196 }
3197 else {
3198 max = 512;
3199 }
3200 if ((nnn > max ) || (nnn < 2))
3201 return -EINVAL;
3202 p_env->write_buffers = nnn;
3203 CLAW_DBF_TEXT(2, setup, "Wbufset");
3204 CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers);
3205 return count;
3206}
3207
3208static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);
3209
3210static ssize_t
3211claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3212{
3213 struct claw_privbk *priv;
3214 struct claw_env * p_env;
3215
3216 priv = dev_get_drvdata(dev);
3217 if (!priv)
3218 return -ENODEV;
3219 p_env = priv->p_env;
3220 return sprintf(buf, "%d\n", p_env->read_buffers);
3221}
3222
3223static ssize_t
3224claw_rbuff_write(struct device *dev, struct device_attribute *attr,
3225 const char *buf, size_t count)
3226{
3227 struct claw_privbk *priv;
3228 struct claw_env *p_env;
3229 int nnn,max;
3230
3231 priv = dev_get_drvdata(dev);
3232 if (!priv)
3233 return -ENODEV;
3234 p_env = priv->p_env;
3235 sscanf(buf, "%i", &nnn);
3236 if (p_env->packing) {
3237 max = 64;
3238 }
3239 else {
3240 max = 512;
3241 }
3242 if ((nnn > max ) || (nnn < 2))
3243 return -EINVAL;
3244 p_env->read_buffers = nnn;
3245 CLAW_DBF_TEXT(2, setup, "Rbufset");
3246 CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
3247 return count;
3248}
3249static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
3250
3251static struct attribute *claw_attr[] = {
3252 &dev_attr_read_buffer.attr,
3253 &dev_attr_write_buffer.attr,
3254 &dev_attr_adapter_name.attr,
3255 &dev_attr_api_type.attr,
3256 &dev_attr_host_name.attr,
3257 NULL,
3258};
3259static struct attribute_group claw_attr_group = {
3260 .attrs = claw_attr,
3261};
3262static const struct attribute_group *claw_attr_groups[] = {
3263 &claw_attr_group,
3264 NULL,
3265};
3266static const struct device_type claw_devtype = {
3267 .name = "claw",
3268 .groups = claw_attr_groups,
3269};
3270
3271/*----------------------------------------------------------------*
3272 * claw_probe *
3273 * this function is called for each CLAW device. *
3274 *----------------------------------------------------------------*/
3275static int claw_probe(struct ccwgroup_device *cgdev)
3276{
3277 struct claw_privbk *privptr = NULL;
3278
3279 CLAW_DBF_TEXT(2, setup, "probe");
3280 if (!get_device(&cgdev->dev))
3281 return -ENODEV;
3282 privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
3283 dev_set_drvdata(&cgdev->dev, privptr);
3284 if (privptr == NULL) {
3285 probe_error(cgdev);
3286 put_device(&cgdev->dev);
3287 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
3288 return -ENOMEM;
3289 }
3290 privptr->p_mtc_envelope = kzalloc(MAX_ENVELOPE_SIZE, GFP_KERNEL);
3291 privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
3292 if ((privptr->p_mtc_envelope == NULL) || (privptr->p_env == NULL)) {
3293 probe_error(cgdev);
3294 put_device(&cgdev->dev);
3295 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
3296 return -ENOMEM;
3297 }
3298 memcpy(privptr->p_env->adapter_name, WS_NAME_NOT_DEF, 8);
3299 memcpy(privptr->p_env->host_name, WS_NAME_NOT_DEF, 8);
3300 memcpy(privptr->p_env->api_type, WS_NAME_NOT_DEF, 8);
3301 privptr->p_env->packing = 0;
3302 privptr->p_env->write_buffers = 5;
3303 privptr->p_env->read_buffers = 5;
3304 privptr->p_env->read_size = CLAW_FRAME_SIZE;
3305 privptr->p_env->write_size = CLAW_FRAME_SIZE;
3306 privptr->p_env->p_priv = privptr;
3307 cgdev->cdev[0]->handler = claw_irq_handler;
3308 cgdev->cdev[1]->handler = claw_irq_handler;
3309 cgdev->dev.type = &claw_devtype;
3310 CLAW_DBF_TEXT(2, setup, "prbext 0");
3311
3312 return 0;
3313} /* end of claw_probe */
3314
3315/*--------------------------------------------------------------------*
3316* claw_init and cleanup *
3317*---------------------------------------------------------------------*/
3318
3319static void __exit claw_cleanup(void)
3320{
3321 ccwgroup_driver_unregister(&claw_group_driver);
3322 ccw_driver_unregister(&claw_ccw_driver);
3323 root_device_unregister(claw_root_dev);
3324 claw_unregister_debug_facility();
3325 pr_info("Driver unloaded\n");
3326}
3327
3328/**
3329 * Initialize module.
3330 * This is called just after the module is loaded.
3331 *
3332 * @return 0 on success, !0 on error.
3333 */
3334static int __init claw_init(void)
3335{
3336 int ret = 0;
3337
3338 pr_info("Loading %s\n", version);
3339 ret = claw_register_debug_facility();
3340 if (ret) {
3341 pr_err("Registering with the S/390 debug feature"
3342 " failed with error code %d\n", ret);
3343 goto out_err;
3344 }
3345 CLAW_DBF_TEXT(2, setup, "init_mod");
3346 claw_root_dev = root_device_register("claw");
3347 ret = PTR_ERR_OR_ZERO(claw_root_dev);
3348 if (ret)
3349 goto register_err;
3350 ret = ccw_driver_register(&claw_ccw_driver);
3351 if (ret)
3352 goto ccw_err;
3353 claw_group_driver.driver.groups = claw_drv_attr_groups;
3354 ret = ccwgroup_driver_register(&claw_group_driver);
3355 if (ret)
3356 goto ccwgroup_err;
3357 return 0;
3358
3359ccwgroup_err:
3360 ccw_driver_unregister(&claw_ccw_driver);
3361ccw_err:
3362 root_device_unregister(claw_root_dev);
3363register_err:
3364 CLAW_DBF_TEXT(2, setup, "init_bad");
3365 claw_unregister_debug_facility();
3366out_err:
3367 pr_err("Initializing the claw device driver failed\n");
3368 return ret;
3369}
3370
3371module_init(claw_init);
3372module_exit(claw_cleanup);
3373
3374MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
3375MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
3376 "Copyright IBM Corp. 2000, 2008\n");
3377MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
deleted file mode 100644
index 3339b9b607b3..000000000000
--- a/drivers/s390/net/claw.h
+++ /dev/null
@@ -1,348 +0,0 @@
1/*******************************************************
2* Define constants *
3* *
4********************************************************/
5
6/*-----------------------------------------------------*
7* CCW command codes for CLAW protocol *
8*------------------------------------------------------*/
9
10#define CCW_CLAW_CMD_WRITE 0x01 /* write - not including link */
11#define CCW_CLAW_CMD_READ 0x02 /* read */
12#define CCW_CLAW_CMD_NOP 0x03 /* NOP */
13#define CCW_CLAW_CMD_SENSE 0x04 /* Sense */
14#define CCW_CLAW_CMD_SIGNAL_SMOD 0x05 /* Signal Status Modifier */
15#define CCW_CLAW_CMD_TIC 0x08 /* TIC */
16#define CCW_CLAW_CMD_READHEADER 0x12 /* read header data */
17#define CCW_CLAW_CMD_READFF 0x22 /* read an FF */
18#define CCW_CLAW_CMD_SENSEID 0xe4 /* Sense ID */
19
20
21/*-----------------------------------------------------*
22* CLAW Unique constants *
23*------------------------------------------------------*/
24
25#define MORE_to_COME_FLAG 0x04 /* OR with write CCW in case of m-t-c */
26#define CLAW_IDLE 0x00 /* flag to indicate CLAW is idle */
27#define CLAW_BUSY 0xff /* flag to indicate CLAW is busy */
28#define CLAW_PENDING 0x00 /* flag to indicate i/o is pending */
29#define CLAW_COMPLETE 0xff /* flag to indicate i/o completed */
30
31/*-----------------------------------------------------*
32* CLAW control command code *
33*------------------------------------------------------*/
34
35#define SYSTEM_VALIDATE_REQUEST 0x01 /* System Validate request */
36#define SYSTEM_VALIDATE_RESPONSE 0x02 /* System Validate response */
37#define CONNECTION_REQUEST 0x21 /* Connection request */
38#define CONNECTION_RESPONSE 0x22 /* Connection response */
39#define CONNECTION_CONFIRM 0x23 /* Connection confirm */
40#define DISCONNECT 0x24 /* Disconnect */
41#define CLAW_ERROR 0x41 /* CLAW error message */
42#define CLAW_VERSION_ID 2 /* CLAW version ID */
43
44/*-----------------------------------------------------*
45* CLAW adapter sense bytes                           *
46*------------------------------------------------------*/
47
48#define CLAW_ADAPTER_SENSE_BYTE 0x41 /* Stop command issued to adapter */
49
50/*-----------------------------------------------------*
51* CLAW control command return codes *
52*------------------------------------------------------*/
53
54#define CLAW_RC_NAME_MISMATCH 166 /* names do not match */
55#define CLAW_RC_WRONG_VERSION 167 /* wrong CLAW version number */
56#define CLAW_RC_HOST_RCV_TOO_SMALL 180 /* Host maximum receive is */
57 /* less than Linux on zSeries*/
58 /* transmit size */
59
60/*-----------------------------------------------------*
61* CLAW Constants application name *
62*------------------------------------------------------*/
63
64#define HOST_APPL_NAME "TCPIP "
65#define WS_APPL_NAME_IP_LINK "TCPIP "
66#define WS_APPL_NAME_IP_NAME "IP "
67#define WS_APPL_NAME_API_LINK "API "
68#define WS_APPL_NAME_PACKED "PACKED "
69#define WS_NAME_NOT_DEF "NOT_DEF "
70#define PACKING_ASK 1
71#define PACK_SEND 2
72#define DO_PACKED 3
73
74#define MAX_ENVELOPE_SIZE 65536
75#define CLAW_DEFAULT_MTU_SIZE 4096
76#define DEF_PACK_BUFSIZE 32768
77#define READ_CHANNEL 0
78#define WRITE_CHANNEL 1
79
80#define TB_TX 0 /* sk buffer handling in process */
81#define TB_STOP 1 /* network device stop in process */
82#define TB_RETRY 2 /* retry in process */
83#define TB_NOBUFFER 3 /* no buffer on free queue */
84#define CLAW_MAX_LINK_ID 1
85#define CLAW_MAX_DEV 256 /* max claw devices */
86#define MAX_NAME_LEN 8 /* host name, adapter name length */
87#define CLAW_FRAME_SIZE 4096
88#define CLAW_ID_SIZE 20+3
89
90/* state machine codes used in claw_irq_handler */
91
92#define CLAW_STOP 0
93#define CLAW_START_HALT_IO 1
94#define CLAW_START_SENSEID 2
95#define CLAW_START_READ 3
96#define CLAW_START_WRITE 4
97
98/*-----------------------------------------------------*
99* Lock flag *
100*------------------------------------------------------*/
101#define LOCK_YES 0
102#define LOCK_NO 1
103
104/*-----------------------------------------------------*
105* DBF Debug macros *
106*------------------------------------------------------*/
107#define CLAW_DBF_TEXT(level, name, text) \
108 do { \
109 debug_text_event(claw_dbf_##name, level, text); \
110 } while (0)
111
112#define CLAW_DBF_HEX(level,name,addr,len) \
113do { \
114 debug_event(claw_dbf_##name,level,(void*)(addr),len); \
115} while (0)
116
117#define CLAW_DBF_TEXT_(level,name,text...) \
118 do { \
119 if (debug_level_enabled(claw_dbf_##name, level)) { \
120 sprintf(debug_buffer, text); \
121 debug_text_event(claw_dbf_##name, level, \
122 debug_buffer); \
123 } \
124 } while (0)
125
126/**
127 * Enum for classifying detected devices.
128 */
129enum claw_channel_types {
130 /* Device is not a channel */
131 claw_channel_type_none,
132
133 /* Device is a CLAW channel device */
134 claw_channel_type_claw
135};
136
137
138/*******************************************************
139* Define Control Blocks *
140* *
141********************************************************/
142
143/*------------------------------------------------------*/
144/* CLAW header */
145/*------------------------------------------------------*/
146
147struct clawh {
148 __u16 length; /* length of data read by preceding read CCW */
149 __u8 opcode; /* equivalent read CCW */
150 __u8 flag; /* flag of FF to indicate read was completed */
151};
152
153/*------------------------------------------------------*/
154/* CLAW Packing header 4 bytes */
155/*------------------------------------------------------*/
156struct clawph {
157 __u16 len; /* Length of Packed Data Area */
158 __u8 flag; /* Reserved not used */
159 __u8 link_num; /* Link ID */
160};
161
162/*------------------------------------------------------*/
163/* CLAW Ending struct ccwbk */
164/*------------------------------------------------------*/
165struct endccw {
166 __u32 real; /* real address of this block */
167 __u8 write1; /* write 1 is active */
168 __u8 read1; /* read 1 is active */
169 __u16 reserved; /* reserved for future use */
170 struct ccw1 write1_nop1;
171 struct ccw1 write1_nop2;
172 struct ccw1 write2_nop1;
173 struct ccw1 write2_nop2;
174 struct ccw1 read1_nop1;
175 struct ccw1 read1_nop2;
176 struct ccw1 read2_nop1;
177 struct ccw1 read2_nop2;
178};
179
180/*------------------------------------------------------*/
181/* CLAW struct ccwbk */
182/*------------------------------------------------------*/
183struct ccwbk {
184 void *next; /* pointer to next ccw block */
185 __u32 real; /* real address of this ccw */
186 void *p_buffer; /* virtual address of data */
187 struct clawh header; /* claw header */
188 struct ccw1 write; /* write CCW */
189 struct ccw1 w_read_FF; /* read FF */
190 struct ccw1 w_TIC_1; /* TIC */
191 struct ccw1 read; /* read CCW */
192 struct ccw1 read_h; /* read header */
193 struct ccw1 signal; /* signal SMOD */
194 struct ccw1 r_TIC_1; /* TIC1 */
195 struct ccw1 r_read_FF; /* read FF */
196 struct ccw1 r_TIC_2; /* TIC2 */
197};
198
199/*------------------------------------------------------*/
200/* CLAW control block */
201/*------------------------------------------------------*/
202struct clawctl {
203 __u8 command; /* control command */
204 __u8 version; /* CLAW protocol version */
205 __u8 linkid; /* link ID */
206 __u8 correlator; /* correlator */
207 __u8 rc; /* return code */
208 __u8 reserved1; /* reserved */
209 __u8 reserved2; /* reserved */
210 __u8 reserved3; /* reserved */
211 __u8 data[24]; /* command specific fields */
212};
213
214/*------------------------------------------------------*/
215/* Data for SYSTEMVALIDATE command */
216/*------------------------------------------------------*/
217struct sysval {
218 char WS_name[8]; /* Workstation System name */
219 char host_name[8]; /* Host system name */
220 __u16 read_frame_size; /* read frame size */
221 __u16 write_frame_size; /* write frame size */
222 __u8 reserved[4]; /* reserved */
223};
224
225/*------------------------------------------------------*/
226/* Data for Connect command */
227/*------------------------------------------------------*/
228struct conncmd {
229 char WS_name[8]; /* Workstation application name */
230 char host_name[8]; /* Host application name */
231 __u16 reserved1[2]; /* read frame size */
232 __u8 reserved2[4]; /* reserved */
233};
234
235/*------------------------------------------------------*/
236/* Data for CLAW error */
237/*------------------------------------------------------*/
238struct clawwerror {
239 char reserved1[8]; /* reserved */
240 char reserved2[8]; /* reserved */
241 char reserved3[8]; /* reserved */
242};
243
244/*------------------------------------------------------*/
245/* Data buffer for CLAW */
246/*------------------------------------------------------*/
247struct clawbuf {
248 char buffer[MAX_ENVELOPE_SIZE]; /* data buffer */
249};
250
251/*------------------------------------------------------*/
252/* Channel control block for read and write channel */
253/*------------------------------------------------------*/
254
255struct chbk {
256 unsigned int devno;
257 int irq;
258 char id[CLAW_ID_SIZE];
259 __u32 IO_active;
260 __u8 claw_state;
261 struct irb *irb;
262 struct ccw_device *cdev; /* pointer to the channel device */
263 struct net_device *ndev;
264 wait_queue_head_t wait;
265 struct tasklet_struct tasklet;
266 struct timer_list timer;
267 unsigned long flag_a; /* atomic flags */
268#define CLAW_BH_ACTIVE 0
269 unsigned long flag_b; /* atomic flags */
270#define CLAW_WRITE_ACTIVE 0
271 __u8 last_dstat;
272 __u8 flag;
273 struct sk_buff_head collect_queue;
274 spinlock_t collect_lock;
275#define CLAW_WRITE 0x02 /* - Set if this is a write channel */
276#define CLAW_READ 0x01 /* - Set if this is a read channel */
277#define CLAW_TIMER 0x80 /* - Set if timer made the wake_up */
278};
279
280/*--------------------------------------------------------------*
281* CLAW environment block *
282*---------------------------------------------------------------*/
283
284struct claw_env {
285 unsigned int devno[2]; /* device number */
286 char host_name[9]; /* Host name */
287 char adapter_name [9]; /* adapter name */
288 char api_type[9]; /* TCPIP, API or PACKED */
289 void *p_priv; /* privptr */
290 __u16 read_buffers; /* read buffer number */
291 __u16 write_buffers; /* write buffer number */
292 __u16 read_size; /* read buffer size */
293 __u16 write_size; /* write buffer size */
294 __u16 dev_id; /* device ident */
295 __u8 packing; /* are we packing? */
296 __u8 in_use; /* device active flag */
297 struct net_device *ndev; /* backward ptr to the net dev*/
298};
299
300/*--------------------------------------------------------------*
301* CLAW main control block *
302*---------------------------------------------------------------*/
303
304struct claw_privbk {
305 void *p_buff_ccw;
306 __u32 p_buff_ccw_num;
307 void *p_buff_read;
308 __u32 p_buff_read_num;
309 __u32 p_buff_pages_perread;
310 void *p_buff_write;
311 __u32 p_buff_write_num;
312 __u32 p_buff_pages_perwrite;
313 long active_link_ID; /* Active logical link ID */
314 struct ccwbk *p_write_free_chain; /* pointer to free ccw chain */
315 struct ccwbk *p_write_active_first; /* ptr to the first write ccw */
316 struct ccwbk *p_write_active_last; /* ptr to the last write ccw */
317 struct ccwbk *p_read_active_first; /* ptr to the first read ccw */
318 struct ccwbk *p_read_active_last; /* ptr to the last read ccw */
319 struct endccw *p_end_ccw; /*ptr to ending ccw */
320 struct ccwbk *p_claw_signal_blk; /* ptr to signal block */
321 __u32 write_free_count; /* number of free bufs for write */
 322 struct net_device_stats stats; /* device statistics */
323 struct chbk channel[2]; /* Channel control blocks */
324 __u8 mtc_skipping;
325 int mtc_offset;
326 int mtc_logical_link;
327 void *p_mtc_envelope;
328 struct sk_buff *pk_skb; /* packing buffer */
329 int pk_cnt;
330 struct clawctl ctl_bk;
331 struct claw_env *p_env;
332 __u8 system_validate_comp;
333 __u8 release_pend;
334 __u8 checksum_received_ip_pkts;
335 __u8 buffs_alloc;
336 struct endccw end_ccw;
337 unsigned long tbusy;
338
339};
340
341
342/************************************************************/
343/* define global constants */
344/************************************************************/
345
346#define CCWBK_SIZE sizeof(struct ccwbk)
347
348
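
A structural observation on the control blocks defined above: struct clawctl is eight one-byte header fields plus a 24-byte data area, and both struct sysval and struct conncmd come out to exactly 24 bytes, so either payload fits the data area with no slack. The following self-contained, user-space sketch checks this with replica structs using <stdint.h> types in place of __u8/__u16; it is illustrative only, and the kernel definitions above remain authoritative.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Replica of struct clawctl: 8 header bytes + 24 payload bytes. */
struct clawctl_replica {
	uint8_t command, version, linkid, correlator;
	uint8_t rc, reserved1, reserved2, reserved3;
	uint8_t data[24];		/* command-specific payload */
};

/* Replica of struct sysval (SYSTEM_VALIDATE payload). */
struct sysval_replica {
	char     ws_name[8];
	char     host_name[8];
	uint16_t read_frame_size;
	uint16_t write_frame_size;
	uint8_t  reserved[4];
};

/* Replica of struct conncmd (CONNECTION_* payload). */
struct conncmd_replica {
	char     ws_name[8];
	char     host_name[8];
	uint16_t reserved1[2];
	uint8_t  reserved2[4];
};

int main(void)
{
	/* Every CLAW control message is a fixed 32-byte block, and both
	 * payload layouts fill the 24-byte data area exactly. */
	static_assert(sizeof(struct clawctl_replica) == 32, "layout");
	static_assert(sizeof(struct sysval_replica)  == 24, "layout");
	static_assert(sizeof(struct conncmd_replica) == 24, "layout");
	printf("clawctl=%zu sysval=%zu conncmd=%zu\n",
	       sizeof(struct clawctl_replica),
	       sizeof(struct sysval_replica),
	       sizeof(struct conncmd_replica));
	return 0;
}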
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 642c77c76b84..3466d3cb7647 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4218,7 +4218,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
 	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
 
 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
-			sizeof(struct qeth_ipacmd_setadpparms));
+			sizeof(struct qeth_ipacmd_setadpparms_hdr) + 8);
 	if (!iob)
 		return;
 	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
@@ -4290,7 +4290,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
 	QETH_CARD_TEXT(card, 4, "chgmac");
 
 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
-			sizeof(struct qeth_ipacmd_setadpparms));
+			sizeof(struct qeth_ipacmd_setadpparms_hdr) +
+			sizeof(struct qeth_change_addr));
 	if (!iob)
 		return -ENOMEM;
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
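
Both qeth hunks shrink the length handed to qeth_get_adapter_cmd(): instead of sizeof(struct qeth_ipacmd_setadpparms), which covers the header plus a payload area sized for the largest subcommand, each caller now requests the setadpparms header plus only the bytes its subcommand actually uses (8 bytes for SET_PROMISC_MODE, sizeof(struct qeth_change_addr) for ALTER_MAC_ADDRESS). The sketch below illustrates the size effect with hypothetical stand-in structures; none of these definitions are the real qeth layouts, which live in drivers/s390/net/qeth_core_mpc.h.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical command header; field names are illustrative. */
struct setadp_hdr {
	uint32_t cmdlength;
	uint32_t command_code;
	uint32_t return_code;
	uint32_t frames_used;
};

/* Hypothetical ALTER_MAC_ADDRESS payload. */
struct change_addr {
	uint32_t cmd;
	uint32_t addr_size;
	uint8_t  addr[8];
};

/* Header plus a payload area sized for the largest subcommand. */
struct setadp_full {
	struct setadp_hdr hdr;
	union {
		uint32_t promisc_mode;	/* SET_PROMISC_MODE uses 8 bytes */
		struct change_addr change_addr;
		uint8_t  largest[128];	/* some other, bigger subcommand */
	} data;
};

int main(void)
{
	/* The diff's point: request header + used payload, not the full union. */
	size_t full    = sizeof(struct setadp_full);
	size_t promisc = sizeof(struct setadp_hdr) + 8;
	size_t chgmac  = sizeof(struct setadp_hdr) + sizeof(struct change_addr);

	printf("full=%zu promisc=%zu chgmac=%zu\n", full, promisc, chgmac);
	return 0;
}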