aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/s390/net
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2008-04-18 21:02:35 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-04-18 21:02:35 -0400
commit334d094504c2fe1c44211ecb49146ae6bca8c321 (patch)
treed3c0f68e4b9f8e3d2ccc39e7dfe5de0534a5fad9 /drivers/s390/net
parentd1a4be630fb068f251d64b62919f143c49ca8057 (diff)
parentd1643d24c61b725bef399cc1cf2944b4c9c23177 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.26
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.26: (1090 commits) [NET]: Fix and allocate less memory for ->priv'less netdevices [IPV6]: Fix dangling references on error in fib6_add(). [NETLABEL]: Fix NULL deref in netlbl_unlabel_staticlist_gen() if ifindex not found [PKT_SCHED]: Fix datalen check in tcf_simp_init(). [INET]: Uninline the __inet_inherit_port call. [INET]: Drop the inet_inherit_port() call. SCTP: Initialize partial_bytes_acked to 0, when all of the data is acked. [netdrvr] forcedeth: internal simplifications; changelog removal phylib: factor out get_phy_id from within get_phy_device PHY: add BCM5464 support to broadcom PHY driver cxgb3: Fix __must_check warning with dev_dbg. tc35815: Statistics cleanup natsemi: fix MMIO for PPC 44x platforms [TIPC]: Cleanup of TIPC reference table code [TIPC]: Optimized initialization of TIPC reference table [TIPC]: Remove inlining of reference table locking routines e1000: convert uint16_t style integers to u16 ixgb: convert uint16_t style integers to u16 sb1000.c: make const arrays static sb1000.c: stop inlining largish static functions ...
Diffstat (limited to 'drivers/s390/net')
-rw-r--r--drivers/s390/net/Kconfig84
-rw-r--r--drivers/s390/net/Makefile12
-rw-r--r--drivers/s390/net/ctcdbug.c80
-rw-r--r--drivers/s390/net/ctcdbug.h125
-rw-r--r--drivers/s390/net/ctcm_dbug.c67
-rw-r--r--drivers/s390/net/ctcm_dbug.h158
-rw-r--r--drivers/s390/net/ctcm_fsms.c2347
-rw-r--r--drivers/s390/net/ctcm_fsms.h359
-rw-r--r--drivers/s390/net/ctcm_main.c1772
-rw-r--r--drivers/s390/net/ctcm_main.h287
-rw-r--r--drivers/s390/net/ctcm_mpc.c2472
-rw-r--r--drivers/s390/net/ctcm_mpc.h239
-rw-r--r--drivers/s390/net/ctcm_sysfs.c210
-rw-r--r--drivers/s390/net/ctcmain.c3062
-rw-r--r--drivers/s390/net/ctcmain.h270
-rw-r--r--drivers/s390/net/qeth.h1253
-rw-r--r--drivers/s390/net/qeth_core.h905
-rw-r--r--drivers/s390/net/qeth_core_main.c4492
-rw-r--r--drivers/s390/net/qeth_core_mpc.c266
-rw-r--r--drivers/s390/net/qeth_core_mpc.h (renamed from drivers/s390/net/qeth_mpc.h)145
-rw-r--r--drivers/s390/net/qeth_core_offl.c (renamed from drivers/s390/net/qeth_eddp.c)345
-rw-r--r--drivers/s390/net/qeth_core_offl.h (renamed from drivers/s390/net/qeth_eddp.h)50
-rw-r--r--drivers/s390/net/qeth_core_sys.c651
-rw-r--r--drivers/s390/net/qeth_fs.h168
-rw-r--r--drivers/s390/net/qeth_l2_main.c1234
-rw-r--r--drivers/s390/net/qeth_l3.h67
-rw-r--r--drivers/s390/net/qeth_l3_main.c3396
-rw-r--r--drivers/s390/net/qeth_l3_sys.c1051
-rw-r--r--drivers/s390/net/qeth_main.c8956
-rw-r--r--drivers/s390/net/qeth_mpc.c269
-rw-r--r--drivers/s390/net/qeth_proc.c316
-rw-r--r--drivers/s390/net/qeth_sys.c1858
-rw-r--r--drivers/s390/net/qeth_tso.h148
33 files changed, 20316 insertions, 16798 deletions
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index eada69dec4fe..a7745c82b4ae 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -5,22 +5,25 @@ config LCS
5 tristate "Lan Channel Station Interface" 5 tristate "Lan Channel Station Interface"
6 depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI) 6 depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI)
7 help 7 help
8 Select this option if you want to use LCS networking on IBM S/390 8 Select this option if you want to use LCS networking on IBM System z.
9 or zSeries. This device driver supports Token Ring (IEEE 802.5), 9 This device driver supports Token Ring (IEEE 802.5),
10 FDDI (IEEE 802.7) and Ethernet. 10 FDDI (IEEE 802.7) and Ethernet.
11 This option is also available as a module which will be 11 To compile as a module, choose M. The module name is lcs.ko.
12 called lcs.ko. If you do not know what it is, it's safe to say "Y". 12 If you do not know what it is, it's safe to choose Y.
13 13
14config CTC 14config CTCM
15 tristate "CTC device support" 15 tristate "CTC and MPC SNA device support"
16 depends on CCW && NETDEVICES 16 depends on CCW && NETDEVICES
17 help 17 help
18 Select this option if you want to use channel-to-channel networking 18 Select this option if you want to use channel-to-channel
19 on IBM S/390 or zSeries. This device driver supports real CTC 19 point-to-point networking on IBM System z.
20 coupling using ESCON. It also supports virtual CTCs when running 20 This device driver supports real CTC coupling using ESCON.
21 under VM. It will use the channel device configuration if this is 21 It also supports virtual CTCs when running under VM.
22 available. This option is also available as a module which will be 22 This driver also supports channel-to-channel MPC SNA devices.
23 called ctc.ko. If you do not know what it is, it's safe to say "Y". 23 MPC is an SNA protocol device used by Communication Server for Linux.
24 To compile as a module, choose M. The module name is ctcm.ko.
25 To compile into the kernel, choose Y.
26 If you do not need any channel-to-channel connection, choose N.
24 27
25config NETIUCV 28config NETIUCV
26 tristate "IUCV network device support (VM only)" 29 tristate "IUCV network device support (VM only)"
@@ -29,9 +32,9 @@ config NETIUCV
29 Select this option if you want to use inter-user communication 32 Select this option if you want to use inter-user communication
30 vehicle networking under VM or VIF. It enables a fast communication 33 vehicle networking under VM or VIF. It enables a fast communication
31 link between VM guests. Using ifconfig a point-to-point connection 34 link between VM guests. Using ifconfig a point-to-point connection
32 can be established to the Linux for zSeries and S7390 system 35 can be established to the Linux on IBM System z
33 running on the other VM guest. This option is also available 36 running on the other VM guest. To compile as a module, choose M.
34 as a module which will be called netiucv.ko. If unsure, say "Y". 37 The module name is netiucv.ko. If unsure, choose Y.
35 38
36config SMSGIUCV 39config SMSGIUCV
37 tristate "IUCV special message support (VM only)" 40 tristate "IUCV special message support (VM only)"
@@ -47,43 +50,46 @@ config CLAW
47 This driver supports channel attached CLAW devices. 50 This driver supports channel attached CLAW devices.
48 CLAW is Common Link Access for Workstation. Common devices 51 CLAW is Common Link Access for Workstation. Common devices
49 that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices. 52 that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices.
50 To compile as a module choose M here: The module will be called 53 To compile as a module, choose M. The module name is claw.ko.
51 claw.ko to compile into the kernel choose Y 54 To compile into the kernel, choose Y.
52 55
53config QETH 56config QETH
54 tristate "Gigabit Ethernet device support" 57 tristate "Gigabit Ethernet device support"
55 depends on CCW && NETDEVICES && IP_MULTICAST && QDIO 58 depends on CCW && NETDEVICES && IP_MULTICAST && QDIO
56 help 59 help
57 This driver supports the IBM S/390 and zSeries OSA Express adapters 60 This driver supports the IBM System z OSA Express adapters
58 in QDIO mode (all media types), HiperSockets interfaces and VM GuestLAN 61 in QDIO mode (all media types), HiperSockets interfaces and VM GuestLAN
59 interfaces in QDIO and HIPER mode. 62 interfaces in QDIO and HIPER mode.
60 63
61 For details please refer to the documentation provided by IBM at 64 For details please refer to the documentation provided by IBM at
62 <http://www10.software.ibm.com/developerworks/opensource/linux390> 65 <http://www.ibm.com/developerworks/linux/linux390>
63 66
64 To compile this driver as a module, choose M here: the 67 To compile this driver as a module, choose M.
65 module will be called qeth.ko. 68 The module name is qeth.ko.
66 69
70config QETH_L2
71 tristate "qeth layer 2 device support"
72 depends on QETH
73 help
74 Select this option to be able to run qeth devices in layer 2 mode.
75 To compile as a module, choose M. The module name is qeth_l2.ko.
76 If unsure, choose y.
67 77
68comment "Gigabit Ethernet default settings" 78config QETH_L3
69 depends on QETH 79 tristate "qeth layer 3 device support"
80 depends on QETH
81 help
82 Select this option to be able to run qeth devices in layer 3 mode.
83 To compile as a module choose M. The module name is qeth_l3.ko.
84 If unsure, choose Y.
70 85
71config QETH_IPV6 86config QETH_IPV6
72 bool "IPv6 support for gigabit ethernet" 87 bool
73 depends on (QETH = IPV6) || (QETH && IPV6 = 'y') 88 depends on (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
74 help 89 default y
75 If CONFIG_QETH is switched on, this option will include IPv6
76 support in the qeth device driver.
77
78config QETH_VLAN
79 bool "VLAN support for gigabit ethernet"
80 depends on (QETH = VLAN_8021Q) || (QETH && VLAN_8021Q = 'y')
81 help
82 If CONFIG_QETH is switched on, this option will include IEEE
83 802.1q VLAN support in the qeth device driver.
84 90
85config CCWGROUP 91config CCWGROUP
86 tristate 92 tristate
87 default (LCS || CTC || QETH) 93 default (LCS || CTCM || QETH)
88 94
89endmenu 95endmenu
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index bbe3ab2e93d9..6382c04d2bdf 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -2,13 +2,15 @@
2# S/390 network devices 2# S/390 network devices
3# 3#
4 4
5ctc-objs := ctcmain.o ctcdbug.o 5ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o
6 6obj-$(CONFIG_CTCM) += ctcm.o fsm.o cu3088.o
7obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o 7obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
8obj-$(CONFIG_SMSGIUCV) += smsgiucv.o 8obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
9obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
10obj-$(CONFIG_LCS) += lcs.o cu3088.o 9obj-$(CONFIG_LCS) += lcs.o cu3088.o
11obj-$(CONFIG_CLAW) += claw.o cu3088.o 10obj-$(CONFIG_CLAW) += claw.o cu3088.o
12qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o 11qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o qeth_core_offl.o
13qeth-$(CONFIG_PROC_FS) += qeth_proc.o
14obj-$(CONFIG_QETH) += qeth.o 12obj-$(CONFIG_QETH) += qeth.o
13qeth_l2-y += qeth_l2_main.o
14obj-$(CONFIG_QETH_L2) += qeth_l2.o
15qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o
16obj-$(CONFIG_QETH_L3) += qeth_l3.o
diff --git a/drivers/s390/net/ctcdbug.c b/drivers/s390/net/ctcdbug.c
deleted file mode 100644
index e6e72deb36b5..000000000000
--- a/drivers/s390/net/ctcdbug.c
+++ /dev/null
@@ -1,80 +0,0 @@
1/*
2 *
3 * linux/drivers/s390/net/ctcdbug.c
4 *
5 * CTC / ESCON network driver - s390 dbf exploit.
6 *
7 * Copyright 2000,2003 IBM Corporation
8 *
9 * Author(s): Original Code written by
10 * Peter Tiedemann (ptiedem@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include "ctcdbug.h"
28
29/**
30 * Debug Facility Stuff
31 */
32debug_info_t *ctc_dbf_setup = NULL;
33debug_info_t *ctc_dbf_data = NULL;
34debug_info_t *ctc_dbf_trace = NULL;
35
36DEFINE_PER_CPU(char[256], ctc_dbf_txt_buf);
37
38void
39ctc_unregister_dbf_views(void)
40{
41 if (ctc_dbf_setup)
42 debug_unregister(ctc_dbf_setup);
43 if (ctc_dbf_data)
44 debug_unregister(ctc_dbf_data);
45 if (ctc_dbf_trace)
46 debug_unregister(ctc_dbf_trace);
47}
48int
49ctc_register_dbf_views(void)
50{
51 ctc_dbf_setup = debug_register(CTC_DBF_SETUP_NAME,
52 CTC_DBF_SETUP_PAGES,
53 CTC_DBF_SETUP_NR_AREAS,
54 CTC_DBF_SETUP_LEN);
55 ctc_dbf_data = debug_register(CTC_DBF_DATA_NAME,
56 CTC_DBF_DATA_PAGES,
57 CTC_DBF_DATA_NR_AREAS,
58 CTC_DBF_DATA_LEN);
59 ctc_dbf_trace = debug_register(CTC_DBF_TRACE_NAME,
60 CTC_DBF_TRACE_PAGES,
61 CTC_DBF_TRACE_NR_AREAS,
62 CTC_DBF_TRACE_LEN);
63
64 if ((ctc_dbf_setup == NULL) || (ctc_dbf_data == NULL) ||
65 (ctc_dbf_trace == NULL)) {
66 ctc_unregister_dbf_views();
67 return -ENOMEM;
68 }
69 debug_register_view(ctc_dbf_setup, &debug_hex_ascii_view);
70 debug_set_level(ctc_dbf_setup, CTC_DBF_SETUP_LEVEL);
71
72 debug_register_view(ctc_dbf_data, &debug_hex_ascii_view);
73 debug_set_level(ctc_dbf_data, CTC_DBF_DATA_LEVEL);
74
75 debug_register_view(ctc_dbf_trace, &debug_hex_ascii_view);
76 debug_set_level(ctc_dbf_trace, CTC_DBF_TRACE_LEVEL);
77
78 return 0;
79}
80
diff --git a/drivers/s390/net/ctcdbug.h b/drivers/s390/net/ctcdbug.h
deleted file mode 100644
index 413925ee23d1..000000000000
--- a/drivers/s390/net/ctcdbug.h
+++ /dev/null
@@ -1,125 +0,0 @@
1/*
2 *
3 * linux/drivers/s390/net/ctcdbug.h
4 *
5 * CTC / ESCON network driver - s390 dbf exploit.
6 *
7 * Copyright 2000,2003 IBM Corporation
8 *
9 * Author(s): Original Code written by
10 * Peter Tiedemann (ptiedem@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26#ifndef _CTCDBUG_H_
27#define _CTCDBUG_H_
28
29#include <asm/debug.h>
30#include "ctcmain.h"
31/**
32 * Debug Facility stuff
33 */
34#define CTC_DBF_SETUP_NAME "ctc_setup"
35#define CTC_DBF_SETUP_LEN 16
36#define CTC_DBF_SETUP_PAGES 8
37#define CTC_DBF_SETUP_NR_AREAS 1
38#define CTC_DBF_SETUP_LEVEL 3
39
40#define CTC_DBF_DATA_NAME "ctc_data"
41#define CTC_DBF_DATA_LEN 128
42#define CTC_DBF_DATA_PAGES 8
43#define CTC_DBF_DATA_NR_AREAS 1
44#define CTC_DBF_DATA_LEVEL 3
45
46#define CTC_DBF_TRACE_NAME "ctc_trace"
47#define CTC_DBF_TRACE_LEN 16
48#define CTC_DBF_TRACE_PAGES 4
49#define CTC_DBF_TRACE_NR_AREAS 2
50#define CTC_DBF_TRACE_LEVEL 3
51
52#define DBF_TEXT(name,level,text) \
53 do { \
54 debug_text_event(ctc_dbf_##name,level,text); \
55 } while (0)
56
57#define DBF_HEX(name,level,addr,len) \
58 do { \
59 debug_event(ctc_dbf_##name,level,(void*)(addr),len); \
60 } while (0)
61
62DECLARE_PER_CPU(char[256], ctc_dbf_txt_buf);
63extern debug_info_t *ctc_dbf_setup;
64extern debug_info_t *ctc_dbf_data;
65extern debug_info_t *ctc_dbf_trace;
66
67
68#define DBF_TEXT_(name,level,text...) \
69 do { \
70 char* ctc_dbf_txt_buf = get_cpu_var(ctc_dbf_txt_buf); \
71 sprintf(ctc_dbf_txt_buf, text); \
72 debug_text_event(ctc_dbf_##name,level,ctc_dbf_txt_buf); \
73 put_cpu_var(ctc_dbf_txt_buf); \
74 } while (0)
75
76#define DBF_SPRINTF(name,level,text...) \
77 do { \
78 debug_sprintf_event(ctc_dbf_trace, level, ##text ); \
79 debug_sprintf_event(ctc_dbf_trace, level, text ); \
80 } while (0)
81
82
83int ctc_register_dbf_views(void);
84
85void ctc_unregister_dbf_views(void);
86
87/**
88 * some more debug stuff
89 */
90
91#define HEXDUMP16(importance,header,ptr) \
92PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
93 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
94 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
95 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
96 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
97 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
98 *(((char*)ptr)+12),*(((char*)ptr)+13), \
99 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
100PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
101 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
102 *(((char*)ptr)+16),*(((char*)ptr)+17), \
103 *(((char*)ptr)+18),*(((char*)ptr)+19), \
104 *(((char*)ptr)+20),*(((char*)ptr)+21), \
105 *(((char*)ptr)+22),*(((char*)ptr)+23), \
106 *(((char*)ptr)+24),*(((char*)ptr)+25), \
107 *(((char*)ptr)+26),*(((char*)ptr)+27), \
108 *(((char*)ptr)+28),*(((char*)ptr)+29), \
109 *(((char*)ptr)+30),*(((char*)ptr)+31));
110
111static inline void
112hex_dump(unsigned char *buf, size_t len)
113{
114 size_t i;
115
116 for (i = 0; i < len; i++) {
117 if (i && !(i % 16))
118 printk("\n");
119 printk("%02x ", *(buf + i));
120 }
121 printk("\n");
122}
123
124
125#endif
diff --git a/drivers/s390/net/ctcm_dbug.c b/drivers/s390/net/ctcm_dbug.c
new file mode 100644
index 000000000000..8eb25d00b2e7
--- /dev/null
+++ b/drivers/s390/net/ctcm_dbug.c
@@ -0,0 +1,67 @@
1/*
2 * drivers/s390/net/ctcm_dbug.c
3 *
4 * Copyright IBM Corp. 2001, 2007
5 * Authors: Peter Tiedemann (ptiedem@de.ibm.com)
6 *
7 */
8
9#include <linux/stddef.h>
10#include <linux/kernel.h>
11#include <linux/errno.h>
12#include <linux/slab.h>
13#include <linux/ctype.h>
14#include <linux/sysctl.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/fs.h>
18#include <linux/debugfs.h>
19#include "ctcm_dbug.h"
20
21/*
22 * Debug Facility Stuff
23 */
24
25DEFINE_PER_CPU(char[256], ctcm_dbf_txt_buf);
26
27struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS] = {
28 [CTCM_DBF_SETUP] = {"ctc_setup", 8, 1, 64, 5, NULL},
29 [CTCM_DBF_ERROR] = {"ctc_error", 8, 1, 64, 3, NULL},
30 [CTCM_DBF_TRACE] = {"ctc_trace", 8, 1, 64, 3, NULL},
31 [CTCM_DBF_MPC_SETUP] = {"mpc_setup", 8, 1, 64, 5, NULL},
32 [CTCM_DBF_MPC_ERROR] = {"mpc_error", 8, 1, 64, 3, NULL},
33 [CTCM_DBF_MPC_TRACE] = {"mpc_trace", 8, 1, 64, 3, NULL},
34};
35
36void ctcm_unregister_dbf_views(void)
37{
38 int x;
39 for (x = 0; x < CTCM_DBF_INFOS; x++) {
40 debug_unregister(ctcm_dbf[x].id);
41 ctcm_dbf[x].id = NULL;
42 }
43}
44
45int ctcm_register_dbf_views(void)
46{
47 int x;
48 for (x = 0; x < CTCM_DBF_INFOS; x++) {
49 /* register the areas */
50 ctcm_dbf[x].id = debug_register(ctcm_dbf[x].name,
51 ctcm_dbf[x].pages,
52 ctcm_dbf[x].areas,
53 ctcm_dbf[x].len);
54 if (ctcm_dbf[x].id == NULL) {
55 ctcm_unregister_dbf_views();
56 return -ENOMEM;
57 }
58
59 /* register a view */
60 debug_register_view(ctcm_dbf[x].id, &debug_hex_ascii_view);
61 /* set a passing level */
62 debug_set_level(ctcm_dbf[x].id, ctcm_dbf[x].level);
63 }
64
65 return 0;
66}
67
diff --git a/drivers/s390/net/ctcm_dbug.h b/drivers/s390/net/ctcm_dbug.h
new file mode 100644
index 000000000000..fdff34fe59a2
--- /dev/null
+++ b/drivers/s390/net/ctcm_dbug.h
@@ -0,0 +1,158 @@
1/*
2 * drivers/s390/net/ctcm_dbug.h
3 *
4 * Copyright IBM Corp. 2001, 2007
5 * Authors: Peter Tiedemann (ptiedem@de.ibm.com)
6 *
7 */
8
9#ifndef _CTCM_DBUG_H_
10#define _CTCM_DBUG_H_
11
12/*
13 * Debug Facility stuff
14 */
15
16#include <asm/debug.h>
17
18#ifdef DEBUG
19 #define do_debug 1
20#else
21 #define do_debug 0
22#endif
23#ifdef DEBUGDATA
24 #define do_debug_data 1
25#else
26 #define do_debug_data 0
27#endif
28#ifdef DEBUGCCW
29 #define do_debug_ccw 1
30#else
31 #define do_debug_ccw 0
32#endif
33
34/* define dbf debug levels similar to kernel msg levels */
35#define CTC_DBF_ALWAYS 0 /* always print this */
36#define CTC_DBF_EMERG 0 /* system is unusable */
37#define CTC_DBF_ALERT 1 /* action must be taken immediately */
38#define CTC_DBF_CRIT 2 /* critical conditions */
39#define CTC_DBF_ERROR 3 /* error conditions */
40#define CTC_DBF_WARN 4 /* warning conditions */
41#define CTC_DBF_NOTICE 5 /* normal but significant condition */
42#define CTC_DBF_INFO 5 /* informational */
43#define CTC_DBF_DEBUG 6 /* debug-level messages */
44
45DECLARE_PER_CPU(char[256], ctcm_dbf_txt_buf);
46
47enum ctcm_dbf_names {
48 CTCM_DBF_SETUP,
49 CTCM_DBF_ERROR,
50 CTCM_DBF_TRACE,
51 CTCM_DBF_MPC_SETUP,
52 CTCM_DBF_MPC_ERROR,
53 CTCM_DBF_MPC_TRACE,
54 CTCM_DBF_INFOS /* must be last element */
55};
56
57struct ctcm_dbf_info {
58 char name[DEBUG_MAX_NAME_LEN];
59 int pages;
60 int areas;
61 int len;
62 int level;
63 debug_info_t *id;
64};
65
66extern struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS];
67
68int ctcm_register_dbf_views(void);
69void ctcm_unregister_dbf_views(void);
70
71static inline const char *strtail(const char *s, int n)
72{
73 int l = strlen(s);
74 return (l > n) ? s + (l - n) : s;
75}
76
77/* sort out levels early to avoid unnecessary sprintfs */
78static inline int ctcm_dbf_passes(debug_info_t *dbf_grp, int level)
79{
80 return (dbf_grp->level >= level);
81}
82
83#define CTCM_FUNTAIL strtail((char *)__func__, 16)
84
85#define CTCM_DBF_TEXT(name, level, text) \
86 do { \
87 debug_text_event(ctcm_dbf[CTCM_DBF_##name].id, level, text); \
88 } while (0)
89
90#define CTCM_DBF_HEX(name, level, addr, len) \
91 do { \
92 debug_event(ctcm_dbf[CTCM_DBF_##name].id, \
93 level, (void *)(addr), len); \
94 } while (0)
95
96#define CTCM_DBF_TEXT_(name, level, text...) \
97 do { \
98 if (ctcm_dbf_passes(ctcm_dbf[CTCM_DBF_##name].id, level)) { \
99 char *ctcm_dbf_txt_buf = \
100 get_cpu_var(ctcm_dbf_txt_buf); \
101 sprintf(ctcm_dbf_txt_buf, text); \
102 debug_text_event(ctcm_dbf[CTCM_DBF_##name].id, \
103 level, ctcm_dbf_txt_buf); \
104 put_cpu_var(ctcm_dbf_txt_buf); \
105 } \
106 } while (0)
107
108/*
109 * cat : one of {setup, mpc_setup, trace, mpc_trace, error, mpc_error}.
110 * dev : netdevice with valid name field.
111 * text: any text string.
112 */
113#define CTCM_DBF_DEV_NAME(cat, dev, text) \
114 do { \
115 CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%s) : %s", \
116 CTCM_FUNTAIL, dev->name, text); \
117 } while (0)
118
119#define MPC_DBF_DEV_NAME(cat, dev, text) \
120 do { \
121 CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%s) : %s", \
122 CTCM_FUNTAIL, dev->name, text); \
123 } while (0)
124
125#define CTCMY_DBF_DEV_NAME(cat, dev, text) \
126 do { \
127 if (IS_MPCDEV(dev)) \
128 MPC_DBF_DEV_NAME(cat, dev, text); \
129 else \
130 CTCM_DBF_DEV_NAME(cat, dev, text); \
131 } while (0)
132
133/*
134 * cat : one of {setup, mpc_setup, trace, mpc_trace, error, mpc_error}.
135 * dev : netdevice.
136 * text: any text string.
137 */
138#define CTCM_DBF_DEV(cat, dev, text) \
139 do { \
140 CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%p) : %s", \
141 CTCM_FUNTAIL, dev, text); \
142 } while (0)
143
144#define MPC_DBF_DEV(cat, dev, text) \
145 do { \
146 CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%p) : %s", \
147 CTCM_FUNTAIL, dev, text); \
148 } while (0)
149
150#define CTCMY_DBF_DEV(cat, dev, text) \
151 do { \
152 if (IS_MPCDEV(dev)) \
153 MPC_DBF_DEV(cat, dev, text); \
154 else \
155 CTCM_DBF_DEV(cat, dev, text); \
156 } while (0)
157
158#endif
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
new file mode 100644
index 000000000000..2a106f3a076d
--- /dev/null
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -0,0 +1,2347 @@
1/*
2 * drivers/s390/net/ctcm_fsms.c
3 *
4 * Copyright IBM Corp. 2001, 2007
5 * Authors: Fritz Elfert (felfert@millenux.com)
6 * Peter Tiedemann (ptiedem@de.ibm.com)
7 * MPC additions :
8 * Belinda Thompson (belindat@us.ibm.com)
9 * Andy Richter (richtera@us.ibm.com)
10 */
11
12#undef DEBUG
13#undef DEBUGDATA
14#undef DEBUGCCW
15
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/errno.h>
21#include <linux/types.h>
22#include <linux/interrupt.h>
23#include <linux/timer.h>
24#include <linux/bitops.h>
25
26#include <linux/signal.h>
27#include <linux/string.h>
28
29#include <linux/ip.h>
30#include <linux/if_arp.h>
31#include <linux/tcp.h>
32#include <linux/skbuff.h>
33#include <linux/ctype.h>
34#include <net/dst.h>
35
36#include <linux/io.h>
37#include <asm/ccwdev.h>
38#include <asm/ccwgroup.h>
39#include <linux/uaccess.h>
40
41#include <asm/idals.h>
42
43#include "fsm.h"
44#include "cu3088.h"
45
46#include "ctcm_dbug.h"
47#include "ctcm_main.h"
48#include "ctcm_fsms.h"
49
50const char *dev_state_names[] = {
51 [DEV_STATE_STOPPED] = "Stopped",
52 [DEV_STATE_STARTWAIT_RXTX] = "StartWait RXTX",
53 [DEV_STATE_STARTWAIT_RX] = "StartWait RX",
54 [DEV_STATE_STARTWAIT_TX] = "StartWait TX",
55 [DEV_STATE_STOPWAIT_RXTX] = "StopWait RXTX",
56 [DEV_STATE_STOPWAIT_RX] = "StopWait RX",
57 [DEV_STATE_STOPWAIT_TX] = "StopWait TX",
58 [DEV_STATE_RUNNING] = "Running",
59};
60
61const char *dev_event_names[] = {
62 [DEV_EVENT_START] = "Start",
63 [DEV_EVENT_STOP] = "Stop",
64 [DEV_EVENT_RXUP] = "RX up",
65 [DEV_EVENT_TXUP] = "TX up",
66 [DEV_EVENT_RXDOWN] = "RX down",
67 [DEV_EVENT_TXDOWN] = "TX down",
68 [DEV_EVENT_RESTART] = "Restart",
69};
70
71const char *ctc_ch_event_names[] = {
72 [CTC_EVENT_IO_SUCCESS] = "ccw_device success",
73 [CTC_EVENT_IO_EBUSY] = "ccw_device busy",
74 [CTC_EVENT_IO_ENODEV] = "ccw_device enodev",
75 [CTC_EVENT_IO_UNKNOWN] = "ccw_device unknown",
76 [CTC_EVENT_ATTNBUSY] = "Status ATTN & BUSY",
77 [CTC_EVENT_ATTN] = "Status ATTN",
78 [CTC_EVENT_BUSY] = "Status BUSY",
79 [CTC_EVENT_UC_RCRESET] = "Unit check remote reset",
80 [CTC_EVENT_UC_RSRESET] = "Unit check remote system reset",
81 [CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
82 [CTC_EVENT_UC_TXPARITY] = "Unit check TX parity",
83 [CTC_EVENT_UC_HWFAIL] = "Unit check Hardware failure",
84 [CTC_EVENT_UC_RXPARITY] = "Unit check RX parity",
85 [CTC_EVENT_UC_ZERO] = "Unit check ZERO",
86 [CTC_EVENT_UC_UNKNOWN] = "Unit check Unknown",
87 [CTC_EVENT_SC_UNKNOWN] = "SubChannel check Unknown",
88 [CTC_EVENT_MC_FAIL] = "Machine check failure",
89 [CTC_EVENT_MC_GOOD] = "Machine check operational",
90 [CTC_EVENT_IRQ] = "IRQ normal",
91 [CTC_EVENT_FINSTAT] = "IRQ final",
92 [CTC_EVENT_TIMER] = "Timer",
93 [CTC_EVENT_START] = "Start",
94 [CTC_EVENT_STOP] = "Stop",
95 /*
96 * additional MPC events
97 */
98 [CTC_EVENT_SEND_XID] = "XID Exchange",
99 [CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
100};
101
102const char *ctc_ch_state_names[] = {
103 [CTC_STATE_IDLE] = "Idle",
104 [CTC_STATE_STOPPED] = "Stopped",
105 [CTC_STATE_STARTWAIT] = "StartWait",
106 [CTC_STATE_STARTRETRY] = "StartRetry",
107 [CTC_STATE_SETUPWAIT] = "SetupWait",
108 [CTC_STATE_RXINIT] = "RX init",
109 [CTC_STATE_TXINIT] = "TX init",
110 [CTC_STATE_RX] = "RX",
111 [CTC_STATE_TX] = "TX",
112 [CTC_STATE_RXIDLE] = "RX idle",
113 [CTC_STATE_TXIDLE] = "TX idle",
114 [CTC_STATE_RXERR] = "RX error",
115 [CTC_STATE_TXERR] = "TX error",
116 [CTC_STATE_TERM] = "Terminating",
117 [CTC_STATE_DTERM] = "Restarting",
118 [CTC_STATE_NOTOP] = "Not operational",
119 /*
120 * additional MPC states
121 */
122 [CH_XID0_PENDING] = "Pending XID0 Start",
123 [CH_XID0_INPROGRESS] = "In XID0 Negotiations ",
124 [CH_XID7_PENDING] = "Pending XID7 P1 Start",
125 [CH_XID7_PENDING1] = "Active XID7 P1 Exchange ",
126 [CH_XID7_PENDING2] = "Pending XID7 P2 Start ",
127 [CH_XID7_PENDING3] = "Active XID7 P2 Exchange ",
128 [CH_XID7_PENDING4] = "XID7 Complete - Pending READY ",
129};
130
131static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);
132
133/*
134 * ----- static ctcm actions for channel statemachine -----
135 *
136*/
137static void chx_txdone(fsm_instance *fi, int event, void *arg);
138static void chx_rx(fsm_instance *fi, int event, void *arg);
139static void chx_rxidle(fsm_instance *fi, int event, void *arg);
140static void chx_firstio(fsm_instance *fi, int event, void *arg);
141static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
142static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
143static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
144static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
145static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
146static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
147static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
148static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
149static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
150static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
151static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
152static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
153static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
154static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
155
156/*
157 * ----- static ctcmpc actions for ctcmpc channel statemachine -----
158 *
159*/
160static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
161static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
162static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
163/* shared :
164static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
165static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
166static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
167static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
168static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
169static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
170static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
171static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
172static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
173static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
174static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
175static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
176static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
177static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
178*/
179static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
180static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
181static void ctcmpc_chx_resend(fsm_instance *, int, void *);
182static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
183
184/**
185 * Check return code of a preceeding ccw_device call, halt_IO etc...
186 *
187 * ch : The channel, the error belongs to.
188 * Returns the error code (!= 0) to inspect.
189 */
190void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
191{
192 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
193 "ccw error %s (%s): %04x\n", ch->id, msg, rc);
194 switch (rc) {
195 case -EBUSY:
196 ctcm_pr_warn("%s (%s): Busy !\n", ch->id, msg);
197 fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
198 break;
199 case -ENODEV:
200 ctcm_pr_emerg("%s (%s): Invalid device called for IO\n",
201 ch->id, msg);
202 fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
203 break;
204 default:
205 ctcm_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
206 ch->id, msg, rc);
207 fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
208 }
209}
210
211void ctcm_purge_skb_queue(struct sk_buff_head *q)
212{
213 struct sk_buff *skb;
214
215 CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);
216
217 while ((skb = skb_dequeue(q))) {
218 atomic_dec(&skb->users);
219 dev_kfree_skb_any(skb);
220 }
221}
222
223/**
224 * NOP action for statemachines
225 */
226static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
227{
228}
229
230/*
231 * Actions for channel - statemachines.
232 */
233
/**
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_txdone(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct sk_buff *skb;
	int first = 1;
	int i;
	unsigned long duration;
	struct timespec done_stamp = current_kernel_time(); /* xtime */

	/* Track the worst-case transmit duration (microseconds). */
	duration =
	    (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
	    (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.count != 0)
		ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n",
			     dev->name, ch->irb->scsw.count);
	fsm_deltimer(&ch->timer);
	/* All skbs of the finished I/O are transmitted: account and free. */
	while ((skb = skb_dequeue(&ch->io_queue))) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
		if (first) {
			/* first skb additionally carried the 2 length bytes */
			priv->stats.tx_bytes += 2;
			first = 0;
		}
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	/*
	 * If packets were collected while the channel was busy, chain
	 * them into one transfer buffer and start the next write now.
	 */
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);
	if (ch->collect_len > 0) {
		int rc;

		if (ctcm_checkalloc_buffer(ch)) {
			spin_unlock(&ch->collect_lock);
			return;
		}
		/* reset trans_skb to an empty buffer */
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		if (ch->prof.maxmulti < (ch->collect_len + 2))
			ch->prof.maxmulti = ch->collect_len + 2;
		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
		/* leading __u16 holds total block length incl. itself */
		*((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
		i = 0;
		while ((skb = skb_dequeue(&ch->collect_queue))) {
			skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
			priv->stats.tx_packets++;
			priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			i++;
		}
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		ch->ccw[1].count = ch->trans_skb->len;
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
		ch->prof.send_stamp = current_kernel_time(); /* xtime */
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
						(unsigned long)ch, 0xff, 0);
		ch->prof.doios_multi++;
		if (rc != 0) {
			/* write failed: drop all i chained packets */
			priv->stats.tx_dropped += i;
			priv->stats.tx_errors += i;
			fsm_deltimer(&ch->timer);
			ctcm_ccw_check_rc(ch, rc, "chained TX");
		}
	} else {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CTC_STATE_TXIDLE);
	}
	ctcm_clear_busy_do(dev);
}
321
/**
 * Initial data is sent.
 * Notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	CTCM_DBF_TEXT(TRACE, 6, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	fsm_newstate(fi, CTC_STATE_TXIDLE);
	/* tell the device statemachine the TX path is operational */
	fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
}
342
/**
 * Got normal data, check for sanity, queue it up, allocate new buffer
 * trigger bottom half, and initiate next read.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_rx(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	/* received length = buffer size minus the channel's residual count */
	int len = ch->max_bufsize - ch->irb->scsw.count;
	struct sk_buff *skb = ch->trans_skb;
	/* first 2 bytes of the block carry the block length */
	__u16 block_len = *((__u16 *)skb->data);
	int check_len;
	int rc;

	fsm_deltimer(&ch->timer);
	/* a valid block is at least the 2 length bytes + minimal payload */
	if (len < 8) {
		ctcm_pr_debug("%s: got packet with length %d < 8\n",
			     dev->name, len);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}
	if (len > ch->max_bufsize) {
		ctcm_pr_debug("%s: got packet with length %d > %d\n",
			     dev->name, len, ch->max_bufsize);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}

	/*
	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
	 */
	switch (ch->protocol) {
	case CTCM_PROTO_S390:
	case CTCM_PROTO_OS390:
		/* tolerate the 2 extra trailing bytes */
		check_len = block_len + 2;
		break;
	default:
		check_len = block_len;
		break;
	}
	if ((len < block_len) || (len > check_len)) {
		ctcm_pr_debug("%s: got block length %d != rx length %d\n",
			     dev->name, block_len, len);
		if (do_debug)
			ctcmpc_dump_skb(skb, 0);

		*((__u16 *)skb->data) = len;
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}
	/* strip the length field itself before unpacking */
	block_len -= 2;
	if (block_len > 0) {
		*((__u16 *)skb->data) = block_len;
		ctcm_unpack_skb(ch, skb);
	}
 again:
	/* reset trans_skb and re-arm the next read */
	skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(skb);
	skb->len = 0;
	if (ctcm_checkalloc_buffer(ch))
		return;
	ch->ccw[1].count = ch->max_bufsize;
	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
					(unsigned long)ch, 0xff, 0);
	if (rc != 0)
		ctcm_ccw_check_rc(ch, rc, "normal RX");
}
418
/**
 * Initialize connection by sending a __u16 of value 0
 * (the initial block length) as handshake.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_firstio(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	int rc;

	CTCM_DBF_TEXT(TRACE, 6, __FUNCTION__);

	if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
		ctcm_pr_debug("%s: remote side issued READ?, init.\n", ch->id);
	fsm_deltimer(&ch->timer);
	if (ctcm_checkalloc_buffer(ch))
		return;
	if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
	    (ch->protocol == CTCM_PROTO_OS390)) {
		/* OS/390 resp. z/OS: no extra handshake I/O needed */
		if (CHANNEL_DIRECTION(ch->flags) == READ) {
			*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
			fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
				     CTC_EVENT_TIMER, ch);
			chx_rxidle(fi, event, arg);
		} else {
			struct net_device *dev = ch->netdev;
			struct ctcm_priv *priv = dev->priv;
			fsm_newstate(fi, CTC_STATE_TXIDLE);
			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
		}
		return;
	}

	/*
	 * Don't setup a timer for receiving the initial RX frame
	 * if in compatibility mode, since VM TCP delays the initial
	 * frame until it has some data to send.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
	    (ch->protocol != CTCM_PROTO_S390))
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

	*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
	ch->ccw[1].count = 2;	/* Transfer only length */

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
					(unsigned long)ch, 0xff, 0);
	if (rc != 0) {
		/* I/O could not be started; fall back and retry setup */
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CTC_STATE_SETUPWAIT);
		ctcm_ccw_check_rc(ch, rc, "init IO");
	}
	/*
	 * If in compatibility mode since we don't setup a timer, we
	 * also signal RX channel up immediately. This enables us
	 * to send packets early which in turn usually triggers some
	 * reply from VM TCP which brings up the RX channel to it's
	 * final state.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
	    (ch->protocol == CTCM_PROTO_S390)) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->priv;
		fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
	}
}
490
/**
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_rxidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	__u16 buflen;
	int rc;

	CTCM_DBF_TEXT(TRACE, 6, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	/* peer's initial frame starts with the announced block length */
	buflen = *((__u16 *)ch->trans_skb->data);
	if (do_debug)
		ctcm_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);

	if (buflen >= CTCM_INITIAL_BLOCKLEN) {
		if (ctcm_checkalloc_buffer(ch))
			return;
		ch->ccw[1].count = ch->max_bufsize;
		fsm_newstate(fi, CTC_STATE_RXIDLE);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
						(unsigned long)ch, 0xff, 0);
		if (rc != 0) {
			fsm_newstate(fi, CTC_STATE_RXINIT);
			ctcm_ccw_check_rc(ch, rc, "initial RX");
		} else
			fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
	} else {
		/* handshake frame too short: redo the initial exchange */
		if (do_debug)
			ctcm_pr_debug("%s: Initial RX count %d not %d\n",
				      dev->name, buflen, CTCM_INITIAL_BLOCKLEN);
		chx_firstio(fi, event, arg);
	}
}
533
/**
 * Set channel into extended mode
 * (starts the prepared set-mode CCW at ch->ccw[6]).
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	int rc;
	unsigned long saveflags = 0;
	int timeout = CTCM_TIME_5_SEC;

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch)) {
		/* MPC uses a shorter (1.5s) setup timeout */
		timeout = 1500;
		if (do_debug)
			ctcm_pr_debug("ctcm enter: %s(): cp=%i ch=0x%p id=%s\n",
				__FUNCTION__, smp_processor_id(), ch, ch->id);
	}
	fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
	fsm_newstate(fi, CTC_STATE_SETUPWAIT);
	if (do_debug_ccw && IS_MPC(ch))
		ctcmpc_dumpit((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);

	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
			/* Such conditional locking is undeterministic in
			 * static view. => ignore sparse warnings here. */

	rc = ccw_device_start(ch->cdev, &ch->ccw[6],
					(unsigned long)ch, 0xff, 0);
	if (event == CTC_EVENT_TIMER)	/* see above comments */
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CTC_STATE_STARTWAIT);
		ctcm_ccw_check_rc(ch, rc, "set Mode");
	} else
		ch->retry = 0;
}
576
/**
 * Setup channel: build the CCW program and issue the
 * initial halt to get the subchannel into a known state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	int rc;
	struct net_device *dev;
	unsigned long saveflags;

	CTCM_DBF_TEXT(TRACE, 5, __FUNCTION__);
	if (ch == NULL) {
		ctcm_pr_warn("chx_start ch=NULL\n");
		return;
	}
	if (ch->netdev == NULL) {
		ctcm_pr_warn("chx_start dev=NULL, id=%s\n", ch->id);
		return;
	}
	dev = ch->netdev;

	if (do_debug)
		ctcm_pr_debug("%s: %s channel start\n", dev->name,
			(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");

	/* drop any stale transfer buffer from a previous run */
	if (ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb(ch->trans_skb);
		ch->trans_skb = NULL;
	}
	/* ccw[1] is the data-transfer CCW, direction-dependent */
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		ch->ccw[1].cmd_code = CCW_CMD_READ;
		ch->ccw[1].flags = CCW_FLAG_SLI;
		ch->ccw[1].count = 0;
	} else {
		ch->ccw[1].cmd_code = CCW_CMD_WRITE;
		ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
		ch->ccw[1].count = 0;
	}
	if (ctcm_checkalloc_buffer(ch)) {
		/* non-fatal: buffer will be allocated on first transfer */
		ctcm_pr_notice("%s: %s trans_skb allocation delayed "
				"until first transfer\n", dev->name,
			(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
	}

	/* ccw[0]: prepare, ccw[2]: noop terminator (jointed CE + DE) */
	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
	ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
	ch->ccw[0].count = 0;
	ch->ccw[0].cda = 0;
	ch->ccw[2].cmd_code = CCW_CMD_NOOP;	/* jointed CE + DE */
	ch->ccw[2].flags = CCW_FLAG_SLI;
	ch->ccw[2].count = 0;
	ch->ccw[2].cda = 0;
	/* ccw[3..5] mirror ccw[0..2] for the retry program */
	memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
	ch->ccw[4].cda = 0;
	ch->ccw[4].flags &= ~CCW_FLAG_IDA;

	fsm_newstate(fi, CTC_STATE_STARTWAIT);
	fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		/* -EBUSY means the halt is pending: keep the timer armed */
		if (rc != -EBUSY)
			fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
	}
}
649
/**
 * Shutdown a channel by halting pending I/O and
 * moving the statemachine to CTC_STATE_TERM.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	unsigned long saveflags = 0;
	int rc;
	int oldstate;

	CTCM_DBF_TEXT(TRACE, 2, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch))
		fsm_deltimer(&ch->sweep_timer);

	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

	if (event == CTC_EVENT_STOP)	/* only for STOP not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
			/* Such conditional locking is undeterministic in
			 * static view. => ignore sparse warnings here. */
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CTC_STATE_TERM);
	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);

	if (event == CTC_EVENT_STOP)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
			/* see remark above about conditional locking */

	if (rc != 0 && rc != -EBUSY) {
		/* halt failed outright: revert state unless stopping anyway */
		fsm_deltimer(&ch->timer);
		if (event != CTC_EVENT_STOP) {
			fsm_newstate(fi, oldstate);
			ctcm_ccw_check_rc(ch, rc, (char *)__FUNCTION__);
		}
	}
}
691
/**
 * Cleanup helper for chx_fail and chx_stopped
 * cleanup channels queue and notify interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * state	The next state (depending on caller).
 * ch		The channel to operate on.
 */
static void ctcm_chx_cleanup(fsm_instance *fi, int state,
		struct channel *ch)
{
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch))
		fsm_deltimer(&ch->sweep_timer);

	fsm_newstate(fi, state);
	/* on a full stop, release the transfer buffer as well */
	if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb_any(ch->trans_skb);
		ch->trans_skb = NULL;
	}

	/* reset MPC sweep/sequence tracking */
	ch->th_seg = 0x00;
	ch->th_seq_num = 0x00;
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		skb_queue_purge(&ch->io_queue);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		/* TX queues hold extra refs; use the ref-dropping purge */
		ctcm_purge_skb_queue(&ch->io_queue);
		if (IS_MPC(ch))
			ctcm_purge_skb_queue(&ch->sweep_queue);
		spin_lock(&ch->collect_lock);
		ctcm_purge_skb_queue(&ch->collect_queue);
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}
735
/**
 * A channel has successfully been halted.
 * Cleanup its queue and notify interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
{
	CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);
	ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
}
749
/**
 * A stop command from device statemachine arrived and we are in
 * not operational mode. Set state to stopped.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
{
	/* nothing is running, so no cleanup needed — just change state */
	fsm_newstate(fi, CTC_STATE_STOPPED);
}
762
/**
 * A machine check for no path, not operational status or gone device has
 * happened.
 * Cleanup queue and notify interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
{
	CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);
	ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
}
777
/**
 * Handle error during setup of channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	/*
	 * Special case: Got UC_RCRESET on setmode.
	 * This means that remote side isn't setup. In this case
	 * simply retry after some 10 secs...
	 */
	if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
	    ((event == CTC_EVENT_UC_RCRESET) ||
	     (event == CTC_EVENT_UC_RSRESET))) {
		fsm_newstate(fi, CTC_STATE_STARTRETRY);
		fsm_deltimer(&ch->timer);
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
		if (!IS_MPC(ch) && (CHANNEL_DIRECTION(ch->flags) == READ)) {
			/* non-MPC RX: halt any outstanding I/O first */
			int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
			if (rc != 0)
				ctcm_ccw_check_rc(ch, rc,
					"HaltIO in chx_setuperr");
		}
		return;
	}

	/* any other setup error is terminal for this channel direction */
	CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
		"%s : %s error during %s channel setup state=%s\n",
		dev->name, ctc_ch_event_names[event],
		(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
		fsm_getstate_str(fi));

	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		fsm_newstate(fi, CTC_STATE_RXERR);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		fsm_newstate(fi, CTC_STATE_TXERR);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}
825
/**
 * Restart a channel after an error.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	unsigned long saveflags = 0;
	int oldstate;
	int rc;

	CTCM_DBF_TEXT(TRACE, CTC_DBF_NOTICE, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	ctcm_pr_debug("%s: %s channel restart\n", dev->name,
		(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CTC_STATE_STARTWAIT);
	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
			/* Such conditional locking is a known problem for
			 * sparse because its undeterministic in static view.
			 * Warnings should be ignored here. */
	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
	if (event == CTC_EVENT_TIMER)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		/* -EBUSY: halt pending, keep STARTWAIT and the timer */
		if (rc != -EBUSY) {
			fsm_deltimer(&ch->timer);
			fsm_newstate(fi, oldstate);
		}
		ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
	}
}
864
/**
 * Handle error during RX initial handshake (exchange of
 * 0-length block header)
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	CTCM_DBF_TEXT(SETUP, 3, __FUNCTION__);
	if (event == CTC_EVENT_TIMER) {
		if (!IS_MPCDEV(dev))
			/* TODO : check if MPC deletes timer somewhere */
			fsm_deltimer(&ch->timer);
		ctcm_pr_debug("%s: Timeout during RX init handshake\n",
				dev->name);
		/* retry up to 3 times before declaring RX dead */
		if (ch->retry++ < 3)
			ctcm_chx_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CTC_STATE_RXERR);
			fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
		}
	} else
		ctcm_pr_warn("%s: Error during RX init handshake\n", dev->name);
}
895
/**
 * Notify device statemachine if we gave up initialization
 * of RX channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	CTCM_DBF_TEXT(SETUP, 3, __FUNCTION__);
	fsm_newstate(fi, CTC_STATE_RXERR);
	ctcm_pr_warn("%s: RX busy. Initialization failed\n", dev->name);
	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
}
915
/**
 * Handle RX Unit check remote reset (remote disconnected)
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct channel *ch2;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	CTCM_DBF_DEV_NAME(TRACE, dev, "Got remote disconnect, re-initializing");
	fsm_deltimer(&ch->timer);
	if (do_debug)
		ctcm_pr_debug("%s: Got remote disconnect, "
				"re-initializing ...\n", dev->name);
	/*
	 * Notify device statemachine
	 */
	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);

	/* tear down both directions; halting drives re-initialization */
	fsm_newstate(fi, CTC_STATE_DTERM);
	ch2 = priv->channel[WRITE];
	fsm_newstate(ch2->fsm, CTC_STATE_DTERM);

	ccw_device_halt(ch->cdev, (unsigned long)ch);
	ccw_device_halt(ch2->cdev, (unsigned long)ch2);
}
948
/**
 * Handle error during TX channel initialization.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	if (event == CTC_EVENT_TIMER) {
		fsm_deltimer(&ch->timer);
		CTCM_DBF_DEV_NAME(ERROR, dev,
				"Timeout during TX init handshake");
		/* retry up to 3 times before declaring TX dead */
		if (ch->retry++ < 3)
			ctcm_chx_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CTC_STATE_TXERR);
			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
		}
	} else {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s : %s error during channel setup state=%s",
			dev->name, ctc_ch_event_names[event],
			fsm_getstate_str(fi));

		ctcm_pr_warn("%s: Error during TX init handshake\n", dev->name);
	}
}
981
/**
 * Handle TX timeout by retrying operation.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct sk_buff *skb;

	if (do_debug)
		ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
			__FUNCTION__, smp_processor_id(), ch, ch->id);

	fsm_deltimer(&ch->timer);
	if (ch->retry++ > 3) {
		struct mpc_group *gptr = priv->mpcg;
		ctcm_pr_debug("%s: TX retry failed, restarting channel\n",
			     dev->name);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
		/* call restart if not MPC or if MPC and mpcg fsm is ready.
		   use gptr as mpc indicator */
		if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
			ctcm_chx_restart(fi, event, arg);
		goto done;
	}

	ctcm_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
	/* resend the head of io_queue (it was never freed) */
	skb = skb_peek(&ch->io_queue);
	if (skb) {
		int rc = 0;
		unsigned long saveflags = 0;
		/* rebuild the IDAL for the retry write CCW */
		clear_normalized_cda(&ch->ccw[4]);
		ch->ccw[4].count = skb->len;
		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
			ctcm_pr_debug("%s: IDAL alloc failed, chan restart\n",
					dev->name);
			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
			ctcm_chx_restart(fi, event, arg);
			goto done;
		}
		fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
		if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */
			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
			/* Such conditional locking is a known problem for
			 * sparse because its undeterministic in static view.
			 * Warnings should be ignored here. */
		if (do_debug_ccw)
			ctcmpc_dumpit((char *)&ch->ccw[3],
					sizeof(struct ccw1) * 3);

		rc = ccw_device_start(ch->cdev, &ch->ccw[3],
						(unsigned long)ch, 0xff, 0);
		if (event == CTC_EVENT_TIMER)
			spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
					saveflags);
		if (rc != 0) {
			fsm_deltimer(&ch->timer);
			ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
			ctcm_purge_skb_queue(&ch->io_queue);
		}
	}
done:
	return;
}
1051
/**
 * Handle fatal errors during an I/O command.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	ctcm_pr_warn("%s %s : unrecoverable channel error\n",
			CTC_DRIVER_NAME, dev->name);
	if (IS_MPC(ch)) {
		priv->stats.tx_dropped++;
		priv->stats.tx_errors++;
	}

	/* mark the failing direction dead and notify the device fsm */
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		ctcm_pr_debug("%s: RX I/O error\n", dev->name);
		fsm_newstate(fi, CTC_STATE_RXERR);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		ctcm_pr_debug("%s: TX I/O error\n", dev->name);
		fsm_newstate(fi, CTC_STATE_TXERR);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}
1084
1085/*
1086 * The ctcm statemachine for a channel.
1087 */
1088const fsm_node ch_fsm[] = {
1089 { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
1090 { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
1091 { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
1092 { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1093
1094 { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
1095 { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
1096 { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
1097 { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1098 { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },
1099
1100 { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1101 { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
1102 { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1103 { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
1104 { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1105 { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1106
1107 { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
1108 { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
1109 { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_action_nop },
1110 { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1111
1112 { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1113 { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
1114 { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, chx_firstio },
1115 { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1116 { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1117 { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
1118 { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1119 { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1120
1121 { CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1122 { CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
1123 { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, chx_rxidle },
1124 { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
1125 { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
1126 { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
1127 { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
1128 { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1129 { CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, chx_firstio },
1130 { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1131
1132 { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
1133 { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
1134 { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, chx_rx },
1135 { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
1136 { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1137 { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1138 { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, chx_rx },
1139
1140 { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1141 { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
1142 { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
1143 { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
1144 { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
1145 { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
1146 { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1147 { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1148
1149 { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
1150 { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
1151 { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, chx_firstio },
1152 { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
1153 { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
1154 { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1155 { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1156
1157 { CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
1158 { CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
1159 { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
1160 { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
1161 { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
1162 { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1163
1164 { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
1165 { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
1166 { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1167 { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
1168 { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
1169 { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1170
1171 { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
1172 { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
1173 { CTC_STATE_TX, CTC_EVENT_FINSTAT, chx_txdone },
1174 { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_txretry },
1175 { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_txretry },
1176 { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
1177 { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1178 { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1179
1180 { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
1181 { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
1182 { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1183 { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1184};
1185
int ch_fsm_len = ARRAY_SIZE(ch_fsm);	/* entry count of the plain-CTC channel FSM table above */
1187
1188/*
1189 * MPC actions for mpc channel statemachine
1190 * handling of MPC protocol requires extra
1191 * statemachine and actions which are prefixed ctcmpc_ .
1192 * The ctc_ch_states and ctc_ch_state_names,
1193 * ctc_ch_events and ctc_ch_event_names share the ctcm definitions
1194 * which are expanded by some elements.
1195 */
1196
1197/*
1198 * Actions for mpc channel statemachine.
1199 */
1200
1201/**
1202 * Normal data has been send. Free the corresponding
1203 * skb (it's in io_queue), reset dev->tbusy and
1204 * revert to idle state.
1205 *
1206 * fi An instance of a channel statemachine.
1207 * event The event, just happened.
1208 * arg Generic pointer, casted from channel * upon call.
1209 */
1210static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
1211{
1212 struct channel *ch = arg;
1213 struct net_device *dev = ch->netdev;
1214 struct ctcm_priv *priv = dev->priv;
1215 struct mpc_group *grp = priv->mpcg;
1216 struct sk_buff *skb;
1217 int first = 1;
1218 int i;
1219 struct timespec done_stamp;
1220 __u32 data_space;
1221 unsigned long duration;
1222 struct sk_buff *peekskb;
1223 int rc;
1224 struct th_header *header;
1225 struct pdu *p_header;
1226
1227 if (do_debug)
1228 ctcm_pr_debug("%s cp:%i enter: %s()\n",
1229 dev->name, smp_processor_id(), __FUNCTION__);
1230
1231 done_stamp = current_kernel_time(); /* xtime */
1232 duration = (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000
1233 + (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
1234 if (duration > ch->prof.tx_time)
1235 ch->prof.tx_time = duration;
1236
1237 if (ch->irb->scsw.count != 0)
1238 ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n",
1239 dev->name, ch->irb->scsw.count);
1240 fsm_deltimer(&ch->timer);
1241 while ((skb = skb_dequeue(&ch->io_queue))) {
1242 priv->stats.tx_packets++;
1243 priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
1244 if (first) {
1245 priv->stats.tx_bytes += 2;
1246 first = 0;
1247 }
1248 atomic_dec(&skb->users);
1249 dev_kfree_skb_irq(skb);
1250 }
1251 spin_lock(&ch->collect_lock);
1252 clear_normalized_cda(&ch->ccw[4]);
1253
1254 if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
1255 spin_unlock(&ch->collect_lock);
1256 fsm_newstate(fi, CTC_STATE_TXIDLE);
1257 goto done;
1258 }
1259
1260 if (ctcm_checkalloc_buffer(ch)) {
1261 spin_unlock(&ch->collect_lock);
1262 goto done;
1263 }
1264 ch->trans_skb->data = ch->trans_skb_data;
1265 skb_reset_tail_pointer(ch->trans_skb);
1266 ch->trans_skb->len = 0;
1267 if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
1268 ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
1269 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
1270 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
1271 i = 0;
1272
1273 if (do_debug_data)
1274 ctcm_pr_debug("ctcmpc: %s() building "
1275 "trans_skb from collect_q \n", __FUNCTION__);
1276
1277 data_space = grp->group_max_buflen - TH_HEADER_LENGTH;
1278
1279 if (do_debug_data)
1280 ctcm_pr_debug("ctcmpc: %s() building trans_skb from collect_q"
1281 " data_space:%04x\n", __FUNCTION__, data_space);
1282 p_header = NULL;
1283 while ((skb = skb_dequeue(&ch->collect_queue))) {
1284 memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
1285 p_header = (struct pdu *)
1286 (skb_tail_pointer(ch->trans_skb) - skb->len);
1287 p_header->pdu_flag = 0x00;
1288 if (skb->protocol == ntohs(ETH_P_SNAP))
1289 p_header->pdu_flag |= 0x60;
1290 else
1291 p_header->pdu_flag |= 0x20;
1292
1293 if (do_debug_data) {
1294 ctcm_pr_debug("ctcmpc: %s()trans_skb len:%04x \n",
1295 __FUNCTION__, ch->trans_skb->len);
1296 ctcm_pr_debug("ctcmpc: %s() pdu header and data"
1297 " for up to 32 bytes sent to vtam\n",
1298 __FUNCTION__);
1299 ctcmpc_dumpit((char *)p_header,
1300 min_t(int, skb->len, 32));
1301 }
1302 ch->collect_len -= skb->len;
1303 data_space -= skb->len;
1304 priv->stats.tx_packets++;
1305 priv->stats.tx_bytes += skb->len;
1306 atomic_dec(&skb->users);
1307 dev_kfree_skb_any(skb);
1308 peekskb = skb_peek(&ch->collect_queue);
1309 if (peekskb->len > data_space)
1310 break;
1311 i++;
1312 }
1313 /* p_header points to the last one we handled */
1314 if (p_header)
1315 p_header->pdu_flag |= PDU_LAST; /*Say it's the last one*/
1316 header = kzalloc(TH_HEADER_LENGTH, gfp_type());
1317
1318 if (!header) {
1319 printk(KERN_WARNING "ctcmpc: OUT OF MEMORY IN %s()"
1320 ": Data Lost \n", __FUNCTION__);
1321 spin_unlock(&ch->collect_lock);
1322 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1323 goto done;
1324 }
1325
1326 header->th_ch_flag = TH_HAS_PDU; /* Normal data */
1327 ch->th_seq_num++;
1328 header->th_seq_num = ch->th_seq_num;
1329
1330 if (do_debug_data)
1331 ctcm_pr_debug("%s: ToVTAM_th_seq= %08x\n" ,
1332 __FUNCTION__, ch->th_seq_num);
1333
1334 memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
1335 TH_HEADER_LENGTH); /* put the TH on the packet */
1336
1337 kfree(header);
1338
1339 if (do_debug_data) {
1340 ctcm_pr_debug("ctcmpc: %s()trans_skb len:%04x \n",
1341 __FUNCTION__, ch->trans_skb->len);
1342
1343 ctcm_pr_debug("ctcmpc: %s() up-to-50 bytes of trans_skb "
1344 "data to vtam from collect_q\n", __FUNCTION__);
1345 ctcmpc_dumpit((char *)ch->trans_skb->data,
1346 min_t(int, ch->trans_skb->len, 50));
1347 }
1348
1349 spin_unlock(&ch->collect_lock);
1350 clear_normalized_cda(&ch->ccw[1]);
1351 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
1352 dev_kfree_skb_any(ch->trans_skb);
1353 ch->trans_skb = NULL;
1354 printk(KERN_WARNING
1355 "ctcmpc: %s()CCW failure - data lost\n",
1356 __FUNCTION__);
1357 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1358 return;
1359 }
1360 ch->ccw[1].count = ch->trans_skb->len;
1361 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
1362 ch->prof.send_stamp = current_kernel_time(); /* xtime */
1363 if (do_debug_ccw)
1364 ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
1365 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1366 (unsigned long)ch, 0xff, 0);
1367 ch->prof.doios_multi++;
1368 if (rc != 0) {
1369 priv->stats.tx_dropped += i;
1370 priv->stats.tx_errors += i;
1371 fsm_deltimer(&ch->timer);
1372 ctcm_ccw_check_rc(ch, rc, "chained TX");
1373 }
1374done:
1375 ctcm_clear_busy(dev);
1376 ctcm_pr_debug("ctcmpc exit: %s %s()\n", dev->name, __FUNCTION__);
1377 return;
1378}
1379
1380/**
1381 * Got normal data, check for sanity, queue it up, allocate new buffer
1382 * trigger bottom half, and initiate next read.
1383 *
1384 * fi An instance of a channel statemachine.
1385 * event The event, just happened.
1386 * arg Generic pointer, casted from channel * upon call.
1387 */
1388static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
1389{
1390 struct channel *ch = arg;
1391 struct net_device *dev = ch->netdev;
1392 struct ctcm_priv *priv = dev->priv;
1393 struct mpc_group *grp = priv->mpcg;
1394 struct sk_buff *skb = ch->trans_skb;
1395 struct sk_buff *new_skb;
1396 unsigned long saveflags = 0; /* avoids compiler warning */
1397 int len = ch->max_bufsize - ch->irb->scsw.count;
1398
1399 if (do_debug_data) {
1400 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx %s cp:%i %s\n",
1401 dev->name, smp_processor_id(), ch->id);
1402 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx: maxbuf: %04x "
1403 "len: %04x\n", ch->max_bufsize, len);
1404 }
1405 fsm_deltimer(&ch->timer);
1406
1407 if (skb == NULL) {
1408 ctcm_pr_debug("ctcmpc exit: %s() TRANS_SKB = NULL \n",
1409 __FUNCTION__);
1410 goto again;
1411 }
1412
1413 if (len < TH_HEADER_LENGTH) {
1414 ctcm_pr_info("%s: got packet with invalid length %d\n",
1415 dev->name, len);
1416 priv->stats.rx_dropped++;
1417 priv->stats.rx_length_errors++;
1418 } else {
1419 /* must have valid th header or game over */
1420 __u32 block_len = len;
1421 len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
1422 new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);
1423
1424 if (new_skb == NULL) {
1425 printk(KERN_INFO "ctcmpc:%s() NEW_SKB = NULL\n",
1426 __FUNCTION__);
1427 printk(KERN_WARNING "ctcmpc: %s() MEMORY ALLOC FAILED"
1428 " - DATA LOST - MPC FAILED\n",
1429 __FUNCTION__);
1430 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1431 goto again;
1432 }
1433 switch (fsm_getstate(grp->fsm)) {
1434 case MPCG_STATE_RESET:
1435 case MPCG_STATE_INOP:
1436 dev_kfree_skb_any(new_skb);
1437 break;
1438 case MPCG_STATE_FLOWC:
1439 case MPCG_STATE_READY:
1440 memcpy(skb_put(new_skb, block_len),
1441 skb->data, block_len);
1442 skb_queue_tail(&ch->io_queue, new_skb);
1443 tasklet_schedule(&ch->ch_tasklet);
1444 break;
1445 default:
1446 memcpy(skb_put(new_skb, len), skb->data, len);
1447 skb_queue_tail(&ch->io_queue, new_skb);
1448 tasklet_hi_schedule(&ch->ch_tasklet);
1449 break;
1450 }
1451 }
1452
1453again:
1454 switch (fsm_getstate(grp->fsm)) {
1455 int rc, dolock;
1456 case MPCG_STATE_FLOWC:
1457 case MPCG_STATE_READY:
1458 if (ctcm_checkalloc_buffer(ch))
1459 break;
1460 ch->trans_skb->data = ch->trans_skb_data;
1461 skb_reset_tail_pointer(ch->trans_skb);
1462 ch->trans_skb->len = 0;
1463 ch->ccw[1].count = ch->max_bufsize;
1464 if (do_debug_ccw)
1465 ctcmpc_dumpit((char *)&ch->ccw[0],
1466 sizeof(struct ccw1) * 3);
1467 dolock = !in_irq();
1468 if (dolock)
1469 spin_lock_irqsave(
1470 get_ccwdev_lock(ch->cdev), saveflags);
1471 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1472 (unsigned long)ch, 0xff, 0);
1473 if (dolock) /* see remark about conditional locking */
1474 spin_unlock_irqrestore(
1475 get_ccwdev_lock(ch->cdev), saveflags);
1476 if (rc != 0)
1477 ctcm_ccw_check_rc(ch, rc, "normal RX");
1478 default:
1479 break;
1480 }
1481
1482 if (do_debug)
1483 ctcm_pr_debug("ctcmpc exit : %s %s(): ch=0x%p id=%s\n",
1484 dev->name, __FUNCTION__, ch, ch->id);
1485
1486}
1487
1488/**
1489 * Initialize connection by sending a __u16 of value 0.
1490 *
1491 * fi An instance of a channel statemachine.
1492 * event The event, just happened.
1493 * arg Generic pointer, casted from channel * upon call.
1494 */
1495static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
1496{
1497 struct channel *ch = arg;
1498 struct net_device *dev = ch->netdev;
1499 struct ctcm_priv *priv = dev->priv;
1500
1501 if (do_debug) {
1502 struct mpc_group *gptr = priv->mpcg;
1503 ctcm_pr_debug("ctcmpc enter: %s(): ch=0x%p id=%s\n",
1504 __FUNCTION__, ch, ch->id);
1505 ctcm_pr_debug("%s() %s chstate:%i grpstate:%i chprotocol:%i\n",
1506 __FUNCTION__, ch->id, fsm_getstate(fi),
1507 fsm_getstate(gptr->fsm), ch->protocol);
1508 }
1509 if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
1510 MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");
1511
1512 fsm_deltimer(&ch->timer);
1513 if (ctcm_checkalloc_buffer(ch))
1514 goto done;
1515
1516 switch (fsm_getstate(fi)) {
1517 case CTC_STATE_STARTRETRY:
1518 case CTC_STATE_SETUPWAIT:
1519 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1520 ctcmpc_chx_rxidle(fi, event, arg);
1521 } else {
1522 fsm_newstate(fi, CTC_STATE_TXIDLE);
1523 fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
1524 }
1525 goto done;
1526 default:
1527 break;
1528 };
1529
1530 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
1531 ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
1532
1533done:
1534 if (do_debug)
1535 ctcm_pr_debug("ctcmpc exit : %s(): ch=0x%p id=%s\n",
1536 __FUNCTION__, ch, ch->id);
1537 return;
1538}
1539
1540/**
1541 * Got initial data, check it. If OK,
1542 * notify device statemachine that we are up and
1543 * running.
1544 *
1545 * fi An instance of a channel statemachine.
1546 * event The event, just happened.
1547 * arg Generic pointer, casted from channel * upon call.
1548 */
1549void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
1550{
1551 struct channel *ch = arg;
1552 struct net_device *dev = ch->netdev;
1553 struct ctcm_priv *priv = dev->priv;
1554 struct mpc_group *grp = priv->mpcg;
1555 int rc;
1556 unsigned long saveflags = 0; /* avoids compiler warning */
1557
1558 fsm_deltimer(&ch->timer);
1559 ctcm_pr_debug("%s cp:%i enter: %s()\n",
1560 dev->name, smp_processor_id(), __FUNCTION__);
1561 if (do_debug)
1562 ctcm_pr_debug("%s() %s chstate:%i grpstate:%i\n",
1563 __FUNCTION__, ch->id,
1564 fsm_getstate(fi), fsm_getstate(grp->fsm));
1565
1566 fsm_newstate(fi, CTC_STATE_RXIDLE);
1567 /* XID processing complete */
1568
1569 switch (fsm_getstate(grp->fsm)) {
1570 case MPCG_STATE_FLOWC:
1571 case MPCG_STATE_READY:
1572 if (ctcm_checkalloc_buffer(ch))
1573 goto done;
1574 ch->trans_skb->data = ch->trans_skb_data;
1575 skb_reset_tail_pointer(ch->trans_skb);
1576 ch->trans_skb->len = 0;
1577 ch->ccw[1].count = ch->max_bufsize;
1578 if (do_debug_ccw)
1579 ctcmpc_dumpit((char *)&ch->ccw[0],
1580 sizeof(struct ccw1) * 3);
1581 if (event == CTC_EVENT_START)
1582 /* see remark about conditional locking */
1583 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1584 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1585 (unsigned long)ch, 0xff, 0);
1586 if (event == CTC_EVENT_START)
1587 spin_unlock_irqrestore(
1588 get_ccwdev_lock(ch->cdev), saveflags);
1589 if (rc != 0) {
1590 fsm_newstate(fi, CTC_STATE_RXINIT);
1591 ctcm_ccw_check_rc(ch, rc, "initial RX");
1592 goto done;
1593 }
1594 break;
1595 default:
1596 break;
1597 }
1598
1599 fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
1600done:
1601 if (do_debug)
1602 ctcm_pr_debug("ctcmpc exit: %s %s()\n",
1603 dev->name, __FUNCTION__);
1604 return;
1605}
1606
1607/*
1608 * ctcmpc channel FSM action
1609 * called from several points in ctcmpc_ch_fsm
1610 * ctcmpc only
1611 */
1612static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
1613{
1614 struct channel *ch = arg;
1615 struct net_device *dev = ch->netdev;
1616 struct ctcm_priv *priv = dev->priv;
1617 struct mpc_group *grp = priv->mpcg;
1618
1619 if (do_debug) {
1620 ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s"
1621 "GrpState:%s ChState:%s\n",
1622 __FUNCTION__, smp_processor_id(), ch, ch->id,
1623 fsm_getstate_str(grp->fsm),
1624 fsm_getstate_str(ch->fsm));
1625 }
1626
1627 switch (fsm_getstate(grp->fsm)) {
1628 case MPCG_STATE_XID2INITW:
1629 /* ok..start yside xid exchanges */
1630 if (!ch->in_mpcgroup)
1631 break;
1632 if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) {
1633 fsm_deltimer(&grp->timer);
1634 fsm_addtimer(&grp->timer,
1635 MPC_XID_TIMEOUT_VALUE,
1636 MPCG_EVENT_TIMER, dev);
1637 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1638
1639 } else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
1640 /* attn rcvd before xid0 processed via bh */
1641 fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1642 break;
1643 case MPCG_STATE_XID2INITX:
1644 case MPCG_STATE_XID0IOWAIT:
1645 case MPCG_STATE_XID0IOWAIX:
1646 /* attn rcvd before xid0 processed on ch
1647 but mid-xid0 processing for group */
1648 if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
1649 fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1650 break;
1651 case MPCG_STATE_XID7INITW:
1652 case MPCG_STATE_XID7INITX:
1653 case MPCG_STATE_XID7INITI:
1654 case MPCG_STATE_XID7INITZ:
1655 switch (fsm_getstate(ch->fsm)) {
1656 case CH_XID7_PENDING:
1657 fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1658 break;
1659 case CH_XID7_PENDING2:
1660 fsm_newstate(ch->fsm, CH_XID7_PENDING3);
1661 break;
1662 }
1663 fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
1664 break;
1665 }
1666
1667 if (do_debug)
1668 ctcm_pr_debug("ctcmpc exit : %s(): cp=%i ch=0x%p id=%s\n",
1669 __FUNCTION__, smp_processor_id(), ch, ch->id);
1670 return;
1671
1672}
1673
1674/*
1675 * ctcmpc channel FSM action
1676 * called from one point in ctcmpc_ch_fsm
1677 * ctcmpc only
1678 */
1679static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
1680{
1681 struct channel *ch = arg;
1682 struct net_device *dev = ch->netdev;
1683 struct ctcm_priv *priv = dev->priv;
1684 struct mpc_group *grp = priv->mpcg;
1685
1686 ctcm_pr_debug("ctcmpc enter: %s %s() %s \nGrpState:%s ChState:%s\n",
1687 dev->name,
1688 __FUNCTION__, ch->id,
1689 fsm_getstate_str(grp->fsm),
1690 fsm_getstate_str(ch->fsm));
1691
1692 fsm_deltimer(&ch->timer);
1693
1694 switch (fsm_getstate(grp->fsm)) {
1695 case MPCG_STATE_XID0IOWAIT:
1696 /* vtam wants to be primary.start yside xid exchanges*/
1697 /* only receive one attn-busy at a time so must not */
1698 /* change state each time */
1699 grp->changed_side = 1;
1700 fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
1701 break;
1702 case MPCG_STATE_XID2INITW:
1703 if (grp->changed_side == 1) {
1704 grp->changed_side = 2;
1705 break;
1706 }
1707 /* process began via call to establish_conn */
1708 /* so must report failure instead of reverting */
1709 /* back to ready-for-xid passive state */
1710 if (grp->estconnfunc)
1711 goto done;
1712 /* this attnbusy is NOT the result of xside xid */
1713 /* collisions so yside must have been triggered */
1714 /* by an ATTN that was not intended to start XID */
1715 /* processing. Revert back to ready-for-xid and */
1716 /* wait for ATTN interrupt to signal xid start */
1717 if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
1718 fsm_newstate(ch->fsm, CH_XID0_PENDING) ;
1719 fsm_deltimer(&grp->timer);
1720 goto done;
1721 }
1722 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1723 goto done;
1724 case MPCG_STATE_XID2INITX:
1725 /* XID2 was received before ATTN Busy for second
1726 channel.Send yside xid for second channel.
1727 */
1728 if (grp->changed_side == 1) {
1729 grp->changed_side = 2;
1730 break;
1731 }
1732 case MPCG_STATE_XID0IOWAIX:
1733 case MPCG_STATE_XID7INITW:
1734 case MPCG_STATE_XID7INITX:
1735 case MPCG_STATE_XID7INITI:
1736 case MPCG_STATE_XID7INITZ:
1737 default:
1738 /* multiple attn-busy indicates too out-of-sync */
1739 /* and they are certainly not being received as part */
1740 /* of valid mpc group negotiations.. */
1741 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1742 goto done;
1743 }
1744
1745 if (grp->changed_side == 1) {
1746 fsm_deltimer(&grp->timer);
1747 fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
1748 MPCG_EVENT_TIMER, dev);
1749 }
1750 if (ch->in_mpcgroup)
1751 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1752 else
1753 printk(KERN_WARNING "ctcmpc: %s() Not all channels have"
1754 " been added to group\n", __FUNCTION__);
1755
1756done:
1757 if (do_debug)
1758 ctcm_pr_debug("ctcmpc exit : %s()%s ch=0x%p id=%s\n",
1759 __FUNCTION__, dev->name, ch, ch->id);
1760
1761 return;
1762
1763}
1764
1765/*
1766 * ctcmpc channel FSM action
1767 * called from several points in ctcmpc_ch_fsm
1768 * ctcmpc only
1769 */
1770static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
1771{
1772 struct channel *ch = arg;
1773 struct net_device *dev = ch->netdev;
1774 struct ctcm_priv *priv = dev->priv;
1775 struct mpc_group *grp = priv->mpcg;
1776
1777 ctcm_pr_debug("ctcmpc enter: %s %s() %s \nGrpState:%s ChState:%s\n",
1778 dev->name, __FUNCTION__, ch->id,
1779 fsm_getstate_str(grp->fsm),
1780 fsm_getstate_str(ch->fsm));
1781
1782 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1783
1784 return;
1785}
1786
1787/*
1788 * ctcmpc channel FSM action
1789 * called from several points in ctcmpc_ch_fsm
1790 * ctcmpc only
1791 */
1792static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
1793{
1794 struct channel *ach = arg;
1795 struct net_device *dev = ach->netdev;
1796 struct ctcm_priv *priv = dev->priv;
1797 struct mpc_group *grp = priv->mpcg;
1798 struct channel *wch = priv->channel[WRITE];
1799 struct channel *rch = priv->channel[READ];
1800 struct sk_buff *skb;
1801 struct th_sweep *header;
1802 int rc = 0;
1803 unsigned long saveflags = 0;
1804
1805 if (do_debug)
1806 ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
1807 __FUNCTION__, smp_processor_id(), ach, ach->id);
1808
1809 if (grp->in_sweep == 0)
1810 goto done;
1811
1812 if (do_debug_data) {
1813 ctcm_pr_debug("ctcmpc: %s() 1: ToVTAM_th_seq= %08x\n" ,
1814 __FUNCTION__, wch->th_seq_num);
1815 ctcm_pr_debug("ctcmpc: %s() 1: FromVTAM_th_seq= %08x\n" ,
1816 __FUNCTION__, rch->th_seq_num);
1817 }
1818
1819 if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
1820 /* give the previous IO time to complete */
1821 fsm_addtimer(&wch->sweep_timer,
1822 200, CTC_EVENT_RSWEEP_TIMER, wch);
1823 goto done;
1824 }
1825
1826 skb = skb_dequeue(&wch->sweep_queue);
1827 if (!skb)
1828 goto done;
1829
1830 if (set_normalized_cda(&wch->ccw[4], skb->data)) {
1831 grp->in_sweep = 0;
1832 ctcm_clear_busy_do(dev);
1833 dev_kfree_skb_any(skb);
1834 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1835 goto done;
1836 } else {
1837 atomic_inc(&skb->users);
1838 skb_queue_tail(&wch->io_queue, skb);
1839 }
1840
1841 /* send out the sweep */
1842 wch->ccw[4].count = skb->len;
1843
1844 header = (struct th_sweep *)skb->data;
1845 switch (header->th.th_ch_flag) {
1846 case TH_SWEEP_REQ:
1847 grp->sweep_req_pend_num--;
1848 break;
1849 case TH_SWEEP_RESP:
1850 grp->sweep_rsp_pend_num--;
1851 break;
1852 }
1853
1854 header->sw.th_last_seq = wch->th_seq_num;
1855
1856 if (do_debug_ccw)
1857 ctcmpc_dumpit((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
1858
1859 ctcm_pr_debug("ctcmpc: %s() sweep packet\n", __FUNCTION__);
1860 ctcmpc_dumpit((char *)header, TH_SWEEP_LENGTH);
1861
1862 fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
1863 fsm_newstate(wch->fsm, CTC_STATE_TX);
1864
1865 spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
1866 wch->prof.send_stamp = current_kernel_time(); /* xtime */
1867 rc = ccw_device_start(wch->cdev, &wch->ccw[3],
1868 (unsigned long) wch, 0xff, 0);
1869 spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);
1870
1871 if ((grp->sweep_req_pend_num == 0) &&
1872 (grp->sweep_rsp_pend_num == 0)) {
1873 grp->in_sweep = 0;
1874 rch->th_seq_num = 0x00;
1875 wch->th_seq_num = 0x00;
1876 ctcm_clear_busy_do(dev);
1877 }
1878
1879 if (do_debug_data) {
1880 ctcm_pr_debug("ctcmpc: %s()2: ToVTAM_th_seq= %08x\n" ,
1881 __FUNCTION__, wch->th_seq_num);
1882 ctcm_pr_debug("ctcmpc: %s()2: FromVTAM_th_seq= %08x\n" ,
1883 __FUNCTION__, rch->th_seq_num);
1884 }
1885
1886 if (rc != 0)
1887 ctcm_ccw_check_rc(wch, rc, "send sweep");
1888
1889done:
1890 if (do_debug)
1891 ctcm_pr_debug("ctcmpc exit: %s() %s\n", __FUNCTION__, ach->id);
1892 return;
1893}
1894
1895
1896/*
1897 * The ctcmpc statemachine for a channel.
1898 */
1899
1900const fsm_node ctcmpc_ch_fsm[] = {
1901 { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
1902 { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
1903 { CTC_STATE_STOPPED, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1904 { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
1905 { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1906
1907 { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
1908 { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
1909 { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
1910 { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1911 { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },
1912 { CTC_STATE_NOTOP, CTC_EVENT_UC_RCRESET, ctcm_chx_stop },
1913 { CTC_STATE_NOTOP, CTC_EVENT_UC_RSRESET, ctcm_chx_stop },
1914 { CTC_STATE_NOTOP, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1915
1916 { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1917 { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
1918 { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1919 { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
1920 { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1921 { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1922
1923 { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
1924 { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
1925 { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1926 { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1927 { CTC_STATE_STARTRETRY, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1928
1929 { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1930 { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
1931 { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
1932 { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1933 { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1934 { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
1935 { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1936 { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1937
1938 { CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1939 { CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
1940 { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, ctcmpc_chx_rxidle },
1941 { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
1942 { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
1943 { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
1944 { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
1945 { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1946 { CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, ctcmpc_chx_firstio },
1947 { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1948
1949 { CH_XID0_PENDING, CTC_EVENT_FINSTAT, ctcm_action_nop },
1950 { CH_XID0_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1951 { CH_XID0_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
1952 { CH_XID0_PENDING, CTC_EVENT_START, ctcm_action_nop },
1953 { CH_XID0_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1954 { CH_XID0_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1955 { CH_XID0_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1956 { CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1957 { CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1958 { CH_XID0_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1959
1960 { CH_XID0_INPROGRESS, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1961 { CH_XID0_INPROGRESS, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1962 { CH_XID0_INPROGRESS, CTC_EVENT_STOP, ctcm_chx_haltio },
1963 { CH_XID0_INPROGRESS, CTC_EVENT_START, ctcm_action_nop },
1964 { CH_XID0_INPROGRESS, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1965 { CH_XID0_INPROGRESS, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1966 { CH_XID0_INPROGRESS, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1967 { CH_XID0_INPROGRESS, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1968 { CH_XID0_INPROGRESS, CTC_EVENT_ATTNBUSY, ctcmpc_chx_attnbusy },
1969 { CH_XID0_INPROGRESS, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1970 { CH_XID0_INPROGRESS, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1971
1972 { CH_XID7_PENDING, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1973 { CH_XID7_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1974 { CH_XID7_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
1975 { CH_XID7_PENDING, CTC_EVENT_START, ctcm_action_nop },
1976 { CH_XID7_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1977 { CH_XID7_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1978 { CH_XID7_PENDING, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1979 { CH_XID7_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1980 { CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1981 { CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1982 { CH_XID7_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1983 { CH_XID7_PENDING, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1984 { CH_XID7_PENDING, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1985
1986 { CH_XID7_PENDING1, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1987 { CH_XID7_PENDING1, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1988 { CH_XID7_PENDING1, CTC_EVENT_STOP, ctcm_chx_haltio },
1989 { CH_XID7_PENDING1, CTC_EVENT_START, ctcm_action_nop },
1990 { CH_XID7_PENDING1, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1991 { CH_XID7_PENDING1, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1992 { CH_XID7_PENDING1, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1993 { CH_XID7_PENDING1, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1994 { CH_XID7_PENDING1, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1995 { CH_XID7_PENDING1, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1996 { CH_XID7_PENDING1, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1997 { CH_XID7_PENDING1, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1998
1999 { CH_XID7_PENDING2, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
2000 { CH_XID7_PENDING2, CTC_EVENT_ATTN, ctcmpc_chx_attn },
2001 { CH_XID7_PENDING2, CTC_EVENT_STOP, ctcm_chx_haltio },
2002 { CH_XID7_PENDING2, CTC_EVENT_START, ctcm_action_nop },
2003 { CH_XID7_PENDING2, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2004 { CH_XID7_PENDING2, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2005 { CH_XID7_PENDING2, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
2006 { CH_XID7_PENDING2, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
2007 { CH_XID7_PENDING2, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
2008 { CH_XID7_PENDING2, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
2009 { CH_XID7_PENDING2, CTC_EVENT_TIMER, ctcmpc_chx_resend },
2010 { CH_XID7_PENDING2, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
2011
2012 { CH_XID7_PENDING3, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
2013 { CH_XID7_PENDING3, CTC_EVENT_ATTN, ctcmpc_chx_attn },
2014 { CH_XID7_PENDING3, CTC_EVENT_STOP, ctcm_chx_haltio },
2015 { CH_XID7_PENDING3, CTC_EVENT_START, ctcm_action_nop },
2016 { CH_XID7_PENDING3, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2017 { CH_XID7_PENDING3, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2018 { CH_XID7_PENDING3, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
2019 { CH_XID7_PENDING3, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
2020 { CH_XID7_PENDING3, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
2021 { CH_XID7_PENDING3, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
2022 { CH_XID7_PENDING3, CTC_EVENT_TIMER, ctcmpc_chx_resend },
2023 { CH_XID7_PENDING3, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
2024
2025 { CH_XID7_PENDING4, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
2026 { CH_XID7_PENDING4, CTC_EVENT_ATTN, ctcmpc_chx_attn },
2027 { CH_XID7_PENDING4, CTC_EVENT_STOP, ctcm_chx_haltio },
2028 { CH_XID7_PENDING4, CTC_EVENT_START, ctcm_action_nop },
2029 { CH_XID7_PENDING4, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2030 { CH_XID7_PENDING4, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2031 { CH_XID7_PENDING4, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
2032 { CH_XID7_PENDING4, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
2033 { CH_XID7_PENDING4, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
2034 { CH_XID7_PENDING4, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
2035 { CH_XID7_PENDING4, CTC_EVENT_TIMER, ctcmpc_chx_resend },
2036 { CH_XID7_PENDING4, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
2037
2038 { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
2039 { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
2040 { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
2041 { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
2042 { CTC_STATE_RXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
2043 { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2044 { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2045 { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
2046
2047 { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
2048 { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
2049 { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
2050 { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
2051 { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
2052 { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
2053 { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2054 { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2055 { CTC_STATE_TXINIT, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
2056
2057 { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
2058 { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
2059 { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
2060 { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
2061 { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
2062 { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2063 { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2064 { CTC_STATE_TXIDLE, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
2065
2066 { CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
2067 { CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
2068 { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
2069 { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
2070 { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
2071 { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2072 { CTC_STATE_TERM, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
2073 { CTC_STATE_TERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2074
2075 { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
2076 { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
2077 { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
2078 { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
2079 { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
2080 { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2081 { CTC_STATE_DTERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2082
2083 { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
2084 { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
2085 { CTC_STATE_TX, CTC_EVENT_FINSTAT, ctcmpc_chx_txdone },
2086 { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
2087 { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
2088 { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
2089 { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2090 { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2091 { CTC_STATE_TX, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
2092 { CTC_STATE_TX, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
2093
2094 { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
2095 { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
2096 { CTC_STATE_TXERR, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2097 { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2098 { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2099};
2100
/* Number of entries in the ctcmpc channel FSM table above. */
int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
2102
2103/*
2104 * Actions for interface - statemachine.
2105 */
2106
2107/**
2108 * Startup channels by sending CTC_EVENT_START to each channel.
2109 *
2110 * fi An instance of an interface statemachine.
2111 * event The event, just happened.
2112 * arg Generic pointer, casted from struct net_device * upon call.
2113 */
2114static void dev_action_start(fsm_instance *fi, int event, void *arg)
2115{
2116 struct net_device *dev = arg;
2117 struct ctcm_priv *priv = dev->priv;
2118 int direction;
2119
2120 CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2121
2122 fsm_deltimer(&priv->restart_timer);
2123 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2124 if (IS_MPC(priv))
2125 priv->mpcg->channels_terminating = 0;
2126 for (direction = READ; direction <= WRITE; direction++) {
2127 struct channel *ch = priv->channel[direction];
2128 fsm_event(ch->fsm, CTC_EVENT_START, ch);
2129 }
2130}
2131
2132/**
2133 * Shutdown channels by sending CTC_EVENT_STOP to each channel.
2134 *
2135 * fi An instance of an interface statemachine.
2136 * event The event, just happened.
2137 * arg Generic pointer, casted from struct net_device * upon call.
2138 */
2139static void dev_action_stop(fsm_instance *fi, int event, void *arg)
2140{
2141 int direction;
2142 struct net_device *dev = arg;
2143 struct ctcm_priv *priv = dev->priv;
2144
2145 CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2146
2147 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2148 for (direction = READ; direction <= WRITE; direction++) {
2149 struct channel *ch = priv->channel[direction];
2150 fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
2151 ch->th_seq_num = 0x00;
2152 if (do_debug)
2153 ctcm_pr_debug("ctcm: %s() CH_th_seq= %08x\n",
2154 __FUNCTION__, ch->th_seq_num);
2155 }
2156 if (IS_MPC(priv))
2157 fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
2158}
2159
2160static void dev_action_restart(fsm_instance *fi, int event, void *arg)
2161{
2162 int restart_timer;
2163 struct net_device *dev = arg;
2164 struct ctcm_priv *priv = dev->priv;
2165
2166 CTCMY_DBF_DEV_NAME(TRACE, dev, "");
2167
2168 if (IS_MPC(priv)) {
2169 ctcm_pr_info("ctcm: %s Restarting Device and "
2170 "MPC Group in 5 seconds\n",
2171 dev->name);
2172 restart_timer = CTCM_TIME_1_SEC;
2173 } else {
2174 ctcm_pr_info("%s: Restarting\n", dev->name);
2175 restart_timer = CTCM_TIME_5_SEC;
2176 }
2177
2178 dev_action_stop(fi, event, arg);
2179 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
2180 if (IS_MPC(priv))
2181 fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
2182
2183 /* going back into start sequence too quickly can */
2184 /* result in the other side becoming unreachable due */
2185 /* to sense reported when IO is aborted */
2186 fsm_addtimer(&priv->restart_timer, restart_timer,
2187 DEV_EVENT_START, dev);
2188}
2189
/**
 * Called from channel statemachine
 * when a channel is up and running.
 *
 * Advances the interface statemachine depending on which direction
 * (RX or TX) just came up, and registers the channel with the MPC
 * group when running in MPC mode.
 *
 * fi		An instance of an interface statemachine.
 * event	The event, just happened (DEV_EVENT_RXUP or DEV_EVENT_TXUP).
 * arg		Generic pointer, casted from struct net_device * upon call.
 */
static void dev_action_chup(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->priv;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	switch (fsm_getstate(fi)) {
	case DEV_STATE_STARTWAIT_RXTX:
		/* First channel is up; keep waiting for the other one. */
		if (event == DEV_EVENT_RXUP)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
		else
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
		break;
	case DEV_STATE_STARTWAIT_RX:
		/* RX was the missing direction: both channels up now. */
		if (event == DEV_EVENT_RXUP) {
			fsm_newstate(fi, DEV_STATE_RUNNING);
			ctcm_pr_info("%s: connected with remote side\n",
				    dev->name);
			ctcm_clear_busy(dev);
		}
		break;
	case DEV_STATE_STARTWAIT_TX:
		/* TX was the missing direction: both channels up now. */
		if (event == DEV_EVENT_TXUP) {
			fsm_newstate(fi, DEV_STATE_RUNNING);
			ctcm_pr_info("%s: connected with remote side\n",
				    dev->name);
			ctcm_clear_busy(dev);
		}
		break;
	case DEV_STATE_STOPWAIT_TX:
		if (event == DEV_EVENT_RXUP)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
		break;
	case DEV_STATE_STOPWAIT_RX:
		if (event == DEV_EVENT_TXUP)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
		break;
	}

	if (IS_MPC(priv)) {
		/* Add the now-active channel to the MPC group. */
		if (event == DEV_EVENT_RXUP)
			mpc_channel_action(priv->channel[READ],
				READ, MPC_CHANNEL_ADD);
		else
			mpc_channel_action(priv->channel[WRITE],
				WRITE, MPC_CHANNEL_ADD);
	}
}
2247
/**
 * Called from device statemachine
 * when a channel has been shutdown.
 *
 * Mirrors dev_action_chup(): steps the interface statemachine towards
 * STOPPED as each direction goes down, and removes the channel from
 * the MPC group when running in MPC mode.
 *
 * fi		An instance of an interface statemachine.
 * event	The event, just happened (DEV_EVENT_RXDOWN or DEV_EVENT_TXDOWN).
 * arg		Generic pointer, casted from struct net_device * upon call.
 */
static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
{

	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->priv;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	switch (fsm_getstate(fi)) {
	case DEV_STATE_RUNNING:
		/* One direction dropped: wait for it to come back. */
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
		else
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
		break;
	case DEV_STATE_STARTWAIT_RX:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
		break;
	case DEV_STATE_STARTWAIT_TX:
		if (event == DEV_EVENT_RXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
		break;
	case DEV_STATE_STOPWAIT_RXTX:
		/* First of the two channels finished stopping. */
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
		else
			fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
		break;
	case DEV_STATE_STOPWAIT_RX:
		if (event == DEV_EVENT_RXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPPED);
		break;
	case DEV_STATE_STOPWAIT_TX:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPPED);
		break;
	}
	if (IS_MPC(priv)) {
		/* Remove the stopped channel from the MPC group. */
		if (event == DEV_EVENT_RXDOWN)
			mpc_channel_action(priv->channel[READ],
				READ, MPC_CHANNEL_REMOVE);
		else
			mpc_channel_action(priv->channel[WRITE],
				WRITE, MPC_CHANNEL_REMOVE);
	}
}
2303
/*
 * The (state/event/action) transition table of the device interface
 * statemachine, shared by ctcm and ctcmpc.  Any (state, event) pair
 * not listed here is ignored by the generic fsm code.
 */
const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
	{ DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
	{ DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
	{ DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
	{ DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
	{ DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
	{ DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
	{ DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
	{ DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
	{ DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
	{ DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
	{ DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
	{ DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
	{ DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
	{ DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
	{ DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
	{ DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
	{ DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
	{ DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
	{ DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
	{ DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
	{ DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
	{ DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
	{ DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
	{ DEV_STATE_RUNNING, DEV_EVENT_TXUP, ctcm_action_nop },
	{ DEV_STATE_RUNNING, DEV_EVENT_RXUP, ctcm_action_nop },
	{ DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
};

/* Number of entries in the device interface FSM table above. */
int dev_fsm_len = ARRAY_SIZE(dev_fsm);
2345
2346/* --- This is the END my friend --- */
2347
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
new file mode 100644
index 000000000000..2326aba9807a
--- /dev/null
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -0,0 +1,359 @@
1/*
2 * drivers/s390/net/ctcm_fsms.h
3 *
4 * Copyright IBM Corp. 2001, 2007
5 * Authors: Fritz Elfert (felfert@millenux.com)
6 * Peter Tiedemann (ptiedem@de.ibm.com)
7 * MPC additions :
8 * Belinda Thompson (belindat@us.ibm.com)
9 * Andy Richter (richtera@us.ibm.com)
10 */
11#ifndef _CTCM_FSMS_H_
12#define _CTCM_FSMS_H_
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/kernel.h>
17#include <linux/slab.h>
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/interrupt.h>
21#include <linux/timer.h>
22#include <linux/bitops.h>
23
24#include <linux/signal.h>
25#include <linux/string.h>
26
27#include <linux/ip.h>
28#include <linux/if_arp.h>
29#include <linux/tcp.h>
30#include <linux/skbuff.h>
31#include <linux/ctype.h>
32#include <net/dst.h>
33
34#include <linux/io.h>
35#include <asm/ccwdev.h>
36#include <asm/ccwgroup.h>
37#include <linux/uaccess.h>
38
39#include <asm/idals.h>
40
41#include "fsm.h"
42#include "cu3088.h"
43#include "ctcm_main.h"
44
45/*
46 * Definitions for the channel statemachine(s) for ctc and ctcmpc
47 *
48 * To allow better kerntyping, prefix-less definitions for channel states
49 * and channel events have been replaced :
50 * ch_event... -> ctc_ch_event...
51 * CH_EVENT... -> CTC_EVENT...
52 * ch_state... -> ctc_ch_state...
53 * CH_STATE... -> CTC_STATE...
54 */
55/*
56 * Events of the channel statemachine(s) for ctc and ctcmpc
57 */
enum ctc_ch_events {
	/*
	 * Events, representing return code of
	 * I/O operations (ccw_device_start, ccw_device_halt et al.)
	 */
	CTC_EVENT_IO_SUCCESS,
	CTC_EVENT_IO_EBUSY,
	CTC_EVENT_IO_ENODEV,
	CTC_EVENT_IO_UNKNOWN,

	CTC_EVENT_ATTNBUSY,
	CTC_EVENT_ATTN,
	CTC_EVENT_BUSY,
	/*
	 * Events, representing unit-check
	 */
	CTC_EVENT_UC_RCRESET,
	CTC_EVENT_UC_RSRESET,
	CTC_EVENT_UC_TXTIMEOUT,
	CTC_EVENT_UC_TXPARITY,
	CTC_EVENT_UC_HWFAIL,
	CTC_EVENT_UC_RXPARITY,
	CTC_EVENT_UC_ZERO,
	CTC_EVENT_UC_UNKNOWN,
	/*
	 * Events, representing subchannel-check
	 */
	CTC_EVENT_SC_UNKNOWN,
	/*
	 * Events, representing machine checks
	 */
	CTC_EVENT_MC_FAIL,
	CTC_EVENT_MC_GOOD,
	/*
	 * Event, representing normal IRQ
	 */
	CTC_EVENT_IRQ,
	CTC_EVENT_FINSTAT,
	/*
	 * Event, representing timer expiry.
	 */
	CTC_EVENT_TIMER,
	/*
	 * Events, representing commands from upper levels.
	 */
	CTC_EVENT_START,
	CTC_EVENT_STOP,
	CTC_NR_EVENTS,	/* number of plain CTC events */
	/*
	 * additional MPC events, appended after the plain CTC events so
	 * that both drivers share one numbering scheme
	 */
	CTC_EVENT_SEND_XID = CTC_NR_EVENTS,
	CTC_EVENT_RSWEEP_TIMER,
	/*
	 * MUST be always the last element!!
	 */
	CTC_MPC_NR_EVENTS,
};
116
117/*
118 * States of the channel statemachine(s) for ctc and ctcmpc.
119 */
enum ctc_ch_states {
	/*
	 * Channel not assigned to any device,
	 * initial state, direction invalid
	 */
	CTC_STATE_IDLE,
	/*
	 * Channel assigned but not operating
	 */
	CTC_STATE_STOPPED,
	CTC_STATE_STARTWAIT,
	CTC_STATE_STARTRETRY,
	CTC_STATE_SETUPWAIT,
	CTC_STATE_RXINIT,
	CTC_STATE_TXINIT,
	CTC_STATE_RX,
	CTC_STATE_TX,
	CTC_STATE_RXIDLE,
	CTC_STATE_TXIDLE,
	CTC_STATE_RXERR,
	CTC_STATE_TXERR,
	CTC_STATE_TERM,
	CTC_STATE_DTERM,
	CTC_STATE_NOTOP,
	CTC_NR_STATES,     /* MUST be the last element of non-expanded states */
	/*
	 * additional MPC states, appended after the plain CTC states so
	 * that both drivers share one numbering scheme
	 */
	CH_XID0_PENDING = CTC_NR_STATES,
	CH_XID0_INPROGRESS,
	CH_XID7_PENDING,
	CH_XID7_PENDING1,
	CH_XID7_PENDING2,
	CH_XID7_PENDING3,
	CH_XID7_PENDING4,
	CTC_MPC_NR_STATES, /* MUST be the last element of expanded mpc states */
};
157
158extern const char *ctc_ch_event_names[];
159
160extern const char *ctc_ch_state_names[];
161
162void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg);
163void ctcm_purge_skb_queue(struct sk_buff_head *q);
164void fsm_action_nop(fsm_instance *fi, int event, void *arg);
165
166/*
167 * ----- non-static actions for ctcm channel statemachine -----
168 *
169 */
170void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg);
171
172/*
173 * ----- FSM (state/event/action) of the ctcm channel statemachine -----
174 */
175extern const fsm_node ch_fsm[];
176extern int ch_fsm_len;
177
178
179/*
180 * ----- non-static actions for ctcmpc channel statemachine ----
181 *
182 */
183/* shared :
184void ctcm_chx_txidle(fsm_instance * fi, int event, void *arg);
185 */
186void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg);
187
188/*
189 * ----- FSM (state/event/action) of the ctcmpc channel statemachine -----
190 */
191extern const fsm_node ctcmpc_ch_fsm[];
192extern int mpc_ch_fsm_len;
193
194/*
195 * Definitions for the device interface statemachine for ctc and mpc
196 */
197
198/*
199 * States of the device interface statemachine.
200 */
enum dev_states {
	DEV_STATE_STOPPED,
	/*
	 * The STARTWAIT_* states are entered while the named channel
	 * direction(s) have yet to come up; the STOPWAIT_* states while
	 * they have yet to go down (see dev_action_chup/dev_action_chdown
	 * in ctcm_fsms.c).
	 */
	DEV_STATE_STARTWAIT_RXTX,
	DEV_STATE_STARTWAIT_RX,
	DEV_STATE_STARTWAIT_TX,
	DEV_STATE_STOPWAIT_RXTX,
	DEV_STATE_STOPWAIT_RX,
	DEV_STATE_STOPWAIT_TX,
	DEV_STATE_RUNNING,
	/*
	 * MUST be always the last element!!
	 */
	CTCM_NR_DEV_STATES
};
215
216extern const char *dev_state_names[];
217
218/*
219 * Events of the device interface statemachine.
220 * ctcm and ctcmpc
221 */
enum dev_events {
	DEV_EVENT_START,	/* bring the interface up */
	DEV_EVENT_STOP,		/* take the interface down */
	DEV_EVENT_RXUP,		/* read channel became operational */
	DEV_EVENT_TXUP,		/* write channel became operational */
	DEV_EVENT_RXDOWN,	/* read channel went down */
	DEV_EVENT_TXDOWN,	/* write channel went down */
	DEV_EVENT_RESTART,	/* stop, then delayed start (restart timer) */
	/*
	 * MUST be always the last element!!
	 */
	CTCM_NR_DEV_EVENTS
};
235
236extern const char *dev_event_names[];
237
238/*
239 * Actions for the device interface statemachine.
240 * ctc and ctcmpc
241 */
242/*
243static void dev_action_start(fsm_instance * fi, int event, void *arg);
244static void dev_action_stop(fsm_instance * fi, int event, void *arg);
245static void dev_action_restart(fsm_instance *fi, int event, void *arg);
246static void dev_action_chup(fsm_instance * fi, int event, void *arg);
247static void dev_action_chdown(fsm_instance * fi, int event, void *arg);
248*/
249
250/*
251 * The (state/event/action) fsm table of the device interface statemachine.
252 * ctcm and ctcmpc
253 */
254extern const fsm_node dev_fsm[];
255extern int dev_fsm_len;
256
257
258/*
259 * Definitions for the MPC Group statemachine
260 */
261
262/*
263 * MPC Group Station FSM States
264
265State Name When In This State
266====================== =======================================
267MPCG_STATE_RESET Initial State When Driver Loaded
268 We receive and send NOTHING
269
270MPCG_STATE_INOP INOP Received.
271 Group level non-recoverable error
272
273MPCG_STATE_READY XID exchanges for at least 1 write and
274 1 read channel have completed.
275 Group is ready for data transfer.
276
277States from ctc_mpc_alloc_channel
278==============================================================
279MPCG_STATE_XID2INITW Awaiting XID2(0) Initiation
280 ATTN from other side will start
281 XID negotiations.
282 Y-side protocol only.
283
284MPCG_STATE_XID2INITX XID2(0) negotiations are in progress.
285 At least 1, but not all, XID2(0)'s
286 have been received from partner.
287
288MPCG_STATE_XID7INITW XID2(0) complete
289 No XID2(7)'s have yet been received.
290 XID2(7) negotiations pending.
291
292MPCG_STATE_XID7INITX XID2(7) negotiations in progress.
293 At least 1, but not all, XID2(7)'s
294 have been received from partner.
295
296MPCG_STATE_XID7INITF XID2(7) negotiations complete.
297 Transitioning to READY.
298
299MPCG_STATE_READY Ready for Data Transfer.
300
301
302States from ctc_mpc_establish_connectivity call
303==============================================================
304MPCG_STATE_XID0IOWAIT Initiating XID2(0) negotiations.
305 X-side protocol only.
306 ATTN-BUSY from other side will convert
307 this to Y-side protocol and the
308 ctc_mpc_alloc_channel flow will begin.
309
310MPCG_STATE_XID0IOWAIX XID2(0) negotiations are in progress.
311 At least 1, but not all, XID2(0)'s
312 have been received from partner.
313
314MPCG_STATE_XID7INITI XID2(0) complete
315 No XID2(7)'s have yet been received.
316 XID2(7) negotiations pending.
317
318MPCG_STATE_XID7INITZ XID2(7) negotiations in progress.
319 At least 1, but not all, XID2(7)'s
320 have been received from partner.
321
322MPCG_STATE_XID7INITF XID2(7) negotiations complete.
323 Transitioning to READY.
324
325MPCG_STATE_READY Ready for Data Transfer.
326
327*/
328
/* Events of the MPC group statemachine. */
enum mpcg_events {
	MPCG_EVENT_INOP,	/* group hit a non-recoverable error */
	MPCG_EVENT_DISCONC,
	MPCG_EVENT_XID0DO,
	MPCG_EVENT_XID2,
	MPCG_EVENT_XID2DONE,
	MPCG_EVENT_XID7DONE,
	MPCG_EVENT_TIMER,
	MPCG_EVENT_DOIO,
	MPCG_NR_EVENTS,	/* MUST be the last element */
};
340
/*
 * States of the MPC group statemachine.
 * See the state description table above for when each state is entered.
 */
enum mpcg_states {
	MPCG_STATE_RESET,
	MPCG_STATE_INOP,
	MPCG_STATE_XID2INITW,
	MPCG_STATE_XID2INITX,
	MPCG_STATE_XID7INITW,
	MPCG_STATE_XID7INITX,
	MPCG_STATE_XID0IOWAIT,
	MPCG_STATE_XID0IOWAIX,
	MPCG_STATE_XID7INITI,
	MPCG_STATE_XID7INITZ,
	MPCG_STATE_XID7INITF,
	MPCG_STATE_FLOWC,
	MPCG_STATE_READY,
	MPCG_NR_STATES,	/* MUST be the last element */
};
357
358#endif
359/* --- This is the END my friend --- */
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
new file mode 100644
index 000000000000..d52843da4f55
--- /dev/null
+++ b/drivers/s390/net/ctcm_main.c
@@ -0,0 +1,1772 @@
1/*
2 * drivers/s390/net/ctcm_main.c
3 *
4 * Copyright IBM Corp. 2001, 2007
5 * Author(s):
6 * Original CTC driver(s):
7 * Fritz Elfert (felfert@millenux.com)
8 * Dieter Wellerdiek (wel@de.ibm.com)
9 * Martin Schwidefsky (schwidefsky@de.ibm.com)
10 * Denis Joseph Barrow (barrow_dj@yahoo.com)
11 * Jochen Roehrig (roehrig@de.ibm.com)
12 * Cornelia Huck <cornelia.huck@de.ibm.com>
13 * MPC additions:
14 * Belinda Thompson (belindat@us.ibm.com)
15 * Andy Richter (richtera@us.ibm.com)
16 * Revived by:
17 * Peter Tiedemann (ptiedem@de.ibm.com)
18 */
19
20#undef DEBUG
21#undef DEBUGDATA
22#undef DEBUGCCW
23
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/errno.h>
29#include <linux/types.h>
30#include <linux/interrupt.h>
31#include <linux/timer.h>
32#include <linux/bitops.h>
33
34#include <linux/signal.h>
35#include <linux/string.h>
36
37#include <linux/ip.h>
38#include <linux/if_arp.h>
39#include <linux/tcp.h>
40#include <linux/skbuff.h>
41#include <linux/ctype.h>
42#include <net/dst.h>
43
44#include <linux/io.h>
45#include <asm/ccwdev.h>
46#include <asm/ccwgroup.h>
47#include <linux/uaccess.h>
48
49#include <asm/idals.h>
50
51#include "cu3088.h"
52#include "ctcm_fsms.h"
53#include "ctcm_main.h"
54
55/* Some common global variables */
56
/*
 * Linked list of all detected channels.
 * Head of a singly linked list; the links run through channel->next
 * (see channel_remove() / channel_get()).
 */
struct channel *channels;
61
/**
 * Unpack a just received skb and hand it over to
 * upper layers.
 *
 * A received block starts with a 2-byte block length followed by one
 * or more (ll_header, payload) frames; each frame is copied into a
 * freshly allocated skb and pushed up via netif_rx_ni().  On any
 * malformed frame the remainder of the block is dropped and the
 * relevant rx_* statistics are bumped.  Per-error logflags suppress
 * repeated warnings until a clean frame resets them.
 *
 * ch	The channel where this skb has been received.
 * pskb	The received skb.
 */
void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
{
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	/* First 2 bytes of the block are the total block length. */
	__u16 len = *((__u16 *) pskb->data);

	skb_put(pskb, 2 + LL_HEADER_LENGTH);
	skb_pull(pskb, 2);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_UNNECESSARY;
	while (len > 0) {
		struct sk_buff *skb;
		int skblen;
		struct ll_header *header = (struct ll_header *)pskb->data;

		skb_pull(pskb, LL_HEADER_LENGTH);
		if ((ch->protocol == CTCM_PROTO_S390) &&
		    (header->type != ETH_P_IP)) {

			if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
				/*
				 * Check packet type only if we stick strictly
				 * to S/390's protocol of OS390. This only
				 * supports IP. Otherwise allow any packet
				 * type.
				 */
				ctcm_pr_warn("%s Illegal packet type 0x%04x "
						"received, dropping\n",
						dev->name, header->type);
				ch->logflags |= LOG_FLAG_ILLEGALPKT;
			}

			priv->stats.rx_dropped++;
			priv->stats.rx_frame_errors++;
			return;
		}
		pskb->protocol = ntohs(header->type);
		/* Frame must carry payload beyond its own header. */
		if (header->length <= LL_HEADER_LENGTH) {
			if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
				ctcm_pr_warn(
					"%s Illegal packet size %d "
					"received (MTU=%d blocklen=%d), "
					"dropping\n", dev->name, header->length,
					dev->mtu, len);
				ch->logflags |= LOG_FLAG_ILLEGALSIZE;
			}

			priv->stats.rx_dropped++;
			priv->stats.rx_length_errors++;
			return;
		}
		header->length -= LL_HEADER_LENGTH;
		len -= LL_HEADER_LENGTH;
		/* Frame must fit into both the buffer and the block. */
		if ((header->length > skb_tailroom(pskb)) ||
			(header->length > len)) {
			if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
				ctcm_pr_warn(
					"%s Illegal packet size %d (beyond the"
					" end of received data), dropping\n",
					dev->name, header->length);
				ch->logflags |= LOG_FLAG_OVERRUN;
			}

			priv->stats.rx_dropped++;
			priv->stats.rx_length_errors++;
			return;
		}
		skb_put(pskb, header->length);
		skb_reset_mac_header(pskb);
		len -= header->length;
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			if (!(ch->logflags & LOG_FLAG_NOMEM)) {
				ctcm_pr_warn(
					"%s Out of memory in ctcm_unpack_skb\n",
					dev->name);
				ch->logflags |= LOG_FLAG_NOMEM;
			}
			priv->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		/*
		 * NOTE(review): this re-assigns pskb->ip_summed (already set
		 * before the loop); it looks like skb->ip_summed was intended
		 * for the freshly copied skb — verify before changing.
		 */
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		skblen = skb->len;
		/*
		 * reset logflags
		 */
		ch->logflags = 0;
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += skblen;
		netif_rx_ni(skb);
		dev->last_rx = jiffies;
		/* Advance to the next frame in the block, if any. */
		if (len > 0) {
			skb_pull(pskb, header->length);
			if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
				if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
					CTCM_DBF_DEV_NAME(TRACE, dev,
						"Overrun in ctcm_unpack_skb");
					ch->logflags |= LOG_FLAG_OVERRUN;
				}
				return;
			}
			skb_put(pskb, LL_HEADER_LENGTH);
		}
	}
}
179
180/**
181 * Release a specific channel in the channel list.
182 *
183 * ch Pointer to channel struct to be released.
184 */
185static void channel_free(struct channel *ch)
186{
187 CTCM_DBF_TEXT(TRACE, 2, __FUNCTION__);
188 ch->flags &= ~CHANNEL_FLAGS_INUSE;
189 fsm_newstate(ch->fsm, CTC_STATE_IDLE);
190}
191
192/**
193 * Remove a specific channel in the channel list.
194 *
195 * ch Pointer to channel struct to be released.
196 */
197static void channel_remove(struct channel *ch)
198{
199 struct channel **c = &channels;
200 char chid[CTCM_ID_SIZE+1];
201 int ok = 0;
202
203 if (ch == NULL)
204 return;
205 else
206 strncpy(chid, ch->id, CTCM_ID_SIZE);
207
208 channel_free(ch);
209 while (*c) {
210 if (*c == ch) {
211 *c = ch->next;
212 fsm_deltimer(&ch->timer);
213 if (IS_MPC(ch))
214 fsm_deltimer(&ch->sweep_timer);
215
216 kfree_fsm(ch->fsm);
217 clear_normalized_cda(&ch->ccw[4]);
218 if (ch->trans_skb != NULL) {
219 clear_normalized_cda(&ch->ccw[1]);
220 dev_kfree_skb_any(ch->trans_skb);
221 }
222 if (IS_MPC(ch)) {
223 tasklet_kill(&ch->ch_tasklet);
224 tasklet_kill(&ch->ch_disc_tasklet);
225 kfree(ch->discontact_th);
226 }
227 kfree(ch->ccw);
228 kfree(ch->irb);
229 kfree(ch);
230 ok = 1;
231 break;
232 }
233 c = &((*c)->next);
234 }
235
236 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s) %s", CTCM_FUNTAIL,
237 chid, ok ? "OK" : "failed");
238}
239
240/**
241 * Get a specific channel from the channel list.
242 *
243 * type Type of channel we are interested in.
244 * id Id of channel we are interested in.
245 * direction Direction we want to use this channel for.
246 *
247 * returns Pointer to a channel or NULL if no matching channel available.
248 */
249static struct channel *channel_get(enum channel_types type,
250 char *id, int direction)
251{
252 struct channel *ch = channels;
253
254 if (do_debug) {
255 char buf[64];
256 sprintf(buf, "%s(%d, %s, %d)\n",
257 CTCM_FUNTAIL, type, id, direction);
258 CTCM_DBF_TEXT(TRACE, CTC_DBF_INFO, buf);
259 }
260 while (ch && (strncmp(ch->id, id, CTCM_ID_SIZE) || (ch->type != type)))
261 ch = ch->next;
262 if (!ch) {
263 char buf[64];
264 sprintf(buf, "%s(%d, %s, %d) not found in channel list\n",
265 CTCM_FUNTAIL, type, id, direction);
266 CTCM_DBF_TEXT(ERROR, CTC_DBF_ERROR, buf);
267 } else {
268 if (ch->flags & CHANNEL_FLAGS_INUSE)
269 ch = NULL;
270 else {
271 ch->flags |= CHANNEL_FLAGS_INUSE;
272 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
273 ch->flags |= (direction == WRITE)
274 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
275 fsm_newstate(ch->fsm, CTC_STATE_STOPPED);
276 }
277 }
278 return ch;
279}
280
281static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb)
282{
283 if (!IS_ERR(irb))
284 return 0;
285
286 CTCM_DBF_TEXT_(ERROR, CTC_DBF_WARN, "irb error %ld on device %s\n",
287 PTR_ERR(irb), cdev->dev.bus_id);
288
289 switch (PTR_ERR(irb)) {
290 case -EIO:
291 ctcm_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
292 break;
293 case -ETIMEDOUT:
294 ctcm_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
295 break;
296 default:
297 ctcm_pr_warn("unknown error %ld on device %s\n",
298 PTR_ERR(irb), cdev->dev.bus_id);
299 }
300 return PTR_ERR(irb);
301}
302
303
304/**
305 * Check sense of a unit check.
306 *
307 * ch The channel, the sense code belongs to.
308 * sense The sense code to inspect.
309 */
310static inline void ccw_unit_check(struct channel *ch, unsigned char sense)
311{
312 CTCM_DBF_TEXT(TRACE, 5, __FUNCTION__);
313 if (sense & SNS0_INTERVENTION_REQ) {
314 if (sense & 0x01) {
315 ctcm_pr_debug("%s: Interface disc. or Sel. reset "
316 "(remote)\n", ch->id);
317 fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch);
318 } else {
319 ctcm_pr_debug("%s: System reset (remote)\n", ch->id);
320 fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch);
321 }
322 } else if (sense & SNS0_EQUIPMENT_CHECK) {
323 if (sense & SNS0_BUS_OUT_CHECK) {
324 ctcm_pr_warn("%s: Hardware malfunction (remote)\n",
325 ch->id);
326 fsm_event(ch->fsm, CTC_EVENT_UC_HWFAIL, ch);
327 } else {
328 ctcm_pr_warn("%s: Read-data parity error (remote)\n",
329 ch->id);
330 fsm_event(ch->fsm, CTC_EVENT_UC_RXPARITY, ch);
331 }
332 } else if (sense & SNS0_BUS_OUT_CHECK) {
333 if (sense & 0x04) {
334 ctcm_pr_warn("%s: Data-streaming timeout)\n", ch->id);
335 fsm_event(ch->fsm, CTC_EVENT_UC_TXTIMEOUT, ch);
336 } else {
337 ctcm_pr_warn("%s: Data-transfer parity error\n",
338 ch->id);
339 fsm_event(ch->fsm, CTC_EVENT_UC_TXPARITY, ch);
340 }
341 } else if (sense & SNS0_CMD_REJECT) {
342 ctcm_pr_warn("%s: Command reject\n", ch->id);
343 } else if (sense == 0) {
344 ctcm_pr_debug("%s: Unit check ZERO\n", ch->id);
345 fsm_event(ch->fsm, CTC_EVENT_UC_ZERO, ch);
346 } else {
347 ctcm_pr_warn("%s: Unit Check with sense code: %02x\n",
348 ch->id, sense);
349 fsm_event(ch->fsm, CTC_EVENT_UC_UNKNOWN, ch);
350 }
351}
352
353int ctcm_ch_alloc_buffer(struct channel *ch)
354{
355 CTCM_DBF_TEXT(TRACE, 5, __FUNCTION__);
356
357 clear_normalized_cda(&ch->ccw[1]);
358 ch->trans_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC | GFP_DMA);
359 if (ch->trans_skb == NULL) {
360 ctcm_pr_warn("%s: Couldn't alloc %s trans_skb\n",
361 ch->id,
362 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
363 return -ENOMEM;
364 }
365
366 ch->ccw[1].count = ch->max_bufsize;
367 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
368 dev_kfree_skb(ch->trans_skb);
369 ch->trans_skb = NULL;
370 ctcm_pr_warn("%s: set_normalized_cda for %s "
371 "trans_skb failed, dropping packets\n",
372 ch->id,
373 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
374 return -ENOMEM;
375 }
376
377 ch->ccw[1].count = 0;
378 ch->trans_skb_data = ch->trans_skb->data;
379 ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
380 return 0;
381}
382
383/*
384 * Interface API for upper network layers
385 */
386
/**
 * Open an interface.
 * Called from generic network layer when ifconfig up is run.
 *
 * For non-MPC devices this kicks the device statemachine with
 * DEV_EVENT_START; MPC devices are brought up through the group
 * FSM instead, so no event is posted here.
 *
 * dev Pointer to interface struct.
 *
 * returns 0 on success, -ERRNO on failure. (Never fails.)
 */
int ctcm_open(struct net_device *dev)
{
	struct ctcm_priv *priv = dev->priv;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
	if (!IS_MPC(priv))
		fsm_event(priv->fsm, DEV_EVENT_START, dev);
	return 0;
}
404
/**
 * Close an interface.
 * Called from generic network layer when ifconfig down is run.
 *
 * For non-MPC devices this kicks the device statemachine with
 * DEV_EVENT_STOP; MPC devices are taken down through the group
 * FSM instead, so no event is posted here.
 *
 * dev Pointer to interface struct.
 *
 * returns 0 on success, -ERRNO on failure. (Never fails.)
 */
int ctcm_close(struct net_device *dev)
{
	struct ctcm_priv *priv = dev->priv;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
	if (!IS_MPC(priv))
		fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
	return 0;
}
422
423
424/**
425 * Transmit a packet.
426 * This is a helper function for ctcm_tx().
427 *
428 * ch Channel to be used for sending.
429 * skb Pointer to struct sk_buff of packet to send.
430 * The linklevel header has already been set up
431 * by ctcm_tx().
432 *
433 * returns 0 on success, -ERRNO on failure. (Never fails.)
434 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;	/* link-level header prepended to skb */
	int rc = 0;
	__u16 block_len;
	int ccw_idx;		/* 0 = send via trans_skb, 3 = direct write */
	struct sk_buff *nskb;
	unsigned long hi;	/* non-zero if skb data lies above 2G */

	/* we need to acquire the lock for testing the state
	 * otherwise we can have an IRQ changing the state to
	 * TXIDLE after the test but before acquiring the lock.
	 */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		/* Channel busy: queue skb (with LL header prepended) on the
		 * collect queue; it will be sent later in one block. */
		int l = skb->len + LL_HEADER_LENGTH;

		if (ch->collect_len + l > ch->max_bufsize - 2) {
			/* NOTE(review): this -EBUSY return does not call
			 * ctcm_clear_busy() while the other error returns
			 * below do — confirm the caller expects the busy
			 * flag to stay set in this case. */
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	/*
	 * Protect skb against beeing free'd by upper
	 * layers.
	 */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	/* prepend the 2-byte overall block length */
	*((__u16 *)skb_push(skb, 2)) = block_len;

	/*
	 * IDAL support in CTCM is broken, so we have to
	 * care about skb's above 2G ourselves.
	 */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		/* copy payload into a fresh skb below the 2G line (GFP_DMA) */
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			atomic_dec(&skb->users);
			/* remove our header; it gets re-added on retransmit */
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}

	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/*
		 * idal allocation failed, try via copying to
		 * trans_skb. trans_skb usually has a pre-allocated
		 * idal.
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			/*
			 * Remove our header. It gets added
			 * again on retransmit.
			 */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -EBUSY;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;	/* channel program 0..2 (trans_skb) */
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;	/* channel program 3..5 (direct write) */
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	/* ccw_device_start() must run under the ccw device lock */
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
					(unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		/*
		 * Remove our header. It gets added
		 * again on retransmit.
		 */
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		/* trans_skb path: account here, original skb already freed */
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->priv;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}
564
565static void ctcmpc_send_sweep_req(struct channel *rch)
566{
567 struct net_device *dev = rch->netdev;
568 struct ctcm_priv *priv;
569 struct mpc_group *grp;
570 struct th_sweep *header;
571 struct sk_buff *sweep_skb;
572 struct channel *ch;
573 int rc = 0;
574
575 priv = dev->priv;
576 grp = priv->mpcg;
577 ch = priv->channel[WRITE];
578
579 if (do_debug)
580 MPC_DBF_DEV_NAME(TRACE, dev, ch->id);
581
582 /* sweep processing is not complete until response and request */
583 /* has completed for all read channels in group */
584 if (grp->in_sweep == 0) {
585 grp->in_sweep = 1;
586 grp->sweep_rsp_pend_num = grp->active_channels[READ];
587 grp->sweep_req_pend_num = grp->active_channels[READ];
588 }
589
590 sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
591
592 if (sweep_skb == NULL) {
593 printk(KERN_INFO "Couldn't alloc sweep_skb\n");
594 rc = -ENOMEM;
595 goto done;
596 }
597
598 header = kmalloc(TH_SWEEP_LENGTH, gfp_type());
599
600 if (!header) {
601 dev_kfree_skb_any(sweep_skb);
602 rc = -ENOMEM;
603 goto done;
604 }
605
606 header->th.th_seg = 0x00 ;
607 header->th.th_ch_flag = TH_SWEEP_REQ; /* 0x0f */
608 header->th.th_blk_flag = 0x00;
609 header->th.th_is_xid = 0x00;
610 header->th.th_seq_num = 0x00;
611 header->sw.th_last_seq = ch->th_seq_num;
612
613 memcpy(skb_put(sweep_skb, TH_SWEEP_LENGTH), header, TH_SWEEP_LENGTH);
614
615 kfree(header);
616
617 dev->trans_start = jiffies;
618 skb_queue_tail(&ch->sweep_queue, sweep_skb);
619
620 fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
621
622 return;
623
624done:
625 if (rc != 0) {
626 grp->in_sweep = 0;
627 ctcm_clear_busy(dev);
628 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
629 }
630
631 return;
632}
633
634/*
635 * MPC mode version of transmit_skb
636 */
static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	struct pdu *p_header;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *grp = priv->mpcg;
	struct th_header *header;
	struct sk_buff *nskb;
	int rc = 0;
	int ccw_idx;		/* 0 = send via trans_skb, 3 = direct write */
	unsigned long hi;	/* non-zero if skb data lies above 2G */
	unsigned long saveflags = 0;	/* avoids compiler warning */
	__u16 block_len;

	/* NOTE: this function always returns 0 (the skb is considered
	 * consumed); error paths raise MPCG_EVENT_INOP on the group fsm
	 * instead of reporting failure to the caller. */
	if (do_debug)
		ctcm_pr_debug(
			"ctcm enter: %s(): %s cp=%i ch=0x%p id=%s state=%s\n",
			__FUNCTION__, dev->name, smp_processor_id(), ch,
			ch->id, fsm_getstate_str(ch->fsm));

	if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
		/* Channel busy or sweep in progress: prepend a PDU header
		 * and park the skb on the collect queue for later. */
		spin_lock_irqsave(&ch->collect_lock, saveflags);
		atomic_inc(&skb->users);
		p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());

		if (!p_header) {
			printk(KERN_WARNING "ctcm: OUT OF MEMORY IN %s():"
			       " Data Lost \n", __FUNCTION__);

			atomic_dec(&skb->users);
			dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
			goto done;
		}

		p_header->pdu_offset = skb->len;
		p_header->pdu_proto = 0x01;
		p_header->pdu_flag = 0x00;
		if (skb->protocol == ntohs(ETH_P_SNAP)) {
			p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
		} else {
			p_header->pdu_flag |= PDU_FIRST;
		}
		p_header->pdu_seq = 0;
		memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header,
		       PDU_HEADER_LENGTH);

		if (do_debug_data) {
			ctcm_pr_debug("ctcm: %s() Putting on collect_q"
			       " - skb len: %04x \n", __FUNCTION__, skb->len);
			ctcm_pr_debug("ctcm: %s() pdu header and data"
			       " for up to 32 bytes\n", __FUNCTION__);
			ctcmpc_dump32((char *)skb->data, skb->len);
		}

		skb_queue_tail(&ch->collect_queue, skb);
		ch->collect_len += skb->len;
		kfree(p_header);

		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}

	/*
	 * Protect skb against beeing free'd by upper
	 * layers.
	 */
	atomic_inc(&skb->users);

	block_len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
	/*
	 * IDAL support in CTCM is broken, so we have to
	 * care about skb's above 2G ourselves.
	 */
	hi = ((unsigned long)skb->tail + TH_HEADER_LENGTH) >> 31;
	if (hi) {
		/* copy payload into a fresh skb below the 2G line (GFP_DMA) */
		nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			printk(KERN_WARNING "ctcm: %s() OUT OF MEMORY"
				"- Data Lost \n", __FUNCTION__);
			atomic_dec(&skb->users);
			dev_kfree_skb_any(skb);
			fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
			goto done;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}

	/* prepend the PDU header for this frame */
	p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());

	if (!p_header) {
		printk(KERN_WARNING "ctcm: %s() OUT OF MEMORY"
		       ": Data Lost \n", __FUNCTION__);

		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}

	p_header->pdu_offset = skb->len;
	p_header->pdu_proto = 0x01;
	p_header->pdu_flag = 0x00;
	p_header->pdu_seq = 0;
	if (skb->protocol == ntohs(ETH_P_SNAP)) {
		p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
	} else {
		p_header->pdu_flag |= PDU_FIRST;
	}
	memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header, PDU_HEADER_LENGTH);

	kfree(p_header);

	/* If earlier frames are collected, send the oldest one first and
	 * queue the current skb behind them (FIFO order preserved). */
	if (ch->collect_len > 0) {
		spin_lock_irqsave(&ch->collect_lock, saveflags);
		skb_queue_tail(&ch->collect_queue, skb);
		ch->collect_len += skb->len;
		skb = skb_dequeue(&ch->collect_queue);
		ch->collect_len -= skb->len;
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	}

	p_header = (struct pdu *)skb->data;
	p_header->pdu_flag |= PDU_LAST;

	ch->prof.txlen += skb->len - PDU_HEADER_LENGTH;

	/* prepend the TH (transport header) with the next sequence number */
	header = kmalloc(TH_HEADER_LENGTH, gfp_type());

	if (!header) {
		printk(KERN_WARNING "ctcm: %s() OUT OF MEMORY: Data Lost \n",
		       __FUNCTION__);
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}

	header->th_seg = 0x00;
	header->th_ch_flag = TH_HAS_PDU;  /* Normal data */
	header->th_blk_flag = 0x00;
	header->th_is_xid = 0x00;	/* Just data here */
	ch->th_seq_num++;
	header->th_seq_num = ch->th_seq_num;

	if (do_debug_data)
		ctcm_pr_debug("ctcm: %s() ToVTAM_th_seq= %08x\n" ,
		       __FUNCTION__, ch->th_seq_num);

	/* put the TH on the packet */
	memcpy(skb_push(skb, TH_HEADER_LENGTH), header, TH_HEADER_LENGTH);

	kfree(header);

	if (do_debug_data) {
		ctcm_pr_debug("ctcm: %s(): skb len: %04x \n",
		       __FUNCTION__, skb->len);
		ctcm_pr_debug("ctcm: %s(): pdu header and data for up to 32 "
		       "bytes sent to vtam\n", __FUNCTION__);
		ctcmpc_dump32((char *)skb->data, skb->len);
	}

	ch->ccw[4].count = skb->len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/*
		 * idal allocation failed, try via copying to
		 * trans_skb. trans_skb usually has a pre-allocated
		 * idal.
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			/*
			 * Remove our header. It gets added
			 * again on retransmit.
			 */
			atomic_dec(&skb->users);
			dev_kfree_skb_any(skb);
			printk(KERN_WARNING "ctcm: %s()OUT OF MEMORY:"
				" Data Lost \n", __FUNCTION__);
			fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
			goto done;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;	/* channel program 0..2 (trans_skb) */
		if (do_debug_data) {
			ctcm_pr_debug("ctcm: %s() TRANS skb len: %d \n",
			       __FUNCTION__, ch->trans_skb->len);
			ctcm_pr_debug("ctcm: %s up to 32 bytes of data"
			       " sent to vtam\n", __FUNCTION__);
			ctcmpc_dump32((char *)ch->trans_skb->data,
					ch->trans_skb->len);
		}
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;	/* channel program 3..5 (direct write) */
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

	if (do_debug_ccw)
		ctcmpc_dumpit((char *)&ch->ccw[ccw_idx],
				sizeof(struct ccw1) * 3);

	/* ccw_device_start() must run under the ccw device lock */
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
					(unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
	} else if (ccw_idx == 0) {
		/* trans_skb path: account here, original skb already freed */
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
	}
	/* start a sweep before the TH sequence number wraps */
	if (ch->th_seq_num > 0xf0000000)	/* Chose 4Billion at random. */
		ctcmpc_send_sweep_req(ch);

done:
	if (do_debug)
		ctcm_pr_debug("ctcm exit: %s %s()\n", dev->name, __FUNCTION__);
	return 0;
}
876
877/**
878 * Start transmission of a packet.
879 * Called from generic network device layer.
880 *
881 * skb Pointer to buffer containing the packet.
882 * dev Pointer to interface struct.
883 *
884 * returns 0 if packet consumed, !0 if packet rejected.
885 * Note: If we return !0, then the packet is free'd by
886 * the generic network layer.
887 */
888/* first merge version - leaving both functions separated */
889static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
890{
891 int rc = 0;
892 struct ctcm_priv *priv;
893
894 CTCM_DBF_TEXT(TRACE, 5, __FUNCTION__);
895 priv = dev->priv;
896
897 if (skb == NULL) {
898 ctcm_pr_warn("%s: NULL sk_buff passed\n", dev->name);
899 priv->stats.tx_dropped++;
900 return 0;
901 }
902 if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
903 ctcm_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
904 dev->name, LL_HEADER_LENGTH + 2);
905 dev_kfree_skb(skb);
906 priv->stats.tx_dropped++;
907 return 0;
908 }
909
910 /*
911 * If channels are not running, try to restart them
912 * and throw away packet.
913 */
914 if (fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) {
915 fsm_event(priv->fsm, DEV_EVENT_START, dev);
916 dev_kfree_skb(skb);
917 priv->stats.tx_dropped++;
918 priv->stats.tx_errors++;
919 priv->stats.tx_carrier_errors++;
920 return 0;
921 }
922
923 if (ctcm_test_and_set_busy(dev))
924 return -EBUSY;
925
926 dev->trans_start = jiffies;
927 if (ctcm_transmit_skb(priv->channel[WRITE], skb) != 0)
928 rc = 1;
929 return rc;
930}
931
932/* unmerged MPC variant of ctcm_tx */
static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len = 0;
	struct ctcm_priv *priv = NULL;
	struct mpc_group *grp = NULL;
	struct sk_buff *newskb = NULL;

	/* NOTE: always returns 0 — the skb is consumed (sent or freed)
	 * on every path; errors are signalled via MPCG_EVENT_INOP. */
	if (do_debug)
		ctcm_pr_debug("ctcmpc enter: %s(): skb:%0lx\n",
			__FUNCTION__, (unsigned long)skb);

	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
			"ctcmpc enter: %s(): skb:%0lx\n",
			__FUNCTION__, (unsigned long)skb);

	priv = dev->priv;
	grp = priv->mpcg;
	/*
	 * Some sanity checks ...
	 */
	if (skb == NULL) {
		ctcm_pr_warn("ctcmpc: %s: NULL sk_buff passed\n", dev->name);
		priv->stats.tx_dropped++;
		goto done;
	}
	if (skb_headroom(skb) < (TH_HEADER_LENGTH + PDU_HEADER_LENGTH)) {
		/* Not enough headroom for TH + PDU headers: copy the
		 * payload into a new skb with sufficient reserve. */
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_WARN,
			"%s: Got sk_buff with head room < %ld bytes\n",
			dev->name, TH_HEADER_LENGTH + PDU_HEADER_LENGTH);

		if (do_debug_data)
			ctcmpc_dump32((char *)skb->data, skb->len);

		len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
		newskb = __dev_alloc_skb(len, gfp_type() | GFP_DMA);

		if (!newskb) {
			printk(KERN_WARNING "ctcmpc: %s() OUT OF MEMORY-"
						"Data Lost\n",
						__FUNCTION__);

			dev_kfree_skb_any(skb);
			priv->stats.tx_dropped++;
			priv->stats.tx_errors++;
			priv->stats.tx_carrier_errors++;
			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
			goto done;
		}
		newskb->protocol = skb->protocol;
		skb_reserve(newskb, TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
		memcpy(skb_put(newskb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = newskb;
	}

	/*
	 * If channels are not running,
	 * notify anybody about a link failure and throw
	 * away packet.
	 */
	if ((fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) ||
	   (fsm_getstate(grp->fsm) <  MPCG_STATE_XID2INITW)) {
		dev_kfree_skb_any(skb);
		printk(KERN_INFO "ctcmpc: %s() DATA RCVD - MPC GROUP "
					"NOT ACTIVE - DROPPED\n",
					__FUNCTION__);
		priv->stats.tx_dropped++;
		priv->stats.tx_errors++;
		priv->stats.tx_carrier_errors++;
		goto done;
	}

	if (ctcm_test_and_set_busy(dev)) {
		/* device already busy: packet is dropped, not requeued */
		printk(KERN_WARNING "%s:DEVICE ERR - UNRECOVERABLE DATA LOSS\n",
			__FUNCTION__);
		dev_kfree_skb_any(skb);
		priv->stats.tx_dropped++;
		priv->stats.tx_errors++;
		priv->stats.tx_carrier_errors++;
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}

	dev->trans_start = jiffies;
	if (ctcmpc_transmit_skb(priv->channel[WRITE], skb) != 0) {
		printk(KERN_WARNING "ctcmpc: %s() DEVICE ERROR"
					": Data Lost \n",
					__FUNCTION__);
		printk(KERN_WARNING "ctcmpc: %s() DEVICE ERROR"
				" - UNRECOVERABLE DATA LOSS\n",
					__FUNCTION__);
		dev_kfree_skb_any(skb);
		priv->stats.tx_dropped++;
		priv->stats.tx_errors++;
		priv->stats.tx_carrier_errors++;
		ctcm_clear_busy(dev);
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}
	ctcm_clear_busy(dev);
done:
	if (do_debug)
		MPC_DBF_DEV_NAME(TRACE, dev, "exit");

	return 0;	/* handle freeing of skb here */
}
1039
1040
1041/**
1042 * Sets MTU of an interface.
1043 *
1044 * dev Pointer to interface struct.
1045 * new_mtu The new MTU to use for this interface.
1046 *
1047 * returns 0 on success, -EINVAL if MTU is out of valid range.
1048 * (valid range is 576 .. 65527). If VM is on the
1049 * remote side, maximum MTU is 32760, however this is
1050 * not checked here.
1051 */
1052static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
1053{
1054 struct ctcm_priv *priv;
1055 int max_bufsize;
1056
1057 CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__);
1058
1059 if (new_mtu < 576 || new_mtu > 65527)
1060 return -EINVAL;
1061
1062 priv = dev->priv;
1063 max_bufsize = priv->channel[READ]->max_bufsize;
1064
1065 if (IS_MPC(priv)) {
1066 if (new_mtu > max_bufsize - TH_HEADER_LENGTH)
1067 return -EINVAL;
1068 dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
1069 } else {
1070 if (new_mtu > max_bufsize - LL_HEADER_LENGTH - 2)
1071 return -EINVAL;
1072 dev->hard_header_len = LL_HEADER_LENGTH + 2;
1073 }
1074 dev->mtu = new_mtu;
1075 return 0;
1076}
1077
1078/**
1079 * Returns interface statistics of a device.
1080 *
1081 * dev Pointer to interface struct.
1082 *
1083 * returns Pointer to stats struct of this interface.
1084 */
1085static struct net_device_stats *ctcm_stats(struct net_device *dev)
1086{
1087 return &((struct ctcm_priv *)dev->priv)->stats;
1088}
1089
1090
1091static void ctcm_netdev_unregister(struct net_device *dev)
1092{
1093 CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__);
1094 if (!dev)
1095 return;
1096 unregister_netdev(dev);
1097}
1098
/* Register the net_device with the kernel (thin wrapper for tracing). */
static int ctcm_netdev_register(struct net_device *dev)
{
	CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__);
	return register_netdev(dev);
}
1104
/*
 * Release all resources hung off a CTCM net_device: the MPC group
 * (fsm, XID skbs, tasklet), the device statemachine and the XID buffer.
 * The ctcm_priv itself is freed by remove_device (see note below).
 */
static void ctcm_free_netdevice(struct net_device *dev)
{
	struct ctcm_priv *priv;
	struct mpc_group *grp;

	CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__);

	if (!dev)
		return;
	priv = dev->priv;
	if (priv) {
		grp = priv->mpcg;
		if (grp) {
			if (grp->fsm)
				kfree_fsm(grp->fsm);
			if (grp->xid_skb)
				dev_kfree_skb(grp->xid_skb);
			if (grp->rcvd_xid_skb)
				dev_kfree_skb(grp->rcvd_xid_skb);
			tasklet_kill(&grp->mpc_tasklet2);
			kfree(grp);
			priv->mpcg = NULL;
		}
		if (priv->fsm) {
			kfree_fsm(priv->fsm);
			priv->fsm = NULL;
		}
		kfree(priv->xid);
		priv->xid = NULL;
		/*
		 * Note: kfree(priv); is done in "opposite" function of
		 * allocator function probe_device which is remove_device.
		 */
	}
#ifdef MODULE
	/* NOTE(review): when the driver is built-in (MODULE undefined) the
	 * net_device itself is never freed here — confirm this is
	 * intentional and not a leak. */
	free_netdev(dev);
#endif
}
1143
1144struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);
1145
1146void static ctcm_dev_setup(struct net_device *dev)
1147{
1148 dev->open = ctcm_open;
1149 dev->stop = ctcm_close;
1150 dev->get_stats = ctcm_stats;
1151 dev->change_mtu = ctcm_change_mtu;
1152 dev->type = ARPHRD_SLIP;
1153 dev->tx_queue_len = 100;
1154 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1155}
1156
1157/*
1158 * Initialize everything of the net device except the name and the
1159 * channel structs.
1160 */
1161static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
1162{
1163 struct net_device *dev;
1164 struct mpc_group *grp;
1165 if (!priv)
1166 return NULL;
1167
1168 if (IS_MPC(priv))
1169 dev = alloc_netdev(0, MPC_DEVICE_GENE, ctcm_dev_setup);
1170 else
1171 dev = alloc_netdev(0, CTC_DEVICE_GENE, ctcm_dev_setup);
1172
1173 if (!dev) {
1174 ctcm_pr_err("%s: Out of memory\n", __FUNCTION__);
1175 return NULL;
1176 }
1177 dev->priv = priv;
1178 priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names,
1179 CTCM_NR_DEV_STATES, CTCM_NR_DEV_EVENTS,
1180 dev_fsm, dev_fsm_len, GFP_KERNEL);
1181 if (priv->fsm == NULL) {
1182 CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
1183 kfree(dev);
1184 return NULL;
1185 }
1186 fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
1187 fsm_settimer(priv->fsm, &priv->restart_timer);
1188
1189 if (IS_MPC(priv)) {
1190 /* MPC Group Initializations */
1191 grp = ctcmpc_init_mpc_group(priv);
1192 if (grp == NULL) {
1193 MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
1194 kfree(dev);
1195 return NULL;
1196 }
1197 tasklet_init(&grp->mpc_tasklet2,
1198 mpc_group_ready, (unsigned long)dev);
1199 dev->mtu = MPC_BUFSIZE_DEFAULT -
1200 TH_HEADER_LENGTH - PDU_HEADER_LENGTH;
1201
1202 dev->hard_start_xmit = ctcmpc_tx;
1203 dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
1204 priv->buffer_size = MPC_BUFSIZE_DEFAULT;
1205 } else {
1206 dev->mtu = CTCM_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
1207 dev->hard_start_xmit = ctcm_tx;
1208 dev->hard_header_len = LL_HEADER_LENGTH + 2;
1209 }
1210
1211 CTCMY_DBF_DEV(SETUP, dev, "finished");
1212 return dev;
1213}
1214
1215/**
1216 * Main IRQ handler.
1217 *
1218 * cdev The ccw_device the interrupt is for.
1219 * intparm interruption parameter.
1220 * irb interruption response block.
1221 */
static void ctcm_irq_handler(struct ccw_device *cdev,
				unsigned long intparm, struct irb *irb)
{
	struct channel *ch;
	struct net_device *dev;
	struct ctcm_priv *priv;
	struct ccwgroup_device *cgdev;

	CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __FUNCTION__);
	if (ctcm_check_irb_error(cdev, irb))
		return;

	cgdev = dev_get_drvdata(&cdev->dev);

	/* Check for unsolicited interrupts. */
	if (cgdev == NULL) {
		ctcm_pr_warn("ctcm: Got unsolicited irq: %s c-%02x d-%02x\n",
			    cdev->dev.bus_id, irb->scsw.cstat,
			    irb->scsw.dstat);
		return;
	}

	priv = dev_get_drvdata(&cgdev->dev);

	/* Try to extract channel from driver data. */
	if (priv->channel[READ]->cdev == cdev)
		ch = priv->channel[READ];
	else if (priv->channel[WRITE]->cdev == cdev)
		ch = priv->channel[WRITE];
	else {
		ctcm_pr_err("ctcm: Can't determine channel for interrupt, "
			   "device %s\n", cdev->dev.bus_id);
		return;
	}

	dev = (struct net_device *)(ch->netdev);
	if (dev == NULL) {
		ctcm_pr_crit("ctcm: %s dev=NULL bus_id=%s, ch=0x%p\n",
				__FUNCTION__, cdev->dev.bus_id, ch);
		return;
	}

	if (do_debug)
		ctcm_pr_debug("%s: interrupt for device: %s "
				"received c-%02x d-%02x\n",
				dev->name,
				ch->id,
				irb->scsw.cstat,
				irb->scsw.dstat);

	/* Copy interruption response block. */
	memcpy(ch->irb, irb, sizeof(struct irb));

	/* Check for good subchannel return code, otherwise error message */
	if (irb->scsw.cstat) {
		/* channel/subchannel check: report it to the fsm and bail */
		fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch);
		ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n",
			    dev->name, ch->id, irb->scsw.cstat,
			    irb->scsw.dstat);
		return;
	}

	/* Check the reason-code of a unit check */
	if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
		/* sense data in ecw[0] selects the specific fsm event */
		ccw_unit_check(ch, irb->ecw[0]);
		return;
	}
	if (irb->scsw.dstat & DEV_STAT_BUSY) {
		/* busy with attention is a distinct event for the fsm */
		if (irb->scsw.dstat & DEV_STAT_ATTENTION)
			fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch);
		else
			fsm_event(ch->fsm, CTC_EVENT_BUSY, ch);
		return;
	}
	if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
		fsm_event(ch->fsm, CTC_EVENT_ATTN, ch);
		return;
	}
	/* final status (I/O complete) vs. intermediate interrupt */
	if ((irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
	    (irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
	    (irb->scsw.stctl ==
	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
		fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch);
	else
		fsm_event(ch->fsm, CTC_EVENT_IRQ, ch);

}
1309
1310/**
1311 * Add ctcm specific attributes.
1312 * Add ctcm private data.
1313 *
1314 * cgdev pointer to ccwgroup_device just added
1315 *
1316 * returns 0 on success, !0 on failure.
1317 */
1318static int ctcm_probe_device(struct ccwgroup_device *cgdev)
1319{
1320 struct ctcm_priv *priv;
1321 int rc;
1322
1323 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s %p", __FUNCTION__, cgdev);
1324
1325 if (!get_device(&cgdev->dev))
1326 return -ENODEV;
1327
1328 priv = kzalloc(sizeof(struct ctcm_priv), GFP_KERNEL);
1329 if (!priv) {
1330 ctcm_pr_err("%s: Out of memory\n", __FUNCTION__);
1331 put_device(&cgdev->dev);
1332 return -ENOMEM;
1333 }
1334
1335 rc = ctcm_add_files(&cgdev->dev);
1336 if (rc) {
1337 kfree(priv);
1338 put_device(&cgdev->dev);
1339 return rc;
1340 }
1341 priv->buffer_size = CTCM_BUFSIZE_DEFAULT;
1342 cgdev->cdev[0]->handler = ctcm_irq_handler;
1343 cgdev->cdev[1]->handler = ctcm_irq_handler;
1344 dev_set_drvdata(&cgdev->dev, priv);
1345
1346 return 0;
1347}
1348
1349/**
1350 * Add a new channel to the list of channels.
1351 * Keeps the channel list sorted.
1352 *
1353 * cdev The ccw_device to be added.
1354 * type The type class of the new channel.
1355 * priv Points to the private data of the ccwgroup_device.
1356 *
1357 * returns 0 on success, !0 on error.
1358 */
static int add_channel(struct ccw_device *cdev, enum channel_types type,
				struct ctcm_priv *priv)
{
	struct channel **c = &channels;	/* sorted singly-linked global list */
	struct channel *ch;
	int ccw_num;
	int rc = 0;

	CTCM_DBF_TEXT(TRACE, 2, __FUNCTION__);
	ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
	if (ch == NULL)
		goto nomem_return;

	ch->protocol = priv->protocol;
	if (IS_MPC(priv)) {
		/* MPC needs a discontact TH plus tasklets for the
		 * discontact send and the bottom half */
		ch->discontact_th = (struct th_header *)
				kzalloc(TH_HEADER_LENGTH, gfp_type());
		if (ch->discontact_th == NULL)
			goto nomem_return;

		ch->discontact_th->th_blk_flag = TH_DISCONTACT;
		tasklet_init(&ch->ch_disc_tasklet,
			mpc_action_send_discontact, (unsigned long)ch);

		tasklet_init(&ch->ch_tasklet, ctcmpc_bh, (unsigned long)ch);
		ch->max_bufsize = (MPC_BUFSIZE_DEFAULT - 35);
		ccw_num = 17;	/* see CCW layout comment below */
	} else
		ccw_num = 8;

	ch->ccw = (struct ccw1 *)
		kzalloc(ccw_num * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	if (ch->ccw == NULL)
		goto nomem_return;

	ch->cdev = cdev;
	snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", cdev->dev.bus_id);
	ch->type = type;

	/**
	 * "static" ccws are used in the following way:
	 *
	 * ccw[0..2] (Channel program for generic I/O):
	 *           0: prepare
	 *           1: read or write (depending on direction) with fixed
	 *              buffer (idal allocated once when buffer is allocated)
	 *           2: nop
	 * ccw[3..5] (Channel program for direct write of packets)
	 *           3: prepare
	 *           4: write (idal allocated on every write).
	 *           5: nop
	 * ccw[6..7] (Channel program for initial channel setup):
	 *           6: set extended mode
	 *           7: nop
	 *
	 * ch->ccw[0..5] are initialized in ch_action_start because
	 * the channel's direction is yet unknown here.
	 *
	 * ccws used for xid2 negotiations
	 *  ch-ccw[8-14] need to be used for the XID exchange either
	 *    X side XID2 Processing
	 *       8:  write control
	 *       9:  write th
	 *	     10: write XID
	 *	     11: read th from secondary
	 *	     12: read XID   from secondary
	 *	     13: read 4 byte ID
	 *	     14: nop
	 *    Y side XID Processing
	 *	     8:  sense
	 *       9:  read th
	 *	     10: read XID
	 *	     11: write th
	 *	     12: write XID
	 *	     13: write 4 byte ID
	 *	     14: nop
	 *
	 *  ccws used for double noop due to VM timing issues
	 *  which result in unrecoverable Busy on channel
	 *       15: nop
	 *       16: nop
	 */
	ch->ccw[6].cmd_code	= CCW_CMD_SET_EXTENDED;
	ch->ccw[6].flags	= CCW_FLAG_SLI;

	ch->ccw[7].cmd_code	= CCW_CMD_NOOP;
	ch->ccw[7].flags	= CCW_FLAG_SLI;

	if (IS_MPC(priv)) {
		ch->ccw[15].cmd_code = CCW_CMD_WRITE;
		ch->ccw[15].flags    = CCW_FLAG_SLI | CCW_FLAG_CC;
		ch->ccw[15].count    = TH_HEADER_LENGTH;
		ch->ccw[15].cda      = virt_to_phys(ch->discontact_th);

		ch->ccw[16].cmd_code = CCW_CMD_NOOP;
		ch->ccw[16].flags    = CCW_FLAG_SLI;

		ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
				ctc_ch_event_names, CTC_MPC_NR_STATES,
				CTC_MPC_NR_EVENTS, ctcmpc_ch_fsm,
				mpc_ch_fsm_len, GFP_KERNEL);
	} else {
		ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
				ctc_ch_event_names, CTC_NR_STATES,
				CTC_NR_EVENTS, ch_fsm,
				ch_fsm_len, GFP_KERNEL);
	}
	if (ch->fsm == NULL)
		goto free_return;

	fsm_newstate(ch->fsm, CTC_STATE_IDLE);

	ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
	if (ch->irb == NULL)
		goto nomem_return;

	/* find the sorted insertion point by channel id */
	while (*c && ctcm_less_than((*c)->id, ch->id))
		c = &(*c)->next;

	if (*c && (!strncmp((*c)->id, ch->id, CTCM_ID_SIZE))) {
		CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
				"%s (%s) already in list, using old entry",
				__FUNCTION__, (*c)->id);

		/* duplicate id: free the new channel and report success
		 * (rc is still 0 here) — the old entry is kept */
		goto free_return;
	}

	spin_lock_init(&ch->collect_lock);

	fsm_settimer(ch->fsm, &ch->timer);
	skb_queue_head_init(&ch->io_queue);
	skb_queue_head_init(&ch->collect_queue);

	if (IS_MPC(priv)) {
		fsm_settimer(ch->fsm, &ch->sweep_timer);
		skb_queue_head_init(&ch->sweep_queue);
	}
	/* splice the new channel into the sorted list */
	ch->next = *c;
	*c = ch;
	return 0;

nomem_return:
	ctcm_pr_warn("ctcm: Out of memory in %s\n", __FUNCTION__);
	rc = -ENOMEM;

free_return:	/* note that all channel pointers are 0 or valid */
	kfree(ch->ccw);		/* TODO: check that again */
	kfree(ch->discontact_th);
	kfree_fsm(ch->fsm);
	kfree(ch->irb);
	kfree(ch);
	return rc;
}
1512
1513/*
1514 * Return type of a detected device.
1515 */
1516static enum channel_types get_channel_type(struct ccw_device_id *id)
1517{
1518 enum channel_types type;
1519 type = (enum channel_types)id->driver_info;
1520
1521 if (type == channel_type_ficon)
1522 type = channel_type_escon;
1523
1524 return type;
1525}
1526
1527/**
1528 *
1529 * Setup an interface.
1530 *
1531 * cgdev Device to be setup.
1532 *
1533 * returns 0 on success, !0 on failure.
1534 */
static int ctcm_new_device(struct ccwgroup_device *cgdev)
{
	char read_id[CTCM_ID_SIZE];
	char write_id[CTCM_ID_SIZE];
	int direction;
	enum channel_types type;
	struct ctcm_priv *priv;
	struct net_device *dev;
	int ret;

	CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__);

	priv = dev_get_drvdata(&cgdev->dev);
	if (!priv)
		return -ENODEV;

	type = get_channel_type(&cgdev->cdev[0]->id);

	/* cdev[0] is the read channel, cdev[1] the write channel */
	snprintf(read_id, CTCM_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
	snprintf(write_id, CTCM_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);

	ret = add_channel(cgdev->cdev[0], type, priv);
	if (ret)
		return ret;
	/* NOTE(review): if this second add_channel() fails, the channel
	 * just added for cdev[0] is not removed — confirm whether the
	 * global channel list cleanup handles that elsewhere. */
	ret = add_channel(cgdev->cdev[1], type, priv);
	if (ret)
		return ret;

	/* set-online failures are only logged; setup continues */
	ret = ccw_device_set_online(cgdev->cdev[0]);
	if (ret != 0) {
		CTCM_DBF_TEXT(SETUP, CTC_DBF_WARN,
			"ccw_device_set_online (cdev[0]) failed ");
		ctcm_pr_warn("ccw_device_set_online (cdev[0]) failed "
			"with ret = %d\n", ret);
	}

	ret = ccw_device_set_online(cgdev->cdev[1]);
	if (ret != 0) {
		CTCM_DBF_TEXT(SETUP, CTC_DBF_WARN,
			"ccw_device_set_online (cdev[1]) failed ");
		ctcm_pr_warn("ccw_device_set_online (cdev[1]) failed "
			"with ret = %d\n", ret);
	}

	dev = ctcm_init_netdevice(priv);

	if (dev == NULL) {
		ctcm_pr_warn("ctcm_init_netdevice failed\n");
		goto out;
	}

	/* attach both channels (by id) to the new net_device */
	for (direction = READ; direction <= WRITE; direction++) {
		priv->channel[direction] =
		    channel_get(type, direction == READ ? read_id : write_id,
				direction);
		if (priv->channel[direction] == NULL) {
			if (direction == WRITE)
				channel_free(priv->channel[READ]);
			ctcm_free_netdevice(dev);
			goto out;
		}
		priv->channel[direction]->netdev = dev;
		priv->channel[direction]->protocol = priv->protocol;
		priv->channel[direction]->max_bufsize = priv->buffer_size;
	}
	/* sysfs magic */
	SET_NETDEV_DEV(dev, &cgdev->dev);

	if (ctcm_netdev_register(dev) != 0) {
		ctcm_free_netdevice(dev);
		goto out;
	}

	if (ctcm_add_attributes(&cgdev->dev)) {
		ctcm_netdev_unregister(dev);
/*		NOTE(review): older code set dev->priv = NULL here;
 *		the reason is unclear — left out on purpose */
		ctcm_free_netdevice(dev);
		goto out;
	}

	strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
		"setup(%s) ok : r/w = %s / %s, proto : %d",
			dev->name, priv->channel[READ]->id,
			priv->channel[WRITE]->id, priv->protocol);

	return 0;
out:
	ccw_device_set_offline(cgdev->cdev[1]);
	ccw_device_set_offline(cgdev->cdev[0]);

	return -ENODEV;
}
1629
/**
 * Shutdown an interface when the ccwgroup device is set offline.
 * Reverses ctcm_new_device: closes and unregisters the net_device,
 * frees both channels and takes the ccw devices offline.
 *
 * cgdev	Device to be shut down.
 *
 * returns 0 on success, !0 on failure.
 */
static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
{
	struct ctcm_priv *priv;
	struct net_device *dev;

	priv = dev_get_drvdata(&cgdev->dev);
	if (!priv)
		return -ENODEV;

	if (priv->channel[READ]) {
		dev = priv->channel[READ]->netdev;
		CTCM_DBF_DEV(SETUP, dev, "");
		/* Close the device */
		ctcm_close(dev);
		dev->flags &= ~IFF_RUNNING;
		ctcm_remove_attributes(&cgdev->dev);
		channel_free(priv->channel[READ]);
	} else
		dev = NULL;	/* no read channel -> nothing was registered */

	if (priv->channel[WRITE])
		channel_free(priv->channel[WRITE]);

	if (dev) {
		ctcm_netdev_unregister(dev);
		ctcm_free_netdevice(dev);
	}

	if (priv->fsm)
		kfree_fsm(priv->fsm);

	ccw_device_set_offline(cgdev->cdev[1]);
	ccw_device_set_offline(cgdev->cdev[0]);

	/*
	 * channels were only marked unused (channel_free) above; they are
	 * removed from the global list after the ccw devices are offline.
	 */
	if (priv->channel[READ])
		channel_remove(priv->channel[READ]);
	if (priv->channel[WRITE])
		channel_remove(priv->channel[WRITE]);
	priv->channel[READ] = priv->channel[WRITE] = NULL;

	return 0;

}
1681
1682
1683static void ctcm_remove_device(struct ccwgroup_device *cgdev)
1684{
1685 struct ctcm_priv *priv;
1686
1687 CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, __FUNCTION__);
1688
1689 priv = dev_get_drvdata(&cgdev->dev);
1690 if (!priv)
1691 return;
1692 if (cgdev->state == CCWGROUP_ONLINE)
1693 ctcm_shutdown_device(cgdev);
1694 ctcm_remove_files(&cgdev->dev);
1695 dev_set_drvdata(&cgdev->dev, NULL);
1696 kfree(priv);
1697 put_device(&cgdev->dev);
1698}
1699
/* ccwgroup glue: a group of exactly two ccw devices forms one interface */
static struct ccwgroup_driver ctcm_group_driver = {
	.owner       = THIS_MODULE,
	.name        = CTC_DRIVER_NAME,
	.max_slaves  = 2,		/* one read + one write channel */
	.driver_id   = 0xC3E3C3D4,	/* "CTCM" in EBCDIC */
	.probe       = ctcm_probe_device,
	.remove      = ctcm_remove_device,
	.set_online  = ctcm_new_device,
	.set_offline = ctcm_shutdown_device,
};
1710
1711
1712/*
1713 * Module related routines
1714 */
1715
/*
 * Prepare to be unloaded. Free IRQ's and release all resources.
 * This is called just before this module is unloaded. It is
 * not called, if the usage count is !0, so we don't need to check
 * for that.
 */
static void __exit ctcm_exit(void)
{
	/* stop accepting cu3088 devices before tearing down debug views */
	unregister_cu3088_discipline(&ctcm_group_driver);
	ctcm_unregister_dbf_views();
	ctcm_pr_info("CTCM driver unloaded\n");
}
1728
/*
 * Print Banner: announce successful initialization on the console.
 */
static void print_banner(void)
{
	printk(KERN_INFO "CTCM driver initialized\n");
}
1736
1737/**
1738 * Initialize module.
1739 * This is called just after the module is loaded.
1740 *
1741 * returns 0 on success, !0 on error.
1742 */
1743static int __init ctcm_init(void)
1744{
1745 int ret;
1746
1747 channels = NULL;
1748
1749 ret = ctcm_register_dbf_views();
1750 if (ret) {
1751 ctcm_pr_crit("ctcm_init failed with ctcm_register_dbf_views "
1752 "rc = %d\n", ret);
1753 return ret;
1754 }
1755 ret = register_cu3088_discipline(&ctcm_group_driver);
1756 if (ret) {
1757 ctcm_unregister_dbf_views();
1758 ctcm_pr_crit("ctcm_init failed with register_cu3088_discipline "
1759 "(rc = %d)\n", ret);
1760 return ret;
1761 }
1762 print_banner();
1763 return ret;
1764}
1765
1766module_init(ctcm_init);
1767module_exit(ctcm_exit);
1768
1769MODULE_AUTHOR("Peter Tiedemann <ptiedem@de.ibm.com>");
1770MODULE_DESCRIPTION("Network driver for S/390 CTC + CTCMPC (SNA)");
1771MODULE_LICENSE("GPL");
1772
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
new file mode 100644
index 000000000000..95b0c0b6ebc6
--- /dev/null
+++ b/drivers/s390/net/ctcm_main.h
@@ -0,0 +1,287 @@
1/*
2 * drivers/s390/net/ctcm_main.h
3 *
4 * Copyright IBM Corp. 2001, 2007
5 * Authors: Fritz Elfert (felfert@millenux.com)
6 * Peter Tiedemann (ptiedem@de.ibm.com)
7 */
8
9#ifndef _CTCM_MAIN_H_
10#define _CTCM_MAIN_H_
11
12#include <asm/ccwdev.h>
13#include <asm/ccwgroup.h>
14
15#include <linux/skbuff.h>
16#include <linux/netdevice.h>
17
18#include "fsm.h"
19#include "cu3088.h"
20#include "ctcm_dbug.h"
21#include "ctcm_mpc.h"
22
23#define CTC_DRIVER_NAME "ctcm"
24#define CTC_DEVICE_NAME "ctc"
25#define CTC_DEVICE_GENE "ctc%d"
26#define MPC_DEVICE_NAME "mpc"
27#define MPC_DEVICE_GENE "mpc%d"
28
29#define CHANNEL_FLAGS_READ 0
30#define CHANNEL_FLAGS_WRITE 1
31#define CHANNEL_FLAGS_INUSE 2
32#define CHANNEL_FLAGS_BUFSIZE_CHANGED 4
33#define CHANNEL_FLAGS_FAILED 8
34#define CHANNEL_FLAGS_WAITIRQ 16
35#define CHANNEL_FLAGS_RWMASK 1
36#define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK)
37
38#define LOG_FLAG_ILLEGALPKT 1
39#define LOG_FLAG_ILLEGALSIZE 2
40#define LOG_FLAG_OVERRUN 4
41#define LOG_FLAG_NOMEM 8
42
43#define ctcm_pr_debug(fmt, arg...) printk(KERN_DEBUG fmt, ##arg)
44#define ctcm_pr_info(fmt, arg...) printk(KERN_INFO fmt, ##arg)
45#define ctcm_pr_notice(fmt, arg...) printk(KERN_NOTICE fmt, ##arg)
46#define ctcm_pr_warn(fmt, arg...) printk(KERN_WARNING fmt, ##arg)
47#define ctcm_pr_emerg(fmt, arg...) printk(KERN_EMERG fmt, ##arg)
48#define ctcm_pr_err(fmt, arg...) printk(KERN_ERR fmt, ##arg)
49#define ctcm_pr_crit(fmt, arg...) printk(KERN_CRIT fmt, ##arg)
50
51/*
52 * CCW commands, used in this driver.
53 */
54#define CCW_CMD_WRITE 0x01
55#define CCW_CMD_READ 0x02
56#define CCW_CMD_NOOP 0x03
57#define CCW_CMD_TIC 0x08
58#define CCW_CMD_SENSE_CMD 0x14
59#define CCW_CMD_WRITE_CTL 0x17
60#define CCW_CMD_SET_EXTENDED 0xc3
61#define CCW_CMD_PREPARE 0xe3
62
63#define CTCM_PROTO_S390 0
64#define CTCM_PROTO_LINUX 1
65#define CTCM_PROTO_LINUX_TTY 2
66#define CTCM_PROTO_OS390 3
67#define CTCM_PROTO_MPC 4
68#define CTCM_PROTO_MAX 4
69
70#define CTCM_BUFSIZE_LIMIT 65535
71#define CTCM_BUFSIZE_DEFAULT 32768
72#define MPC_BUFSIZE_DEFAULT CTCM_BUFSIZE_LIMIT
73
74#define CTCM_TIME_1_SEC 1000
75#define CTCM_TIME_5_SEC 5000
76#define CTCM_TIME_10_SEC 10000
77
78#define CTCM_INITIAL_BLOCKLEN 2
79
80#define READ 0
81#define WRITE 1
82
83#define CTCM_ID_SIZE BUS_ID_SIZE+3
84
/*
 * Per-channel transmit profiling counters.
 * NOTE(review): field semantics inferred from names only - the code
 * updating these counters is outside this section; confirm there.
 */
struct ctcm_profile {
	unsigned long maxmulti;
	unsigned long maxcqueue;
	unsigned long doios_single;
	unsigned long doios_multi;
	unsigned long txlen;
	unsigned long tx_time;
	struct timespec send_stamp;
};
94
/*
 * Definition of one channel
 */
struct channel {
	struct channel *next;		/* link in the global 'channels' list */
	char id[CTCM_ID_SIZE];		/* "ch-<bus_id>" of the ccw device */
	struct ccw_device *cdev;
	/*
	 * Type of this channel.
	 * CTC/A or Escon for valid channels.
	 */
	enum channel_types type;
	/*
	 * Misc. flags. See CHANNEL_FLAGS_... below
	 */
	__u32 flags;
	__u16 protocol;		/* protocol of this channel (4 = MPC) */
	/*
	 * I/O and irq related stuff
	 */
	struct ccw1 *ccw;
	struct irb *irb;
	/*
	 * RX/TX buffer size
	 */
	int max_bufsize;
	struct sk_buff *trans_skb;	/* transmit/receive buffer */
	struct sk_buff_head io_queue;	/* universal I/O queue */
	struct tasklet_struct ch_tasklet;	/* MPC ONLY */
	/*
	 * TX queue for collecting skb's during busy.
	 */
	struct sk_buff_head collect_queue;
	/*
	 * Amount of data in collect_queue.
	 */
	int collect_len;
	/*
	 * spinlock for collect_queue and collect_len
	 */
	spinlock_t collect_lock;
	/*
	 * Timer for detecting unresponsive
	 * I/O operations.
	 */
	fsm_timer timer;
	/* MPC ONLY section begin */
	__u32 th_seq_num;	/* SNA TH seq number */
	__u8 th_seg;
	__u32 pdu_seq;
	struct sk_buff *xid_skb;
	char *xid_skb_data;
	struct th_header *xid_th;
	struct xid2 *xid;
	char *xid_id;
	struct th_header *rcvd_xid_th;
	struct xid2 *rcvd_xid;
	char *rcvd_xid_id;
	__u8 in_mpcgroup;	/* nonzero once part of an MPC group */
	fsm_timer sweep_timer;
	struct sk_buff_head sweep_queue;
	struct th_header *discontact_th;
	struct tasklet_struct ch_disc_tasklet;
	/* MPC ONLY section end */

	int retry;		/* retry counter for misc. operations */
	fsm_instance *fsm;	/* finite state machine of this channel */
	struct net_device *netdev;	/* corresponding net_device */
	struct ctcm_profile prof;
	unsigned char *trans_skb_data;
	__u16 logflags;		/* see LOG_FLAG_... above */
};
167
/* Per-interface private data hung off the net_device / ccwgroup device */
struct ctcm_priv {
	struct net_device_stats stats;
	unsigned long tbusy;	/* bit 0: TX busy (ctcm_test_and_set_busy) */

	/* The MPC group struct of this interface */
	struct mpc_group *mpcg;	/* MPC only */
	struct xid2 *xid;	/* MPC only */

	/* The finite state machine of this interface */
	fsm_instance *fsm;

	/* The protocol of this device */
	__u16 protocol;

	/* Timer for restarting after I/O Errors */
	fsm_timer restart_timer;

	int buffer_size;	/* ctc only */

	/* channel pair of this interface, indexed by READ / WRITE */
	struct channel *channel[2];
};
189
190int ctcm_open(struct net_device *dev);
191int ctcm_close(struct net_device *dev);
192
193/*
194 * prototypes for non-static sysfs functions
195 */
196int ctcm_add_attributes(struct device *dev);
197void ctcm_remove_attributes(struct device *dev);
198int ctcm_add_files(struct device *dev);
199void ctcm_remove_files(struct device *dev);
200
201/*
202 * Compatibility macros for busy handling
203 * of network devices.
204 */
205static inline void ctcm_clear_busy_do(struct net_device *dev)
206{
207 clear_bit(0, &(((struct ctcm_priv *)dev->priv)->tbusy));
208 netif_wake_queue(dev);
209}
210
211static inline void ctcm_clear_busy(struct net_device *dev)
212{
213 struct mpc_group *grp;
214 grp = ((struct ctcm_priv *)dev->priv)->mpcg;
215
216 if (!(grp && grp->in_sweep))
217 ctcm_clear_busy_do(dev);
218}
219
220
221static inline int ctcm_test_and_set_busy(struct net_device *dev)
222{
223 netif_stop_queue(dev);
224 return test_and_set_bit(0, &(((struct ctcm_priv *)dev->priv)->tbusy));
225}
226
227extern int loglevel;
228extern struct channel *channels;
229
230void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb);
231
232/*
233 * Functions related to setup and device detection.
234 */
235
/*
 * Order two channel ids ("ch-b.s.dddd") by their hex device number.
 * Returns nonzero when id1 sorts before id2.
 */
static inline int ctcm_less_than(char *id1, char *id2)
{
	unsigned long dev1, dev2;
	char *p1 = id1 + 5;	/* skip "ch-b.s." style prefix */
	char *p2 = id2 + 5;

	dev1 = simple_strtoul(p1, &p1, 16);
	dev2 = simple_strtoul(p2, &p2, 16);

	return dev1 < dev2;
}
248
249int ctcm_ch_alloc_buffer(struct channel *ch);
250
251static inline int ctcm_checkalloc_buffer(struct channel *ch)
252{
253 if (ch->trans_skb == NULL)
254 return ctcm_ch_alloc_buffer(ch);
255 if (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED) {
256 dev_kfree_skb(ch->trans_skb);
257 return ctcm_ch_alloc_buffer(ch);
258 }
259 return 0;
260}
261
262struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);
263
264/* test if protocol attribute (of struct ctcm_priv or struct channel)
265 * has MPC protocol setting. Type is not checked
266 */
267#define IS_MPC(p) ((p)->protocol == CTCM_PROTO_MPC)
268
269/* test if struct ctcm_priv of struct net_device has MPC protocol setting */
270#define IS_MPCDEV(d) IS_MPC((struct ctcm_priv *)d->priv)
271
272static inline gfp_t gfp_type(void)
273{
274 return in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
275}
276
/*
 * Definition of our link level header.
 */
struct ll_header {
	__u16 length;	/* frame length - whether it includes this header
			 * is set at the fill site; confirm there */
	__u16 type;
	__u16 unused;
};
#define LL_HEADER_LENGTH (sizeof(struct ll_header))
286
287#endif
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
new file mode 100644
index 000000000000..044addee64a2
--- /dev/null
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -0,0 +1,2472 @@
1/*
2 * drivers/s390/net/ctcm_mpc.c
3 *
4 * Copyright IBM Corp. 2004, 2007
5 * Authors: Belinda Thompson (belindat@us.ibm.com)
6 * Andy Richter (richtera@us.ibm.com)
7 * Peter Tiedemann (ptiedem@de.ibm.com)
8 */
9
10/*
11 This module exports functions to be used by CCS:
12 EXPORT_SYMBOL(ctc_mpc_alloc_channel);
13 EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
14 EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
15 EXPORT_SYMBOL(ctc_mpc_flow_control);
16*/
17
18#undef DEBUG
19#undef DEBUGDATA
20#undef DEBUGCCW
21
22#include <linux/version.h>
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/kernel.h>
26#include <linux/slab.h>
27#include <linux/errno.h>
28#include <linux/types.h>
29#include <linux/interrupt.h>
30#include <linux/timer.h>
31#include <linux/sched.h>
32
33#include <linux/signal.h>
34#include <linux/string.h>
35#include <linux/proc_fs.h>
36
37#include <linux/ip.h>
38#include <linux/if_arp.h>
39#include <linux/tcp.h>
40#include <linux/skbuff.h>
41#include <linux/ctype.h>
42#include <linux/netdevice.h>
43#include <net/dst.h>
44
45#include <linux/io.h> /* instead of <asm/io.h> ok ? */
46#include <asm/ccwdev.h>
47#include <asm/ccwgroup.h>
48#include <linux/bitops.h> /* instead of <asm/bitops.h> ok ? */
49#include <linux/uaccess.h> /* instead of <asm/uaccess.h> ok ? */
50#include <linux/wait.h>
51#include <linux/moduleparam.h>
52#include <asm/idals.h>
53
54#include "cu3088.h"
55#include "ctcm_mpc.h"
56#include "ctcm_main.h"
57#include "ctcm_fsms.h"
58
/* Template for the XID2 sent/compared during the MPC XID handshake. */
static const struct xid2 init_xid = {
	.xid2_type_id	=	XID_FM2,
	.xid2_len	=	0x45,
	.xid2_adj_id	=	0,
	.xid2_rlen	=	0x31,
	.xid2_resv1	=	0,
	.xid2_flag1	=	0,
	.xid2_fmtt	=	0,
	.xid2_flag4	=	0x80,
	.xid2_resv2	=	0,
	.xid2_tgnum	=	0,
	.xid2_sender_id	=	0,
	.xid2_flag2	=	0,
	.xid2_option	=	XID2_0,
	.xid2_resv3	=	"\x00",
	.xid2_resv4	=	0,
	.xid2_dlc_type	=	XID2_READ_SIDE,
	.xid2_resv5	=	0,
	.xid2_mpc_flag	=	0,
	.xid2_resv6	=	0,
	.xid2_buf_len	=	(MPC_BUFSIZE_DEFAULT - 35),
};
81
/* TH header template: flags mark the payload as XID data. */
static const struct th_header thnorm = {
	.th_seg		=	0x00,
	.th_ch_flag	=	TH_IS_XID,
	.th_blk_flag	=	TH_DATA_IS_XID,
	.th_is_xid	=	0x01,
	.th_seq_num	=	0x00000000,
};
89
/* TH header template like thnorm but without the TH_IS_XID ch_flag. */
static const struct th_header thdummy = {
	.th_seg		=	0x00,
	.th_ch_flag	=	0x00,
	.th_blk_flag	=	TH_DATA_IS_XID,
	.th_is_xid	=	0x01,
	.th_seq_num	=	0x00000000,
};
97
98/*
99 * Definition of one MPC group
100 */
101
102/*
103 * Compatibility macros for busy handling
104 * of network devices.
105 */
106
107static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb);
108
109/*
110 * MPC Group state machine actions (static prototypes)
111 */
112static void mpc_action_nop(fsm_instance *fsm, int event, void *arg);
113static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg);
114static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg);
115static void mpc_action_timeout(fsm_instance *fi, int event, void *arg);
116static int mpc_validate_xid(struct mpcg_info *mpcginfo);
117static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg);
118static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg);
119static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg);
120static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg);
121static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg);
122static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg);
123
124#ifdef DEBUGDATA
/*-------------------------------------------------------------------*
* Dump buffer format                                                 *
*                                                                    *
* Hex dump of 'len' bytes at 'buf', 16 per line, with address,       *
* offset, hex and ASCII columns; consecutive identical lines are     *
* collapsed into a "Duplicate as above" marker.                      *
*--------------------------------------------------------------------*/
void ctcmpc_dumpit(char *buf, int len)
{
	__u32 ct, sw, rm, dup;
	char *ptr, *rptr;
	char tbuf[82], tdup[82];
	/*
	 * NOTE(review): UTS_MACHINE and s390x are not defined as macros
	 * here, so "#if (UTS_MACHINE == s390x)" compares 0 == 0 and is
	 * always true - confirm the intended 31/64-bit distinction.
	 */
	#if (UTS_MACHINE == s390x)
	char addr[22];
	#else
	char addr[12];
	#endif
	char boff[12];
	char bhex[82], duphex[82];
	char basc[40];

	sw  = 0;
	rptr = ptr = buf;
	rm = 16;
	duphex[0] = 0x00;
	dup = 0;

	for (ct = 0; ct < len; ct++, ptr++, rptr++) {
		if (sw == 0) {
			/* start of a new 16-byte line */
			#if (UTS_MACHINE == s390x)
			sprintf(addr, "%16.16lx", (unsigned long)rptr);
			#else
			sprintf(addr, "%8.8X", (__u32)rptr);
			#endif

			sprintf(boff, "%4.4X", (__u32)ct);
			bhex[0] = '\0';
			basc[0] = '\0';
		}
		if ((sw == 4) || (sw == 12))
			strcat(bhex, " ");
		if (sw == 8)
			strcat(bhex, "	");

		#if (UTS_MACHINE == s390x)
		sprintf(tbuf, "%2.2lX", (unsigned long)*ptr);
		#else
		sprintf(tbuf, "%2.2X", (__u32)*ptr);
		#endif

		tbuf[2] = '\0';
		strcat(bhex, tbuf);
		/* printable characters go to the ASCII column, rest as '.' */
		if ((0 != isprint(*ptr)) && (*ptr >= 0x20))
			basc[sw] = *ptr;
		else
			basc[sw] = '.';

		basc[sw+1] = '\0';
		sw++;
		rm--;
		if (sw == 16) {
			/* line complete: suppress repeats of the same hex */
			if ((strcmp(duphex, bhex)) != 0) {
				if (dup != 0) {
					sprintf(tdup, "Duplicate as above "
						"to %s", addr);
					printk(KERN_INFO "		 "
						"	--- %s ---\n", tdup);
				}
				printk(KERN_INFO "   %s (+%s) : %s [%s]\n",
					addr, boff, bhex, basc);
				dup = 0;
				strcpy(duphex, bhex);
			} else
				dup++;

			sw = 0;
			rm = 16;
		}
	}	/* endfor */

	/* flush a partial last line, or report trailing duplicates */
	if (sw != 0) {
		for ( ; rm > 0; rm--, sw++) {
			if ((sw == 4) || (sw == 12))
				strcat(bhex, " ");
			if (sw == 8)
				strcat(bhex, "	");
			strcat(bhex, "  ");
			strcat(basc, " ");
		}
		if (dup != 0) {
			sprintf(tdup, "Duplicate as above to %s", addr);
			printk(KERN_INFO "		 "
				"	--- %s ---\n", tdup);
		}
		printk(KERN_INFO "   %s (+%s) : %s [%s]\n",
			addr, boff, bhex, basc);
	} else {
		if (dup >= 1) {
			sprintf(tdup, "Duplicate as above to %s", addr);
			printk(KERN_INFO "		 "
				"	--- %s ---\n", tdup);
		}
		if (dup != 0) {
			printk(KERN_INFO "   %s (+%s) : %s [%s]\n",
				addr, boff, bhex, basc);
		}
	}

	return;

}	/* end of ctcmpc_dumpit */
233#endif
234
235#ifdef DEBUGDATA
/*
 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
 *
 * skb	The sk_buff to dump.
 * offset	Offset relative to skb-data, where to start the dump.
 */
void ctcmpc_dump_skb(struct sk_buff *skb, int offset)
{
	unsigned char *p = skb->data;
	struct th_header *header;
	struct pdu *pheader;
	int bl = skb->len;
	int i;

	if (p == NULL)
		return;

	p += offset;
	header = (struct th_header *)p;

	printk(KERN_INFO "dump:\n");
	printk(KERN_INFO "skb len=%d \n", skb->len);
	if (skb->len > 2) {
		switch (header->th_ch_flag) {
		case TH_HAS_PDU:
			break;
		case 0x00:
			/* fallthrough */
		case TH_IS_XID:
			if ((header->th_blk_flag == TH_DATA_IS_XID) &&
			   (header->th_is_xid == 0x01))
				goto dumpth;
			/* fallthrough - not a proper XID frame */
		case TH_SWEEP_REQ:
			goto dumpth;
		case TH_SWEEP_RESP:
			goto dumpth;
		default:
			break;
		}

		/* not a TH control frame: dump as PDU */
		pheader = (struct pdu *)p;
		printk(KERN_INFO "pdu->offset: %d hex: %04x\n",
			pheader->pdu_offset, pheader->pdu_offset);
		printk(KERN_INFO "pdu->flag : %02x\n", pheader->pdu_flag);
		printk(KERN_INFO "pdu->proto : %02x\n", pheader->pdu_proto);
		printk(KERN_INFO "pdu->seq : %02x\n", pheader->pdu_seq);
		goto dumpdata;

dumpth:
		printk(KERN_INFO "th->seg : %02x\n", header->th_seg);
		printk(KERN_INFO "th->ch : %02x\n", header->th_ch_flag);
		printk(KERN_INFO "th->blk_flag: %02x\n", header->th_blk_flag);
		/*
		 * NOTE(review): labels look swapped - th_is_xid set prints
		 * "DATA"; confirm the intended mapping.
		 */
		printk(KERN_INFO "th->type : %s\n",
			(header->th_is_xid) ? "DATA" : "XID");
		printk(KERN_INFO "th->seqnum : %04x\n", header->th_seq_num);

	}
dumpdata:
	/* dump at most the first 32 data bytes */
	if (bl > 32)
		bl = 32;
	printk(KERN_INFO "data: ");
	for (i = 0; i < bl; i++)
		printk(KERN_INFO "%02x%s", *p++, (i % 16) ? " " : "\n<7>");
	printk(KERN_INFO "\n");
}
300#endif
301
/*
 * ctc_mpc_alloc_channel
 * (exported interface)
 *
 * Device Initialization :
 *	ACTPATH driven IO operations
 *
 * port_num	selects net_device "mpc<port_num>"
 * callback	invoked when the channels become usable
 *
 * returns 0 on success, 1 when the device or its MPC group is missing.
 */
int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int))
{
	char device[20];
	struct net_device *dev;
	struct mpc_group *grp;
	struct ctcm_priv *priv;

	ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__);

	sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num);
	dev = __dev_get_by_name(&init_net, device);

	if (dev == NULL) {
		printk(KERN_INFO "ctc_mpc_alloc_channel %s dev=NULL\n", device);
		return 1;
	}

	priv = dev->priv;
	grp = priv->mpcg;
	if (!grp)
		return 1;

	grp->allochanfunc = callback;
	grp->port_num = port_num;
	grp->port_persist = 1;

	ctcm_pr_debug("ctcmpc: %s called for device %s state=%s\n",
			__FUNCTION__,
			dev->name,
			fsm_getstate_str(grp->fsm));

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_INOP:
		/* Group is in the process of terminating */
		grp->alloc_called = 1;
		break;
	case MPCG_STATE_RESET:
		/* MPC Group will transition to state		  */
		/* MPCG_STATE_XID2INITW iff the minimum number	  */
		/* of 1 read and 1 write channel have successfully*/
		/* activated					  */
		/*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/
		if (callback)
			grp->send_qllc_disc = 1;
		/* fallthrough: proceed exactly like XID0IOWAIT */
	case MPCG_STATE_XID0IOWAIT:
		fsm_deltimer(&grp->timer);
		grp->outstanding_xid2 = 0;
		grp->outstanding_xid7 = 0;
		grp->outstanding_xid7_p2 = 0;
		grp->saved_xid2 = NULL;
		if (callback)
			ctcm_open(dev);
		fsm_event(priv->fsm, DEV_EVENT_START, dev);
		break;
	case MPCG_STATE_READY:
		/* XID exchanges completed after PORT was activated */
		/* Link station already active			    */
		/* Maybe timing issue...retry callback		    */
		grp->allocchan_callback_retries++;
		if (grp->allocchan_callback_retries < 4) {
			if (grp->allochanfunc)
				grp->allochanfunc(grp->port_num,
						  grp->group_max_buflen);
		} else {
			/* there are problems...bail out	     */
			/* there may be a state mismatch so restart */
			grp->port_persist = 1;
			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
			grp->allocchan_callback_retries = 0;
		}
		break;
	default:
		return 0;

	}

	ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__);
	return 0;
}
EXPORT_SYMBOL(ctc_mpc_alloc_channel);
388EXPORT_SYMBOL(ctc_mpc_alloc_channel);
389
/*
 * ctc_mpc_establish_connectivity
 * (exported interface)
 *
 * Kick off the active (x-side) XID exchange for port "mpc<port_num>".
 * callback(port, rc, max_buflen) reports the outcome; rc < 0 on failure.
 */
void ctc_mpc_establish_connectivity(int port_num,
				void (*callback)(int, int, int))
{
	char device[20];
	struct net_device *dev;
	struct mpc_group *grp;
	struct ctcm_priv *priv;
	struct channel *rch, *wch;

	ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__);

	sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num);
	dev = __dev_get_by_name(&init_net, device);

	if (dev == NULL) {
		printk(KERN_INFO "ctc_mpc_establish_connectivity "
			"%s dev=NULL\n", device);
		return;
	}
	priv = dev->priv;
	rch = priv->channel[READ];
	wch = priv->channel[WRITE];

	grp = priv->mpcg;

	ctcm_pr_debug("ctcmpc: %s() called for device %s state=%s\n",
			__FUNCTION__, dev->name,
			fsm_getstate_str(grp->fsm));

	grp->estconnfunc = callback;
	grp->port_num = port_num;

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_READY:
		/* XID exchanges completed after PORT was activated */
		/* Link station already active			    */
		/* Maybe timing issue...retry callback		    */
		fsm_deltimer(&grp->timer);
		grp->estconn_callback_retries++;
		if (grp->estconn_callback_retries < 4) {
			if (grp->estconnfunc) {
				grp->estconnfunc(grp->port_num, 0,
						grp->group_max_buflen);
				grp->estconnfunc = NULL;
			}
		} else {
			/* there are problems...bail out */
			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
			grp->estconn_callback_retries = 0;
		}
		break;
	case MPCG_STATE_INOP:
	case MPCG_STATE_RESET:
		/* MPC Group is not ready to start XID - min num of */
		/* 1 read and 1 write channel have not been acquired*/
		printk(KERN_WARNING "ctcmpc: %s() REJECTED ACTIVE XID Req"
			"uest - Channel Pair is not Active\n", __FUNCTION__);
		if (grp->estconnfunc) {
			grp->estconnfunc(grp->port_num, -1, 0);
			grp->estconnfunc = NULL;
		}
		break;
	case MPCG_STATE_XID2INITW:
		/* alloc channel was called but no XID exchange    */
		/* has occurred. initiate xside XID exchange	   */
		/* make sure yside XID0 processing has not started */
		if ((fsm_getstate(rch->fsm) > CH_XID0_PENDING) ||
			(fsm_getstate(wch->fsm) > CH_XID0_PENDING)) {
			printk(KERN_WARNING "mpc: %s() ABORT ACTIVE XID"
				" Request- PASSIVE XID in process\n"
				, __FUNCTION__);
			break;
		}
		grp->send_qllc_disc = 1;
		fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIT);
		fsm_deltimer(&grp->timer);
		fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
						MPCG_EVENT_TIMER, dev);
		grp->outstanding_xid7 = 0;
		grp->outstanding_xid7_p2 = 0;
		grp->saved_xid2 = NULL;
		/* XID0 must start on the read channel first, then write */
		if ((rch->in_mpcgroup) &&
				(fsm_getstate(rch->fsm) == CH_XID0_PENDING))
			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, rch);
		else {
			printk(KERN_WARNING "mpc: %s() Unable to start"
				" ACTIVE XID0 on read channel\n",
				__FUNCTION__);
			if (grp->estconnfunc) {
				grp->estconnfunc(grp->port_num, -1, 0);
				grp->estconnfunc = NULL;
			}
			fsm_deltimer(&grp->timer);
			goto done;
		}
		if ((wch->in_mpcgroup) &&
				(fsm_getstate(wch->fsm) == CH_XID0_PENDING))
			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, wch);
		else {
			printk(KERN_WARNING "mpc: %s() Unable to start"
				" ACTIVE XID0 on write channel\n",
				__FUNCTION__);
			if (grp->estconnfunc) {
				grp->estconnfunc(grp->port_num, -1, 0);
				grp->estconnfunc = NULL;
			}
			fsm_deltimer(&grp->timer);
			goto done;
		}
		break;
	case MPCG_STATE_XID0IOWAIT:
		/* already in active XID negotiations */
	default:
		break;
	}

done:
	ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__);
	return;
}
EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
514EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
515
/*
 * ctc_mpc_dealloc_ch
 * (exported interface)
 *
 * Tear down port "mpc<port_num>": drop callbacks, stop timers,
 * take the MPC group INOP and close the net_device.
 */
void ctc_mpc_dealloc_ch(int port_num)
{
	struct net_device *dev;
	char device[20];
	struct ctcm_priv *priv;
	struct mpc_group *grp;

	ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__);
	sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num);
	dev = __dev_get_by_name(&init_net, device);

	if (dev == NULL) {
		printk(KERN_INFO "%s() %s dev=NULL\n", __FUNCTION__, device);
		goto done;
	}

	ctcm_pr_debug("ctcmpc:%s %s() called for device %s refcount=%d\n",
			dev->name, __FUNCTION__,
			dev->name, atomic_read(&dev->refcnt));

	priv = dev->priv;
	if (priv == NULL) {
		printk(KERN_INFO "%s() %s priv=NULL\n",
			__FUNCTION__, device);
		goto done;
	}
	fsm_deltimer(&priv->restart_timer);

	grp = priv->mpcg;
	if (grp == NULL) {
		/* NOTE(review): message says dev=NULL, but it is grp
		 * that is NULL here - misleading log text */
		printk(KERN_INFO "%s() %s dev=NULL\n", __FUNCTION__, device);
		goto done;
	}
	grp->channels_terminating = 0;

	fsm_deltimer(&grp->timer);

	/* forget callbacks; do not auto-restart this port */
	grp->allochanfunc = NULL;
	grp->estconnfunc = NULL;
	grp->port_persist = 0;
	grp->send_qllc_disc = 0;
	fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);

	ctcm_close(dev);
done:
	ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__);
	return;
}
EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
568EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
569
/*
 * ctc_mpc_flow_control
 * (exported interface)
 *
 * flowc == 1 : stop the flow (group READY -> FLOWC)
 * flowc == 0 : resume the flow (group FLOWC -> READY) and reschedule
 *		the read channel's tasklet to drain the io_queue.
 */
void ctc_mpc_flow_control(int port_num, int flowc)
{
	char device[20];
	struct ctcm_priv *priv;
	struct mpc_group *grp;
	struct net_device *dev;
	struct channel *rch;
	int mpcg_state;

	ctcm_pr_debug("ctcmpc enter: %s() %i\n", __FUNCTION__, flowc);

	sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num);
	dev = __dev_get_by_name(&init_net, device);

	if (dev == NULL) {
		printk(KERN_INFO "ctc_mpc_flow_control %s dev=NULL\n", device);
		return;
	}

	ctcm_pr_debug("ctcmpc: %s %s called \n", dev->name, __FUNCTION__);

	priv = dev->priv;
	if (priv == NULL) {
		printk(KERN_INFO "ctcmpc:%s() %s priv=NULL\n",
		       __FUNCTION__, device);
		return;
	}
	grp = priv->mpcg;
	rch = priv->channel[READ];

	mpcg_state = fsm_getstate(grp->fsm);
	switch (flowc) {
	case 1:
		if (mpcg_state == MPCG_STATE_FLOWC)
			break;	/* already stopped */
		if (mpcg_state == MPCG_STATE_READY) {
			if (grp->flow_off_called == 1)
				grp->flow_off_called = 0;
			else
				fsm_newstate(grp->fsm, MPCG_STATE_FLOWC);
			break;
		}
		break;
	case 0:
		if (mpcg_state == MPCG_STATE_FLOWC) {
			fsm_newstate(grp->fsm, MPCG_STATE_READY);
			/* ensure any data that has accumulated */
			/* on the io_queue will now be sent	*/
			tasklet_schedule(&rch->ch_tasklet);
		}
		/* possible race condition			*/
		if (mpcg_state == MPCG_STATE_READY) {
			grp->flow_off_called = 1;
			break;
		}
		break;
	}

	ctcm_pr_debug("ctcmpc exit: %s() %i\n", __FUNCTION__, flowc);
}
EXPORT_SYMBOL(ctc_mpc_flow_control);
634EXPORT_SYMBOL(ctc_mpc_flow_control);
635
636static int mpc_send_qllc_discontact(struct net_device *);
637
638/*
639 * helper function of ctcmpc_unpack_skb
640*/
641static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
642{
643 struct channel *rch = mpcginfo->ch;
644 struct net_device *dev = rch->netdev;
645 struct ctcm_priv *priv = dev->priv;
646 struct mpc_group *grp = priv->mpcg;
647 struct channel *ch = priv->channel[WRITE];
648
649 if (do_debug)
650 ctcm_pr_debug("ctcmpc enter: %s(): ch=0x%p id=%s\n",
651 __FUNCTION__, ch, ch->id);
652
653 if (do_debug_data)
654 ctcmpc_dumpit((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
655
656 grp->sweep_rsp_pend_num--;
657
658 if ((grp->sweep_req_pend_num == 0) &&
659 (grp->sweep_rsp_pend_num == 0)) {
660 fsm_deltimer(&ch->sweep_timer);
661 grp->in_sweep = 0;
662 rch->th_seq_num = 0x00;
663 ch->th_seq_num = 0x00;
664 ctcm_clear_busy_do(dev);
665 }
666
667 kfree(mpcginfo);
668
669 return;
670
671}
672
673/*
674 * helper function of mpc_rcvd_sweep_req
675 * which is a helper of ctcmpc_unpack_skb
676 */
677static void ctcmpc_send_sweep_resp(struct channel *rch)
678{
679 struct net_device *dev = rch->netdev;
680 struct ctcm_priv *priv = dev->priv;
681 struct mpc_group *grp = priv->mpcg;
682 int rc = 0;
683 struct th_sweep *header;
684 struct sk_buff *sweep_skb;
685 struct channel *ch = priv->channel[WRITE];
686
687 if (do_debug)
688 ctcm_pr_debug("ctcmpc exit : %s(): ch=0x%p id=%s\n",
689 __FUNCTION__, rch, rch->id);
690
691 sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT,
692 GFP_ATOMIC|GFP_DMA);
693 if (sweep_skb == NULL) {
694 printk(KERN_INFO "Couldn't alloc sweep_skb\n");
695 rc = -ENOMEM;
696 goto done;
697 }
698
699 header = (struct th_sweep *)
700 kmalloc(sizeof(struct th_sweep), gfp_type());
701
702 if (!header) {
703 dev_kfree_skb_any(sweep_skb);
704 rc = -ENOMEM;
705 goto done;
706 }
707
708 header->th.th_seg = 0x00 ;
709 header->th.th_ch_flag = TH_SWEEP_RESP;
710 header->th.th_blk_flag = 0x00;
711 header->th.th_is_xid = 0x00;
712 header->th.th_seq_num = 0x00;
713 header->sw.th_last_seq = ch->th_seq_num;
714
715 memcpy(skb_put(sweep_skb, TH_SWEEP_LENGTH), header, TH_SWEEP_LENGTH);
716
717 kfree(header);
718
719 dev->trans_start = jiffies;
720 skb_queue_tail(&ch->sweep_queue, sweep_skb);
721
722 fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
723
724 return;
725
726done:
727 if (rc != 0) {
728 grp->in_sweep = 0;
729 ctcm_clear_busy_do(dev);
730 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
731 }
732
733 return;
734}
735
736/*
737 * helper function of ctcmpc_unpack_skb
738 */
739static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
740{
741 struct channel *rch = mpcginfo->ch;
742 struct net_device *dev = rch->netdev;
743 struct ctcm_priv *priv = dev->priv;
744 struct mpc_group *grp = priv->mpcg;
745 struct channel *ch = priv->channel[WRITE];
746
747 if (do_debug)
748 CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
749 " %s(): ch=0x%p id=%s\n", __FUNCTION__, ch, ch->id);
750
751 if (grp->in_sweep == 0) {
752 grp->in_sweep = 1;
753 ctcm_test_and_set_busy(dev);
754 grp->sweep_req_pend_num = grp->active_channels[READ];
755 grp->sweep_rsp_pend_num = grp->active_channels[READ];
756 }
757
758 if (do_debug_data)
759 ctcmpc_dumpit((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
760
761 grp->sweep_req_pend_num--;
762 ctcmpc_send_sweep_resp(ch);
763 kfree(mpcginfo);
764 return;
765}
766
767/*
768 * MPC Group Station FSM definitions
769 */
770static const char *mpcg_event_names[] = {
771 [MPCG_EVENT_INOP] = "INOP Condition",
772 [MPCG_EVENT_DISCONC] = "Discontact Received",
773 [MPCG_EVENT_XID0DO] = "Channel Active - Start XID",
774 [MPCG_EVENT_XID2] = "XID2 Received",
775 [MPCG_EVENT_XID2DONE] = "XID0 Complete",
776 [MPCG_EVENT_XID7DONE] = "XID7 Complete",
777 [MPCG_EVENT_TIMER] = "XID Setup Timer",
778 [MPCG_EVENT_DOIO] = "XID DoIO",
779};
780
/* Printable names for MPC group FSM states, indexed by MPCG_STATE_*. */
static const char *mpcg_state_names[] = {
	[MPCG_STATE_RESET]	= "Reset",
	[MPCG_STATE_INOP]	= "INOP",
	[MPCG_STATE_XID2INITW]	= "Passive XID- XID0 Pending Start",
	[MPCG_STATE_XID2INITX]	= "Passive XID- XID0 Pending Complete",
	[MPCG_STATE_XID7INITW]	= "Passive XID- XID7 Pending P1 Start",
	[MPCG_STATE_XID7INITX]	= "Passive XID- XID7 Pending P2 Complete",
	[MPCG_STATE_XID0IOWAIT]	= "Active XID- XID0 Pending Start",
	[MPCG_STATE_XID0IOWAIX]	= "Active XID- XID0 Pending Complete",
	[MPCG_STATE_XID7INITI]	= "Active XID- XID7 Pending Start",
	[MPCG_STATE_XID7INITZ]	= "Active XID- XID7 Pending Complete ",
	[MPCG_STATE_XID7INITF]	= "XID - XID7 Complete ",
	[MPCG_STATE_FLOWC]	= "FLOW CONTROL ON",
	[MPCG_STATE_READY]	= "READY",
};
796
797/*
798 * The MPC Group Station FSM
799 * 22 events
800 */
801static const fsm_node mpcg_fsm[] = {
802 { MPCG_STATE_RESET, MPCG_EVENT_INOP, mpc_action_go_inop },
803 { MPCG_STATE_INOP, MPCG_EVENT_INOP, mpc_action_nop },
804 { MPCG_STATE_FLOWC, MPCG_EVENT_INOP, mpc_action_go_inop },
805
806 { MPCG_STATE_READY, MPCG_EVENT_DISCONC, mpc_action_discontact },
807 { MPCG_STATE_READY, MPCG_EVENT_INOP, mpc_action_go_inop },
808
809 { MPCG_STATE_XID2INITW, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
810 { MPCG_STATE_XID2INITW, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
811 { MPCG_STATE_XID2INITW, MPCG_EVENT_INOP, mpc_action_go_inop },
812 { MPCG_STATE_XID2INITW, MPCG_EVENT_TIMER, mpc_action_timeout },
813 { MPCG_STATE_XID2INITW, MPCG_EVENT_DOIO, mpc_action_yside_xid },
814
815 { MPCG_STATE_XID2INITX, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
816 { MPCG_STATE_XID2INITX, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
817 { MPCG_STATE_XID2INITX, MPCG_EVENT_INOP, mpc_action_go_inop },
818 { MPCG_STATE_XID2INITX, MPCG_EVENT_TIMER, mpc_action_timeout },
819 { MPCG_STATE_XID2INITX, MPCG_EVENT_DOIO, mpc_action_yside_xid },
820
821 { MPCG_STATE_XID7INITW, MPCG_EVENT_XID2DONE, mpc_action_doxid7 },
822 { MPCG_STATE_XID7INITW, MPCG_EVENT_DISCONC, mpc_action_discontact },
823 { MPCG_STATE_XID7INITW, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
824 { MPCG_STATE_XID7INITW, MPCG_EVENT_INOP, mpc_action_go_inop },
825 { MPCG_STATE_XID7INITW, MPCG_EVENT_TIMER, mpc_action_timeout },
826 { MPCG_STATE_XID7INITW, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
827 { MPCG_STATE_XID7INITW, MPCG_EVENT_DOIO, mpc_action_yside_xid },
828
829 { MPCG_STATE_XID7INITX, MPCG_EVENT_DISCONC, mpc_action_discontact },
830 { MPCG_STATE_XID7INITX, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
831 { MPCG_STATE_XID7INITX, MPCG_EVENT_INOP, mpc_action_go_inop },
832 { MPCG_STATE_XID7INITX, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
833 { MPCG_STATE_XID7INITX, MPCG_EVENT_TIMER, mpc_action_timeout },
834 { MPCG_STATE_XID7INITX, MPCG_EVENT_DOIO, mpc_action_yside_xid },
835
836 { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
837 { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DISCONC, mpc_action_discontact },
838 { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
839 { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_INOP, mpc_action_go_inop },
840 { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_TIMER, mpc_action_timeout },
841 { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DOIO, mpc_action_xside_xid },
842
843 { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
844 { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DISCONC, mpc_action_discontact },
845 { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
846 { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_INOP, mpc_action_go_inop },
847 { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_TIMER, mpc_action_timeout },
848 { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DOIO, mpc_action_xside_xid },
849
850 { MPCG_STATE_XID7INITI, MPCG_EVENT_XID2DONE, mpc_action_doxid7 },
851 { MPCG_STATE_XID7INITI, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
852 { MPCG_STATE_XID7INITI, MPCG_EVENT_DISCONC, mpc_action_discontact },
853 { MPCG_STATE_XID7INITI, MPCG_EVENT_INOP, mpc_action_go_inop },
854 { MPCG_STATE_XID7INITI, MPCG_EVENT_TIMER, mpc_action_timeout },
855 { MPCG_STATE_XID7INITI, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
856 { MPCG_STATE_XID7INITI, MPCG_EVENT_DOIO, mpc_action_xside_xid },
857
858 { MPCG_STATE_XID7INITZ, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
859 { MPCG_STATE_XID7INITZ, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
860 { MPCG_STATE_XID7INITZ, MPCG_EVENT_DISCONC, mpc_action_discontact },
861 { MPCG_STATE_XID7INITZ, MPCG_EVENT_INOP, mpc_action_go_inop },
862 { MPCG_STATE_XID7INITZ, MPCG_EVENT_TIMER, mpc_action_timeout },
863 { MPCG_STATE_XID7INITZ, MPCG_EVENT_DOIO, mpc_action_xside_xid },
864
865 { MPCG_STATE_XID7INITF, MPCG_EVENT_INOP, mpc_action_go_inop },
866 { MPCG_STATE_XID7INITF, MPCG_EVENT_XID7DONE, mpc_action_go_ready },
867};
868
869static int mpcg_fsm_len = ARRAY_SIZE(mpcg_fsm);
870
871/*
872 * MPC Group Station FSM action
873 * CTCM_PROTO_MPC only
874 */
875static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg)
876{
877 struct net_device *dev = arg;
878 struct ctcm_priv *priv = NULL;
879 struct mpc_group *grp = NULL;
880
881 if (dev == NULL) {
882 printk(KERN_INFO "%s() dev=NULL\n", __FUNCTION__);
883 return;
884 }
885
886 ctcm_pr_debug("ctcmpc enter: %s %s()\n", dev->name, __FUNCTION__);
887
888 priv = dev->priv;
889 if (priv == NULL) {
890 printk(KERN_INFO "%s() priv=NULL\n", __FUNCTION__);
891 return;
892 }
893
894 grp = priv->mpcg;
895 if (grp == NULL) {
896 printk(KERN_INFO "%s() grp=NULL\n", __FUNCTION__);
897 return;
898 }
899
900 fsm_deltimer(&grp->timer);
901
902 if (grp->saved_xid2->xid2_flag2 == 0x40) {
903 priv->xid->xid2_flag2 = 0x00;
904 if (grp->estconnfunc) {
905 grp->estconnfunc(grp->port_num, 1,
906 grp->group_max_buflen);
907 grp->estconnfunc = NULL;
908 } else if (grp->allochanfunc)
909 grp->send_qllc_disc = 1;
910 goto done;
911 }
912
913 grp->port_persist = 1;
914 grp->out_of_sequence = 0;
915 grp->estconn_called = 0;
916
917 tasklet_hi_schedule(&grp->mpc_tasklet2);
918
919 ctcm_pr_debug("ctcmpc exit: %s %s()\n", dev->name, __FUNCTION__);
920 return;
921
922done:
923 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
924
925
926 ctcm_pr_info("ctcmpc: %s()failure occurred\n", __FUNCTION__);
927}
928
929/*
930 * helper of ctcm_init_netdevice
931 * CTCM_PROTO_MPC only
932 */
933void mpc_group_ready(unsigned long adev)
934{
935 struct net_device *dev = (struct net_device *)adev;
936 struct ctcm_priv *priv = NULL;
937 struct mpc_group *grp = NULL;
938 struct channel *ch = NULL;
939
940
941 ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__);
942
943 if (dev == NULL) {
944 printk(KERN_INFO "%s() dev=NULL\n", __FUNCTION__);
945 return;
946 }
947
948 priv = dev->priv;
949 if (priv == NULL) {
950 printk(KERN_INFO "%s() priv=NULL\n", __FUNCTION__);
951 return;
952 }
953
954 grp = priv->mpcg;
955 if (grp == NULL) {
956 printk(KERN_INFO "ctcmpc:%s() grp=NULL\n", __FUNCTION__);
957 return;
958 }
959
960 printk(KERN_NOTICE "ctcmpc: %s GROUP TRANSITIONED TO READY"
961 " maxbuf:%d\n",
962 dev->name, grp->group_max_buflen);
963
964 fsm_newstate(grp->fsm, MPCG_STATE_READY);
965
966 /* Put up a read on the channel */
967 ch = priv->channel[READ];
968 ch->pdu_seq = 0;
969 if (do_debug_data)
970 ctcm_pr_debug("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" ,
971 __FUNCTION__, ch->pdu_seq);
972
973 ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch);
974 /* Put the write channel in idle state */
975 ch = priv->channel[WRITE];
976 if (ch->collect_len > 0) {
977 spin_lock(&ch->collect_lock);
978 ctcm_purge_skb_queue(&ch->collect_queue);
979 ch->collect_len = 0;
980 spin_unlock(&ch->collect_lock);
981 }
982 ctcm_chx_txidle(ch->fsm, CTC_EVENT_START, ch);
983
984 ctcm_clear_busy(dev);
985
986 if (grp->estconnfunc) {
987 grp->estconnfunc(grp->port_num, 0,
988 grp->group_max_buflen);
989 grp->estconnfunc = NULL;
990 } else
991 if (grp->allochanfunc)
992 grp->allochanfunc(grp->port_num,
993 grp->group_max_buflen);
994
995 grp->send_qllc_disc = 1;
996 grp->changed_side = 0;
997
998 ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__);
999 return;
1000
1001}
1002
1003/*
1004 * Increment the MPC Group Active Channel Counts
1005 * helper of dev_action (called from channel fsm)
1006 */
1007int mpc_channel_action(struct channel *ch, int direction, int action)
1008{
1009 struct net_device *dev = ch->netdev;
1010 struct ctcm_priv *priv;
1011 struct mpc_group *grp = NULL;
1012 int rc = 0;
1013
1014 if (do_debug)
1015 ctcm_pr_debug("ctcmpc enter: %s(): ch=0x%p id=%s\n",
1016 __FUNCTION__, ch, ch->id);
1017
1018 if (dev == NULL) {
1019 printk(KERN_INFO "ctcmpc_channel_action %i dev=NULL\n",
1020 action);
1021 rc = 1;
1022 goto done;
1023 }
1024
1025 priv = dev->priv;
1026 if (priv == NULL) {
1027 printk(KERN_INFO
1028 "ctcmpc_channel_action%i priv=NULL, dev=%s\n",
1029 action, dev->name);
1030 rc = 2;
1031 goto done;
1032 }
1033
1034 grp = priv->mpcg;
1035
1036 if (grp == NULL) {
1037 printk(KERN_INFO "ctcmpc: %s()%i mpcgroup=NULL, dev=%s\n",
1038 __FUNCTION__, action, dev->name);
1039 rc = 3;
1040 goto done;
1041 }
1042
1043 ctcm_pr_info(
1044 "ctcmpc: %s() %i(): Grp:%s total_channel_paths=%i "
1045 "active_channels read=%i, write=%i\n",
1046 __FUNCTION__,
1047 action,
1048 fsm_getstate_str(grp->fsm),
1049 grp->num_channel_paths,
1050 grp->active_channels[READ],
1051 grp->active_channels[WRITE]);
1052
1053 if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) {
1054 grp->num_channel_paths++;
1055 grp->active_channels[direction]++;
1056 grp->outstanding_xid2++;
1057 ch->in_mpcgroup = 1;
1058
1059 if (ch->xid_skb != NULL)
1060 dev_kfree_skb_any(ch->xid_skb);
1061
1062 ch->xid_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT,
1063 GFP_ATOMIC | GFP_DMA);
1064 if (ch->xid_skb == NULL) {
1065 printk(KERN_INFO "ctcmpc: %s()"
1066 "Couldn't alloc ch xid_skb\n", __FUNCTION__);
1067 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1068 return 1;
1069 }
1070 ch->xid_skb_data = ch->xid_skb->data;
1071 ch->xid_th = (struct th_header *)ch->xid_skb->data;
1072 skb_put(ch->xid_skb, TH_HEADER_LENGTH);
1073 ch->xid = (struct xid2 *)skb_tail_pointer(ch->xid_skb);
1074 skb_put(ch->xid_skb, XID2_LENGTH);
1075 ch->xid_id = skb_tail_pointer(ch->xid_skb);
1076 ch->xid_skb->data = ch->xid_skb_data;
1077 skb_reset_tail_pointer(ch->xid_skb);
1078 ch->xid_skb->len = 0;
1079
1080 memcpy(skb_put(ch->xid_skb, grp->xid_skb->len),
1081 grp->xid_skb->data,
1082 grp->xid_skb->len);
1083
1084 ch->xid->xid2_dlc_type = ((CHANNEL_DIRECTION(ch->flags) == READ)
1085 ? XID2_READ_SIDE : XID2_WRITE_SIDE);
1086
1087 if (CHANNEL_DIRECTION(ch->flags) == WRITE)
1088 ch->xid->xid2_buf_len = 0x00;
1089
1090 ch->xid_skb->data = ch->xid_skb_data;
1091 skb_reset_tail_pointer(ch->xid_skb);
1092 ch->xid_skb->len = 0;
1093
1094 fsm_newstate(ch->fsm, CH_XID0_PENDING);
1095
1096 if ((grp->active_channels[READ] > 0) &&
1097 (grp->active_channels[WRITE] > 0) &&
1098 (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
1099 fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
1100 printk(KERN_NOTICE "ctcmpc: %s MPC GROUP "
1101 "CHANNELS ACTIVE\n", dev->name);
1102 }
1103 } else if ((action == MPC_CHANNEL_REMOVE) &&
1104 (ch->in_mpcgroup == 1)) {
1105 ch->in_mpcgroup = 0;
1106 grp->num_channel_paths--;
1107 grp->active_channels[direction]--;
1108
1109 if (ch->xid_skb != NULL)
1110 dev_kfree_skb_any(ch->xid_skb);
1111 ch->xid_skb = NULL;
1112
1113 if (grp->channels_terminating)
1114 goto done;
1115
1116 if (((grp->active_channels[READ] == 0) &&
1117 (grp->active_channels[WRITE] > 0))
1118 || ((grp->active_channels[WRITE] == 0) &&
1119 (grp->active_channels[READ] > 0)))
1120 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1121 }
1122
1123done:
1124
1125 if (do_debug) {
1126 ctcm_pr_debug(
1127 "ctcmpc: %s() %i Grp:%s ttl_chan_paths=%i "
1128 "active_chans read=%i, write=%i\n",
1129 __FUNCTION__,
1130 action,
1131 fsm_getstate_str(grp->fsm),
1132 grp->num_channel_paths,
1133 grp->active_channels[READ],
1134 grp->active_channels[WRITE]);
1135
1136 ctcm_pr_debug("ctcmpc exit : %s(): ch=0x%p id=%s\n",
1137 __FUNCTION__, ch, ch->id);
1138 }
1139 return rc;
1140
1141}
1142
1143/**
1144 * Unpack a just received skb and hand it over to
1145 * upper layers.
1146 * special MPC version of unpack_skb.
1147 *
1148 * ch The channel where this skb has been received.
1149 * pskb The received skb.
1150 */
1151static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
1152{
1153 struct net_device *dev = ch->netdev;
1154 struct ctcm_priv *priv = dev->priv;
1155 struct mpc_group *grp = priv->mpcg;
1156 struct pdu *curr_pdu;
1157 struct mpcg_info *mpcginfo;
1158 struct th_header *header = NULL;
1159 struct th_sweep *sweep = NULL;
1160 int pdu_last_seen = 0;
1161 __u32 new_len;
1162 struct sk_buff *skb;
1163 int skblen;
1164 int sendrc = 0;
1165
1166 if (do_debug)
1167 ctcm_pr_debug("ctcmpc enter: %s() %s cp:%i ch:%s\n",
1168 __FUNCTION__, dev->name, smp_processor_id(), ch->id);
1169
1170 header = (struct th_header *)pskb->data;
1171 if ((header->th_seg == 0) &&
1172 (header->th_ch_flag == 0) &&
1173 (header->th_blk_flag == 0) &&
1174 (header->th_seq_num == 0))
1175 /* nothing for us */ goto done;
1176
1177 if (do_debug_data) {
1178 ctcm_pr_debug("ctcmpc: %s() th_header\n", __FUNCTION__);
1179 ctcmpc_dumpit((char *)header, TH_HEADER_LENGTH);
1180 ctcm_pr_debug("ctcmpc: %s() pskb len: %04x \n",
1181 __FUNCTION__, pskb->len);
1182 }
1183
1184 pskb->dev = dev;
1185 pskb->ip_summed = CHECKSUM_UNNECESSARY;
1186 skb_pull(pskb, TH_HEADER_LENGTH);
1187
1188 if (likely(header->th_ch_flag == TH_HAS_PDU)) {
1189 if (do_debug_data)
1190 ctcm_pr_debug("ctcmpc: %s() came into th_has_pdu\n",
1191 __FUNCTION__);
1192 if ((fsm_getstate(grp->fsm) == MPCG_STATE_FLOWC) ||
1193 ((fsm_getstate(grp->fsm) == MPCG_STATE_READY) &&
1194 (header->th_seq_num != ch->th_seq_num + 1) &&
1195 (ch->th_seq_num != 0))) {
1196 /* This is NOT the next segment *
1197 * we are not the correct race winner *
1198 * go away and let someone else win *
1199 * BUT..this only applies if xid negot *
1200 * is done *
1201 */
1202 grp->out_of_sequence += 1;
1203 __skb_push(pskb, TH_HEADER_LENGTH);
1204 skb_queue_tail(&ch->io_queue, pskb);
1205 if (do_debug_data)
1206 ctcm_pr_debug("ctcmpc: %s() th_seq_num "
1207 "expect:%08x got:%08x\n", __FUNCTION__,
1208 ch->th_seq_num + 1, header->th_seq_num);
1209
1210 return;
1211 }
1212 grp->out_of_sequence = 0;
1213 ch->th_seq_num = header->th_seq_num;
1214
1215 if (do_debug_data)
1216 ctcm_pr_debug("ctcmpc: %s() FromVTAM_th_seq=%08x\n",
1217 __FUNCTION__, ch->th_seq_num);
1218
1219 if (unlikely(fsm_getstate(grp->fsm) != MPCG_STATE_READY))
1220 goto done;
1221 pdu_last_seen = 0;
1222 while ((pskb->len > 0) && !pdu_last_seen) {
1223 curr_pdu = (struct pdu *)pskb->data;
1224 if (do_debug_data) {
1225 ctcm_pr_debug("ctcm: %s() pdu_header\n",
1226 __FUNCTION__);
1227 ctcmpc_dumpit((char *)pskb->data,
1228 PDU_HEADER_LENGTH);
1229 ctcm_pr_debug("ctcm: %s() pskb len: %04x \n",
1230 __FUNCTION__, pskb->len);
1231 }
1232 skb_pull(pskb, PDU_HEADER_LENGTH);
1233
1234 if (curr_pdu->pdu_flag & PDU_LAST)
1235 pdu_last_seen = 1;
1236 if (curr_pdu->pdu_flag & PDU_CNTL)
1237 pskb->protocol = htons(ETH_P_SNAP);
1238 else
1239 pskb->protocol = htons(ETH_P_SNA_DIX);
1240
1241 if ((pskb->len <= 0) || (pskb->len > ch->max_bufsize)) {
1242 printk(KERN_INFO
1243 "%s Illegal packet size %d "
1244 "received "
1245 "dropping\n", dev->name,
1246 pskb->len);
1247 priv->stats.rx_dropped++;
1248 priv->stats.rx_length_errors++;
1249 goto done;
1250 }
1251 skb_reset_mac_header(pskb);
1252 new_len = curr_pdu->pdu_offset;
1253 if (do_debug_data)
1254 ctcm_pr_debug("ctcmpc: %s() new_len: %04x \n",
1255 __FUNCTION__, new_len);
1256 if ((new_len == 0) || (new_len > pskb->len)) {
1257 /* should never happen */
1258 /* pskb len must be hosed...bail out */
1259 printk(KERN_INFO
1260 "ctcmpc: %s(): invalid pdu"
1261 " offset of %04x - data may be"
1262 "lost\n", __FUNCTION__, new_len);
1263 goto done;
1264 }
1265 skb = __dev_alloc_skb(new_len+4, GFP_ATOMIC);
1266
1267 if (!skb) {
1268 printk(KERN_INFO
1269 "ctcm: %s Out of memory in "
1270 "%s()- request-len:%04x \n",
1271 dev->name,
1272 __FUNCTION__,
1273 new_len+4);
1274 priv->stats.rx_dropped++;
1275 fsm_event(grp->fsm,
1276 MPCG_EVENT_INOP, dev);
1277 goto done;
1278 }
1279
1280 memcpy(skb_put(skb, new_len),
1281 pskb->data, new_len);
1282
1283 skb_reset_mac_header(skb);
1284 skb->dev = pskb->dev;
1285 skb->protocol = pskb->protocol;
1286 skb->ip_summed = CHECKSUM_UNNECESSARY;
1287 *((__u32 *) skb_push(skb, 4)) = ch->pdu_seq;
1288 ch->pdu_seq++;
1289
1290 if (do_debug_data)
1291 ctcm_pr_debug("%s: ToDCM_pdu_seq= %08x\n",
1292 __FUNCTION__, ch->pdu_seq);
1293
1294 ctcm_pr_debug("ctcm: %s() skb:%0lx "
1295 "skb len: %d \n", __FUNCTION__,
1296 (unsigned long)skb, skb->len);
1297 if (do_debug_data) {
1298 ctcm_pr_debug("ctcmpc: %s() up to 32 bytes"
1299 " of pdu_data sent\n",
1300 __FUNCTION__);
1301 ctcmpc_dump32((char *)skb->data, skb->len);
1302 }
1303
1304 skblen = skb->len;
1305 sendrc = netif_rx(skb);
1306 priv->stats.rx_packets++;
1307 priv->stats.rx_bytes += skblen;
1308 skb_pull(pskb, new_len); /* point to next PDU */
1309 }
1310 } else {
1311 mpcginfo = (struct mpcg_info *)
1312 kmalloc(sizeof(struct mpcg_info), gfp_type());
1313 if (mpcginfo == NULL)
1314 goto done;
1315
1316 mpcginfo->ch = ch;
1317 mpcginfo->th = header;
1318 mpcginfo->skb = pskb;
1319 ctcm_pr_debug("ctcmpc: %s() Not PDU - may be control pkt\n",
1320 __FUNCTION__);
1321 /* it's a sweep? */
1322 sweep = (struct th_sweep *)pskb->data;
1323 mpcginfo->sweep = sweep;
1324 if (header->th_ch_flag == TH_SWEEP_REQ)
1325 mpc_rcvd_sweep_req(mpcginfo);
1326 else if (header->th_ch_flag == TH_SWEEP_RESP)
1327 mpc_rcvd_sweep_resp(mpcginfo);
1328 else if (header->th_blk_flag == TH_DATA_IS_XID) {
1329 struct xid2 *thisxid = (struct xid2 *)pskb->data;
1330 skb_pull(pskb, XID2_LENGTH);
1331 mpcginfo->xid = thisxid;
1332 fsm_event(grp->fsm, MPCG_EVENT_XID2, mpcginfo);
1333 } else if (header->th_blk_flag == TH_DISCONTACT)
1334 fsm_event(grp->fsm, MPCG_EVENT_DISCONC, mpcginfo);
1335 else if (header->th_seq_num != 0) {
1336 printk(KERN_INFO "%s unexpected packet"
1337 " expected control pkt\n", dev->name);
1338 priv->stats.rx_dropped++;
1339 /* mpcginfo only used for non-data transfers */
1340 kfree(mpcginfo);
1341 if (do_debug_data)
1342 ctcmpc_dump_skb(pskb, -8);
1343 }
1344 }
1345done:
1346
1347 dev_kfree_skb_any(pskb);
1348 if (sendrc == NET_RX_DROP) {
1349 printk(KERN_WARNING "%s %s() NETWORK BACKLOG EXCEEDED"
1350 " - PACKET DROPPED\n", dev->name, __FUNCTION__);
1351 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1352 }
1353
1354 if (do_debug)
1355 ctcm_pr_debug("ctcmpc exit : %s %s(): ch=0x%p id=%s\n",
1356 dev->name, __FUNCTION__, ch, ch->id);
1357}
1358
1359/**
1360 * tasklet helper for mpc's skb unpacking.
1361 *
1362 * ch The channel to work on.
1363 * Allow flow control back pressure to occur here.
1364 * Throttling back channel can result in excessive
1365 * channel inactivity and system deact of channel
1366 */
1367void ctcmpc_bh(unsigned long thischan)
1368{
1369 struct channel *ch = (struct channel *)thischan;
1370 struct sk_buff *skb;
1371 struct net_device *dev = ch->netdev;
1372 struct ctcm_priv *priv = dev->priv;
1373 struct mpc_group *grp = priv->mpcg;
1374
1375 if (do_debug)
1376 ctcm_pr_debug("%s cp:%i enter: %s() %s\n",
1377 dev->name, smp_processor_id(), __FUNCTION__, ch->id);
1378 /* caller has requested driver to throttle back */
1379 while ((fsm_getstate(grp->fsm) != MPCG_STATE_FLOWC) &&
1380 (skb = skb_dequeue(&ch->io_queue))) {
1381 ctcmpc_unpack_skb(ch, skb);
1382 if (grp->out_of_sequence > 20) {
1383 /* assume data loss has occurred if */
1384 /* missing seq_num for extended */
1385 /* period of time */
1386 grp->out_of_sequence = 0;
1387 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1388 break;
1389 }
1390 if (skb == skb_peek(&ch->io_queue))
1391 break;
1392 }
1393 if (do_debug)
1394 ctcm_pr_debug("ctcmpc exit : %s %s(): ch=0x%p id=%s\n",
1395 dev->name, __FUNCTION__, ch, ch->id);
1396 return;
1397}
1398
1399/*
1400 * MPC Group Initializations
1401 */
1402struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv)
1403{
1404 struct mpc_group *grp;
1405
1406 CTCM_DBF_TEXT(MPC_SETUP, 3, __FUNCTION__);
1407
1408 grp = kzalloc(sizeof(struct mpc_group), GFP_KERNEL);
1409 if (grp == NULL)
1410 return NULL;
1411
1412 grp->fsm =
1413 init_fsm("mpcg", mpcg_state_names, mpcg_event_names,
1414 MPCG_NR_STATES, MPCG_NR_EVENTS, mpcg_fsm,
1415 mpcg_fsm_len, GFP_KERNEL);
1416 if (grp->fsm == NULL) {
1417 kfree(grp);
1418 return NULL;
1419 }
1420
1421 fsm_newstate(grp->fsm, MPCG_STATE_RESET);
1422 fsm_settimer(grp->fsm, &grp->timer);
1423
1424 grp->xid_skb =
1425 __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA);
1426 if (grp->xid_skb == NULL) {
1427 printk(KERN_INFO "Couldn't alloc MPCgroup xid_skb\n");
1428 kfree_fsm(grp->fsm);
1429 kfree(grp);
1430 return NULL;
1431 }
1432 /* base xid for all channels in group */
1433 grp->xid_skb_data = grp->xid_skb->data;
1434 grp->xid_th = (struct th_header *)grp->xid_skb->data;
1435 memcpy(skb_put(grp->xid_skb, TH_HEADER_LENGTH),
1436 &thnorm, TH_HEADER_LENGTH);
1437
1438 grp->xid = (struct xid2 *) skb_tail_pointer(grp->xid_skb);
1439 memcpy(skb_put(grp->xid_skb, XID2_LENGTH), &init_xid, XID2_LENGTH);
1440 grp->xid->xid2_adj_id = jiffies | 0xfff00000;
1441 grp->xid->xid2_sender_id = jiffies;
1442
1443 grp->xid_id = skb_tail_pointer(grp->xid_skb);
1444 memcpy(skb_put(grp->xid_skb, 4), "VTAM", 4);
1445
1446 grp->rcvd_xid_skb =
1447 __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
1448 if (grp->rcvd_xid_skb == NULL) {
1449 printk(KERN_INFO "Couldn't alloc MPCgroup rcvd_xid_skb\n");
1450 kfree_fsm(grp->fsm);
1451 dev_kfree_skb(grp->xid_skb);
1452 kfree(grp);
1453 return NULL;
1454 }
1455 grp->rcvd_xid_data = grp->rcvd_xid_skb->data;
1456 grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
1457 memcpy(skb_put(grp->rcvd_xid_skb, TH_HEADER_LENGTH),
1458 &thnorm, TH_HEADER_LENGTH);
1459 grp->saved_xid2 = NULL;
1460 priv->xid = grp->xid;
1461 priv->mpcg = grp;
1462 return grp;
1463}
1464
1465/*
1466 * The MPC Group Station FSM
1467 */
1468
1469/*
1470 * MPC Group Station FSM actions
1471 * CTCM_PROTO_MPC only
1472 */
1473
1474/**
1475 * NOP action for statemachines
1476 */
1477static void mpc_action_nop(fsm_instance *fi, int event, void *arg)
1478{
1479}
1480
1481/*
1482 * invoked when the device transitions to dev_stopped
1483 * MPC will stop each individual channel if a single XID failure
1484 * occurs, or will intitiate all channels be stopped if a GROUP
1485 * level failure occurs.
1486 */
1487static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
1488{
1489 struct net_device *dev = arg;
1490 struct ctcm_priv *priv;
1491 struct mpc_group *grp;
1492 int rc = 0;
1493 struct channel *wch, *rch;
1494
1495 if (dev == NULL) {
1496 printk(KERN_INFO "%s() dev=NULL\n", __FUNCTION__);
1497 return;
1498 }
1499
1500 ctcm_pr_debug("ctcmpc enter: %s %s()\n", dev->name, __FUNCTION__);
1501
1502 priv = dev->priv;
1503 grp = priv->mpcg;
1504 grp->flow_off_called = 0;
1505
1506 fsm_deltimer(&grp->timer);
1507
1508 if (grp->channels_terminating)
1509 goto done;
1510
1511 grp->channels_terminating = 1;
1512
1513 grp->saved_state = fsm_getstate(grp->fsm);
1514 fsm_newstate(grp->fsm, MPCG_STATE_INOP);
1515 if (grp->saved_state > MPCG_STATE_XID7INITF)
1516 printk(KERN_NOTICE "%s:MPC GROUP INOPERATIVE\n", dev->name);
1517 if ((grp->saved_state != MPCG_STATE_RESET) ||
1518 /* dealloc_channel has been called */
1519 ((grp->saved_state == MPCG_STATE_RESET) &&
1520 (grp->port_persist == 0)))
1521 fsm_deltimer(&priv->restart_timer);
1522
1523 wch = priv->channel[WRITE];
1524 rch = priv->channel[READ];
1525
1526 switch (grp->saved_state) {
1527 case MPCG_STATE_RESET:
1528 case MPCG_STATE_INOP:
1529 case MPCG_STATE_XID2INITW:
1530 case MPCG_STATE_XID0IOWAIT:
1531 case MPCG_STATE_XID2INITX:
1532 case MPCG_STATE_XID7INITW:
1533 case MPCG_STATE_XID7INITX:
1534 case MPCG_STATE_XID0IOWAIX:
1535 case MPCG_STATE_XID7INITI:
1536 case MPCG_STATE_XID7INITZ:
1537 case MPCG_STATE_XID7INITF:
1538 break;
1539 case MPCG_STATE_FLOWC:
1540 case MPCG_STATE_READY:
1541 default:
1542 tasklet_hi_schedule(&wch->ch_disc_tasklet);
1543 }
1544
1545 grp->xid2_tgnum = 0;
1546 grp->group_max_buflen = 0; /*min of all received */
1547 grp->outstanding_xid2 = 0;
1548 grp->outstanding_xid7 = 0;
1549 grp->outstanding_xid7_p2 = 0;
1550 grp->saved_xid2 = NULL;
1551 grp->xidnogood = 0;
1552 grp->changed_side = 0;
1553
1554 grp->rcvd_xid_skb->data = grp->rcvd_xid_data;
1555 skb_reset_tail_pointer(grp->rcvd_xid_skb);
1556 grp->rcvd_xid_skb->len = 0;
1557 grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
1558 memcpy(skb_put(grp->rcvd_xid_skb, TH_HEADER_LENGTH), &thnorm,
1559 TH_HEADER_LENGTH);
1560
1561 if (grp->send_qllc_disc == 1) {
1562 grp->send_qllc_disc = 0;
1563 rc = mpc_send_qllc_discontact(dev);
1564 }
1565
1566 /* DO NOT issue DEV_EVENT_STOP directly out of this code */
1567 /* This can result in INOP of VTAM PU due to halting of */
1568 /* outstanding IO which causes a sense to be returned */
1569 /* Only about 3 senses are allowed and then IOS/VTAM will*/
1570 /* ebcome unreachable without manual intervention */
1571 if ((grp->port_persist == 1) || (grp->alloc_called)) {
1572 grp->alloc_called = 0;
1573 fsm_deltimer(&priv->restart_timer);
1574 fsm_addtimer(&priv->restart_timer,
1575 500,
1576 DEV_EVENT_RESTART,
1577 dev);
1578 fsm_newstate(grp->fsm, MPCG_STATE_RESET);
1579 if (grp->saved_state > MPCG_STATE_XID7INITF)
1580 printk(KERN_NOTICE "%s:MPC GROUP RECOVERY SCHEDULED\n",
1581 dev->name);
1582 } else {
1583 fsm_deltimer(&priv->restart_timer);
1584 fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_STOP, dev);
1585 fsm_newstate(grp->fsm, MPCG_STATE_RESET);
1586 printk(KERN_NOTICE "%s:MPC GROUP RECOVERY NOT ATTEMPTED\n",
1587 dev->name);
1588 }
1589
1590done:
1591 ctcm_pr_debug("ctcmpc exit:%s %s()\n", dev->name, __FUNCTION__);
1592 return;
1593}
1594
1595/**
1596 * Handle mpc group action timeout.
1597 * MPC Group Station FSM action
1598 * CTCM_PROTO_MPC only
1599 *
1600 * fi An instance of an mpc_group fsm.
1601 * event The event, just happened.
1602 * arg Generic pointer, casted from net_device * upon call.
1603 */
1604static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
1605{
1606 struct net_device *dev = arg;
1607 struct ctcm_priv *priv;
1608 struct mpc_group *grp;
1609 struct channel *wch;
1610 struct channel *rch;
1611
1612 CTCM_DBF_TEXT(MPC_TRACE, 6, __FUNCTION__);
1613
1614 if (dev == NULL) {
1615 CTCM_DBF_TEXT_(MPC_ERROR, 4, "%s: dev=NULL\n", __FUNCTION__);
1616 return;
1617 }
1618
1619 priv = dev->priv;
1620 grp = priv->mpcg;
1621 wch = priv->channel[WRITE];
1622 rch = priv->channel[READ];
1623
1624 switch (fsm_getstate(grp->fsm)) {
1625 case MPCG_STATE_XID2INITW:
1626 /* Unless there is outstanding IO on the */
1627 /* channel just return and wait for ATTN */
1628 /* interrupt to begin XID negotiations */
1629 if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) &&
1630 (fsm_getstate(wch->fsm) == CH_XID0_PENDING))
1631 break;
1632 default:
1633 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1634 }
1635
1636 CTCM_DBF_TEXT_(MPC_TRACE, 6, "%s: dev=%s exit",
1637 __FUNCTION__, dev->name);
1638 return;
1639}
1640
1641/*
1642 * MPC Group Station FSM action
1643 * CTCM_PROTO_MPC only
1644 */
1645void mpc_action_discontact(fsm_instance *fi, int event, void *arg)
1646{
1647 struct mpcg_info *mpcginfo = arg;
1648 struct channel *ch = mpcginfo->ch;
1649 struct net_device *dev = ch->netdev;
1650 struct ctcm_priv *priv = dev->priv;
1651 struct mpc_group *grp = priv->mpcg;
1652
1653 if (ch == NULL) {
1654 printk(KERN_INFO "%s() ch=NULL\n", __FUNCTION__);
1655 return;
1656 }
1657 if (ch->netdev == NULL) {
1658 printk(KERN_INFO "%s() dev=NULL\n", __FUNCTION__);
1659 return;
1660 }
1661
1662 ctcm_pr_debug("ctcmpc enter: %s %s()\n", dev->name, __FUNCTION__);
1663
1664 grp->send_qllc_disc = 1;
1665 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1666
1667 ctcm_pr_debug("ctcmpc exit: %s %s()\n", dev->name, __FUNCTION__);
1668 return;
1669}
1670
1671/*
1672 * MPC Group Station - not part of FSM
1673 * CTCM_PROTO_MPC only
1674 * called from add_channel in ctcm_main.c
1675 */
1676void mpc_action_send_discontact(unsigned long thischan)
1677{
1678 struct channel *ch;
1679 struct net_device *dev;
1680 struct ctcm_priv *priv;
1681 struct mpc_group *grp;
1682 int rc = 0;
1683 unsigned long saveflags;
1684
1685 ch = (struct channel *)thischan;
1686 dev = ch->netdev;
1687 priv = dev->priv;
1688 grp = priv->mpcg;
1689
1690 ctcm_pr_info("ctcmpc: %s cp:%i enter: %s() GrpState:%s ChState:%s\n",
1691 dev->name,
1692 smp_processor_id(),
1693 __FUNCTION__,
1694 fsm_getstate_str(grp->fsm),
1695 fsm_getstate_str(ch->fsm));
1696 saveflags = 0; /* avoids compiler warning with
1697 spin_unlock_irqrestore */
1698
1699 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1700 rc = ccw_device_start(ch->cdev, &ch->ccw[15],
1701 (unsigned long)ch, 0xff, 0);
1702 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1703
1704 if (rc != 0) {
1705 ctcm_pr_info("ctcmpc: %s() ch:%s IO failed \n",
1706 __FUNCTION__,
1707 ch->id);
1708 ctcm_ccw_check_rc(ch, rc, "send discontact");
1709 /* Not checking return code value here */
1710 /* Making best effort to notify partner*/
1711 /* that MPC Group is going down */
1712 }
1713
1714 ctcm_pr_debug("ctcmpc exit: %s %s()\n", dev->name, __FUNCTION__);
1715 return;
1716}
1717
1718
1719/*
1720 * helper function of mpc FSM
1721 * CTCM_PROTO_MPC only
1722 * mpc_action_rcvd_xid7
1723*/
/*
 * Validate a received XID2 record against our own XID and (if present)
 * the first XID saved for this group.
 *
 * mpcginfo - per-message context: receiving channel and received xid2.
 * Returns 0 if the XID is acceptable, 1 if it is rejected (or if
 * mpcginfo->xid is NULL).  On rejection, xid2_flag2 of both our own and
 * the saved XID is set to 0x40 so the reject is reflected to the partner.
 */
static int mpc_validate_xid(struct mpcg_info *mpcginfo)
{
	/* NOTE(review): ch/dev/priv are dereferenced unconditionally;
	 * assumes mpcginfo->ch and ch->netdev are valid here -- confirm
	 * against all callers. */
	struct channel *ch = mpcginfo->ch;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *grp = priv->mpcg;
	struct xid2 *xid = mpcginfo->xid;
	int failed = 0;
	int rc = 0;
	__u64 our_id, their_id = 0;
	int len;

	/* header overhead deducted from the partner's advertised buffer */
	len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;

	ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__);

	if (mpcginfo->xid == NULL) {
		printk(KERN_INFO "%s() xid=NULL\n", __FUNCTION__);
		rc = 1;
		goto done;
	}

	ctcm_pr_debug("ctcmpc : %s xid received()\n", __FUNCTION__);
	ctcmpc_dumpit((char *)mpcginfo->xid, XID2_LENGTH);

	/*the received direction should be the opposite of ours */
	if (((CHANNEL_DIRECTION(ch->flags) == READ) ? XID2_WRITE_SIDE :
				XID2_READ_SIDE) != xid->xid2_dlc_type) {
		failed = 1;
		printk(KERN_INFO "ctcmpc:%s() XID REJECTED - READ-WRITE CH "
			"Pairing Invalid \n", __FUNCTION__);
	}

	if (xid->xid2_dlc_type == XID2_READ_SIDE) {
		ctcm_pr_debug("ctcmpc: %s(): grpmaxbuf:%d xid2buflen:%d\n",
				__FUNCTION__, grp->group_max_buflen,
				xid->xid2_buf_len);

		/* keep the smallest usable payload seen so far:
		 * partner's buffer length minus TH+PDU header overhead */
		if (grp->group_max_buflen == 0 ||
			grp->group_max_buflen > xid->xid2_buf_len - len)
			grp->group_max_buflen = xid->xid2_buf_len - len;
	}


	if (grp->saved_xid2 == NULL) {
		/* first XID of this exchange: save a copy in rcvd_xid_skb
		 * and decide which side we play from the id comparison */
		grp->saved_xid2 =
			(struct xid2 *)skb_tail_pointer(grp->rcvd_xid_skb);

		memcpy(skb_put(grp->rcvd_xid_skb,
					XID2_LENGTH), xid, XID2_LENGTH);
		grp->rcvd_xid_skb->data = grp->rcvd_xid_data;

		skb_reset_tail_pointer(grp->rcvd_xid_skb);
		grp->rcvd_xid_skb->len = 0;

		/* convert two 32 bit numbers into 1 64 bit for id compare */
		our_id = (__u64)priv->xid->xid2_adj_id;
		our_id = our_id << 32;
		our_id = our_id + priv->xid->xid2_sender_id;
		their_id = (__u64)xid->xid2_adj_id;
		their_id = their_id << 32;
		their_id = their_id + xid->xid2_sender_id;
		/* lower id assume the xside role */
		if (our_id < their_id) {
			grp->roll = XSIDE;
			ctcm_pr_debug("ctcmpc :%s() WE HAVE LOW ID-"
				"TAKE XSIDE\n", __FUNCTION__);
		} else {
			grp->roll = YSIDE;
			ctcm_pr_debug("ctcmpc :%s() WE HAVE HIGH ID-"
				"TAKE YSIDE\n", __FUNCTION__);
		}

	} else {
		/* follow-up XID: must be consistent with the saved one */
		if (xid->xid2_flag4 != grp->saved_xid2->xid2_flag4) {
			failed = 1;
			printk(KERN_INFO "%s XID REJECTED - XID Flag Byte4\n",
				__FUNCTION__);
		}
		if (xid->xid2_flag2 == 0x40) {
			/* partner already marked the exchange as bad */
			failed = 1;
			printk(KERN_INFO "%s XID REJECTED - XID NOGOOD\n",
				__FUNCTION__);
		}
		if (xid->xid2_adj_id != grp->saved_xid2->xid2_adj_id) {
			failed = 1;
			printk(KERN_INFO "%s XID REJECTED - "
				"Adjacent Station ID Mismatch\n",
				__FUNCTION__);
		}
		if (xid->xid2_sender_id != grp->saved_xid2->xid2_sender_id) {
			failed = 1;
			printk(KERN_INFO "%s XID REJECTED - "
				"Sender Address Mismatch\n", __FUNCTION__);

		}
	}

	if (failed) {
		ctcm_pr_info("ctcmpc : %s() failed\n", __FUNCTION__);
		/* flag both sides' XID so the reject is propagated */
		priv->xid->xid2_flag2 = 0x40;
		grp->saved_xid2->xid2_flag2 = 0x40;
		rc = 1;
	}

done:

	ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__);
	return rc;
}
1834
1835/*
1836 * MPC Group Station FSM action
1837 * CTCM_PROTO_MPC only
1838 */
/*
 * Build and start the XID channel program (CCWs 8..14) on one channel.
 *
 * side selects the CCW ordering: XSIDE writes its own xid_th/xid first
 * and then reads the partner's; YSIDE reads first and writes second.
 * A 5s timer is armed before ccw_device_start; errors are reported via
 * ctcm_ccw_check_rc.  The ccwdev lock is taken only when not already in
 * interrupt context (see the sparse remark below).
 */
static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side)
{
	struct channel *ch = arg;
	struct ctcm_priv *priv;
	struct mpc_group *grp = NULL;
	struct net_device *dev = NULL;
	int rc = 0;
	int gotlock = 0;
	unsigned long saveflags = 0;	/* avoids compiler warning with
					   spin_unlock_irqrestore */

	if (ch == NULL) {
		printk(KERN_INFO "%s ch=NULL\n", __FUNCTION__);
		goto done;
	}

	if (do_debug)
		ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
			__FUNCTION__, smp_processor_id(), ch, ch->id);

	dev = ch->netdev;
	if (dev == NULL) {
		printk(KERN_INFO "%s dev=NULL\n", __FUNCTION__);
		goto done;
	}

	priv = dev->priv;
	if (priv == NULL) {
		printk(KERN_INFO "%s priv=NULL\n", __FUNCTION__);
		goto done;
	}

	grp = priv->mpcg;
	if (grp == NULL) {
		printk(KERN_INFO "%s grp=NULL\n", __FUNCTION__);
		goto done;
	}

	if (ctcm_checkalloc_buffer(ch))
		goto done;

	/* skb data-buffer referencing: */

	ch->trans_skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(ch->trans_skb);
	ch->trans_skb->len = 0;
	/* result of the previous 3 statements is NOT always
	 * already set after ctcm_checkalloc_buffer
	 * because of possible reuse of the trans_skb
	 */
	memset(ch->trans_skb->data, 0, 16);
	ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data;
	/* check is main purpose here: */
	skb_put(ch->trans_skb, TH_HEADER_LENGTH);
	ch->rcvd_xid = (struct xid2 *)skb_tail_pointer(ch->trans_skb);
	/* check is main purpose here: */
	skb_put(ch->trans_skb, XID2_LENGTH);
	ch->rcvd_xid_id = skb_tail_pointer(ch->trans_skb);
	/* cleanup back to startpoint */
	ch->trans_skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(ch->trans_skb);
	ch->trans_skb->len = 0;

	/* non-checking rewrite of above skb data-buffer referencing: */
	/*
	memset(ch->trans_skb->data, 0, 16);
	ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data;
	ch->rcvd_xid = (struct xid2 *)(ch->trans_skb_data + TH_HEADER_LENGTH);
	ch->rcvd_xid_id = ch->trans_skb_data + TH_HEADER_LENGTH + XID2_LENGTH;
	*/

	/* CCW 8: command code is set by the caller (doxid0/doxid7 path) */
	ch->ccw[8].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
	ch->ccw[8].count = 0;
	ch->ccw[8].cda = 0x00;

	if (side == XSIDE) {
		/* mpc_action_xside_xid */
		if (ch->xid_th == NULL) {
			printk(KERN_INFO "%s ch->xid_th=NULL\n", __FUNCTION__);
			goto done;
		}
		/* write our TH + XID, then read the partner's */
		ch->ccw[9].cmd_code = CCW_CMD_WRITE;
		ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
		ch->ccw[9].count = TH_HEADER_LENGTH;
		ch->ccw[9].cda = virt_to_phys(ch->xid_th);

		if (ch->xid == NULL) {
			printk(KERN_INFO "%s ch->xid=NULL\n", __FUNCTION__);
			goto done;
		}

		ch->ccw[10].cmd_code = CCW_CMD_WRITE;
		ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
		ch->ccw[10].count = XID2_LENGTH;
		ch->ccw[10].cda = virt_to_phys(ch->xid);

		ch->ccw[11].cmd_code = CCW_CMD_READ;
		ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
		ch->ccw[11].count = TH_HEADER_LENGTH;
		ch->ccw[11].cda = virt_to_phys(ch->rcvd_xid_th);

		ch->ccw[12].cmd_code = CCW_CMD_READ;
		ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
		ch->ccw[12].count = XID2_LENGTH;
		ch->ccw[12].cda = virt_to_phys(ch->rcvd_xid);

		ch->ccw[13].cmd_code = CCW_CMD_READ;
		ch->ccw[13].cda = virt_to_phys(ch->rcvd_xid_id);

	} else { /* side == YSIDE : mpc_action_yside_xid */
		/* read the partner's TH + XID first, then write ours */
		ch->ccw[9].cmd_code = CCW_CMD_READ;
		ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
		ch->ccw[9].count = TH_HEADER_LENGTH;
		ch->ccw[9].cda = virt_to_phys(ch->rcvd_xid_th);

		ch->ccw[10].cmd_code = CCW_CMD_READ;
		ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
		ch->ccw[10].count = XID2_LENGTH;
		ch->ccw[10].cda = virt_to_phys(ch->rcvd_xid);

		if (ch->xid_th == NULL) {
			printk(KERN_INFO "%s ch->xid_th=NULL\n", __FUNCTION__);
			goto done;
		}
		ch->ccw[11].cmd_code = CCW_CMD_WRITE;
		ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
		ch->ccw[11].count = TH_HEADER_LENGTH;
		ch->ccw[11].cda = virt_to_phys(ch->xid_th);

		if (ch->xid == NULL) {
			printk(KERN_INFO "%s ch->xid=NULL\n", __FUNCTION__);
			goto done;
		}
		ch->ccw[12].cmd_code = CCW_CMD_WRITE;
		ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
		ch->ccw[12].count = XID2_LENGTH;
		ch->ccw[12].cda = virt_to_phys(ch->xid);

		if (ch->xid_id == NULL) {
			printk(KERN_INFO "%s ch->xid_id=NULL\n", __FUNCTION__);
			goto done;
		}
		ch->ccw[13].cmd_code = CCW_CMD_WRITE;
		ch->ccw[13].cda = virt_to_phys(ch->xid_id);

	}
	/* CCW 13 flags/count are common to both sides */
	ch->ccw[13].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
	ch->ccw[13].count = 4;

	ch->ccw[14].cmd_code = CCW_CMD_NOOP;
	ch->ccw[14].flags = CCW_FLAG_SLI;
	ch->ccw[14].count = 0;
	ch->ccw[14].cda = 0;

	if (do_debug_ccw)
		ctcmpc_dumpit((char *)&ch->ccw[8], sizeof(struct ccw1) * 7);

	ctcmpc_dumpit((char *)ch->xid_th, TH_HEADER_LENGTH);
	ctcmpc_dumpit((char *)ch->xid, XID2_LENGTH);
	ctcmpc_dumpit((char *)ch->xid_id, 4);
	if (!in_irq()) {
		/* Such conditional locking is a known problem for
		 * sparse because its static undeterministic.
		 * Warnings should be ignored here. */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		gotlock = 1;
	}

	fsm_addtimer(&ch->timer, 5000 , CTC_EVENT_TIMER, ch);
	rc = ccw_device_start(ch->cdev, &ch->ccw[8],
				(unsigned long)ch, 0xff, 0);

	if (gotlock)	/* see remark above about conditional locking */
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);

	if (rc != 0) {
		ctcm_pr_info("ctcmpc: %s() ch:%s IO failed \n",
				__FUNCTION__, ch->id);
		ctcm_ccw_check_rc(ch, rc,
				(side == XSIDE) ? "x-side XID" : "y-side XID");
	}

done:
	/* NOTE(review): ch->id is dereferenced here even when we jumped
	 * from the ch==NULL check above -- would oops with do_debug set;
	 * confirm and fix separately. */
	if (do_debug)
		ctcm_pr_debug("ctcmpc exit : %s(): ch=0x%p id=%s\n",
			__FUNCTION__, ch, ch->id);
	return;

}
2028
2029/*
2030 * MPC Group Station FSM action
2031 * CTCM_PROTO_MPC only
2032 */
static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg)
{
	/* event wrapper: run the common XID channel program as X-side */
	mpc_action_side_xid(fsm, arg, XSIDE);
}
2037
2038/*
2039 * MPC Group Station FSM action
2040 * CTCM_PROTO_MPC only
2041 */
static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg)
{
	/* event wrapper: run the common XID channel program as Y-side */
	mpc_action_side_xid(fsm, arg, YSIDE);
}
2046
2047/*
2048 * MPC Group Station FSM action
2049 * CTCM_PROTO_MPC only
2050 */
2051static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg)
2052{
2053 struct channel *ch = arg;
2054 struct ctcm_priv *priv;
2055 struct mpc_group *grp = NULL;
2056 struct net_device *dev = NULL;
2057
2058 if (do_debug)
2059 ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
2060 __FUNCTION__, smp_processor_id(), ch, ch->id);
2061
2062 if (ch == NULL) {
2063 printk(KERN_WARNING "%s ch=NULL\n", __FUNCTION__);
2064 goto done;
2065 }
2066
2067 dev = ch->netdev;
2068 if (dev == NULL) {
2069 printk(KERN_WARNING "%s dev=NULL\n", __FUNCTION__);
2070 goto done;
2071 }
2072
2073 priv = dev->priv;
2074 if (priv == NULL) {
2075 printk(KERN_WARNING "%s priv=NULL\n", __FUNCTION__);
2076 goto done;
2077 }
2078
2079 grp = priv->mpcg;
2080 if (grp == NULL) {
2081 printk(KERN_WARNING "%s grp=NULL\n", __FUNCTION__);
2082 goto done;
2083 }
2084
2085 if (ch->xid == NULL) {
2086 printk(KERN_WARNING "%s ch-xid=NULL\n", __FUNCTION__);
2087 goto done;
2088 }
2089
2090 fsm_newstate(ch->fsm, CH_XID0_INPROGRESS);
2091
2092 ch->xid->xid2_option = XID2_0;
2093
2094 switch (fsm_getstate(grp->fsm)) {
2095 case MPCG_STATE_XID2INITW:
2096 case MPCG_STATE_XID2INITX:
2097 ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
2098 break;
2099 case MPCG_STATE_XID0IOWAIT:
2100 case MPCG_STATE_XID0IOWAIX:
2101 ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
2102 break;
2103 }
2104
2105 fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch);
2106
2107done:
2108 if (do_debug)
2109 ctcm_pr_debug("ctcmpc exit : %s(): ch=0x%p id=%s\n",
2110 __FUNCTION__, ch, ch->id);
2111 return;
2112
2113}
2114
2115/*
2116 * MPC Group Station FSM action
2117 * CTCM_PROTO_MPC only
2118*/
2119static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
2120{
2121 struct net_device *dev = arg;
2122 struct ctcm_priv *priv = NULL;
2123 struct mpc_group *grp = NULL;
2124 int direction;
2125 int rc = 0;
2126 int send = 0;
2127
2128 ctcm_pr_debug("ctcmpc enter: %s() \n", __FUNCTION__);
2129
2130 if (dev == NULL) {
2131 printk(KERN_INFO "%s dev=NULL \n", __FUNCTION__);
2132 rc = 1;
2133 goto done;
2134 }
2135
2136 priv = dev->priv;
2137 if (priv == NULL) {
2138 printk(KERN_INFO "%s priv=NULL \n", __FUNCTION__);
2139 rc = 1;
2140 goto done;
2141 }
2142
2143 grp = priv->mpcg;
2144 if (grp == NULL) {
2145 printk(KERN_INFO "%s grp=NULL \n", __FUNCTION__);
2146 rc = 1;
2147 goto done;
2148 }
2149
2150 for (direction = READ; direction <= WRITE; direction++) {
2151 struct channel *ch = priv->channel[direction];
2152 struct xid2 *thisxid = ch->xid;
2153 ch->xid_skb->data = ch->xid_skb_data;
2154 skb_reset_tail_pointer(ch->xid_skb);
2155 ch->xid_skb->len = 0;
2156 thisxid->xid2_option = XID2_7;
2157 send = 0;
2158
2159 /* xid7 phase 1 */
2160 if (grp->outstanding_xid7_p2 > 0) {
2161 if (grp->roll == YSIDE) {
2162 if (fsm_getstate(ch->fsm) == CH_XID7_PENDING1) {
2163 fsm_newstate(ch->fsm, CH_XID7_PENDING2);
2164 ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
2165 memcpy(skb_put(ch->xid_skb,
2166 TH_HEADER_LENGTH),
2167 &thdummy, TH_HEADER_LENGTH);
2168 send = 1;
2169 }
2170 } else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING2) {
2171 fsm_newstate(ch->fsm, CH_XID7_PENDING2);
2172 ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
2173 memcpy(skb_put(ch->xid_skb,
2174 TH_HEADER_LENGTH),
2175 &thnorm, TH_HEADER_LENGTH);
2176 send = 1;
2177 }
2178 } else {
2179 /* xid7 phase 2 */
2180 if (grp->roll == YSIDE) {
2181 if (fsm_getstate(ch->fsm) < CH_XID7_PENDING4) {
2182 fsm_newstate(ch->fsm, CH_XID7_PENDING4);
2183 memcpy(skb_put(ch->xid_skb,
2184 TH_HEADER_LENGTH),
2185 &thnorm, TH_HEADER_LENGTH);
2186 ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
2187 send = 1;
2188 }
2189 } else if (fsm_getstate(ch->fsm) == CH_XID7_PENDING3) {
2190 fsm_newstate(ch->fsm, CH_XID7_PENDING4);
2191 ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
2192 memcpy(skb_put(ch->xid_skb, TH_HEADER_LENGTH),
2193 &thdummy, TH_HEADER_LENGTH);
2194 send = 1;
2195 }
2196 }
2197
2198 if (send)
2199 fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch);
2200 }
2201
2202done:
2203
2204 if (rc != 0)
2205 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
2206
2207 return;
2208}
2209
2210/*
2211 * MPC Group Station FSM action
2212 * CTCM_PROTO_MPC only
2213 */
/*
 * Handle a received XID0: account the outstanding-XID counters, move
 * the group FSM to the matching "in progress"/"XID7" state BEFORE
 * validating the received XID, then free the mpcg_info.
 */
static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg)
{

	struct mpcg_info *mpcginfo = arg;
	struct channel *ch = mpcginfo->ch;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv;
	struct mpc_group *grp;

	if (do_debug)
		ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
			__FUNCTION__, smp_processor_id(), ch, ch->id);

	priv = dev->priv;
	grp = priv->mpcg;

	ctcm_pr_debug("ctcmpc in:%s() %s xid2:%i xid7:%i xidt_p2:%i \n",
			__FUNCTION__, ch->id,
			grp->outstanding_xid2,
			grp->outstanding_xid7,
			grp->outstanding_xid7_p2);

	if (fsm_getstate(ch->fsm) < CH_XID7_PENDING)
		fsm_newstate(ch->fsm, CH_XID7_PENDING);

	/* one XID2 answered; the XID7 phase now owes two more rounds */
	grp->outstanding_xid2--;
	grp->outstanding_xid7++;
	grp->outstanding_xid7_p2++;

	/* must change state before validating xid to */
	/* properly handle interim interrupts received*/
	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID2INITW:
		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITX);
		mpc_validate_xid(mpcginfo);
		break;
	case MPCG_STATE_XID0IOWAIT:
		fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIX);
		mpc_validate_xid(mpcginfo);
		break;
	case MPCG_STATE_XID2INITX:
		if (grp->outstanding_xid2 == 0) {
			/* all channels answered: XID2 phase complete */
			fsm_newstate(grp->fsm, MPCG_STATE_XID7INITW);
			mpc_validate_xid(mpcginfo);
			fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev);
		}
		break;
	case MPCG_STATE_XID0IOWAIX:
		if (grp->outstanding_xid2 == 0) {
			fsm_newstate(grp->fsm, MPCG_STATE_XID7INITI);
			mpc_validate_xid(mpcginfo);
			fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev);
		}
		break;
	}
	/* mpcginfo ownership ends here */
	kfree(mpcginfo);

	if (do_debug) {
		ctcm_pr_debug("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n",
			__FUNCTION__, ch->id,
			grp->outstanding_xid2,
			grp->outstanding_xid7,
			grp->outstanding_xid7_p2);
		ctcm_pr_debug("ctcmpc:%s() %s grpstate: %s chanstate: %s \n",
			__FUNCTION__, ch->id,
			fsm_getstate_str(grp->fsm),
			fsm_getstate_str(ch->fsm));
	}
	return;

}
2285
2286
2287/*
2288 * MPC Group Station FSM action
2289 * CTCM_PROTO_MPC only
2290 */
/*
 * Handle a received XID7: account the outstanding counter, rewind the
 * channel's xid skb, advance the group FSM (two XID7 rounds; when the
 * second round also completes, signal MPCG_EVENT_XID7DONE), validating
 * the received XID at each step.  Frees the mpcg_info.
 */
static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg)
{
	struct mpcg_info *mpcginfo = arg;
	struct channel *ch = mpcginfo->ch;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *grp = priv->mpcg;

	if (do_debug) {
		ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
			__FUNCTION__, smp_processor_id(), ch, ch->id);

		ctcm_pr_debug("ctcmpc: outstanding_xid7: %i, "
				" outstanding_xid7_p2: %i\n",
				grp->outstanding_xid7,
				grp->outstanding_xid7_p2);
	}

	grp->outstanding_xid7--;
	/* rewind the xid skb to the start of its buffer */
	ch->xid_skb->data = ch->xid_skb_data;
	skb_reset_tail_pointer(ch->xid_skb);
	ch->xid_skb->len = 0;

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID7INITI:
		fsm_newstate(grp->fsm, MPCG_STATE_XID7INITZ);
		mpc_validate_xid(mpcginfo);
		break;
	case MPCG_STATE_XID7INITW:
		fsm_newstate(grp->fsm, MPCG_STATE_XID7INITX);
		mpc_validate_xid(mpcginfo);
		break;
	case MPCG_STATE_XID7INITZ:
	case MPCG_STATE_XID7INITX:
		if (grp->outstanding_xid7 == 0) {
			if (grp->outstanding_xid7_p2 > 0) {
				/* start the second XID7 round */
				grp->outstanding_xid7 =
					grp->outstanding_xid7_p2;
				grp->outstanding_xid7_p2 = 0;
			} else
				fsm_newstate(grp->fsm, MPCG_STATE_XID7INITF);

			mpc_validate_xid(mpcginfo);
			fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
			break;
		}
		mpc_validate_xid(mpcginfo);
		break;
	}

	/* mpcginfo ownership ends here */
	kfree(mpcginfo);

	if (do_debug)
		ctcm_pr_debug("ctcmpc exit: %s(): cp=%i ch=0x%p id=%s\n",
			__FUNCTION__, smp_processor_id(), ch, ch->id);
	return;

}
2349
2350/*
2351 * mpc_action helper of an MPC Group Station FSM action
2352 * CTCM_PROTO_MPC only
2353 */
/*
 * Inject a QLLC discontact (0xcc 0x03) into the receive path so the
 * upper layer (VTAM) learns the MPC group is going down.
 *
 * Returns 0 on success (or when the saved group state needs no
 * discontact), 1 on any failure (NULL args, allocation failure,
 * insufficient skb headroom).
 */
static int mpc_send_qllc_discontact(struct net_device *dev)
{
	int rc = 0;
	__u32 new_len = 0;
	struct sk_buff *skb;
	struct qllc *qllcptr;
	struct ctcm_priv *priv;
	struct mpc_group *grp;

	ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__);

	if (dev == NULL) {
		printk(KERN_INFO "%s() dev=NULL\n", __FUNCTION__);
		rc = 1;
		goto done;
	}

	priv = dev->priv;
	if (priv == NULL) {
		printk(KERN_INFO "%s() priv=NULL\n", __FUNCTION__);
		rc = 1;
		goto done;
	}

	grp = priv->mpcg;
	if (grp == NULL) {
		printk(KERN_INFO "%s() grp=NULL\n", __FUNCTION__);
		rc = 1;
		goto done;
	}
	ctcm_pr_info("ctcmpc: %s() GROUP STATE: %s\n", __FUNCTION__,
			mpcg_state_names[grp->saved_state]);

	switch (grp->saved_state) {
	/*
	 * establish conn callback function is
	 * preferred method to report failure
	 */
	case MPCG_STATE_XID0IOWAIT:
	case MPCG_STATE_XID0IOWAIX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
	case MPCG_STATE_XID2INITW:
	case MPCG_STATE_XID2INITX:
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
		if (grp->estconnfunc) {
			grp->estconnfunc(grp->port_num, -1, 0);
			grp->estconnfunc = NULL;
			break;
		}
		/* fallthrough: no callback registered, so send the
		 * discontact like the established states below do */
	case MPCG_STATE_FLOWC:
	case MPCG_STATE_READY:
		grp->send_qllc_disc = 2;
		new_len = sizeof(struct qllc);
		qllcptr = kzalloc(new_len, gfp_type() | GFP_DMA);
		if (qllcptr == NULL) {
			printk(KERN_INFO
				"ctcmpc: Out of memory in %s()\n",
				dev->name);
			rc = 1;
			goto done;
		}

		/* QLLC discontact: address 0xcc, command 0x03 */
		qllcptr->qllc_address = 0xcc;
		qllcptr->qllc_commands = 0x03;

		skb = __dev_alloc_skb(new_len, GFP_ATOMIC);

		if (skb == NULL) {
			printk(KERN_INFO "%s Out of memory in mpc_send_qllc\n",
				dev->name);
			priv->stats.rx_dropped++;
			rc = 1;
			kfree(qllcptr);
			goto done;
		}

		memcpy(skb_put(skb, new_len), qllcptr, new_len);
		kfree(qllcptr);

		/* need 4 bytes headroom for the pdu sequence number */
		if (skb_headroom(skb) < 4) {
			printk(KERN_INFO "ctcmpc: %s() Unable to"
				" build discontact for %s\n",
				__FUNCTION__, dev->name);
			rc = 1;
			dev_kfree_skb_any(skb);
			goto done;
		}

		*((__u32 *)skb_push(skb, 4)) = priv->channel[READ]->pdu_seq;
		priv->channel[READ]->pdu_seq++;
		if (do_debug_data)
			ctcm_pr_debug("ctcmpc: %s ToDCM_pdu_seq= %08x\n",
				__FUNCTION__, priv->channel[READ]->pdu_seq);

		/* receipt of CC03 resets anticipated sequence number on
		   receiving side */
		priv->channel[READ]->pdu_seq = 0x00;
		skb_reset_mac_header(skb);
		skb->dev = dev;
		skb->protocol = htons(ETH_P_SNAP);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		ctcmpc_dumpit((char *)skb->data, (sizeof(struct qllc) + 4));

		/* hand the discontact up the receive path */
		netif_rx(skb);
		break;
	default:
		break;

	}

done:
	ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__);
	return rc;
}
2471/* --- This is the END my friend --- */
2472
diff --git a/drivers/s390/net/ctcm_mpc.h b/drivers/s390/net/ctcm_mpc.h
new file mode 100644
index 000000000000..f99686069a91
--- /dev/null
+++ b/drivers/s390/net/ctcm_mpc.h
@@ -0,0 +1,239 @@
1/*
2 * drivers/s390/net/ctcm_mpc.h
3 *
4 * Copyright IBM Corp. 2007
5 * Authors: Peter Tiedemann (ptiedem@de.ibm.com)
6 *
7 * MPC additions:
8 * Belinda Thompson (belindat@us.ibm.com)
9 * Andy Richter (richtera@us.ibm.com)
10 */
11
12#ifndef _CTC_MPC_H_
13#define _CTC_MPC_H_
14
15#include <linux/skbuff.h>
16#include "fsm.h"
17
18/*
19 * MPC external interface
20 * Note that ctc_mpc_xyz are called with a lock on ................
21 */
22
23/* port_number is the mpc device 0, 1, 2 etc mpc2 is port_number 2 */
24
25/* passive open Just wait for XID2 exchange */
26extern int ctc_mpc_alloc_channel(int port,
27 void (*callback)(int port_num, int max_write_size));
28/* active open Alloc then send XID2 */
29extern void ctc_mpc_establish_connectivity(int port,
30 void (*callback)(int port_num, int rc, int max_write_size));
31
32extern void ctc_mpc_dealloc_ch(int port);
33extern void ctc_mpc_flow_control(int port, int flowc);
34
35/*
36 * other MPC Group prototypes and structures
37 */
38
39#define ETH_P_SNA_DIX 0x80D5
40
41/*
42 * Declaration of an XID2
43 *
44 */
45#define ALLZEROS 0x0000000000000000
46
47#define XID_FM2 0x20
48#define XID2_0 0x00
49#define XID2_7 0x07
50#define XID2_WRITE_SIDE 0x04
51#define XID2_READ_SIDE 0x05
52
53struct xid2 {
54 __u8 xid2_type_id;
55 __u8 xid2_len;
56 __u32 xid2_adj_id;
57 __u8 xid2_rlen;
58 __u8 xid2_resv1;
59 __u8 xid2_flag1;
60 __u8 xid2_fmtt;
61 __u8 xid2_flag4;
62 __u16 xid2_resv2;
63 __u8 xid2_tgnum;
64 __u32 xid2_sender_id;
65 __u8 xid2_flag2;
66 __u8 xid2_option;
67 char xid2_resv3[8];
68 __u16 xid2_resv4;
69 __u8 xid2_dlc_type;
70 __u16 xid2_resv5;
71 __u8 xid2_mpc_flag;
72 __u8 xid2_resv6;
73 __u16 xid2_buf_len;
74 char xid2_buffer[255 - (13 * sizeof(__u8) +
75 2 * sizeof(__u32) +
76 4 * sizeof(__u16) +
77 8 * sizeof(char))];
78} __attribute__ ((packed));
79
80#define XID2_LENGTH (sizeof(struct xid2))
81
82struct th_header {
83 __u8 th_seg;
84 __u8 th_ch_flag;
85#define TH_HAS_PDU 0xf0
86#define TH_IS_XID 0x01
87#define TH_SWEEP_REQ 0xfe
88#define TH_SWEEP_RESP 0xff
89 __u8 th_blk_flag;
90#define TH_DATA_IS_XID 0x80
91#define TH_RETRY 0x40
92#define TH_DISCONTACT 0xc0
93#define TH_SEG_BLK 0x20
94#define TH_LAST_SEG 0x10
95#define TH_PDU_PART 0x08
96 __u8 th_is_xid; /* is 0x01 if this is XID */
97 __u32 th_seq_num;
98} __attribute__ ((packed));
99
100struct th_addon {
101 __u32 th_last_seq;
102 __u32 th_resvd;
103} __attribute__ ((packed));
104
105struct th_sweep {
106 struct th_header th;
107 struct th_addon sw;
108} __attribute__ ((packed));
109
110#define TH_HEADER_LENGTH (sizeof(struct th_header))
111#define TH_SWEEP_LENGTH (sizeof(struct th_sweep))
112
113#define PDU_LAST 0x80
114#define PDU_CNTL 0x40
115#define PDU_FIRST 0x20
116
117struct pdu {
118 __u32 pdu_offset;
119 __u8 pdu_flag;
120 __u8 pdu_proto; /* 0x01 is APPN SNA */
121 __u16 pdu_seq;
122} __attribute__ ((packed));
123
124#define PDU_HEADER_LENGTH (sizeof(struct pdu))
125
126struct qllc {
127 __u8 qllc_address;
128#define QLLC_REQ 0xFF
129#define QLLC_RESP 0x00
130 __u8 qllc_commands;
131#define QLLC_DISCONNECT 0x53
132#define QLLC_UNSEQACK 0x73
133#define QLLC_SETMODE 0x93
134#define QLLC_EXCHID 0xBF
135} __attribute__ ((packed));
136
137
138/*
139 * Definition of one MPC group
140 */
141
142#define MAX_MPCGCHAN 10
143#define MPC_XID_TIMEOUT_VALUE 10000
144#define MPC_CHANNEL_ADD 0
145#define MPC_CHANNEL_REMOVE 1
146#define MPC_CHANNEL_ATTN 2
147#define XSIDE 1
148#define YSIDE 0
149
150struct mpcg_info {
151 struct sk_buff *skb;
152 struct channel *ch;
153 struct xid2 *xid;
154 struct th_sweep *sweep;
155 struct th_header *th;
156};
157
/* Per-device state of one MPC group (pair of read/write channels). */
struct mpc_group {
	struct tasklet_struct mpc_tasklet;
	struct tasklet_struct mpc_tasklet2;
	int changed_side;
	int saved_state;	/* group state captured for later reporting */
	int channels_terminating;
	int out_of_sequence;
	int flow_off_called;
	int port_num;		/* mpc device number (mpc2 -> port 2) */
	int port_persist;
	int alloc_called;
	__u32 xid2_adj_id;
	__u8 xid2_tgnum;
	__u32 xid2_sender_id;
	int num_channel_paths;
	int active_channels[2];
	__u16 group_max_buflen;	/* negotiated min payload (buf - headers) */
	int outstanding_xid2;	/* XID2 answers still expected */
	int outstanding_xid7;	/* XID7 answers still expected (round 1) */
	int outstanding_xid7_p2;	/* XID7 answers owed for round 2 */
	int sweep_req_pend_num;
	int sweep_rsp_pend_num;
	struct sk_buff *xid_skb;	/* outbound XID staging buffer */
	char *xid_skb_data;
	struct th_header *xid_th;
	struct xid2 *xid;
	char *xid_id;
	struct th_header *rcvd_xid_th;
	struct sk_buff *rcvd_xid_skb;	/* inbound XID staging buffer */
	char *rcvd_xid_data;
	__u8 in_sweep;
	__u8 roll;		/* XSIDE or YSIDE, chosen by id compare */
	struct xid2 *saved_xid2;	/* first XID received for the group */
	void (*allochanfunc)(int, int);	/* alloc_channel callback */
	int allocchan_callback_retries;
	void (*estconnfunc)(int, int, int);	/* establish_conn callback */
	int estconn_callback_retries;
	int estconn_called;
	int xidnogood;
	int send_qllc_disc;
	fsm_timer timer;
	fsm_instance *fsm; /* group xid fsm */
};
201
202#ifdef DEBUGDATA
203void ctcmpc_dumpit(char *buf, int len);
204#else
205static inline void ctcmpc_dumpit(char *buf, int len)
206{
207}
208#endif
209
210#ifdef DEBUGDATA
211/*
212 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
213 *
214 * skb The struct sk_buff to dump.
215 * offset Offset relative to skb-data, where to start the dump.
216 */
217void ctcmpc_dump_skb(struct sk_buff *skb, int offset);
218#else
219static inline void ctcmpc_dump_skb(struct sk_buff *skb, int offset)
220{}
221#endif
222
/* Dump at most the first 32 bytes of buf. */
static inline void ctcmpc_dump32(char *buf, int len)
{
	int dump_len = (len < 32) ? len : 32;

	ctcmpc_dumpit(buf, dump_len);
}
230
231int ctcmpc_open(struct net_device *);
232void ctcm_ccw_check_rc(struct channel *, int, char *);
233void mpc_group_ready(unsigned long adev);
234int mpc_channel_action(struct channel *ch, int direction, int action);
235void mpc_action_send_discontact(unsigned long thischan);
236void mpc_action_discontact(fsm_instance *fi, int event, void *arg);
237void ctcmpc_bh(unsigned long thischan);
238#endif
239/* --- This is the END my friend --- */
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
new file mode 100644
index 000000000000..bb2d13721d34
--- /dev/null
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -0,0 +1,210 @@
1/*
2 * drivers/s390/net/ctcm_sysfs.c
3 *
4 * Copyright IBM Corp. 2007, 2007
5 * Authors: Peter Tiedemann (ptiedem@de.ibm.com)
6 *
7 */
8
9#undef DEBUG
10#undef DEBUGDATA
11#undef DEBUGCCW
12
13#include <linux/sysfs.h>
14#include "ctcm_main.h"
15
16/*
17 * sysfs attributes
18 */
19
20static ssize_t ctcm_buffer_show(struct device *dev,
21 struct device_attribute *attr, char *buf)
22{
23 struct ctcm_priv *priv = dev_get_drvdata(dev);
24
25 if (!priv)
26 return -ENODEV;
27 return sprintf(buf, "%d\n", priv->buffer_size);
28}
29
30static ssize_t ctcm_buffer_write(struct device *dev,
31 struct device_attribute *attr, const char *buf, size_t count)
32{
33 struct net_device *ndev;
34 int bs1;
35 struct ctcm_priv *priv = dev_get_drvdata(dev);
36
37 if (!(priv && priv->channel[READ] &&
38 (ndev = priv->channel[READ]->netdev))) {
39 CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
40 return -ENODEV;
41 }
42
43 sscanf(buf, "%u", &bs1);
44 if (bs1 > CTCM_BUFSIZE_LIMIT)
45 goto einval;
46 if (bs1 < (576 + LL_HEADER_LENGTH + 2))
47 goto einval;
48 priv->buffer_size = bs1; /* just to overwrite the default */
49
50 if ((ndev->flags & IFF_RUNNING) &&
51 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
52 goto einval;
53
54 priv->channel[READ]->max_bufsize = bs1;
55 priv->channel[WRITE]->max_bufsize = bs1;
56 if (!(ndev->flags & IFF_RUNNING))
57 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
58 priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
59 priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
60
61 CTCM_DBF_DEV(SETUP, ndev, buf);
62 return count;
63
64einval:
65 CTCM_DBF_DEV(SETUP, ndev, "buff_err");
66 return -EINVAL;
67}
68
69static void ctcm_print_statistics(struct ctcm_priv *priv)
70{
71 char *sbuf;
72 char *p;
73
74 if (!priv)
75 return;
76 sbuf = kmalloc(2048, GFP_KERNEL);
77 if (sbuf == NULL)
78 return;
79 p = sbuf;
80
81 p += sprintf(p, " Device FSM state: %s\n",
82 fsm_getstate_str(priv->fsm));
83 p += sprintf(p, " RX channel FSM state: %s\n",
84 fsm_getstate_str(priv->channel[READ]->fsm));
85 p += sprintf(p, " TX channel FSM state: %s\n",
86 fsm_getstate_str(priv->channel[WRITE]->fsm));
87 p += sprintf(p, " Max. TX buffer used: %ld\n",
88 priv->channel[WRITE]->prof.maxmulti);
89 p += sprintf(p, " Max. chained SKBs: %ld\n",
90 priv->channel[WRITE]->prof.maxcqueue);
91 p += sprintf(p, " TX single write ops: %ld\n",
92 priv->channel[WRITE]->prof.doios_single);
93 p += sprintf(p, " TX multi write ops: %ld\n",
94 priv->channel[WRITE]->prof.doios_multi);
95 p += sprintf(p, " Netto bytes written: %ld\n",
96 priv->channel[WRITE]->prof.txlen);
97 p += sprintf(p, " Max. TX IO-time: %ld\n",
98 priv->channel[WRITE]->prof.tx_time);
99
100 printk(KERN_INFO "Statistics for %s:\n%s",
101 priv->channel[WRITE]->netdev->name, sbuf);
102 kfree(sbuf);
103 return;
104}
105
106static ssize_t stats_show(struct device *dev,
107 struct device_attribute *attr, char *buf)
108{
109 struct ctcm_priv *priv = dev_get_drvdata(dev);
110 if (!priv)
111 return -ENODEV;
112 ctcm_print_statistics(priv);
113 return sprintf(buf, "0\n");
114}
115
116static ssize_t stats_write(struct device *dev, struct device_attribute *attr,
117 const char *buf, size_t count)
118{
119 struct ctcm_priv *priv = dev_get_drvdata(dev);
120 if (!priv)
121 return -ENODEV;
122 /* Reset statistics */
123 memset(&priv->channel[WRITE]->prof, 0,
124 sizeof(priv->channel[WRITE]->prof));
125 return count;
126}
127
128static ssize_t ctcm_proto_show(struct device *dev,
129 struct device_attribute *attr, char *buf)
130{
131 struct ctcm_priv *priv = dev_get_drvdata(dev);
132 if (!priv)
133 return -ENODEV;
134
135 return sprintf(buf, "%d\n", priv->protocol);
136}
137
138static ssize_t ctcm_proto_store(struct device *dev,
139 struct device_attribute *attr, const char *buf, size_t count)
140{
141 int value;
142 struct ctcm_priv *priv = dev_get_drvdata(dev);
143
144 if (!priv)
145 return -ENODEV;
146 sscanf(buf, "%u", &value);
147 if (!((value == CTCM_PROTO_S390) ||
148 (value == CTCM_PROTO_LINUX) ||
149 (value == CTCM_PROTO_MPC) ||
150 (value == CTCM_PROTO_OS390)))
151 return -EINVAL;
152 priv->protocol = value;
153 CTCM_DBF_DEV(SETUP, dev, buf);
154
155 return count;
156}
157
158static ssize_t ctcm_type_show(struct device *dev,
159 struct device_attribute *attr, char *buf)
160{
161 struct ccwgroup_device *cgdev;
162
163 cgdev = to_ccwgroupdev(dev);
164 if (!cgdev)
165 return -ENODEV;
166
167 return sprintf(buf, "%s\n",
168 cu3088_type[cgdev->cdev[0]->id.driver_info]);
169}
170
171static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write);
172static DEVICE_ATTR(protocol, 0644, ctcm_proto_show, ctcm_proto_store);
173static DEVICE_ATTR(type, 0444, ctcm_type_show, NULL);
174static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
175
176static struct attribute *ctcm_attr[] = {
177 &dev_attr_protocol.attr,
178 &dev_attr_type.attr,
179 &dev_attr_buffer.attr,
180 NULL,
181};
182
183static struct attribute_group ctcm_attr_group = {
184 .attrs = ctcm_attr,
185};
186
187int ctcm_add_attributes(struct device *dev)
188{
189 int rc;
190
191 rc = device_create_file(dev, &dev_attr_stats);
192
193 return rc;
194}
195
void ctcm_remove_attributes(struct device *dev)
{
	/* counterpart of ctcm_add_attributes(): drop the stats file */
	device_remove_file(dev, &dev_attr_stats);
}
200
int ctcm_add_files(struct device *dev)
{
	/* attach the protocol/type/buffer attribute group */
	return sysfs_create_group(&dev->kobj, &ctcm_attr_group);
}
205
void ctcm_remove_files(struct device *dev)
{
	/* counterpart of ctcm_add_files(): drop the attribute group */
	sysfs_remove_group(&dev->kobj, &ctcm_attr_group);
}
210
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
deleted file mode 100644
index 77a503139e32..000000000000
--- a/drivers/s390/net/ctcmain.c
+++ /dev/null
@@ -1,3062 +0,0 @@
1/*
2 * CTC / ESCON network driver
3 *
4 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6 * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
7 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *			      Peter Tiedemann (ptiedem@de.ibm.com)
9 * Driver Model stuff by : Cornelia Huck <cornelia.huck@de.ibm.com>
10 *
11 * Documentation used:
12 * - Principles of Operation (IBM doc#: SA22-7201-06)
13 * - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
14 * - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
15 * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
16 * - ESCON I/O Interface (IBM doc#: SA22-7202-029
17 *
18 * and the source of the original CTC driver by:
19 * Dieter Wellerdiek (wel@de.ibm.com)
20 * Martin Schwidefsky (schwidefsky@de.ibm.com)
21 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
22 * Jochen Röhrig (roehrig@de.ibm.com)
23 *
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License as published by
26 * the Free Software Foundation; either version 2, or (at your option)
27 * any later version.
28 *
29 * This program is distributed in the hope that it will be useful,
30 * but WITHOUT ANY WARRANTY; without even the implied warranty of
31 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
32 * GNU General Public License for more details.
33 *
34 * You should have received a copy of the GNU General Public License
35 * along with this program; if not, write to the Free Software
36 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
37 *
38 */
39#undef DEBUG
40#include <linux/module.h>
41#include <linux/init.h>
42#include <linux/kernel.h>
43#include <linux/slab.h>
44#include <linux/errno.h>
45#include <linux/types.h>
46#include <linux/interrupt.h>
47#include <linux/timer.h>
48#include <linux/bitops.h>
49
50#include <linux/signal.h>
51#include <linux/string.h>
52
53#include <linux/ip.h>
54#include <linux/if_arp.h>
55#include <linux/tcp.h>
56#include <linux/skbuff.h>
57#include <linux/ctype.h>
58#include <net/dst.h>
59
60#include <asm/io.h>
61#include <asm/ccwdev.h>
62#include <asm/ccwgroup.h>
63#include <asm/uaccess.h>
64
65#include <asm/idals.h>
66
67#include "fsm.h"
68#include "cu3088.h"
69
70#include "ctcdbug.h"
71#include "ctcmain.h"
72
73MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
74MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
75MODULE_LICENSE("GPL");
/**
 * States of the interface statemachine.
 * Must stay in sync (same order) with dev_state_names[] below.
 */
enum dev_states {
	DEV_STATE_STOPPED,
	DEV_STATE_STARTWAIT_RXTX,
	DEV_STATE_STARTWAIT_RX,
	DEV_STATE_STARTWAIT_TX,
	DEV_STATE_STOPWAIT_RXTX,
	DEV_STATE_STOPWAIT_RX,
	DEV_STATE_STOPWAIT_TX,
	DEV_STATE_RUNNING,
	/**
	 * MUST be always the last element!!
	 */
	CTC_NR_DEV_STATES
};
93
/* Human-readable names, indexed by enum dev_states. */
static const char *dev_state_names[] = {
	"Stopped",
	"StartWait RXTX",
	"StartWait RX",
	"StartWait TX",
	"StopWait RXTX",
	"StopWait RX",
	"StopWait TX",
	"Running",
};
104
/**
 * Events of the interface statemachine.
 * Must stay in sync (same order) with dev_event_names[] below.
 */
enum dev_events {
	DEV_EVENT_START,
	DEV_EVENT_STOP,
	DEV_EVENT_RXUP,
	DEV_EVENT_TXUP,
	DEV_EVENT_RXDOWN,
	DEV_EVENT_TXDOWN,
	DEV_EVENT_RESTART,
	/**
	 * MUST be always the last element!!
	 */
	CTC_NR_DEV_EVENTS
};
121
/* Human-readable names, indexed by enum dev_events. */
static const char *dev_event_names[] = {
	"Start",
	"Stop",
	"RX up",
	"TX up",
	"RX down",
	"TX down",
	"Restart",
};
131
/**
 * Events of the channel statemachine.
 * Must stay in sync (same order) with ch_event_names[] below.
 */
enum ch_events {
	/**
	 * Events, representing return code of
	 * I/O operations (ccw_device_start, ccw_device_halt et al.)
	 */
	CH_EVENT_IO_SUCCESS,
	CH_EVENT_IO_EBUSY,
	CH_EVENT_IO_ENODEV,
	CH_EVENT_IO_EIO,
	CH_EVENT_IO_UNKNOWN,

	CH_EVENT_ATTNBUSY,
	CH_EVENT_ATTN,
	CH_EVENT_BUSY,

	/**
	 * Events, representing unit-check
	 */
	CH_EVENT_UC_RCRESET,
	CH_EVENT_UC_RSRESET,
	CH_EVENT_UC_TXTIMEOUT,
	CH_EVENT_UC_TXPARITY,
	CH_EVENT_UC_HWFAIL,
	CH_EVENT_UC_RXPARITY,
	CH_EVENT_UC_ZERO,
	CH_EVENT_UC_UNKNOWN,

	/**
	 * Events, representing subchannel-check
	 */
	CH_EVENT_SC_UNKNOWN,

	/**
	 * Events, representing machine checks
	 */
	CH_EVENT_MC_FAIL,
	CH_EVENT_MC_GOOD,

	/**
	 * Event, representing normal IRQ
	 */
	CH_EVENT_IRQ,
	CH_EVENT_FINSTAT,

	/**
	 * Event, representing timer expiry.
	 */
	CH_EVENT_TIMER,

	/**
	 * Events, representing commands from upper levels.
	 */
	CH_EVENT_START,
	CH_EVENT_STOP,

	/**
	 * MUST be always the last element!!
	 */
	NR_CH_EVENTS,
};
195
/**
 * States of the channel statemachine.
 * Must stay in sync (same order) with ch_state_names[] below.
 */
enum ch_states {
	/**
	 * Channel not assigned to any device,
	 * initial state, direction invalid
	 */
	CH_STATE_IDLE,

	/**
	 * Channel assigned but not operating
	 */
	CH_STATE_STOPPED,
	CH_STATE_STARTWAIT,
	CH_STATE_STARTRETRY,
	CH_STATE_SETUPWAIT,
	CH_STATE_RXINIT,
	CH_STATE_TXINIT,
	CH_STATE_RX,
	CH_STATE_TX,
	CH_STATE_RXIDLE,
	CH_STATE_TXIDLE,
	CH_STATE_RXERR,
	CH_STATE_TXERR,
	CH_STATE_TERM,
	CH_STATE_DTERM,
	CH_STATE_NOTOP,

	/**
	 * MUST be always the last element!!
	 */
	NR_CH_STATES,
};
230
/* Bitmask controlling which ctc_pr_* messages are emitted. */
static int loglevel = CTC_LOGLEVEL_DEFAULT;

/**
 * Linked list of all detected channels.
 */
static struct channel *channels = NULL;
237
238/**
239 * Print Banner.
240 */
241static void
242print_banner(void)
243{
244 static int printed = 0;
245
246 if (printed)
247 return;
248
249 printk(KERN_INFO "CTC driver initialized\n");
250 printed = 1;
251}
252
253/**
254 * Return type of a detected device.
255 */
256static enum channel_types
257get_channel_type(struct ccw_device_id *id)
258{
259 enum channel_types type = (enum channel_types) id->driver_info;
260
261 if (type == channel_type_ficon)
262 type = channel_type_escon;
263
264 return type;
265}
266
/* Human-readable names, indexed by enum ch_events. */
static const char *ch_event_names[] = {
	"ccw_device success",
	"ccw_device busy",
	"ccw_device enodev",
	"ccw_device ioerr",
	"ccw_device unknown",

	"Status ATTN & BUSY",
	"Status ATTN",
	"Status BUSY",

	"Unit check remote reset",
	"Unit check remote system reset",
	"Unit check TX timeout",
	"Unit check TX parity",
	"Unit check Hardware failure",
	"Unit check RX parity",
	"Unit check ZERO",
	"Unit check Unknown",

	"SubChannel check Unknown",

	"Machine check failure",
	"Machine check operational",

	"IRQ normal",
	"IRQ final",

	"Timer",

	"Start",
	"Stop",
};
300
/* Human-readable names, indexed by enum ch_states. */
static const char *ch_state_names[] = {
	"Idle",
	"Stopped",
	"StartWait",
	"StartRetry",
	"SetupWait",
	"RX init",
	"TX init",
	"RX",
	"TX",
	"RX idle",
	"TX idle",
	"RX error",
	"TX error",
	"Terminating",
	"Restarting",
	"Not operational",
};
319
#ifdef DEBUG
/**
 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
 *
 * @param skb	 The sk_buff to dump.
 * @param offset Offset relative to skb-data, where to start the dump.
 */
static void
ctc_dump_skb(struct sk_buff *skb, int offset)
{
	unsigned char *p = skb->data;
	__u16 bl;
	struct ll_header *header;
	int i;

	if (!(loglevel & CTC_LOGLEVEL_DEBUG))
		return;
	p += offset;
	/* First two bytes at the offset hold the block length. */
	bl = *((__u16 *) p);
	p += 2;
	header = (struct ll_header *) p;
	p -= 2;

	printk(KERN_DEBUG "dump:\n");
	printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);

	printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
	       header->length);
	printk(KERN_DEBUG "h->type=%04x\n", header->type);
	printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
	/* Cap the hex dump at 16 bytes. */
	if (bl > 16)
		bl = 16;
	printk(KERN_DEBUG "data: ");
	for (i = 0; i < bl; i++)
		printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
	printk("\n");
}
#else
/* No-op stub so callers need no #ifdef when DEBUG is not defined. */
static inline void
ctc_dump_skb(struct sk_buff *skb, int offset)
{
}
#endif
363
/**
 * Unpack a just received skb and hand it over to
 * upper layers.
 *
 * The transfer buffer holds a 2-byte total block length followed by a
 * sequence of (ll_header, payload) records; each record is copied into
 * a freshly allocated skb and passed up via netif_rx_ni().
 *
 * @param ch	The channel where this skb has been received.
 * @param pskb	The received skb.
 */
static void
ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
{
	struct net_device *dev = ch->netdev;
	struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
	/* Leading 2 bytes of the buffer: total length of all records. */
	__u16 len = *((__u16 *) pskb->data);

	DBF_TEXT(trace, 4, __FUNCTION__);
	skb_put(pskb, 2 + LL_HEADER_LENGTH);
	skb_pull(pskb, 2);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_UNNECESSARY;
	while (len > 0) {
		struct sk_buff *skb;
		struct ll_header *header = (struct ll_header *) pskb->data;

		skb_pull(pskb, LL_HEADER_LENGTH);
		if ((ch->protocol == CTC_PROTO_S390) &&
		    (header->type != ETH_P_IP)) {

#ifndef DEBUG
			/* Rate-limit: warn only once per logflags reset. */
			if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
#endif
				/**
				 * Check packet type only if we stick strictly
				 * to S/390's protocol of OS390. This only
				 * supports IP. Otherwise allow any packet
				 * type.
				 */
				ctc_pr_warn(
					"%s Illegal packet type 0x%04x received, dropping\n",
					dev->name, header->type);
				ch->logflags |= LOG_FLAG_ILLEGALPKT;
#ifndef DEBUG
			}
#endif
#ifdef DEBUG
			ctc_dump_skb(pskb, -6);
#endif
			privptr->stats.rx_dropped++;
			privptr->stats.rx_frame_errors++;
			return;
		}
		pskb->protocol = ntohs(header->type);
		if (header->length <= LL_HEADER_LENGTH) {
#ifndef DEBUG
			if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
#endif
				ctc_pr_warn(
					"%s Illegal packet size %d "
					"received (MTU=%d blocklen=%d), "
					"dropping\n", dev->name, header->length,
					dev->mtu, len);
				ch->logflags |= LOG_FLAG_ILLEGALSIZE;
#ifndef DEBUG
			}
#endif
#ifdef DEBUG
			ctc_dump_skb(pskb, -6);
#endif
			privptr->stats.rx_dropped++;
			privptr->stats.rx_length_errors++;
			return;
		}
		header->length -= LL_HEADER_LENGTH;
		len -= LL_HEADER_LENGTH;
		/* Record must fit into both the buffer and the remaining len. */
		if ((header->length > skb_tailroom(pskb)) ||
		    (header->length > len)) {
#ifndef DEBUG
			if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
#endif
				ctc_pr_warn(
					"%s Illegal packet size %d "
					"(beyond the end of received data), "
					"dropping\n", dev->name, header->length);
				ch->logflags |= LOG_FLAG_OVERRUN;
#ifndef DEBUG
			}
#endif
#ifdef DEBUG
			ctc_dump_skb(pskb, -6);
#endif
			privptr->stats.rx_dropped++;
			privptr->stats.rx_length_errors++;
			return;
		}
		skb_put(pskb, header->length);
		skb_reset_mac_header(pskb);
		len -= header->length;
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
#ifndef DEBUG
			if (!(ch->logflags & LOG_FLAG_NOMEM)) {
#endif
				ctc_pr_warn(
					"%s Out of memory in ctc_unpack_skb\n",
					dev->name);
				ch->logflags |= LOG_FLAG_NOMEM;
#ifndef DEBUG
			}
#endif
			privptr->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		/* NOTE(review): ip_summed is (re)set here on pskb, not on the
		 * newly allocated skb that is actually handed up the stack —
		 * looks like 'skb' may have been intended; confirm before
		 * changing. */
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		/**
		 * reset logflags
		 */
		ch->logflags = 0;
		privptr->stats.rx_packets++;
		privptr->stats.rx_bytes += skb->len;
		netif_rx_ni(skb);
		dev->last_rx = jiffies;
		if (len > 0) {
			skb_pull(pskb, header->length);
			if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
#ifndef DEBUG
				if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
#endif
					ctc_pr_warn(
						"%s Overrun in ctc_unpack_skb\n",
						dev->name);
					ch->logflags |= LOG_FLAG_OVERRUN;
#ifndef DEBUG
				}
#endif
				return;
			}
			skb_put(pskb, LL_HEADER_LENGTH);
		}
	}
}
508
509/**
510 * Check return code of a preceeding ccw_device call, halt_IO etc...
511 *
512 * @param ch The channel, the error belongs to.
513 * @param return_code The error code to inspect.
514 */
515static void
516ccw_check_return_code(struct channel *ch, int return_code, char *msg)
517{
518 DBF_TEXT(trace, 5, __FUNCTION__);
519 switch (return_code) {
520 case 0:
521 fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
522 break;
523 case -EBUSY:
524 ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg);
525 fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
526 break;
527 case -ENODEV:
528 ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
529 ch->id, msg);
530 fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
531 break;
532 case -EIO:
533 ctc_pr_emerg("%s (%s): Status pending... \n",
534 ch->id, msg);
535 fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
536 break;
537 default:
538 ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
539 ch->id, msg, return_code);
540 fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
541 }
542}
543
/**
 * Check sense of a unit check.
 * Decodes the sense byte into the matching channel FSM event; the
 * order of the bit tests below establishes priority between multiple
 * set sense bits.
 *
 * @param ch	The channel, the sense code belongs to.
 * @param sense	The sense code to inspect.
 */
static void
ccw_unit_check(struct channel *ch, unsigned char sense)
{
	DBF_TEXT(trace, 5, __FUNCTION__);
	if (sense & SNS0_INTERVENTION_REQ) {
		/* Bit 0x01 distinguishes interface disconnect from
		 * a full remote system reset. */
		if (sense & 0x01) {
			ctc_pr_debug("%s: Interface disc. or Sel. reset "
				     "(remote)\n", ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
		} else {
			ctc_pr_debug("%s: System reset (remote)\n", ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
		}
	} else if (sense & SNS0_EQUIPMENT_CHECK) {
		if (sense & SNS0_BUS_OUT_CHECK) {
			ctc_pr_warn("%s: Hardware malfunction (remote)\n",
				    ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
		} else {
			ctc_pr_warn("%s: Read-data parity error (remote)\n",
				    ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
		}
	} else if (sense & SNS0_BUS_OUT_CHECK) {
		if (sense & 0x04) {
			ctc_pr_warn("%s: Data-streaming timeout)\n", ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
		} else {
			ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
		}
	} else if (sense & SNS0_CMD_REJECT) {
		/* Command reject: logged only, no FSM event is raised. */
		ctc_pr_warn("%s: Command reject\n", ch->id);
	} else if (sense == 0) {
		ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
		fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
	} else {
		ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
			    ch->id, sense);
		fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
	}
}
592
593static void
594ctc_purge_skb_queue(struct sk_buff_head *q)
595{
596 struct sk_buff *skb;
597
598 DBF_TEXT(trace, 5, __FUNCTION__);
599
600 while ((skb = skb_dequeue(q))) {
601 atomic_dec(&skb->users);
602 dev_kfree_skb_irq(skb);
603 }
604}
605
/**
 * Make sure the channel's transfer skb exists and matches the current
 * max_bufsize; (re)allocate it and map it into ccw[1] if not.
 *
 * @param ch	The channel to check.
 * @param warn	Non-zero to log allocation failures.
 *
 * @return 0 on success, -ENOMEM if allocation or cda mapping failed.
 */
static int
ctc_checkalloc_buffer(struct channel *ch, int warn)
{
	DBF_TEXT(trace, 5, __FUNCTION__);
	if ((ch->trans_skb == NULL) ||
	    (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
		/* Drop the stale buffer and its CCW data address mapping. */
		if (ch->trans_skb != NULL)
			dev_kfree_skb(ch->trans_skb);
		clear_normalized_cda(&ch->ccw[1]);
		ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
						GFP_ATOMIC | GFP_DMA);
		if (ch->trans_skb == NULL) {
			if (warn)
				ctc_pr_warn(
					"%s: Couldn't alloc %s trans_skb\n",
					ch->id,
					(CHANNEL_DIRECTION(ch->flags) == READ) ?
					"RX" : "TX");
			return -ENOMEM;
		}
		ch->ccw[1].count = ch->max_bufsize;
		if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
			dev_kfree_skb(ch->trans_skb);
			ch->trans_skb = NULL;
			if (warn)
				ctc_pr_warn(
					"%s: set_normalized_cda for %s "
					"trans_skb failed, dropping packets\n",
					ch->id,
					(CHANNEL_DIRECTION(ch->flags) == READ) ?
					"RX" : "TX");
			return -ENOMEM;
		}
		/* Count is set per-transfer by the callers. */
		ch->ccw[1].count = 0;
		ch->trans_skb_data = ch->trans_skb->data;
		ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
	}
	return 0;
}
645
/**
 * Dummy NOP action for statemachines: used for state/event pairs
 * that intentionally do nothing.
 */
static void
fsm_action_nop(fsm_instance * fi, int event, void *arg)
{
}
653
654/**
655 * Actions for channel - statemachines.
656 *****************************************************************************/
657
/**
 * Normal data has been send. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state. If more data is waiting in the collect queue,
 * pack it into the transfer buffer and start a chained TX instead.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_txdone(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;
	struct ctc_priv *privptr = dev->priv;
	struct sk_buff *skb;
	int first = 1;
	int i;
	unsigned long duration;
	struct timespec done_stamp = current_kernel_time();

	DBF_TEXT(trace, 4, __FUNCTION__);

	/* Track the longest observed TX round-trip in microseconds. */
	duration =
	    (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
	    (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.count != 0)
		ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
			     dev->name, ch->irb->scsw.count);
	fsm_deltimer(&ch->timer);
	/* Account and free all skbs of the just-completed transfer. */
	while ((skb = skb_dequeue(&ch->io_queue))) {
		privptr->stats.tx_packets++;
		privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
		if (first) {
			/* The 2-byte block length prefix counts only once. */
			privptr->stats.tx_bytes += 2;
			first = 0;
		}
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);
	if (ch->collect_len > 0) {
		int rc;

		if (ctc_checkalloc_buffer(ch, 1)) {
			spin_unlock(&ch->collect_lock);
			return;
		}
		/* Rewind the transfer skb and pack all collected skbs
		 * behind a fresh 2-byte block length. */
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		if (ch->prof.maxmulti < (ch->collect_len + 2))
			ch->prof.maxmulti = ch->collect_len + 2;
		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
		*((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
		i = 0;
		while ((skb = skb_dequeue(&ch->collect_queue))) {
			skb_copy_from_linear_data(skb, skb_put(ch->trans_skb,
							       skb->len),
						  skb->len);
			privptr->stats.tx_packets++;
			privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			i++;
		}
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		ch->ccw[1].count = ch->trans_skb->len;
		fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
		ch->prof.send_stamp = current_kernel_time();
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
				      (unsigned long) ch, 0xff, 0);
		ch->prof.doios_multi++;
		if (rc != 0) {
			privptr->stats.tx_dropped += i;
			privptr->stats.tx_errors += i;
			fsm_deltimer(&ch->timer);
			ccw_check_return_code(ch, rc, "chained TX");
		}
	} else {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CH_STATE_TXIDLE);
	}
	ctc_clear_busy(dev);
}
749
750/**
751 * Initial data is sent.
752 * Notify device statemachine that we are up and
753 * running.
754 *
755 * @param fi An instance of a channel statemachine.
756 * @param event The event, just happened.
757 * @param arg Generic pointer, casted from channel * upon call.
758 */
759static void
760ch_action_txidle(fsm_instance * fi, int event, void *arg)
761{
762 struct channel *ch = (struct channel *) arg;
763
764 DBF_TEXT(trace, 4, __FUNCTION__);
765 fsm_deltimer(&ch->timer);
766 fsm_newstate(fi, CH_STATE_TXIDLE);
767 fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
768 ch->netdev);
769}
770
/**
 * Got normal data, check for sanity, queue it up, allocate new buffer
 * trigger bottom half, and initiate next read.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_rx(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;
	struct ctc_priv *privptr = dev->priv;
	/* Bytes actually transferred = buffer size minus residual count. */
	int len = ch->max_bufsize - ch->irb->scsw.count;
	struct sk_buff *skb = ch->trans_skb;
	__u16 block_len = *((__u16 *) skb->data);
	int check_len;
	int rc;

	DBF_TEXT(trace, 4, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	if (len < 8) {
		ctc_pr_debug("%s: got packet with length %d < 8\n",
			     dev->name, len);
		privptr->stats.rx_dropped++;
		privptr->stats.rx_length_errors++;
		goto again;
	}
	if (len > ch->max_bufsize) {
		ctc_pr_debug("%s: got packet with length %d > %d\n",
			     dev->name, len, ch->max_bufsize);
		privptr->stats.rx_dropped++;
		privptr->stats.rx_length_errors++;
		goto again;
	}

	/**
	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
	 */
	switch (ch->protocol) {
	case CTC_PROTO_S390:
	case CTC_PROTO_OS390:
		check_len = block_len + 2;
		break;
	default:
		check_len = block_len;
		break;
	}
	if ((len < block_len) || (len > check_len)) {
		ctc_pr_debug("%s: got block length %d != rx length %d\n",
			     dev->name, block_len, len);
#ifdef DEBUG
		ctc_dump_skb(skb, 0);
#endif
		*((__u16 *) skb->data) = len;
		privptr->stats.rx_dropped++;
		privptr->stats.rx_length_errors++;
		goto again;
	}
	/* Strip the 2-byte block length prefix before unpacking. */
	block_len -= 2;
	if (block_len > 0) {
		*((__u16 *) skb->data) = block_len;
		ctc_unpack_skb(ch, skb);
	}
 again:
	/* Rewind the transfer skb and start the next read. */
	skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(skb);
	skb->len = 0;
	if (ctc_checkalloc_buffer(ch, 1))
		return;
	ch->ccw[1].count = ch->max_bufsize;
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
	if (rc != 0)
		ccw_check_return_code(ch, rc, "normal RX");
}
847
848static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);
849
/**
 * Initialize connection by sending a __u16 of value 0.
 * (CTC_INITIAL_BLOCKLEN, i.e. an empty block). Special-cases the
 * OS/390 protocol, which skips this handshake on SETUPWAIT.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_firstio(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	int rc;

	DBF_TEXT(trace, 4, __FUNCTION__);

	if (fsm_getstate(fi) == CH_STATE_TXIDLE)
		ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
	fsm_deltimer(&ch->timer);
	if (ctc_checkalloc_buffer(ch, 1))
		return;
	if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
	    (ch->protocol == CTC_PROTO_OS390)) {
		/* OS/390 resp. z/OS */
		if (CHANNEL_DIRECTION(ch->flags) == READ) {
			*((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
			fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
				     CH_EVENT_TIMER, ch);
			ch_action_rxidle(fi, event, arg);
		} else {
			struct net_device *dev = ch->netdev;
			fsm_newstate(fi, CH_STATE_TXIDLE);
			fsm_event(((struct ctc_priv *) dev->priv)->fsm,
				  DEV_EVENT_TXUP, dev);
		}
		return;
	}

	/**
	 * Don't setup a timer for receiving the initial RX frame
	 * if in compatibility mode, since VM TCP delays the initial
	 * frame until it has some data to send.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
	    (ch->protocol != CTC_PROTO_S390))
		fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);

	*((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
	ch->ccw[1].count = 2;	/* Transfer only length */

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
		     ? CH_STATE_RXINIT : CH_STATE_TXINIT);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CH_STATE_SETUPWAIT);
		ccw_check_return_code(ch, rc, "init IO");
	}
	/**
	 * If in compatibility mode since we don't setup a timer, we
	 * also signal RX channel up immediately. This enables us
	 * to send packets early which in turn usually triggers some
	 * reply from VM TCP which brings up the RX channel to it's
	 * final state.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
	    (ch->protocol == CTC_PROTO_S390)) {
		struct net_device *dev = ch->netdev;
		fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
			  dev);
	}
}
921
/**
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running. Otherwise fall back to redoing the initial handshake.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_rxidle(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;
	__u16 buflen;
	int rc;

	DBF_TEXT(trace, 4, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	buflen = *((__u16 *) ch->trans_skb->data);
#ifdef DEBUG
	ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
#endif
	if (buflen >= CTC_INITIAL_BLOCKLEN) {
		if (ctc_checkalloc_buffer(ch, 1))
			return;
		/* Handshake done: switch to full-size reads. */
		ch->ccw[1].count = ch->max_bufsize;
		fsm_newstate(fi, CH_STATE_RXIDLE);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
				      (unsigned long) ch, 0xff, 0);
		if (rc != 0) {
			fsm_newstate(fi, CH_STATE_RXINIT);
			ccw_check_return_code(ch, rc, "initial RX");
		} else
			fsm_event(((struct ctc_priv *) dev->priv)->fsm,
				  DEV_EVENT_RXUP, dev);
	} else {
		ctc_pr_debug("%s: Initial RX count %d not %d\n",
			     dev->name, buflen, CTC_INITIAL_BLOCKLEN);
		ch_action_firstio(fi, event, arg);
	}
}
964
/**
 * Set channel into extended mode.
 * Issues the set-extended-mode CCW (ccw[6]); on failure reverts to
 * STARTWAIT and reports the error via ccw_check_return_code().
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_setmode(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	int rc;
	unsigned long saveflags;

	DBF_TEXT(trace, 4, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
	fsm_newstate(fi, CH_STATE_SETUPWAIT);
	saveflags = 0;	/* avoids compiler warning with
			   spin_unlock_irqrestore */
	/* All other callers already hold the ccw device lock;
	 * only the timer path must take it itself. */
	if (event == CH_EVENT_TIMER)	// only for timer not yet locked
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
	if (event == CH_EVENT_TIMER)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CH_STATE_STARTWAIT);
		ccw_check_return_code(ch, rc, "set Mode");
	} else
		ch->retry = 0;
}
997
/**
 * Setup channel.
 * Builds the CCW program (prepare / read-or-write / noop, duplicated at
 * ccw[3..5]), allocates the transfer buffer and issues an initial halt
 * to bring the subchannel into a defined state.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_start(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	unsigned long saveflags;
	int rc;
	struct net_device *dev;

	DBF_TEXT(trace, 4, __FUNCTION__);
	if (ch == NULL) {
		ctc_pr_warn("ch_action_start ch=NULL\n");
		return;
	}
	if (ch->netdev == NULL) {
		ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
		return;
	}
	dev = ch->netdev;

#ifdef DEBUG
	ctc_pr_debug("%s: %s channel start\n", dev->name,
		     (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
#endif

	/* Drop any stale transfer buffer from a previous run. */
	if (ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb(ch->trans_skb);
		ch->trans_skb = NULL;
	}
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		ch->ccw[1].cmd_code = CCW_CMD_READ;
		ch->ccw[1].flags = CCW_FLAG_SLI;
		ch->ccw[1].count = 0;
	} else {
		ch->ccw[1].cmd_code = CCW_CMD_WRITE;
		ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
		ch->ccw[1].count = 0;
	}
	if (ctc_checkalloc_buffer(ch, 0)) {
		/* Non-fatal: first transfer will retry the allocation. */
		ctc_pr_notice(
			"%s: Could not allocate %s trans_skb, delaying "
			"allocation until first transfer\n",
			dev->name,
			(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
	}

	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
	ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
	ch->ccw[0].count = 0;
	ch->ccw[0].cda = 0;
	ch->ccw[2].cmd_code = CCW_CMD_NOOP;	/* jointed CE + DE */
	ch->ccw[2].flags = CCW_FLAG_SLI;
	ch->ccw[2].count = 0;
	ch->ccw[2].cda = 0;
	/* Duplicate the 3-CCW program into slots 3..5. */
	memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
	ch->ccw[4].cda = 0;
	ch->ccw[4].flags &= ~CCW_FLAG_IDA;

	fsm_newstate(fi, CH_STATE_STARTWAIT);
	fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY)
			fsm_deltimer(&ch->timer);
		ccw_check_return_code(ch, rc, "initial HaltIO");
	}
#ifdef DEBUG
	ctc_pr_debug("ctc: %s(): leaving\n", __func__);
#endif
}
1077
/**
 * Shutdown a channel.
 * Issues a halt on the ccw device; on failure (other than -EBUSY)
 * restores the previous FSM state.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_haltio(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	unsigned long saveflags;
	int rc;
	int oldstate;

	DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
	saveflags = 0;	/* avoids comp warning with
			   spin_unlock_irqrestore */
	/* The STOP path is the only caller not already holding the
	 * ccw device lock. */
	if (event == CH_EVENT_STOP)	// only for STOP not yet locked
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CH_STATE_TERM);
	rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
	if (event == CH_EVENT_STOP)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY) {
			fsm_deltimer(&ch->timer);
			fsm_newstate(fi, oldstate);
		}
		ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
	}
}
1113
/**
 * A channel has successfully been halted.
 * Cleanup it's queue and notify interface statemachine.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_stopped(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	fsm_newstate(fi, CH_STATE_STOPPED);
	if (ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb(ch->trans_skb);
		ch->trans_skb = NULL;
	}
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		skb_queue_purge(&ch->io_queue);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_RXDOWN, dev);
	} else {
		/* TX side: queued skbs carry an extra reference, so use
		 * ctc_purge_skb_queue() instead of skb_queue_purge(). */
		ctc_purge_skb_queue(&ch->io_queue);
		spin_lock(&ch->collect_lock);
		ctc_purge_skb_queue(&ch->collect_queue);
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_TXDOWN, dev);
	}
}
1150
/**
 * A stop command from device statemachine arrived and we are in
 * not operational mode. Set state to stopped.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_stop(fsm_instance * fi, int event, void *arg)
{
	fsm_newstate(fi, CH_STATE_STOPPED);
}
1164
1165/**
1166 * A machine check for no path, not operational status or gone device has
1167 * happened.
1168 * Cleanup queue and notify interface statemachine.
1169 *
1170 * @param fi An instance of a channel statemachine.
1171 * @param event The event, just happened.
1172 * @param arg Generic pointer, casted from channel * upon call.
1173 */
1174static void
1175ch_action_fail(fsm_instance * fi, int event, void *arg)
1176{
1177 struct channel *ch = (struct channel *) arg;
1178 struct net_device *dev = ch->netdev;
1179
1180 DBF_TEXT(trace, 3, __FUNCTION__);
1181 fsm_deltimer(&ch->timer);
1182 fsm_newstate(fi, CH_STATE_NOTOP);
1183 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1184 skb_queue_purge(&ch->io_queue);
1185 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1186 DEV_EVENT_RXDOWN, dev);
1187 } else {
1188 ctc_purge_skb_queue(&ch->io_queue);
1189 spin_lock(&ch->collect_lock);
1190 ctc_purge_skb_queue(&ch->collect_queue);
1191 ch->collect_len = 0;
1192 spin_unlock(&ch->collect_lock);
1193 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1194 DEV_EVENT_TXDOWN, dev);
1195 }
1196}
1197
1198/**
1199 * Handle error during setup of channel.
1200 *
1201 * @param fi An instance of a channel statemachine.
1202 * @param event The event, just happened.
1203 * @param arg Generic pointer, casted from channel * upon call.
1204 */
1205static void
1206ch_action_setuperr(fsm_instance * fi, int event, void *arg)
1207{
1208 struct channel *ch = (struct channel *) arg;
1209 struct net_device *dev = ch->netdev;
1210
1211 DBF_TEXT(setup, 3, __FUNCTION__);
1212 /**
1213 * Special case: Got UC_RCRESET on setmode.
1214 * This means that remote side isn't setup. In this case
1215 * simply retry after some 10 secs...
1216 */
1217 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1218 ((event == CH_EVENT_UC_RCRESET) ||
1219 (event == CH_EVENT_UC_RSRESET))) {
1220 fsm_newstate(fi, CH_STATE_STARTRETRY);
1221 fsm_deltimer(&ch->timer);
1222 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1223 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1224 int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1225 if (rc != 0)
1226 ccw_check_return_code(
1227 ch, rc, "HaltIO in ch_action_setuperr");
1228 }
1229 return;
1230 }
1231
1232 ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
1233 dev->name, ch_event_names[event],
1234 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
1235 fsm_getstate_str(fi));
1236 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1237 fsm_newstate(fi, CH_STATE_RXERR);
1238 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1239 DEV_EVENT_RXDOWN, dev);
1240 } else {
1241 fsm_newstate(fi, CH_STATE_TXERR);
1242 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1243 DEV_EVENT_TXDOWN, dev);
1244 }
1245}
1246
1247/**
1248 * Restart a channel after an error.
1249 *
1250 * @param fi An instance of a channel statemachine.
1251 * @param event The event, just happened.
1252 * @param arg Generic pointer, casted from channel * upon call.
1253 */
1254static void
1255ch_action_restart(fsm_instance * fi, int event, void *arg)
1256{
1257 unsigned long saveflags;
1258 int oldstate;
1259 int rc;
1260
1261 struct channel *ch = (struct channel *) arg;
1262 struct net_device *dev = ch->netdev;
1263
1264 DBF_TEXT(trace, 3, __FUNCTION__);
1265 fsm_deltimer(&ch->timer);
1266 ctc_pr_debug("%s: %s channel restart\n", dev->name,
1267 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1268 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1269 oldstate = fsm_getstate(fi);
1270 fsm_newstate(fi, CH_STATE_STARTWAIT);
1271 saveflags = 0; /* avoids compiler warning with
1272 spin_unlock_irqrestore */
1273 if (event == CH_EVENT_TIMER) // only for timer not yet locked
1274 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1275 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1276 if (event == CH_EVENT_TIMER)
1277 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1278 if (rc != 0) {
1279 if (rc != -EBUSY) {
1280 fsm_deltimer(&ch->timer);
1281 fsm_newstate(fi, oldstate);
1282 }
1283 ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
1284 }
1285}
1286
1287/**
1288 * Handle error during RX initial handshake (exchange of
1289 * 0-length block header)
1290 *
1291 * @param fi An instance of a channel statemachine.
1292 * @param event The event, just happened.
1293 * @param arg Generic pointer, casted from channel * upon call.
1294 */
1295static void
1296ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
1297{
1298 struct channel *ch = (struct channel *) arg;
1299 struct net_device *dev = ch->netdev;
1300
1301 DBF_TEXT(setup, 3, __FUNCTION__);
1302 if (event == CH_EVENT_TIMER) {
1303 fsm_deltimer(&ch->timer);
1304 ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
1305 if (ch->retry++ < 3)
1306 ch_action_restart(fi, event, arg);
1307 else {
1308 fsm_newstate(fi, CH_STATE_RXERR);
1309 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1310 DEV_EVENT_RXDOWN, dev);
1311 }
1312 } else
1313 ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
1314}
1315
1316/**
1317 * Notify device statemachine if we gave up initialization
1318 * of RX channel.
1319 *
1320 * @param fi An instance of a channel statemachine.
1321 * @param event The event, just happened.
1322 * @param arg Generic pointer, casted from channel * upon call.
1323 */
1324static void
1325ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
1326{
1327 struct channel *ch = (struct channel *) arg;
1328 struct net_device *dev = ch->netdev;
1329
1330 DBF_TEXT(setup, 3, __FUNCTION__);
1331 fsm_newstate(fi, CH_STATE_RXERR);
1332 ctc_pr_warn("%s: RX initialization failed\n", dev->name);
1333 ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
1334 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1335}
1336
1337/**
1338 * Handle RX Unit check remote reset (remote disconnected)
1339 *
1340 * @param fi An instance of a channel statemachine.
1341 * @param event The event, just happened.
1342 * @param arg Generic pointer, casted from channel * upon call.
1343 */
1344static void
1345ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
1346{
1347 struct channel *ch = (struct channel *) arg;
1348 struct channel *ch2;
1349 struct net_device *dev = ch->netdev;
1350
1351 DBF_TEXT(trace, 3, __FUNCTION__);
1352 fsm_deltimer(&ch->timer);
1353 ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
1354 dev->name);
1355
1356 /**
1357 * Notify device statemachine
1358 */
1359 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1360 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
1361
1362 fsm_newstate(fi, CH_STATE_DTERM);
1363 ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
1364 fsm_newstate(ch2->fsm, CH_STATE_DTERM);
1365
1366 ccw_device_halt(ch->cdev, (unsigned long) ch);
1367 ccw_device_halt(ch2->cdev, (unsigned long) ch2);
1368}
1369
1370/**
1371 * Handle error during TX channel initialization.
1372 *
1373 * @param fi An instance of a channel statemachine.
1374 * @param event The event, just happened.
1375 * @param arg Generic pointer, casted from channel * upon call.
1376 */
1377static void
1378ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
1379{
1380 struct channel *ch = (struct channel *) arg;
1381 struct net_device *dev = ch->netdev;
1382
1383 DBF_TEXT(setup, 2, __FUNCTION__);
1384 if (event == CH_EVENT_TIMER) {
1385 fsm_deltimer(&ch->timer);
1386 ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
1387 if (ch->retry++ < 3)
1388 ch_action_restart(fi, event, arg);
1389 else {
1390 fsm_newstate(fi, CH_STATE_TXERR);
1391 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1392 DEV_EVENT_TXDOWN, dev);
1393 }
1394 } else
1395 ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
1396}
1397
1398/**
1399 * Handle TX timeout by retrying operation.
1400 *
1401 * @param fi An instance of a channel statemachine.
1402 * @param event The event, just happened.
1403 * @param arg Generic pointer, casted from channel * upon call.
1404 */
1405static void
1406ch_action_txretry(fsm_instance * fi, int event, void *arg)
1407{
1408 struct channel *ch = (struct channel *) arg;
1409 struct net_device *dev = ch->netdev;
1410 unsigned long saveflags;
1411
1412 DBF_TEXT(trace, 4, __FUNCTION__);
1413 fsm_deltimer(&ch->timer);
1414 if (ch->retry++ > 3) {
1415 ctc_pr_debug("%s: TX retry failed, restarting channel\n",
1416 dev->name);
1417 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1418 DEV_EVENT_TXDOWN, dev);
1419 ch_action_restart(fi, event, arg);
1420 } else {
1421 struct sk_buff *skb;
1422
1423 ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
1424 if ((skb = skb_peek(&ch->io_queue))) {
1425 int rc = 0;
1426
1427 clear_normalized_cda(&ch->ccw[4]);
1428 ch->ccw[4].count = skb->len;
1429 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1430 ctc_pr_debug(
1431 "%s: IDAL alloc failed, chan restart\n",
1432 dev->name);
1433 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1434 DEV_EVENT_TXDOWN, dev);
1435 ch_action_restart(fi, event, arg);
1436 return;
1437 }
1438 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1439 saveflags = 0; /* avoids compiler warning with
1440 spin_unlock_irqrestore */
1441 if (event == CH_EVENT_TIMER) // only for TIMER not yet locked
1442 spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
1443 saveflags);
1444 rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1445 (unsigned long) ch, 0xff, 0);
1446 if (event == CH_EVENT_TIMER)
1447 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1448 saveflags);
1449 if (rc != 0) {
1450 fsm_deltimer(&ch->timer);
1451 ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
1452 ctc_purge_skb_queue(&ch->io_queue);
1453 }
1454 }
1455 }
1456
1457}
1458
1459/**
1460 * Handle fatal errors during an I/O command.
1461 *
1462 * @param fi An instance of a channel statemachine.
1463 * @param event The event, just happened.
1464 * @param arg Generic pointer, casted from channel * upon call.
1465 */
1466static void
1467ch_action_iofatal(fsm_instance * fi, int event, void *arg)
1468{
1469 struct channel *ch = (struct channel *) arg;
1470 struct net_device *dev = ch->netdev;
1471
1472 DBF_TEXT(trace, 3, __FUNCTION__);
1473 fsm_deltimer(&ch->timer);
1474 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1475 ctc_pr_debug("%s: RX I/O error\n", dev->name);
1476 fsm_newstate(fi, CH_STATE_RXERR);
1477 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1478 DEV_EVENT_RXDOWN, dev);
1479 } else {
1480 ctc_pr_debug("%s: TX I/O error\n", dev->name);
1481 fsm_newstate(fi, CH_STATE_TXERR);
1482 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1483 DEV_EVENT_TXDOWN, dev);
1484 }
1485}
1486
1487static void
1488ch_action_reinit(fsm_instance *fi, int event, void *arg)
1489{
1490 struct channel *ch = (struct channel *)arg;
1491 struct net_device *dev = ch->netdev;
1492 struct ctc_priv *privptr = dev->priv;
1493
1494 DBF_TEXT(trace, 4, __FUNCTION__);
1495 ch_action_iofatal(fi, event, arg);
1496 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
1497}
1498
1499/**
1500 * The statemachine for a channel.
1501 */
1502static const fsm_node ch_fsm[] = {
1503 {CH_STATE_STOPPED, CH_EVENT_STOP, fsm_action_nop },
1504 {CH_STATE_STOPPED, CH_EVENT_START, ch_action_start },
1505 {CH_STATE_STOPPED, CH_EVENT_FINSTAT, fsm_action_nop },
1506 {CH_STATE_STOPPED, CH_EVENT_MC_FAIL, fsm_action_nop },
1507
1508 {CH_STATE_NOTOP, CH_EVENT_STOP, ch_action_stop },
1509 {CH_STATE_NOTOP, CH_EVENT_START, fsm_action_nop },
1510 {CH_STATE_NOTOP, CH_EVENT_FINSTAT, fsm_action_nop },
1511 {CH_STATE_NOTOP, CH_EVENT_MC_FAIL, fsm_action_nop },
1512 {CH_STATE_NOTOP, CH_EVENT_MC_GOOD, ch_action_start },
1513
1514 {CH_STATE_STARTWAIT, CH_EVENT_STOP, ch_action_haltio },
1515 {CH_STATE_STARTWAIT, CH_EVENT_START, fsm_action_nop },
1516 {CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ch_action_setmode },
1517 {CH_STATE_STARTWAIT, CH_EVENT_TIMER, ch_action_setuperr },
1518 {CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1519 {CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1520 {CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1521
1522 {CH_STATE_STARTRETRY, CH_EVENT_STOP, ch_action_haltio },
1523 {CH_STATE_STARTRETRY, CH_EVENT_TIMER, ch_action_setmode },
1524 {CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, fsm_action_nop },
1525 {CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL, ch_action_fail },
1526
1527 {CH_STATE_SETUPWAIT, CH_EVENT_STOP, ch_action_haltio },
1528 {CH_STATE_SETUPWAIT, CH_EVENT_START, fsm_action_nop },
1529 {CH_STATE_SETUPWAIT, CH_EVENT_FINSTAT, ch_action_firstio },
1530 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RCRESET, ch_action_setuperr },
1531 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ch_action_setuperr },
1532 {CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ch_action_setmode },
1533 {CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1534 {CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1535 {CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1536
1537 {CH_STATE_RXINIT, CH_EVENT_STOP, ch_action_haltio },
1538 {CH_STATE_RXINIT, CH_EVENT_START, fsm_action_nop },
1539 {CH_STATE_RXINIT, CH_EVENT_FINSTAT, ch_action_rxidle },
1540 {CH_STATE_RXINIT, CH_EVENT_UC_RCRESET, ch_action_rxiniterr },
1541 {CH_STATE_RXINIT, CH_EVENT_UC_RSRESET, ch_action_rxiniterr },
1542 {CH_STATE_RXINIT, CH_EVENT_TIMER, ch_action_rxiniterr },
1543 {CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ch_action_rxinitfail },
1544 {CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1545 {CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1546 {CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ch_action_firstio },
1547 {CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1548
1549 {CH_STATE_RXIDLE, CH_EVENT_STOP, ch_action_haltio },
1550 {CH_STATE_RXIDLE, CH_EVENT_START, fsm_action_nop },
1551 {CH_STATE_RXIDLE, CH_EVENT_FINSTAT, ch_action_rx },
1552 {CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ch_action_rxdisc },
1553// {CH_STATE_RXIDLE, CH_EVENT_UC_RSRESET, ch_action_rxretry },
1554 {CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1555 {CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1556 {CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1557 {CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ch_action_rx },
1558
1559 {CH_STATE_TXINIT, CH_EVENT_STOP, ch_action_haltio },
1560 {CH_STATE_TXINIT, CH_EVENT_START, fsm_action_nop },
1561 {CH_STATE_TXINIT, CH_EVENT_FINSTAT, ch_action_txidle },
1562 {CH_STATE_TXINIT, CH_EVENT_UC_RCRESET, ch_action_txiniterr },
1563 {CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ch_action_txiniterr },
1564 {CH_STATE_TXINIT, CH_EVENT_TIMER, ch_action_txiniterr },
1565 {CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1566 {CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1567 {CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1568
1569 {CH_STATE_TXIDLE, CH_EVENT_STOP, ch_action_haltio },
1570 {CH_STATE_TXIDLE, CH_EVENT_START, fsm_action_nop },
1571 {CH_STATE_TXIDLE, CH_EVENT_FINSTAT, ch_action_firstio },
1572 {CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, fsm_action_nop },
1573 {CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, fsm_action_nop },
1574 {CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1575 {CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1576 {CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1577
1578 {CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop },
1579 {CH_STATE_TERM, CH_EVENT_START, ch_action_restart },
1580 {CH_STATE_TERM, CH_EVENT_FINSTAT, ch_action_stopped },
1581 {CH_STATE_TERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1582 {CH_STATE_TERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1583 {CH_STATE_TERM, CH_EVENT_MC_FAIL, ch_action_fail },
1584
1585 {CH_STATE_DTERM, CH_EVENT_STOP, ch_action_haltio },
1586 {CH_STATE_DTERM, CH_EVENT_START, ch_action_restart },
1587 {CH_STATE_DTERM, CH_EVENT_FINSTAT, ch_action_setmode },
1588 {CH_STATE_DTERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1589 {CH_STATE_DTERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1590 {CH_STATE_DTERM, CH_EVENT_MC_FAIL, ch_action_fail },
1591
1592 {CH_STATE_TX, CH_EVENT_STOP, ch_action_haltio },
1593 {CH_STATE_TX, CH_EVENT_START, fsm_action_nop },
1594 {CH_STATE_TX, CH_EVENT_FINSTAT, ch_action_txdone },
1595 {CH_STATE_TX, CH_EVENT_UC_RCRESET, ch_action_txretry },
1596 {CH_STATE_TX, CH_EVENT_UC_RSRESET, ch_action_txretry },
1597 {CH_STATE_TX, CH_EVENT_TIMER, ch_action_txretry },
1598 {CH_STATE_TX, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1599 {CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_reinit },
1600 {CH_STATE_TX, CH_EVENT_MC_FAIL, ch_action_fail },
1601
1602 {CH_STATE_RXERR, CH_EVENT_STOP, ch_action_haltio },
1603 {CH_STATE_TXERR, CH_EVENT_STOP, ch_action_haltio },
1604 {CH_STATE_TXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1605 {CH_STATE_RXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1606};
1607
1608static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
1609
1610/**
1611 * Functions related to setup and device detection.
1612 *****************************************************************************/
1613
/**
 * Compare two channel ids for sorting the channel list.
 * Skips the first 5 characters of each id (the "ch-" prefix built in
 * add_channel() plus two more characters) and compares what follows
 * numerically as a hex value.
 * NOTE(review): with bus ids of the form "x.y.devno" the hex parse
 * stops at the next '.' — confirm the intended portion of the id is
 * actually compared here.
 *
 * @param id1 First id to compare.
 * @param id2 Second id to compare.
 *
 * @return nonzero if id1 orders before id2.
 */
static inline int
less_than(char *id1, char *id2)
{
	int dev1, dev2, i;

	for (i = 0; i < 5; i++) {
		id1++;
		id2++;
	}
	dev1 = simple_strtoul(id1, &id1, 16);
	dev2 = simple_strtoul(id2, &id2, 16);

	return (dev1 < dev2);
}
1628
1629/**
1630 * Add a new channel to the list of channels.
1631 * Keeps the channel list sorted.
1632 *
1633 * @param cdev The ccw_device to be added.
1634 * @param type The type class of the new channel.
1635 *
1636 * @return 0 on success, !0 on error.
1637 */
1638static int
1639add_channel(struct ccw_device *cdev, enum channel_types type)
1640{
1641 struct channel **c = &channels;
1642 struct channel *ch;
1643
1644 DBF_TEXT(trace, 2, __FUNCTION__);
1645 ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
1646 if (!ch) {
1647 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1648 return -1;
1649 }
1650 /* assure all flags and counters are reset */
1651 ch->ccw = kzalloc(8 * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
1652 if (!ch->ccw) {
1653 kfree(ch);
1654 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1655 return -1;
1656 }
1657
1658
1659 /**
1660 * "static" ccws are used in the following way:
1661 *
1662 * ccw[0..2] (Channel program for generic I/O):
1663 * 0: prepare
1664 * 1: read or write (depending on direction) with fixed
1665 * buffer (idal allocated once when buffer is allocated)
1666 * 2: nop
1667 * ccw[3..5] (Channel program for direct write of packets)
1668 * 3: prepare
1669 * 4: write (idal allocated on every write).
1670 * 5: nop
1671 * ccw[6..7] (Channel program for initial channel setup):
1672 * 6: set extended mode
1673 * 7: nop
1674 *
1675 * ch->ccw[0..5] are initialized in ch_action_start because
1676 * the channel's direction is yet unknown here.
1677 */
1678 ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
1679 ch->ccw[6].flags = CCW_FLAG_SLI;
1680
1681 ch->ccw[7].cmd_code = CCW_CMD_NOOP;
1682 ch->ccw[7].flags = CCW_FLAG_SLI;
1683
1684 ch->cdev = cdev;
1685 snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
1686 ch->type = type;
1687 ch->fsm = init_fsm(ch->id, ch_state_names,
1688 ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
1689 ch_fsm, CH_FSM_LEN, GFP_KERNEL);
1690 if (ch->fsm == NULL) {
1691 ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
1692 kfree(ch->ccw);
1693 kfree(ch);
1694 return -1;
1695 }
1696 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1697 ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
1698 if (!ch->irb) {
1699 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1700 kfree_fsm(ch->fsm);
1701 kfree(ch->ccw);
1702 kfree(ch);
1703 return -1;
1704 }
1705 while (*c && less_than((*c)->id, ch->id))
1706 c = &(*c)->next;
1707 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
1708 ctc_pr_debug(
1709 "ctc: add_channel: device %s already in list, "
1710 "using old entry\n", (*c)->id);
1711 kfree(ch->irb);
1712 kfree_fsm(ch->fsm);
1713 kfree(ch->ccw);
1714 kfree(ch);
1715 return 0;
1716 }
1717
1718 spin_lock_init(&ch->collect_lock);
1719
1720 fsm_settimer(ch->fsm, &ch->timer);
1721 skb_queue_head_init(&ch->io_queue);
1722 skb_queue_head_init(&ch->collect_queue);
1723 ch->next = *c;
1724 *c = ch;
1725 return 0;
1726}
1727
1728/**
1729 * Release a specific channel in the channel list.
1730 *
1731 * @param ch Pointer to channel struct to be released.
1732 */
1733static void
1734channel_free(struct channel *ch)
1735{
1736 ch->flags &= ~CHANNEL_FLAGS_INUSE;
1737 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1738}
1739
1740/**
1741 * Remove a specific channel in the channel list.
1742 *
1743 * @param ch Pointer to channel struct to be released.
1744 */
1745static void
1746channel_remove(struct channel *ch)
1747{
1748 struct channel **c = &channels;
1749
1750 DBF_TEXT(trace, 2, __FUNCTION__);
1751 if (ch == NULL)
1752 return;
1753
1754 channel_free(ch);
1755 while (*c) {
1756 if (*c == ch) {
1757 *c = ch->next;
1758 fsm_deltimer(&ch->timer);
1759 kfree_fsm(ch->fsm);
1760 clear_normalized_cda(&ch->ccw[4]);
1761 if (ch->trans_skb != NULL) {
1762 clear_normalized_cda(&ch->ccw[1]);
1763 dev_kfree_skb(ch->trans_skb);
1764 }
1765 kfree(ch->ccw);
1766 kfree(ch->irb);
1767 kfree(ch);
1768 return;
1769 }
1770 c = &((*c)->next);
1771 }
1772}
1773
1774/**
1775 * Get a specific channel from the channel list.
1776 *
1777 * @param type Type of channel we are interested in.
1778 * @param id Id of channel we are interested in.
1779 * @param direction Direction we want to use this channel for.
1780 *
1781 * @return Pointer to a channel or NULL if no matching channel available.
1782 */
1783static struct channel
1784*
1785channel_get(enum channel_types type, char *id, int direction)
1786{
1787 struct channel *ch = channels;
1788
1789 DBF_TEXT(trace, 3, __FUNCTION__);
1790#ifdef DEBUG
1791 ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
1792 __func__, id, type);
1793#endif
1794
1795 while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
1796#ifdef DEBUG
1797 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d\n",
1798 __func__, ch, ch->id, ch->type);
1799#endif
1800 ch = ch->next;
1801 }
1802#ifdef DEBUG
1803 ctc_pr_debug("ctc: %s(): ch=0x%pq (id=%s, type=%d\n",
1804 __func__, ch, ch->id, ch->type);
1805#endif
1806 if (!ch) {
1807 ctc_pr_warn("ctc: %s(): channel with id %s "
1808 "and type %d not found in channel list\n",
1809 __func__, id, type);
1810 } else {
1811 if (ch->flags & CHANNEL_FLAGS_INUSE)
1812 ch = NULL;
1813 else {
1814 ch->flags |= CHANNEL_FLAGS_INUSE;
1815 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
1816 ch->flags |= (direction == WRITE)
1817 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
1818 fsm_newstate(ch->fsm, CH_STATE_STOPPED);
1819 }
1820 }
1821 return ch;
1822}
1823
1824/**
1825 * Return the channel type by name.
1826 *
1827 * @param name Name of network interface.
1828 *
1829 * @return Type class of channel to be used for that interface.
1830 */
1831static enum channel_types inline
1832extract_channel_media(char *name)
1833{
1834 enum channel_types ret = channel_type_unknown;
1835
1836 if (name != NULL) {
1837 if (strncmp(name, "ctc", 3) == 0)
1838 ret = channel_type_parallel;
1839 if (strncmp(name, "escon", 5) == 0)
1840 ret = channel_type_escon;
1841 }
1842 return ret;
1843}
1844
1845static long
1846__ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
1847{
1848 if (!IS_ERR(irb))
1849 return 0;
1850
1851 switch (PTR_ERR(irb)) {
1852 case -EIO:
1853 ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
1854// CTC_DBF_TEXT(trace, 2, "ckirberr");
1855// CTC_DBF_TEXT_(trace, 2, " rc%d", -EIO);
1856 break;
1857 case -ETIMEDOUT:
1858 ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
1859// CTC_DBF_TEXT(trace, 2, "ckirberr");
1860// CTC_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
1861 break;
1862 default:
1863 ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
1864 cdev->dev.bus_id);
1865// CTC_DBF_TEXT(trace, 2, "ckirberr");
1866// CTC_DBF_TEXT(trace, 2, " rc???");
1867 }
1868 return PTR_ERR(irb);
1869}
1870
1871/**
1872 * Main IRQ handler.
1873 *
1874 * @param cdev The ccw_device the interrupt is for.
1875 * @param intparm interruption parameter.
1876 * @param irb interruption response block.
1877 */
1878static void
1879ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1880{
1881 struct channel *ch;
1882 struct net_device *dev;
1883 struct ctc_priv *priv;
1884
1885 DBF_TEXT(trace, 5, __FUNCTION__);
1886 if (__ctc_check_irb_error(cdev, irb))
1887 return;
1888
1889 /* Check for unsolicited interrupts. */
1890 if (!cdev->dev.driver_data) {
1891 ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
1892 cdev->dev.bus_id, irb->scsw.cstat,
1893 irb->scsw.dstat);
1894 return;
1895 }
1896
1897 priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
1898 ->dev.driver_data;
1899
1900 /* Try to extract channel from driver data. */
1901 if (priv->channel[READ]->cdev == cdev)
1902 ch = priv->channel[READ];
1903 else if (priv->channel[WRITE]->cdev == cdev)
1904 ch = priv->channel[WRITE];
1905 else {
1906 ctc_pr_err("ctc: Can't determine channel for interrupt, "
1907 "device %s\n", cdev->dev.bus_id);
1908 return;
1909 }
1910
1911 dev = (struct net_device *) (ch->netdev);
1912 if (dev == NULL) {
1913 ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
1914 cdev->dev.bus_id, ch);
1915 return;
1916 }
1917
1918#ifdef DEBUG
1919 ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
1920 dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
1921#endif
1922
1923 /* Copy interruption response block. */
1924 memcpy(ch->irb, irb, sizeof(struct irb));
1925
1926 /* Check for good subchannel return code, otherwise error message */
1927 if (ch->irb->scsw.cstat) {
1928 fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
1929 ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
1930 dev->name, ch->id, ch->irb->scsw.cstat,
1931 ch->irb->scsw.dstat);
1932 return;
1933 }
1934
1935 /* Check the reason-code of a unit check */
1936 if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
1937 ccw_unit_check(ch, ch->irb->ecw[0]);
1938 return;
1939 }
1940 if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
1941 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
1942 fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
1943 else
1944 fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
1945 return;
1946 }
1947 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
1948 fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
1949 return;
1950 }
1951 if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
1952 (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
1953 (ch->irb->scsw.stctl ==
1954 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
1955 fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
1956 else
1957 fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
1958
1959}
1960
1961/**
1962 * Actions for interface - statemachine.
1963 *****************************************************************************/
1964
1965/**
1966 * Startup channels by sending CH_EVENT_START to each channel.
1967 *
1968 * @param fi An instance of an interface statemachine.
1969 * @param event The event, just happened.
1970 * @param arg Generic pointer, casted from struct net_device * upon call.
1971 */
1972static void
1973dev_action_start(fsm_instance * fi, int event, void *arg)
1974{
1975 struct net_device *dev = (struct net_device *) arg;
1976 struct ctc_priv *privptr = dev->priv;
1977 int direction;
1978
1979 DBF_TEXT(setup, 3, __FUNCTION__);
1980 fsm_deltimer(&privptr->restart_timer);
1981 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
1982 for (direction = READ; direction <= WRITE; direction++) {
1983 struct channel *ch = privptr->channel[direction];
1984 fsm_event(ch->fsm, CH_EVENT_START, ch);
1985 }
1986}
1987
1988/**
1989 * Shutdown channels by sending CH_EVENT_STOP to each channel.
1990 *
1991 * @param fi An instance of an interface statemachine.
1992 * @param event The event, just happened.
1993 * @param arg Generic pointer, casted from struct net_device * upon call.
1994 */
1995static void
1996dev_action_stop(fsm_instance * fi, int event, void *arg)
1997{
1998 struct net_device *dev = (struct net_device *) arg;
1999 struct ctc_priv *privptr = dev->priv;
2000 int direction;
2001
2002 DBF_TEXT(trace, 3, __FUNCTION__);
2003 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2004 for (direction = READ; direction <= WRITE; direction++) {
2005 struct channel *ch = privptr->channel[direction];
2006 fsm_event(ch->fsm, CH_EVENT_STOP, ch);
2007 }
2008}
2009static void
2010dev_action_restart(fsm_instance *fi, int event, void *arg)
2011{
2012 struct net_device *dev = (struct net_device *)arg;
2013 struct ctc_priv *privptr = dev->priv;
2014
2015 DBF_TEXT(trace, 3, __FUNCTION__);
2016 ctc_pr_debug("%s: Restarting\n", dev->name);
2017 dev_action_stop(fi, event, arg);
2018 fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
2019 fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
2020 DEV_EVENT_START, dev);
2021}
2022
2023/**
2024 * Called from channel statemachine
2025 * when a channel is up and running.
2026 *
2027 * @param fi An instance of an interface statemachine.
2028 * @param event The event, just happened.
2029 * @param arg Generic pointer, casted from struct net_device * upon call.
2030 */
2031static void
2032dev_action_chup(fsm_instance * fi, int event, void *arg)
2033{
2034 struct net_device *dev = (struct net_device *) arg;
2035
2036 DBF_TEXT(trace, 3, __FUNCTION__);
2037 switch (fsm_getstate(fi)) {
2038 case DEV_STATE_STARTWAIT_RXTX:
2039 if (event == DEV_EVENT_RXUP)
2040 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2041 else
2042 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2043 break;
2044 case DEV_STATE_STARTWAIT_RX:
2045 if (event == DEV_EVENT_RXUP) {
2046 fsm_newstate(fi, DEV_STATE_RUNNING);
2047 ctc_pr_info("%s: connected with remote side\n",
2048 dev->name);
2049 ctc_clear_busy(dev);
2050 }
2051 break;
2052 case DEV_STATE_STARTWAIT_TX:
2053 if (event == DEV_EVENT_TXUP) {
2054 fsm_newstate(fi, DEV_STATE_RUNNING);
2055 ctc_pr_info("%s: connected with remote side\n",
2056 dev->name);
2057 ctc_clear_busy(dev);
2058 }
2059 break;
2060 case DEV_STATE_STOPWAIT_TX:
2061 if (event == DEV_EVENT_RXUP)
2062 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2063 break;
2064 case DEV_STATE_STOPWAIT_RX:
2065 if (event == DEV_EVENT_TXUP)
2066 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2067 break;
2068 }
2069}
2070
2071/**
2072 * Called from channel statemachine
2073 * when a channel has been shutdown.
2074 *
2075 * @param fi An instance of an interface statemachine.
2076 * @param event The event, just happened.
2077 * @param arg Generic pointer, casted from struct net_device * upon call.
2078 */
2079static void
2080dev_action_chdown(fsm_instance * fi, int event, void *arg)
2081{
2082
2083 DBF_TEXT(trace, 3, __FUNCTION__);
2084 switch (fsm_getstate(fi)) {
2085 case DEV_STATE_RUNNING:
2086 if (event == DEV_EVENT_TXDOWN)
2087 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2088 else
2089 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2090 break;
2091 case DEV_STATE_STARTWAIT_RX:
2092 if (event == DEV_EVENT_TXDOWN)
2093 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2094 break;
2095 case DEV_STATE_STARTWAIT_TX:
2096 if (event == DEV_EVENT_RXDOWN)
2097 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2098 break;
2099 case DEV_STATE_STOPWAIT_RXTX:
2100 if (event == DEV_EVENT_TXDOWN)
2101 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2102 else
2103 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2104 break;
2105 case DEV_STATE_STOPWAIT_RX:
2106 if (event == DEV_EVENT_RXDOWN)
2107 fsm_newstate(fi, DEV_STATE_STOPPED);
2108 break;
2109 case DEV_STATE_STOPWAIT_TX:
2110 if (event == DEV_EVENT_TXDOWN)
2111 fsm_newstate(fi, DEV_STATE_STOPPED);
2112 break;
2113 }
2114}
2115
/*
 * Jump table of the interface statemachine: one entry per
 * (state, event) pair that triggers an action. Combinations not
 * listed here are ignored by the fsm core.
 */
static const fsm_node dev_fsm[] = {
	{DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},

	/* Stopping, both channels still up. */
	{DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_START,   dev_action_start  },
	{DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_RXDOWN,  dev_action_chdown },
	{DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_TXDOWN,  dev_action_chdown },
	{DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_RESTART, dev_action_restart },

	/* Stopping, waiting for the RX channel only. */
	{DEV_STATE_STOPWAIT_RX,    DEV_EVENT_START,   dev_action_start  },
	{DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RXUP,    dev_action_chup   },
	{DEV_STATE_STOPWAIT_RX,    DEV_EVENT_TXUP,    dev_action_chup   },
	{DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RXDOWN,  dev_action_chdown },
	{DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RESTART, dev_action_restart },

	/* Stopping, waiting for the TX channel only. */
	{DEV_STATE_STOPWAIT_TX,    DEV_EVENT_START,   dev_action_start  },
	{DEV_STATE_STOPWAIT_TX,    DEV_EVENT_RXUP,    dev_action_chup   },
	{DEV_STATE_STOPWAIT_TX,    DEV_EVENT_TXUP,    dev_action_chup   },
	{DEV_STATE_STOPWAIT_TX,    DEV_EVENT_TXDOWN,  dev_action_chdown },
	{DEV_STATE_STOPWAIT_TX,    DEV_EVENT_RESTART, dev_action_restart },

	/* Starting, both channels still down. */
	{DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP,    dev_action_stop   },
	{DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP,    dev_action_chup   },
	{DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP,    dev_action_chup   },
	{DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN,  dev_action_chdown },
	{DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN,  dev_action_chdown },
	{DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },

	/* Starting, waiting for the TX channel only. */
	{DEV_STATE_STARTWAIT_TX,   DEV_EVENT_STOP,    dev_action_stop   },
	{DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RXUP,    dev_action_chup   },
	{DEV_STATE_STARTWAIT_TX,   DEV_EVENT_TXUP,    dev_action_chup   },
	{DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RXDOWN,  dev_action_chdown },
	{DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RESTART, dev_action_restart },

	/* Starting, waiting for the RX channel only. */
	{DEV_STATE_STARTWAIT_RX,   DEV_EVENT_STOP,    dev_action_stop   },
	{DEV_STATE_STARTWAIT_RX,   DEV_EVENT_RXUP,    dev_action_chup   },
	{DEV_STATE_STARTWAIT_RX,   DEV_EVENT_TXUP,    dev_action_chup   },
	{DEV_STATE_STARTWAIT_RX,   DEV_EVENT_TXDOWN,  dev_action_chdown },
	{DEV_STATE_STARTWAIT_RX,   DEV_EVENT_RESTART, dev_action_restart },

	/* Fully operational. */
	{DEV_STATE_RUNNING,        DEV_EVENT_STOP,    dev_action_stop   },
	{DEV_STATE_RUNNING,        DEV_EVENT_RXDOWN,  dev_action_chdown },
	{DEV_STATE_RUNNING,        DEV_EVENT_TXDOWN,  dev_action_chdown },
	{DEV_STATE_RUNNING,        DEV_EVENT_TXUP,    fsm_action_nop    },
	{DEV_STATE_RUNNING,        DEV_EVENT_RXUP,    fsm_action_nop    },
	{DEV_STATE_RUNNING,        DEV_EVENT_RESTART, dev_action_restart },
};

/* Number of entries in dev_fsm, passed to init_fsm(). */
static const int DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
2164
/**
 * Transmit a packet.
 * This is a helper function for ctc_tx().
 *
 * @param ch  Channel to be used for sending.
 * @param skb Pointer to struct sk_buff of packet to send.
 *            The linklevel header has already been set up
 *            by ctc_tx().
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
 */
static int
transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;

	DBF_TEXT(trace, 5, __FUNCTION__);
	/* we need to acquire the lock for testing the state
	 * otherwise we can have an IRQ changing the state to
	 * TXIDLE after the test but before acquiring the lock.
	 */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
		/* Channel busy: queue the skb on the collect queue; it will
		 * be sent in one multi-write when the current I/O finishes. */
		int l = skb->len + LL_HEADER_LENGTH;

		if (ch->collect_len + l > ch->max_bufsize - 2) {
			/* Collect buffer full; caller keeps the skb.
			 * NOTE: busy flag deliberately stays set here. */
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			/* Hold a reference while the skb sits on our queue. */
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	} else {
		/* Channel idle: start a single-skb write immediately. */
		__u16 block_len;
		int ccw_idx;
		struct sk_buff *nskb;
		unsigned long hi;
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		/**
		 * Protect skb against beeing free'd by upper
		 * layers.
		 */
		atomic_inc(&skb->users);
		ch->prof.txlen += skb->len;
		header.length = skb->len + LL_HEADER_LENGTH;
		header.type = skb->protocol;
		header.unused = 0;
		memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
		       LL_HEADER_LENGTH);
		/* Prepend the 2-byte block length for the CTC protocol. */
		block_len = skb->len + 2;
		*((__u16 *) skb_push(skb, 2)) = block_len;

		/**
		 * IDAL support in CTC is broken, so we have to
		 * care about skb's above 2G ourselves.
		 */
		hi = ((unsigned long)skb_tail_pointer(skb) +
		      LL_HEADER_LENGTH) >> 31;
		if (hi) {
			/* Data above 2G: bounce into a GFP_DMA skb. */
			nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
			if (!nskb) {
				atomic_dec(&skb->users);
				skb_pull(skb, LL_HEADER_LENGTH + 2);
				ctc_clear_busy(ch->netdev);
				return -ENOMEM;
			} else {
				memcpy(skb_put(nskb, skb->len),
				       skb->data, skb->len);
				atomic_inc(&nskb->users);
				atomic_dec(&skb->users);
				dev_kfree_skb_irq(skb);
				skb = nskb;
			}
		}

		ch->ccw[4].count = block_len;
		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
			/**
			 * idal allocation failed, try via copying to
			 * trans_skb. trans_skb usually has a pre-allocated
			 * idal.
			 */
			if (ctc_checkalloc_buffer(ch, 1)) {
				/**
				 * Remove our header. It gets added
				 * again on retransmit.
				 */
				atomic_dec(&skb->users);
				skb_pull(skb, LL_HEADER_LENGTH + 2);
				ctc_clear_busy(ch->netdev);
				return -EBUSY;
			}

			skb_reset_tail_pointer(ch->trans_skb);
			ch->trans_skb->len = 0;
			ch->ccw[1].count = skb->len;
			skb_copy_from_linear_data(skb, skb_put(ch->trans_skb,
							       skb->len),
						  skb->len);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			ccw_idx = 0;	/* trans_skb CCW program */
		} else {
			/* skb stays alive on io_queue until TX completion. */
			skb_queue_tail(&ch->io_queue, skb);
			ccw_idx = 3;	/* direct-skb CCW program */
		}
		ch->retry = 0;
		fsm_newstate(ch->fsm, CH_STATE_TX);
		fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
		/* ccw_device_start() requires the ccwdev lock. */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		ch->prof.send_stamp = current_kernel_time();
		rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
				      (unsigned long) ch, 0xff, 0);
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
		if (ccw_idx == 3)
			ch->prof.doios_single++;
		if (rc != 0) {
			fsm_deltimer(&ch->timer);
			ccw_check_return_code(ch, rc, "single skb TX");
			if (ccw_idx == 3)
				skb_dequeue_tail(&ch->io_queue);
			/**
			 * Remove our header. It gets added
			 * again on retransmit.
			 */
			skb_pull(skb, LL_HEADER_LENGTH + 2);
		} else {
			if (ccw_idx == 0) {
				/* trans_skb path: skb already consumed, so
				 * account the statistics right away. */
				struct net_device *dev = ch->netdev;
				struct ctc_priv *privptr = dev->priv;
				privptr->stats.tx_packets++;
				privptr->stats.tx_bytes +=
				    skb->len - LL_HEADER_LENGTH;
			}
		}
	}

	ctc_clear_busy(ch->netdev);
	return rc;
}
2314
2315/**
2316 * Interface API for upper network layers
2317 *****************************************************************************/
2318
2319/**
2320 * Open an interface.
2321 * Called from generic network layer when ifconfig up is run.
2322 *
2323 * @param dev Pointer to interface struct.
2324 *
2325 * @return 0 on success, -ERRNO on failure. (Never fails.)
2326 */
2327static int
2328ctc_open(struct net_device * dev)
2329{
2330 DBF_TEXT(trace, 5, __FUNCTION__);
2331 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
2332 return 0;
2333}
2334
2335/**
2336 * Close an interface.
2337 * Called from generic network layer when ifconfig down is run.
2338 *
2339 * @param dev Pointer to interface struct.
2340 *
2341 * @return 0 on success, -ERRNO on failure. (Never fails.)
2342 */
2343static int
2344ctc_close(struct net_device * dev)
2345{
2346 DBF_TEXT(trace, 5, __FUNCTION__);
2347 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
2348 return 0;
2349}
2350
/**
 * Start transmission of a packet.
 * Called from generic network device layer.
 *
 * @param skb Pointer to buffer containing the packet.
 * @param dev Pointer to interface struct.
 *
 * @return 0 if packet consumed, !0 if packet rejected.
 *         Note: If we return !0, then the packet is free'd by
 *               the generic network layer.
 */
static int
ctc_tx(struct sk_buff *skb, struct net_device * dev)
{
	int rc = 0;
	struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;

	DBF_TEXT(trace, 5, __FUNCTION__);
	/**
	 * Some sanity checks ...
	 */
	if (skb == NULL) {
		ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
		privptr->stats.tx_dropped++;
		return 0;
	}
	/* transmit_skb() pushes LL_HEADER_LENGTH + 2 bytes in front of
	 * the payload; refuse skbs without the required headroom. */
	if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
		ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
			    dev->name, LL_HEADER_LENGTH + 2);
		dev_kfree_skb(skb);
		privptr->stats.tx_dropped++;
		return 0;
	}

	/**
	 * If channels are not running, try to restart them
	 * and throw away packet.
	 */
	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
		fsm_event(privptr->fsm, DEV_EVENT_START, dev);
		dev_kfree_skb(skb);
		privptr->stats.tx_dropped++;
		privptr->stats.tx_errors++;
		privptr->stats.tx_carrier_errors++;
		return 0;
	}

	/* Stop the queue and mark us busy; cleared again either here
	 * (on failure inside transmit_skb) or at TX completion. */
	if (ctc_test_and_set_busy(dev))
		return -EBUSY;

	dev->trans_start = jiffies;
	/* Non-zero return: skb was NOT consumed; networking core requeues. */
	if (transmit_skb(privptr->channel[WRITE], skb) != 0)
		rc = 1;
	return rc;
}
2406
2407/**
2408 * Sets MTU of an interface.
2409 *
2410 * @param dev Pointer to interface struct.
2411 * @param new_mtu The new MTU to use for this interface.
2412 *
2413 * @return 0 on success, -EINVAL if MTU is out of valid range.
2414 * (valid range is 576 .. 65527). If VM is on the
2415 * remote side, maximum MTU is 32760, however this is
2416 * <em>not</em> checked here.
2417 */
2418static int
2419ctc_change_mtu(struct net_device * dev, int new_mtu)
2420{
2421 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2422
2423 DBF_TEXT(trace, 3, __FUNCTION__);
2424 if ((new_mtu < 576) || (new_mtu > 65527) ||
2425 (new_mtu > (privptr->channel[READ]->max_bufsize -
2426 LL_HEADER_LENGTH - 2)))
2427 return -EINVAL;
2428 dev->mtu = new_mtu;
2429 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2430 return 0;
2431}
2432
2433/**
2434 * Returns interface statistics of a device.
2435 *
2436 * @param dev Pointer to interface struct.
2437 *
2438 * @return Pointer to stats struct of this interface.
2439 */
2440static struct net_device_stats *
2441ctc_stats(struct net_device * dev)
2442{
2443 return &((struct ctc_priv *) dev->priv)->stats;
2444}
2445
2446/*
2447 * sysfs attributes
2448 */
2449
2450static ssize_t
2451buffer_show(struct device *dev, struct device_attribute *attr, char *buf)
2452{
2453 struct ctc_priv *priv;
2454
2455 priv = dev->driver_data;
2456 if (!priv)
2457 return -ENODEV;
2458 return sprintf(buf, "%d\n",
2459 priv->buffer_size);
2460}
2461
2462static ssize_t
2463buffer_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2464{
2465 struct ctc_priv *priv;
2466 struct net_device *ndev;
2467 int bs1;
2468 char buffer[16];
2469
2470 DBF_TEXT(trace, 3, __FUNCTION__);
2471 DBF_TEXT(trace, 3, buf);
2472 priv = dev->driver_data;
2473 if (!priv) {
2474 DBF_TEXT(trace, 3, "bfnopriv");
2475 return -ENODEV;
2476 }
2477
2478 sscanf(buf, "%u", &bs1);
2479 if (bs1 > CTC_BUFSIZE_LIMIT)
2480 goto einval;
2481 if (bs1 < (576 + LL_HEADER_LENGTH + 2))
2482 goto einval;
2483 priv->buffer_size = bs1; // just to overwrite the default
2484
2485 ndev = priv->channel[READ]->netdev;
2486 if (!ndev) {
2487 DBF_TEXT(trace, 3, "bfnondev");
2488 return -ENODEV;
2489 }
2490
2491 if ((ndev->flags & IFF_RUNNING) &&
2492 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
2493 goto einval;
2494
2495 priv->channel[READ]->max_bufsize = bs1;
2496 priv->channel[WRITE]->max_bufsize = bs1;
2497 if (!(ndev->flags & IFF_RUNNING))
2498 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
2499 priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2500 priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2501
2502 sprintf(buffer, "%d",priv->buffer_size);
2503 DBF_TEXT(trace, 3, buffer);
2504 return count;
2505
2506einval:
2507 DBF_TEXT(trace, 3, "buff_err");
2508 return -EINVAL;
2509}
2510
/* sysfs "loglevel": show the module-global log level bitmask. */
static ssize_t
loglevel_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", loglevel);
}
2516
2517static ssize_t
2518loglevel_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2519{
2520 int ll1;
2521
2522 DBF_TEXT(trace, 5, __FUNCTION__);
2523 sscanf(buf, "%i", &ll1);
2524
2525 if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
2526 return -EINVAL;
2527 loglevel = ll1;
2528 return count;
2529}
2530
/**
 * Dump TX profiling counters and FSM states of an interface to the
 * kernel log (at debug log level). Allocation failure is silently
 * ignored -- this is a best-effort diagnostic helper.
 *
 * @param priv Private data of the interface; may be NULL (no-op).
 */
static void
ctc_print_statistics(struct ctc_priv *priv)
{
	char *sbuf;
	char *p;

	DBF_TEXT(trace, 4, __FUNCTION__);
	if (!priv)
		return;
	/* 2048 bytes comfortably holds the ten fixed-format lines below. */
	sbuf = kmalloc(2048, GFP_KERNEL);
	if (sbuf == NULL)
		return;
	p = sbuf;

	p += sprintf(p, " Device FSM state: %s\n",
		     fsm_getstate_str(priv->fsm));
	p += sprintf(p, " RX channel FSM state: %s\n",
		     fsm_getstate_str(priv->channel[READ]->fsm));
	p += sprintf(p, " TX channel FSM state: %s\n",
		     fsm_getstate_str(priv->channel[WRITE]->fsm));
	p += sprintf(p, " Max. TX buffer used: %ld\n",
		     priv->channel[WRITE]->prof.maxmulti);
	p += sprintf(p, " Max. chained SKBs: %ld\n",
		     priv->channel[WRITE]->prof.maxcqueue);
	p += sprintf(p, " TX single write ops: %ld\n",
		     priv->channel[WRITE]->prof.doios_single);
	p += sprintf(p, " TX multi write ops: %ld\n",
		     priv->channel[WRITE]->prof.doios_multi);
	p += sprintf(p, " Netto bytes written: %ld\n",
		     priv->channel[WRITE]->prof.txlen);
	p += sprintf(p, " Max. TX IO-time: %ld\n",
		     priv->channel[WRITE]->prof.tx_time);

	ctc_pr_debug("Statistics for %s:\n%s",
		     priv->channel[WRITE]->netdev->name, sbuf);
	kfree(sbuf);
	return;
}
2569
2570static ssize_t
2571stats_show(struct device *dev, struct device_attribute *attr, char *buf)
2572{
2573 struct ctc_priv *priv = dev->driver_data;
2574 if (!priv)
2575 return -ENODEV;
2576 ctc_print_statistics(priv);
2577 return sprintf(buf, "0\n");
2578}
2579
2580static ssize_t
2581stats_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2582{
2583 struct ctc_priv *priv = dev->driver_data;
2584 if (!priv)
2585 return -ENODEV;
2586 /* Reset statistics */
2587 memset(&priv->channel[WRITE]->prof, 0,
2588 sizeof(priv->channel[WRITE]->prof));
2589 return count;
2590}
2591
/*
 * Unregister a net_device from the network core.
 * The old version fetched dev->priv into a local that was never
 * used; the dead variable is removed.
 *
 * @param dev Interface to unregister; may be NULL (no-op).
 */
static void
ctc_netdev_unregister(struct net_device * dev)
{
	if (!dev)
		return;
	unregister_netdev(dev);
}
2602
/*
 * Register a prepared net_device with the network core.
 *
 * @param dev Interface to register.
 *
 * @return 0 on success, negative errno from register_netdev().
 */
static int
ctc_netdev_register(struct net_device * dev)
{
	return register_netdev(dev);
}
2608
/**
 * Free the private data of an interface and, when built as a
 * module, optionally the net_device itself.
 *
 * @param dev      Interface to free; may be NULL (no-op).
 * @param free_dev Nonzero to also release the net_device.
 *                 NOTE(review): honored only under #ifdef MODULE --
 *                 in a built-in kernel the net_device is never freed
 *                 here; confirm this is intentional.
 */
static void
ctc_free_netdevice(struct net_device * dev, int free_dev)
{
	struct ctc_priv *privptr;
	if (!dev)
		return;
	privptr = dev->priv;
	if (privptr) {
		/* Tear down the interface FSM before freeing priv. */
		if (privptr->fsm)
			kfree_fsm(privptr->fsm);
		kfree(privptr);
	}
#ifdef MODULE
	if (free_dev)
		free_netdev(dev);
#endif
}
2626
2627static ssize_t
2628ctc_proto_show(struct device *dev, struct device_attribute *attr, char *buf)
2629{
2630 struct ctc_priv *priv;
2631
2632 priv = dev->driver_data;
2633 if (!priv)
2634 return -ENODEV;
2635
2636 return sprintf(buf, "%d\n", priv->protocol);
2637}
2638
2639static ssize_t
2640ctc_proto_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2641{
2642 struct ctc_priv *priv;
2643 int value;
2644
2645 DBF_TEXT(trace, 3, __FUNCTION__);
2646 pr_debug("%s() called\n", __FUNCTION__);
2647
2648 priv = dev->driver_data;
2649 if (!priv)
2650 return -ENODEV;
2651 sscanf(buf, "%u", &value);
2652 if (!((value == CTC_PROTO_S390) ||
2653 (value == CTC_PROTO_LINUX) ||
2654 (value == CTC_PROTO_OS390)))
2655 return -EINVAL;
2656 priv->protocol = value;
2657
2658 return count;
2659}
2660
2661static ssize_t
2662ctc_type_show(struct device *dev, struct device_attribute *attr, char *buf)
2663{
2664 struct ccwgroup_device *cgdev;
2665
2666 cgdev = to_ccwgroupdev(dev);
2667 if (!cgdev)
2668 return -ENODEV;
2669
2670 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
2671}
2672
/* Per-device attributes, grouped below and created in ctc_add_files(). */
static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);

/* Attributes created individually in ctc_add_attributes(). */
static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
static DEVICE_ATTR(stats, 0644, stats_show, stats_write);

static struct attribute *ctc_attr[] = {
	&dev_attr_protocol.attr,
	&dev_attr_type.attr,
	&dev_attr_buffer.attr,
	NULL,
};

static struct attribute_group ctc_attr_group = {
	.attrs = ctc_attr,
};
2690
2691static int
2692ctc_add_attributes(struct device *dev)
2693{
2694 int rc;
2695
2696 rc = device_create_file(dev, &dev_attr_loglevel);
2697 if (rc)
2698 goto out;
2699 rc = device_create_file(dev, &dev_attr_stats);
2700 if (!rc)
2701 goto out;
2702 device_remove_file(dev, &dev_attr_loglevel);
2703out:
2704 return rc;
2705}
2706
2707static void
2708ctc_remove_attributes(struct device *dev)
2709{
2710 device_remove_file(dev, &dev_attr_stats);
2711 device_remove_file(dev, &dev_attr_loglevel);
2712}
2713
2714static int
2715ctc_add_files(struct device *dev)
2716{
2717 pr_debug("%s() called\n", __FUNCTION__);
2718
2719 return sysfs_create_group(&dev->kobj, &ctc_attr_group);
2720}
2721
2722static void
2723ctc_remove_files(struct device *dev)
2724{
2725 pr_debug("%s() called\n", __FUNCTION__);
2726
2727 sysfs_remove_group(&dev->kobj, &ctc_attr_group);
2728}
2729
/**
 * Add ctc specific attributes.
 * Add ctc private data.
 *
 * Takes a reference on the group device; it is dropped either on
 * failure here or in ctc_remove_device().
 *
 * @param cgdev pointer to ccwgroup_device just added
 *
 * @returns 0 on success, !0 on failure.
 */
static int
ctc_probe_device(struct ccwgroup_device *cgdev)
{
	struct ctc_priv *priv;
	int rc;
	char buffer[16];

	pr_debug("%s() called\n", __FUNCTION__);
	DBF_TEXT(setup, 3, __FUNCTION__);

	if (!get_device(&cgdev->dev))
		return -ENODEV;

	priv = kzalloc(sizeof(struct ctc_priv), GFP_KERNEL);
	if (!priv) {
		ctc_pr_err("%s: Out of memory\n", __func__);
		put_device(&cgdev->dev);
		return -ENOMEM;
	}

	rc = ctc_add_files(&cgdev->dev);
	if (rc) {
		/* Undo allocation and the device reference on failure. */
		kfree(priv);
		put_device(&cgdev->dev);
		return rc;
	}
	priv->buffer_size = CTC_BUFSIZE_DEFAULT;
	/* Both slave CCW devices share one interrupt handler. */
	cgdev->cdev[0]->handler = ctc_irq_handler;
	cgdev->cdev[1]->handler = ctc_irq_handler;
	/* priv is owned by the group device until ctc_remove_device(). */
	cgdev->dev.driver_data = priv;

	/* Debug-trace some setup values. */
	sprintf(buffer, "%p", priv);
	DBF_TEXT(data, 3, buffer);

	sprintf(buffer, "%u", (unsigned int)sizeof(struct ctc_priv));
	DBF_TEXT(data, 3, buffer);

	sprintf(buffer, "%p", &channels);
	DBF_TEXT(data, 3, buffer);

	sprintf(buffer, "%u", (unsigned int)sizeof(struct channel));
	DBF_TEXT(data, 3, buffer);

	return 0;
}
2783
2784/**
2785 * Device setup function called by alloc_netdev().
2786 *
2787 * @param dev Device to be setup.
2788 */
2789void ctc_init_netdevice(struct net_device * dev)
2790{
2791 DBF_TEXT(setup, 3, __FUNCTION__);
2792
2793 if (dev->mtu == 0)
2794 dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
2795 dev->hard_start_xmit = ctc_tx;
2796 dev->open = ctc_open;
2797 dev->stop = ctc_close;
2798 dev->get_stats = ctc_stats;
2799 dev->change_mtu = ctc_change_mtu;
2800 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2801 dev->addr_len = 0;
2802 dev->type = ARPHRD_SLIP;
2803 dev->tx_queue_len = 100;
2804 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2805}
2806
2807
/**
 *
 * Setup an interface.
 *
 * Brings both slave CCW devices online, allocates the net_device
 * and the interface FSM, binds the two channels to the interface
 * and registers everything with the network core and sysfs.
 *
 * @param cgdev Device to be setup.
 *
 * @returns 0 on success, !0 on failure.
 */
static int
ctc_new_device(struct ccwgroup_device *cgdev)
{
	char read_id[CTC_ID_SIZE];
	char write_id[CTC_ID_SIZE];
	int direction;
	enum channel_types type;
	struct ctc_priv *privptr;
	struct net_device *dev;
	int ret;
	char buffer[16];

	pr_debug("%s() called\n", __FUNCTION__);
	DBF_TEXT(setup, 3, __FUNCTION__);

	privptr = cgdev->dev.driver_data;
	if (!privptr)
		return -ENODEV;

	sprintf(buffer, "%d", privptr->buffer_size);
	DBF_TEXT(setup, 3, buffer);

	type = get_channel_type(&cgdev->cdev[0]->id);

	/* Channel ids are derived from the slaves' bus ids. */
	snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
	snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);

	/* NOTE(review): on these failure paths a channel already added
	 * is neither removed nor are the devices set offline -- looks
	 * like a leak; confirm against channel list handling. */
	if (add_channel(cgdev->cdev[0], type))
		return -ENOMEM;
	if (add_channel(cgdev->cdev[1], type))
		return -ENOMEM;

	/* Failures here are only logged; the grouped device may still
	 * come up later. */
	ret = ccw_device_set_online(cgdev->cdev[0]);
	if (ret != 0) {
		printk(KERN_WARNING
		       "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
	}

	ret = ccw_device_set_online(cgdev->cdev[1]);
	if (ret != 0) {
		printk(KERN_WARNING
		       "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
	}

	dev = alloc_netdev(0, "ctc%d", ctc_init_netdevice);
	if (!dev) {
		ctc_pr_warn("ctc_init_netdevice failed\n");
		goto out;
	}
	/* net_device and group device share the same private data. */
	dev->priv = privptr;

	privptr->fsm = init_fsm("ctcdev", dev_state_names,
			dev_event_names, CTC_NR_DEV_STATES, CTC_NR_DEV_EVENTS,
			dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
	if (privptr->fsm == NULL) {
		free_netdev(dev);
		goto out;
	}
	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
	fsm_settimer(privptr->fsm, &privptr->restart_timer);

	/* Claim the read and write channels created above. */
	for (direction = READ; direction <= WRITE; direction++) {
		privptr->channel[direction] =
		    channel_get(type, direction == READ ? read_id : write_id,
				direction);
		if (privptr->channel[direction] == NULL) {
			if (direction == WRITE)
				channel_free(privptr->channel[READ]);

			ctc_free_netdevice(dev, 1);
			goto out;
		}
		privptr->channel[direction]->netdev = dev;
		privptr->channel[direction]->protocol = privptr->protocol;
		privptr->channel[direction]->max_bufsize = privptr->buffer_size;
	}
	/* sysfs magic */
	SET_NETDEV_DEV(dev, &cgdev->dev);

	if (ctc_netdev_register(dev) != 0) {
		ctc_free_netdevice(dev, 1);
		goto out;
	}

	if (ctc_add_attributes(&cgdev->dev)) {
		/* dev->priv cleared so ctc_free_netdevice() does not
		 * free privptr, which the group device still owns. */
		ctc_netdev_unregister(dev);
		dev->priv = NULL;
		ctc_free_netdevice(dev, 1);
		goto out;
	}

	/* Name the interface FSM after the netdev for readable traces. */
	strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));

	print_banner();

	ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
		    dev->name, privptr->channel[READ]->id,
		    privptr->channel[WRITE]->id, privptr->protocol);

	return 0;
out:
	ccw_device_set_offline(cgdev->cdev[1]);
	ccw_device_set_offline(cgdev->cdev[0]);

	return -ENODEV;
}
2922
/**
 * Shutdown an interface.
 *
 * Closes the net_device, releases both channels, unregisters and
 * frees the net_device and takes the slave CCW devices offline.
 * Note: ndev->priv aliases cgdev's driver_data; it is set to NULL
 * before ctc_free_netdevice() so that priv itself (still owned by
 * the group device) is not freed here.
 *
 * @param cgdev Device to be shut down.
 *
 * @returns 0 on success, !0 on failure.
 */
static int
ctc_shutdown_device(struct ccwgroup_device *cgdev)
{
	struct ctc_priv *priv;
	struct net_device *ndev;

	DBF_TEXT(setup, 3, __FUNCTION__);
	pr_debug("%s() called\n", __FUNCTION__);


	priv = cgdev->dev.driver_data;
	ndev = NULL;
	if (!priv)
		return -ENODEV;

	if (priv->channel[READ]) {
		ndev = priv->channel[READ]->netdev;

		/* Close the device */
		ctc_close(ndev);
		ndev->flags &=~IFF_RUNNING;

		ctc_remove_attributes(&cgdev->dev);

		channel_free(priv->channel[READ]);
	}
	if (priv->channel[WRITE])
		channel_free(priv->channel[WRITE]);

	if (ndev) {
		ctc_netdev_unregister(ndev);
		/* Detach priv so ctc_free_netdevice() frees only ndev. */
		ndev->priv = NULL;
		ctc_free_netdevice(ndev, 1);
	}

	/* NOTE(review): priv->fsm is freed but not set to NULL here;
	 * a second shutdown would double-free it -- verify callers. */
	if (priv->fsm)
		kfree_fsm(priv->fsm);

	ccw_device_set_offline(cgdev->cdev[1]);
	ccw_device_set_offline(cgdev->cdev[0]);

	if (priv->channel[READ])
		channel_remove(priv->channel[READ]);
	if (priv->channel[WRITE])
		channel_remove(priv->channel[WRITE]);
	priv->channel[READ] = priv->channel[WRITE] = NULL;

	return 0;

}
2980
2981static void
2982ctc_remove_device(struct ccwgroup_device *cgdev)
2983{
2984 struct ctc_priv *priv;
2985
2986 pr_debug("%s() called\n", __FUNCTION__);
2987 DBF_TEXT(setup, 3, __FUNCTION__);
2988
2989 priv = cgdev->dev.driver_data;
2990 if (!priv)
2991 return;
2992 if (cgdev->state == CCWGROUP_ONLINE)
2993 ctc_shutdown_device(cgdev);
2994 ctc_remove_files(&cgdev->dev);
2995 cgdev->dev.driver_data = NULL;
2996 kfree(priv);
2997 put_device(&cgdev->dev);
2998}
2999
/*
 * ccwgroup discipline: binds pairs of CCW devices (read + write
 * channel) into one CTC network interface.
 */
static struct ccwgroup_driver ctc_group_driver = {
	.owner       = THIS_MODULE,
	.name        = "ctc",
	.max_slaves  = 2,
	.driver_id   = 0xC3E3C3,	/* "CTC" in EBCDIC -- TODO confirm */
	.probe       = ctc_probe_device,
	.remove      = ctc_remove_device,
	.set_online  = ctc_new_device,
	.set_offline = ctc_shutdown_device,
};
3010
3011/**
3012 * Module related routines
3013 *****************************************************************************/
3014
/**
 * Prepare to be unloaded. Free IRQ's and release all resources.
 * This is called just before this module is unloaded. It is
 * <em>not</em> called, if the usage count is !0, so we don't need to check
 * for that.
 */
static void __exit
ctc_exit(void)
{
	DBF_TEXT(setup, 3, __FUNCTION__);
	/* Unhook from the cu3088 layer, then tear down debug views. */
	unregister_cu3088_discipline(&ctc_group_driver);
	ctc_unregister_dbf_views();
	ctc_pr_info("CTC driver unloaded\n");
}
3029
3030/**
3031 * Initialize module.
3032 * This is called just after the module is loaded.
3033 *
3034 * @return 0 on success, !0 on error.
3035 */
3036static int __init
3037ctc_init(void)
3038{
3039 int ret = 0;
3040
3041 loglevel = CTC_LOGLEVEL_DEFAULT;
3042
3043 DBF_TEXT(setup, 3, __FUNCTION__);
3044
3045 print_banner();
3046
3047 ret = ctc_register_dbf_views();
3048 if (ret){
3049 ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
3050 return ret;
3051 }
3052 ret = register_cu3088_discipline(&ctc_group_driver);
3053 if (ret) {
3054 ctc_unregister_dbf_views();
3055 }
3056 return ret;
3057}
3058
/* Module entry and exit points. */
module_init(ctc_init);
module_exit(ctc_exit);

/* --- This is the END my friend --- */
diff --git a/drivers/s390/net/ctcmain.h b/drivers/s390/net/ctcmain.h
deleted file mode 100644
index 7f305d119f3d..000000000000
--- a/drivers/s390/net/ctcmain.h
+++ /dev/null
@@ -1,270 +0,0 @@
1/*
2 * CTC / ESCON network driver
3 *
4 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6 Peter Tiedemann (ptiedem@de.ibm.com)
7 *
8 *
9 * Documentation used:
10 * - Principles of Operation (IBM doc#: SA22-7201-06)
11 * - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
12 * - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
13 * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
14 * - ESCON I/O Interface (IBM doc#: SA22-7202-029
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2, or (at your option)
19 * any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29 *
30 */
31
32#ifndef _CTCMAIN_H_
33#define _CTCMAIN_H_
34
35#include <asm/ccwdev.h>
36#include <asm/ccwgroup.h>
37
38#include <linux/skbuff.h>
39#include <linux/netdevice.h>
40
41#include "fsm.h"
42#include "cu3088.h"
43
44
45/**
46 * CCW commands, used in this driver.
47 */
48#define CCW_CMD_WRITE 0x01
49#define CCW_CMD_READ 0x02
50#define CCW_CMD_SET_EXTENDED 0xc3
51#define CCW_CMD_PREPARE 0xe3
52
53#define CTC_PROTO_S390 0
54#define CTC_PROTO_LINUX 1
55#define CTC_PROTO_OS390 3
56
57#define CTC_BUFSIZE_LIMIT 65535
58#define CTC_BUFSIZE_DEFAULT 32768
59
60#define CTC_TIMEOUT_5SEC 5000
61
62#define CTC_INITIAL_BLOCKLEN 2
63
64#define READ 0
65#define WRITE 1
66
67#define CTC_ID_SIZE BUS_ID_SIZE+3
68
69
70struct ctc_profile {
71 unsigned long maxmulti;
72 unsigned long maxcqueue;
73 unsigned long doios_single;
74 unsigned long doios_multi;
75 unsigned long txlen;
76 unsigned long tx_time;
77 struct timespec send_stamp;
78};
79
80/**
81 * Definition of one channel
82 */
83struct channel {
84
85 /**
86 * Pointer to next channel in list.
87 */
88 struct channel *next;
89 char id[CTC_ID_SIZE];
90 struct ccw_device *cdev;
91
92 /**
93 * Type of this channel.
94 * CTC/A or Escon for valid channels.
95 */
96 enum channel_types type;
97
98 /**
99 * Misc. flags. See CHANNEL_FLAGS_... below
100 */
101 __u32 flags;
102
103 /**
104 * The protocol of this channel
105 */
106 __u16 protocol;
107
108 /**
109 * I/O and irq related stuff
110 */
111 struct ccw1 *ccw;
112 struct irb *irb;
113
114 /**
115 * RX/TX buffer size
116 */
117 int max_bufsize;
118
119 /**
120 * Transmit/Receive buffer.
121 */
122 struct sk_buff *trans_skb;
123
124 /**
125 * Universal I/O queue.
126 */
127 struct sk_buff_head io_queue;
128
129 /**
130 * TX queue for collecting skb's during busy.
131 */
132 struct sk_buff_head collect_queue;
133
134 /**
135 * Amount of data in collect_queue.
136 */
137 int collect_len;
138
139 /**
140 * spinlock for collect_queue and collect_len
141 */
142 spinlock_t collect_lock;
143
144 /**
145 * Timer for detecting unresposive
146 * I/O operations.
147 */
148 fsm_timer timer;
149
150 /**
151 * Retry counter for misc. operations.
152 */
153 int retry;
154
155 /**
156 * The finite state machine of this channel
157 */
158 fsm_instance *fsm;
159
160 /**
161 * The corresponding net_device this channel
162 * belongs to.
163 */
164 struct net_device *netdev;
165
166 struct ctc_profile prof;
167
168 unsigned char *trans_skb_data;
169
170 __u16 logflags;
171};
172
173#define CHANNEL_FLAGS_READ 0
174#define CHANNEL_FLAGS_WRITE 1
175#define CHANNEL_FLAGS_INUSE 2
176#define CHANNEL_FLAGS_BUFSIZE_CHANGED 4
177#define CHANNEL_FLAGS_FAILED 8
178#define CHANNEL_FLAGS_WAITIRQ 16
179#define CHANNEL_FLAGS_RWMASK 1
180#define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK)
181
182#define LOG_FLAG_ILLEGALPKT 1
183#define LOG_FLAG_ILLEGALSIZE 2
184#define LOG_FLAG_OVERRUN 4
185#define LOG_FLAG_NOMEM 8
186
187#define CTC_LOGLEVEL_INFO 1
188#define CTC_LOGLEVEL_NOTICE 2
189#define CTC_LOGLEVEL_WARN 4
190#define CTC_LOGLEVEL_EMERG 8
191#define CTC_LOGLEVEL_ERR 16
192#define CTC_LOGLEVEL_DEBUG 32
193#define CTC_LOGLEVEL_CRIT 64
194
195#define CTC_LOGLEVEL_DEFAULT \
196(CTC_LOGLEVEL_INFO | CTC_LOGLEVEL_NOTICE | CTC_LOGLEVEL_WARN | CTC_LOGLEVEL_CRIT)
197
198#define CTC_LOGLEVEL_MAX ((CTC_LOGLEVEL_CRIT<<1)-1)
199
200#define ctc_pr_debug(fmt, arg...) \
201do { if (loglevel & CTC_LOGLEVEL_DEBUG) printk(KERN_DEBUG fmt,##arg); } while (0)
202
203#define ctc_pr_info(fmt, arg...) \
204do { if (loglevel & CTC_LOGLEVEL_INFO) printk(KERN_INFO fmt,##arg); } while (0)
205
206#define ctc_pr_notice(fmt, arg...) \
207do { if (loglevel & CTC_LOGLEVEL_NOTICE) printk(KERN_NOTICE fmt,##arg); } while (0)
208
209#define ctc_pr_warn(fmt, arg...) \
210do { if (loglevel & CTC_LOGLEVEL_WARN) printk(KERN_WARNING fmt,##arg); } while (0)
211
212#define ctc_pr_emerg(fmt, arg...) \
213do { if (loglevel & CTC_LOGLEVEL_EMERG) printk(KERN_EMERG fmt,##arg); } while (0)
214
215#define ctc_pr_err(fmt, arg...) \
216do { if (loglevel & CTC_LOGLEVEL_ERR) printk(KERN_ERR fmt,##arg); } while (0)
217
218#define ctc_pr_crit(fmt, arg...) \
219do { if (loglevel & CTC_LOGLEVEL_CRIT) printk(KERN_CRIT fmt,##arg); } while (0)
220
221struct ctc_priv {
222 struct net_device_stats stats;
223 unsigned long tbusy;
224 /**
225 * The finite state machine of this interface.
226 */
227 fsm_instance *fsm;
228 /**
229 * The protocol of this device
230 */
231 __u16 protocol;
232 /**
233 * Timer for restarting after I/O Errors
234 */
235 fsm_timer restart_timer;
236
237 int buffer_size;
238
239 struct channel *channel[2];
240};
241
242/**
243 * Definition of our link level header.
244 */
245struct ll_header {
246 __u16 length;
247 __u16 type;
248 __u16 unused;
249};
250#define LL_HEADER_LENGTH (sizeof(struct ll_header))
251
/**
 * Compatibility macros for busy handling
 * of network devices.
 */
static __inline__ void
ctc_clear_busy(struct net_device * dev)
{
	/* Clear the driver-private busy bit and restart the TX queue. */
	clear_bit(0, &(((struct ctc_priv *) dev->priv)->tbusy));
	netif_wake_queue(dev);
}
262
/*
 * Stop the TX queue and atomically mark the device busy.
 *
 * @return the previous value of the busy bit (nonzero if it was
 *         already set by a concurrent path).
 */
static __inline__ int
ctc_test_and_set_busy(struct net_device * dev)
{
	netif_stop_queue(dev);
	return test_and_set_bit(0, &((struct ctc_priv *) dev->priv)->tbusy);
}
269
270#endif
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
deleted file mode 100644
index 8c6b72d05b1d..000000000000
--- a/drivers/s390/net/qeth.h
+++ /dev/null
@@ -1,1253 +0,0 @@
1#ifndef __QETH_H__
2#define __QETH_H__
3
4#include <linux/if.h>
5#include <linux/if_arp.h>
6
7#include <linux/if_tr.h>
8#include <linux/trdevice.h>
9#include <linux/etherdevice.h>
10#include <linux/if_vlan.h>
11#include <linux/ctype.h>
12
13#include <net/ipv6.h>
14#include <linux/in6.h>
15#include <net/if_inet6.h>
16#include <net/addrconf.h>
17
18
19#include <linux/bitops.h>
20
21#include <asm/debug.h>
22#include <asm/qdio.h>
23#include <asm/ccwdev.h>
24#include <asm/ccwgroup.h>
25
26#include "qeth_mpc.h"
27
28#ifdef CONFIG_QETH_IPV6
29#define QETH_VERSION_IPV6 ":IPv6"
30#else
31#define QETH_VERSION_IPV6 ""
32#endif
33#ifdef CONFIG_QETH_VLAN
34#define QETH_VERSION_VLAN ":VLAN"
35#else
36#define QETH_VERSION_VLAN ""
37#endif
38
39/**
40 * Debug Facility stuff
41 */
42#define QETH_DBF_SETUP_NAME "qeth_setup"
43#define QETH_DBF_SETUP_LEN 8
44#define QETH_DBF_SETUP_PAGES 8
45#define QETH_DBF_SETUP_NR_AREAS 1
46#define QETH_DBF_SETUP_LEVEL 5
47
48#define QETH_DBF_MISC_NAME "qeth_misc"
49#define QETH_DBF_MISC_LEN 128
50#define QETH_DBF_MISC_PAGES 2
51#define QETH_DBF_MISC_NR_AREAS 1
52#define QETH_DBF_MISC_LEVEL 2
53
54#define QETH_DBF_DATA_NAME "qeth_data"
55#define QETH_DBF_DATA_LEN 96
56#define QETH_DBF_DATA_PAGES 8
57#define QETH_DBF_DATA_NR_AREAS 1
58#define QETH_DBF_DATA_LEVEL 2
59
60#define QETH_DBF_CONTROL_NAME "qeth_control"
61#define QETH_DBF_CONTROL_LEN 256
62#define QETH_DBF_CONTROL_PAGES 8
63#define QETH_DBF_CONTROL_NR_AREAS 2
64#define QETH_DBF_CONTROL_LEVEL 5
65
66#define QETH_DBF_TRACE_NAME "qeth_trace"
67#define QETH_DBF_TRACE_LEN 8
68#define QETH_DBF_TRACE_PAGES 4
69#define QETH_DBF_TRACE_NR_AREAS 2
70#define QETH_DBF_TRACE_LEVEL 3
71extern debug_info_t *qeth_dbf_trace;
72
73#define QETH_DBF_SENSE_NAME "qeth_sense"
74#define QETH_DBF_SENSE_LEN 64
75#define QETH_DBF_SENSE_PAGES 2
76#define QETH_DBF_SENSE_NR_AREAS 1
77#define QETH_DBF_SENSE_LEVEL 2
78
79#define QETH_DBF_QERR_NAME "qeth_qerr"
80#define QETH_DBF_QERR_LEN 8
81#define QETH_DBF_QERR_PAGES 2
82#define QETH_DBF_QERR_NR_AREAS 2
83#define QETH_DBF_QERR_LEVEL 2
84
85#define QETH_DBF_TEXT(name,level,text) \
86 do { \
87 debug_text_event(qeth_dbf_##name,level,text); \
88 } while (0)
89
90#define QETH_DBF_HEX(name,level,addr,len) \
91 do { \
92 debug_event(qeth_dbf_##name,level,(void*)(addr),len); \
93 } while (0)
94
95DECLARE_PER_CPU(char[256], qeth_dbf_txt_buf);
96
97#define QETH_DBF_TEXT_(name,level,text...) \
98 do { \
99 char* dbf_txt_buf = get_cpu_var(qeth_dbf_txt_buf); \
100 sprintf(dbf_txt_buf, text); \
101 debug_text_event(qeth_dbf_##name,level,dbf_txt_buf); \
102 put_cpu_var(qeth_dbf_txt_buf); \
103 } while (0)
104
/*
 * Emit one printf-formatted record into the qeth trace debug area.
 *
 * Fix: the macro previously expanded into TWO debug_sprintf_event()
 * calls, so every message was recorded twice; the second call also
 * passed "text" without the ## paste operator, which does not compile
 * when the variadic argument list is empty.  Log exactly once.
 */
#define QETH_DBF_SPRINTF(name,level,text...) \
	do { \
		debug_sprintf_event(qeth_dbf_trace, level, ##text ); \
	} while (0)
110
111/**
112 * some more debug stuff
113 */
114#define PRINTK_HEADER "qeth: "
115
116#define HEXDUMP16(importance,header,ptr) \
117PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
118 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
119 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
120 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
121 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
122 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
123 *(((char*)ptr)+12),*(((char*)ptr)+13), \
124 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
125PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
126 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
127 *(((char*)ptr)+16),*(((char*)ptr)+17), \
128 *(((char*)ptr)+18),*(((char*)ptr)+19), \
129 *(((char*)ptr)+20),*(((char*)ptr)+21), \
130 *(((char*)ptr)+22),*(((char*)ptr)+23), \
131 *(((char*)ptr)+24),*(((char*)ptr)+25), \
132 *(((char*)ptr)+26),*(((char*)ptr)+27), \
133 *(((char*)ptr)+28),*(((char*)ptr)+29), \
134 *(((char*)ptr)+30),*(((char*)ptr)+31));
135
136static inline void
137qeth_hex_dump(unsigned char *buf, size_t len)
138{
139 size_t i;
140
141 for (i = 0; i < len; i++) {
142 if (i && !(i % 16))
143 printk("\n");
144 printk("%02x ", *(buf + i));
145 }
146 printk("\n");
147}
148
149#define SENSE_COMMAND_REJECT_BYTE 0
150#define SENSE_COMMAND_REJECT_FLAG 0x80
151#define SENSE_RESETTING_EVENT_BYTE 1
152#define SENSE_RESETTING_EVENT_FLAG 0x80
153
154/*
155 * Common IO related definitions
156 */
157extern struct device *qeth_root_dev;
158extern struct ccw_driver qeth_ccw_driver;
159extern struct ccwgroup_driver qeth_ccwgroup_driver;
160
161#define CARD_RDEV(card) card->read.ccwdev
162#define CARD_WDEV(card) card->write.ccwdev
163#define CARD_DDEV(card) card->data.ccwdev
164#define CARD_BUS_ID(card) card->gdev->dev.bus_id
165#define CARD_RDEV_ID(card) card->read.ccwdev->dev.bus_id
166#define CARD_WDEV_ID(card) card->write.ccwdev->dev.bus_id
167#define CARD_DDEV_ID(card) card->data.ccwdev->dev.bus_id
168#define CHANNEL_ID(channel) channel->ccwdev->dev.bus_id
169
170#define CARD_FROM_CDEV(cdev) (struct qeth_card *) \
171 ((struct ccwgroup_device *)cdev->dev.driver_data)\
172 ->dev.driver_data;
173
174/**
175 * card stuff
176 */
177struct qeth_perf_stats {
178 unsigned int bufs_rec;
179 unsigned int bufs_sent;
180
181 unsigned int skbs_sent_pack;
182 unsigned int bufs_sent_pack;
183
184 unsigned int sc_dp_p;
185 unsigned int sc_p_dp;
186 /* qdio_input_handler: number of times called, time spent in */
187 __u64 inbound_start_time;
188 unsigned int inbound_cnt;
189 unsigned int inbound_time;
190 /* qeth_send_packet: number of times called, time spent in */
191 __u64 outbound_start_time;
192 unsigned int outbound_cnt;
193 unsigned int outbound_time;
194 /* qdio_output_handler: number of times called, time spent in */
195 __u64 outbound_handler_start_time;
196 unsigned int outbound_handler_cnt;
197 unsigned int outbound_handler_time;
198 /* number of calls to and time spent in do_QDIO for inbound queue */
199 __u64 inbound_do_qdio_start_time;
200 unsigned int inbound_do_qdio_cnt;
201 unsigned int inbound_do_qdio_time;
202 /* number of calls to and time spent in do_QDIO for outbound queues */
203 __u64 outbound_do_qdio_start_time;
204 unsigned int outbound_do_qdio_cnt;
205 unsigned int outbound_do_qdio_time;
206 /* eddp data */
207 unsigned int large_send_bytes;
208 unsigned int large_send_cnt;
209 unsigned int sg_skbs_sent;
210 unsigned int sg_frags_sent;
211 /* initial values when measuring starts */
212 unsigned long initial_rx_packets;
213 unsigned long initial_tx_packets;
214 /* inbound scatter gather data */
215 unsigned int sg_skbs_rx;
216 unsigned int sg_frags_rx;
217 unsigned int sg_alloc_page_rx;
218};
219
220/* Routing stuff */
221struct qeth_routing_info {
222 enum qeth_routing_types type;
223};
224
225/* IPA stuff */
226struct qeth_ipa_info {
227 __u32 supported_funcs;
228 __u32 enabled_funcs;
229};
230
231static inline int
232qeth_is_ipa_supported(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func)
233{
234 return (ipa->supported_funcs & func);
235}
236
237static inline int
238qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func)
239{
240 return (ipa->supported_funcs & ipa->enabled_funcs & func);
241}
242
243#define qeth_adp_supported(c,f) \
244 qeth_is_ipa_supported(&c->options.adp, f)
245#define qeth_adp_enabled(c,f) \
246 qeth_is_ipa_enabled(&c->options.adp, f)
247#define qeth_is_supported(c,f) \
248 qeth_is_ipa_supported(&c->options.ipa4, f)
249#define qeth_is_enabled(c,f) \
250 qeth_is_ipa_enabled(&c->options.ipa4, f)
251#ifdef CONFIG_QETH_IPV6
252#define qeth_is_supported6(c,f) \
253 qeth_is_ipa_supported(&c->options.ipa6, f)
254#define qeth_is_enabled6(c,f) \
255 qeth_is_ipa_enabled(&c->options.ipa6, f)
256#else /* CONFIG_QETH_IPV6 */
257#define qeth_is_supported6(c,f) 0
258#define qeth_is_enabled6(c,f) 0
259#endif /* CONFIG_QETH_IPV6 */
260#define qeth_is_ipafunc_supported(c,prot,f) \
261 (prot==QETH_PROT_IPV6)? qeth_is_supported6(c,f):qeth_is_supported(c,f)
262#define qeth_is_ipafunc_enabled(c,prot,f) \
263 (prot==QETH_PROT_IPV6)? qeth_is_enabled6(c,f):qeth_is_enabled(c,f)
264
265
266#define QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT 0x0101
267#define QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT 0x0101
268#define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108
269#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108
270
271#define QETH_MODELLIST_ARRAY \
272 {{0x1731,0x01,0x1732,0x01,QETH_CARD_TYPE_OSAE,1, \
273 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
274 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
275 QETH_MAX_QUEUES,0}, \
276 {0x1731,0x05,0x1732,0x05,QETH_CARD_TYPE_IQD,0, \
277 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \
278 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \
279 QETH_MAX_QUEUES,0x103}, \
280 {0x1731,0x06,0x1732,0x06,QETH_CARD_TYPE_OSN,0, \
281 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
282 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
283 QETH_MAX_QUEUES,0}, \
284 {0,0,0,0,0,0,0,0,0}}
285
286#define QETH_REAL_CARD 1
287#define QETH_VLAN_CARD 2
288#define QETH_BUFSIZE 4096
289
290/**
291 * some more defs
292 */
293#define IF_NAME_LEN 16
294#define QETH_TX_TIMEOUT 100 * HZ
295#define QETH_RCD_TIMEOUT 60 * HZ
296#define QETH_HEADER_SIZE 32
297#define MAX_PORTNO 15
298#define QETH_FAKE_LL_LEN_ETH ETH_HLEN
299#define QETH_FAKE_LL_LEN_TR (sizeof(struct trh_hdr)-TR_MAXRIFLEN+sizeof(struct trllc))
300#define QETH_FAKE_LL_V6_ADDR_POS 24
301
302/*IPv6 address autoconfiguration stuff*/
303#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
304#define UNIQUE_ID_NOT_BY_CARD 0x10000
305
306/*****************************************************************************/
307/* QDIO queue and buffer handling */
308/*****************************************************************************/
309#define QETH_MAX_QUEUES 4
310#define QETH_IN_BUF_SIZE_DEFAULT 65536
311#define QETH_IN_BUF_COUNT_DEFAULT 16
312#define QETH_IN_BUF_COUNT_MIN 8
313#define QETH_IN_BUF_COUNT_MAX 128
314#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
315#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
316 ((card)->qdio.in_buf_pool.buf_count / 2)
317
318/* buffers we have to be behind before we get a PCI */
319#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
320/*enqueued free buffers left before we get a PCI*/
321#define QETH_PCI_THRESHOLD_B(card) 0
322/*not used unless the microcode gets patched*/
323#define QETH_PCI_TIMER_VALUE(card) 3
324
325#define QETH_MIN_INPUT_THRESHOLD 1
326#define QETH_MAX_INPUT_THRESHOLD 500
327#define QETH_MIN_OUTPUT_THRESHOLD 1
328#define QETH_MAX_OUTPUT_THRESHOLD 300
329
330/* priority queing */
331#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
332#define QETH_DEFAULT_QUEUE 2
333#define QETH_NO_PRIO_QUEUEING 0
334#define QETH_PRIO_Q_ING_PREC 1
335#define QETH_PRIO_Q_ING_TOS 2
336#define IP_TOS_LOWDELAY 0x10
337#define IP_TOS_HIGHTHROUGHPUT 0x08
338#define IP_TOS_HIGHRELIABILITY 0x04
339#define IP_TOS_NOTIMPORTANT 0x02
340
341/* Packing */
342#define QETH_LOW_WATERMARK_PACK 2
343#define QETH_HIGH_WATERMARK_PACK 5
344#define QETH_WATERMARK_PACK_FUZZ 1
345
346#define QETH_IP_HEADER_SIZE 40
347
348/* large receive scatter gather copy break */
349#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
350
351struct qeth_hdr_layer3 {
352 __u8 id;
353 __u8 flags;
354 __u16 inbound_checksum; /*TSO:__u16 seqno */
355 __u32 token; /*TSO: __u32 reserved */
356 __u16 length;
357 __u8 vlan_prio;
358 __u8 ext_flags;
359 __u16 vlan_id;
360 __u16 frame_offset;
361 __u8 dest_addr[16];
362} __attribute__ ((packed));
363
364struct qeth_hdr_layer2 {
365 __u8 id;
366 __u8 flags[3];
367 __u8 port_no;
368 __u8 hdr_length;
369 __u16 pkt_length;
370 __u16 seq_no;
371 __u16 vlan_id;
372 __u32 reserved;
373 __u8 reserved2[16];
374} __attribute__ ((packed));
375
376struct qeth_hdr_osn {
377 __u8 id;
378 __u8 reserved;
379 __u16 seq_no;
380 __u16 reserved2;
381 __u16 control_flags;
382 __u16 pdu_length;
383 __u8 reserved3[18];
384 __u32 ccid;
385} __attribute__ ((packed));
386
387struct qeth_hdr {
388 union {
389 struct qeth_hdr_layer2 l2;
390 struct qeth_hdr_layer3 l3;
391 struct qeth_hdr_osn osn;
392 } hdr;
393} __attribute__ ((packed));
394
395/*TCP Segmentation Offload header*/
396struct qeth_hdr_ext_tso {
397 __u16 hdr_tot_len;
398 __u8 imb_hdr_no;
399 __u8 reserved;
400 __u8 hdr_type;
401 __u8 hdr_version;
402 __u16 hdr_len;
403 __u32 payload_len;
404 __u16 mss;
405 __u16 dg_hdr_len;
406 __u8 padding[16];
407} __attribute__ ((packed));
408
409struct qeth_hdr_tso {
410 struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/
411 struct qeth_hdr_ext_tso ext;
412} __attribute__ ((packed));
413
414
415/* flags for qeth_hdr.flags */
416#define QETH_HDR_PASSTHRU 0x10
417#define QETH_HDR_IPV6 0x80
418#define QETH_HDR_CAST_MASK 0x07
419enum qeth_cast_flags {
420 QETH_CAST_UNICAST = 0x06,
421 QETH_CAST_MULTICAST = 0x04,
422 QETH_CAST_BROADCAST = 0x05,
423 QETH_CAST_ANYCAST = 0x07,
424 QETH_CAST_NOCAST = 0x00,
425};
426
427enum qeth_layer2_frame_flags {
428 QETH_LAYER2_FLAG_MULTICAST = 0x01,
429 QETH_LAYER2_FLAG_BROADCAST = 0x02,
430 QETH_LAYER2_FLAG_UNICAST = 0x04,
431 QETH_LAYER2_FLAG_VLAN = 0x10,
432};
433
434enum qeth_header_ids {
435 QETH_HEADER_TYPE_LAYER3 = 0x01,
436 QETH_HEADER_TYPE_LAYER2 = 0x02,
437 QETH_HEADER_TYPE_TSO = 0x03,
438 QETH_HEADER_TYPE_OSN = 0x04,
439};
440/* flags for qeth_hdr.ext_flags */
441#define QETH_HDR_EXT_VLAN_FRAME 0x01
442#define QETH_HDR_EXT_TOKEN_ID 0x02
443#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04
444#define QETH_HDR_EXT_SRC_MAC_ADDR 0x08
445#define QETH_HDR_EXT_CSUM_HDR_REQ 0x10
446#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
447#define QETH_HDR_EXT_UDP_TSO 0x40 /*bit off for TCP*/
448
449static inline int
450qeth_is_last_sbale(struct qdio_buffer_element *sbale)
451{
452 return (sbale->flags & SBAL_FLAGS_LAST_ENTRY);
453}
454
455enum qeth_qdio_buffer_states {
456 /*
457 * inbound: read out by driver; owned by hardware in order to be filled
458 * outbound: owned by driver in order to be filled
459 */
460 QETH_QDIO_BUF_EMPTY,
461 /*
462 * inbound: filled by hardware; owned by driver in order to be read out
463 * outbound: filled by driver; owned by hardware in order to be sent
464 */
465 QETH_QDIO_BUF_PRIMED,
466};
467
468enum qeth_qdio_info_states {
469 QETH_QDIO_UNINITIALIZED,
470 QETH_QDIO_ALLOCATED,
471 QETH_QDIO_ESTABLISHED,
472 QETH_QDIO_CLEANING
473};
474
475struct qeth_buffer_pool_entry {
476 struct list_head list;
477 struct list_head init_list;
478 void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
479};
480
481struct qeth_qdio_buffer_pool {
482 struct list_head entry_list;
483 int buf_count;
484};
485
486struct qeth_qdio_buffer {
487 struct qdio_buffer *buffer;
488 volatile enum qeth_qdio_buffer_states state;
489 /* the buffer pool entry currently associated to this buffer */
490 struct qeth_buffer_pool_entry *pool_entry;
491};
492
493struct qeth_qdio_q {
494 struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
495 struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
496 /*
497 * buf_to_init means "buffer must be initialized by driver and must
498 * be made available for hardware" -> state is set to EMPTY
499 */
500 volatile int next_buf_to_init;
501} __attribute__ ((aligned(256)));
502
503/* possible types of qeth large_send support */
504enum qeth_large_send_types {
505 QETH_LARGE_SEND_NO,
506 QETH_LARGE_SEND_EDDP,
507 QETH_LARGE_SEND_TSO,
508};
509
510struct qeth_qdio_out_buffer {
511 struct qdio_buffer *buffer;
512 atomic_t state;
513 volatile int next_element_to_fill;
514 struct sk_buff_head skb_list;
515 struct list_head ctx_list;
516};
517
518struct qeth_card;
519
520enum qeth_out_q_states {
521 QETH_OUT_Q_UNLOCKED,
522 QETH_OUT_Q_LOCKED,
523 QETH_OUT_Q_LOCKED_FLUSH,
524};
525
526struct qeth_qdio_out_q {
527 struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
528 struct qeth_qdio_out_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
529 int queue_no;
530 struct qeth_card *card;
531 atomic_t state;
532 volatile int do_pack;
533 /*
534 * index of buffer to be filled by driver; state EMPTY or PACKING
535 */
536 volatile int next_buf_to_fill;
537 /*
538 * number of buffers that are currently filled (PRIMED)
539 * -> these buffers are hardware-owned
540 */
541 atomic_t used_buffers;
542 /* indicates whether PCI flag must be set (or if one is outstanding) */
543 atomic_t set_pci_flags_count;
544} __attribute__ ((aligned(256)));
545
546struct qeth_qdio_info {
547 atomic_t state;
548 /* input */
549 struct qeth_qdio_q *in_q;
550 struct qeth_qdio_buffer_pool in_buf_pool;
551 struct qeth_qdio_buffer_pool init_pool;
552 int in_buf_size;
553
554 /* output */
555 int no_out_queues;
556 struct qeth_qdio_out_q **out_qs;
557
558 /* priority queueing */
559 int do_prio_queueing;
560 int default_out_queue;
561};
562
563enum qeth_send_errors {
564 QETH_SEND_ERROR_NONE,
565 QETH_SEND_ERROR_LINK_FAILURE,
566 QETH_SEND_ERROR_RETRY,
567 QETH_SEND_ERROR_KICK_IT,
568};
569
570#define QETH_ETH_MAC_V4 0x0100 /* like v4 */
571#define QETH_ETH_MAC_V6 0x3333 /* like v6 */
572/* tr mc mac is longer, but that will be enough to detect mc frames */
573#define QETH_TR_MAC_NC 0xc000 /* non-canonical */
574#define QETH_TR_MAC_C 0x0300 /* canonical */
575
576#define DEFAULT_ADD_HHLEN 0
577#define MAX_ADD_HHLEN 1024
578
579/**
580 * buffer stuff for read channel
581 */
582#define QETH_CMD_BUFFER_NO 8
583
584/**
585 * channel state machine
586 */
587enum qeth_channel_states {
588 CH_STATE_UP,
589 CH_STATE_DOWN,
590 CH_STATE_ACTIVATING,
591 CH_STATE_HALTED,
592 CH_STATE_STOPPED,
593 CH_STATE_RCD,
594 CH_STATE_RCD_DONE,
595};
596/**
597 * card state machine
598 */
599enum qeth_card_states {
600 CARD_STATE_DOWN,
601 CARD_STATE_HARDSETUP,
602 CARD_STATE_SOFTSETUP,
603 CARD_STATE_UP,
604 CARD_STATE_RECOVER,
605};
606
607/**
608 * Protocol versions
609 */
610enum qeth_prot_versions {
611 QETH_PROT_IPV4 = 0x0004,
612 QETH_PROT_IPV6 = 0x0006,
613};
614
615enum qeth_ip_types {
616 QETH_IP_TYPE_NORMAL,
617 QETH_IP_TYPE_VIPA,
618 QETH_IP_TYPE_RXIP,
619 QETH_IP_TYPE_DEL_ALL_MC,
620};
621
622enum qeth_cmd_buffer_state {
623 BUF_STATE_FREE,
624 BUF_STATE_LOCKED,
625 BUF_STATE_PROCESSED,
626};
627/**
628 * IP address and multicast list
629 */
630struct qeth_ipaddr {
631 struct list_head entry;
632 enum qeth_ip_types type;
633 enum qeth_ipa_setdelip_flags set_flags;
634 enum qeth_ipa_setdelip_flags del_flags;
635 int is_multicast;
636 volatile int users;
637 enum qeth_prot_versions proto;
638 unsigned char mac[OSA_ADDR_LEN];
639 union {
640 struct {
641 unsigned int addr;
642 unsigned int mask;
643 } a4;
644 struct {
645 struct in6_addr addr;
646 unsigned int pfxlen;
647 } a6;
648 } u;
649};
650
651struct qeth_ipato_entry {
652 struct list_head entry;
653 enum qeth_prot_versions proto;
654 char addr[16];
655 int mask_bits;
656};
657
658struct qeth_ipato {
659 int enabled;
660 int invert4;
661 int invert6;
662 struct list_head entries;
663};
664
665struct qeth_channel;
666
667struct qeth_cmd_buffer {
668 enum qeth_cmd_buffer_state state;
669 struct qeth_channel *channel;
670 unsigned char *data;
671 int rc;
672 void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
673};
674
675
676/**
677 * definition of a qeth channel, used for read and write
678 */
679struct qeth_channel {
680 enum qeth_channel_states state;
681 struct ccw1 ccw;
682 spinlock_t iob_lock;
683 wait_queue_head_t wait_q;
684 struct tasklet_struct irq_tasklet;
685 struct ccw_device *ccwdev;
686/*command buffer for control data*/
687 struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
688 atomic_t irq_pending;
689 volatile int io_buf_no;
690 volatile int buf_no;
691};
692
693/**
694 * OSA card related definitions
695 */
696struct qeth_token {
697 __u32 issuer_rm_w;
698 __u32 issuer_rm_r;
699 __u32 cm_filter_w;
700 __u32 cm_filter_r;
701 __u32 cm_connection_w;
702 __u32 cm_connection_r;
703 __u32 ulp_filter_w;
704 __u32 ulp_filter_r;
705 __u32 ulp_connection_w;
706 __u32 ulp_connection_r;
707};
708
709struct qeth_seqno {
710 __u32 trans_hdr;
711 __u32 pdu_hdr;
712 __u32 pdu_hdr_ack;
713 __u16 ipa;
714 __u32 pkt_seqno;
715};
716
717struct qeth_reply {
718 struct list_head list;
719 wait_queue_head_t wait_q;
720 int (*callback)(struct qeth_card *,struct qeth_reply *,unsigned long);
721 u32 seqno;
722 unsigned long offset;
723 atomic_t received;
724 int rc;
725 void *param;
726 struct qeth_card *card;
727 atomic_t refcnt;
728};
729
730
731struct qeth_card_blkt {
732 int time_total;
733 int inter_packet;
734 int inter_packet_jumbo;
735};
736
737#define QETH_BROADCAST_WITH_ECHO 0x01
738#define QETH_BROADCAST_WITHOUT_ECHO 0x02
739#define QETH_LAYER2_MAC_READ 0x01
740#define QETH_LAYER2_MAC_REGISTERED 0x02
741struct qeth_card_info {
742 unsigned short unit_addr2;
743 unsigned short cula;
744 unsigned short chpid;
745 __u16 func_level;
746 char mcl_level[QETH_MCL_LENGTH + 1];
747 int guestlan;
748 int mac_bits;
749 int portname_required;
750 int portno;
751 char portname[9];
752 enum qeth_card_types type;
753 enum qeth_link_types link_type;
754 int is_multicast_different;
755 int initial_mtu;
756 int max_mtu;
757 int broadcast_capable;
758 int unique_id;
759 struct qeth_card_blkt blkt;
760 __u32 csum_mask;
761 enum qeth_ipa_promisc_modes promisc_mode;
762};
763
764struct qeth_card_options {
765 struct qeth_routing_info route4;
766 struct qeth_ipa_info ipa4;
767 struct qeth_ipa_info adp; /*Adapter parameters*/
768#ifdef CONFIG_QETH_IPV6
769 struct qeth_routing_info route6;
770 struct qeth_ipa_info ipa6;
771#endif /* QETH_IPV6 */
772 enum qeth_checksum_types checksum_type;
773 int broadcast_mode;
774 int macaddr_mode;
775 int fake_broadcast;
776 int add_hhlen;
777 int fake_ll;
778 int layer2;
779 enum qeth_large_send_types large_send;
780 int performance_stats;
781 int rx_sg_cb;
782};
783
784/*
785 * thread bits for qeth_card thread masks
786 */
787enum qeth_threads {
788 QETH_SET_IP_THREAD = 1,
789 QETH_RECOVER_THREAD = 2,
790 QETH_SET_PROMISC_MODE_THREAD = 4,
791};
792
793struct qeth_osn_info {
794 int (*assist_cb)(struct net_device *dev, void *data);
795 int (*data_cb)(struct sk_buff *skb);
796};
797
798struct qeth_card {
799 struct list_head list;
800 enum qeth_card_states state;
801 int lan_online;
802 spinlock_t lock;
803/*hardware and sysfs stuff*/
804 struct ccwgroup_device *gdev;
805 struct qeth_channel read;
806 struct qeth_channel write;
807 struct qeth_channel data;
808
809 struct net_device *dev;
810 struct net_device_stats stats;
811
812 struct qeth_card_info info;
813 struct qeth_token token;
814 struct qeth_seqno seqno;
815 struct qeth_card_options options;
816
817 wait_queue_head_t wait_q;
818#ifdef CONFIG_QETH_VLAN
819 spinlock_t vlanlock;
820 struct vlan_group *vlangrp;
821#endif
822 struct work_struct kernel_thread_starter;
823 spinlock_t thread_mask_lock;
824 volatile unsigned long thread_start_mask;
825 volatile unsigned long thread_allowed_mask;
826 volatile unsigned long thread_running_mask;
827 spinlock_t ip_lock;
828 struct list_head ip_list;
829 struct list_head *ip_tbd_list;
830 struct qeth_ipato ipato;
831 struct list_head cmd_waiter_list;
832 /* QDIO buffer handling */
833 struct qeth_qdio_info qdio;
834 struct qeth_perf_stats perf_stats;
835 int use_hard_stop;
836 const struct header_ops *orig_header_ops;
837 struct qeth_osn_info osn_info;
838 atomic_t force_alloc_skb;
839};
840
841struct qeth_card_list_struct {
842 struct list_head list;
843 rwlock_t rwlock;
844};
845
846extern struct qeth_card_list_struct qeth_card_list;
847
848/*notifier list */
849struct qeth_notify_list_struct {
850 struct list_head list;
851 struct task_struct *task;
852 int signum;
853};
854extern spinlock_t qeth_notify_lock;
855extern struct list_head qeth_notify_list;
856
857/*some helper functions*/
858
859#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
860
861static inline __u8
862qeth_get_ipa_adp_type(enum qeth_link_types link_type)
863{
864 switch (link_type) {
865 case QETH_LINK_TYPE_HSTR:
866 return 2;
867 default:
868 return 1;
869 }
870}
871
872static inline struct sk_buff *
873qeth_realloc_headroom(struct qeth_card *card, struct sk_buff *skb, int size)
874{
875 struct sk_buff *new_skb = skb;
876
877 if (skb_headroom(skb) >= size)
878 return skb;
879 new_skb = skb_realloc_headroom(skb, size);
880 if (!new_skb)
881 PRINT_ERR("Could not realloc headroom for qeth_hdr "
882 "on interface %s", QETH_CARD_IFNAME(card));
883 return new_skb;
884}
885
886static inline struct sk_buff *
887qeth_pskb_unshare(struct sk_buff *skb, gfp_t pri)
888{
889 struct sk_buff *nskb;
890 if (!skb_cloned(skb))
891 return skb;
892 nskb = skb_copy(skb, pri);
893 return nskb;
894}
895
896static inline void *
897qeth_push_skb(struct qeth_card *card, struct sk_buff *skb, int size)
898{
899 void *hdr;
900
901 hdr = (void *) skb_push(skb, size);
902 /*
903 * sanity check, the Linux memory allocation scheme should
904 * never present us cases like this one (the qdio header size plus
905 * the first 40 bytes of the paket cross a 4k boundary)
906 */
907 if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) !=
908 (((unsigned long) hdr + size +
909 QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) {
910 PRINT_ERR("Misaligned packet on interface %s. Discarded.",
911 QETH_CARD_IFNAME(card));
912 return NULL;
913 }
914 return hdr;
915}
916
917
918static inline int
919qeth_get_hlen(__u8 link_type)
920{
921#ifdef CONFIG_QETH_IPV6
922 switch (link_type) {
923 case QETH_LINK_TYPE_HSTR:
924 case QETH_LINK_TYPE_LANE_TR:
925 return sizeof(struct qeth_hdr_tso) + TR_HLEN;
926 default:
927#ifdef CONFIG_QETH_VLAN
928 return sizeof(struct qeth_hdr_tso) + VLAN_ETH_HLEN;
929#else
930 return sizeof(struct qeth_hdr_tso) + ETH_HLEN;
931#endif
932 }
933#else /* CONFIG_QETH_IPV6 */
934#ifdef CONFIG_QETH_VLAN
935 return sizeof(struct qeth_hdr_tso) + VLAN_HLEN;
936#else
937 return sizeof(struct qeth_hdr_tso);
938#endif
939#endif /* CONFIG_QETH_IPV6 */
940}
941
942static inline unsigned short
943qeth_get_netdev_flags(struct qeth_card *card)
944{
945 if (card->options.layer2 &&
946 (card->info.type == QETH_CARD_TYPE_OSAE))
947 return 0;
948 switch (card->info.type) {
949 case QETH_CARD_TYPE_IQD:
950 case QETH_CARD_TYPE_OSN:
951 return IFF_NOARP;
952#ifdef CONFIG_QETH_IPV6
953 default:
954 return 0;
955#else
956 default:
957 return IFF_NOARP;
958#endif
959 }
960}
961
962static inline int
963qeth_get_initial_mtu_for_card(struct qeth_card * card)
964{
965 switch (card->info.type) {
966 case QETH_CARD_TYPE_UNKNOWN:
967 return 1500;
968 case QETH_CARD_TYPE_IQD:
969 return card->info.max_mtu;
970 case QETH_CARD_TYPE_OSAE:
971 switch (card->info.link_type) {
972 case QETH_LINK_TYPE_HSTR:
973 case QETH_LINK_TYPE_LANE_TR:
974 return 2000;
975 default:
976 return 1492;
977 }
978 default:
979 return 1500;
980 }
981}
982
983static inline int
984qeth_get_max_mtu_for_card(int cardtype)
985{
986 switch (cardtype) {
987
988 case QETH_CARD_TYPE_UNKNOWN:
989 case QETH_CARD_TYPE_OSAE:
990 case QETH_CARD_TYPE_OSN:
991 return 61440;
992 case QETH_CARD_TYPE_IQD:
993 return 57344;
994 default:
995 return 1500;
996 }
997}
998
999static inline int
1000qeth_get_mtu_out_of_mpc(int cardtype)
1001{
1002 switch (cardtype) {
1003 case QETH_CARD_TYPE_IQD:
1004 return 1;
1005 default:
1006 return 0;
1007 }
1008}
1009
/**
 * Translate a hardware frame-size code into an MTU value.
 * Unrecognized codes yield 0.
 */
static inline int
qeth_get_mtu_outof_framesize(int framesize)
{
	if (framesize == 0x4000)
		return 8192;
	if (framesize == 0x6000)
		return 16384;
	if (framesize == 0xa000)
		return 32768;
	if (framesize == 0xffff)
		return 57344;
	return 0;
}
1026
1027static inline int
1028qeth_mtu_is_valid(struct qeth_card * card, int mtu)
1029{
1030 switch (card->info.type) {
1031 case QETH_CARD_TYPE_OSAE:
1032 return ((mtu >= 576) && (mtu <= 61440));
1033 case QETH_CARD_TYPE_IQD:
1034 return ((mtu >= 576) &&
1035 (mtu <= card->info.max_mtu + 4096 - 32));
1036 case QETH_CARD_TYPE_OSN:
1037 case QETH_CARD_TYPE_UNKNOWN:
1038 default:
1039 return 1;
1040 }
1041}
1042
1043static inline int
1044qeth_get_arphdr_type(int cardtype, int linktype)
1045{
1046 switch (cardtype) {
1047 case QETH_CARD_TYPE_OSAE:
1048 case QETH_CARD_TYPE_OSN:
1049 switch (linktype) {
1050 case QETH_LINK_TYPE_LANE_TR:
1051 case QETH_LINK_TYPE_HSTR:
1052 return ARPHRD_IEEE802_TR;
1053 default:
1054 return ARPHRD_ETHER;
1055 }
1056 case QETH_CARD_TYPE_IQD:
1057 default:
1058 return ARPHRD_ETHER;
1059 }
1060}
1061
/**
 * Current clock value scaled down by 2^12 and truncated to int.
 * NOTE(review): the name suggests the shifted get_clock() value is
 * in microseconds -- confirm against the TOD clock resolution.
 */
static inline int
qeth_get_micros(void)
{
	return (int) (get_clock() >> 12);
}
1067
1068static inline int
1069qeth_get_qdio_q_format(struct qeth_card *card)
1070{
1071 switch (card->info.type) {
1072 case QETH_CARD_TYPE_IQD:
1073 return 2;
1074 default:
1075 return 0;
1076 }
1077}
1078
/**
 * Return 1 if @buf consists entirely of hexadecimal digits
 * (an empty string counts as valid), 0 otherwise.
 */
static inline int
qeth_isxdigit(char *buf)
{
	char *p;

	for (p = buf; *p; p++)
		if (!isxdigit(*p))
			return 0;
	return 1;
}
1088
/**
 * Format a 4-byte IPv4 address as dotted decimal into @buf.
 * @buf must hold at least 16 bytes ("255.255.255.255" plus NUL).
 */
static inline void
qeth_ipaddr4_to_string(const __u8 *addr, char *buf)
{
	sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]);
}
1094
/**
 * Parse a dotted-decimal IPv4 string into the 4 bytes of @addr.
 *
 * A single trailing newline is tolerated.  Returns 0 on success,
 * -EINVAL for malformed input or an octet larger than 255.
 *
 * Fix: the octets were scanned with "%u" into an array of signed
 * int -- a scanf conversion-specifier/argument type mismatch, which
 * is undefined behaviour; the array is now unsigned int to match
 * the specifier (and makes the "> 255" range check well-defined).
 */
static inline int
qeth_string_to_ipaddr4(const char *buf, __u8 *addr)
{
	unsigned int in[4];
	int count, rc;
	char c;

	rc = sscanf(buf, "%u.%u.%u.%u%c",
		    &in[0], &in[1], &in[2], &in[3], &c);
	/* exactly 4 octets, optionally followed by a single '\n' */
	if (rc != 4 && (rc != 5 || c != '\n'))
		return -EINVAL;
	for (count = 0; count < 4; count++) {
		if (in[count] > 255)
			return -EINVAL;
		addr[count] = in[count];
	}
	return 0;
}
1113
/**
 * Format a 16-byte IPv6 address as eight colon-separated 16-bit hex
 * groups (no "::" compression).  @buf must hold at least 40 bytes
 * (8 * 4 digits + 7 colons + NUL).
 */
static inline void
qeth_ipaddr6_to_string(const __u8 *addr, char *buf)
{
	int i;
	char *out = buf;

	for (i = 0; i < 16; i += 2) {
		out += sprintf(out, "%02x%02x", addr[i], addr[i + 1]);
		if (i < 14)
			*out++ = ':';
	}
	*out = '\0';
}
1124
/**
 * Parse a colon-separated IPv6 address string into the 16 bytes of
 * @addr.
 *
 * Up to eight groups of 1-4 hex digits; one empty group ("::") marks
 * a gap, and the groups that follow it are right-aligned into the
 * address.  An optional trailing newline is ignored.  Returns 0 on
 * success, -EINVAL on malformed input.
 *
 * NOTE(review): groups are stored as host-endian __u16 writes, and a
 * leading "::" (as in "::1") produces two consecutive empty tokens
 * and is rejected -- confirm with callers whether that is intended.
 *
 * Fix: both group counters are now bounded before storing.  The
 * original stored into in[cnt++] / in_tmp[save_cnt++] without any
 * bound, so an input with more than eight groups overran the 16-byte
 * @addr buffer (and the local 8-entry array) before the final
 * "cnt + save_cnt > 8" check could reject it.
 */
static inline int
qeth_string_to_ipaddr6(const char *buf, __u8 *addr)
{
	const char *end, *end_tmp, *start;
	__u16 *in;
	char num[5];
	int num2, cnt, out, found, save_cnt;
	unsigned short in_tmp[8] = {0, };

	cnt = out = found = save_cnt = num2 = 0;
	end = start = buf;
	in = (__u16 *) addr;
	memset(in, 0, 16);
	while (*end) {
		end = strchr(start, ':');
		if (end == NULL) {
			/* last group: runs to end of string or to '\n' */
			end = buf + strlen(buf);
			if ((end_tmp = strchr(start, '\n')) != NULL)
				end = end_tmp;
			out = 1;
		}
		if (end - start) {
			memset(num, 0, 5);
			if ((end - start) > 4)
				return -EINVAL;
			memcpy(num, start, end - start);
			if (!qeth_isxdigit(num))
				return -EINVAL;
			sscanf(start, "%x", &num2);
			if (found) {
				if (save_cnt >= 8)
					return -EINVAL;
				in_tmp[save_cnt++] = num2;
			} else {
				if (cnt >= 8)
					return -EINVAL;
				in[cnt++] = num2;
			}
			if (out)
				break;
		} else {
			/* empty group: the "::" gap; only one allowed */
			if (found)
				return -EINVAL;
			found = 1;
		}
		start = ++end;
	}
	if (cnt + save_cnt > 8)
		return -EINVAL;
	/* right-align the groups that followed "::" */
	cnt = 7;
	while (save_cnt)
		in[cnt--] = in_tmp[--save_cnt];
	return 0;
}
1174
1175static inline void
1176qeth_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
1177 char *buf)
1178{
1179 if (proto == QETH_PROT_IPV4)
1180 qeth_ipaddr4_to_string(addr, buf);
1181 else if (proto == QETH_PROT_IPV6)
1182 qeth_ipaddr6_to_string(addr, buf);
1183}
1184
1185static inline int
1186qeth_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
1187 __u8 *addr)
1188{
1189 if (proto == QETH_PROT_IPV4)
1190 return qeth_string_to_ipaddr4(buf, addr);
1191 else if (proto == QETH_PROT_IPV6)
1192 return qeth_string_to_ipaddr6(buf, addr);
1193 else
1194 return -EINVAL;
1195}
1196
1197extern int
1198qeth_setrouting_v4(struct qeth_card *);
1199extern int
1200qeth_setrouting_v6(struct qeth_card *);
1201
1202extern int
1203qeth_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
1204
1205extern void
1206qeth_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions, u8 *, int);
1207
1208extern int
1209qeth_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1210
1211extern void
1212qeth_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1213
1214extern int
1215qeth_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1216
1217extern void
1218qeth_del_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1219
1220extern int
1221qeth_notifier_register(struct task_struct *, int );
1222
1223extern int
1224qeth_notifier_unregister(struct task_struct * );
1225
1226extern void
1227qeth_schedule_recovery(struct qeth_card *);
1228
1229extern int
1230qeth_realloc_buffer_pool(struct qeth_card *, int);
1231
1232extern int
1233qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types);
1234
1235extern void
1236qeth_fill_header(struct qeth_card *, struct qeth_hdr *,
1237 struct sk_buff *, int, int);
1238extern void
1239qeth_flush_buffers(struct qeth_qdio_out_q *, int, int, int);
1240
1241extern int
1242qeth_osn_assist(struct net_device *, void *, int);
1243
1244extern int
1245qeth_osn_register(unsigned char *read_dev_no,
1246 struct net_device **,
1247 int (*assist_cb)(struct net_device *, void *),
1248 int (*data_cb)(struct sk_buff *));
1249
1250extern void
1251qeth_osn_deregister(struct net_device *);
1252
1253#endif /* __QETH_H__ */
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
new file mode 100644
index 000000000000..66f4f12503c9
--- /dev/null
+++ b/drivers/s390/net/qeth_core.h
@@ -0,0 +1,905 @@
1/*
2 * drivers/s390/net/qeth_core.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#ifndef __QETH_CORE_H__
12#define __QETH_CORE_H__
13
14#include <linux/if.h>
15#include <linux/if_arp.h>
16#include <linux/if_tr.h>
17#include <linux/trdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/if_vlan.h>
20#include <linux/ctype.h>
21#include <linux/in6.h>
22#include <linux/bitops.h>
23#include <linux/seq_file.h>
24#include <linux/ethtool.h>
25
26#include <net/ipv6.h>
27#include <net/if_inet6.h>
28#include <net/addrconf.h>
29
30#include <asm/debug.h>
31#include <asm/qdio.h>
32#include <asm/ccwdev.h>
33#include <asm/ccwgroup.h>
34
35#include "qeth_core_mpc.h"
36
37#define KMSG_COMPONENT "qeth"
38
39/**
40 * Debug Facility stuff
41 */
42enum qeth_dbf_names {
43 QETH_DBF_SETUP,
44 QETH_DBF_QERR,
45 QETH_DBF_TRACE,
46 QETH_DBF_MSG,
47 QETH_DBF_SENSE,
48 QETH_DBF_MISC,
49 QETH_DBF_CTRL,
50 QETH_DBF_INFOS /* must be last element */
51};
52
53struct qeth_dbf_info {
54 char name[DEBUG_MAX_NAME_LEN];
55 int pages;
56 int areas;
57 int len;
58 int level;
59 struct debug_view *view;
60 debug_info_t *id;
61};
62
63#define QETH_DBF_CTRL_LEN 256
64
65#define QETH_DBF_TEXT(name, level, text) \
66 debug_text_event(qeth_dbf[QETH_DBF_##name].id, level, text)
67
68#define QETH_DBF_HEX(name, level, addr, len) \
69 debug_event(qeth_dbf[QETH_DBF_##name].id, level, (void *)(addr), len)
70
71#define QETH_DBF_MESSAGE(level, text...) \
72 debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text)
73
74#define QETH_DBF_TEXT_(name, level, text...) \
75 do { \
76 if (qeth_dbf_passes(qeth_dbf[QETH_DBF_##name].id, level)) { \
77 char *dbf_txt_buf = \
78 get_cpu_var(QETH_DBF_TXT_BUF); \
79 sprintf(dbf_txt_buf, text); \
80 debug_text_event(qeth_dbf[QETH_DBF_##name].id, \
81 level, dbf_txt_buf); \
82 put_cpu_var(QETH_DBF_TXT_BUF); \
83 } \
84 } while (0)
85
86/* Allow to sort out low debug levels early to avoid wasted sprints */
87static inline int qeth_dbf_passes(debug_info_t *dbf_grp, int level)
88{
89 return (level <= dbf_grp->level);
90}
91
92/**
93 * some more debug stuff
94 */
95#define PRINTK_HEADER "qeth: "
96
97#define SENSE_COMMAND_REJECT_BYTE 0
98#define SENSE_COMMAND_REJECT_FLAG 0x80
99#define SENSE_RESETTING_EVENT_BYTE 1
100#define SENSE_RESETTING_EVENT_FLAG 0x80
101
102/*
103 * Common IO related definitions
104 */
105#define CARD_RDEV(card) card->read.ccwdev
106#define CARD_WDEV(card) card->write.ccwdev
107#define CARD_DDEV(card) card->data.ccwdev
108#define CARD_BUS_ID(card) card->gdev->dev.bus_id
109#define CARD_RDEV_ID(card) card->read.ccwdev->dev.bus_id
110#define CARD_WDEV_ID(card) card->write.ccwdev->dev.bus_id
111#define CARD_DDEV_ID(card) card->data.ccwdev->dev.bus_id
112#define CHANNEL_ID(channel) channel->ccwdev->dev.bus_id
113
114/**
115 * card stuff
116 */
117struct qeth_perf_stats {
118 unsigned int bufs_rec;
119 unsigned int bufs_sent;
120
121 unsigned int skbs_sent_pack;
122 unsigned int bufs_sent_pack;
123
124 unsigned int sc_dp_p;
125 unsigned int sc_p_dp;
126 /* qdio_input_handler: number of times called, time spent in */
127 __u64 inbound_start_time;
128 unsigned int inbound_cnt;
129 unsigned int inbound_time;
130 /* qeth_send_packet: number of times called, time spent in */
131 __u64 outbound_start_time;
132 unsigned int outbound_cnt;
133 unsigned int outbound_time;
134 /* qdio_output_handler: number of times called, time spent in */
135 __u64 outbound_handler_start_time;
136 unsigned int outbound_handler_cnt;
137 unsigned int outbound_handler_time;
138 /* number of calls to and time spent in do_QDIO for inbound queue */
139 __u64 inbound_do_qdio_start_time;
140 unsigned int inbound_do_qdio_cnt;
141 unsigned int inbound_do_qdio_time;
142 /* number of calls to and time spent in do_QDIO for outbound queues */
143 __u64 outbound_do_qdio_start_time;
144 unsigned int outbound_do_qdio_cnt;
145 unsigned int outbound_do_qdio_time;
146 /* eddp data */
147 unsigned int large_send_bytes;
148 unsigned int large_send_cnt;
149 unsigned int sg_skbs_sent;
150 unsigned int sg_frags_sent;
151 /* initial values when measuring starts */
152 unsigned long initial_rx_packets;
153 unsigned long initial_tx_packets;
154 /* inbound scatter gather data */
155 unsigned int sg_skbs_rx;
156 unsigned int sg_frags_rx;
157 unsigned int sg_alloc_page_rx;
158};
159
160/* Routing stuff */
161struct qeth_routing_info {
162 enum qeth_routing_types type;
163};
164
165/* IPA stuff */
166struct qeth_ipa_info {
167 __u32 supported_funcs;
168 __u32 enabled_funcs;
169};
170
171static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
172 enum qeth_ipa_funcs func)
173{
174 return (ipa->supported_funcs & func);
175}
176
177static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
178 enum qeth_ipa_funcs func)
179{
180 return (ipa->supported_funcs & ipa->enabled_funcs & func);
181}
182
183#define qeth_adp_supported(c, f) \
184 qeth_is_ipa_supported(&c->options.adp, f)
185#define qeth_adp_enabled(c, f) \
186 qeth_is_ipa_enabled(&c->options.adp, f)
187#define qeth_is_supported(c, f) \
188 qeth_is_ipa_supported(&c->options.ipa4, f)
189#define qeth_is_enabled(c, f) \
190 qeth_is_ipa_enabled(&c->options.ipa4, f)
191#define qeth_is_supported6(c, f) \
192 qeth_is_ipa_supported(&c->options.ipa6, f)
193#define qeth_is_enabled6(c, f) \
194 qeth_is_ipa_enabled(&c->options.ipa6, f)
195#define qeth_is_ipafunc_supported(c, prot, f) \
196 ((prot == QETH_PROT_IPV6) ? \
197 qeth_is_supported6(c, f) : qeth_is_supported(c, f))
198#define qeth_is_ipafunc_enabled(c, prot, f) \
199 ((prot == QETH_PROT_IPV6) ? \
200 qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
201
202#define QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT 0x0101
203#define QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT 0x0101
204#define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108
205#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108
206
207#define QETH_MODELLIST_ARRAY \
208 {{0x1731, 0x01, 0x1732, 0x01, QETH_CARD_TYPE_OSAE, 1, \
209 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
210 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
211 QETH_MAX_QUEUES, 0}, \
212 {0x1731, 0x05, 0x1732, 0x05, QETH_CARD_TYPE_IQD, 0, \
213 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \
214 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \
215 QETH_MAX_QUEUES, 0x103}, \
216 {0x1731, 0x06, 0x1732, 0x06, QETH_CARD_TYPE_OSN, 0, \
217 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
218 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
219 QETH_MAX_QUEUES, 0}, \
220 {0, 0, 0, 0, 0, 0, 0, 0, 0} }
221
222#define QETH_REAL_CARD 1
223#define QETH_VLAN_CARD 2
224#define QETH_BUFSIZE 4096
225
226/**
227 * some more defs
228 */
229#define QETH_TX_TIMEOUT 100 * HZ
230#define QETH_RCD_TIMEOUT 60 * HZ
231#define QETH_HEADER_SIZE 32
232#define QETH_MAX_PORTNO 15
233
234/*IPv6 address autoconfiguration stuff*/
235#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
236#define UNIQUE_ID_NOT_BY_CARD 0x10000
237
238/*****************************************************************************/
239/* QDIO queue and buffer handling */
240/*****************************************************************************/
241#define QETH_MAX_QUEUES 4
242#define QETH_IN_BUF_SIZE_DEFAULT 65536
243#define QETH_IN_BUF_COUNT_DEFAULT 16
244#define QETH_IN_BUF_COUNT_MIN 8
245#define QETH_IN_BUF_COUNT_MAX 128
246#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
247#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
248 ((card)->qdio.in_buf_pool.buf_count / 2)
249
250/* buffers we have to be behind before we get a PCI */
251#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
252/*enqueued free buffers left before we get a PCI*/
253#define QETH_PCI_THRESHOLD_B(card) 0
254/*not used unless the microcode gets patched*/
255#define QETH_PCI_TIMER_VALUE(card) 3
256
257#define QETH_MIN_INPUT_THRESHOLD 1
258#define QETH_MAX_INPUT_THRESHOLD 500
259#define QETH_MIN_OUTPUT_THRESHOLD 1
260#define QETH_MAX_OUTPUT_THRESHOLD 300
261
 262/* priority queueing */
263#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
264#define QETH_DEFAULT_QUEUE 2
265#define QETH_NO_PRIO_QUEUEING 0
266#define QETH_PRIO_Q_ING_PREC 1
267#define QETH_PRIO_Q_ING_TOS 2
268#define IP_TOS_LOWDELAY 0x10
269#define IP_TOS_HIGHTHROUGHPUT 0x08
270#define IP_TOS_HIGHRELIABILITY 0x04
271#define IP_TOS_NOTIMPORTANT 0x02
272
273/* Packing */
274#define QETH_LOW_WATERMARK_PACK 2
275#define QETH_HIGH_WATERMARK_PACK 5
276#define QETH_WATERMARK_PACK_FUZZ 1
277
278#define QETH_IP_HEADER_SIZE 40
279
280/* large receive scatter gather copy break */
281#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
282
/*
 * Layer-3 variant of the qeth transport header.  Packed so the compiler
 * inserts no padding; the layout presumably mirrors the OSA wire/firmware
 * format — do not reorder fields.  Two fields are reinterpreted when the
 * frame carries a TSO header (see struct qeth_hdr_tso).
 */
struct qeth_hdr_layer3 {
	__u8 id;			/* header type discriminator byte */
	__u8 flags;			/* QETH_HDR_* / cast flags */
	__u16 inbound_checksum; /*TSO:__u16 seqno */
	__u32 token;	/*TSO: __u32 reserved */
	__u16 length;
	__u8 vlan_prio;
	__u8 ext_flags;			/* QETH_HDR_EXT_* bits */
	__u16 vlan_id;
	__u16 frame_offset;
	__u8 dest_addr[16];		/* room for a full IPv6 address */
} __attribute__ ((packed));
295
/*
 * Layer-2 variant of the qeth transport header.  Packed so the compiler
 * inserts no padding; field order is part of the device interface.
 */
struct qeth_hdr_layer2 {
	__u8 id;			/* header type discriminator byte */
	__u8 flags[3];			/* QETH_LAYER2_FLAG_* in flags[2]? — TODO confirm which byte */
	__u8 port_no;
	__u8 hdr_length;
	__u16 pkt_length;
	__u16 seq_no;
	__u16 vlan_id;
	__u32 reserved;
	__u8 reserved2[16];
} __attribute__ ((packed));
307
308struct qeth_hdr_osn {
309 __u8 id;
310 __u8 reserved;
311 __u16 seq_no;
312 __u16 reserved2;
313 __u16 control_flags;
314 __u16 pdu_length;
315 __u8 reserved3[18];
316 __u32 ccid;
317} __attribute__ ((packed));
318
/*
 * Common transport header: union of the three possible formats.  Each
 * variant starts with a __u8 id byte, which presumably selects the
 * active member (see enum qeth_header_ids) — confirm against the
 * fill/parse paths.
 */
struct qeth_hdr {
	union {
		struct qeth_hdr_layer2 l2;
		struct qeth_hdr_layer3 l3;
		struct qeth_hdr_osn osn;
	} hdr;
} __attribute__ ((packed));
326
327/*TCP Segmentation Offload header*/
328struct qeth_hdr_ext_tso {
329 __u16 hdr_tot_len;
330 __u8 imb_hdr_no;
331 __u8 reserved;
332 __u8 hdr_type;
333 __u8 hdr_version;
334 __u16 hdr_len;
335 __u32 payload_len;
336 __u16 mss;
337 __u16 dg_hdr_len;
338 __u8 padding[16];
339} __attribute__ ((packed));
340
341struct qeth_hdr_tso {
342 struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/
343 struct qeth_hdr_ext_tso ext;
344} __attribute__ ((packed));
345
346
347/* flags for qeth_hdr.flags */
348#define QETH_HDR_PASSTHRU 0x10
349#define QETH_HDR_IPV6 0x80
350#define QETH_HDR_CAST_MASK 0x07
351enum qeth_cast_flags {
352 QETH_CAST_UNICAST = 0x06,
353 QETH_CAST_MULTICAST = 0x04,
354 QETH_CAST_BROADCAST = 0x05,
355 QETH_CAST_ANYCAST = 0x07,
356 QETH_CAST_NOCAST = 0x00,
357};
358
359enum qeth_layer2_frame_flags {
360 QETH_LAYER2_FLAG_MULTICAST = 0x01,
361 QETH_LAYER2_FLAG_BROADCAST = 0x02,
362 QETH_LAYER2_FLAG_UNICAST = 0x04,
363 QETH_LAYER2_FLAG_VLAN = 0x10,
364};
365
366enum qeth_header_ids {
367 QETH_HEADER_TYPE_LAYER3 = 0x01,
368 QETH_HEADER_TYPE_LAYER2 = 0x02,
369 QETH_HEADER_TYPE_TSO = 0x03,
370 QETH_HEADER_TYPE_OSN = 0x04,
371};
372/* flags for qeth_hdr.ext_flags */
373#define QETH_HDR_EXT_VLAN_FRAME 0x01
374#define QETH_HDR_EXT_TOKEN_ID 0x02
375#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04
376#define QETH_HDR_EXT_SRC_MAC_ADDR 0x08
377#define QETH_HDR_EXT_CSUM_HDR_REQ 0x10
378#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
379#define QETH_HDR_EXT_UDP_TSO 0x40 /*bit off for TCP*/
380
381static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
382{
383 return (sbale->flags & SBAL_FLAGS_LAST_ENTRY);
384}
385
386enum qeth_qdio_buffer_states {
387 /*
388 * inbound: read out by driver; owned by hardware in order to be filled
389 * outbound: owned by driver in order to be filled
390 */
391 QETH_QDIO_BUF_EMPTY,
392 /*
393 * inbound: filled by hardware; owned by driver in order to be read out
394 * outbound: filled by driver; owned by hardware in order to be sent
395 */
396 QETH_QDIO_BUF_PRIMED,
397};
398
399enum qeth_qdio_info_states {
400 QETH_QDIO_UNINITIALIZED,
401 QETH_QDIO_ALLOCATED,
402 QETH_QDIO_ESTABLISHED,
403 QETH_QDIO_CLEANING
404};
405
406struct qeth_buffer_pool_entry {
407 struct list_head list;
408 struct list_head init_list;
409 void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
410};
411
412struct qeth_qdio_buffer_pool {
413 struct list_head entry_list;
414 int buf_count;
415};
416
417struct qeth_qdio_buffer {
418 struct qdio_buffer *buffer;
419 /* the buffer pool entry currently associated to this buffer */
420 struct qeth_buffer_pool_entry *pool_entry;
421};
422
423struct qeth_qdio_q {
424 struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
425 struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
426 int next_buf_to_init;
427} __attribute__ ((aligned(256)));
428
429/* possible types of qeth large_send support */
430enum qeth_large_send_types {
431 QETH_LARGE_SEND_NO,
432 QETH_LARGE_SEND_EDDP,
433 QETH_LARGE_SEND_TSO,
434};
435
436struct qeth_qdio_out_buffer {
437 struct qdio_buffer *buffer;
438 atomic_t state;
439 int next_element_to_fill;
440 struct sk_buff_head skb_list;
441 struct list_head ctx_list;
442};
443
444struct qeth_card;
445
446enum qeth_out_q_states {
447 QETH_OUT_Q_UNLOCKED,
448 QETH_OUT_Q_LOCKED,
449 QETH_OUT_Q_LOCKED_FLUSH,
450};
451
452struct qeth_qdio_out_q {
453 struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
454 struct qeth_qdio_out_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
455 int queue_no;
456 struct qeth_card *card;
457 atomic_t state;
458 int do_pack;
459 /*
460 * index of buffer to be filled by driver; state EMPTY or PACKING
461 */
462 int next_buf_to_fill;
463 /*
464 * number of buffers that are currently filled (PRIMED)
465 * -> these buffers are hardware-owned
466 */
467 atomic_t used_buffers;
468 /* indicates whether PCI flag must be set (or if one is outstanding) */
469 atomic_t set_pci_flags_count;
470} __attribute__ ((aligned(256)));
471
472struct qeth_qdio_info {
473 atomic_t state;
474 /* input */
475 struct qeth_qdio_q *in_q;
476 struct qeth_qdio_buffer_pool in_buf_pool;
477 struct qeth_qdio_buffer_pool init_pool;
478 int in_buf_size;
479
480 /* output */
481 int no_out_queues;
482 struct qeth_qdio_out_q **out_qs;
483
484 /* priority queueing */
485 int do_prio_queueing;
486 int default_out_queue;
487};
488
489enum qeth_send_errors {
490 QETH_SEND_ERROR_NONE,
491 QETH_SEND_ERROR_LINK_FAILURE,
492 QETH_SEND_ERROR_RETRY,
493 QETH_SEND_ERROR_KICK_IT,
494};
495
496#define QETH_ETH_MAC_V4 0x0100 /* like v4 */
497#define QETH_ETH_MAC_V6 0x3333 /* like v6 */
498/* tr mc mac is longer, but that will be enough to detect mc frames */
499#define QETH_TR_MAC_NC 0xc000 /* non-canonical */
500#define QETH_TR_MAC_C 0x0300 /* canonical */
501
502#define DEFAULT_ADD_HHLEN 0
503#define MAX_ADD_HHLEN 1024
504
505/**
506 * buffer stuff for read channel
507 */
508#define QETH_CMD_BUFFER_NO 8
509
510/**
511 * channel state machine
512 */
513enum qeth_channel_states {
514 CH_STATE_UP,
515 CH_STATE_DOWN,
516 CH_STATE_ACTIVATING,
517 CH_STATE_HALTED,
518 CH_STATE_STOPPED,
519 CH_STATE_RCD,
520 CH_STATE_RCD_DONE,
521};
522/**
523 * card state machine
524 */
525enum qeth_card_states {
526 CARD_STATE_DOWN,
527 CARD_STATE_HARDSETUP,
528 CARD_STATE_SOFTSETUP,
529 CARD_STATE_UP,
530 CARD_STATE_RECOVER,
531};
532
533/**
534 * Protocol versions
535 */
536enum qeth_prot_versions {
537 QETH_PROT_IPV4 = 0x0004,
538 QETH_PROT_IPV6 = 0x0006,
539};
540
541enum qeth_ip_types {
542 QETH_IP_TYPE_NORMAL,
543 QETH_IP_TYPE_VIPA,
544 QETH_IP_TYPE_RXIP,
545 QETH_IP_TYPE_DEL_ALL_MC,
546};
547
548enum qeth_cmd_buffer_state {
549 BUF_STATE_FREE,
550 BUF_STATE_LOCKED,
551 BUF_STATE_PROCESSED,
552};
553
554struct qeth_ipato {
555 int enabled;
556 int invert4;
557 int invert6;
558 struct list_head entries;
559};
560
561struct qeth_channel;
562
563struct qeth_cmd_buffer {
564 enum qeth_cmd_buffer_state state;
565 struct qeth_channel *channel;
566 unsigned char *data;
567 int rc;
568 void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
569};
570
571/**
572 * definition of a qeth channel, used for read and write
573 */
574struct qeth_channel {
575 enum qeth_channel_states state;
576 struct ccw1 ccw;
577 spinlock_t iob_lock;
578 wait_queue_head_t wait_q;
579 struct tasklet_struct irq_tasklet;
580 struct ccw_device *ccwdev;
581/*command buffer for control data*/
582 struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
583 atomic_t irq_pending;
584 int io_buf_no;
585 int buf_no;
586};
587
588/**
589 * OSA card related definitions
590 */
591struct qeth_token {
592 __u32 issuer_rm_w;
593 __u32 issuer_rm_r;
594 __u32 cm_filter_w;
595 __u32 cm_filter_r;
596 __u32 cm_connection_w;
597 __u32 cm_connection_r;
598 __u32 ulp_filter_w;
599 __u32 ulp_filter_r;
600 __u32 ulp_connection_w;
601 __u32 ulp_connection_r;
602};
603
604struct qeth_seqno {
605 __u32 trans_hdr;
606 __u32 pdu_hdr;
607 __u32 pdu_hdr_ack;
608 __u16 ipa;
609 __u32 pkt_seqno;
610};
611
612struct qeth_reply {
613 struct list_head list;
614 wait_queue_head_t wait_q;
615 int (*callback)(struct qeth_card *, struct qeth_reply *,
616 unsigned long);
617 u32 seqno;
618 unsigned long offset;
619 atomic_t received;
620 int rc;
621 void *param;
622 struct qeth_card *card;
623 atomic_t refcnt;
624};
625
626
627struct qeth_card_blkt {
628 int time_total;
629 int inter_packet;
630 int inter_packet_jumbo;
631};
632
633#define QETH_BROADCAST_WITH_ECHO 0x01
634#define QETH_BROADCAST_WITHOUT_ECHO 0x02
635#define QETH_LAYER2_MAC_READ 0x01
636#define QETH_LAYER2_MAC_REGISTERED 0x02
637struct qeth_card_info {
638 unsigned short unit_addr2;
639 unsigned short cula;
640 unsigned short chpid;
641 __u16 func_level;
642 char mcl_level[QETH_MCL_LENGTH + 1];
643 int guestlan;
644 int mac_bits;
645 int portname_required;
646 int portno;
647 char portname[9];
648 enum qeth_card_types type;
649 enum qeth_link_types link_type;
650 int is_multicast_different;
651 int initial_mtu;
652 int max_mtu;
653 int broadcast_capable;
654 int unique_id;
655 struct qeth_card_blkt blkt;
656 __u32 csum_mask;
657 enum qeth_ipa_promisc_modes promisc_mode;
658};
659
660struct qeth_card_options {
661 struct qeth_routing_info route4;
662 struct qeth_ipa_info ipa4;
663 struct qeth_ipa_info adp; /*Adapter parameters*/
664 struct qeth_routing_info route6;
665 struct qeth_ipa_info ipa6;
666 enum qeth_checksum_types checksum_type;
667 int broadcast_mode;
668 int macaddr_mode;
669 int fake_broadcast;
670 int add_hhlen;
671 int fake_ll;
672 int layer2;
673 enum qeth_large_send_types large_send;
674 int performance_stats;
675 int rx_sg_cb;
676};
677
678/*
679 * thread bits for qeth_card thread masks
680 */
681enum qeth_threads {
682 QETH_RECOVER_THREAD = 1,
683};
684
685struct qeth_osn_info {
686 int (*assist_cb)(struct net_device *dev, void *data);
687 int (*data_cb)(struct sk_buff *skb);
688};
689
690enum qeth_discipline_id {
691 QETH_DISCIPLINE_LAYER3 = 0,
692 QETH_DISCIPLINE_LAYER2 = 1,
693};
694
695struct qeth_discipline {
696 qdio_handler_t *input_handler;
697 qdio_handler_t *output_handler;
698 int (*recover)(void *ptr);
699 struct ccwgroup_driver *ccwgdriver;
700};
701
702struct qeth_vlan_vid {
703 struct list_head list;
704 unsigned short vid;
705};
706
707struct qeth_mc_mac {
708 struct list_head list;
709 __u8 mc_addr[MAX_ADDR_LEN];
710 unsigned char mc_addrlen;
711};
712
/*
 * Per-device state for one qeth card (one ccwgroup device).
 * Instances are linked on qeth_core_card_list via 'list'.
 */
struct qeth_card {
	struct list_head list;
	enum qeth_card_states state;	/* card state machine */
	int lan_online;
	spinlock_t lock;
	struct ccwgroup_device *gdev;
	/* the three CCW channels making up the device */
	struct qeth_channel read;
	struct qeth_channel write;
	struct qeth_channel data;

	struct net_device *dev;
	struct net_device_stats stats;

	struct qeth_card_info info;
	struct qeth_token token;
	struct qeth_seqno seqno;
	struct qeth_card_options options;

	wait_queue_head_t wait_q;
	/* vlanlock/mclock presumably guard vid_list/mc_list — confirm at use sites */
	spinlock_t vlanlock;
	spinlock_t mclock;
	struct vlan_group *vlangrp;
	struct list_head vid_list;
	struct list_head mc_list;
	/* recovery-thread bookkeeping (see enum qeth_threads bit masks) */
	struct work_struct kernel_thread_starter;
	spinlock_t thread_mask_lock;
	unsigned long thread_start_mask;
	unsigned long thread_allowed_mask;
	unsigned long thread_running_mask;
	/* IP address bookkeeping; ip_lock presumably guards these lists */
	spinlock_t ip_lock;
	struct list_head ip_list;
	struct list_head *ip_tbd_list;
	struct qeth_ipato ipato;
	struct list_head cmd_waiter_list;
	/* QDIO buffer handling */
	struct qeth_qdio_info qdio;
	struct qeth_perf_stats perf_stats;
	int use_hard_stop;
	struct qeth_osn_info osn_info;
	struct qeth_discipline discipline;	/* L2 vs. L3 handler hooks */
	atomic_t force_alloc_skb;
};
755
756struct qeth_card_list_struct {
757 struct list_head list;
758 rwlock_t rwlock;
759};
760
761/*some helper functions*/
762#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
763
764static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
765{
766 struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *)
767 dev_get_drvdata(&cdev->dev))->dev);
768 return card;
769}
770
/*
 * Coarse timestamp derived from get_clock(); shifting the value right
 * by 12 bits scales it down before truncating to int (used for the
 * per-path time accounting in qeth_perf_stats).
 */
static inline int qeth_get_micros(void)
{
	unsigned long long clk = get_clock();

	return (int) (clk >> 12);
}
775
776static inline void *qeth_push_skb(struct qeth_card *card, struct sk_buff *skb,
777 int size)
778{
779 void *hdr;
780
781 hdr = (void *) skb_push(skb, size);
782 /*
783 * sanity check, the Linux memory allocation scheme should
784 * never present us cases like this one (the qdio header size plus
785 * the first 40 bytes of the paket cross a 4k boundary)
786 */
787 if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) !=
788 (((unsigned long) hdr + size +
789 QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) {
790 PRINT_ERR("Misaligned packet on interface %s. Discarded.",
791 QETH_CARD_IFNAME(card));
792 return NULL;
793 }
794 return hdr;
795}
796
797static inline int qeth_get_ip_version(struct sk_buff *skb)
798{
799 switch (skb->protocol) {
800 case ETH_P_IPV6:
801 return 6;
802 case ETH_P_IP:
803 return 4;
804 default:
805 return 0;
806 }
807}
808
809struct qeth_eddp_context;
810extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
811extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
812const char *qeth_get_cardname_short(struct qeth_card *);
813int qeth_realloc_buffer_pool(struct qeth_card *, int);
814int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
815void qeth_core_free_discipline(struct qeth_card *);
816int qeth_core_create_device_attributes(struct device *);
817void qeth_core_remove_device_attributes(struct device *);
818int qeth_core_create_osn_attributes(struct device *);
819void qeth_core_remove_osn_attributes(struct device *);
820
821/* exports for qeth discipline device drivers */
822extern struct qeth_card_list_struct qeth_core_card_list;
823
824extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
825
826void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
827int qeth_threads_running(struct qeth_card *, unsigned long);
828int qeth_wait_for_threads(struct qeth_card *, unsigned long);
829int qeth_do_run_thread(struct qeth_card *, unsigned long);
830void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
831void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
832int qeth_core_hardsetup_card(struct qeth_card *);
833void qeth_print_status_message(struct qeth_card *);
834int qeth_init_qdio_queues(struct qeth_card *);
835int qeth_send_startlan(struct qeth_card *);
836int qeth_send_stoplan(struct qeth_card *);
837int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
838 int (*reply_cb)
839 (struct qeth_card *, struct qeth_reply *, unsigned long),
840 void *);
841struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
842 enum qeth_ipa_cmds, enum qeth_prot_versions);
843int qeth_query_setadapterparms(struct qeth_card *);
844int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int,
845 unsigned int, const char *);
846void qeth_put_buffer_pool_entry(struct qeth_card *,
847 struct qeth_buffer_pool_entry *);
848void qeth_queue_input_buffer(struct qeth_card *, int);
849struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
850 struct qdio_buffer *, struct qdio_buffer_element **, int *,
851 struct qeth_hdr **);
852void qeth_schedule_recovery(struct qeth_card *);
853void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
854 unsigned int, unsigned int,
855 unsigned int, int, int,
856 unsigned long);
857void qeth_clear_ipacmd_list(struct qeth_card *);
858int qeth_qdio_clear_card(struct qeth_card *, int);
859void qeth_clear_working_pool_list(struct qeth_card *);
860void qeth_clear_cmd_buffers(struct qeth_channel *);
861void qeth_clear_qdio_buffers(struct qeth_card *);
862void qeth_setadp_promisc_mode(struct qeth_card *);
863struct net_device_stats *qeth_get_stats(struct net_device *);
864int qeth_change_mtu(struct net_device *, int);
865int qeth_setadpparms_change_macaddr(struct qeth_card *);
866void qeth_tx_timeout(struct net_device *);
867void qeth_prepare_control_data(struct qeth_card *, int,
868 struct qeth_cmd_buffer *);
869void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
870void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
871struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
872int qeth_mdio_read(struct net_device *, int, int);
873int qeth_snmp_command(struct qeth_card *, char __user *);
874int qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types);
875struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32);
876int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *,
877 unsigned long);
878int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
879 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
880 void *reply_param);
881int qeth_get_cast_type(struct qeth_card *, struct sk_buff *);
882int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
883struct sk_buff *qeth_prepare_skb(struct qeth_card *, struct sk_buff *,
884 struct qeth_hdr **);
885int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int);
886int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
887 struct sk_buff *, struct qeth_hdr *, int,
888 struct qeth_eddp_context *);
889int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
890 struct sk_buff *, struct qeth_hdr *,
891 int, struct qeth_eddp_context *);
892int qeth_core_get_stats_count(struct net_device *);
893void qeth_core_get_ethtool_stats(struct net_device *,
894 struct ethtool_stats *, u64 *);
895void qeth_core_get_strings(struct net_device *, u32, u8 *);
896void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
897
898/* exports for OSN */
899int qeth_osn_assist(struct net_device *, void *, int);
900int qeth_osn_register(unsigned char *read_dev_no, struct net_device **,
901 int (*assist_cb)(struct net_device *, void *),
902 int (*data_cb)(struct sk_buff *));
903void qeth_osn_deregister(struct net_device *);
904
905#endif /* __QETH_CORE_H__ */
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
new file mode 100644
index 000000000000..055f5c3e7b56
--- /dev/null
+++ b/drivers/s390/net/qeth_core_main.c
@@ -0,0 +1,4492 @@
1/*
2 * drivers/s390/net/qeth_core_main.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/string.h>
14#include <linux/errno.h>
15#include <linux/kernel.h>
16#include <linux/ip.h>
17#include <linux/ipv6.h>
18#include <linux/tcp.h>
19#include <linux/mii.h>
20#include <linux/kthread.h>
21
22#include <asm-s390/ebcdic.h>
23#include <asm-s390/io.h>
24#include <asm/s390_rdev.h>
25
26#include "qeth_core.h"
27#include "qeth_core_offl.h"
28
29static DEFINE_PER_CPU(char[256], qeth_core_dbf_txt_buf);
30#define QETH_DBF_TXT_BUF qeth_core_dbf_txt_buf
31
/* s390 debug-feature (s390dbf) area table for the qeth core driver.
 * One entry per debug area; columns are name, pages, areas, max record
 * length, initial level and default view.  The debug handle (last field)
 * is NULL here and is filled in when the areas are registered. */
struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
		8, 1, 8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_QERR] = {"qeth_qerr",
		2, 1, 8, 2, &debug_hex_ascii_view, NULL},
	[QETH_DBF_TRACE] = {"qeth_trace",
		4, 1, 8, 3, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG] = {"qeth_msg",
		8, 1, 128, 3, &debug_sprintf_view, NULL},
	[QETH_DBF_SENSE] = {"qeth_sense",
		2, 1, 64, 2, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MISC] = {"qeth_misc",
		2, 1, 256, 2, &debug_hex_ascii_view, NULL},
	[QETH_DBF_CTRL] = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);
51
52struct qeth_card_list_struct qeth_core_card_list;
53EXPORT_SYMBOL_GPL(qeth_core_card_list);
54
55static struct device *qeth_core_root_dev;
56static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;
57static struct lock_class_key qdio_out_skb_queue_key;
58
59static void qeth_send_control_data_cb(struct qeth_channel *,
60 struct qeth_cmd_buffer *);
61static int qeth_issue_next_read(struct qeth_card *);
62static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
63static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32);
64static void qeth_free_buffer_pool(struct qeth_card *);
65static int qeth_qdio_establish(struct qeth_card *);
66
67
/*
 * Map a (possibly fragmented) skb into consecutive SBAL elements of a
 * QDIO buffer, starting at *next_element_to_fill.
 *
 * The linear part of the skb (if non-empty) occupies the first element;
 * each page fragment then gets one element of its own.  The elements are
 * chained with SBAL_FLAGS_FIRST/MIDDLE/LAST_FRAG; when @is_tso is set
 * the first element is tagged MIDDLE instead of FIRST (presumably a TSO
 * header element was filled in before this call — confirm with callers).
 * On return, *next_element_to_fill points one past the last used element.
 */
static inline void __qeth_fill_buffer_frag(struct sk_buff *skb,
		struct qdio_buffer *buffer, int is_tso,
		int *next_element_to_fill)
{
	struct skb_frag_struct *frag;
	int fragno;
	unsigned long addr;
	int element, cnt, dlen;

	fragno = skb_shinfo(skb)->nr_frags;
	element = *next_element_to_fill;
	dlen = 0;

	if (is_tso)
		buffer->element[element].flags =
			SBAL_FLAGS_MIDDLE_FRAG;
	else
		buffer->element[element].flags =
			SBAL_FLAGS_FIRST_FRAG;
	/* length of the linear (non-paged) part of the skb */
	dlen = skb->len - skb->data_len;
	if (dlen) {
		buffer->element[element].addr = skb->data;
		buffer->element[element].length = dlen;
		element++;
	}
	/* one SBAL element per page fragment */
	for (cnt = 0; cnt < fragno; cnt++) {
		frag = &skb_shinfo(skb)->frags[cnt];
		addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
			frag->page_offset;
		buffer->element[element].addr = (char *)addr;
		buffer->element[element].length = frag->size;
		if (cnt < (fragno - 1))
			buffer->element[element].flags =
				SBAL_FLAGS_MIDDLE_FRAG;
		else
			buffer->element[element].flags =
				SBAL_FLAGS_LAST_FRAG;
		element++;
	}
	*next_element_to_fill = element;
}
109
110static inline const char *qeth_get_cardname(struct qeth_card *card)
111{
112 if (card->info.guestlan) {
113 switch (card->info.type) {
114 case QETH_CARD_TYPE_OSAE:
115 return " Guest LAN QDIO";
116 case QETH_CARD_TYPE_IQD:
117 return " Guest LAN Hiper";
118 default:
119 return " unknown";
120 }
121 } else {
122 switch (card->info.type) {
123 case QETH_CARD_TYPE_OSAE:
124 return " OSD Express";
125 case QETH_CARD_TYPE_IQD:
126 return " HiperSockets";
127 case QETH_CARD_TYPE_OSN:
128 return " OSN QDIO";
129 default:
130 return " unknown";
131 }
132 }
133 return " n/a";
134}
135
136/* max length to be returned: 14 */
137const char *qeth_get_cardname_short(struct qeth_card *card)
138{
139 if (card->info.guestlan) {
140 switch (card->info.type) {
141 case QETH_CARD_TYPE_OSAE:
142 return "GuestLAN QDIO";
143 case QETH_CARD_TYPE_IQD:
144 return "GuestLAN Hiper";
145 default:
146 return "unknown";
147 }
148 } else {
149 switch (card->info.type) {
150 case QETH_CARD_TYPE_OSAE:
151 switch (card->info.link_type) {
152 case QETH_LINK_TYPE_FAST_ETH:
153 return "OSD_100";
154 case QETH_LINK_TYPE_HSTR:
155 return "HSTR";
156 case QETH_LINK_TYPE_GBIT_ETH:
157 return "OSD_1000";
158 case QETH_LINK_TYPE_10GBIT_ETH:
159 return "OSD_10GIG";
160 case QETH_LINK_TYPE_LANE_ETH100:
161 return "OSD_FE_LANE";
162 case QETH_LINK_TYPE_LANE_TR:
163 return "OSD_TR_LANE";
164 case QETH_LINK_TYPE_LANE_ETH1000:
165 return "OSD_GbE_LANE";
166 case QETH_LINK_TYPE_LANE:
167 return "OSD_ATM_LANE";
168 default:
169 return "OSD_Express";
170 }
171 case QETH_CARD_TYPE_IQD:
172 return "HiperSockets";
173 case QETH_CARD_TYPE_OSN:
174 return "OSN";
175 default:
176 return "unknown";
177 }
178 }
179 return "n/a";
180}
181
/*
 * Replace the mask of driver threads that are allowed to run.
 * If @clear_start_mask is set, pending start requests for threads that
 * are no longer allowed are discarded as well.  Waiters on card->wait_q
 * are woken so they can re-evaluate the new masks.
 */
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
		int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
195
196int qeth_threads_running(struct qeth_card *card, unsigned long threads)
197{
198 unsigned long flags;
199 int rc = 0;
200
201 spin_lock_irqsave(&card->thread_mask_lock, flags);
202 rc = (card->thread_running_mask & threads);
203 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
204 return rc;
205}
206EXPORT_SYMBOL_GPL(qeth_threads_running);
207
/*
 * Sleep (interruptibly) until none of the threads in @threads is
 * running anymore.  Returns 0 on success or -ERESTARTSYS if the wait
 * was interrupted by a signal.
 */
int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
{
	return wait_event_interruptible(card->wait_q,
			qeth_threads_running(card, threads) == 0);
}
EXPORT_SYMBOL_GPL(qeth_wait_for_threads);
214
/*
 * Unlink every entry from the working (in-use) inbound buffer pool
 * list.  The entries themselves are not freed here — they remain
 * reachable through the init pool's init_list.
 */
void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;

	QETH_DBF_TEXT(TRACE, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
			    &card->qdio.in_buf_pool.entry_list, list){
			list_del(&pool_entry->list);
	}
}
EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
226
/*
 * Allocate the inbound buffer pool: init_pool.buf_count entries, each
 * holding QETH_MAX_BUFFER_ELEMENTS(card) page-sized element buffers.
 *
 * On any allocation failure the partially built entry is unwound
 * (pages freed in reverse, entry kfree'd), the whole pool allocated so
 * far is released via qeth_free_buffer_pool(), and -ENOMEM is returned.
 */
static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry;
	void *ptr;
	int i, j;

	QETH_DBF_TEXT(TRACE, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
		if (!pool_entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}
		for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
			ptr = (void *) __get_free_page(GFP_KERNEL);
			if (!ptr) {
				/* roll back pages of the half-built entry */
				while (j > 0)
					free_page((unsigned long)
						  pool_entry->elements[--j]);
				kfree(pool_entry);
				qeth_free_buffer_pool(card);
				return -ENOMEM;
			}
			pool_entry->elements[j] = ptr;
		}
		list_add(&pool_entry->init_list,
			 &card->qdio.init_pool.entry_list);
	}
	return 0;
}
257
/*
 * Resize the inbound buffer pool to @bufcnt entries by freeing the old
 * pool and allocating a fresh one.  Only permitted while the card is
 * DOWN or in RECOVER state; returns -EPERM otherwise, or the result of
 * qeth_alloc_buffer_pool() (0 or -ENOMEM).
 */
int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
	QETH_DBF_TEXT(TRACE, 2, "realcbp");

	if ((card->state != CARD_STATE_DOWN) &&
	    (card->state != CARD_STATE_RECOVER))
		return -EPERM;

	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
	qeth_clear_working_pool_list(card);
	qeth_free_buffer_pool(card);
	card->qdio.in_buf_pool.buf_count = bufcnt;
	card->qdio.init_pool.buf_count = bufcnt;
	return qeth_alloc_buffer_pool(card);
}
273
/*
 * Switch the card's large-send (segmentation offload) mode and adjust
 * the net_device feature flags to match.
 *
 * If no net_device is attached yet, only the option is recorded.
 * TSO mode falls back to QETH_LARGE_SEND_NO with -EOPNOTSUPP when the
 * hardware lacks IPA_OUTBOUND_TSO.  The TX queue is quiesced around the
 * feature change while the card is UP.
 */
int qeth_set_large_send(struct qeth_card *card,
		enum qeth_large_send_types type)
{
	int rc = 0;

	if (card->dev == NULL) {
		card->options.large_send = type;
		return 0;
	}
	if (card->state == CARD_STATE_UP)
		netif_tx_disable(card->dev);
	card->options.large_send = type;
	switch (card->options.large_send) {
	case QETH_LARGE_SEND_EDDP:
		card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
					NETIF_F_HW_CSUM;
		break;
	case QETH_LARGE_SEND_TSO:
		if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
			card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
						NETIF_F_HW_CSUM;
		} else {
			PRINT_WARN("TSO not supported on %s. "
				   "large_send set to 'no'.\n",
				   card->dev->name);
			card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
						NETIF_F_HW_CSUM);
			card->options.large_send = QETH_LARGE_SEND_NO;
			rc = -EOPNOTSUPP;
		}
		break;
	default: /* includes QETH_LARGE_SEND_NO */
		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
					NETIF_F_HW_CSUM);
		break;
	}
	if (card->state == CARD_STATE_UP)
		netif_wake_queue(card->dev);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_large_send);
315
/*
 * Start the next read CCW on the card's read channel so incoming
 * control data can be received.  Requires the read channel to be UP.
 *
 * On ccw_device_start failure the irq_pending flag is cleared, a
 * recovery is scheduled and waiters are woken.  Returns 0 on success,
 * -EIO/-ENOMEM or the ccw_device_start error code on failure.
 */
static int qeth_issue_next_read(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(TRACE, 5, "issnxrd");
	if (card->read.state != CH_STATE_UP)
		return -EIO;
	iob = qeth_get_buffer(&card->read);
	if (!iob) {
		PRINT_WARN("issue_next_read failed: no iob available!\n");
		return -ENOMEM;
	}
	qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
	QETH_DBF_TEXT(TRACE, 6, "noirqpnd");
	/* the iob pointer doubles as the interrupt parameter */
	rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
			      (addr_t) iob, 0, 0);
	if (rc) {
		PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc);
		atomic_set(&card->read.irq_pending, 0);
		qeth_schedule_recovery(card);
		wake_up(&card->wait_q);
	}
	return rc;
}
341
342static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
343{
344 struct qeth_reply *reply;
345
346 reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
347 if (reply) {
348 atomic_set(&reply->refcnt, 1);
349 atomic_set(&reply->received, 0);
350 reply->card = card;
351 };
352 return reply;
353}
354
/* Take an additional reference on a reply object; pair with
 * qeth_put_reply().  Warns if the refcount was already invalid. */
static void qeth_get_reply(struct qeth_reply *reply)
{
	WARN_ON(atomic_read(&reply->refcnt) <= 0);
	atomic_inc(&reply->refcnt);
}
360
/* Drop one reference on a reply object and free it when the count
 * reaches zero. */
static void qeth_put_reply(struct qeth_reply *reply)
{
	WARN_ON(atomic_read(&reply->refcnt) <= 0);
	if (atomic_dec_and_test(&reply->refcnt))
		kfree(reply);
}
367
368static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
369 struct qeth_card *card)
370{
371 char *ipa_name;
372 int com = cmd->hdr.command;
373 ipa_name = qeth_get_ipa_cmd_name(com);
374 if (rc)
375 QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s returned x%X \"%s\"\n",
376 ipa_name, com, QETH_CARD_IFNAME(card),
377 rc, qeth_get_ipa_msg(rc));
378 else
379 QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s succeeded\n",
380 ipa_name, com, QETH_CARD_IFNAME(card));
381}
382
/*
 * Inspect a received control buffer for IPA content.
 *
 * Returns the embedded IPA command if the caller should continue
 * processing it (a normal reply, or MODCCID), and NULL for data that is
 * fully handled here (STOPLAN/STARTLAN link events) or is not IPA at
 * all... except that for non-IPA buffers cmd stays NULL and is returned
 * as NULL anyway.  STOPLAN marks the link down, STARTLAN marks it up
 * and schedules a recovery.
 */
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_ipa_cmd *cmd = NULL;

	QETH_DBF_TEXT(TRACE, 5, "chkipad");
	if (IS_IPA(iob->data)) {
		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
		if (IS_IPA_REPLY(cmd)) {
			/* SETCCID..MODCCID replies are not logged */
			if (cmd->hdr.command < IPA_CMD_SETCCID ||
			    cmd->hdr.command > IPA_CMD_MODCCID)
				qeth_issue_ipa_msg(cmd,
						cmd->hdr.return_code, card);
			return cmd;
		} else {
			/* unsolicited command from the card */
			switch (cmd->hdr.command) {
			case IPA_CMD_STOPLAN:
				PRINT_WARN("Link failure on %s (CHPID 0x%X) - "
					   "there is a network problem or "
					   "someone pulled the cable or "
					   "disabled the port.\n",
					   QETH_CARD_IFNAME(card),
					   card->info.chpid);
				card->lan_online = 0;
				if (card->dev && netif_carrier_ok(card->dev))
					netif_carrier_off(card->dev);
				return NULL;
			case IPA_CMD_STARTLAN:
				PRINT_INFO("Link reestablished on %s "
					   "(CHPID 0x%X). Scheduling "
					   "IP address reset.\n",
					   QETH_CARD_IFNAME(card),
					   card->info.chpid);
				netif_carrier_on(card->dev);
				card->lan_online = 1;
				qeth_schedule_recovery(card);
				return NULL;
			case IPA_CMD_MODCCID:
				return cmd;
			case IPA_CMD_REGISTER_LOCAL_ADDR:
				QETH_DBF_TEXT(TRACE, 3, "irla");
				break;
			case IPA_CMD_UNREGISTER_LOCAL_ADDR:
				QETH_DBF_TEXT(TRACE, 3, "urla");
				break;
			default:
				PRINT_WARN("Received data is IPA "
					   "but not a reply!\n");
				break;
			}
		}
	}
	return cmd;
}
437
/*
 * Abort all pending IPA commands: every waiter on cmd_waiter_list gets
 * rc = -EIO, is marked received and woken up.  The temporary reference
 * taken around the wake-up keeps each reply alive while it is unlinked.
 */
void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_reply *reply, *r;
	unsigned long flags;

	QETH_DBF_TEXT(TRACE, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
		qeth_get_reply(reply);
		reply->rc = -EIO;
		atomic_inc(&reply->received);
		list_del_init(&reply->list);
		wake_up(&reply->wait_q);
		qeth_put_reply(reply);
	}
	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
457
458static int qeth_check_idx_response(unsigned char *buffer)
459{
460 if (!buffer)
461 return 0;
462
463 QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
464 if ((buffer[2] & 0xc0) == 0xc0) {
465 PRINT_WARN("received an IDX TERMINATE "
466 "with cause code 0x%02x%s\n",
467 buffer[4],
468 ((buffer[4] == 0x22) ?
469 " -- try another portname" : ""));
470 QETH_DBF_TEXT(TRACE, 2, "ckidxres");
471 QETH_DBF_TEXT(TRACE, 2, " idxterm");
472 QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
473 return -EIO;
474 }
475 return 0;
476}
477
/*
 * Prepare the channel's CCW for the next I/O: copy the READ or WRITE
 * CCW template (chosen by whether @channel is the card's read channel)
 * and point it at @iob with transfer length @len.
 */
static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob,
		__u32 len)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(TRACE, 4, "setupccw");
	card = CARD_FROM_CDEV(channel->ccwdev);
	if (channel == &card->read)
		memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
	else
		memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
	channel->ccw.count = len;
	/* CCW data address is a 31/64-bit real address */
	channel->ccw.cda = (__u32) __pa(iob);
}
492
493static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
494{
495 __u8 index;
496
497 QETH_DBF_TEXT(TRACE, 6, "getbuff");
498 index = channel->io_buf_no;
499 do {
500 if (channel->iob[index].state == BUF_STATE_FREE) {
501 channel->iob[index].state = BUF_STATE_LOCKED;
502 channel->io_buf_no = (channel->io_buf_no + 1) %
503 QETH_CMD_BUFFER_NO;
504 memset(channel->iob[index].data, 0, QETH_BUFSIZE);
505 return channel->iob + index;
506 }
507 index = (index + 1) % QETH_CMD_BUFFER_NO;
508 } while (index != channel->io_buf_no);
509
510 return NULL;
511}
512
/*
 * Return a command buffer to the FREE state: zero its data, restore the
 * default completion callback and clear its rc, under iob_lock.
 */
void qeth_release_buffer(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	unsigned long flags;

	QETH_DBF_TEXT(TRACE, 6, "relbuff");
	spin_lock_irqsave(&channel->iob_lock, flags);
	memset(iob->data, 0, QETH_BUFSIZE);
	iob->state = BUF_STATE_FREE;
	iob->callback = qeth_send_control_data_cb;
	iob->rc = 0;
	spin_unlock_irqrestore(&channel->iob_lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_release_buffer);
527
528static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
529{
530 struct qeth_cmd_buffer *buffer = NULL;
531 unsigned long flags;
532
533 spin_lock_irqsave(&channel->iob_lock, flags);
534 buffer = __qeth_get_buffer(channel);
535 spin_unlock_irqrestore(&channel->iob_lock, flags);
536 return buffer;
537}
538
/*
 * Sleep on the channel's wait queue until a command buffer becomes
 * free, then return it.  Never returns NULL.
 */
struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
{
	struct qeth_cmd_buffer *buffer;
	wait_event(channel->wait_q,
		   ((buffer = qeth_get_buffer(channel)) != NULL));
	return buffer;
}
EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);
547
548void qeth_clear_cmd_buffers(struct qeth_channel *channel)
549{
550 int cnt;
551
552 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
553 qeth_release_buffer(channel, &channel->iob[cnt]);
554 channel->buf_no = 0;
555 channel->io_buf_no = 0;
556}
557EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
558
/*
 * Completion callback for control-data buffers.
 *
 * Validates the buffer (IDX terminate triggers recovery), extracts any
 * IPA command, hands OSN assist data to the OSN callback, and otherwise
 * matches the reply against the pending entries on cmd_waiter_list by
 * sequence number.  A matched reply's callback may ask (by returning
 * non-zero) to stay on the waiter list for further fragments; otherwise
 * the waiter is marked received and woken.  Finally the PDU ack
 * sequence number is recorded and the buffer is released.
 */
static void qeth_send_control_data_cb(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	struct qeth_reply *reply, *r;
	struct qeth_ipa_cmd *cmd;
	unsigned long flags;
	int keep_reply;

	QETH_DBF_TEXT(TRACE, 4, "sndctlcb");

	card = CARD_FROM_CDEV(channel->ccwdev);
	if (qeth_check_idx_response(iob->data)) {
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
		goto out;
	}

	cmd = qeth_check_ipa_data(card, iob);
	if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
		goto out;
	/*in case of OSN : check if cmd is set */
	if (card->info.type == QETH_CARD_TYPE_OSN &&
	    cmd &&
	    cmd->hdr.command != IPA_CMD_STARTLAN &&
	    card->osn_info.assist_cb != NULL) {
		card->osn_info.assist_cb(card->dev, cmd);
		goto out;
	}

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
		if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
		    ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
			qeth_get_reply(reply);
			list_del_init(&reply->list);
			/* drop the lock: the reply callback may sleep
			 * or re-enter the driver */
			spin_unlock_irqrestore(&card->lock, flags);
			keep_reply = 0;
			if (reply->callback != NULL) {
				if (cmd) {
					reply->offset = (__u16)((char *)cmd -
							(char *)iob->data);
					keep_reply = reply->callback(card,
							reply,
							(unsigned long)cmd);
				} else
					keep_reply = reply->callback(card,
							reply,
							(unsigned long)iob);
			}
			if (cmd)
				reply->rc = (u16) cmd->hdr.return_code;
			else if (iob->rc)
				reply->rc = iob->rc;
			if (keep_reply) {
				/* callback expects more data; requeue */
				spin_lock_irqsave(&card->lock, flags);
				list_add_tail(&reply->list,
					      &card->cmd_waiter_list);
				spin_unlock_irqrestore(&card->lock, flags);
			} else {
				atomic_inc(&reply->received);
				wake_up(&reply->wait_q);
			}
			qeth_put_reply(reply);
			goto out;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
		QETH_PDU_HEADER_SEQ_NO(iob->data),
		QETH_SEQ_NO_LENGTH);
	qeth_release_buffer(channel, iob);
}
633
/*
 * Initialize a channel: allocate QETH_CMD_BUFFER_NO DMA-capable command
 * buffers and set up the ring cursors, lock and wait queue.
 *
 * On partial allocation failure, all buffers allocated so far are freed
 * and -ENOMEM is returned.
 */
static int qeth_setup_channel(struct qeth_channel *channel)
{
	int cnt;

	QETH_DBF_TEXT(SETUP, 2, "setupch");
	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
		channel->iob[cnt].data = (char *)
			kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
		if (channel->iob[cnt].data == NULL)
			break;
		channel->iob[cnt].state = BUF_STATE_FREE;
		channel->iob[cnt].channel = channel;
		channel->iob[cnt].callback = qeth_send_control_data_cb;
		channel->iob[cnt].rc = 0;
	}
	if (cnt < QETH_CMD_BUFFER_NO) {
		/* unwind the buffers allocated before the failure */
		while (cnt-- > 0)
			kfree(channel->iob[cnt].data);
		return -ENOMEM;
	}
	channel->buf_no = 0;
	channel->io_buf_no = 0;
	atomic_set(&channel->irq_pending, 0);
	spin_lock_init(&channel->iob_lock);

	init_waitqueue_head(&channel->wait_q);
	return 0;
}
662
663static int qeth_set_thread_start_bit(struct qeth_card *card,
664 unsigned long thread)
665{
666 unsigned long flags;
667
668 spin_lock_irqsave(&card->thread_mask_lock, flags);
669 if (!(card->thread_allowed_mask & thread) ||
670 (card->thread_start_mask & thread)) {
671 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
672 return -EPERM;
673 }
674 card->thread_start_mask |= thread;
675 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
676 return 0;
677}
678
/* Withdraw a pending start request for @thread and wake waiters so
 * they can re-check the masks. */
void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
689
/* Mark @thread as no longer running and wake waiters (e.g. in
 * qeth_wait_for_threads()). */
void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
700
/*
 * Try to transition @thread from "start requested" to "running".
 * Returns 1 when the transition succeeded (caller should run the
 * thread), 0 when no start was requested, and -EPERM when a start is
 * requested but the thread is disallowed or already running.
 */
static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
719
/*
 * Wait until __qeth_do_run_thread() yields a decision (>= 0):
 * returns 1 if the caller should run the thread now, 0 if not.
 * The -EPERM (transient) case keeps the caller sleeping.
 */
int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_run_thread);
729
/* Request a recovery of the card: set the recover-thread start bit and,
 * if that succeeded, schedule the kernel-thread starter work item. */
void qeth_schedule_recovery(struct qeth_card *card)
{
	QETH_DBF_TEXT(TRACE, 2, "startrec");
	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
}
EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
737
/*
 * Classify an interrupt's status as a problem requiring recovery.
 * Returns 1 if the channel/device checks or sense data indicate a real
 * fault, 0 if the condition can be ignored (command reject, all-zero
 * sense).
 */
static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cstat;
	dstat = irb->scsw.dstat;

	/* any channel check is fatal */
	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_DBF_TEXT(TRACE, 2, "CGENCHK");
		PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ",
			   cdev->dev.bus_id, dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
				16, 1, irb, 64, 1);
		return 1;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		/* evaluate the sense data attached to the unit check */
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_DBF_TEXT(TRACE, 2, "REVIND");
			return 1;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_DBF_TEXT(TRACE, 2, "CMDREJi");
			return 0;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_DBF_TEXT(TRACE, 2, "AFFE");
			return 1;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_DBF_TEXT(TRACE, 2, "ZEROSEN");
			return 0;
		}
		QETH_DBF_TEXT(TRACE, 2, "DGENCHK");
		return 1;
	}
	return 0;
}
782
783static long __qeth_check_irb_error(struct ccw_device *cdev,
784 unsigned long intparm, struct irb *irb)
785{
786 if (!IS_ERR(irb))
787 return 0;
788
789 switch (PTR_ERR(irb)) {
790 case -EIO:
791 PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
792 QETH_DBF_TEXT(TRACE, 2, "ckirberr");
793 QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
794 break;
795 case -ETIMEDOUT:
796 PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
797 QETH_DBF_TEXT(TRACE, 2, "ckirberr");
798 QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT);
799 if (intparm == QETH_RCD_PARM) {
800 struct qeth_card *card = CARD_FROM_CDEV(cdev);
801
802 if (card && (card->data.ccwdev == cdev)) {
803 card->data.state = CH_STATE_DOWN;
804 wake_up(&card->wait_q);
805 }
806 }
807 break;
808 default:
809 PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
810 cdev->dev.bus_id);
811 QETH_DBF_TEXT(TRACE, 2, "ckirberr");
812 QETH_DBF_TEXT(TRACE, 2, " rc???");
813 }
814 return PTR_ERR(irb);
815}
816
/*
 * Interrupt handler for all three CCW channels (read/write/data).
 *
 * Identifies the channel, updates its state on clear/halt, evaluates
 * unit checks (possibly scheduling recovery), re-arms the read channel
 * and then drains the channel's buffer ring by invoking the completion
 * callback of every buffer that has reached BUF_STATE_PROCESSED.
 * Most exits wake card->wait_q so synchronous waiters can proceed.
 */
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *buffer;
	struct qeth_channel *channel;
	struct qeth_card *card;
	struct qeth_cmd_buffer *iob;
	__u8 index;

	QETH_DBF_TEXT(TRACE, 5, "irq");

	if (__qeth_check_irb_error(cdev, intparm, irb))
		return;
	cstat = irb->scsw.cstat;
	dstat = irb->scsw.dstat;

	card = CARD_FROM_CDEV(cdev);
	if (!card)
		return;

	/* which of the card's three channels interrupted? */
	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_DBF_TEXT(TRACE, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_DBF_TEXT(TRACE, 5, "write");
	} else {
		channel = &card->data;
		QETH_DBF_TEXT(TRACE, 5, "data");
	}
	atomic_set(&channel->irq_pending, 0);

	if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC))
		channel->state = CH_STATE_STOPPED;

	if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC))
		channel->state = CH_STATE_HALTED;

	/*let's wake up immediately on data channel*/
	if ((channel == &card->data) && (intparm != 0) &&
	    (intparm != QETH_RCD_PARM))
		goto out;

	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
		QETH_DBF_TEXT(TRACE, 6, "clrchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}
	if (intparm == QETH_HALT_CHANNEL_PARM) {
		QETH_DBF_TEXT(TRACE, 6, "hltchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}
	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			/* TODO: we should make this s390dbf */
			PRINT_WARN("sense data available on channel %s.\n",
				   CHANNEL_ID(channel));
			PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}
		if (intparm == QETH_RCD_PARM) {
			channel->state = CH_STATE_DOWN;
			goto out;
		}
		rc = qeth_get_problem(cdev, irb);
		if (rc) {
			qeth_schedule_recovery(card);
			goto out;
		}
	}

	if (intparm == QETH_RCD_PARM) {
		channel->state = CH_STATE_RCD_DONE;
		goto out;
	}
	if (intparm) {
		/* intparm carries the address of the completed buffer */
		buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
		buffer->state = BUF_STATE_PROCESSED;
	}
	if (channel == &card->data)
		return;
	if (channel == &card->read &&
	    channel->state == CH_STATE_UP)
		qeth_issue_next_read(card);

	/* run completion callbacks for all processed buffers in order */
	iob = channel->iob;
	index = channel->buf_no;
	while (iob[index].state == BUF_STATE_PROCESSED) {
		if (iob[index].callback != NULL)
			iob[index].callback(channel, iob + index);

		index = (index + 1) % QETH_CMD_BUFFER_NO;
	}
	channel->buf_no = index;
out:
	wake_up(&card->wait_q);
	return;
}
923
924static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
925 struct qeth_qdio_out_buffer *buf)
926{
927 int i;
928 struct sk_buff *skb;
929
930 /* is PCI flag set on buffer? */
931 if (buf->buffer->element[0].flags & 0x40)
932 atomic_dec(&queue->set_pci_flags_count);
933
934 skb = skb_dequeue(&buf->skb_list);
935 while (skb) {
936 atomic_dec(&skb->users);
937 dev_kfree_skb_any(skb);
938 skb = skb_dequeue(&buf->skb_list);
939 }
940 qeth_eddp_buf_release_contexts(buf);
941 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
942 buf->buffer->element[i].length = 0;
943 buf->buffer->element[i].addr = NULL;
944 buf->buffer->element[i].flags = 0;
945 }
946 buf->next_element_to_fill = 0;
947 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
948}
949
950void qeth_clear_qdio_buffers(struct qeth_card *card)
951{
952 int i, j;
953
954 QETH_DBF_TEXT(TRACE, 2, "clearqdbf");
955 /* clear outbound buffers to free skbs */
956 for (i = 0; i < card->qdio.no_out_queues; ++i)
957 if (card->qdio.out_qs[i]) {
958 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
959 qeth_clear_output_buffer(card->qdio.out_qs[i],
960 &card->qdio.out_qs[i]->bufs[j]);
961 }
962}
963EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
964
/*
 * Free the entire inbound buffer pool: every entry on the init pool
 * list, including each entry's page-sized element buffers.
 */
static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	int i = 0;
	QETH_DBF_TEXT(TRACE, 5, "freepool");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.init_pool.entry_list, init_list){
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
			free_page((unsigned long)pool_entry->elements[i]);
		list_del(&pool_entry->init_list);
		kfree(pool_entry);
	}
}
978
/*
 * Tear down all QDIO queue structures: inbound queue, inbound buffer
 * pool and all outbound queues (whose buffers are cleared first so
 * their skbs get freed).  The atomic_xchg on qdio.state makes this
 * idempotent — a second call sees UNINITIALIZED and returns early.
 */
static void qeth_free_qdio_buffers(struct qeth_card *card)
{
	int i, j;

	QETH_DBF_TEXT(TRACE, 2, "freeqdbf");
	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)
		return;
	kfree(card->qdio.in_q);
	card->qdio.in_q = NULL;
	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
	if (card->qdio.out_qs) {
		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
				qeth_clear_output_buffer(card->qdio.out_qs[i],
						&card->qdio.out_qs[i]->bufs[j]);
			kfree(card->qdio.out_qs[i]);
		}
		kfree(card->qdio.out_qs);
		card->qdio.out_qs = NULL;
	}
}
1003
1004static void qeth_clean_channel(struct qeth_channel *channel)
1005{
1006 int cnt;
1007
1008 QETH_DBF_TEXT(SETUP, 2, "freech");
1009 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1010 kfree(channel->iob[cnt].data);
1011}
1012
/*
 * Detect whether the card sits behind a channel path whose descriptor
 * marks it as single-queue (bit 0x02 in the CHPP byte).  Returns
 * non-zero for "single queue"; also 0 when the channel-path descriptor
 * cannot be obtained.
 */
static int qeth_is_1920_device(struct qeth_card *card)
{
	int single_queue = 0;
	struct ccw_device *ccwdev;
	/* layout of the channel-path description block returned by
	 * ccw_device_get_chp_desc() */
	struct channelPath_dsc {
		u8 flags;
		u8 lsn;
		u8 desc;
		u8 chpid;
		u8 swla;
		u8 zeroes;
		u8 chla;
		u8 chpp;
	} *chp_dsc;

	QETH_DBF_TEXT(SETUP, 2, "chk_1920");

	ccwdev = card->data.ccwdev;
	chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
	if (chp_dsc != NULL) {
		/* CHPP field bit 6 == 1 -> single queue */
		single_queue = ((chp_dsc->chpp & 0x02) == 0x02);
		kfree(chp_dsc);
	}
	QETH_DBF_TEXT_(SETUP, 2, "rc:%x", single_queue);
	return single_queue;
}
1040
/* Set the QDIO bookkeeping of a fresh card to its defaults:
 * uninitialized state, default inbound buffer size/count and empty
 * buffer-pool lists. */
static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_DBF_TEXT(SETUP, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	/* inbound */
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}
1052
/* Set the per-card option defaults used before any sysfs/user
 * configuration takes effect.
 * NOTE(review): "intial" is a typo for "initial" in the function name;
 * renaming would require touching every caller. */
static void qeth_set_intial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
	card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
	card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
	card->options.fake_broadcast = 0;
	card->options.add_hhlen = DEFAULT_ADD_HHLEN;
	card->options.fake_ll = 0;
	card->options.performance_stats = 0;
	card->options.rx_sg_cb = QETH_RX_SG_CB;
}
1066
1067static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1068{
1069 unsigned long flags;
1070 int rc = 0;
1071
1072 spin_lock_irqsave(&card->thread_mask_lock, flags);
1073 QETH_DBF_TEXT_(TRACE, 4, " %02x%02x%02x",
1074 (u8) card->thread_start_mask,
1075 (u8) card->thread_allowed_mask,
1076 (u8) card->thread_running_mask);
1077 rc = (card->thread_start_mask & thread);
1078 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1079 return rc;
1080}
1081
/*
 * Work-queue handler that launches requested driver threads.
 * Does nothing while both control channels (read and write) are down;
 * otherwise spawns the discipline's recovery thread if its start bit
 * is set.
 */
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_DBF_TEXT(TRACE , 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
		kthread_run(card->discipline.recover, (void *) card,
				"qeth_recover");
}
1095
/*
 * Initialize all software state of a freshly allocated card: channel
 * and card states, locks, thread masks, work item, list heads, default
 * options, IP-address-takeover state and QDIO bookkeeping.
 * Returns 0 on success or -ENOMEM if the ip_tbd_list allocation fails.
 */
static int qeth_setup_card(struct qeth_card *card)
{

	QETH_DBF_TEXT(SETUP, 2, "setupcrd");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->read.state = CH_STATE_DOWN;
	card->write.state = CH_STATE_DOWN;
	card->data.state = CH_STATE_DOWN;
	card->state = CARD_STATE_DOWN;
	card->lan_online = 0;
	card->use_hard_stop = 0;
	card->dev = NULL;
	spin_lock_init(&card->vlanlock);
	spin_lock_init(&card->mclock);
	card->vlangrp = NULL;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->ip_lock);
	spin_lock_init(&card->thread_mask_lock);
	card->thread_start_mask = 0;
	card->thread_allowed_mask = 0;
	card->thread_running_mask = 0;
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->ip_list);
	/* to-be-done IP list head lives in its own allocation */
	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
	if (!card->ip_tbd_list) {
		QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(card->ip_tbd_list);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	/* intial options */
	qeth_set_intial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	card->ipato.enabled = 0;
	card->ipato.invert4 = 0;
	card->ipato.invert6 = 0;
	/* init QDIO stuff */
	qeth_init_qdio_info(card);
	return 0;
}
1139
1140static struct qeth_card *qeth_alloc_card(void)
1141{
1142 struct qeth_card *card;
1143
1144 QETH_DBF_TEXT(SETUP, 2, "alloccrd");
1145 card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
1146 if (!card)
1147 return NULL;
1148 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1149 if (qeth_setup_channel(&card->read)) {
1150 kfree(card);
1151 return NULL;
1152 }
1153 if (qeth_setup_channel(&card->write)) {
1154 qeth_clean_channel(&card->read);
1155 kfree(card);
1156 return NULL;
1157 }
1158 card->options.layer2 = -1;
1159 return card;
1160}
1161
1162static int qeth_determine_card_type(struct qeth_card *card)
1163{
1164 int i = 0;
1165
1166 QETH_DBF_TEXT(SETUP, 2, "detcdtyp");
1167
1168 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1169 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1170 while (known_devices[i][4]) {
1171 if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
1172 (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
1173 card->info.type = known_devices[i][4];
1174 card->qdio.no_out_queues = known_devices[i][8];
1175 card->info.is_multicast_different = known_devices[i][9];
1176 if (qeth_is_1920_device(card)) {
1177 PRINT_INFO("Priority Queueing not able "
1178 "due to hardware limitations!\n");
1179 card->qdio.no_out_queues = 1;
1180 card->qdio.default_out_queue = 0;
1181 }
1182 return 0;
1183 }
1184 i++;
1185 }
1186 card->info.type = QETH_CARD_TYPE_UNKNOWN;
1187 PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card));
1188 return -ENOENT;
1189}
1190
1191static int qeth_clear_channel(struct qeth_channel *channel)
1192{
1193 unsigned long flags;
1194 struct qeth_card *card;
1195 int rc;
1196
1197 QETH_DBF_TEXT(TRACE, 3, "clearch");
1198 card = CARD_FROM_CDEV(channel->ccwdev);
1199 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1200 rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
1201 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1202
1203 if (rc)
1204 return rc;
1205 rc = wait_event_interruptible_timeout(card->wait_q,
1206 channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
1207 if (rc == -ERESTARTSYS)
1208 return rc;
1209 if (channel->state != CH_STATE_STOPPED)
1210 return -ETIME;
1211 channel->state = CH_STATE_DOWN;
1212 return 0;
1213}
1214
1215static int qeth_halt_channel(struct qeth_channel *channel)
1216{
1217 unsigned long flags;
1218 struct qeth_card *card;
1219 int rc;
1220
1221 QETH_DBF_TEXT(TRACE, 3, "haltch");
1222 card = CARD_FROM_CDEV(channel->ccwdev);
1223 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1224 rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
1225 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1226
1227 if (rc)
1228 return rc;
1229 rc = wait_event_interruptible_timeout(card->wait_q,
1230 channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
1231 if (rc == -ERESTARTSYS)
1232 return rc;
1233 if (channel->state != CH_STATE_HALTED)
1234 return -ETIME;
1235 return 0;
1236}
1237
1238static int qeth_halt_channels(struct qeth_card *card)
1239{
1240 int rc1 = 0, rc2 = 0, rc3 = 0;
1241
1242 QETH_DBF_TEXT(TRACE, 3, "haltchs");
1243 rc1 = qeth_halt_channel(&card->read);
1244 rc2 = qeth_halt_channel(&card->write);
1245 rc3 = qeth_halt_channel(&card->data);
1246 if (rc1)
1247 return rc1;
1248 if (rc2)
1249 return rc2;
1250 return rc3;
1251}
1252
1253static int qeth_clear_channels(struct qeth_card *card)
1254{
1255 int rc1 = 0, rc2 = 0, rc3 = 0;
1256
1257 QETH_DBF_TEXT(TRACE, 3, "clearchs");
1258 rc1 = qeth_clear_channel(&card->read);
1259 rc2 = qeth_clear_channel(&card->write);
1260 rc3 = qeth_clear_channel(&card->data);
1261 if (rc1)
1262 return rc1;
1263 if (rc2)
1264 return rc2;
1265 return rc3;
1266}
1267
1268static int qeth_clear_halt_card(struct qeth_card *card, int halt)
1269{
1270 int rc = 0;
1271
1272 QETH_DBF_TEXT(TRACE, 3, "clhacrd");
1273 QETH_DBF_HEX(TRACE, 3, &card, sizeof(void *));
1274
1275 if (halt)
1276 rc = qeth_halt_channels(card);
1277 if (rc)
1278 return rc;
1279 return qeth_clear_channels(card);
1280}
1281
1282int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
1283{
1284 int rc = 0;
1285
1286 QETH_DBF_TEXT(TRACE, 3, "qdioclr");
1287 switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
1288 QETH_QDIO_CLEANING)) {
1289 case QETH_QDIO_ESTABLISHED:
1290 if (card->info.type == QETH_CARD_TYPE_IQD)
1291 rc = qdio_cleanup(CARD_DDEV(card),
1292 QDIO_FLAG_CLEANUP_USING_HALT);
1293 else
1294 rc = qdio_cleanup(CARD_DDEV(card),
1295 QDIO_FLAG_CLEANUP_USING_CLEAR);
1296 if (rc)
1297 QETH_DBF_TEXT_(TRACE, 3, "1err%d", rc);
1298 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
1299 break;
1300 case QETH_QDIO_CLEANING:
1301 return rc;
1302 default:
1303 break;
1304 }
1305 rc = qeth_clear_halt_card(card, use_halt);
1306 if (rc)
1307 QETH_DBF_TEXT_(TRACE, 3, "2err%d", rc);
1308 card->state = CARD_STATE_DOWN;
1309 return rc;
1310}
1311EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
1312
/*
 * Read the device's configuration data record (RCD) over the data
 * channel.  On success *buffer/*length receive a kzalloc'd copy that the
 * caller must kfree(); on failure they are set to NULL/0.
 * Returns 0, -EOPNOTSUPP (no RCD CIW), -ENOMEM, -EIO or a
 * ccw_device_start_timeout() error.
 */
static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
		int *length)
{
	struct ciw *ciw;
	char *rcd_buf;
	int ret;
	struct qeth_channel *channel = &card->data;
	unsigned long flags;

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	/* GFP_DMA: the buffer's physical address goes into the CCW */
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf)
		return -ENOMEM;

	/* build the RCD channel program; state must be CH_STATE_RCD
	 * before starting so the IRQ handler routes the answer here */
	channel->ccw.cmd_code = ciw->cmd;
	channel->ccw.cda = (__u32) __pa(rcd_buf);
	channel->ccw.count = ciw->count;
	channel->ccw.flags = CCW_FLAG_SLI;
	channel->state = CH_STATE_RCD;
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
				QETH_RCD_PARM, LPM_ANYPATH, 0,
				QETH_RCD_TIMEOUT);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	/* IRQ handler moves the channel to RCD_DONE (success) or DOWN */
	if (!ret)
		wait_event(card->wait_q,
			   (channel->state == CH_STATE_RCD_DONE ||
			    channel->state == CH_STATE_DOWN));
	if (channel->state == CH_STATE_DOWN)
		ret = -EIO;
	else
		channel->state = CH_STATE_DOWN;
	if (ret) {
		kfree(rcd_buf);
		*buffer = NULL;
		*length = 0;
	} else {
		*length = ciw->count;
		*buffer = rcd_buf;
	}
	return ret;
}
1360
/*
 * Extract addressing information from the device's RCD buffer:
 * CHPID, unit address, CULA, and whether the card is a z/VM guest LAN.
 * Returns 0 or the qeth_read_conf_data() error.
 */
static int qeth_get_unitaddr(struct qeth_card *card)
{
	int length;
	char *prcd;
	int rc;

	QETH_DBF_TEXT(SETUP, 2, "getunit");
	rc = qeth_read_conf_data(card, (void **) &prcd, &length);
	if (rc) {
		PRINT_ERR("qeth_read_conf_data for device %s returned %i\n",
			CARD_DDEV_ID(card), rc);
		return rc;
	}
	/* fixed RCD offsets; presumably defined by the node descriptor
	 * layout of the hardware -- TODO confirm against architecture doc */
	card->info.chpid = prcd[30];
	card->info.unit_addr2 = prcd[31];
	card->info.cula = prcd[63];
	/* "VM" tag (in EBCDIC) at offset 0x10 marks a guest LAN device */
	card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
			       (prcd[0x11] == _ascebc['M']));
	kfree(prcd);
	return 0;
}
1382
/*
 * Seed the write-side MPC handshake tokens with their fixed initial
 * values; the read-side counterparts are filled in from the adapter's
 * replies during CM/ULP setup.
 */
static void qeth_init_tokens(struct qeth_card *card)
{
	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;
}
1391
1392static void qeth_init_func_level(struct qeth_card *card)
1393{
1394 if (card->ipato.enabled) {
1395 if (card->info.type == QETH_CARD_TYPE_IQD)
1396 card->info.func_level =
1397 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
1398 else
1399 card->info.func_level =
1400 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
1401 } else {
1402 if (card->info.type == QETH_CARD_TYPE_IQD)
1403 /*FIXME:why do we have same values for dis and ena for
1404 osae??? */
1405 card->info.func_level =
1406 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
1407 else
1408 card->info.func_level =
1409 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
1410 }
1411}
1412
1413static inline __u16 qeth_raw_devno_from_bus_id(char *id)
1414{
1415 id += (strlen(id) - 4);
1416 return (__u16) simple_strtoul(id, &id, 16);
1417}
1418
/*
 * Post a read on @channel to receive the adapter's answer to an IDX
 * ACTIVATE and wait (bounded by QETH_TIMEOUT) for @idx_reply_cb to move
 * the channel to CH_STATE_UP.  Returns 0, -ERESTARTSYS, -ETIME or the
 * ccw_device_start() error.
 */
static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
		void (*idx_reply_cb)(struct qeth_channel *,
			struct qeth_cmd_buffer *))
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;
	int rc;
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "idxanswr");
	card = CARD_FROM_CDEV(channel->ccwdev);
	iob = qeth_get_buffer(channel);
	iob->callback = idx_reply_cb;
	memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
	channel->ccw.count = QETH_BUFSIZE;
	channel->ccw.cda = (__u32) __pa(iob->data);

	/* claim the channel: wait until irq_pending flips 0 -> 1 */
	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      &channel->ccw, (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc) {
		PRINT_ERR("Error2 in activating channel rc=%d\n", rc);
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		/* release the channel claim and wake other waiters */
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			 channel->state == CH_STATE_UP, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_UP) {
		rc = -ETIME;
		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
		qeth_clear_cmd_buffers(channel);
	} else
		rc = 0;
	return rc;
}
1463
/*
 * Build and send an IDX ACTIVATE request on @channel (read or write),
 * wait for the first interrupt (CH_STATE_ACTIVATING), then fetch the
 * adapter's answer via qeth_idx_activate_get_answer().
 * Returns 0, -ERESTARTSYS, -ETIME or a ccw_device_start() error.
 */
static int qeth_idx_activate_channel(struct qeth_channel *channel,
		void (*idx_reply_cb)(struct qeth_channel *,
			struct qeth_cmd_buffer *))
{
	struct qeth_card *card;
	struct qeth_cmd_buffer *iob;
	unsigned long flags;
	__u16 temp;
	__u8 tmp;
	int rc;

	card = CARD_FROM_CDEV(channel->ccwdev);

	QETH_DBF_TEXT(SETUP, 2, "idxactch");

	iob = qeth_get_buffer(channel);
	iob->callback = idx_reply_cb;
	memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
	channel->ccw.count = IDX_ACTIVATE_SIZE;
	channel->ccw.cda = (__u32) __pa(iob->data);
	/* only the write channel consumes a transport-header seqno */
	if (channel == &card->write) {
		memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
		card->seqno.trans_hdr++;
	} else {
		memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	}
	/* port number with high bit set -- presumably an "explicit port"
	 * flag in the IDX request; TODO confirm against MPC spec */
	tmp = ((__u8)card->info.portno) | 0x80;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, sizeof(__u16));
	/* identify the QDIO data device by devno and unit address */
	temp = qeth_raw_devno_from_bus_id(CARD_DDEV_ID(card));
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);

	/* claim the channel: wait until irq_pending flips 0 -> 1 */
	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      &channel->ccw, (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc) {
		PRINT_ERR("Error1 in activating channel. rc=%d\n", rc);
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		/* release the channel claim and wake other waiters */
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_ACTIVATING) {
		PRINT_WARN("IDX activate timed out!\n");
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
		qeth_clear_cmd_buffers(channel);
		return -ETIME;
	}
	return qeth_idx_activate_get_answer(channel, idx_reply_cb);
}
1532
1533static int qeth_peer_func_level(int level)
1534{
1535 if ((level & 0xff) == 8)
1536 return (level & 0xff) + 0x400;
1537 if (((level >> 8) & 3) == 1)
1538 return (level & 0xff) + 0x200;
1539 return level;
1540}
1541
1542static void qeth_idx_write_cb(struct qeth_channel *channel,
1543 struct qeth_cmd_buffer *iob)
1544{
1545 struct qeth_card *card;
1546 __u16 temp;
1547
1548 QETH_DBF_TEXT(SETUP , 2, "idxwrcb");
1549
1550 if (channel->state == CH_STATE_DOWN) {
1551 channel->state = CH_STATE_ACTIVATING;
1552 goto out;
1553 }
1554 card = CARD_FROM_CDEV(channel->ccwdev);
1555
1556 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1557 if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
1558 PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
1559 "adapter exclusively used by another host\n",
1560 CARD_WDEV_ID(card));
1561 else
1562 PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
1563 "negative reply\n", CARD_WDEV_ID(card));
1564 goto out;
1565 }
1566 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1567 if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
1568 PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
1569 "function level mismatch "
1570 "(sent: 0x%x, received: 0x%x)\n",
1571 CARD_WDEV_ID(card), card->info.func_level, temp);
1572 goto out;
1573 }
1574 channel->state = CH_STATE_UP;
1575out:
1576 qeth_release_buffer(channel, iob);
1577}
1578
/*
 * IDX ACTIVATE reply handler for the read channel: validate the reply,
 * capture the issuer token and microcode level, and mark the channel up.
 * The command buffer is always released before returning.
 */
static void qeth_idx_read_cb(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	__u16 temp;

	QETH_DBF_TEXT(SETUP , 2, "idxrdcb");
	/* first interrupt after the request: just advance the state */
	if (channel->state == CH_STATE_DOWN) {
		channel->state = CH_STATE_ACTIVATING;
		goto out;
	}

	card = CARD_FROM_CDEV(channel->ccwdev);
	if (qeth_check_idx_response(iob->data))
		goto out;

	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
		/* cause code 0x19: device already claimed elsewhere */
		if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
			PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
				"adapter exclusively used by another host\n",
				CARD_RDEV_ID(card));
		else
			PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
				"negative reply\n", CARD_RDEV_ID(card));
		goto out;
	}

/**
 * temporary fix for microcode bug
 * to revert it,replace OR by AND
 */
	/* forces portname_required for every OSAE card while the
	 * microcode workaround above is in effect */
	if ((!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
	    (card->info.type == QETH_CARD_TYPE_OSAE))
		card->info.portname_required = 1;

	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (temp != qeth_peer_func_level(card->info.func_level)) {
		PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
			"level mismatch (sent: 0x%x, received: 0x%x)\n",
			CARD_RDEV_ID(card), card->info.func_level, temp);
		goto out;
	}
	/* remember the adapter's issuer token and microcode level */
	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
	channel->state = CH_STATE_UP;
out:
	qeth_release_buffer(channel, iob);
}
1630
/*
 * Prepare @iob for transmission on the write channel: set up the CCW,
 * stamp the transport-header and PDU-header sequence numbers (advancing
 * the card's counters as a side effect) and mirror the last-acked PDU
 * seqno into the buffer.
 */
void qeth_prepare_control_data(struct qeth_card *card, int len,
		struct qeth_cmd_buffer *iob)
{
	qeth_setup_ccw(&card->write, iob->data, len);
	/* default callback just returns the buffer to the channel pool */
	iob->callback = qeth_release_buffer;

	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
	       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.trans_hdr++;
	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
	QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
}
EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
1648
/*
 * Send a control command of @len bytes in @iob on the write channel and
 * wait for the reply.  @reply_cb (may be NULL) is invoked by the read
 * path when the matching answer arrives; @reply_param is passed through
 * to it.  Returns 0, the reply's rc, -ENOMEM or -ETIME on timeout
 * (QETH_IPA_TIMEOUT for IPA commands, QETH_TIMEOUT otherwise).
 */
int qeth_send_control_data(struct qeth_card *card, int len,
		struct qeth_cmd_buffer *iob,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
			unsigned long),
		void *reply_param)
{
	int rc;
	unsigned long flags;
	struct qeth_reply *reply = NULL;
	unsigned long timeout;

	QETH_DBF_TEXT(TRACE, 2, "sendctl");

	reply = qeth_alloc_reply(card);
	if (!reply) {
		PRINT_WARN("Could not alloc qeth_reply!\n");
		return -ENOMEM;
	}
	reply->callback = reply_cb;
	reply->param = reply_param;
	/* IDX-phase commands use a fixed seqno; afterwards each command
	 * consumes one IPA sequence number for reply matching */
	if (card->state == CARD_STATE_DOWN)
		reply->seqno = QETH_IDX_COMMAND_SEQNO;
	else
		reply->seqno = card->seqno.ipa++;
	init_waitqueue_head(&reply->wait_q);
	spin_lock_irqsave(&card->lock, flags);
	list_add_tail(&reply->list, &card->cmd_waiter_list);
	spin_unlock_irqrestore(&card->lock, flags);
	QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);

	/* spin until we own the write channel (irq_pending 0 -> 1) */
	while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
	qeth_prepare_control_data(card, len, iob);

	if (IS_IPA(iob->data))
		timeout = jiffies + QETH_IPA_TIMEOUT;
	else
		timeout = jiffies + QETH_TIMEOUT;

	QETH_DBF_TEXT(TRACE, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
			      (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
	if (rc) {
		PRINT_WARN("qeth_send_control_data: "
			   "ccw_device_start rc = %i\n", rc);
		QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
		/* undo registration: drop from waiter list, release buffer
		 * and the channel claim */
		spin_lock_irqsave(&card->lock, flags);
		list_del_init(&reply->list);
		qeth_put_reply(reply);
		spin_unlock_irqrestore(&card->lock, flags);
		qeth_release_buffer(iob->channel, iob);
		atomic_set(&card->write.irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}
	/* NOTE(review): busy-wait polling with cpu_relax() rather than
	 * sleeping on reply->wait_q -- intentional here? */
	while (!atomic_read(&reply->received)) {
		if (time_after(jiffies, timeout)) {
			spin_lock_irqsave(&reply->card->lock, flags);
			list_del_init(&reply->list);
			spin_unlock_irqrestore(&reply->card->lock, flags);
			reply->rc = -ETIME;
			atomic_inc(&reply->received);
			wake_up(&reply->wait_q);
		}
		cpu_relax();
	};
	rc = reply->rc;
	qeth_put_reply(reply);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_control_data);
1721
1722static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1723 unsigned long data)
1724{
1725 struct qeth_cmd_buffer *iob;
1726
1727 QETH_DBF_TEXT(SETUP, 2, "cmenblcb");
1728
1729 iob = (struct qeth_cmd_buffer *) data;
1730 memcpy(&card->token.cm_filter_r,
1731 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
1732 QETH_MPC_TOKEN_LENGTH);
1733 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
1734 return 0;
1735}
1736
1737static int qeth_cm_enable(struct qeth_card *card)
1738{
1739 int rc;
1740 struct qeth_cmd_buffer *iob;
1741
1742 QETH_DBF_TEXT(SETUP, 2, "cmenable");
1743
1744 iob = qeth_wait_for_buffer(&card->write);
1745 memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
1746 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
1747 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1748 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
1749 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
1750
1751 rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
1752 qeth_cm_enable_cb, NULL);
1753 return rc;
1754}
1755
1756static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
1757 unsigned long data)
1758{
1759
1760 struct qeth_cmd_buffer *iob;
1761
1762 QETH_DBF_TEXT(SETUP, 2, "cmsetpcb");
1763
1764 iob = (struct qeth_cmd_buffer *) data;
1765 memcpy(&card->token.cm_connection_r,
1766 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
1767 QETH_MPC_TOKEN_LENGTH);
1768 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
1769 return 0;
1770}
1771
1772static int qeth_cm_setup(struct qeth_card *card)
1773{
1774 int rc;
1775 struct qeth_cmd_buffer *iob;
1776
1777 QETH_DBF_TEXT(SETUP, 2, "cmsetup");
1778
1779 iob = qeth_wait_for_buffer(&card->write);
1780 memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
1781 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
1782 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1783 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
1784 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
1785 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
1786 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
1787 rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
1788 qeth_cm_setup_cb, NULL);
1789 return rc;
1790
1791}
1792
1793static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
1794{
1795 switch (card->info.type) {
1796 case QETH_CARD_TYPE_UNKNOWN:
1797 return 1500;
1798 case QETH_CARD_TYPE_IQD:
1799 return card->info.max_mtu;
1800 case QETH_CARD_TYPE_OSAE:
1801 switch (card->info.link_type) {
1802 case QETH_LINK_TYPE_HSTR:
1803 case QETH_LINK_TYPE_LANE_TR:
1804 return 2000;
1805 default:
1806 return 1492;
1807 }
1808 default:
1809 return 1500;
1810 }
1811}
1812
1813static inline int qeth_get_max_mtu_for_card(int cardtype)
1814{
1815 switch (cardtype) {
1816
1817 case QETH_CARD_TYPE_UNKNOWN:
1818 case QETH_CARD_TYPE_OSAE:
1819 case QETH_CARD_TYPE_OSN:
1820 return 61440;
1821 case QETH_CARD_TYPE_IQD:
1822 return 57344;
1823 default:
1824 return 1500;
1825 }
1826}
1827
1828static inline int qeth_get_mtu_out_of_mpc(int cardtype)
1829{
1830 switch (cardtype) {
1831 case QETH_CARD_TYPE_IQD:
1832 return 1;
1833 default:
1834 return 0;
1835 }
1836}
1837
/*
 * Translate the frame-size code from the ULP_ENABLE response into an
 * MTU in bytes; 0 signals an unrecognized code.
 */
static inline int qeth_get_mtu_outof_framesize(int framesize)
{
	if (framesize == 0x4000)
		return 8192;
	if (framesize == 0x6000)
		return 16384;
	if (framesize == 0xa000)
		return 32768;
	if (framesize == 0xffff)
		return 57344;
	return 0;
}
1853
1854static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
1855{
1856 switch (card->info.type) {
1857 case QETH_CARD_TYPE_OSAE:
1858 return ((mtu >= 576) && (mtu <= 61440));
1859 case QETH_CARD_TYPE_IQD:
1860 return ((mtu >= 576) &&
1861 (mtu <= card->info.max_mtu + 4096 - 32));
1862 case QETH_CARD_TYPE_OSN:
1863 case QETH_CARD_TYPE_UNKNOWN:
1864 default:
1865 return 1;
1866 }
1867}
1868
/*
 * Reply handler for ULP_ENABLE: store the ULP filter token and derive
 * the MTU configuration and link type from the adapter's answer.
 * Sets iob->rc = -EINVAL if an IQD card reports an unknown frame size.
 */
static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{

	__u16 mtu, framesize;
	__u16 len;
	__u8 link_type;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(SETUP, 2, "ulpenacb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (qeth_get_mtu_out_of_mpc(card->info.type)) {
		/* IQD: the adapter dictates the MTU via a frame-size code */
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
		if (!mtu) {
			iob->rc = -EINVAL;
			QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
			return 0;
		}
		card->info.max_mtu = mtu;
		card->info.initial_mtu = mtu;
		/* inbound buffers need headroom beyond the MTU */
		card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
	} else {
		/* OSA: use driver defaults based on card/link type */
		card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
		card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
		card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	}

	/* the link type field is only present in longer replies */
	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
		card->info.link_type = link_type;
	} else
		card->info.link_type = 0;
	QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
	return 0;
}
1911
1912static int qeth_ulp_enable(struct qeth_card *card)
1913{
1914 int rc;
1915 char prot_type;
1916 struct qeth_cmd_buffer *iob;
1917
1918 /*FIXME: trace view callbacks*/
1919 QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
1920
1921 iob = qeth_wait_for_buffer(&card->write);
1922 memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
1923
1924 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
1925 (__u8) card->info.portno;
1926 if (card->options.layer2)
1927 if (card->info.type == QETH_CARD_TYPE_OSN)
1928 prot_type = QETH_PROT_OSN2;
1929 else
1930 prot_type = QETH_PROT_LAYER2;
1931 else
1932 prot_type = QETH_PROT_TCPIP;
1933
1934 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
1935 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
1936 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
1937 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
1938 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
1939 memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
1940 card->info.portname, 9);
1941 rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
1942 qeth_ulp_enable_cb, NULL);
1943 return rc;
1944
1945}
1946
1947static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
1948 unsigned long data)
1949{
1950 struct qeth_cmd_buffer *iob;
1951
1952 QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
1953
1954 iob = (struct qeth_cmd_buffer *) data;
1955 memcpy(&card->token.ulp_connection_r,
1956 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
1957 QETH_MPC_TOKEN_LENGTH);
1958 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
1959 return 0;
1960}
1961
1962static int qeth_ulp_setup(struct qeth_card *card)
1963{
1964 int rc;
1965 __u16 temp;
1966 struct qeth_cmd_buffer *iob;
1967 struct ccw_dev_id dev_id;
1968
1969 QETH_DBF_TEXT(SETUP, 2, "ulpsetup");
1970
1971 iob = qeth_wait_for_buffer(&card->write);
1972 memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
1973
1974 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
1975 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
1976 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
1977 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
1978 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
1979 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
1980
1981 ccw_device_get_id(CARD_DDEV(card), &dev_id);
1982 memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
1983 temp = (card->info.cula << 8) + card->info.unit_addr2;
1984 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
1985 rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
1986 qeth_ulp_setup_cb, NULL);
1987 return rc;
1988}
1989
1990static int qeth_alloc_qdio_buffers(struct qeth_card *card)
1991{
1992 int i, j;
1993
1994 QETH_DBF_TEXT(SETUP, 2, "allcqdbf");
1995
1996 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
1997 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
1998 return 0;
1999
2000 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
2001 GFP_KERNEL);
2002 if (!card->qdio.in_q)
2003 goto out_nomem;
2004 QETH_DBF_TEXT(SETUP, 2, "inq");
2005 QETH_DBF_HEX(SETUP, 2, &card->qdio.in_q, sizeof(void *));
2006 memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
2007 /* give inbound qeth_qdio_buffers their qdio_buffers */
2008 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
2009 card->qdio.in_q->bufs[i].buffer =
2010 &card->qdio.in_q->qdio_bufs[i];
2011 /* inbound buffer pool */
2012 if (qeth_alloc_buffer_pool(card))
2013 goto out_freeinq;
2014 /* outbound */
2015 card->qdio.out_qs =
2016 kmalloc(card->qdio.no_out_queues *
2017 sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
2018 if (!card->qdio.out_qs)
2019 goto out_freepool;
2020 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2021 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
2022 GFP_KERNEL);
2023 if (!card->qdio.out_qs[i])
2024 goto out_freeoutq;
2025 QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
2026 QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
2027 memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
2028 card->qdio.out_qs[i]->queue_no = i;
2029 /* give outbound qeth_qdio_buffers their qdio_buffers */
2030 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2031 card->qdio.out_qs[i]->bufs[j].buffer =
2032 &card->qdio.out_qs[i]->qdio_bufs[j];
2033 skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
2034 skb_list);
2035 lockdep_set_class(
2036 &card->qdio.out_qs[i]->bufs[j].skb_list.lock,
2037 &qdio_out_skb_queue_key);
2038 INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
2039 }
2040 }
2041 return 0;
2042
2043out_freeoutq:
2044 while (i > 0)
2045 kfree(card->qdio.out_qs[--i]);
2046 kfree(card->qdio.out_qs);
2047 card->qdio.out_qs = NULL;
2048out_freepool:
2049 qeth_free_buffer_pool(card);
2050out_freeinq:
2051 kfree(card->qdio.in_q);
2052 card->qdio.in_q = NULL;
2053out_nomem:
2054 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2055 return -ENOMEM;
2056}
2057
/*
 * Fill the QIB parameter field with the "PCIT" (EBCDIC) block carrying
 * the PCI thresholds and timer value for the adapter.
 * The raw unsigned-int stores match the on-wire layout expected by the
 * QDIO layer.
 */
static void qeth_create_qib_param_field(struct qeth_card *card,
		char *param_field)
{

	param_field[0] = _ascebc['P'];
	param_field[1] = _ascebc['C'];
	param_field[2] = _ascebc['I'];
	param_field[3] = _ascebc['T'];
	*((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
	*((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
	*((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
}
2070
/*
 * Append the "BLKT" (EBCDIC) block to the QIB parameter field at offset
 * 16, carrying the blocking-threshold timing values from card->info.blkt.
 */
static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
		char *param_field)
{
	param_field[16] = _ascebc['B'];
	param_field[17] = _ascebc['L'];
	param_field[18] = _ascebc['K'];
	param_field[19] = _ascebc['T'];
	*((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
	*((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
	*((unsigned int *) (&param_field[28])) =
		card->info.blkt.inter_packet_jumbo;
}
2083
/* Activate QDIO on the data device; thin wrapper around qdio_activate(). */
static int qeth_qdio_activate(struct qeth_card *card)
{
	QETH_DBF_TEXT(SETUP, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card), 0);
}
2089
2090static int qeth_dm_act(struct qeth_card *card)
2091{
2092 int rc;
2093 struct qeth_cmd_buffer *iob;
2094
2095 QETH_DBF_TEXT(SETUP, 2, "dmact");
2096
2097 iob = qeth_wait_for_buffer(&card->write);
2098 memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
2099
2100 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2101 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2102 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2103 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2104 rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
2105 return rc;
2106}
2107
2108static int qeth_mpc_initialize(struct qeth_card *card)
2109{
2110 int rc;
2111
2112 QETH_DBF_TEXT(SETUP, 2, "mpcinit");
2113
2114 rc = qeth_issue_next_read(card);
2115 if (rc) {
2116 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2117 return rc;
2118 }
2119 rc = qeth_cm_enable(card);
2120 if (rc) {
2121 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
2122 goto out_qdio;
2123 }
2124 rc = qeth_cm_setup(card);
2125 if (rc) {
2126 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
2127 goto out_qdio;
2128 }
2129 rc = qeth_ulp_enable(card);
2130 if (rc) {
2131 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
2132 goto out_qdio;
2133 }
2134 rc = qeth_ulp_setup(card);
2135 if (rc) {
2136 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
2137 goto out_qdio;
2138 }
2139 rc = qeth_alloc_qdio_buffers(card);
2140 if (rc) {
2141 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
2142 goto out_qdio;
2143 }
2144 rc = qeth_qdio_establish(card);
2145 if (rc) {
2146 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
2147 qeth_free_qdio_buffers(card);
2148 goto out_qdio;
2149 }
2150 rc = qeth_qdio_activate(card);
2151 if (rc) {
2152 QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
2153 goto out_qdio;
2154 }
2155 rc = qeth_dm_act(card);
2156 if (rc) {
2157 QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
2158 goto out_qdio;
2159 }
2160
2161 return 0;
2162out_qdio:
2163 qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
2164 return rc;
2165}
2166
2167static void qeth_print_status_with_portname(struct qeth_card *card)
2168{
2169 char dbf_text[15];
2170 int i;
2171
2172 sprintf(dbf_text, "%s", card->info.portname + 1);
2173 for (i = 0; i < 8; i++)
2174 dbf_text[i] =
2175 (char) _ebcasc[(__u8) dbf_text[i]];
2176 dbf_text[8] = 0;
2177 PRINT_INFO("Device %s/%s/%s is a%s card%s%s%s\n"
2178 "with link type %s (portname: %s)\n",
2179 CARD_RDEV_ID(card),
2180 CARD_WDEV_ID(card),
2181 CARD_DDEV_ID(card),
2182 qeth_get_cardname(card),
2183 (card->info.mcl_level[0]) ? " (level: " : "",
2184 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2185 (card->info.mcl_level[0]) ? ")" : "",
2186 qeth_get_cardname_short(card),
2187 dbf_text);
2188
2189}
2190
2191static void qeth_print_status_no_portname(struct qeth_card *card)
2192{
2193 if (card->info.portname[0])
2194 PRINT_INFO("Device %s/%s/%s is a%s "
2195 "card%s%s%s\nwith link type %s "
2196 "(no portname needed by interface).\n",
2197 CARD_RDEV_ID(card),
2198 CARD_WDEV_ID(card),
2199 CARD_DDEV_ID(card),
2200 qeth_get_cardname(card),
2201 (card->info.mcl_level[0]) ? " (level: " : "",
2202 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2203 (card->info.mcl_level[0]) ? ")" : "",
2204 qeth_get_cardname_short(card));
2205 else
2206 PRINT_INFO("Device %s/%s/%s is a%s "
2207 "card%s%s%s\nwith link type %s.\n",
2208 CARD_RDEV_ID(card),
2209 CARD_WDEV_ID(card),
2210 CARD_DDEV_ID(card),
2211 qeth_get_cardname(card),
2212 (card->info.mcl_level[0]) ? " (level: " : "",
2213 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2214 (card->info.mcl_level[0]) ? ")" : "",
2215 qeth_get_cardname_short(card));
2216}
2217
2218void qeth_print_status_message(struct qeth_card *card)
2219{
2220 switch (card->info.type) {
2221 case QETH_CARD_TYPE_OSAE:
2222 /* VM will use a non-zero first character
2223 * to indicate a HiperSockets like reporting
2224 * of the level OSA sets the first character to zero
2225 * */
2226 if (!card->info.mcl_level[0]) {
2227 sprintf(card->info.mcl_level, "%02x%02x",
2228 card->info.mcl_level[2],
2229 card->info.mcl_level[3]);
2230
2231 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2232 break;
2233 }
2234 /* fallthrough */
2235 case QETH_CARD_TYPE_IQD:
2236 if (card->info.guestlan) {
2237 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2238 card->info.mcl_level[0]];
2239 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2240 card->info.mcl_level[1]];
2241 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2242 card->info.mcl_level[2]];
2243 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2244 card->info.mcl_level[3]];
2245 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2246 }
2247 break;
2248 default:
2249 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2250 }
2251 if (card->info.portname_required)
2252 qeth_print_status_with_portname(card);
2253 else
2254 qeth_print_status_no_portname(card);
2255}
2256EXPORT_SYMBOL_GPL(qeth_print_status_message);
2257
2258void qeth_put_buffer_pool_entry(struct qeth_card *card,
2259 struct qeth_buffer_pool_entry *entry)
2260{
2261 QETH_DBF_TEXT(TRACE, 6, "ptbfplen");
2262 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
2263}
2264EXPORT_SYMBOL_GPL(qeth_put_buffer_pool_entry);
2265
2266static void qeth_initialize_working_pool_list(struct qeth_card *card)
2267{
2268 struct qeth_buffer_pool_entry *entry;
2269
2270 QETH_DBF_TEXT(TRACE, 5, "inwrklst");
2271
2272 list_for_each_entry(entry,
2273 &card->qdio.init_pool.entry_list, init_list) {
2274 qeth_put_buffer_pool_entry(card, entry);
2275 }
2276}
2277
2278static inline struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2279 struct qeth_card *card)
2280{
2281 struct list_head *plh;
2282 struct qeth_buffer_pool_entry *entry;
2283 int i, free;
2284 struct page *page;
2285
2286 if (list_empty(&card->qdio.in_buf_pool.entry_list))
2287 return NULL;
2288
2289 list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
2290 entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
2291 free = 1;
2292 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2293 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2294 free = 0;
2295 break;
2296 }
2297 }
2298 if (free) {
2299 list_del_init(&entry->list);
2300 return entry;
2301 }
2302 }
2303
2304 /* no free buffer in pool so take first one and swap pages */
2305 entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2306 struct qeth_buffer_pool_entry, list);
2307 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2308 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2309 page = alloc_page(GFP_ATOMIC);
2310 if (!page) {
2311 return NULL;
2312 } else {
2313 free_page((unsigned long)entry->elements[i]);
2314 entry->elements[i] = page_address(page);
2315 if (card->options.performance_stats)
2316 card->perf_stats.sg_alloc_page_rx++;
2317 }
2318 }
2319 }
2320 list_del_init(&entry->list);
2321 return entry;
2322}
2323
2324static int qeth_init_input_buffer(struct qeth_card *card,
2325 struct qeth_qdio_buffer *buf)
2326{
2327 struct qeth_buffer_pool_entry *pool_entry;
2328 int i;
2329
2330 pool_entry = qeth_find_free_buffer_pool_entry(card);
2331 if (!pool_entry)
2332 return 1;
2333
2334 /*
2335 * since the buffer is accessed only from the input_tasklet
2336 * there shouldn't be a need to synchronize; also, since we use
2337 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off
2338 * buffers
2339 */
2340 BUG_ON(!pool_entry);
2341
2342 buf->pool_entry = pool_entry;
2343 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2344 buf->buffer->element[i].length = PAGE_SIZE;
2345 buf->buffer->element[i].addr = pool_entry->elements[i];
2346 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2347 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
2348 else
2349 buf->buffer->element[i].flags = 0;
2350 }
2351 return 0;
2352}
2353
/*
 * Reset and (re)populate the card's QDIO queues: hand the initial set of
 * input buffers to the hardware and reinitialize every outbound queue to
 * the empty, unlocked, non-packing state.
 * Returns 0 on success or the error code from do_QDIO()/qdio_synchronize().
 */
int qeth_init_qdio_queues(struct qeth_card *card)
{
	int i, j;
	int rc;

	QETH_DBF_TEXT(SETUP, 2, "initqdqs");

	/* inbound queue */
	memset(card->qdio.in_q->qdio_bufs, 0,
	       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
	qeth_initialize_working_pool_list(card);
	/*give only as many buffers to hardware as we have buffer pool entries*/
	for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
		qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
	/* one buffer stays back so the requeue logic always has a start */
	card->qdio.in_q->next_buf_to_init =
		card->qdio.in_buf_pool.buf_count - 1;
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
		     card->qdio.in_buf_pool.buf_count - 1, NULL);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		return rc;
	}
	rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		return rc;
	}
	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		memset(card->qdio.out_qs[i]->qdio_bufs, 0,
		       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
			qeth_clear_output_buffer(card->qdio.out_qs[i],
					&card->qdio.out_qs[i]->bufs[j]);
		}
		card->qdio.out_qs[i]->card = card;
		card->qdio.out_qs[i]->next_buf_to_fill = 0;
		card->qdio.out_qs[i]->do_pack = 0;
		atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
		atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
		atomic_set(&card->qdio.out_qs[i]->state,
			   QETH_OUT_Q_UNLOCKED);
	}
	return 0;
}
2399EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
2400
2401static inline __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
2402{
2403 switch (link_type) {
2404 case QETH_LINK_TYPE_HSTR:
2405 return 2;
2406 default:
2407 return 1;
2408 }
2409}
2410
2411static void qeth_fill_ipacmd_header(struct qeth_card *card,
2412 struct qeth_ipa_cmd *cmd, __u8 command,
2413 enum qeth_prot_versions prot)
2414{
2415 memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
2416 cmd->hdr.command = command;
2417 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
2418 cmd->hdr.seqno = card->seqno.ipa;
2419 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
2420 cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
2421 if (card->options.layer2)
2422 cmd->hdr.prim_version_no = 2;
2423 else
2424 cmd->hdr.prim_version_no = 1;
2425 cmd->hdr.param_count = 1;
2426 cmd->hdr.prot_version = prot;
2427 cmd->hdr.ipa_supported = 0;
2428 cmd->hdr.ipa_enabled = 0;
2429}
2430
2431struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
2432 enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
2433{
2434 struct qeth_cmd_buffer *iob;
2435 struct qeth_ipa_cmd *cmd;
2436
2437 iob = qeth_wait_for_buffer(&card->write);
2438 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2439 qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
2440
2441 return iob;
2442}
2443EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
2444
/*
 * Wrap an IPA command into the MPC PDU: copy the PDU header template, then
 * patch in the protocol type and the remote ULP connection token.  The
 * template copy must come first, since the later memcpys write into it.
 */
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		char prot_type)
{
	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
		&card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
}
2453EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
2454
2455int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2456 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
2457 unsigned long),
2458 void *reply_param)
2459{
2460 int rc;
2461 char prot_type;
2462
2463 QETH_DBF_TEXT(TRACE, 4, "sendipa");
2464
2465 if (card->options.layer2)
2466 if (card->info.type == QETH_CARD_TYPE_OSN)
2467 prot_type = QETH_PROT_OSN2;
2468 else
2469 prot_type = QETH_PROT_LAYER2;
2470 else
2471 prot_type = QETH_PROT_TCPIP;
2472 qeth_prepare_ipa_cmd(card, iob, prot_type);
2473 rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
2474 iob, reply_cb, reply_param);
2475 return rc;
2476}
2477EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
2478
2479static int qeth_send_startstoplan(struct qeth_card *card,
2480 enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
2481{
2482 int rc;
2483 struct qeth_cmd_buffer *iob;
2484
2485 iob = qeth_get_ipacmd_buffer(card, ipacmd, prot);
2486 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
2487
2488 return rc;
2489}
2490
2491int qeth_send_startlan(struct qeth_card *card)
2492{
2493 int rc;
2494
2495 QETH_DBF_TEXT(SETUP, 2, "strtlan");
2496
2497 rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, 0);
2498 return rc;
2499}
2500EXPORT_SYMBOL_GPL(qeth_send_startlan);
2501
2502int qeth_send_stoplan(struct qeth_card *card)
2503{
2504 int rc = 0;
2505
2506 /*
2507 * TODO: according to the IPA format document page 14,
2508 * TCP/IP (we!) never issue a STOPLAN
2509 * is this right ?!?
2510 */
2511 QETH_DBF_TEXT(SETUP, 2, "stoplan");
2512
2513 rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, 0);
2514 return rc;
2515}
2516EXPORT_SYMBOL_GPL(qeth_send_stoplan);
2517
2518int qeth_default_setadapterparms_cb(struct qeth_card *card,
2519 struct qeth_reply *reply, unsigned long data)
2520{
2521 struct qeth_ipa_cmd *cmd;
2522
2523 QETH_DBF_TEXT(TRACE, 4, "defadpcb");
2524
2525 cmd = (struct qeth_ipa_cmd *) data;
2526 if (cmd->hdr.return_code == 0)
2527 cmd->hdr.return_code =
2528 cmd->data.setadapterparms.hdr.return_code;
2529 return 0;
2530}
2531EXPORT_SYMBOL_GPL(qeth_default_setadapterparms_cb);
2532
2533static int qeth_query_setadapterparms_cb(struct qeth_card *card,
2534 struct qeth_reply *reply, unsigned long data)
2535{
2536 struct qeth_ipa_cmd *cmd;
2537
2538 QETH_DBF_TEXT(TRACE, 3, "quyadpcb");
2539
2540 cmd = (struct qeth_ipa_cmd *) data;
2541 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f)
2542 card->info.link_type =
2543 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
2544 card->options.adp.supported_funcs =
2545 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
2546 return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
2547}
2548
2549struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
2550 __u32 command, __u32 cmdlen)
2551{
2552 struct qeth_cmd_buffer *iob;
2553 struct qeth_ipa_cmd *cmd;
2554
2555 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
2556 QETH_PROT_IPV4);
2557 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2558 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
2559 cmd->data.setadapterparms.hdr.command_code = command;
2560 cmd->data.setadapterparms.hdr.used_total = 1;
2561 cmd->data.setadapterparms.hdr.seq_no = 1;
2562
2563 return iob;
2564}
2565EXPORT_SYMBOL_GPL(qeth_get_adapter_cmd);
2566
2567int qeth_query_setadapterparms(struct qeth_card *card)
2568{
2569 int rc;
2570 struct qeth_cmd_buffer *iob;
2571
2572 QETH_DBF_TEXT(TRACE, 3, "queryadp");
2573 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
2574 sizeof(struct qeth_ipacmd_setadpparms));
2575 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
2576 return rc;
2577}
2578EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
2579
2580int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2581 unsigned int siga_error, const char *dbftext)
2582{
2583 if (qdio_error || siga_error) {
2584 QETH_DBF_TEXT(TRACE, 2, dbftext);
2585 QETH_DBF_TEXT(QERR, 2, dbftext);
2586 QETH_DBF_TEXT_(QERR, 2, " F15=%02X",
2587 buf->element[15].flags & 0xff);
2588 QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
2589 buf->element[14].flags & 0xff);
2590 QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
2591 QETH_DBF_TEXT_(QERR, 2, " serr=%X", siga_error);
2592 return 1;
2593 }
2594 return 0;
2595}
2596EXPORT_SYMBOL_GPL(qeth_check_qdio_errors);
2597
/*
 * Requeue processed input buffers to the hardware, starting at
 * next_buf_to_init.  Buffers are only handed back once enough have
 * accumulated (QETH_IN_BUF_REQUEUE_THRESHOLD) to avoid excessive SIGAs.
 * Also manages the force_alloc_skb fallback when buffer pool entries run
 * short (memory pressure).
 */
void qeth_queue_input_buffer(struct qeth_card *card, int index)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	int count;
	int i;
	int rc;
	int newcount = 0;

	QETH_DBF_TEXT(TRACE, 6, "queinbuf");
	/* number of processed buffers between next_buf_to_init and index,
	 * accounting for ring wrap-around */
	count = (index < queue->next_buf_to_init)?
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init - index) :
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are in memory shortage so we switch back to
			   traditional skb allocation and drop packages */
			if (!atomic_read(&card->force_alloc_skb) &&
			    net_ratelimit())
				PRINT_WARN("Switch to alloc skb\n");
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			/* pool recovered: count force_alloc_skb back down
			 * towards zero (sg mode) */
			if ((atomic_read(&card->force_alloc_skb) == 1) &&
			    net_ratelimit())
				PRINT_WARN("Switch to sg\n");
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		/*
		 * according to old code it should be avoided to requeue all
		 * 128 buffers in order to benefit from PCI avoidance.
		 * this function keeps at least one buffer (the buffer at
		 * 'index') un-requeued -> this buffer is the first buffer that
		 * will be requeued the next time
		 */
		if (card->options.performance_stats) {
			card->perf_stats.inbound_do_qdio_cnt++;
			card->perf_stats.inbound_do_qdio_start_time =
				qeth_get_micros();
		}
		rc = do_QDIO(CARD_DDEV(card),
			     QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
			     0, queue->next_buf_to_init, count, NULL);
		if (card->options.performance_stats)
			card->perf_stats.inbound_do_qdio_time +=
				qeth_get_micros() -
				card->perf_stats.inbound_do_qdio_start_time;
		if (rc) {
			PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
				   "return %i (device %s).\n",
				   rc, CARD_DDEV_ID(card));
			QETH_DBF_TEXT(TRACE, 2, "qinberr");
			QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
		}
		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
					  QDIO_MAX_BUFFERS_PER_Q;
	}
}
2669EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
2670
/*
 * Decode the outcome of a transmitted buffer from the SIGA condition code
 * (low two bits of siga_err) plus the QDIO error and SBAL flag byte 15.
 * Returns one of QETH_SEND_ERROR_NONE / _RETRY / _LINK_FAILURE / _KICK_IT;
 * the caller triggers a recovery on KICK_IT.
 */
static int qeth_handle_send_error(struct qeth_card *card,
		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err,
		unsigned int siga_err)
{
	int sbalf15 = buffer->buffer->element[15].flags & 0xff;
	int cc = siga_err & 3;

	QETH_DBF_TEXT(TRACE, 6, "hdsnderr");
	qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr");
	switch (cc) {
	case 0:
		/* SIGA succeeded; a pending qdio_err still means trouble */
		if (qdio_err) {
			QETH_DBF_TEXT(TRACE, 1, "lnkfail");
			QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
			QETH_DBF_TEXT_(TRACE, 1, "%04x %02x",
				       (u16)qdio_err, (u8)sbalf15);
			return QETH_SEND_ERROR_LINK_FAILURE;
		}
		return QETH_SEND_ERROR_NONE;
	case 2:
		if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
			QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B");
			QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
			return QETH_SEND_ERROR_KICK_IT;
		}
		/* sbalf15 values 15..31 indicate a retryable condition */
		if ((sbalf15 >= 15) && (sbalf15 <= 31))
			return QETH_SEND_ERROR_RETRY;
		return QETH_SEND_ERROR_LINK_FAILURE;
		/* look at qdio_error and sbalf 15 */
	case 1:
		QETH_DBF_TEXT(TRACE, 1, "SIGAcc1");
		QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
		return QETH_SEND_ERROR_LINK_FAILURE;
	case 3:
	default:
		QETH_DBF_TEXT(TRACE, 1, "SIGAcc3");
		QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
		return QETH_SEND_ERROR_KICK_IT;
	}
}
2711
2712/*
2713 * Switched to packing state if the number of used buffers on a queue
2714 * reaches a certain limit.
2715 */
2716static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2717{
2718 if (!queue->do_pack) {
2719 if (atomic_read(&queue->used_buffers)
2720 >= QETH_HIGH_WATERMARK_PACK){
2721 /* switch non-PACKING -> PACKING */
2722 QETH_DBF_TEXT(TRACE, 6, "np->pack");
2723 if (queue->card->options.performance_stats)
2724 queue->card->perf_stats.sc_dp_p++;
2725 queue->do_pack = 1;
2726 }
2727 }
2728}
2729
2730/*
2731 * Switches from packing to non-packing mode. If there is a packing
2732 * buffer on the queue this buffer will be prepared to be flushed.
2733 * In that case 1 is returned to inform the caller. If no buffer
2734 * has to be flushed, zero is returned.
2735 */
2736static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2737{
2738 struct qeth_qdio_out_buffer *buffer;
2739 int flush_count = 0;
2740
2741 if (queue->do_pack) {
2742 if (atomic_read(&queue->used_buffers)
2743 <= QETH_LOW_WATERMARK_PACK) {
2744 /* switch PACKING -> non-PACKING */
2745 QETH_DBF_TEXT(TRACE, 6, "pack->np");
2746 if (queue->card->options.performance_stats)
2747 queue->card->perf_stats.sc_p_dp++;
2748 queue->do_pack = 0;
2749 /* flush packing buffers */
2750 buffer = &queue->bufs[queue->next_buf_to_fill];
2751 if ((atomic_read(&buffer->state) ==
2752 QETH_QDIO_BUF_EMPTY) &&
2753 (buffer->next_element_to_fill > 0)) {
2754 atomic_set(&buffer->state,
2755 QETH_QDIO_BUF_PRIMED);
2756 flush_count++;
2757 queue->next_buf_to_fill =
2758 (queue->next_buf_to_fill + 1) %
2759 QDIO_MAX_BUFFERS_PER_Q;
2760 }
2761 }
2762 }
2763 return flush_count;
2764}
2765
2766/*
2767 * Called to flush a packing buffer if no more pci flags are on the queue.
2768 * Checks if there is a packing buffer and prepares it to be flushed.
2769 * In that case returns 1, otherwise zero.
2770 */
2771static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2772{
2773 struct qeth_qdio_out_buffer *buffer;
2774
2775 buffer = &queue->bufs[queue->next_buf_to_fill];
2776 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
2777 (buffer->next_element_to_fill > 0)) {
2778 /* it's a packing buffer */
2779 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
2780 queue->next_buf_to_fill =
2781 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
2782 return 1;
2783 }
2784 return 0;
2785}
2786
/*
 * Hand 'count' filled buffers starting at ring position 'index' to the
 * hardware via do_QDIO.  For non-IQD cards, a PCI request flag is set on
 * a buffer when we are close to packing mode or when no PCI is currently
 * outstanding.  On a do_QDIO failure a recovery is scheduled, since this
 * must not happen under normal circumstances.
 */
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
		int index, int count)
{
	struct qeth_qdio_out_buffer *buf;
	int rc;
	int i;
	unsigned int qdio_flags;

	QETH_DBF_TEXT(TRACE, 6, "flushbuf");

	for (i = index; i < index + count; ++i) {
		buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
		buf->buffer->element[buf->next_element_to_fill - 1].flags |=
				SBAL_FLAGS_LAST_ENTRY;

		if (queue->card->info.type == QETH_CARD_TYPE_IQD)
			continue;

		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].flags |= 0x40;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * there's no outstanding PCI any more, so we
				 * have to request a PCI to be sure the PCI
				 * will wake at some time in the future then we
				 * can flush packed buffers that might still be
				 * hanging around, which can happen if no
				 * further send was requested by the stack
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].flags |= 0x40;
			}
		}
	}

	queue->card->dev->trans_start = jiffies;
	if (queue->card->options.performance_stats) {
		queue->card->perf_stats.outbound_do_qdio_cnt++;
		queue->card->perf_stats.outbound_do_qdio_start_time =
			qeth_get_micros();
	}
	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
	if (under_int)
		qdio_flags |= QDIO_FLAG_UNDER_INTERRUPT;
	if (atomic_read(&queue->set_pci_flags_count))
		qdio_flags |= QDIO_FLAG_PCI_OUT;
	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
		     queue->queue_no, index, count, NULL);
	if (queue->card->options.performance_stats)
		queue->card->perf_stats.outbound_do_qdio_time +=
			qeth_get_micros() -
			queue->card->perf_stats.outbound_do_qdio_start_time;
	if (rc) {
		QETH_DBF_TEXT(TRACE, 2, "flushbuf");
		QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
		QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_DDEV_ID(queue->card));
		queue->card->stats.tx_errors += count;
		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
		return;
	}
	atomic_add(count, &queue->used_buffers);
	if (queue->card->options.performance_stats)
		queue->card->perf_stats.bufs_sent += count;
}
2862
/*
 * Housekeeping for an outbound queue after buffers completed: switch back
 * to non-packing mode and/or flush a leftover packing buffer when nothing
 * else will trigger it.  Only acts when the queue lock can be taken
 * (QETH_OUT_Q_UNLOCKED -> LOCKED_FLUSH), i.e. do_send_packet is not active.
 */
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	int index;
	int flush_cnt = 0;
	int q_was_packing = 0;

	/*
	 * check if we have to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
				QETH_OUT_Q_UNLOCKED) {
			/*
			 * If we get in here, there was no action in
			 * do_send_packet. So, we check if there is a
			 * packing buffer to be flushed here.
			 */
			netif_stop_queue(queue->card->dev);
			index = queue->next_buf_to_fill;
			q_was_packing = queue->do_pack;
			/* queue->do_pack may change */
			barrier();
			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
			if (!flush_cnt &&
			    !atomic_read(&queue->set_pci_flags_count))
				flush_cnt +=
					qeth_flush_buffers_on_no_pci(queue);
			if (queue->card->options.performance_stats &&
			    q_was_packing)
				queue->card->perf_stats.bufs_sent_pack +=
					flush_cnt;
			if (flush_cnt)
				qeth_flush_buffers(queue, 1, index, flush_cnt);
			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		}
	}
}
2902
/*
 * QDIO completion handler for an outbound queue.  Releases the completed
 * buffers, schedules a recovery on fatal conditions (ACTIVATE check
 * condition or a KICK_IT send error), performs queue housekeeping for
 * non-IQD cards and wakes the transmit queue.
 */
void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
		unsigned int qdio_error, unsigned int siga_error,
		unsigned int __queue, int first_element, int count,
		unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *) card_ptr;
	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
	struct qeth_qdio_out_buffer *buffer;
	int i;

	QETH_DBF_TEXT(TRACE, 6, "qdouhdl");
	if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
		if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
			QETH_DBF_TEXT(TRACE, 2, "achkcond");
			QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
			QETH_DBF_TEXT_(TRACE, 2, "%08x", status);
			netif_stop_queue(card->dev);
			qeth_schedule_recovery(card);
			return;
		}
	}
	if (card->options.performance_stats) {
		card->perf_stats.outbound_handler_cnt++;
		card->perf_stats.outbound_handler_start_time =
			qeth_get_micros();
	}
	for (i = first_element; i < (first_element + count); ++i) {
		buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
		/*we only handle the KICK_IT error by doing a recovery */
		if (qeth_handle_send_error(card, buffer,
					   qdio_error, siga_error)
				== QETH_SEND_ERROR_KICK_IT){
			netif_stop_queue(card->dev);
			qeth_schedule_recovery(card);
			return;
		}
		qeth_clear_output_buffer(queue, buffer);
	}
	atomic_sub(count, &queue->used_buffers);
	/* check if we need to do something on this outbound queue */
	if (card->info.type != QETH_CARD_TYPE_IQD)
		qeth_check_outbound_queue(queue);

	netif_wake_queue(queue->card->dev);
	if (card->options.performance_stats)
		card->perf_stats.outbound_handler_time += qeth_get_micros() -
			card->perf_stats.outbound_handler_start_time;
}
2951EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
2952
2953int qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
2954{
2955 int cast_type = RTN_UNSPEC;
2956
2957 if (card->info.type == QETH_CARD_TYPE_OSN)
2958 return cast_type;
2959
2960 if (skb->dst && skb->dst->neighbour) {
2961 cast_type = skb->dst->neighbour->type;
2962 if ((cast_type == RTN_BROADCAST) ||
2963 (cast_type == RTN_MULTICAST) ||
2964 (cast_type == RTN_ANYCAST))
2965 return cast_type;
2966 else
2967 return RTN_UNSPEC;
2968 }
2969 /* try something else */
2970 if (skb->protocol == ETH_P_IPV6)
2971 return (skb_network_header(skb)[24] == 0xff) ?
2972 RTN_MULTICAST : 0;
2973 else if (skb->protocol == ETH_P_IP)
2974 return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
2975 RTN_MULTICAST : 0;
2976 /* ... */
2977 if (!memcmp(skb->data, skb->dev->broadcast, 6))
2978 return RTN_BROADCAST;
2979 else {
2980 u16 hdr_mac;
2981
2982 hdr_mac = *((u16 *)skb->data);
2983 /* tr multicast? */
2984 switch (card->info.link_type) {
2985 case QETH_LINK_TYPE_HSTR:
2986 case QETH_LINK_TYPE_LANE_TR:
2987 if ((hdr_mac == QETH_TR_MAC_NC) ||
2988 (hdr_mac == QETH_TR_MAC_C))
2989 return RTN_MULTICAST;
2990 break;
2991 /* eth or so multicast? */
2992 default:
2993 if ((hdr_mac == QETH_ETH_MAC_V4) ||
2994 (hdr_mac == QETH_ETH_MAC_V6))
2995 return RTN_MULTICAST;
2996 }
2997 }
2998 return cast_type;
2999}
3000EXPORT_SYMBOL_GPL(qeth_get_cast_type);
3001
/*
 * Select the outbound queue for an skb.  With four queues, multicast may
 * be pinned to a dedicated queue, and IPv4 traffic can be spread by TOS
 * bits or precedence; everything else falls back to the default queue.
 * IPv6 priority queueing is not implemented yet.
 */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
			int ipv, int cast_type)
{
	if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
		return card->qdio.default_out_queue;
	switch (card->qdio.no_out_queues) {
	case 4:
		if (cast_type && card->info.is_multicast_different)
			return card->info.is_multicast_different &
				(card->qdio.no_out_queues - 1);
		if (card->qdio.do_prio_queueing && (ipv == 4)) {
			const u8 tos = ip_hdr(skb)->tos;

			if (card->qdio.do_prio_queueing ==
				QETH_PRIO_Q_ING_TOS) {
				if (tos & IP_TOS_NOTIMPORTANT)
					return 3;
				if (tos & IP_TOS_HIGHRELIABILITY)
					return 2;
				if (tos & IP_TOS_HIGHTHROUGHPUT)
					return 1;
				if (tos & IP_TOS_LOWDELAY)
					return 0;
			}
			/* precedence mode: higher precedence -> lower queue */
			if (card->qdio.do_prio_queueing ==
				QETH_PRIO_Q_ING_PREC)
				return 3 - (tos >> 6);
		} else if (card->qdio.do_prio_queueing && (ipv == 6)) {
			/* TODO: IPv6!!! */
		}
		return card->qdio.default_out_queue;
	case 1: /* fallthrough for single-out-queue 1920-device */
	default:
		return card->qdio.default_out_queue;
	}
}
3038EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3039
static void __qeth_free_new_skb(struct sk_buff *orig_skb,
		struct sk_buff *new_skb)
{
	/* Drop new_skb only when it is a reallocated copy of orig_skb. */
	if (new_skb != orig_skb)
		dev_kfree_skb_any(new_skb);
}
3046
static inline struct sk_buff *qeth_realloc_headroom(struct qeth_card *card,
		struct sk_buff *skb, int size)
{
	/*
	 * Ensure at least 'size' bytes of headroom: returns the original
	 * skb when it already suffices, a reallocated copy otherwise, or
	 * NULL on allocation failure (which is also logged).
	 */
	struct sk_buff *result;

	if (skb_headroom(skb) >= size)
		return skb;
	result = skb_realloc_headroom(skb, size);
	if (!result)
		PRINT_ERR("Could not realloc headroom for qeth_hdr "
			"on interface %s", QETH_CARD_IFNAME(card));
	return result;
}
3060
3061struct sk_buff *qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
3062 struct qeth_hdr **hdr)
3063{
3064 struct sk_buff *new_skb;
3065
3066 QETH_DBF_TEXT(TRACE, 6, "prepskb");
3067
3068 new_skb = qeth_realloc_headroom(card, skb,
3069 sizeof(struct qeth_hdr));
3070 if (!new_skb)
3071 return NULL;
3072
3073 *hdr = ((struct qeth_hdr *)qeth_push_skb(card, new_skb,
3074 sizeof(struct qeth_hdr)));
3075 if (*hdr == NULL) {
3076 __qeth_free_new_skb(skb, new_skb);
3077 return NULL;
3078 }
3079 return new_skb;
3080}
3081EXPORT_SYMBOL_GPL(qeth_prepare_skb);
3082
3083int qeth_get_elements_no(struct qeth_card *card, void *hdr,
3084 struct sk_buff *skb, int elems)
3085{
3086 int elements_needed = 0;
3087
3088 if (skb_shinfo(skb)->nr_frags > 0)
3089 elements_needed = (skb_shinfo(skb)->nr_frags + 1);
3090 if (elements_needed == 0)
3091 elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
3092 + skb->len) >> PAGE_SHIFT);
3093 if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
3094 PRINT_ERR("Invalid size of IP packet "
3095 "(Number=%d / Length=%d). Discarded.\n",
3096 (elements_needed+elems), skb->len);
3097 return 0;
3098 }
3099 return elements_needed;
3100}
3101EXPORT_SYMBOL_GPL(qeth_get_elements_no);
3102
3103static void __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
3104 int is_tso, int *next_element_to_fill)
3105{
3106 int length = skb->len;
3107 int length_here;
3108 int element;
3109 char *data;
3110 int first_lap ;
3111
3112 element = *next_element_to_fill;
3113 data = skb->data;
3114 first_lap = (is_tso == 0 ? 1 : 0);
3115
3116 while (length > 0) {
3117 /* length_here is the remaining amount of data in this page */
3118 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
3119 if (length < length_here)
3120 length_here = length;
3121
3122 buffer->element[element].addr = data;
3123 buffer->element[element].length = length_here;
3124 length -= length_here;
3125 if (!length) {
3126 if (first_lap)
3127 buffer->element[element].flags = 0;
3128 else
3129 buffer->element[element].flags =
3130 SBAL_FLAGS_LAST_FRAG;
3131 } else {
3132 if (first_lap)
3133 buffer->element[element].flags =
3134 SBAL_FLAGS_FIRST_FRAG;
3135 else
3136 buffer->element[element].flags =
3137 SBAL_FLAGS_MIDDLE_FRAG;
3138 }
3139 data += length_here;
3140 element++;
3141 first_lap = 0;
3142 }
3143 *next_element_to_fill = element;
3144}
3145
/*
 * Fill an outbound buffer with the skb's data.  For TSO frames the first
 * SBAL element gets only the (extended) header; the payload follows via
 * __qeth_fill_buffer or __qeth_fill_buffer_frag.  Returns 1 when the
 * buffer was primed for flushing (non-packing mode, or a packing buffer
 * that became full), otherwise 0.  Takes a reference on the skb and
 * queues it on the buffer's skb_list for release on completion.
 */
static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf, struct sk_buff *skb)
{
	struct qdio_buffer *buffer;
	struct qeth_hdr_tso *hdr;
	int flush_cnt = 0, hdr_len, large_send = 0;

	QETH_DBF_TEXT(TRACE, 6, "qdfillbf");

	buffer = buf->buffer;
	atomic_inc(&skb->users);
	skb_queue_tail(&buf->skb_list, skb);

	hdr = (struct qeth_hdr_tso *) skb->data;
	/*check first on TSO ....*/
	if (hdr->hdr.hdr.l3.id == QETH_HEADER_TYPE_TSO) {
		int element = buf->next_element_to_fill;

		hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
		/*fill first buffer entry only with header information */
		buffer->element[element].addr = skb->data;
		buffer->element[element].length = hdr_len;
		buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
		buf->next_element_to_fill++;
		/* advance past the header so only payload remains */
		skb->data += hdr_len;
		skb->len -= hdr_len;
		large_send = 1;
	}
	if (skb_shinfo(skb)->nr_frags == 0)
		__qeth_fill_buffer(skb, buffer, large_send,
				(int *)&buf->next_element_to_fill);
	else
		__qeth_fill_buffer_frag(skb, buffer, large_send,
				(int *)&buf->next_element_to_fill);

	if (!queue->do_pack) {
		QETH_DBF_TEXT(TRACE, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
		flush_cnt = 1;
	} else {
		QETH_DBF_TEXT(TRACE, 6, "fillbfpa");
		if (queue->card->options.performance_stats)
			queue->card->perf_stats.skbs_sent_pack++;
		if (buf->next_element_to_fill >=
				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/*
			 * packed buffer is full -> set state PRIMED
			 * -> will be flushed
			 */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt = 1;
		}
	}
	return flush_cnt;
}
3202
/*
 * Fast transmit path: claim the next empty output buffer of @queue,
 * fill it with @skb (or with the EDDP context @ctx) and flush it
 * immediately.
 *
 * The queue "lock" is taken by spinning with atomic_cmpxchg() on
 * queue->state and released again before the fill/flush work; the
 * buffer(s) are reserved by advancing next_buf_to_fill while locked,
 * so no other sender can claim them afterwards.
 *
 * Returns 0 on success, -EBUSY if the next buffer is not empty or the
 * EDDP context needs more free buffers than are available.
 */
int qeth_do_send_packet_fast(struct qeth_card *card,
                struct qeth_qdio_out_q *queue, struct sk_buff *skb,
                struct qeth_hdr *hdr, int elements_needed,
                struct qeth_eddp_context *ctx)
{
        struct qeth_qdio_out_buffer *buffer;
        int buffers_needed = 0;
        int flush_cnt = 0;
        int index;

        QETH_DBF_TEXT(TRACE, 6, "dosndpfa");

        /* spin until we get the queue ... */
        while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
                              QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
        /* ... now we've got the queue */
        index = queue->next_buf_to_fill;
        buffer = &queue->bufs[queue->next_buf_to_fill];
        /*
         * check if buffer is empty to make sure that we do not 'overtake'
         * ourselves and try to fill a buffer that is already primed
         */
        if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
                goto out;
        if (ctx == NULL)
                queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
                        QDIO_MAX_BUFFERS_PER_Q;
        else {
                /* EDDP may span several buffers; reserve them all */
                buffers_needed = qeth_eddp_check_buffers_for_context(queue,
                        ctx);
                if (buffers_needed < 0)
                        goto out;
                queue->next_buf_to_fill =
                        (queue->next_buf_to_fill + buffers_needed) %
                        QDIO_MAX_BUFFERS_PER_Q;
        }
        /* reservation done; drop the lock before the slow fill/flush */
        atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
        if (ctx == NULL) {
                qeth_fill_buffer(queue, buffer, skb);
                qeth_flush_buffers(queue, 0, index, 1);
        } else {
                flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
                WARN_ON(buffers_needed != flush_cnt);
                qeth_flush_buffers(queue, 0, index, flush_cnt);
        }
        return 0;
out:
        atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
        return -EBUSY;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
3254
/*
 * Regular transmit path with packing support: queue @skb (or the EDDP
 * context @ctx) on @queue, switching the queue in and out of packing
 * mode as needed, and flush all buffers that became PRIMED.
 *
 * Locking: the queue is locked by spinning with atomic_cmpxchg() on
 * queue->state; it is unlocked by the atomic_dec_return() loop at the
 * end.  The output handler may raise queue->state to LOCKED_FLUSH to
 * request one more flush pass (see comment below), which is why the
 * unlock is a loop rather than a single store.
 *
 * Returns 0 on success, -EBUSY when no empty buffer (or not enough
 * buffers for the EDDP context) is available.
 */
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
                struct sk_buff *skb, struct qeth_hdr *hdr,
                int elements_needed, struct qeth_eddp_context *ctx)
{
        struct qeth_qdio_out_buffer *buffer;
        int start_index;
        int flush_count = 0;
        int do_pack = 0;
        int tmp;
        int rc = 0;

        QETH_DBF_TEXT(TRACE, 6, "dosndpkt");

        /* spin until we get the queue ... */
        while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
                              QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
        start_index = queue->next_buf_to_fill;
        buffer = &queue->bufs[queue->next_buf_to_fill];
        /*
         * check if buffer is empty to make sure that we do not 'overtake'
         * ourselves and try to fill a buffer that is already primed
         */
        if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
                atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
                return -EBUSY;
        }
        /* check if we need to switch packing state of this queue */
        qeth_switch_to_packing_if_needed(queue);
        if (queue->do_pack) {
                do_pack = 1;
                if (ctx == NULL) {
                        /* does packet fit in current buffer? */
                        if ((QETH_MAX_BUFFER_ELEMENTS(card) -
                            buffer->next_element_to_fill) < elements_needed) {
                                /* ... no -> set state PRIMED */
                                atomic_set(&buffer->state,
                                        QETH_QDIO_BUF_PRIMED);
                                flush_count++;
                                queue->next_buf_to_fill =
                                        (queue->next_buf_to_fill + 1) %
                                        QDIO_MAX_BUFFERS_PER_Q;
                                buffer = &queue->bufs[queue->next_buf_to_fill];
                                /* we did a step forward, so check buffer state
                                 * again */
                                if (atomic_read(&buffer->state) !=
                                                QETH_QDIO_BUF_EMPTY){
                                        /* flush what we primed so far
                                         * before bailing out */
                                        qeth_flush_buffers(queue, 0,
                                                start_index, flush_count);
                                        atomic_set(&queue->state,
                                                QETH_OUT_Q_UNLOCKED);
                                        return -EBUSY;
                                }
                        }
                } else {
                        /* check if we have enough elements (including following
                         * free buffers) to handle eddp context */
                        if (qeth_eddp_check_buffers_for_context(queue, ctx)
                                < 0) {
                                if (net_ratelimit())
                                        PRINT_WARN("eddp tx_dropped 1\n");
                                rc = -EBUSY;
                                goto out;
                        }
                }
        }
        if (ctx == NULL)
                tmp = qeth_fill_buffer(queue, buffer, skb);
        else {
                tmp = qeth_eddp_fill_buffer(queue, ctx,
                        queue->next_buf_to_fill);
                if (tmp < 0) {
                        PRINT_ERR("eddp tx_dropped 2\n");
                        rc = -EBUSY;
                        goto out;
                }
        }
        /* tmp = number of buffers primed by the fill above */
        queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
                                  QDIO_MAX_BUFFERS_PER_Q;
        flush_count += tmp;
out:
        if (flush_count)
                qeth_flush_buffers(queue, 0, start_index, flush_count);
        else if (!atomic_read(&queue->set_pci_flags_count))
                atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
        /*
         * queue->state will go from LOCKED -> UNLOCKED or from
         * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
         * (switch packing state or flush buffer to get another pci flag out).
         * In that case we will enter this loop
         */
        while (atomic_dec_return(&queue->state)) {
                flush_count = 0;
                start_index = queue->next_buf_to_fill;
                /* check if we can go back to non-packing state */
                flush_count += qeth_switch_to_nonpacking_if_needed(queue);
                /*
                 * check if we need to flush a packing buffer to get a pci
                 * flag out on the queue
                 */
                if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
                        flush_count += qeth_flush_buffers_on_no_pci(queue);
                if (flush_count)
                        qeth_flush_buffers(queue, 0, start_index, flush_count);
        }
        /* at this point the queue is UNLOCKED again */
        if (queue->card->options.performance_stats && do_pack)
                queue->card->perf_stats.bufs_sent_pack += flush_count;

        return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);
3366
3367static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
3368 struct qeth_reply *reply, unsigned long data)
3369{
3370 struct qeth_ipa_cmd *cmd;
3371 struct qeth_ipacmd_setadpparms *setparms;
3372
3373 QETH_DBF_TEXT(TRACE, 4, "prmadpcb");
3374
3375 cmd = (struct qeth_ipa_cmd *) data;
3376 setparms = &(cmd->data.setadapterparms);
3377
3378 qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
3379 if (cmd->hdr.return_code) {
3380 QETH_DBF_TEXT_(TRACE, 4, "prmrc%2.2x", cmd->hdr.return_code);
3381 setparms->data.mode = SET_PROMISC_MODE_OFF;
3382 }
3383 card->info.promisc_mode = setparms->data.mode;
3384 return 0;
3385}
3386
3387void qeth_setadp_promisc_mode(struct qeth_card *card)
3388{
3389 enum qeth_ipa_promisc_modes mode;
3390 struct net_device *dev = card->dev;
3391 struct qeth_cmd_buffer *iob;
3392 struct qeth_ipa_cmd *cmd;
3393
3394 QETH_DBF_TEXT(TRACE, 4, "setprom");
3395
3396 if (((dev->flags & IFF_PROMISC) &&
3397 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
3398 (!(dev->flags & IFF_PROMISC) &&
3399 (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
3400 return;
3401 mode = SET_PROMISC_MODE_OFF;
3402 if (dev->flags & IFF_PROMISC)
3403 mode = SET_PROMISC_MODE_ON;
3404 QETH_DBF_TEXT_(TRACE, 4, "mode:%x", mode);
3405
3406 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
3407 sizeof(struct qeth_ipacmd_setadpparms));
3408 cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
3409 cmd->data.setadapterparms.data.mode = mode;
3410 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
3411}
3412EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
3413
3414int qeth_change_mtu(struct net_device *dev, int new_mtu)
3415{
3416 struct qeth_card *card;
3417 char dbf_text[15];
3418
3419 card = netdev_priv(dev);
3420
3421 QETH_DBF_TEXT(TRACE, 4, "chgmtu");
3422 sprintf(dbf_text, "%8x", new_mtu);
3423 QETH_DBF_TEXT(TRACE, 4, dbf_text);
3424
3425 if (new_mtu < 64)
3426 return -EINVAL;
3427 if (new_mtu > 65535)
3428 return -EINVAL;
3429 if ((!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) &&
3430 (!qeth_mtu_is_valid(card, new_mtu)))
3431 return -EINVAL;
3432 dev->mtu = new_mtu;
3433 return 0;
3434}
3435EXPORT_SYMBOL_GPL(qeth_change_mtu);
3436
3437struct net_device_stats *qeth_get_stats(struct net_device *dev)
3438{
3439 struct qeth_card *card;
3440
3441 card = netdev_priv(dev);
3442
3443 QETH_DBF_TEXT(TRACE, 5, "getstat");
3444
3445 return &card->stats;
3446}
3447EXPORT_SYMBOL_GPL(qeth_get_stats);
3448
3449static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
3450 struct qeth_reply *reply, unsigned long data)
3451{
3452 struct qeth_ipa_cmd *cmd;
3453
3454 QETH_DBF_TEXT(TRACE, 4, "chgmaccb");
3455
3456 cmd = (struct qeth_ipa_cmd *) data;
3457 if (!card->options.layer2 ||
3458 !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
3459 memcpy(card->dev->dev_addr,
3460 &cmd->data.setadapterparms.data.change_addr.addr,
3461 OSA_ADDR_LEN);
3462 card->info.mac_bits |= QETH_LAYER2_MAC_READ;
3463 }
3464 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
3465 return 0;
3466}
3467
3468int qeth_setadpparms_change_macaddr(struct qeth_card *card)
3469{
3470 int rc;
3471 struct qeth_cmd_buffer *iob;
3472 struct qeth_ipa_cmd *cmd;
3473
3474 QETH_DBF_TEXT(TRACE, 4, "chgmac");
3475
3476 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
3477 sizeof(struct qeth_ipacmd_setadpparms));
3478 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3479 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
3480 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
3481 memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
3482 card->dev->dev_addr, OSA_ADDR_LEN);
3483 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
3484 NULL);
3485 return rc;
3486}
3487EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
3488
3489void qeth_tx_timeout(struct net_device *dev)
3490{
3491 struct qeth_card *card;
3492
3493 card = netdev_priv(dev);
3494 card->stats.tx_errors++;
3495 qeth_schedule_recovery(card);
3496}
3497EXPORT_SYMBOL_GPL(qeth_tx_timeout);
3498
3499int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
3500{
3501 struct qeth_card *card = netdev_priv(dev);
3502 int rc = 0;
3503
3504 switch (regnum) {
3505 case MII_BMCR: /* Basic mode control register */
3506 rc = BMCR_FULLDPLX;
3507 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
3508 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
3509 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
3510 rc |= BMCR_SPEED100;
3511 break;
3512 case MII_BMSR: /* Basic mode status register */
3513 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
3514 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
3515 BMSR_100BASE4;
3516 break;
3517 case MII_PHYSID1: /* PHYS ID 1 */
3518 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
3519 dev->dev_addr[2];
3520 rc = (rc >> 5) & 0xFFFF;
3521 break;
3522 case MII_PHYSID2: /* PHYS ID 2 */
3523 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
3524 break;
3525 case MII_ADVERTISE: /* Advertisement control reg */
3526 rc = ADVERTISE_ALL;
3527 break;
3528 case MII_LPA: /* Link partner ability reg */
3529 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
3530 LPA_100BASE4 | LPA_LPACK;
3531 break;
3532 case MII_EXPANSION: /* Expansion register */
3533 break;
3534 case MII_DCOUNTER: /* disconnect counter */
3535 break;
3536 case MII_FCSCOUNTER: /* false carrier counter */
3537 break;
3538 case MII_NWAYTEST: /* N-way auto-neg test register */
3539 break;
3540 case MII_RERRCOUNTER: /* rx error counter */
3541 rc = card->stats.rx_errors;
3542 break;
3543 case MII_SREVISION: /* silicon revision */
3544 break;
3545 case MII_RESV1: /* reserved 1 */
3546 break;
3547 case MII_LBRERROR: /* loopback, rx, bypass error */
3548 break;
3549 case MII_PHYADDR: /* physical address */
3550 break;
3551 case MII_RESV2: /* reserved 2 */
3552 break;
3553 case MII_TPISTATUS: /* TPI status for 10mbps */
3554 break;
3555 case MII_NCONFIG: /* network interface config */
3556 break;
3557 default:
3558 break;
3559 }
3560 return rc;
3561}
3562EXPORT_SYMBOL_GPL(qeth_mdio_read);
3563
3564static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
3565 struct qeth_cmd_buffer *iob, int len,
3566 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
3567 unsigned long),
3568 void *reply_param)
3569{
3570 u16 s1, s2;
3571
3572 QETH_DBF_TEXT(TRACE, 4, "sendsnmp");
3573
3574 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
3575 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
3576 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3577 /* adjust PDU length fields in IPA_PDU_HEADER */
3578 s1 = (u32) IPA_PDU_HEADER_SIZE + len;
3579 s2 = (u32) len;
3580 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
3581 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
3582 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
3583 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
3584 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
3585 reply_cb, reply_param);
3586}
3587
/*
 * Reply callback for SNMP commands: append the payload of each reply
 * part to the user result buffer described by reply->param (a
 * qeth_arp_query_info).
 *
 * Returns 1 while more reply parts are expected (seq_no < used_total),
 * 0 when the transfer is complete or an error occurred (errors are
 * signalled to the caller through cmd->hdr.return_code).
 */
static int qeth_snmp_command_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long sdata)
{
        struct qeth_ipa_cmd *cmd;
        struct qeth_arp_query_info *qinfo;
        struct qeth_snmp_cmd *snmp;
        unsigned char *data;
        __u16 data_len;

        QETH_DBF_TEXT(TRACE, 3, "snpcmdcb");

        cmd = (struct qeth_ipa_cmd *) sdata;
        /* reply->offset points from the PDU start to the IPA command */
        data = (unsigned char *)((char *)cmd - reply->offset);
        qinfo = (struct qeth_arp_query_info *) reply->param;
        snmp = &cmd->data.setadapterparms.data.snmp;

        if (cmd->hdr.return_code) {
                QETH_DBF_TEXT_(TRACE, 4, "scer1%i", cmd->hdr.return_code);
                return 0;
        }
        if (cmd->data.setadapterparms.hdr.return_code) {
                /* propagate the setadapterparms error to the outer header */
                cmd->hdr.return_code =
                        cmd->data.setadapterparms.hdr.return_code;
                QETH_DBF_TEXT_(TRACE, 4, "scer2%i", cmd->hdr.return_code);
                return 0;
        }
        /* payload length = PDU1 length minus the command header part;
         * the first part still contains the snmp command header, later
         * parts start directly at snmp->request */
        data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
        if (cmd->data.setadapterparms.hdr.seq_no == 1)
                data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
        else
                data_len -= (__u16)((char *)&snmp->request - (char *)cmd);

        /* check if there is enough room in userspace */
        if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
                QETH_DBF_TEXT_(TRACE, 4, "scer3%i", -ENOMEM);
                cmd->hdr.return_code = -ENOMEM;
                return 0;
        }
        QETH_DBF_TEXT_(TRACE, 4, "snore%i",
                       cmd->data.setadapterparms.hdr.used_total);
        QETH_DBF_TEXT_(TRACE, 4, "sseqn%i",
                       cmd->data.setadapterparms.hdr.seq_no);
        /*copy entries to user buffer*/
        if (cmd->data.setadapterparms.hdr.seq_no == 1) {
                memcpy(qinfo->udata + qinfo->udata_offset,
                       (char *)snmp,
                       data_len + offsetof(struct qeth_snmp_cmd, data));
                qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
        } else {
                memcpy(qinfo->udata + qinfo->udata_offset,
                       (char *)&snmp->request, data_len);
        }
        qinfo->udata_offset += data_len;
        /* check if all replies received ... */
        QETH_DBF_TEXT_(TRACE, 4, "srtot%i",
                       cmd->data.setadapterparms.hdr.used_total);
        QETH_DBF_TEXT_(TRACE, 4, "srseq%i",
                       cmd->data.setadapterparms.hdr.seq_no);
        if (cmd->data.setadapterparms.hdr.seq_no <
            cmd->data.setadapterparms.hdr.used_total)
                return 1;
        return 0;
}
3651
3652int qeth_snmp_command(struct qeth_card *card, char __user *udata)
3653{
3654 struct qeth_cmd_buffer *iob;
3655 struct qeth_ipa_cmd *cmd;
3656 struct qeth_snmp_ureq *ureq;
3657 int req_len;
3658 struct qeth_arp_query_info qinfo = {0, };
3659 int rc = 0;
3660
3661 QETH_DBF_TEXT(TRACE, 3, "snmpcmd");
3662
3663 if (card->info.guestlan)
3664 return -EOPNOTSUPP;
3665
3666 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
3667 (!card->options.layer2)) {
3668 PRINT_WARN("SNMP Query MIBS not supported "
3669 "on %s!\n", QETH_CARD_IFNAME(card));
3670 return -EOPNOTSUPP;
3671 }
3672 /* skip 4 bytes (data_len struct member) to get req_len */
3673 if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
3674 return -EFAULT;
3675 ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
3676 if (!ureq) {
3677 QETH_DBF_TEXT(TRACE, 2, "snmpnome");
3678 return -ENOMEM;
3679 }
3680 if (copy_from_user(ureq, udata,
3681 req_len + sizeof(struct qeth_snmp_ureq_hdr))) {
3682 kfree(ureq);
3683 return -EFAULT;
3684 }
3685 qinfo.udata_len = ureq->hdr.data_len;
3686 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
3687 if (!qinfo.udata) {
3688 kfree(ureq);
3689 return -ENOMEM;
3690 }
3691 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
3692
3693 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
3694 QETH_SNMP_SETADP_CMDLENGTH + req_len);
3695 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3696 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
3697 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
3698 qeth_snmp_command_cb, (void *)&qinfo);
3699 if (rc)
3700 PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
3701 QETH_CARD_IFNAME(card), rc);
3702 else {
3703 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
3704 rc = -EFAULT;
3705 }
3706
3707 kfree(ureq);
3708 kfree(qinfo.udata);
3709 return rc;
3710}
3711EXPORT_SYMBOL_GPL(qeth_snmp_command);
3712
3713static inline int qeth_get_qdio_q_format(struct qeth_card *card)
3714{
3715 switch (card->info.type) {
3716 case QETH_CARD_TYPE_IQD:
3717 return 2;
3718 default:
3719 return 0;
3720 }
3721}
3722
3723static int qeth_qdio_establish(struct qeth_card *card)
3724{
3725 struct qdio_initialize init_data;
3726 char *qib_param_field;
3727 struct qdio_buffer **in_sbal_ptrs;
3728 struct qdio_buffer **out_sbal_ptrs;
3729 int i, j, k;
3730 int rc = 0;
3731
3732 QETH_DBF_TEXT(SETUP, 2, "qdioest");
3733
3734 qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
3735 GFP_KERNEL);
3736 if (!qib_param_field)
3737 return -ENOMEM;
3738
3739 qeth_create_qib_param_field(card, qib_param_field);
3740 qeth_create_qib_param_field_blkt(card, qib_param_field);
3741
3742 in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
3743 GFP_KERNEL);
3744 if (!in_sbal_ptrs) {
3745 kfree(qib_param_field);
3746 return -ENOMEM;
3747 }
3748 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3749 in_sbal_ptrs[i] = (struct qdio_buffer *)
3750 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
3751
3752 out_sbal_ptrs =
3753 kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
3754 sizeof(void *), GFP_KERNEL);
3755 if (!out_sbal_ptrs) {
3756 kfree(in_sbal_ptrs);
3757 kfree(qib_param_field);
3758 return -ENOMEM;
3759 }
3760 for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
3761 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
3762 out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
3763 card->qdio.out_qs[i]->bufs[j].buffer);
3764 }
3765
3766 memset(&init_data, 0, sizeof(struct qdio_initialize));
3767 init_data.cdev = CARD_DDEV(card);
3768 init_data.q_format = qeth_get_qdio_q_format(card);
3769 init_data.qib_param_field_format = 0;
3770 init_data.qib_param_field = qib_param_field;
3771 init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
3772 init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
3773 init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
3774 init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
3775 init_data.no_input_qs = 1;
3776 init_data.no_output_qs = card->qdio.no_out_queues;
3777 init_data.input_handler = card->discipline.input_handler;
3778 init_data.output_handler = card->discipline.output_handler;
3779 init_data.int_parm = (unsigned long) card;
3780 init_data.flags = QDIO_INBOUND_0COPY_SBALS |
3781 QDIO_OUTBOUND_0COPY_SBALS |
3782 QDIO_USE_OUTBOUND_PCIS;
3783 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3784 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3785
3786 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
3787 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
3788 rc = qdio_initialize(&init_data);
3789 if (rc)
3790 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
3791 }
3792 kfree(out_sbal_ptrs);
3793 kfree(in_sbal_ptrs);
3794 kfree(qib_param_field);
3795 return rc;
3796}
3797
/*
 * Final teardown of a qeth_card: release the read/write channel
 * resources, the net device (if one was allocated), the pending-IP
 * list, the QDIO buffers and finally the card structure itself.
 * Callers unlink the card from qeth_core_card_list before calling this
 * (see qeth_core_remove_device).
 */
static void qeth_core_free_card(struct qeth_card *card)
{

        QETH_DBF_TEXT(SETUP, 2, "freecrd");
        QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
        qeth_clean_channel(&card->read);
        qeth_clean_channel(&card->write);
        if (card->dev)
                free_netdev(card->dev);
        kfree(card->ip_tbd_list);
        qeth_free_qdio_buffers(card);
        kfree(card);
}
3811
/*
 * Supported CCW devices (CU type 0x1731); driver_info carries the
 * matching QETH_CARD_TYPE_* for each model.
 */
static struct ccw_device_id qeth_ids[] = {
        {CCW_DEVICE(0x1731, 0x01), .driver_info = QETH_CARD_TYPE_OSAE},
        {CCW_DEVICE(0x1731, 0x05), .driver_info = QETH_CARD_TYPE_IQD},
        {CCW_DEVICE(0x1731, 0x06), .driver_info = QETH_CARD_TYPE_OSN},
        {},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);
3819
/*
 * CCW driver for the individual subchannels; probe/remove are delegated
 * to the ccwgroup helpers since qeth devices are handled as groups of
 * three subchannels.
 */
static struct ccw_driver qeth_ccw_driver = {
        .name = "qeth",
        .ids = qeth_ids,
        .probe = ccwgroup_probe_ccwdev,
        .remove = ccwgroup_remove_ccwdev,
};
3826
3827static int qeth_core_driver_group(const char *buf, struct device *root_dev,
3828 unsigned long driver_id)
3829{
3830 const char *start, *end;
3831 char bus_ids[3][BUS_ID_SIZE], *argv[3];
3832 int i;
3833
3834 start = buf;
3835 for (i = 0; i < 3; i++) {
3836 static const char delim[] = { ',', ',', '\n' };
3837 int len;
3838
3839 end = strchr(start, delim[i]);
3840 if (!end)
3841 return -EINVAL;
3842 len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start);
3843 strncpy(bus_ids[i], start, len);
3844 bus_ids[i][len] = '\0';
3845 start = end + 1;
3846 argv[i] = bus_ids[i];
3847 }
3848
3849 return (ccwgroup_create(root_dev, driver_id,
3850 &qeth_ccw_driver, 3, argv));
3851}
3852
/*
 * Bring the card's channels up far enough for MPC traffic: clear the
 * QDIO queues, read the unit address, IDX-activate the read and write
 * channels and run the MPC initialization sequence.
 *
 * Failures of the clear/IDX steps are retried up to three times; each
 * retry cycles all three subchannels offline and online again.
 * -ERESTARTSYS (interrupted wait) aborts immediately without retrying.
 *
 * Returns 0 on success or a negative error code.
 */
int qeth_core_hardsetup_card(struct qeth_card *card)
{
        int retries = 3;
        int mpno;
        int rc;

        QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
        atomic_set(&card->force_alloc_skb, 0);
retry:
        if (retries < 3) {
                /* not the first attempt: bounce all three subchannels */
                PRINT_WARN("Retrying to do IDX activates.\n");
                ccw_device_set_offline(CARD_DDEV(card));
                ccw_device_set_offline(CARD_WDEV(card));
                ccw_device_set_offline(CARD_RDEV(card));
                ccw_device_set_online(CARD_RDEV(card));
                ccw_device_set_online(CARD_WDEV(card));
                ccw_device_set_online(CARD_DDEV(card));
        }
        rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
        if (rc == -ERESTARTSYS) {
                QETH_DBF_TEXT(SETUP, 2, "break1");
                return rc;
        } else if (rc) {
                QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
                if (--retries < 0)
                        goto out;
                else
                        goto retry;
        }

        rc = qeth_get_unitaddr(card);
        if (rc) {
                QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
                return rc;
        }

        /* validate the configured port number against the maximum */
        mpno = QETH_MAX_PORTNO;
        if (card->info.portno > mpno) {
                PRINT_ERR("Device %s does not offer port number %d \n.",
                          CARD_BUS_ID(card), card->info.portno);
                rc = -ENODEV;
                goto out;
        }
        qeth_init_tokens(card);
        qeth_init_func_level(card);
        rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
        if (rc == -ERESTARTSYS) {
                QETH_DBF_TEXT(SETUP, 2, "break2");
                return rc;
        } else if (rc) {
                QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
                if (--retries < 0)
                        goto out;
                else
                        goto retry;
        }
        rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
        if (rc == -ERESTARTSYS) {
                QETH_DBF_TEXT(SETUP, 2, "break3");
                return rc;
        } else if (rc) {
                QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
                if (--retries < 0)
                        goto out;
                else
                        goto retry;
        }
        rc = qeth_mpc_initialize(card);
        if (rc) {
                QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
                goto out;
        }
        return 0;
out:
        PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc);
        return rc;
}
EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
3931
/*
 * Attach @data_len bytes at @offset of a buffer element's page to the
 * receive skb at *pskb as a page fragment, allocating the skb on first
 * use.  *pfrag is the next free frag slot and is advanced for every
 * fragment added.
 *
 * Returns 0 on success, -ENOMEM if the initial skb allocation fails.
 */
static inline int qeth_create_skb_frag(struct qdio_buffer_element *element,
                struct sk_buff **pskb, int offset, int *pfrag, int data_len)
{
        struct page *page = virt_to_page(element->addr);
        if (*pskb == NULL) {
                /* the upper protocol layers assume that there is data in the
                 * skb itself. Copy a small amount (64 bytes) to make them
                 * happy. */
                *pskb = dev_alloc_skb(64 + ETH_HLEN);
                if (!(*pskb))
                        return -ENOMEM;
                skb_reserve(*pskb, ETH_HLEN);
                if (data_len <= 64) {
                        /* small packet: linear copy only, no fragment */
                        memcpy(skb_put(*pskb, data_len), element->addr + offset,
                                data_len);
                } else {
                        /* take a page reference for the fragment; released
                         * when the skb is freed */
                        get_page(page);
                        memcpy(skb_put(*pskb, 64), element->addr + offset, 64);
                        skb_fill_page_desc(*pskb, *pfrag, page, offset + 64,
                                data_len - 64);
                        (*pskb)->data_len += data_len - 64;
                        (*pskb)->len += data_len - 64;
                        (*pskb)->truesize += data_len - 64;
                        (*pfrag)++;
                }
        } else {
                get_page(page);
                skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len);
                (*pskb)->data_len += data_len;
                (*pskb)->len += data_len;
                (*pskb)->truesize += data_len;
                (*pfrag)++;
        }
        return 0;
}
3967
/*
 * Extract the next received packet from a QDIO inbound buffer.
 *
 * *__element/*__offset describe the current read position inside
 * @buffer and are advanced past the consumed packet; *hdr is set to
 * the qeth header preceding the packet data.  Depending on packet size
 * and card settings the payload is either copied into a freshly
 * allocated skb or attached as page fragments ("rx_sg" path).
 *
 * Returns the skb, or NULL when no further packet is present in the
 * buffer or an error (truncated buffer, allocation failure) occurred.
 */
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
                struct qdio_buffer *buffer,
                struct qdio_buffer_element **__element, int *__offset,
                struct qeth_hdr **hdr)
{
        struct qdio_buffer_element *element = *__element;
        int offset = *__offset;
        struct sk_buff *skb = NULL;
        int skb_len;
        void *data_ptr;
        int data_len;
        int headroom = 0;
        int use_rx_sg = 0;
        int frag = 0;

        QETH_DBF_TEXT(TRACE, 6, "nextskb");
        /* qeth_hdr must not cross element boundaries */
        if (element->length < offset + sizeof(struct qeth_hdr)) {
                if (qeth_is_last_sbale(element))
                        return NULL;
                element++;
                offset = 0;
                if (element->length < sizeof(struct qeth_hdr))
                        return NULL;
        }
        *hdr = element->addr + offset;

        offset += sizeof(struct qeth_hdr);
        /* packet length and required headroom depend on the header type */
        if (card->options.layer2) {
                if (card->info.type == QETH_CARD_TYPE_OSN) {
                        skb_len = (*hdr)->hdr.osn.pdu_length;
                        headroom = sizeof(struct qeth_hdr);
                } else {
                        skb_len = (*hdr)->hdr.l2.pkt_length;
                }
        } else {
                skb_len = (*hdr)->hdr.l3.length;
                if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
                    (card->info.link_type == QETH_LINK_TYPE_HSTR))
                        headroom = TR_HLEN;
                else
                        headroom = ETH_HLEN;
        }

        if (!skb_len)
                return NULL;

        /* large packets go the scatter-gather way unless forced off */
        if ((skb_len >= card->options.rx_sg_cb) &&
            (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
            (!atomic_read(&card->force_alloc_skb))) {
                use_rx_sg = 1;
        } else {
                skb = dev_alloc_skb(skb_len + headroom);
                if (!skb)
                        goto no_mem;
                if (headroom)
                        skb_reserve(skb, headroom);
        }

        /* walk the buffer elements until skb_len bytes are gathered */
        data_ptr = element->addr + offset;
        while (skb_len) {
                data_len = min(skb_len, (int)(element->length - offset));
                if (data_len) {
                        if (use_rx_sg) {
                                if (qeth_create_skb_frag(element, &skb, offset,
                                    &frag, data_len))
                                        goto no_mem;
                        } else {
                                memcpy(skb_put(skb, data_len), data_ptr,
                                        data_len);
                        }
                }
                skb_len -= data_len;
                if (skb_len) {
                        /* packet continues in the next element; running
                         * out of elements here means a truncated buffer */
                        if (qeth_is_last_sbale(element)) {
                                QETH_DBF_TEXT(TRACE, 4, "unexeob");
                                QETH_DBF_TEXT_(TRACE, 4, "%s",
                                        CARD_BUS_ID(card));
                                QETH_DBF_TEXT(QERR, 2, "unexeob");
                                QETH_DBF_TEXT_(QERR, 2, "%s",
                                        CARD_BUS_ID(card));
                                QETH_DBF_HEX(MISC, 4, buffer, sizeof(*buffer));
                                dev_kfree_skb_any(skb);
                                card->stats.rx_errors++;
                                return NULL;
                        }
                        element++;
                        offset = 0;
                        data_ptr = element->addr;
                } else {
                        offset += data_len;
                }
        }
        *__element = element;
        *__offset = offset;
        if (use_rx_sg && card->options.performance_stats) {
                card->perf_stats.sg_skbs_rx++;
                card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
        }
        return skb;
no_mem:
        if (net_ratelimit()) {
                PRINT_WARN("No memory for packet received on %s.\n",
                           QETH_CARD_IFNAME(card));
                QETH_DBF_TEXT(TRACE, 2, "noskbmem");
                QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
        }
        card->stats.rx_dropped++;
        return NULL;
}
EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
4079
4080static void qeth_unregister_dbf_views(void)
4081{
4082 int x;
4083 for (x = 0; x < QETH_DBF_INFOS; x++) {
4084 debug_unregister(qeth_dbf[x].id);
4085 qeth_dbf[x].id = NULL;
4086 }
4087}
4088
4089static int qeth_register_dbf_views(void)
4090{
4091 int ret;
4092 int x;
4093
4094 for (x = 0; x < QETH_DBF_INFOS; x++) {
4095 /* register the areas */
4096 qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
4097 qeth_dbf[x].pages,
4098 qeth_dbf[x].areas,
4099 qeth_dbf[x].len);
4100 if (qeth_dbf[x].id == NULL) {
4101 qeth_unregister_dbf_views();
4102 return -ENOMEM;
4103 }
4104
4105 /* register a view */
4106 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
4107 if (ret) {
4108 qeth_unregister_dbf_views();
4109 return ret;
4110 }
4111
4112 /* set a passing level */
4113 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
4114 }
4115
4116 return 0;
4117}
4118
4119int qeth_core_load_discipline(struct qeth_card *card,
4120 enum qeth_discipline_id discipline)
4121{
4122 int rc = 0;
4123 switch (discipline) {
4124 case QETH_DISCIPLINE_LAYER3:
4125 card->discipline.ccwgdriver = try_then_request_module(
4126 symbol_get(qeth_l3_ccwgroup_driver),
4127 "qeth_l3");
4128 break;
4129 case QETH_DISCIPLINE_LAYER2:
4130 card->discipline.ccwgdriver = try_then_request_module(
4131 symbol_get(qeth_l2_ccwgroup_driver),
4132 "qeth_l2");
4133 break;
4134 }
4135 if (!card->discipline.ccwgdriver) {
4136 PRINT_ERR("Support for discipline %d not present\n",
4137 discipline);
4138 rc = -EINVAL;
4139 }
4140 return rc;
4141}
4142
4143void qeth_core_free_discipline(struct qeth_card *card)
4144{
4145 if (card->options.layer2)
4146 symbol_put(qeth_l2_ccwgroup_driver);
4147 else
4148 symbol_put(qeth_l3_ccwgroup_driver);
4149 card->discipline.ccwgdriver = NULL;
4150}
4151
/*
 * ccwgroup probe: allocate and set up a qeth_card for the grouped
 * device @gdev, wire up the three subchannels' IRQ handlers, create the
 * sysfs attributes and (for OSN cards) immediately bind the layer-2
 * discipline.  On success the card is added to the global card list.
 *
 * Returns 0 on success or a negative error code; on error the card and
 * the device reference are released again.
 */
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
        struct qeth_card *card;
        struct device *dev;
        int rc;
        unsigned long flags;

        QETH_DBF_TEXT(SETUP, 2, "probedev");

        dev = &gdev->dev;
        /* hold a reference on the group device for the card's lifetime */
        if (!get_device(dev))
                return -ENODEV;

        QETH_DBF_TEXT_(SETUP, 2, "%s", gdev->dev.bus_id);

        card = qeth_alloc_card();
        if (!card) {
                QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
                rc = -ENOMEM;
                goto err_dev;
        }
        /* subchannel order within the group: read, write, data */
        card->read.ccwdev = gdev->cdev[0];
        card->write.ccwdev = gdev->cdev[1];
        card->data.ccwdev = gdev->cdev[2];
        dev_set_drvdata(&gdev->dev, card);
        card->gdev = gdev;
        gdev->cdev[0]->handler = qeth_irq;
        gdev->cdev[1]->handler = qeth_irq;
        gdev->cdev[2]->handler = qeth_irq;

        rc = qeth_determine_card_type(card);
        if (rc) {
                PRINT_WARN("%s: not a valid card type\n", __func__);
                QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
                goto err_card;
        }
        rc = qeth_setup_card(card);
        if (rc) {
                QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
                goto err_card;
        }

        if (card->info.type == QETH_CARD_TYPE_OSN) {
                /* OSN cards are always layer 2; bind the discipline now */
                rc = qeth_core_create_osn_attributes(dev);
                if (rc)
                        goto err_card;
                rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
                if (rc) {
                        qeth_core_remove_osn_attributes(dev);
                        goto err_card;
                }
                rc = card->discipline.ccwgdriver->probe(card->gdev);
                if (rc) {
                        qeth_core_free_discipline(card);
                        qeth_core_remove_osn_attributes(dev);
                        goto err_card;
                }
        } else {
                rc = qeth_core_create_device_attributes(dev);
                if (rc)
                        goto err_card;
        }

        write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
        list_add_tail(&card->list, &qeth_core_card_list.list);
        write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
        return 0;

err_card:
        qeth_core_free_card(card);
err_dev:
        put_device(dev);
        return rc;
}
4226
4227static void qeth_core_remove_device(struct ccwgroup_device *gdev)
4228{
4229 unsigned long flags;
4230 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4231
4232 if (card->discipline.ccwgdriver) {
4233 card->discipline.ccwgdriver->remove(gdev);
4234 qeth_core_free_discipline(card);
4235 }
4236
4237 if (card->info.type == QETH_CARD_TYPE_OSN) {
4238 qeth_core_remove_osn_attributes(&gdev->dev);
4239 } else {
4240 qeth_core_remove_device_attributes(&gdev->dev);
4241 }
4242 write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
4243 list_del(&card->list);
4244 write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
4245 qeth_core_free_card(card);
4246 dev_set_drvdata(&gdev->dev, NULL);
4247 put_device(&gdev->dev);
4248 return;
4249}
4250
4251static int qeth_core_set_online(struct ccwgroup_device *gdev)
4252{
4253 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4254 int rc = 0;
4255 int def_discipline;
4256
4257 if (!card->discipline.ccwgdriver) {
4258 if (card->info.type == QETH_CARD_TYPE_IQD)
4259 def_discipline = QETH_DISCIPLINE_LAYER3;
4260 else
4261 def_discipline = QETH_DISCIPLINE_LAYER2;
4262 rc = qeth_core_load_discipline(card, def_discipline);
4263 if (rc)
4264 goto err;
4265 rc = card->discipline.ccwgdriver->probe(card->gdev);
4266 if (rc)
4267 goto err;
4268 }
4269 rc = card->discipline.ccwgdriver->set_online(gdev);
4270err:
4271 return rc;
4272}
4273
4274static int qeth_core_set_offline(struct ccwgroup_device *gdev)
4275{
4276 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4277 return card->discipline.ccwgdriver->set_offline(gdev);
4278}
4279
4280static void qeth_core_shutdown(struct ccwgroup_device *gdev)
4281{
4282 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4283 if (card->discipline.ccwgdriver &&
4284 card->discipline.ccwgdriver->shutdown)
4285 card->discipline.ccwgdriver->shutdown(gdev);
4286}
4287
/*
 * ccwgroup driver glue: one "qeth" group driver bundles the three ccw
 * subchannels of a card into a single group device.  driver_id is the
 * value passed to qeth_core_driver_group() by the "group" driver
 * attribute's store function when devices are grouped via sysfs.
 */
static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.owner = THIS_MODULE,
	.name = "qeth",
	.driver_id = 0xD8C5E3C8, /* NOTE(review): looks like "QETH" in EBCDIC — confirm */
	.probe = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
};
4298
4299static ssize_t
4300qeth_core_driver_group_store(struct device_driver *ddrv, const char *buf,
4301 size_t count)
4302{
4303 int err;
4304 err = qeth_core_driver_group(buf, qeth_core_root_dev,
4305 qeth_core_ccwgroup_driver.driver_id);
4306 if (err)
4307 return err;
4308 else
4309 return count;
4310}
4311
4312static DRIVER_ATTR(group, 0200, NULL, qeth_core_driver_group_store);
4313
/*
 * Names of the ethtool statistics.  The position of each entry MUST
 * match the data[] slot filled in qeth_core_get_ethtool_stats(); the
 * numbered markers call out every tenth entry to make the mapping
 * auditable.
 */
static struct {
	const char str[ETH_GSTRING_LEN];
} qeth_ethtool_stats_keys[] = {
/* 0 */{"rx skbs"},
	{"rx buffers"},
	{"tx skbs"},
	{"tx buffers"},
	{"tx skbs no packing"},
	{"tx buffers no packing"},
	{"tx skbs packing"},
	{"tx buffers packing"},
	{"tx sg skbs"},
	{"tx sg frags"},
/* 10 */{"rx sg skbs"},
	{"rx sg frags"},
	{"rx sg page allocs"},
	{"tx large kbytes"},
	{"tx large count"},
	{"tx pk state ch n->p"},
	{"tx pk state ch p->n"},
	{"tx pk watermark low"},
	{"tx pk watermark high"},
	{"queue 0 buffer usage"},
/* 20 */{"queue 1 buffer usage"},
	{"queue 2 buffer usage"},
	{"queue 3 buffer usage"},
	{"rx handler time"},
	{"rx handler count"},
	{"rx do_QDIO time"},
	{"rx do_QDIO count"},
	{"tx handler time"},
	{"tx handler count"},
	{"tx time"},
/* 30 */{"tx count"},
	{"tx do_QDIO time"},
	{"tx do_QDIO count"},
};
4351
4352int qeth_core_get_stats_count(struct net_device *dev)
4353{
4354 return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
4355}
4356EXPORT_SYMBOL_GPL(qeth_core_get_stats_count);
4357
/*
 * ethtool get_ethtool_stats: fill @data with the card's performance
 * counters.  Slot indices correspond one-to-one with the entries of
 * qeth_ethtool_stats_keys above — keep both in sync.
 */
void qeth_core_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct qeth_card *card = netdev_priv(dev);
	/* rx/tx skb totals are reported relative to the recorded
	 * initial_* snapshots (NOTE(review): presumably taken when
	 * performance statistics collection was enabled — confirm) */
	data[0] = card->stats.rx_packets -
				card->perf_stats.initial_rx_packets;
	data[1] = card->perf_stats.bufs_rec;
	data[2] = card->stats.tx_packets -
				card->perf_stats.initial_tx_packets;
	data[3] = card->perf_stats.bufs_sent;
	/* "no packing" counts = total sent minus the packed counts */
	data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets
			- card->perf_stats.skbs_sent_pack;
	data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack;
	data[6] = card->perf_stats.skbs_sent_pack;
	data[7] = card->perf_stats.bufs_sent_pack;
	data[8] = card->perf_stats.sg_skbs_sent;
	data[9] = card->perf_stats.sg_frags_sent;
	data[10] = card->perf_stats.sg_skbs_rx;
	data[11] = card->perf_stats.sg_frags_rx;
	data[12] = card->perf_stats.sg_alloc_page_rx;
	data[13] = (card->perf_stats.large_send_bytes >> 10); /* bytes -> KiB */
	data[14] = card->perf_stats.large_send_cnt;
	data[15] = card->perf_stats.sc_dp_p;
	data[16] = card->perf_stats.sc_p_dp;
	data[17] = QETH_LOW_WATERMARK_PACK;
	data[18] = QETH_HIGH_WATERMARK_PACK;
	/* queues beyond no_out_queues report 0 rather than touching
	 * unallocated queue structures */
	data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers);
	data[20] = (card->qdio.no_out_queues > 1) ?
			atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0;
	data[21] = (card->qdio.no_out_queues > 2) ?
			atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0;
	data[22] = (card->qdio.no_out_queues > 3) ?
			atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0;
	data[23] = card->perf_stats.inbound_time;
	data[24] = card->perf_stats.inbound_cnt;
	data[25] = card->perf_stats.inbound_do_qdio_time;
	data[26] = card->perf_stats.inbound_do_qdio_cnt;
	data[27] = card->perf_stats.outbound_handler_time;
	data[28] = card->perf_stats.outbound_handler_cnt;
	data[29] = card->perf_stats.outbound_time;
	data[30] = card->perf_stats.outbound_cnt;
	data[31] = card->perf_stats.outbound_do_qdio_time;
	data[32] = card->perf_stats.outbound_do_qdio_cnt;
}
EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
4403
4404void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4405{
4406 switch (stringset) {
4407 case ETH_SS_STATS:
4408 memcpy(data, &qeth_ethtool_stats_keys,
4409 sizeof(qeth_ethtool_stats_keys));
4410 break;
4411 default:
4412 WARN_ON(1);
4413 break;
4414 }
4415}
4416EXPORT_SYMBOL_GPL(qeth_core_get_strings);
4417
4418void qeth_core_get_drvinfo(struct net_device *dev,
4419 struct ethtool_drvinfo *info)
4420{
4421 struct qeth_card *card = netdev_priv(dev);
4422 if (card->options.layer2)
4423 strcpy(info->driver, "qeth_l2");
4424 else
4425 strcpy(info->driver, "qeth_l3");
4426
4427 strcpy(info->version, "1.0");
4428 strcpy(info->fw_version, card->info.mcl_level);
4429 sprintf(info->bus_info, "%s/%s/%s",
4430 CARD_RDEV_ID(card),
4431 CARD_WDEV_ID(card),
4432 CARD_DDEV_ID(card));
4433}
4434EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
4435
/*
 * Module init: set up the global card list, then register — in order —
 * the debug views, the ccw driver, the ccwgroup driver, the "group"
 * driver attribute and finally the "qeth" root device.  Any failure
 * unwinds everything registered so far in reverse order via the goto
 * ladder; keep the labels matched with the registration order.
 */
static int __init qeth_core_init(void)
{
	int rc;

	PRINT_INFO("loading core functions\n");
	INIT_LIST_HEAD(&qeth_core_card_list.list);
	rwlock_init(&qeth_core_card_list.rwlock);

	rc = qeth_register_dbf_views();
	if (rc)
		goto out_err;
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;
	rc = driver_create_file(&qeth_core_ccwgroup_driver.driver,
				&driver_attr_group);
	if (rc)
		goto driver_err;
	qeth_core_root_dev = s390_root_dev_register("qeth");
	/* s390_root_dev_register() returns ERR_PTR on failure */
	rc = IS_ERR(qeth_core_root_dev) ? PTR_ERR(qeth_core_root_dev) : 0;
	if (rc)
		goto register_err;
	return 0;

register_err:
	driver_remove_file(&qeth_core_ccwgroup_driver.driver,
			   &driver_attr_group);
driver_err:
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	qeth_unregister_dbf_views();
out_err:
	PRINT_ERR("Initialization failed with code %d\n", rc);
	return rc;
}
4476
/*
 * Module exit: tear everything down in strict reverse order of
 * qeth_core_init() — root device, "group" attribute, ccwgroup driver,
 * ccw driver, debug views.
 */
static void __exit qeth_core_exit(void)
{
	s390_root_dev_unregister(qeth_core_root_dev);
	driver_remove_file(&qeth_core_ccwgroup_driver.driver,
			   &driver_attr_group);
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	qeth_unregister_dbf_views();
	PRINT_INFO("core functions removed\n");
}
4487
/* Module entry/exit points and metadata. */
module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
new file mode 100644
index 000000000000..06f4de1f0507
--- /dev/null
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -0,0 +1,266 @@
1/*
2 * drivers/s390/net/qeth_core_mpc.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
6 * Thomas Spatzier <tspat@de.ibm.com>,
7 * Frank Blaschka <frank.blaschka@de.ibm.com>
8 */
9
10#include <linux/module.h>
11#include <asm/cio.h>
12#include "qeth_core_mpc.h"
13
/*
 * Raw IDX ACTIVATE request template for the READ channel.  Card- and
 * connection-specific fields are patched in at runtime before the
 * request is sent (see the QETH_IDX_ACT_* offset macros in
 * qeth_core_mpc.h).
 */
unsigned char IDX_ACTIVATE_READ[] = {
	0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x19, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1,
	0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00,
	0x00, 0x00
};

/*
 * IDX ACTIVATE template for the WRITE channel; differs from the read
 * template only in a few bytes (length byte 0x15 vs 0x19, and the
 * 0xff 0xff marker at offset 0x10).
 */
unsigned char IDX_ACTIVATE_WRITE[] = {
	0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x15, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00,
	0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1,
	0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00,
	0x00, 0x00
};
29
/*
 * CM ENABLE request template (control-channel setup).  Sent with
 * runtime-patched token fields; the reply offsets are described by the
 * QETH_CM_ENABLE_* macros in qeth_core_mpc.h.
 */
unsigned char CM_ENABLE[] = {
	0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
	0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x63,
	0x10, 0x00, 0x00, 0x01,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x23,
	0x00, 0x00, 0x23, 0x05, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x40,
	0x00, 0x0c, 0x41, 0x02, 0x00, 0x17, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x0b, 0x04, 0x01,
	0x7e, 0x04, 0x05, 0x00, 0x01, 0x01, 0x0f,
	0x00,
	0x0c, 0x04, 0x02, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff
};

/*
 * CM SETUP request template; token/address fields patched at runtime
 * (QETH_CM_SETUP_* macros in qeth_core_mpc.h).
 */
unsigned char CM_SETUP[] = {
	0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
	0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x64,
	0x10, 0x00, 0x00, 0x01,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x24,
	0x00, 0x00, 0x24, 0x05, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x40,
	0x00, 0x0c, 0x41, 0x04, 0x00, 0x18, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x09, 0x04, 0x04,
	0x05, 0x00, 0x01, 0x01, 0x11,
	0x00, 0x09, 0x04,
	0x05, 0x05, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x06,
	0x04, 0x06, 0xc8, 0x00
};
68
/*
 * ULP ENABLE request template; protocol type, port name and token
 * fields are patched at runtime (QETH_ULP_ENABLE_* macros in
 * qeth_core_mpc.h).
 */
unsigned char ULP_ENABLE[] = {
	0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
	0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6b,
	0x10, 0x00, 0x00, 0x01,
	0x00, 0x00, 0x00, 0x00,
	0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x2b,
	0x00, 0x00, 0x2b, 0x05, 0x20, 0x01, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x40,
	0x00, 0x0c, 0x41, 0x02, 0x00, 0x1f, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x0b, 0x04, 0x01,
	0x03, 0x04, 0x05, 0x00, 0x01, 0x01, 0x12,
	0x00,
	0x14, 0x04, 0x0a, 0x00, 0x20, 0x00, 0x00, 0xff,
	0xff, 0x00, 0x08, 0xc8, 0xe8, 0xc4, 0xf1, 0xc7,
	0xf1, 0x00, 0x00
};

/*
 * ULP SETUP request template; connection token, CUA and real device
 * address are patched at runtime (QETH_ULP_SETUP_* macros).
 */
unsigned char ULP_SETUP[] = {
	0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
	0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6c,
	0x10, 0x00, 0x00, 0x01,
	0x00, 0x00, 0x00, 0x00,
	0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02,
	0x00, 0x00, 0x00, 0x01, 0x00, 0x24, 0x00, 0x2c,
	0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x40,
	0x00, 0x0c, 0x41, 0x04, 0x00, 0x20, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x09, 0x04, 0x04,
	0x05, 0x00, 0x01, 0x01, 0x14,
	0x00, 0x09, 0x04,
	0x05, 0x05, 0x30, 0x01, 0x00, 0x00,
	0x00, 0x06,
	0x04, 0x06, 0x40, 0x00,
	0x00, 0x08, 0x04, 0x0b,
	0x00, 0x00, 0x00, 0x00
};

/*
 * DM ACT (activate) request template; destination address and
 * connection token are patched at runtime (QETH_DM_ACT_* macros).
 */
unsigned char DM_ACT[] = {
	0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
	0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x55,
	0x10, 0x00, 0x00, 0x01,
	0x00, 0x00, 0x00, 0x00,
	0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03,
	0x00, 0x00, 0x00, 0x02, 0x00, 0x24, 0x00, 0x15,
	0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x40,
	0x00, 0x0c, 0x43, 0x60, 0x00, 0x09, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x09, 0x04, 0x04,
	0x05, 0x40, 0x01, 0x01, 0x00
};
126
/*
 * Template for the IPA_PDU_HEADER_SIZE-byte PDU header placed in front
 * of every IPA command.  The length fields are filled in at compile
 * time from sizeof(struct qeth_ipa_cmd); the 0x77 bytes appear to be
 * placeholders overwritten at runtime (e.g. QETH_IPA_CMD_DEST_ADDR at
 * offset 0x2c — NOTE(review): confirm against the senders).
 */
unsigned char IPA_PDU_HEADER[] = {
	0x00, 0xe0, 0x00, 0x00, 0x77, 0x77, 0x77, 0x77,
	0x00, 0x00, 0x00, 0x14, 0x00, 0x00,
	(IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) / 256,
	(IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) % 256,
	0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0xc1, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x24,
	sizeof(struct qeth_ipa_cmd) / 256,
	sizeof(struct qeth_ipa_cmd) % 256,
	0x00,
	sizeof(struct qeth_ipa_cmd) / 256,
	sizeof(struct qeth_ipa_cmd) % 256,
	0x05,
	0x77, 0x77, 0x77, 0x77,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x00,
	sizeof(struct qeth_ipa_cmd) / 256,
	sizeof(struct qeth_ipa_cmd) % 256,
	0x00, 0x00, 0x00, 0x40,
};
EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);

/* Write CCW template: opcode 0x01, suppress-length-indication flag. */
unsigned char WRITE_CCW[] = {
	0x01, CCW_FLAG_SLI, 0, 0,
	0, 0, 0, 0
};

/* Read CCW template: opcode 0x02, suppress-length-indication flag. */
unsigned char READ_CCW[] = {
	0x02, CCW_FLAG_SLI, 0, 0,
	0, 0, 0, 0
};
159
160
/* One IPA return-code -> human-readable-message mapping. */
struct ipa_rc_msg {
	enum qeth_ipa_return_codes rc;
	char *msg;
};

/*
 * Return-code message table.  The final entry (IPA_RC_FFFF,
 * "Unknown Error") doubles as the catch-all used by qeth_get_ipa_msg()
 * for codes not listed here — keep it last.
 */
static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
	{IPA_RC_SUCCESS, "success"},
	{IPA_RC_NOTSUPP, "Command not supported"},
	{IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
	{IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"},
	{IPA_RC_UNSUPPORTED_COMMAND, "Command not supported"},
	{IPA_RC_DUP_IPV6_REMOTE, "ipv6 address already registered remote"},
	{IPA_RC_DUP_IPV6_HOME, "ipv6 address already registered"},
	{IPA_RC_UNREGISTERED_ADDR, "Address not registered"},
	{IPA_RC_NO_ID_AVAILABLE, "No identifiers available"},
	{IPA_RC_ID_NOT_FOUND, "Identifier not found"},
	{IPA_RC_INVALID_IP_VERSION, "IP version incorrect"},
	{IPA_RC_LAN_FRAME_MISMATCH, "LAN and frame mismatch"},
	{IPA_RC_L2_UNSUPPORTED_CMD, "Unsupported layer 2 command"},
	{IPA_RC_L2_DUP_MAC, "Duplicate MAC address"},
	{IPA_RC_L2_ADDR_TABLE_FULL, "Layer2 address table full"},
	{IPA_RC_L2_DUP_LAYER3_MAC, "Duplicate with layer 3 MAC"},
	{IPA_RC_L2_GMAC_NOT_FOUND, "GMAC not found"},
	{IPA_RC_L2_MAC_NOT_FOUND, "L2 mac address not found"},
	{IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"},
	{IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"},
	{IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"},
	{IPA_RC_DATA_MISMATCH, "Data field mismatch (v4/v6 mixed)"},
	{IPA_RC_INVALID_MTU_SIZE, "Invalid MTU size"},
	{IPA_RC_INVALID_LANTYPE, "Invalid LAN type"},
	{IPA_RC_INVALID_LANNUM, "Invalid LAN num"},
	{IPA_RC_DUPLICATE_IP_ADDRESS, "Address already registered"},
	{IPA_RC_IP_ADDR_TABLE_FULL, "IP address table full"},
	{IPA_RC_LAN_PORT_STATE_ERROR, "LAN port state error"},
	{IPA_RC_SETIP_NO_STARTLAN, "Setip no startlan received"},
	{IPA_RC_SETIP_ALREADY_RECEIVED, "Setip already received"},
	{IPA_RC_IP_ADDR_ALREADY_USED, "IP address already in use on LAN"},
	{IPA_RC_MC_ADDR_NOT_FOUND, "Multicast address not found"},
	{IPA_RC_SETIP_INVALID_VERSION, "SETIP invalid IP version"},
	{IPA_RC_UNSUPPORTED_SUBCMD, "Unsupported assist subcommand"},
	{IPA_RC_ARP_ASSIST_NO_ENABLE, "Only partial success, no enable"},
	{IPA_RC_PRIMARY_ALREADY_DEFINED, "Primary already defined"},
	{IPA_RC_SECOND_ALREADY_DEFINED, "Secondary already defined"},
	{IPA_RC_INVALID_SETRTG_INDICATOR, "Invalid SETRTG indicator"},
	{IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"},
	{IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"},
	{IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"},
	{IPA_RC_FFFF, "Unknown Error"}
};
210
211
212
213char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
214{
215 int x = 0;
216 qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) /
217 sizeof(struct ipa_rc_msg) - 1].rc = rc;
218 while (qeth_ipa_rc_msg[x].rc != rc)
219 x++;
220 return qeth_ipa_rc_msg[x].msg;
221}
222
223
/* One IPA command code -> short name mapping. */
struct ipa_cmd_names {
	enum qeth_ipa_cmds cmd;
	char *name;
};

/*
 * Command-name table.  The final entry (IPA_CMD_UNKNOWN, "unknown")
 * doubles as the catch-all used by qeth_get_ipa_cmd_name() — keep it
 * last.
 */
static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
	{IPA_CMD_STARTLAN, "startlan"},
	{IPA_CMD_STOPLAN, "stoplan"},
	{IPA_CMD_SETVMAC, "setvmac"},
	{IPA_CMD_DELVMAC, "delvmac"},
	{IPA_CMD_SETGMAC, "setgmac"},
	{IPA_CMD_DELGMAC, "delgmac"},
	{IPA_CMD_SETVLAN, "setvlan"},
	{IPA_CMD_DELVLAN, "delvlan"},
	{IPA_CMD_SETCCID, "setccid"},
	{IPA_CMD_DELCCID, "delccid"},
	{IPA_CMD_MODCCID, "modccid"},
	{IPA_CMD_SETIP, "setip"},
	{IPA_CMD_QIPASSIST, "qipassist"},
	{IPA_CMD_SETASSPARMS, "setassparms"},
	{IPA_CMD_SETIPM, "setipm"},
	{IPA_CMD_DELIPM, "delipm"},
	{IPA_CMD_SETRTG, "setrtg"},
	{IPA_CMD_DELIP, "delip"},
	{IPA_CMD_SETADAPTERPARMS, "setadapterparms"},
	{IPA_CMD_SET_DIAG_ASS, "set_diag_ass"},
	{IPA_CMD_CREATE_ADDR, "create_addr"},
	{IPA_CMD_DESTROY_ADDR, "destroy_addr"},
	{IPA_CMD_REGISTER_LOCAL_ADDR, "register_local_addr"},
	{IPA_CMD_UNREGISTER_LOCAL_ADDR, "unregister_local_addr"},
	{IPA_CMD_UNKNOWN, "unknown"},
};
256
257char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
258{
259 int x = 0;
260 qeth_ipa_cmd_names[
261 sizeof(qeth_ipa_cmd_names) /
262 sizeof(struct ipa_cmd_names)-1].cmd = cmd;
263 while (qeth_ipa_cmd_names[x].cmd != cmd)
264 x++;
265 return qeth_ipa_cmd_names[x].name;
266}
diff --git a/drivers/s390/net/qeth_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 6de2da5ed5fd..18548822e37c 100644
--- a/drivers/s390/net/qeth_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -1,27 +1,25 @@
1/* 1/*
2 * linux/drivers/s390/net/qeth_mpc.h 2 * drivers/s390/net/qeth_core_mpc.h
3 *
4 * Linux on zSeries OSA Express and HiperSockets support
5 *
6 * Copyright 2000,2003 IBM Corporation
7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8 * Thomas Spatzier <tspat@de.ibm.com>
9 * Frank Pavlic <fpavlic@de.ibm.com>
10 * 3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
6 * Thomas Spatzier <tspat@de.ibm.com>,
7 * Frank Blaschka <frank.blaschka@de.ibm.com>
11 */ 8 */
12#ifndef __QETH_MPC_H__ 9
13#define __QETH_MPC_H__ 10#ifndef __QETH_CORE_MPC_H__
11#define __QETH_CORE_MPC_H__
14 12
15#include <asm/qeth.h> 13#include <asm/qeth.h>
16 14
17#define IPA_PDU_HEADER_SIZE 0x40 15#define IPA_PDU_HEADER_SIZE 0x40
18#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer+0x0e) 16#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
19#define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer+0x26) 17#define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer + 0x26)
20#define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer+0x29) 18#define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer + 0x29)
21#define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer+0x3a) 19#define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer + 0x3a)
22 20
23extern unsigned char IPA_PDU_HEADER[]; 21extern unsigned char IPA_PDU_HEADER[];
24#define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer+0x2c) 22#define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer + 0x2c)
25 23
26#define IPA_CMD_LENGTH (IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd)) 24#define IPA_CMD_LENGTH (IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd))
27 25
@@ -93,7 +91,8 @@ enum qeth_checksum_types {
93 */ 91 */
94#define RESET_ROUTING_FLAG 0x10 /* indicate that routing type shall be set */ 92#define RESET_ROUTING_FLAG 0x10 /* indicate that routing type shall be set */
95enum qeth_routing_types { 93enum qeth_routing_types {
96 NO_ROUTER = 0, /* TODO: set to bit flag used in IPA Command */ 94 /* TODO: set to bit flag used in IPA Command */
95 NO_ROUTER = 0,
97 PRIMARY_ROUTER = 1, 96 PRIMARY_ROUTER = 1,
98 SECONDARY_ROUTER = 2, 97 SECONDARY_ROUTER = 2,
99 MULTICAST_ROUTER = 3, 98 MULTICAST_ROUTER = 3,
@@ -183,7 +182,7 @@ enum qeth_ipa_return_codes {
183 IPA_RC_SETIP_NO_STARTLAN = 0xe008, 182 IPA_RC_SETIP_NO_STARTLAN = 0xe008,
184 IPA_RC_SETIP_ALREADY_RECEIVED = 0xe009, 183 IPA_RC_SETIP_ALREADY_RECEIVED = 0xe009,
185 IPA_RC_IP_ADDR_ALREADY_USED = 0xe00a, 184 IPA_RC_IP_ADDR_ALREADY_USED = 0xe00a,
186 IPA_RC_MULTICAST_FULL = 0xe00b, 185 IPA_RC_MC_ADDR_NOT_FOUND = 0xe00b,
187 IPA_RC_SETIP_INVALID_VERSION = 0xe00d, 186 IPA_RC_SETIP_INVALID_VERSION = 0xe00d,
188 IPA_RC_UNSUPPORTED_SUBCMD = 0xe00e, 187 IPA_RC_UNSUPPORTED_SUBCMD = 0xe00e,
189 IPA_RC_ARP_ASSIST_NO_ENABLE = 0xe00f, 188 IPA_RC_ARP_ASSIST_NO_ENABLE = 0xe00f,
@@ -233,14 +232,14 @@ enum qeth_ipa_setdelip_flags {
233 232
234/* SETADAPTER IPA Command: ****************************************************/ 233/* SETADAPTER IPA Command: ****************************************************/
235enum qeth_ipa_setadp_cmd { 234enum qeth_ipa_setadp_cmd {
236 IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x01, 235 IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x0001,
237 IPA_SETADP_ALTER_MAC_ADDRESS = 0x02, 236 IPA_SETADP_ALTER_MAC_ADDRESS = 0x0002,
238 IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x04, 237 IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x0004,
239 IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x08, 238 IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x0008,
240 IPA_SETADP_SET_ADDRESSING_MODE = 0x10, 239 IPA_SETADP_SET_ADDRESSING_MODE = 0x0010,
241 IPA_SETADP_SET_CONFIG_PARMS = 0x20, 240 IPA_SETADP_SET_CONFIG_PARMS = 0x0020,
242 IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x40, 241 IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x0040,
243 IPA_SETADP_SET_BROADCAST_MODE = 0x80, 242 IPA_SETADP_SET_BROADCAST_MODE = 0x0080,
244 IPA_SETADP_SEND_OSA_MESSAGE = 0x0100, 243 IPA_SETADP_SEND_OSA_MESSAGE = 0x0100,
245 IPA_SETADP_SET_SNMP_CONTROL = 0x0200, 244 IPA_SETADP_SET_SNMP_CONTROL = 0x0200,
246 IPA_SETADP_QUERY_CARD_INFO = 0x0400, 245 IPA_SETADP_QUERY_CARD_INFO = 0x0400,
@@ -397,26 +396,11 @@ struct qeth_ipacmd_setadpparms {
397 } data; 396 } data;
398} __attribute__ ((packed)); 397} __attribute__ ((packed));
399 398
400/* IPFRAME IPA Command: ***************************************************/
401/* TODO: define in analogy to commands define above */
402
403/* ADD_ADDR_ENTRY IPA Command: ********************************************/
404/* TODO: define in analogy to commands define above */
405
406/* DELETE_ADDR_ENTRY IPA Command: *****************************************/
407/* TODO: define in analogy to commands define above */
408
409/* CREATE_ADDR IPA Command: ***********************************************/ 399/* CREATE_ADDR IPA Command: ***********************************************/
410struct qeth_create_destroy_address { 400struct qeth_create_destroy_address {
411 __u8 unique_id[8]; 401 __u8 unique_id[8];
412} __attribute__ ((packed)); 402} __attribute__ ((packed));
413 403
414/* REGISTER_LOCAL_ADDR IPA Command: ***************************************/
415/* TODO: define in analogy to commands define above */
416
417/* UNREGISTER_LOCAL_ADDR IPA Command: *************************************/
418/* TODO: define in analogy to commands define above */
419
420/* Header for each IPA command */ 404/* Header for each IPA command */
421struct qeth_ipacmd_hdr { 405struct qeth_ipacmd_hdr {
422 __u8 command; 406 __u8 command;
@@ -463,10 +447,8 @@ enum qeth_ipa_arp_return_codes {
463}; 447};
464 448
465 449
466extern char * 450extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
467qeth_get_ipa_msg(enum qeth_ipa_return_codes rc); 451extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
468extern char *
469qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
470 452
471#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \ 453#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
472 sizeof(struct qeth_ipacmd_setassparms_hdr)) 454 sizeof(struct qeth_ipacmd_setassparms_hdr))
@@ -492,88 +474,89 @@ extern unsigned char READ_CCW[];
492 474
493extern unsigned char CM_ENABLE[]; 475extern unsigned char CM_ENABLE[];
494#define CM_ENABLE_SIZE 0x63 476#define CM_ENABLE_SIZE 0x63
495#define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer+0x2c) 477#define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer + 0x2c)
496#define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer+0x53) 478#define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53)
497#define QETH_CM_ENABLE_USER_DATA(buffer) (buffer+0x5b) 479#define QETH_CM_ENABLE_USER_DATA(buffer) (buffer + 0x5b)
498 480
499#define QETH_CM_ENABLE_RESP_FILTER_TOKEN(buffer) \ 481#define QETH_CM_ENABLE_RESP_FILTER_TOKEN(buffer) \
500 (PDU_ENCAPSULATION(buffer)+ 0x13) 482 (PDU_ENCAPSULATION(buffer) + 0x13)
501 483
502 484
503extern unsigned char CM_SETUP[]; 485extern unsigned char CM_SETUP[];
504#define CM_SETUP_SIZE 0x64 486#define CM_SETUP_SIZE 0x64
505#define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer+0x2c) 487#define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
506#define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer+0x51) 488#define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
507#define QETH_CM_SETUP_FILTER_TOKEN(buffer) (buffer+0x5a) 489#define QETH_CM_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a)
508 490
509#define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \ 491#define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \
510 (PDU_ENCAPSULATION(buffer) + 0x1a) 492 (PDU_ENCAPSULATION(buffer) + 0x1a)
511 493
512extern unsigned char ULP_ENABLE[]; 494extern unsigned char ULP_ENABLE[];
513#define ULP_ENABLE_SIZE 0x6b 495#define ULP_ENABLE_SIZE 0x6b
514#define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer+0x61) 496#define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer + 0x61)
515#define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer+0x2c) 497#define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer + 0x2c)
516#define QETH_ULP_ENABLE_FILTER_TOKEN(buffer) (buffer+0x53) 498#define QETH_ULP_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53)
517#define QETH_ULP_ENABLE_PORTNAME_AND_LL(buffer) (buffer+0x62) 499#define QETH_ULP_ENABLE_PORTNAME_AND_LL(buffer) (buffer + 0x62)
518#define QETH_ULP_ENABLE_RESP_FILTER_TOKEN(buffer) \ 500#define QETH_ULP_ENABLE_RESP_FILTER_TOKEN(buffer) \
519 (PDU_ENCAPSULATION(buffer) + 0x13) 501 (PDU_ENCAPSULATION(buffer) + 0x13)
520#define QETH_ULP_ENABLE_RESP_MAX_MTU(buffer) \ 502#define QETH_ULP_ENABLE_RESP_MAX_MTU(buffer) \
521 (PDU_ENCAPSULATION(buffer)+ 0x1f) 503 (PDU_ENCAPSULATION(buffer) + 0x1f)
522#define QETH_ULP_ENABLE_RESP_DIFINFO_LEN(buffer) \ 504#define QETH_ULP_ENABLE_RESP_DIFINFO_LEN(buffer) \
523 (PDU_ENCAPSULATION(buffer) + 0x17) 505 (PDU_ENCAPSULATION(buffer) + 0x17)
524#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \ 506#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \
525 (PDU_ENCAPSULATION(buffer)+ 0x2b) 507 (PDU_ENCAPSULATION(buffer) + 0x2b)
526/* Layer 2 defintions */ 508/* Layer 2 defintions */
527#define QETH_PROT_LAYER2 0x08 509#define QETH_PROT_LAYER2 0x08
528#define QETH_PROT_TCPIP 0x03 510#define QETH_PROT_TCPIP 0x03
529#define QETH_PROT_OSN2 0x0a 511#define QETH_PROT_OSN2 0x0a
530#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer+0x50) 512#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer + 0x50)
531#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer+0x19) 513#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer + 0x19)
532 514
533extern unsigned char ULP_SETUP[]; 515extern unsigned char ULP_SETUP[];
534#define ULP_SETUP_SIZE 0x6c 516#define ULP_SETUP_SIZE 0x6c
535#define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer+0x2c) 517#define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
536#define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer+0x51) 518#define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
537#define QETH_ULP_SETUP_FILTER_TOKEN(buffer) (buffer+0x5a) 519#define QETH_ULP_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a)
538#define QETH_ULP_SETUP_CUA(buffer) (buffer+0x68) 520#define QETH_ULP_SETUP_CUA(buffer) (buffer + 0x68)
539#define QETH_ULP_SETUP_REAL_DEVADDR(buffer) (buffer+0x6a) 521#define QETH_ULP_SETUP_REAL_DEVADDR(buffer) (buffer + 0x6a)
540 522
541#define QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(buffer) \ 523#define QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(buffer) \
542 (PDU_ENCAPSULATION(buffer)+0x1a) 524 (PDU_ENCAPSULATION(buffer) + 0x1a)
543 525
544 526
545extern unsigned char DM_ACT[]; 527extern unsigned char DM_ACT[];
546#define DM_ACT_SIZE 0x55 528#define DM_ACT_SIZE 0x55
547#define QETH_DM_ACT_DEST_ADDR(buffer) (buffer+0x2c) 529#define QETH_DM_ACT_DEST_ADDR(buffer) (buffer + 0x2c)
548#define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer+0x51) 530#define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer + 0x51)
549 531
550 532
551 533
552#define QETH_TRANSPORT_HEADER_SEQ_NO(buffer) (buffer+4) 534#define QETH_TRANSPORT_HEADER_SEQ_NO(buffer) (buffer + 4)
553#define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer+0x1c) 535#define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer + 0x1c)
554#define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer+0x20) 536#define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer + 0x20)
555 537
556extern unsigned char IDX_ACTIVATE_READ[]; 538extern unsigned char IDX_ACTIVATE_READ[];
557extern unsigned char IDX_ACTIVATE_WRITE[]; 539extern unsigned char IDX_ACTIVATE_WRITE[];
558 540
559#define IDX_ACTIVATE_SIZE 0x22 541#define IDX_ACTIVATE_SIZE 0x22
560#define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer+0x0c) 542#define QETH_IDX_ACT_PNO(buffer) (buffer+0x0b)
561#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b]&0x80) 543#define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer + 0x0c)
562#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer+0x10) 544#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b] & 0x80)
563#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer+0x16) 545#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer + 0x10)
564#define QETH_IDX_ACT_QDIO_DEV_CUA(buffer) (buffer+0x1e) 546#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer + 0x16)
565#define QETH_IDX_ACT_QDIO_DEV_REALADDR(buffer) (buffer+0x20) 547#define QETH_IDX_ACT_QDIO_DEV_CUA(buffer) (buffer + 0x1e)
566#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08]&3)==2) 548#define QETH_IDX_ACT_QDIO_DEV_REALADDR(buffer) (buffer + 0x20)
567#define QETH_IDX_REPLY_LEVEL(buffer) (buffer+0x12) 549#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2)
550#define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12)
568#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09] 551#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09]
569 552
570#define PDU_ENCAPSULATION(buffer) \ 553#define PDU_ENCAPSULATION(buffer) \
571 (buffer + *(buffer + (*(buffer+0x0b)) + \ 554 (buffer + *(buffer + (*(buffer + 0x0b)) + \
572 *(buffer + *(buffer+0x0b)+0x11) +0x07)) 555 *(buffer + *(buffer + 0x0b) + 0x11) + 0x07))
573 556
574#define IS_IPA(buffer) \ 557#define IS_IPA(buffer) \
575 ((buffer) && \ 558 ((buffer) && \
576 ( *(buffer + ((*(buffer+0x0b))+4) )==0xc1) ) 559 (*(buffer + ((*(buffer + 0x0b)) + 4)) == 0xc1))
577 560
578#define ADDR_FRAME_TYPE_DIX 1 561#define ADDR_FRAME_TYPE_DIX 1
579#define ADDR_FRAME_TYPE_802_3 2 562#define ADDR_FRAME_TYPE_802_3 2
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_core_offl.c
index e3c268cfbffe..822df8362856 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_core_offl.c
@@ -1,13 +1,11 @@
1/* 1/*
2 * linux/drivers/s390/net/qeth_eddp.c 2 * drivers/s390/net/qeth_core_offl.c
3 *
4 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
5 *
6 * Copyright 2004 IBM Corporation
7 *
8 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
9 * 3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Thomas Spatzier <tspat@de.ibm.com>,
6 * Frank Blaschka <frank.blaschka@de.ibm.com>
10 */ 7 */
8
11#include <linux/errno.h> 9#include <linux/errno.h>
12#include <linux/ip.h> 10#include <linux/ip.h>
13#include <linux/inetdevice.h> 11#include <linux/inetdevice.h>
@@ -18,14 +16,14 @@
18#include <linux/skbuff.h> 16#include <linux/skbuff.h>
19 17
20#include <net/ip.h> 18#include <net/ip.h>
19#include <net/ip6_checksum.h>
21 20
22#include "qeth.h" 21#include "qeth_core.h"
23#include "qeth_mpc.h" 22#include "qeth_core_mpc.h"
24#include "qeth_eddp.h" 23#include "qeth_core_offl.h"
25 24
26int 25int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
27qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue, 26 struct qeth_eddp_context *ctx)
28 struct qeth_eddp_context *ctx)
29{ 27{
30 int index = queue->next_buf_to_fill; 28 int index = queue->next_buf_to_fill;
31 int elements_needed = ctx->num_elements; 29 int elements_needed = ctx->num_elements;
@@ -33,8 +31,8 @@ qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
33 int skbs_in_buffer; 31 int skbs_in_buffer;
34 int buffers_needed = 0; 32 int buffers_needed = 0;
35 33
36 QETH_DBF_TEXT(trace, 5, "eddpcbfc"); 34 QETH_DBF_TEXT(TRACE, 5, "eddpcbfc");
37 while(elements_needed > 0) { 35 while (elements_needed > 0) {
38 buffers_needed++; 36 buffers_needed++;
39 if (atomic_read(&queue->bufs[index].state) != 37 if (atomic_read(&queue->bufs[index].state) !=
40 QETH_QDIO_BUF_EMPTY) 38 QETH_QDIO_BUF_EMPTY)
@@ -49,12 +47,11 @@ qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
49 return buffers_needed; 47 return buffers_needed;
50} 48}
51 49
52static void 50static void qeth_eddp_free_context(struct qeth_eddp_context *ctx)
53qeth_eddp_free_context(struct qeth_eddp_context *ctx)
54{ 51{
55 int i; 52 int i;
56 53
57 QETH_DBF_TEXT(trace, 5, "eddpfctx"); 54 QETH_DBF_TEXT(TRACE, 5, "eddpfctx");
58 for (i = 0; i < ctx->num_pages; ++i) 55 for (i = 0; i < ctx->num_pages; ++i)
59 free_page((unsigned long)ctx->pages[i]); 56 free_page((unsigned long)ctx->pages[i]);
60 kfree(ctx->pages); 57 kfree(ctx->pages);
@@ -63,26 +60,24 @@ qeth_eddp_free_context(struct qeth_eddp_context *ctx)
63} 60}
64 61
65 62
66static inline void 63static void qeth_eddp_get_context(struct qeth_eddp_context *ctx)
67qeth_eddp_get_context(struct qeth_eddp_context *ctx)
68{ 64{
69 atomic_inc(&ctx->refcnt); 65 atomic_inc(&ctx->refcnt);
70} 66}
71 67
72void 68void qeth_eddp_put_context(struct qeth_eddp_context *ctx)
73qeth_eddp_put_context(struct qeth_eddp_context *ctx)
74{ 69{
75 if (atomic_dec_return(&ctx->refcnt) == 0) 70 if (atomic_dec_return(&ctx->refcnt) == 0)
76 qeth_eddp_free_context(ctx); 71 qeth_eddp_free_context(ctx);
77} 72}
73EXPORT_SYMBOL_GPL(qeth_eddp_put_context);
78 74
79void 75void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
80qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
81{ 76{
82 struct qeth_eddp_context_reference *ref; 77 struct qeth_eddp_context_reference *ref;
83 78
84 QETH_DBF_TEXT(trace, 6, "eddprctx"); 79 QETH_DBF_TEXT(TRACE, 6, "eddprctx");
85 while (!list_empty(&buf->ctx_list)){ 80 while (!list_empty(&buf->ctx_list)) {
86 ref = list_entry(buf->ctx_list.next, 81 ref = list_entry(buf->ctx_list.next,
87 struct qeth_eddp_context_reference, list); 82 struct qeth_eddp_context_reference, list);
88 qeth_eddp_put_context(ref->ctx); 83 qeth_eddp_put_context(ref->ctx);
@@ -91,13 +86,12 @@ qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
91 } 86 }
92} 87}
93 88
94static int 89static int qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
95qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf, 90 struct qeth_eddp_context *ctx)
96 struct qeth_eddp_context *ctx)
97{ 91{
98 struct qeth_eddp_context_reference *ref; 92 struct qeth_eddp_context_reference *ref;
99 93
100 QETH_DBF_TEXT(trace, 6, "eddprfcx"); 94 QETH_DBF_TEXT(TRACE, 6, "eddprfcx");
101 ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC); 95 ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
102 if (ref == NULL) 96 if (ref == NULL)
103 return -ENOMEM; 97 return -ENOMEM;
@@ -107,10 +101,8 @@ qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
107 return 0; 101 return 0;
108} 102}
109 103
110int 104int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
111qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue, 105 struct qeth_eddp_context *ctx, int index)
112 struct qeth_eddp_context *ctx,
113 int index)
114{ 106{
115 struct qeth_qdio_out_buffer *buf = NULL; 107 struct qeth_qdio_out_buffer *buf = NULL;
116 struct qdio_buffer *buffer; 108 struct qdio_buffer *buffer;
@@ -120,10 +112,10 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
120 int must_refcnt = 1; 112 int must_refcnt = 1;
121 int i; 113 int i;
122 114
123 QETH_DBF_TEXT(trace, 5, "eddpfibu"); 115 QETH_DBF_TEXT(TRACE, 5, "eddpfibu");
124 while (elements > 0) { 116 while (elements > 0) {
125 buf = &queue->bufs[index]; 117 buf = &queue->bufs[index];
126 if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY){ 118 if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
127 /* normally this should not happen since we checked for 119 /* normally this should not happen since we checked for
128 * available elements in qeth_check_elements_for_context 120 * available elements in qeth_check_elements_for_context
129 */ 121 */
@@ -148,9 +140,9 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
148 must_refcnt = 1; 140 must_refcnt = 1;
149 continue; 141 continue;
150 } 142 }
151 if (must_refcnt){ 143 if (must_refcnt) {
152 must_refcnt = 0; 144 must_refcnt = 0;
153 if (qeth_eddp_buf_ref_context(buf, ctx)){ 145 if (qeth_eddp_buf_ref_context(buf, ctx)) {
154 PRINT_WARN("no memory to create eddp context " 146 PRINT_WARN("no memory to create eddp context "
155 "reference\n"); 147 "reference\n");
156 goto out_check; 148 goto out_check;
@@ -158,7 +150,7 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
158 } 150 }
159 buffer = buf->buffer; 151 buffer = buf->buffer;
160 /* fill one skb into buffer */ 152 /* fill one skb into buffer */
161 for (i = 0; i < ctx->elements_per_skb; ++i){ 153 for (i = 0; i < ctx->elements_per_skb; ++i) {
162 if (ctx->elements[element].length != 0) { 154 if (ctx->elements[element].length != 0) {
163 buffer->element[buf->next_element_to_fill]. 155 buffer->element[buf->next_element_to_fill].
164 addr = ctx->elements[element].addr; 156 addr = ctx->elements[element].addr;
@@ -174,16 +166,16 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
174 } 166 }
175out_check: 167out_check:
176 if (!queue->do_pack) { 168 if (!queue->do_pack) {
177 QETH_DBF_TEXT(trace, 6, "fillbfnp"); 169 QETH_DBF_TEXT(TRACE, 6, "fillbfnp");
178 /* set state to PRIMED -> will be flushed */ 170 /* set state to PRIMED -> will be flushed */
179 if (buf->next_element_to_fill > 0){ 171 if (buf->next_element_to_fill > 0) {
180 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); 172 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
181 flush_cnt++; 173 flush_cnt++;
182 } 174 }
183 } else { 175 } else {
184 if (queue->card->options.performance_stats) 176 if (queue->card->options.performance_stats)
185 queue->card->perf_stats.skbs_sent_pack++; 177 queue->card->perf_stats.skbs_sent_pack++;
186 QETH_DBF_TEXT(trace, 6, "fillbfpa"); 178 QETH_DBF_TEXT(TRACE, 6, "fillbfpa");
187 if (buf->next_element_to_fill >= 179 if (buf->next_element_to_fill >=
188 QETH_MAX_BUFFER_ELEMENTS(queue->card)) { 180 QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
189 /* 181 /*
@@ -198,9 +190,8 @@ out:
198 return flush_cnt; 190 return flush_cnt;
199} 191}
200 192
201static void 193static void qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
202qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, 194 struct qeth_eddp_data *eddp, int data_len)
203 struct qeth_eddp_data *eddp, int data_len)
204{ 195{
205 u8 *page; 196 u8 *page;
206 int page_remainder; 197 int page_remainder;
@@ -208,7 +199,7 @@ qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
208 int pkt_len; 199 int pkt_len;
209 struct qeth_eddp_element *element; 200 struct qeth_eddp_element *element;
210 201
211 QETH_DBF_TEXT(trace, 5, "eddpcrsh"); 202 QETH_DBF_TEXT(TRACE, 5, "eddpcrsh");
212 page = ctx->pages[ctx->offset >> PAGE_SHIFT]; 203 page = ctx->pages[ctx->offset >> PAGE_SHIFT];
213 page_offset = ctx->offset % PAGE_SIZE; 204 page_offset = ctx->offset % PAGE_SIZE;
214 element = &ctx->elements[ctx->num_elements]; 205 element = &ctx->elements[ctx->num_elements];
@@ -220,7 +211,7 @@ qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
220 pkt_len += VLAN_HLEN; 211 pkt_len += VLAN_HLEN;
221 /* does complete packet fit in current page ? */ 212 /* does complete packet fit in current page ? */
222 page_remainder = PAGE_SIZE - page_offset; 213 page_remainder = PAGE_SIZE - page_offset;
223 if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)){ 214 if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
224 /* no -> go to start of next page */ 215 /* no -> go to start of next page */
225 ctx->offset += page_remainder; 216 ctx->offset += page_remainder;
226 page = ctx->pages[ctx->offset >> PAGE_SHIFT]; 217 page = ctx->pages[ctx->offset >> PAGE_SHIFT];
@@ -232,14 +223,14 @@ qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
232 ctx->offset += sizeof(struct qeth_hdr); 223 ctx->offset += sizeof(struct qeth_hdr);
233 page_offset += sizeof(struct qeth_hdr); 224 page_offset += sizeof(struct qeth_hdr);
234 /* add mac header (?) */ 225 /* add mac header (?) */
235 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){ 226 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
236 memcpy(page + page_offset, &eddp->mac, ETH_HLEN); 227 memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
237 element->length += ETH_HLEN; 228 element->length += ETH_HLEN;
238 ctx->offset += ETH_HLEN; 229 ctx->offset += ETH_HLEN;
239 page_offset += ETH_HLEN; 230 page_offset += ETH_HLEN;
240 } 231 }
241 /* add VLAN tag */ 232 /* add VLAN tag */
242 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)){ 233 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
243 memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN); 234 memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
244 element->length += VLAN_HLEN; 235 element->length += VLAN_HLEN;
245 ctx->offset += VLAN_HLEN; 236 ctx->offset += VLAN_HLEN;
@@ -258,16 +249,15 @@ qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
258 ctx->offset += eddp->thl; 249 ctx->offset += eddp->thl;
259} 250}
260 251
261static void 252static void qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp,
262qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, 253 int len, __wsum *hcsum)
263 __wsum *hcsum)
264{ 254{
265 struct skb_frag_struct *frag; 255 struct skb_frag_struct *frag;
266 int left_in_frag; 256 int left_in_frag;
267 int copy_len; 257 int copy_len;
268 u8 *src; 258 u8 *src;
269 259
270 QETH_DBF_TEXT(trace, 5, "eddpcdtc"); 260 QETH_DBF_TEXT(TRACE, 5, "eddpcdtc");
271 if (skb_shinfo(eddp->skb)->nr_frags == 0) { 261 if (skb_shinfo(eddp->skb)->nr_frags == 0) {
272 skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset, 262 skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset,
273 dst, len); 263 dst, len);
@@ -278,16 +268,17 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
278 while (len > 0) { 268 while (len > 0) {
279 if (eddp->frag < 0) { 269 if (eddp->frag < 0) {
280 /* we're in skb->data */ 270 /* we're in skb->data */
281 left_in_frag = (eddp->skb->len - eddp->skb->data_len) 271 left_in_frag = (eddp->skb->len -
272 eddp->skb->data_len)
282 - eddp->skb_offset; 273 - eddp->skb_offset;
283 src = eddp->skb->data + eddp->skb_offset; 274 src = eddp->skb->data + eddp->skb_offset;
284 } else { 275 } else {
285 frag = &skb_shinfo(eddp->skb)-> 276 frag = &skb_shinfo(eddp->skb)->frags[
286 frags[eddp->frag]; 277 eddp->frag];
287 left_in_frag = frag->size - eddp->frag_offset; 278 left_in_frag = frag->size - eddp->frag_offset;
288 src = (u8 *)( 279 src = (u8 *)((page_to_pfn(frag->page) <<
289 (page_to_pfn(frag->page) << PAGE_SHIFT)+ 280 PAGE_SHIFT) + frag->page_offset +
290 frag->page_offset + eddp->frag_offset); 281 eddp->frag_offset);
291 } 282 }
292 if (left_in_frag <= 0) { 283 if (left_in_frag <= 0) {
293 eddp->frag++; 284 eddp->frag++;
@@ -305,10 +296,8 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
305 } 296 }
306} 297}
307 298
308static void 299static void qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
309qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, 300 struct qeth_eddp_data *eddp, int data_len, __wsum hcsum)
310 struct qeth_eddp_data *eddp, int data_len,
311 __wsum hcsum)
312{ 301{
313 u8 *page; 302 u8 *page;
314 int page_remainder; 303 int page_remainder;
@@ -316,13 +305,13 @@ qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
316 struct qeth_eddp_element *element; 305 struct qeth_eddp_element *element;
317 int first_lap = 1; 306 int first_lap = 1;
318 307
319 QETH_DBF_TEXT(trace, 5, "eddpcsdt"); 308 QETH_DBF_TEXT(TRACE, 5, "eddpcsdt");
320 page = ctx->pages[ctx->offset >> PAGE_SHIFT]; 309 page = ctx->pages[ctx->offset >> PAGE_SHIFT];
321 page_offset = ctx->offset % PAGE_SIZE; 310 page_offset = ctx->offset % PAGE_SIZE;
322 element = &ctx->elements[ctx->num_elements]; 311 element = &ctx->elements[ctx->num_elements];
323 while (data_len){ 312 while (data_len) {
324 page_remainder = PAGE_SIZE - page_offset; 313 page_remainder = PAGE_SIZE - page_offset;
325 if (page_remainder < data_len){ 314 if (page_remainder < data_len) {
326 qeth_eddp_copy_data_tcp(page + page_offset, eddp, 315 qeth_eddp_copy_data_tcp(page + page_offset, eddp,
327 page_remainder, &hcsum); 316 page_remainder, &hcsum);
328 element->length += page_remainder; 317 element->length += page_remainder;
@@ -352,12 +341,12 @@ qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
352 ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum); 341 ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
353} 342}
354 343
355static __wsum 344static __wsum qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp,
356qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) 345 int data_len)
357{ 346{
358 __wsum phcsum; /* pseudo header checksum */ 347 __wsum phcsum; /* pseudo header checksum */
359 348
360 QETH_DBF_TEXT(trace, 5, "eddpckt4"); 349 QETH_DBF_TEXT(TRACE, 5, "eddpckt4");
361 eddp->th.tcp.h.check = 0; 350 eddp->th.tcp.h.check = 0;
362 /* compute pseudo header checksum */ 351 /* compute pseudo header checksum */
363 phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr, 352 phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
@@ -366,13 +355,13 @@ qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
366 return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum); 355 return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
367} 356}
368 357
369static __wsum 358static __wsum qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp,
370qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) 359 int data_len)
371{ 360{
372 __be32 proto; 361 __be32 proto;
373 __wsum phcsum; /* pseudo header checksum */ 362 __wsum phcsum; /* pseudo header checksum */
374 363
375 QETH_DBF_TEXT(trace, 5, "eddpckt6"); 364 QETH_DBF_TEXT(TRACE, 5, "eddpckt6");
376 eddp->th.tcp.h.check = 0; 365 eddp->th.tcp.h.check = 0;
377 /* compute pseudo header checksum */ 366 /* compute pseudo header checksum */
378 phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr, 367 phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
@@ -384,14 +373,14 @@ qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
384 return phcsum; 373 return phcsum;
385} 374}
386 375
387static struct qeth_eddp_data * 376static struct qeth_eddp_data *qeth_eddp_create_eddp_data(struct qeth_hdr *qh,
388qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) 377 u8 *nh, u8 nhl, u8 *th, u8 thl)
389{ 378{
390 struct qeth_eddp_data *eddp; 379 struct qeth_eddp_data *eddp;
391 380
392 QETH_DBF_TEXT(trace, 5, "eddpcrda"); 381 QETH_DBF_TEXT(TRACE, 5, "eddpcrda");
393 eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC); 382 eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
394 if (eddp){ 383 if (eddp) {
395 eddp->nhl = nhl; 384 eddp->nhl = nhl;
396 eddp->thl = thl; 385 eddp->thl = thl;
397 memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr)); 386 memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
@@ -402,40 +391,35 @@ qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
402 return eddp; 391 return eddp;
403} 392}
404 393
405static void 394static void __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
406__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, 395 struct qeth_eddp_data *eddp)
407 struct qeth_eddp_data *eddp)
408{ 396{
409 struct tcphdr *tcph; 397 struct tcphdr *tcph;
410 int data_len; 398 int data_len;
411 __wsum hcsum; 399 __wsum hcsum;
412 400
413 QETH_DBF_TEXT(trace, 5, "eddpftcp"); 401 QETH_DBF_TEXT(TRACE, 5, "eddpftcp");
414 eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl; 402 eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
415 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 403 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
416 eddp->skb_offset += sizeof(struct ethhdr); 404 eddp->skb_offset += sizeof(struct ethhdr);
417#ifdef CONFIG_QETH_VLAN 405 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
418 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) 406 eddp->skb_offset += VLAN_HLEN;
419 eddp->skb_offset += VLAN_HLEN; 407 }
420#endif /* CONFIG_QETH_VLAN */
421 }
422 tcph = tcp_hdr(eddp->skb); 408 tcph = tcp_hdr(eddp->skb);
423 while (eddp->skb_offset < eddp->skb->len) { 409 while (eddp->skb_offset < eddp->skb->len) {
424 data_len = min((int)skb_shinfo(eddp->skb)->gso_size, 410 data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
425 (int)(eddp->skb->len - eddp->skb_offset)); 411 (int)(eddp->skb->len - eddp->skb_offset));
426 /* prepare qdio hdr */ 412 /* prepare qdio hdr */
427 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){ 413 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
428 eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN + 414 eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
429 eddp->nhl + eddp->thl; 415 eddp->nhl + eddp->thl;
430#ifdef CONFIG_QETH_VLAN
431 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) 416 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
432 eddp->qh.hdr.l2.pkt_length += VLAN_HLEN; 417 eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
433#endif /* CONFIG_QETH_VLAN */
434 } else 418 } else
435 eddp->qh.hdr.l3.length = data_len + eddp->nhl + 419 eddp->qh.hdr.l3.length = data_len + eddp->nhl +
436 eddp->thl; 420 eddp->thl;
437 /* prepare ip hdr */ 421 /* prepare ip hdr */
438 if (eddp->skb->protocol == htons(ETH_P_IP)){ 422 if (eddp->skb->protocol == htons(ETH_P_IP)) {
439 eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl + 423 eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl +
440 eddp->thl); 424 eddp->thl);
441 eddp->nh.ip4.h.check = 0; 425 eddp->nh.ip4.h.check = 0;
@@ -443,9 +427,10 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
443 ip_fast_csum((u8 *)&eddp->nh.ip4.h, 427 ip_fast_csum((u8 *)&eddp->nh.ip4.h,
444 eddp->nh.ip4.h.ihl); 428 eddp->nh.ip4.h.ihl);
445 } else 429 } else
446 eddp->nh.ip6.h.payload_len = htons(data_len + eddp->thl); 430 eddp->nh.ip6.h.payload_len = htons(data_len +
431 eddp->thl);
447 /* prepare tcp hdr */ 432 /* prepare tcp hdr */
448 if (data_len == (eddp->skb->len - eddp->skb_offset)){ 433 if (data_len == (eddp->skb->len - eddp->skb_offset)) {
449 /* last segment -> set FIN and PSH flags */ 434 /* last segment -> set FIN and PSH flags */
450 eddp->th.tcp.h.fin = tcph->fin; 435 eddp->th.tcp.h.fin = tcph->fin;
451 eddp->th.tcp.h.psh = tcph->psh; 436 eddp->th.tcp.h.psh = tcph->psh;
@@ -462,17 +447,17 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
462 /* prepare headers for next round */ 447 /* prepare headers for next round */
463 if (eddp->skb->protocol == htons(ETH_P_IP)) 448 if (eddp->skb->protocol == htons(ETH_P_IP))
464 eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1); 449 eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1);
465 eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) + data_len); 450 eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) +
451 data_len);
466 } 452 }
467} 453}
468 454
469static int 455static int qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
470qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, 456 struct sk_buff *skb, struct qeth_hdr *qhdr)
471 struct sk_buff *skb, struct qeth_hdr *qhdr)
472{ 457{
473 struct qeth_eddp_data *eddp = NULL; 458 struct qeth_eddp_data *eddp = NULL;
474 459
475 QETH_DBF_TEXT(trace, 5, "eddpficx"); 460 QETH_DBF_TEXT(TRACE, 5, "eddpficx");
476 /* create our segmentation headers and copy original headers */ 461 /* create our segmentation headers and copy original headers */
477 if (skb->protocol == htons(ETH_P_IP)) 462 if (skb->protocol == htons(ETH_P_IP))
478 eddp = qeth_eddp_create_eddp_data(qhdr, 463 eddp = qeth_eddp_create_eddp_data(qhdr,
@@ -488,18 +473,16 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
488 tcp_hdrlen(skb)); 473 tcp_hdrlen(skb));
489 474
490 if (eddp == NULL) { 475 if (eddp == NULL) {
491 QETH_DBF_TEXT(trace, 2, "eddpfcnm"); 476 QETH_DBF_TEXT(TRACE, 2, "eddpfcnm");
492 return -ENOMEM; 477 return -ENOMEM;
493 } 478 }
494 if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 479 if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
495 skb_set_mac_header(skb, sizeof(struct qeth_hdr)); 480 skb_set_mac_header(skb, sizeof(struct qeth_hdr));
496 memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN); 481 memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
497#ifdef CONFIG_QETH_VLAN
498 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) { 482 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
499 eddp->vlan[0] = skb->protocol; 483 eddp->vlan[0] = skb->protocol;
500 eddp->vlan[1] = htons(vlan_tx_tag_get(skb)); 484 eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
501 } 485 }
502#endif /* CONFIG_QETH_VLAN */
503 } 486 }
504 /* the next flags will only be set on the last segment */ 487 /* the next flags will only be set on the last segment */
505 eddp->th.tcp.h.fin = 0; 488 eddp->th.tcp.h.fin = 0;
@@ -511,16 +494,15 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
511 return 0; 494 return 0;
512} 495}
513 496
514static void 497static void qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx,
515qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, 498 struct sk_buff *skb, int hdr_len)
516 int hdr_len)
517{ 499{
518 int skbs_per_page; 500 int skbs_per_page;
519 501
520 QETH_DBF_TEXT(trace, 5, "eddpcanp"); 502 QETH_DBF_TEXT(TRACE, 5, "eddpcanp");
521 /* can we put multiple skbs in one page? */ 503 /* can we put multiple skbs in one page? */
522 skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len); 504 skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
523 if (skbs_per_page > 1){ 505 if (skbs_per_page > 1) {
524 ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) / 506 ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
525 skbs_per_page + 1; 507 skbs_per_page + 1;
526 ctx->elements_per_skb = 1; 508 ctx->elements_per_skb = 1;
@@ -535,49 +517,47 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
535 (skb_shinfo(skb)->gso_segs + 1); 517 (skb_shinfo(skb)->gso_segs + 1);
536} 518}
537 519
538static struct qeth_eddp_context * 520static struct qeth_eddp_context *qeth_eddp_create_context_generic(
539qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, 521 struct qeth_card *card, struct sk_buff *skb, int hdr_len)
540 int hdr_len)
541{ 522{
542 struct qeth_eddp_context *ctx = NULL; 523 struct qeth_eddp_context *ctx = NULL;
543 u8 *addr; 524 u8 *addr;
544 int i; 525 int i;
545 526
546 QETH_DBF_TEXT(trace, 5, "creddpcg"); 527 QETH_DBF_TEXT(TRACE, 5, "creddpcg");
547 /* create the context and allocate pages */ 528 /* create the context and allocate pages */
548 ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC); 529 ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
549 if (ctx == NULL){ 530 if (ctx == NULL) {
550 QETH_DBF_TEXT(trace, 2, "ceddpcn1"); 531 QETH_DBF_TEXT(TRACE, 2, "ceddpcn1");
551 return NULL; 532 return NULL;
552 } 533 }
553 ctx->type = QETH_LARGE_SEND_EDDP; 534 ctx->type = QETH_LARGE_SEND_EDDP;
554 qeth_eddp_calc_num_pages(ctx, skb, hdr_len); 535 qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
555 if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)){ 536 if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
556 QETH_DBF_TEXT(trace, 2, "ceddpcis"); 537 QETH_DBF_TEXT(TRACE, 2, "ceddpcis");
557 kfree(ctx); 538 kfree(ctx);
558 return NULL; 539 return NULL;
559 } 540 }
560 ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC); 541 ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
561 if (ctx->pages == NULL){ 542 if (ctx->pages == NULL) {
562 QETH_DBF_TEXT(trace, 2, "ceddpcn2"); 543 QETH_DBF_TEXT(TRACE, 2, "ceddpcn2");
563 kfree(ctx); 544 kfree(ctx);
564 return NULL; 545 return NULL;
565 } 546 }
566 for (i = 0; i < ctx->num_pages; ++i){ 547 for (i = 0; i < ctx->num_pages; ++i) {
567 addr = (u8 *)__get_free_page(GFP_ATOMIC); 548 addr = (u8 *)get_zeroed_page(GFP_ATOMIC);
568 if (addr == NULL){ 549 if (addr == NULL) {
569 QETH_DBF_TEXT(trace, 2, "ceddpcn3"); 550 QETH_DBF_TEXT(TRACE, 2, "ceddpcn3");
570 ctx->num_pages = i; 551 ctx->num_pages = i;
571 qeth_eddp_free_context(ctx); 552 qeth_eddp_free_context(ctx);
572 return NULL; 553 return NULL;
573 } 554 }
574 memset(addr, 0, PAGE_SIZE);
575 ctx->pages[i] = addr; 555 ctx->pages[i] = addr;
576 } 556 }
577 ctx->elements = kcalloc(ctx->num_elements, 557 ctx->elements = kcalloc(ctx->num_elements,
578 sizeof(struct qeth_eddp_element), GFP_ATOMIC); 558 sizeof(struct qeth_eddp_element), GFP_ATOMIC);
579 if (ctx->elements == NULL){ 559 if (ctx->elements == NULL) {
580 QETH_DBF_TEXT(trace, 2, "ceddpcn4"); 560 QETH_DBF_TEXT(TRACE, 2, "ceddpcn4");
581 qeth_eddp_free_context(ctx); 561 qeth_eddp_free_context(ctx);
582 return NULL; 562 return NULL;
583 } 563 }
@@ -587,31 +567,31 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
587 return ctx; 567 return ctx;
588} 568}
589 569
590static struct qeth_eddp_context * 570static struct qeth_eddp_context *qeth_eddp_create_context_tcp(
591qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, 571 struct qeth_card *card, struct sk_buff *skb,
592 struct qeth_hdr *qhdr) 572 struct qeth_hdr *qhdr)
593{ 573{
594 struct qeth_eddp_context *ctx = NULL; 574 struct qeth_eddp_context *ctx = NULL;
595 575
596 QETH_DBF_TEXT(trace, 5, "creddpct"); 576 QETH_DBF_TEXT(TRACE, 5, "creddpct");
597 if (skb->protocol == htons(ETH_P_IP)) 577 if (skb->protocol == htons(ETH_P_IP))
598 ctx = qeth_eddp_create_context_generic(card, skb, 578 ctx = qeth_eddp_create_context_generic(card, skb,
599 (sizeof(struct qeth_hdr) + 579 (sizeof(struct qeth_hdr) +
600 ip_hdrlen(skb) + 580 ip_hdrlen(skb) +
601 tcp_hdrlen(skb))); 581 tcp_hdrlen(skb)));
602 else if (skb->protocol == htons(ETH_P_IPV6)) 582 else if (skb->protocol == htons(ETH_P_IPV6))
603 ctx = qeth_eddp_create_context_generic(card, skb, 583 ctx = qeth_eddp_create_context_generic(card, skb,
604 sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) + 584 sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
605 tcp_hdrlen(skb)); 585 tcp_hdrlen(skb));
606 else 586 else
607 QETH_DBF_TEXT(trace, 2, "cetcpinv"); 587 QETH_DBF_TEXT(TRACE, 2, "cetcpinv");
608 588
609 if (ctx == NULL) { 589 if (ctx == NULL) {
610 QETH_DBF_TEXT(trace, 2, "creddpnl"); 590 QETH_DBF_TEXT(TRACE, 2, "creddpnl");
611 return NULL; 591 return NULL;
612 } 592 }
613 if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)){ 593 if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
614 QETH_DBF_TEXT(trace, 2, "ceddptfe"); 594 QETH_DBF_TEXT(TRACE, 2, "ceddptfe");
615 qeth_eddp_free_context(ctx); 595 qeth_eddp_free_context(ctx);
616 return NULL; 596 return NULL;
617 } 597 }
@@ -619,16 +599,103 @@ qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
619 return ctx; 599 return ctx;
620} 600}
621 601
622struct qeth_eddp_context * 602struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *card,
623qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb, 603 struct sk_buff *skb, struct qeth_hdr *qhdr,
624 struct qeth_hdr *qhdr, unsigned char sk_protocol) 604 unsigned char sk_protocol)
625{ 605{
626 QETH_DBF_TEXT(trace, 5, "creddpc"); 606 QETH_DBF_TEXT(TRACE, 5, "creddpc");
627 switch (sk_protocol) { 607 switch (sk_protocol) {
628 case IPPROTO_TCP: 608 case IPPROTO_TCP:
629 return qeth_eddp_create_context_tcp(card, skb, qhdr); 609 return qeth_eddp_create_context_tcp(card, skb, qhdr);
630 default: 610 default:
631 QETH_DBF_TEXT(trace, 2, "eddpinvp"); 611 QETH_DBF_TEXT(TRACE, 2, "eddpinvp");
632 } 612 }
633 return NULL; 613 return NULL;
634} 614}
615EXPORT_SYMBOL_GPL(qeth_eddp_create_context);
616
617void qeth_tso_fill_header(struct qeth_card *card, struct qeth_hdr *qhdr,
618 struct sk_buff *skb)
619{
620 struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
621 struct tcphdr *tcph = tcp_hdr(skb);
622 struct iphdr *iph = ip_hdr(skb);
623 struct ipv6hdr *ip6h = ipv6_hdr(skb);
624
625 QETH_DBF_TEXT(TRACE, 5, "tsofhdr");
626
627 /*fix header to TSO values ...*/
628 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
629 /*set values which are fix for the first approach ...*/
630 hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
631 hdr->ext.imb_hdr_no = 1;
632 hdr->ext.hdr_type = 1;
633 hdr->ext.hdr_version = 1;
634 hdr->ext.hdr_len = 28;
635 /*insert non-fix values */
636 hdr->ext.mss = skb_shinfo(skb)->gso_size;
637 hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
638 hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
639 sizeof(struct qeth_hdr_tso));
640 tcph->check = 0;
641 if (skb->protocol == ETH_P_IPV6) {
642 ip6h->payload_len = 0;
643 tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
644 0, IPPROTO_TCP, 0);
645 } else {
646 /*OSA want us to set these values ...*/
647 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
648 0, IPPROTO_TCP, 0);
649 iph->tot_len = 0;
650 iph->check = 0;
651 }
652}
653EXPORT_SYMBOL_GPL(qeth_tso_fill_header);
654
655void qeth_tx_csum(struct sk_buff *skb)
656{
657 int tlen;
658 if (skb->protocol == htons(ETH_P_IP)) {
659 tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);
660 switch (ip_hdr(skb)->protocol) {
661 case IPPROTO_TCP:
662 tcp_hdr(skb)->check = 0;
663 tcp_hdr(skb)->check = csum_tcpudp_magic(
664 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
665 tlen, ip_hdr(skb)->protocol,
666 skb_checksum(skb, skb_transport_offset(skb),
667 tlen, 0));
668 break;
669 case IPPROTO_UDP:
670 udp_hdr(skb)->check = 0;
671 udp_hdr(skb)->check = csum_tcpudp_magic(
672 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
673 tlen, ip_hdr(skb)->protocol,
674 skb_checksum(skb, skb_transport_offset(skb),
675 tlen, 0));
676 break;
677 }
678 } else if (skb->protocol == htons(ETH_P_IPV6)) {
679 switch (ipv6_hdr(skb)->nexthdr) {
680 case IPPROTO_TCP:
681 tcp_hdr(skb)->check = 0;
682 tcp_hdr(skb)->check = csum_ipv6_magic(
683 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
684 ipv6_hdr(skb)->payload_len,
685 ipv6_hdr(skb)->nexthdr,
686 skb_checksum(skb, skb_transport_offset(skb),
687 ipv6_hdr(skb)->payload_len, 0));
688 break;
689 case IPPROTO_UDP:
690 udp_hdr(skb)->check = 0;
691 udp_hdr(skb)->check = csum_ipv6_magic(
692 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
693 ipv6_hdr(skb)->payload_len,
694 ipv6_hdr(skb)->nexthdr,
695 skb_checksum(skb, skb_transport_offset(skb),
696 ipv6_hdr(skb)->payload_len, 0));
697 break;
698 }
699 }
700}
701EXPORT_SYMBOL_GPL(qeth_tx_csum);
diff --git a/drivers/s390/net/qeth_eddp.h b/drivers/s390/net/qeth_core_offl.h
index 52910c9252c0..86bf7df8cf16 100644
--- a/drivers/s390/net/qeth_eddp.h
+++ b/drivers/s390/net/qeth_core_offl.h
@@ -1,15 +1,13 @@
1/* 1/*
2 * linux/drivers/s390/net/qeth_eddp.h 2 * drivers/s390/net/qeth_core_offl.h
3 *
4 * Header file for qeth enhanced device driver packing.
5 *
6 * Copyright 2004 IBM Corporation
7 *
8 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
9 * 3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Thomas Spatzier <tspat@de.ibm.com>,
6 * Frank Blaschka <frank.blaschka@de.ibm.com>
10 */ 7 */
11#ifndef __QETH_EDDP_H__ 8
12#define __QETH_EDDP_H__ 9#ifndef __QETH_CORE_OFFL_H__
10#define __QETH_CORE_OFFL_H__
13 11
14struct qeth_eddp_element { 12struct qeth_eddp_element {
15 u32 flags; 13 u32 flags;
@@ -33,25 +31,6 @@ struct qeth_eddp_context_reference {
33 struct qeth_eddp_context *ctx; 31 struct qeth_eddp_context *ctx;
34}; 32};
35 33
36extern struct qeth_eddp_context *
37qeth_eddp_create_context(struct qeth_card *,struct sk_buff *,
38 struct qeth_hdr *, unsigned char);
39
40extern void
41qeth_eddp_put_context(struct qeth_eddp_context *);
42
43extern int
44qeth_eddp_fill_buffer(struct qeth_qdio_out_q *,struct qeth_eddp_context *,int);
45
46extern void
47qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *);
48
49extern int
50qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *,
51 struct qeth_eddp_context *);
52/*
53 * Data used for fragmenting a IP packet.
54 */
55struct qeth_eddp_data { 34struct qeth_eddp_data {
56 struct qeth_hdr qh; 35 struct qeth_hdr qh;
57 struct ethhdr mac; 36 struct ethhdr mac;
@@ -81,4 +60,17 @@ struct qeth_eddp_data {
81 int frag_offset; 60 int frag_offset;
82} __attribute__ ((packed)); 61} __attribute__ ((packed));
83 62
84#endif /* __QETH_EDDP_H__ */ 63extern struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *,
64 struct sk_buff *, struct qeth_hdr *, unsigned char);
65extern void qeth_eddp_put_context(struct qeth_eddp_context *);
66extern int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *,
67 struct qeth_eddp_context *, int);
68extern void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *);
69extern int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *,
70 struct qeth_eddp_context *);
71
72void qeth_tso_fill_header(struct qeth_card *, struct qeth_hdr *,
73 struct sk_buff *);
74void qeth_tx_csum(struct sk_buff *skb);
75
76#endif /* __QETH_CORE_EDDP_H__ */
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
new file mode 100644
index 000000000000..08a50f057284
--- /dev/null
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -0,0 +1,651 @@
1/*
2 * drivers/s390/net/qeth_core_sys.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#include <linux/list.h>
12#include <linux/rwsem.h>
13#include <asm/ebcdic.h>
14
15#include "qeth_core.h"
16
17static ssize_t qeth_dev_state_show(struct device *dev,
18 struct device_attribute *attr, char *buf)
19{
20 struct qeth_card *card = dev_get_drvdata(dev);
21 if (!card)
22 return -EINVAL;
23
24 switch (card->state) {
25 case CARD_STATE_DOWN:
26 return sprintf(buf, "DOWN\n");
27 case CARD_STATE_HARDSETUP:
28 return sprintf(buf, "HARDSETUP\n");
29 case CARD_STATE_SOFTSETUP:
30 return sprintf(buf, "SOFTSETUP\n");
31 case CARD_STATE_UP:
32 if (card->lan_online)
33 return sprintf(buf, "UP (LAN ONLINE)\n");
34 else
35 return sprintf(buf, "UP (LAN OFFLINE)\n");
36 case CARD_STATE_RECOVER:
37 return sprintf(buf, "RECOVER\n");
38 default:
39 return sprintf(buf, "UNKNOWN\n");
40 }
41}
42
43static DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL);
44
45static ssize_t qeth_dev_chpid_show(struct device *dev,
46 struct device_attribute *attr, char *buf)
47{
48 struct qeth_card *card = dev_get_drvdata(dev);
49 if (!card)
50 return -EINVAL;
51
52 return sprintf(buf, "%02X\n", card->info.chpid);
53}
54
55static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL);
56
57static ssize_t qeth_dev_if_name_show(struct device *dev,
58 struct device_attribute *attr, char *buf)
59{
60 struct qeth_card *card = dev_get_drvdata(dev);
61 if (!card)
62 return -EINVAL;
63 return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
64}
65
66static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
67
68static ssize_t qeth_dev_card_type_show(struct device *dev,
69 struct device_attribute *attr, char *buf)
70{
71 struct qeth_card *card = dev_get_drvdata(dev);
72 if (!card)
73 return -EINVAL;
74
75 return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
76}
77
78static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL);
79
80static inline const char *qeth_get_bufsize_str(struct qeth_card *card)
81{
82 if (card->qdio.in_buf_size == 16384)
83 return "16k";
84 else if (card->qdio.in_buf_size == 24576)
85 return "24k";
86 else if (card->qdio.in_buf_size == 32768)
87 return "32k";
88 else if (card->qdio.in_buf_size == 40960)
89 return "40k";
90 else
91 return "64k";
92}
93
94static ssize_t qeth_dev_inbuf_size_show(struct device *dev,
95 struct device_attribute *attr, char *buf)
96{
97 struct qeth_card *card = dev_get_drvdata(dev);
98 if (!card)
99 return -EINVAL;
100
101 return sprintf(buf, "%s\n", qeth_get_bufsize_str(card));
102}
103
104static DEVICE_ATTR(inbuf_size, 0444, qeth_dev_inbuf_size_show, NULL);
105
106static ssize_t qeth_dev_portno_show(struct device *dev,
107 struct device_attribute *attr, char *buf)
108{
109 struct qeth_card *card = dev_get_drvdata(dev);
110 if (!card)
111 return -EINVAL;
112
113 return sprintf(buf, "%i\n", card->info.portno);
114}
115
116static ssize_t qeth_dev_portno_store(struct device *dev,
117 struct device_attribute *attr, const char *buf, size_t count)
118{
119 struct qeth_card *card = dev_get_drvdata(dev);
120 char *tmp;
121 unsigned int portno;
122
123 if (!card)
124 return -EINVAL;
125
126 if ((card->state != CARD_STATE_DOWN) &&
127 (card->state != CARD_STATE_RECOVER))
128 return -EPERM;
129
130 portno = simple_strtoul(buf, &tmp, 16);
131 if (portno > QETH_MAX_PORTNO) {
132 PRINT_WARN("portno 0x%X is out of range\n", portno);
133 return -EINVAL;
134 }
135
136 card->info.portno = portno;
137 return count;
138}
139
140static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
141
142static ssize_t qeth_dev_portname_show(struct device *dev,
143 struct device_attribute *attr, char *buf)
144{
145 struct qeth_card *card = dev_get_drvdata(dev);
146 char portname[9] = {0, };
147
148 if (!card)
149 return -EINVAL;
150
151 if (card->info.portname_required) {
152 memcpy(portname, card->info.portname + 1, 8);
153 EBCASC(portname, 8);
154 return sprintf(buf, "%s\n", portname);
155 } else
156 return sprintf(buf, "no portname required\n");
157}
158
159static ssize_t qeth_dev_portname_store(struct device *dev,
160 struct device_attribute *attr, const char *buf, size_t count)
161{
162 struct qeth_card *card = dev_get_drvdata(dev);
163 char *tmp;
164 int i;
165
166 if (!card)
167 return -EINVAL;
168
169 if ((card->state != CARD_STATE_DOWN) &&
170 (card->state != CARD_STATE_RECOVER))
171 return -EPERM;
172
173 tmp = strsep((char **) &buf, "\n");
174 if ((strlen(tmp) > 8) || (strlen(tmp) == 0))
175 return -EINVAL;
176
177 card->info.portname[0] = strlen(tmp);
178 /* for beauty reasons */
179 for (i = 1; i < 9; i++)
180 card->info.portname[i] = ' ';
181 strcpy(card->info.portname + 1, tmp);
182 ASCEBC(card->info.portname + 1, 8);
183
184 return count;
185}
186
187static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show,
188 qeth_dev_portname_store);
189
190static ssize_t qeth_dev_prioqing_show(struct device *dev,
191 struct device_attribute *attr, char *buf)
192{
193 struct qeth_card *card = dev_get_drvdata(dev);
194
195 if (!card)
196 return -EINVAL;
197
198 switch (card->qdio.do_prio_queueing) {
199 case QETH_PRIO_Q_ING_PREC:
200 return sprintf(buf, "%s\n", "by precedence");
201 case QETH_PRIO_Q_ING_TOS:
202 return sprintf(buf, "%s\n", "by type of service");
203 default:
204 return sprintf(buf, "always queue %i\n",
205 card->qdio.default_out_queue);
206 }
207}
208
209static ssize_t qeth_dev_prioqing_store(struct device *dev,
210 struct device_attribute *attr, const char *buf, size_t count)
211{
212 struct qeth_card *card = dev_get_drvdata(dev);
213 char *tmp;
214
215 if (!card)
216 return -EINVAL;
217
218 if ((card->state != CARD_STATE_DOWN) &&
219 (card->state != CARD_STATE_RECOVER))
220 return -EPERM;
221
222 /* check if 1920 devices are supported ,
223 * if though we have to permit priority queueing
224 */
225 if (card->qdio.no_out_queues == 1) {
226 PRINT_WARN("Priority queueing disabled due "
227 "to hardware limitations!\n");
228 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
229 return -EPERM;
230 }
231
232 tmp = strsep((char **) &buf, "\n");
233 if (!strcmp(tmp, "prio_queueing_prec"))
234 card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
235 else if (!strcmp(tmp, "prio_queueing_tos"))
236 card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
237 else if (!strcmp(tmp, "no_prio_queueing:0")) {
238 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
239 card->qdio.default_out_queue = 0;
240 } else if (!strcmp(tmp, "no_prio_queueing:1")) {
241 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
242 card->qdio.default_out_queue = 1;
243 } else if (!strcmp(tmp, "no_prio_queueing:2")) {
244 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
245 card->qdio.default_out_queue = 2;
246 } else if (!strcmp(tmp, "no_prio_queueing:3")) {
247 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
248 card->qdio.default_out_queue = 3;
249 } else if (!strcmp(tmp, "no_prio_queueing")) {
250 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
251 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
252 } else {
253 PRINT_WARN("Unknown queueing type '%s'\n", tmp);
254 return -EINVAL;
255 }
256 return count;
257}
258
259static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show,
260 qeth_dev_prioqing_store);
261
262static ssize_t qeth_dev_bufcnt_show(struct device *dev,
263 struct device_attribute *attr, char *buf)
264{
265 struct qeth_card *card = dev_get_drvdata(dev);
266
267 if (!card)
268 return -EINVAL;
269
270 return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
271}
272
273static ssize_t qeth_dev_bufcnt_store(struct device *dev,
274 struct device_attribute *attr, const char *buf, size_t count)
275{
276 struct qeth_card *card = dev_get_drvdata(dev);
277 char *tmp;
278 int cnt, old_cnt;
279 int rc;
280
281 if (!card)
282 return -EINVAL;
283
284 if ((card->state != CARD_STATE_DOWN) &&
285 (card->state != CARD_STATE_RECOVER))
286 return -EPERM;
287
288 old_cnt = card->qdio.in_buf_pool.buf_count;
289 cnt = simple_strtoul(buf, &tmp, 10);
290 cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
291 ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
292 if (old_cnt != cnt) {
293 rc = qeth_realloc_buffer_pool(card, cnt);
294 if (rc)
295 PRINT_WARN("Error (%d) while setting "
296 "buffer count.\n", rc);
297 }
298 return count;
299}
300
301static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
302 qeth_dev_bufcnt_store);
303
304static ssize_t qeth_dev_recover_store(struct device *dev,
305 struct device_attribute *attr, const char *buf, size_t count)
306{
307 struct qeth_card *card = dev_get_drvdata(dev);
308 char *tmp;
309 int i;
310
311 if (!card)
312 return -EINVAL;
313
314 if (card->state != CARD_STATE_UP)
315 return -EPERM;
316
317 i = simple_strtoul(buf, &tmp, 16);
318 if (i == 1)
319 qeth_schedule_recovery(card);
320
321 return count;
322}
323
324static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
325
326static ssize_t qeth_dev_performance_stats_show(struct device *dev,
327 struct device_attribute *attr, char *buf)
328{
329 struct qeth_card *card = dev_get_drvdata(dev);
330
331 if (!card)
332 return -EINVAL;
333
334 return sprintf(buf, "%i\n", card->options.performance_stats ? 1:0);
335}
336
337static ssize_t qeth_dev_performance_stats_store(struct device *dev,
338 struct device_attribute *attr, const char *buf, size_t count)
339{
340 struct qeth_card *card = dev_get_drvdata(dev);
341 char *tmp;
342 int i;
343
344 if (!card)
345 return -EINVAL;
346
347 i = simple_strtoul(buf, &tmp, 16);
348 if ((i == 0) || (i == 1)) {
349 if (i == card->options.performance_stats)
350 return count;
351 card->options.performance_stats = i;
352 if (i == 0)
353 memset(&card->perf_stats, 0,
354 sizeof(struct qeth_perf_stats));
355 card->perf_stats.initial_rx_packets = card->stats.rx_packets;
356 card->perf_stats.initial_tx_packets = card->stats.tx_packets;
357 } else {
358 PRINT_WARN("performance_stats: write 0 or 1 to this file!\n");
359 return -EINVAL;
360 }
361 return count;
362}
363
364static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show,
365 qeth_dev_performance_stats_store);
366
367static ssize_t qeth_dev_layer2_show(struct device *dev,
368 struct device_attribute *attr, char *buf)
369{
370 struct qeth_card *card = dev_get_drvdata(dev);
371
372 if (!card)
373 return -EINVAL;
374
375 return sprintf(buf, "%i\n", card->options.layer2 ? 1:0);
376}
377
378static ssize_t qeth_dev_layer2_store(struct device *dev,
379 struct device_attribute *attr, const char *buf, size_t count)
380{
381 struct qeth_card *card = dev_get_drvdata(dev);
382 char *tmp;
383 int i, rc;
384 enum qeth_discipline_id newdis;
385
386 if (!card)
387 return -EINVAL;
388
389 if (((card->state != CARD_STATE_DOWN) &&
390 (card->state != CARD_STATE_RECOVER)))
391 return -EPERM;
392
393 i = simple_strtoul(buf, &tmp, 16);
394 switch (i) {
395 case 0:
396 newdis = QETH_DISCIPLINE_LAYER3;
397 break;
398 case 1:
399 newdis = QETH_DISCIPLINE_LAYER2;
400 break;
401 default:
402 PRINT_WARN("layer2: write 0 or 1 to this file!\n");
403 return -EINVAL;
404 }
405
406 if (card->options.layer2 == newdis) {
407 return count;
408 } else {
409 if (card->discipline.ccwgdriver) {
410 card->discipline.ccwgdriver->remove(card->gdev);
411 qeth_core_free_discipline(card);
412 }
413 }
414
415 rc = qeth_core_load_discipline(card, newdis);
416 if (rc)
417 return rc;
418
419 rc = card->discipline.ccwgdriver->probe(card->gdev);
420 if (rc)
421 return rc;
422 return count;
423}
424
425static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
426 qeth_dev_layer2_store);
427
428static ssize_t qeth_dev_large_send_show(struct device *dev,
429 struct device_attribute *attr, char *buf)
430{
431 struct qeth_card *card = dev_get_drvdata(dev);
432
433 if (!card)
434 return -EINVAL;
435
436 switch (card->options.large_send) {
437 case QETH_LARGE_SEND_NO:
438 return sprintf(buf, "%s\n", "no");
439 case QETH_LARGE_SEND_EDDP:
440 return sprintf(buf, "%s\n", "EDDP");
441 case QETH_LARGE_SEND_TSO:
442 return sprintf(buf, "%s\n", "TSO");
443 default:
444 return sprintf(buf, "%s\n", "N/A");
445 }
446}
447
448static ssize_t qeth_dev_large_send_store(struct device *dev,
449 struct device_attribute *attr, const char *buf, size_t count)
450{
451 struct qeth_card *card = dev_get_drvdata(dev);
452 enum qeth_large_send_types type;
453 int rc = 0;
454 char *tmp;
455
456 if (!card)
457 return -EINVAL;
458 tmp = strsep((char **) &buf, "\n");
459 if (!strcmp(tmp, "no")) {
460 type = QETH_LARGE_SEND_NO;
461 } else if (!strcmp(tmp, "EDDP")) {
462 type = QETH_LARGE_SEND_EDDP;
463 } else if (!strcmp(tmp, "TSO")) {
464 type = QETH_LARGE_SEND_TSO;
465 } else {
466 PRINT_WARN("large_send: invalid mode %s!\n", tmp);
467 return -EINVAL;
468 }
469 if (card->options.large_send == type)
470 return count;
471 rc = qeth_set_large_send(card, type);
472 if (rc)
473 return rc;
474 return count;
475}
476
477static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show,
478 qeth_dev_large_send_store);
479
480static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
481{
482
483 if (!card)
484 return -EINVAL;
485
486 return sprintf(buf, "%i\n", value);
487}
488
489static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
490 const char *buf, size_t count, int *value, int max_value)
491{
492 char *tmp;
493 int i;
494
495 if (!card)
496 return -EINVAL;
497
498 if ((card->state != CARD_STATE_DOWN) &&
499 (card->state != CARD_STATE_RECOVER))
500 return -EPERM;
501
502 i = simple_strtoul(buf, &tmp, 10);
503 if (i <= max_value) {
504 *value = i;
505 } else {
506 PRINT_WARN("blkt total time: write values between"
507 " 0 and %d to this file!\n", max_value);
508 return -EINVAL;
509 }
510 return count;
511}
512
513static ssize_t qeth_dev_blkt_total_show(struct device *dev,
514 struct device_attribute *attr, char *buf)
515{
516 struct qeth_card *card = dev_get_drvdata(dev);
517
518 return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total);
519}
520
521static ssize_t qeth_dev_blkt_total_store(struct device *dev,
522 struct device_attribute *attr, const char *buf, size_t count)
523{
524 struct qeth_card *card = dev_get_drvdata(dev);
525
526 return qeth_dev_blkt_store(card, buf, count,
527 &card->info.blkt.time_total, 1000);
528}
529
530
531
532static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show,
533 qeth_dev_blkt_total_store);
534
535static ssize_t qeth_dev_blkt_inter_show(struct device *dev,
536 struct device_attribute *attr, char *buf)
537{
538 struct qeth_card *card = dev_get_drvdata(dev);
539
540 return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet);
541}
542
543static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
544 struct device_attribute *attr, const char *buf, size_t count)
545{
546 struct qeth_card *card = dev_get_drvdata(dev);
547
548 return qeth_dev_blkt_store(card, buf, count,
549 &card->info.blkt.inter_packet, 100);
550}
551
552static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
553 qeth_dev_blkt_inter_store);
554
555static ssize_t qeth_dev_blkt_inter_jumbo_show(struct device *dev,
556 struct device_attribute *attr, char *buf)
557{
558 struct qeth_card *card = dev_get_drvdata(dev);
559
560 return qeth_dev_blkt_show(buf, card,
561 card->info.blkt.inter_packet_jumbo);
562}
563
564static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
565 struct device_attribute *attr, const char *buf, size_t count)
566{
567 struct qeth_card *card = dev_get_drvdata(dev);
568
569 return qeth_dev_blkt_store(card, buf, count,
570 &card->info.blkt.inter_packet_jumbo, 100);
571}
572
573static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
574 qeth_dev_blkt_inter_jumbo_store);
575
576static struct attribute *qeth_blkt_device_attrs[] = {
577 &dev_attr_total.attr,
578 &dev_attr_inter.attr,
579 &dev_attr_inter_jumbo.attr,
580 NULL,
581};
582
583static struct attribute_group qeth_device_blkt_group = {
584 .name = "blkt",
585 .attrs = qeth_blkt_device_attrs,
586};
587
588static struct attribute *qeth_device_attrs[] = {
589 &dev_attr_state.attr,
590 &dev_attr_chpid.attr,
591 &dev_attr_if_name.attr,
592 &dev_attr_card_type.attr,
593 &dev_attr_inbuf_size.attr,
594 &dev_attr_portno.attr,
595 &dev_attr_portname.attr,
596 &dev_attr_priority_queueing.attr,
597 &dev_attr_buffer_count.attr,
598 &dev_attr_recover.attr,
599 &dev_attr_performance_stats.attr,
600 &dev_attr_layer2.attr,
601 &dev_attr_large_send.attr,
602 NULL,
603};
604
605static struct attribute_group qeth_device_attr_group = {
606 .attrs = qeth_device_attrs,
607};
608
609static struct attribute *qeth_osn_device_attrs[] = {
610 &dev_attr_state.attr,
611 &dev_attr_chpid.attr,
612 &dev_attr_if_name.attr,
613 &dev_attr_card_type.attr,
614 &dev_attr_buffer_count.attr,
615 &dev_attr_recover.attr,
616 NULL,
617};
618
619static struct attribute_group qeth_osn_device_attr_group = {
620 .attrs = qeth_osn_device_attrs,
621};
622
623int qeth_core_create_device_attributes(struct device *dev)
624{
625 int ret;
626 ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group);
627 if (ret)
628 return ret;
629 ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group);
630 if (ret)
631 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
632
633 return 0;
634}
635
636void qeth_core_remove_device_attributes(struct device *dev)
637{
638 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
639 sysfs_remove_group(&dev->kobj, &qeth_device_blkt_group);
640}
641
642int qeth_core_create_osn_attributes(struct device *dev)
643{
644 return sysfs_create_group(&dev->kobj, &qeth_osn_device_attr_group);
645}
646
647void qeth_core_remove_osn_attributes(struct device *dev)
648{
649 sysfs_remove_group(&dev->kobj, &qeth_osn_device_attr_group);
650 return;
651}
diff --git a/drivers/s390/net/qeth_fs.h b/drivers/s390/net/qeth_fs.h
deleted file mode 100644
index 61faf05517d6..000000000000
--- a/drivers/s390/net/qeth_fs.h
+++ /dev/null
@@ -1,168 +0,0 @@
1/*
2 * linux/drivers/s390/net/qeth_fs.h
3 *
4 * Linux on zSeries OSA Express and HiperSockets support.
5 *
6 * This header file contains definitions related to sysfs and procfs.
7 *
8 * Copyright 2000,2003 IBM Corporation
9 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
10 *
11 */
12#ifndef __QETH_FS_H__
13#define __QETH_FS_H__
14
15#ifdef CONFIG_PROC_FS
16extern int
17qeth_create_procfs_entries(void);
18
19extern void
20qeth_remove_procfs_entries(void);
21#else
22static inline int
23qeth_create_procfs_entries(void)
24{
25 return 0;
26}
27
28static inline void
29qeth_remove_procfs_entries(void)
30{
31}
32#endif /* CONFIG_PROC_FS */
33
34extern int
35qeth_create_device_attributes(struct device *dev);
36
37extern void
38qeth_remove_device_attributes(struct device *dev);
39
40extern int
41qeth_create_device_attributes_osn(struct device *dev);
42
43extern void
44qeth_remove_device_attributes_osn(struct device *dev);
45
46extern int
47qeth_create_driver_attributes(void);
48
49extern void
50qeth_remove_driver_attributes(void);
51
52/*
53 * utility functions used in qeth_proc.c and qeth_sys.c
54 */
55
56static inline const char *
57qeth_get_checksum_str(struct qeth_card *card)
58{
59 if (card->options.checksum_type == SW_CHECKSUMMING)
60 return "sw";
61 else if (card->options.checksum_type == HW_CHECKSUMMING)
62 return "hw";
63 else
64 return "no";
65}
66
67static inline const char *
68qeth_get_prioq_str(struct qeth_card *card, char *buf)
69{
70 if (card->qdio.do_prio_queueing == QETH_NO_PRIO_QUEUEING)
71 sprintf(buf, "always_q_%i", card->qdio.default_out_queue);
72 else
73 strcpy(buf, (card->qdio.do_prio_queueing ==
74 QETH_PRIO_Q_ING_PREC)?
75 "by_prec." : "by_ToS");
76 return buf;
77}
78
79static inline const char *
80qeth_get_bufsize_str(struct qeth_card *card)
81{
82 if (card->qdio.in_buf_size == 16384)
83 return "16k";
84 else if (card->qdio.in_buf_size == 24576)
85 return "24k";
86 else if (card->qdio.in_buf_size == 32768)
87 return "32k";
88 else if (card->qdio.in_buf_size == 40960)
89 return "40k";
90 else
91 return "64k";
92}
93
94static inline const char *
95qeth_get_cardname(struct qeth_card *card)
96{
97 if (card->info.guestlan) {
98 switch (card->info.type) {
99 case QETH_CARD_TYPE_OSAE:
100 return " Guest LAN QDIO";
101 case QETH_CARD_TYPE_IQD:
102 return " Guest LAN Hiper";
103 default:
104 return " unknown";
105 }
106 } else {
107 switch (card->info.type) {
108 case QETH_CARD_TYPE_OSAE:
109 return " OSD Express";
110 case QETH_CARD_TYPE_IQD:
111 return " HiperSockets";
112 case QETH_CARD_TYPE_OSN:
113 return " OSN QDIO";
114 default:
115 return " unknown";
116 }
117 }
118 return " n/a";
119}
120
121/* max length to be returned: 14 */
122static inline const char *
123qeth_get_cardname_short(struct qeth_card *card)
124{
125 if (card->info.guestlan){
126 switch (card->info.type){
127 case QETH_CARD_TYPE_OSAE:
128 return "GuestLAN QDIO";
129 case QETH_CARD_TYPE_IQD:
130 return "GuestLAN Hiper";
131 default:
132 return "unknown";
133 }
134 } else {
135 switch (card->info.type) {
136 case QETH_CARD_TYPE_OSAE:
137 switch (card->info.link_type) {
138 case QETH_LINK_TYPE_FAST_ETH:
139 return "OSD_100";
140 case QETH_LINK_TYPE_HSTR:
141 return "HSTR";
142 case QETH_LINK_TYPE_GBIT_ETH:
143 return "OSD_1000";
144 case QETH_LINK_TYPE_10GBIT_ETH:
145 return "OSD_10GIG";
146 case QETH_LINK_TYPE_LANE_ETH100:
147 return "OSD_FE_LANE";
148 case QETH_LINK_TYPE_LANE_TR:
149 return "OSD_TR_LANE";
150 case QETH_LINK_TYPE_LANE_ETH1000:
151 return "OSD_GbE_LANE";
152 case QETH_LINK_TYPE_LANE:
153 return "OSD_ATM_LANE";
154 default:
155 return "OSD_Express";
156 }
157 case QETH_CARD_TYPE_IQD:
158 return "HiperSockets";
159 case QETH_CARD_TYPE_OSN:
160 return "OSN";
161 default:
162 return "unknown";
163 }
164 }
165 return "n/a";
166}
167
168#endif /* __QETH_FS_H__ */
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
new file mode 100644
index 000000000000..3921d1631a78
--- /dev/null
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -0,0 +1,1234 @@
1/*
2 * drivers/s390/net/qeth_l2_main.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/string.h>
14#include <linux/errno.h>
15#include <linux/kernel.h>
16#include <linux/etherdevice.h>
17#include <linux/mii.h>
18#include <linux/ip.h>
19
20#include <asm/s390_rdev.h>
21
22#include "qeth_core.h"
23#include "qeth_core_offl.h"
24
25#define QETH_DBF_TXT_BUF qeth_l2_dbf_txt_buf
26static DEFINE_PER_CPU(char[256], qeth_l2_dbf_txt_buf);
27
28static int qeth_l2_set_offline(struct ccwgroup_device *);
29static int qeth_l2_stop(struct net_device *);
30static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
31static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
32 enum qeth_ipa_cmds,
33 int (*reply_cb) (struct qeth_card *,
34 struct qeth_reply*,
35 unsigned long));
36static void qeth_l2_set_multicast_list(struct net_device *);
37static int qeth_l2_recover(void *);
38
39static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
40{
41 struct qeth_card *card = netdev_priv(dev);
42 struct mii_ioctl_data *mii_data;
43 int rc = 0;
44
45 if (!card)
46 return -ENODEV;
47
48 if ((card->state != CARD_STATE_UP) &&
49 (card->state != CARD_STATE_SOFTSETUP))
50 return -ENODEV;
51
52 if (card->info.type == QETH_CARD_TYPE_OSN)
53 return -EPERM;
54
55 switch (cmd) {
56 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
57 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
58 break;
59 case SIOC_QETH_GET_CARD_TYPE:
60 if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
61 !card->info.guestlan)
62 return 1;
63 return 0;
64 break;
65 case SIOCGMIIPHY:
66 mii_data = if_mii(rq);
67 mii_data->phy_id = 0;
68 break;
69 case SIOCGMIIREG:
70 mii_data = if_mii(rq);
71 if (mii_data->phy_id != 0)
72 rc = -EINVAL;
73 else
74 mii_data->val_out = qeth_mdio_read(dev,
75 mii_data->phy_id, mii_data->reg_num);
76 break;
77 default:
78 rc = -EOPNOTSUPP;
79 }
80 if (rc)
81 QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc);
82 return rc;
83}
84
85static int qeth_l2_verify_dev(struct net_device *dev)
86{
87 struct qeth_card *card;
88 unsigned long flags;
89 int rc = 0;
90
91 read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
92 list_for_each_entry(card, &qeth_core_card_list.list, list) {
93 if (card->dev == dev) {
94 rc = QETH_REAL_CARD;
95 break;
96 }
97 }
98 read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
99
100 return rc;
101}
102
103static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
104{
105 struct qeth_card *card;
106 struct net_device *ndev;
107 unsigned char *readno;
108 __u16 temp_dev_no, card_dev_no;
109 char *endp;
110 unsigned long flags;
111
112 ndev = NULL;
113 memcpy(&temp_dev_no, read_dev_no, 2);
114 read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
115 list_for_each_entry(card, &qeth_core_card_list.list, list) {
116 readno = CARD_RDEV_ID(card);
117 readno += (strlen(readno) - 4);
118 card_dev_no = simple_strtoul(readno, &endp, 16);
119 if (card_dev_no == temp_dev_no) {
120 ndev = card->dev;
121 break;
122 }
123 }
124 read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
125 return ndev;
126}
127
128static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
129 struct qeth_reply *reply,
130 unsigned long data)
131{
132 struct qeth_ipa_cmd *cmd;
133 __u8 *mac;
134
135 QETH_DBF_TEXT(TRACE, 2, "L2Sgmacb");
136 cmd = (struct qeth_ipa_cmd *) data;
137 mac = &cmd->data.setdelmac.mac[0];
138 /* MAC already registered, needed in couple/uncouple case */
139 if (cmd->hdr.return_code == 0x2005) {
140 PRINT_WARN("Group MAC %02x:%02x:%02x:%02x:%02x:%02x " \
141 "already existing on %s \n",
142 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
143 QETH_CARD_IFNAME(card));
144 cmd->hdr.return_code = 0;
145 }
146 if (cmd->hdr.return_code)
147 PRINT_ERR("Could not set group MAC " \
148 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
149 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
150 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
151 return 0;
152}
153
154static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
155{
156 QETH_DBF_TEXT(TRACE, 2, "L2Sgmac");
157 return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
158 qeth_l2_send_setgroupmac_cb);
159}
160
161static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
162 struct qeth_reply *reply,
163 unsigned long data)
164{
165 struct qeth_ipa_cmd *cmd;
166 __u8 *mac;
167
168 QETH_DBF_TEXT(TRACE, 2, "L2Dgmacb");
169 cmd = (struct qeth_ipa_cmd *) data;
170 mac = &cmd->data.setdelmac.mac[0];
171 if (cmd->hdr.return_code)
172 PRINT_ERR("Could not delete group MAC " \
173 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
174 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
175 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
176 return 0;
177}
178
179static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
180{
181 QETH_DBF_TEXT(TRACE, 2, "L2Dgmac");
182 return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
183 qeth_l2_send_delgroupmac_cb);
184}
185
186static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac)
187{
188 struct qeth_mc_mac *mc;
189
190 mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC);
191
192 if (!mc) {
193 PRINT_ERR("no mem vor mc mac address\n");
194 return;
195 }
196
197 memcpy(mc->mc_addr, mac, OSA_ADDR_LEN);
198 mc->mc_addrlen = OSA_ADDR_LEN;
199
200 if (!qeth_l2_send_setgroupmac(card, mac))
201 list_add_tail(&mc->list, &card->mc_list);
202 else
203 kfree(mc);
204}
205
206static void qeth_l2_del_all_mc(struct qeth_card *card)
207{
208 struct qeth_mc_mac *mc, *tmp;
209
210 spin_lock_bh(&card->mclock);
211 list_for_each_entry_safe(mc, tmp, &card->mc_list, list) {
212 qeth_l2_send_delgroupmac(card, mc->mc_addr);
213 list_del(&mc->list);
214 kfree(mc);
215 }
216 spin_unlock_bh(&card->mclock);
217}
218
219static void qeth_l2_get_packet_type(struct qeth_card *card,
220 struct qeth_hdr *hdr, struct sk_buff *skb)
221{
222 __u16 hdr_mac;
223
224 if (!memcmp(skb->data + QETH_HEADER_SIZE,
225 skb->dev->broadcast, 6)) {
226 /* broadcast? */
227 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
228 return;
229 }
230 hdr_mac = *((__u16 *)skb->data);
231 /* tr multicast? */
232 switch (card->info.link_type) {
233 case QETH_LINK_TYPE_HSTR:
234 case QETH_LINK_TYPE_LANE_TR:
235 if ((hdr_mac == QETH_TR_MAC_NC) ||
236 (hdr_mac == QETH_TR_MAC_C))
237 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
238 else
239 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
240 break;
241 /* eth or so multicast? */
242 default:
243 if ((hdr_mac == QETH_ETH_MAC_V4) ||
244 (hdr_mac == QETH_ETH_MAC_V6))
245 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
246 else
247 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
248 }
249}
250
251static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
252 struct sk_buff *skb, int ipv, int cast_type)
253{
254 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)((skb->data) +
255 QETH_HEADER_SIZE);
256
257 memset(hdr, 0, sizeof(struct qeth_hdr));
258 hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
259
260 /* set byte byte 3 to casting flags */
261 if (cast_type == RTN_MULTICAST)
262 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
263 else if (cast_type == RTN_BROADCAST)
264 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
265 else
266 qeth_l2_get_packet_type(card, hdr, skb);
267
268 hdr->hdr.l2.pkt_length = skb->len-QETH_HEADER_SIZE;
269 /* VSWITCH relies on the VLAN
270 * information to be present in
271 * the QDIO header */
272 if (veth->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
273 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN;
274 hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI);
275 }
276}
277
278static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
279 struct qeth_reply *reply, unsigned long data)
280{
281 struct qeth_ipa_cmd *cmd;
282
283 QETH_DBF_TEXT(TRACE, 2, "L2sdvcb");
284 cmd = (struct qeth_ipa_cmd *) data;
285 if (cmd->hdr.return_code) {
286 PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. "
287 "Continuing\n", cmd->data.setdelvlan.vlan_id,
288 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
289 QETH_DBF_TEXT_(TRACE, 2, "L2VL%4x", cmd->hdr.command);
290 QETH_DBF_TEXT_(TRACE, 2, "L2%s", CARD_BUS_ID(card));
291 QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code);
292 }
293 return 0;
294}
295
296static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
297 enum qeth_ipa_cmds ipacmd)
298{
299 struct qeth_ipa_cmd *cmd;
300 struct qeth_cmd_buffer *iob;
301
302 QETH_DBF_TEXT_(TRACE, 4, "L2sdv%x", ipacmd);
303 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
304 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
305 cmd->data.setdelvlan.vlan_id = i;
306 return qeth_send_ipa_cmd(card, iob,
307 qeth_l2_send_setdelvlan_cb, NULL);
308}
309
310static void qeth_l2_process_vlans(struct qeth_card *card, int clear)
311{
312 struct qeth_vlan_vid *id;
313 QETH_DBF_TEXT(TRACE, 3, "L2prcvln");
314 spin_lock_bh(&card->vlanlock);
315 list_for_each_entry(id, &card->vid_list, list) {
316 if (clear)
317 qeth_l2_send_setdelvlan(card, id->vid,
318 IPA_CMD_DELVLAN);
319 else
320 qeth_l2_send_setdelvlan(card, id->vid,
321 IPA_CMD_SETVLAN);
322 }
323 spin_unlock_bh(&card->vlanlock);
324}
325
326static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
327{
328 struct qeth_card *card = netdev_priv(dev);
329 struct qeth_vlan_vid *id;
330
331 QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid);
332 id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
333 if (id) {
334 id->vid = vid;
335 qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
336 spin_lock_bh(&card->vlanlock);
337 list_add_tail(&id->list, &card->vid_list);
338 spin_unlock_bh(&card->vlanlock);
339 } else {
340 PRINT_ERR("no memory for vid\n");
341 }
342}
343
344static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
345{
346 struct qeth_vlan_vid *id, *tmpid = NULL;
347 struct qeth_card *card = netdev_priv(dev);
348
349 QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
350 spin_lock_bh(&card->vlanlock);
351 list_for_each_entry(id, &card->vid_list, list) {
352 if (id->vid == vid) {
353 list_del(&id->list);
354 tmpid = id;
355 break;
356 }
357 }
358 spin_unlock_bh(&card->vlanlock);
359 if (tmpid) {
360 qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
361 kfree(tmpid);
362 }
363 qeth_l2_set_multicast_list(card->dev);
364}
365
/*
 * Walk the card down from its current state to CARD_STATE_DOWN,
 * executing the teardown steps of every state it passes through.
 * Returns -ERESTARTSYS if waiting for other threads was interrupted,
 * otherwise the result of the (optional) DELMAC command.
 */
static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
{
	int rc = 0;

	QETH_DBF_TEXT(SETUP , 2, "stopcard");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	/* allow only the recovery thread from here on */
	qeth_set_allowed_threads(card, 0, 1);
	if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
		return -ERESTARTSYS;
	/* UP -> SOFTSETUP: take the netdev down, deregister our MAC */
	if (card->read.state == CH_STATE_UP &&
	    card->write.state == CH_STATE_UP &&
	    (card->state == CARD_STATE_UP)) {
		if (recovery_mode &&
		    card->info.type != QETH_CARD_TYPE_OSN) {
			/* in recovery we must not sleep in dev_close() */
			qeth_l2_stop(card->dev);
		} else {
			rtnl_lock();
			dev_close(card->dev);
			rtnl_unlock();
		}
		if (!card->use_hard_stop) {
			__u8 *mac = &card->dev->dev_addr[0];
			rc = qeth_l2_send_delmac(card, mac);
			QETH_DBF_TEXT_(SETUP, 2, "Lerr%d", rc);
		}
		card->state = CARD_STATE_SOFTSETUP;
	}
	/* SOFTSETUP -> HARDSETUP: drop VLANs, multicast, pending cmds */
	if (card->state == CARD_STATE_SOFTSETUP) {
		qeth_l2_process_vlans(card, 1);
		qeth_l2_del_all_mc(card);
		qeth_clear_ipacmd_list(card);
		card->state = CARD_STATE_HARDSETUP;
	}
	/* HARDSETUP -> DOWN: tear down QDIO queues and buffer pools */
	if (card->state == CARD_STATE_HARDSETUP) {
		qeth_qdio_clear_card(card, 0);
		qeth_clear_qdio_buffers(card);
		qeth_clear_working_pool_list(card);
		card->state = CARD_STATE_DOWN;
	}
	if (card->state == CARD_STATE_DOWN) {
		qeth_clear_cmd_buffers(&card->read);
		qeth_clear_cmd_buffers(&card->write);
	}
	card->use_hard_stop = 0;
	return rc;
}
413
/*
 * Unpack one inbound QDIO buffer: extract each skb, classify it by the
 * qeth header type and hand it to the network stack (layer 2) or the
 * OSN data callback.  Unknown header types are dropped and logged.
 */
static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
			struct qeth_qdio_buffer *buf, int index)
{
	struct qdio_buffer_element *element;
	struct sk_buff *skb;
	struct qeth_hdr *hdr;
	int offset;
	unsigned int len;

	/* get first element of current buffer */
	element = (struct qdio_buffer_element *)&buf->buffer->element[0];
	offset = 0;
	if (card->options.performance_stats)
		card->perf_stats.bufs_rec++;
	while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element,
				 &offset, &hdr))) {
		skb->dev = card->dev;
		/* is device UP ? */
		if (!(card->dev->flags & IFF_UP)) {
			dev_kfree_skb_any(skb);
			continue;
		}

		switch (hdr->hdr.l2.id) {
		case QETH_HEADER_TYPE_LAYER2:
			skb->pkt_type = PACKET_HOST;
			skb->protocol = eth_type_trans(skb, skb->dev);
			/* hardware has no rx checksum indication here, so
			 * either trust it blindly or let the stack verify */
			if (card->options.checksum_type == NO_CHECKSUMMING)
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb->ip_summed = CHECKSUM_NONE;
			if (skb->protocol == htons(ETH_P_802_2))
				*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
			/* save len before netif_rx() consumes the skb */
			len = skb->len;
			netif_rx(skb);
			break;
		case QETH_HEADER_TYPE_OSN:
			/* OSN consumers get the raw qeth header back */
			skb_push(skb, sizeof(struct qeth_hdr));
			skb_copy_to_linear_data(skb, hdr,
						sizeof(struct qeth_hdr));
			len = skb->len;
			card->osn_info.data_cb(skb);
			break;
		default:
			dev_kfree_skb_any(skb);
			QETH_DBF_TEXT(TRACE, 3, "inbunkno");
			QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
			continue;
		}
		card->dev->last_rx = jiffies;
		card->stats.rx_packets++;
		card->stats.rx_bytes += len;
	}
}
468
469static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
470 enum qeth_ipa_cmds ipacmd,
471 int (*reply_cb) (struct qeth_card *,
472 struct qeth_reply*,
473 unsigned long))
474{
475 struct qeth_ipa_cmd *cmd;
476 struct qeth_cmd_buffer *iob;
477
478 QETH_DBF_TEXT(TRACE, 2, "L2sdmac");
479 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
480 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
481 cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
482 memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
483 return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
484}
485
/*
 * Reply callback for SETVMAC: on success record the registered flag and
 * copy the (possibly card-adjusted) MAC into the netdev; on failure
 * clear the flag and report -EIO through the command's return code.
 */
static int qeth_l2_send_setmac_cb(struct qeth_card *card,
			   struct qeth_reply *reply,
			   unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(TRACE, 2, "L2Smaccb");
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_DBF_TEXT_(TRACE, 2, "L2er%x", cmd->hdr.return_code);
		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
		/* propagate the failure to the caller of qeth_send_ipa_cmd */
		cmd->hdr.return_code = -EIO;
	} else {
		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
		/* take the address from the reply: the card may have
		 * modified it (e.g. for guest LAN) */
		memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
		       OSA_ADDR_LEN);
		PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
			   "successfully registered on device %s\n",
			   card->dev->dev_addr[0], card->dev->dev_addr[1],
			   card->dev->dev_addr[2], card->dev->dev_addr[3],
			   card->dev->dev_addr[4], card->dev->dev_addr[5],
			   card->dev->name);
	}
	return 0;
}
511
/* Register @mac as the card's unicast address via SETVMAC. */
static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
{
	QETH_DBF_TEXT(TRACE, 2, "L2Setmac");
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
					  qeth_l2_send_setmac_cb);
}
518
519static int qeth_l2_send_delmac_cb(struct qeth_card *card,
520 struct qeth_reply *reply,
521 unsigned long data)
522{
523 struct qeth_ipa_cmd *cmd;
524
525 QETH_DBF_TEXT(TRACE, 2, "L2Dmaccb");
526 cmd = (struct qeth_ipa_cmd *) data;
527 if (cmd->hdr.return_code) {
528 QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code);
529 cmd->hdr.return_code = -EIO;
530 return 0;
531 }
532 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
533
534 return 0;
535}
536
/*
 * Deregister @mac via DELVMAC.  A no-op (returning 0) when no MAC is
 * currently registered with the card.
 */
static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
{
	QETH_DBF_TEXT(TRACE, 2, "L2Delmac");
	if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
		return 0;
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
					  qeth_l2_send_delmac_cb);
}
545
546static int qeth_l2_request_initial_mac(struct qeth_card *card)
547{
548 int rc = 0;
549 char vendor_pre[] = {0x02, 0x00, 0x00};
550
551 QETH_DBF_TEXT(SETUP, 2, "doL2init");
552 QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));
553
554 rc = qeth_query_setadapterparms(card);
555 if (rc) {
556 PRINT_WARN("could not query adapter parameters on device %s: "
557 "x%x\n", CARD_BUS_ID(card), rc);
558 }
559
560 if (card->info.guestlan) {
561 rc = qeth_setadpparms_change_macaddr(card);
562 if (rc) {
563 PRINT_WARN("couldn't get MAC address on "
564 "device %s: x%x\n",
565 CARD_BUS_ID(card), rc);
566 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
567 return rc;
568 }
569 QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
570 } else {
571 random_ether_addr(card->dev->dev_addr);
572 memcpy(card->dev->dev_addr, vendor_pre, 3);
573 }
574 return 0;
575}
576
/*
 * net_device set_mac_address callback: deregister the old MAC from the
 * card, then register the new one.  Not supported on OSN devices or on
 * anything that is not a real card.
 */
static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct qeth_card *card = netdev_priv(dev);
	int rc = 0;

	QETH_DBF_TEXT(TRACE, 3, "setmac");

	if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) {
		QETH_DBF_TEXT(TRACE, 3, "setmcINV");
		return -EOPNOTSUPP;
	}

	if (card->info.type == QETH_CARD_TYPE_OSN) {
		PRINT_WARN("Setting MAC address on %s is not supported.\n",
			   dev->name);
		QETH_DBF_TEXT(TRACE, 3, "setmcOSN");
		return -EOPNOTSUPP;
	}
	QETH_DBF_TEXT_(TRACE, 3, "%s", CARD_BUS_ID(card));
	QETH_DBF_HEX(TRACE, 3, addr->sa_data, OSA_ADDR_LEN);
	/* old address must be removed before the new one can be set */
	rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
	if (!rc)
		rc = qeth_l2_send_setmac(card, addr->sa_data);
	return rc;
}
603
/*
 * net_device set_multicast_list callback: resync the card's group MAC
 * registrations with the netdev's mc_list, then refresh promiscuous
 * mode if the adapter supports it.  No-op for OSN devices.
 *
 * NOTE(review): qeth_l2_add_mc() issues a waiting IPA command while
 * mclock (a BH spinlock) is held here — looks like it could sleep in
 * atomic context; confirm against qeth_l2_send_setgroupmac.
 */
static void qeth_l2_set_multicast_list(struct net_device *dev)
{
	struct qeth_card *card = netdev_priv(dev);
	struct dev_mc_list *dm;

	if (card->info.type == QETH_CARD_TYPE_OSN)
		return ;

	QETH_DBF_TEXT(TRACE, 3, "setmulti");
	/* drop all old registrations, then re-add the current list */
	qeth_l2_del_all_mc(card);
	spin_lock_bh(&card->mclock);
	for (dm = dev->mc_list; dm; dm = dm->next)
		qeth_l2_add_mc(card, dm->dmi_addr);
	spin_unlock_bh(&card->mclock);
	if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
		return;
	qeth_setadp_promisc_mode(card);
}
622
/*
 * net_device hard_start_xmit callback: prepare the skb (qeth header,
 * cast flags, optional EDDP context for GSO), pick the priority queue
 * and hand the packet to the QDIO send path.
 * Returns NETDEV_TX_OK (packet consumed or dropped) or NETDEV_TX_BUSY.
 */
static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int rc;
	struct qeth_hdr *hdr = NULL;
	int elements = 0;
	struct qeth_card *card = netdev_priv(dev);
	struct sk_buff *new_skb = skb;
	int ipv = qeth_get_ip_version(skb);
	int cast_type = qeth_get_cast_type(card, skb);
	struct qeth_qdio_out_q *queue = card->qdio.out_qs
		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
	int tx_bytes = skb->len;
	enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
	struct qeth_eddp_context *ctx = NULL;

	QETH_DBF_TEXT(TRACE, 6, "l2xmit");

	/* card must be fully up and the LAN reachable */
	if ((card->state != CARD_STATE_UP) || !card->lan_online) {
		card->stats.tx_carrier_errors++;
		goto tx_drop;
	}

	/* OSN devices cannot transmit IPv6 */
	if ((card->info.type == QETH_CARD_TYPE_OSN) &&
	    (skb->protocol == htons(ETH_P_IPV6)))
		goto tx_drop;

	if (card->options.performance_stats) {
		card->perf_stats.outbound_cnt++;
		card->perf_stats.outbound_start_time = qeth_get_micros();
	}
	/* stopped here; re-woken below on success */
	netif_stop_queue(dev);

	if (skb_is_gso(skb))
		large_send = QETH_LARGE_SEND_EDDP;

	if (card->info.type == QETH_CARD_TYPE_OSN)
		/* OSN skbs already carry the qeth header */
		hdr = (struct qeth_hdr *)skb->data;
	else {
		/* may return a copy; original is freed below on success */
		new_skb = qeth_prepare_skb(card, skb, &hdr);
		if (!new_skb)
			goto tx_drop;
		qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
	}

	if (large_send == QETH_LARGE_SEND_EDDP) {
		/* NOTE(review): assumes skb->sk is non-NULL for GSO
		 * skbs — confirm for locally generated traffic only */
		ctx = qeth_eddp_create_context(card, new_skb, hdr,
						skb->sk->sk_protocol);
		if (ctx == NULL) {
			PRINT_WARN("could not create eddp context\n");
			goto tx_drop;
		}
	} else {
		elements = qeth_get_elements_no(card, (void *)hdr, new_skb, 0);
		if (!elements)
			goto tx_drop;
	}

	if ((large_send == QETH_LARGE_SEND_NO) &&
	    (skb->ip_summed == CHECKSUM_PARTIAL))
		qeth_tx_csum(new_skb);

	if (card->info.type != QETH_CARD_TYPE_IQD)
		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
					 elements, ctx);
	else
		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
					      elements, ctx);
	if (!rc) {
		card->stats.tx_packets++;
		card->stats.tx_bytes += tx_bytes;
		if (new_skb != skb)
			dev_kfree_skb_any(skb);
		if (card->options.performance_stats) {
			if (large_send != QETH_LARGE_SEND_NO) {
				card->perf_stats.large_send_bytes += tx_bytes;
				card->perf_stats.large_send_cnt++;
			}
			if (skb_shinfo(new_skb)->nr_frags > 0) {
				card->perf_stats.sg_skbs_sent++;
				/* nr_frags + skb->data */
				card->perf_stats.sg_frags_sent +=
					skb_shinfo(new_skb)->nr_frags + 1;
			}
		}

		if (ctx != NULL) {
			/* EDDP path made its own copies of the data */
			qeth_eddp_put_context(ctx);
			dev_kfree_skb_any(new_skb);
		}
	} else {
		if (ctx != NULL)
			qeth_eddp_put_context(ctx);

		if (rc == -EBUSY) {
			/* keep the original skb; stack will retry */
			if (new_skb != skb)
				dev_kfree_skb_any(new_skb);
			return NETDEV_TX_BUSY;
		} else
			goto tx_drop;
	}

	netif_wake_queue(dev);
	if (card->options.performance_stats)
		card->perf_stats.outbound_time += qeth_get_micros() -
			card->perf_stats.outbound_start_time;
	return rc;

tx_drop:
	card->stats.tx_dropped++;
	card->stats.tx_errors++;
	if ((new_skb != skb) && new_skb)
		dev_kfree_skb_any(new_skb);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
738
/*
 * QDIO inbound interrupt handler: on an activate-check condition kick
 * off recovery; otherwise process every signalled buffer and requeue
 * it for the hardware.
 */
static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
			    unsigned int status, unsigned int qdio_err,
			    unsigned int siga_err, unsigned int queue,
			    int first_element, int count, unsigned long card_ptr)
{
	struct net_device *net_dev;
	struct qeth_card *card;
	struct qeth_qdio_buffer *buffer;
	int index;
	int i;

	QETH_DBF_TEXT(TRACE, 6, "qdinput");
	card = (struct qeth_card *) card_ptr;
	net_dev = card->dev;
	if (card->options.performance_stats) {
		card->perf_stats.inbound_cnt++;
		card->perf_stats.inbound_start_time = qeth_get_micros();
	}
	if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
		if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
			/* fatal channel condition: log and recover */
			QETH_DBF_TEXT(TRACE, 1, "qdinchk");
			QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
			QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element,
					count);
			QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status);
			qeth_schedule_recovery(card);
			return;
		}
	}
	for (i = first_element; i < (first_element + count); ++i) {
		/* buffer ring wraps at QDIO_MAX_BUFFERS_PER_Q */
		index = i % QDIO_MAX_BUFFERS_PER_Q;
		buffer = &card->qdio.in_q->bufs[index];
		/* skip buffers that carry a QDIO/SIGA error */
		if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) &&
		      qeth_check_qdio_errors(buffer->buffer,
					     qdio_err, siga_err, "qinerr")))
			qeth_l2_process_inbound_buffer(card, buffer, index);
		/* clear buffer and give back to hardware */
		qeth_put_buffer_pool_entry(card, buffer->pool_entry);
		qeth_queue_input_buffer(card, index);
	}
	if (card->options.performance_stats)
		card->perf_stats.inbound_time += qeth_get_micros() -
			card->perf_stats.inbound_start_time;
}
783
/*
 * net_device open callback: requires the card to be in SOFTSETUP and
 * (except for OSN) to have a registered MAC; transitions to UP and
 * starts the transmit queue.
 */
static int qeth_l2_open(struct net_device *dev)
{
	struct qeth_card *card = netdev_priv(dev);

	QETH_DBF_TEXT(TRACE, 4, "qethopen");
	if (card->state != CARD_STATE_SOFTSETUP)
		return -ENODEV;

	if ((card->info.type != QETH_CARD_TYPE_OSN) &&
	     (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
		QETH_DBF_TEXT(TRACE, 4, "nomacadr");
		return -EPERM;
	}
	card->data.state = CH_STATE_UP;
	card->state = CARD_STATE_UP;
	card->dev->flags |= IFF_UP;
	netif_start_queue(dev);

	/* carrier reflects lan_online, not just the open state */
	if (!card->lan_online && netif_carrier_ok(dev))
		netif_carrier_off(dev);
	return 0;
}
806
807
/*
 * net_device stop callback: disable transmission and fall back from
 * UP to SOFTSETUP.  Also used directly by the recovery path, where
 * dev_close() must be avoided.
 */
static int qeth_l2_stop(struct net_device *dev)
{
	struct qeth_card *card = netdev_priv(dev);

	QETH_DBF_TEXT(TRACE, 4, "qethstop");
	netif_tx_disable(dev);
	card->dev->flags &= ~IFF_UP;
	if (card->state == CARD_STATE_UP)
		card->state = CARD_STATE_SOFTSETUP;
	return 0;
}
819
820static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
821{
822 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
823
824 INIT_LIST_HEAD(&card->vid_list);
825 INIT_LIST_HEAD(&card->mc_list);
826 card->options.layer2 = 1;
827 card->discipline.input_handler = (qdio_handler_t *)
828 qeth_l2_qdio_input_handler;
829 card->discipline.output_handler = (qdio_handler_t *)
830 qeth_qdio_output_handler;
831 card->discipline.recover = qeth_l2_recover;
832 return 0;
833}
834
/*
 * ccwgroup remove callback: wait for all worker threads, force the
 * card offline if still online, unregister the netdev and drop any
 * remaining multicast registrations.
 */
static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);

	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);

	if (cgdev->state == CCWGROUP_ONLINE) {
		/* device is going away: skip the graceful DELMAC */
		card->use_hard_stop = 1;
		qeth_l2_set_offline(cgdev);
	}

	if (card->dev) {
		unregister_netdev(card->dev);
		card->dev = NULL;
	}

	qeth_l2_del_all_mc(card);
	return;
}
854
/* ethtool operations for regular (non-OSN) layer-2 devices */
static struct ethtool_ops qeth_l2_ethtool_ops = {
	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_hw_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
	.get_strings = qeth_core_get_strings,
	.get_ethtool_stats = qeth_core_get_ethtool_stats,
	.get_stats_count = qeth_core_get_stats_count,
	.get_drvinfo = qeth_core_get_drvinfo,
};
868
/* reduced ethtool operations for OSN devices (stats/info only) */
static struct ethtool_ops qeth_l2_osn_ops = {
	.get_strings = qeth_core_get_strings,
	.get_ethtool_stats = qeth_core_get_ethtool_stats,
	.get_stats_count = qeth_core_get_stats_count,
	.get_drvinfo = qeth_core_get_drvinfo,
};
875
/*
 * Allocate and register the net_device for the card: pick the netdev
 * flavour by card type, wire up all net_device callbacks, choose the
 * ethtool ops and obtain an initial MAC address.
 * Returns 0 on success or the error from alloc/register_netdev.
 */
static int qeth_l2_setup_netdev(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSAE:
		card->dev = alloc_etherdev(0);
		break;
	case QETH_CARD_TYPE_IQD:
		card->dev = alloc_netdev(0, "hsi%d", ether_setup);
		break;
	case QETH_CARD_TYPE_OSN:
		card->dev = alloc_netdev(0, "osn%d", ether_setup);
		/* OSN has no ARP */
		card->dev->flags |= IFF_NOARP;
		break;
	default:
		card->dev = alloc_etherdev(0);
	}

	if (!card->dev)
		return -ENODEV;

	card->dev->priv = card;
	card->dev->tx_timeout = &qeth_tx_timeout;
	card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
	card->dev->open = qeth_l2_open;
	card->dev->stop = qeth_l2_stop;
	card->dev->hard_start_xmit = qeth_l2_hard_start_xmit;
	card->dev->do_ioctl = qeth_l2_do_ioctl;
	card->dev->get_stats = qeth_get_stats;
	card->dev->change_mtu = qeth_change_mtu;
	card->dev->set_multicast_list = qeth_l2_set_multicast_list;
	card->dev->vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid;
	card->dev->vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid;
	card->dev->set_mac_address = qeth_l2_set_mac_address;
	card->dev->mtu = card->info.initial_mtu;
	if (card->info.type != QETH_CARD_TYPE_OSN)
		SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
	else
		SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops);
	card->dev->features |= NETIF_F_HW_VLAN_FILTER;
	card->info.broadcast_capable = 1;
	/* NOTE(review): return value intentionally ignored — a failed
	 * MAC query still leaves a usable random address? confirm */
	qeth_l2_request_initial_mac(card);
	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
	return register_netdev(card->dev);
}
920
921static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
922{
923 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
924 int rc = 0;
925 enum qeth_card_states recover_flag;
926
927 BUG_ON(!card);
928 QETH_DBF_TEXT(SETUP, 2, "setonlin");
929 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
930
931 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
932 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) {
933 PRINT_WARN("set_online of card %s interrupted by user!\n",
934 CARD_BUS_ID(card));
935 return -ERESTARTSYS;
936 }
937
938 recover_flag = card->state;
939 rc = ccw_device_set_online(CARD_RDEV(card));
940 if (rc) {
941 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
942 return -EIO;
943 }
944 rc = ccw_device_set_online(CARD_WDEV(card));
945 if (rc) {
946 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
947 return -EIO;
948 }
949 rc = ccw_device_set_online(CARD_DDEV(card));
950 if (rc) {
951 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
952 return -EIO;
953 }
954
955 rc = qeth_core_hardsetup_card(card);
956 if (rc) {
957 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
958 goto out_remove;
959 }
960
961 if (!card->dev && qeth_l2_setup_netdev(card))
962 goto out_remove;
963
964 if (card->info.type != QETH_CARD_TYPE_OSN)
965 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
966
967 card->state = CARD_STATE_HARDSETUP;
968 qeth_print_status_message(card);
969
970 /* softsetup */
971 QETH_DBF_TEXT(SETUP, 2, "softsetp");
972
973 rc = qeth_send_startlan(card);
974 if (rc) {
975 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
976 if (rc == 0xe080) {
977 PRINT_WARN("LAN on card %s if offline! "
978 "Waiting for STARTLAN from card.\n",
979 CARD_BUS_ID(card));
980 card->lan_online = 0;
981 }
982 return rc;
983 } else
984 card->lan_online = 1;
985
986 if (card->info.type != QETH_CARD_TYPE_OSN) {
987 qeth_set_large_send(card, card->options.large_send);
988 qeth_l2_process_vlans(card, 0);
989 }
990
991 netif_tx_disable(card->dev);
992
993 rc = qeth_init_qdio_queues(card);
994 if (rc) {
995 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
996 goto out_remove;
997 }
998 card->state = CARD_STATE_SOFTSETUP;
999 netif_carrier_on(card->dev);
1000
1001 qeth_set_allowed_threads(card, 0xffffffff, 0);
1002 if (recover_flag == CARD_STATE_RECOVER) {
1003 if (recovery_mode &&
1004 card->info.type != QETH_CARD_TYPE_OSN) {
1005 qeth_l2_open(card->dev);
1006 } else {
1007 rtnl_lock();
1008 dev_open(card->dev);
1009 rtnl_unlock();
1010 }
1011 /* this also sets saved unicast addresses */
1012 qeth_l2_set_multicast_list(card->dev);
1013 }
1014 /* let user_space know that device is online */
1015 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
1016 return 0;
1017out_remove:
1018 card->use_hard_stop = 1;
1019 qeth_l2_stop_card(card, 0);
1020 ccw_device_set_offline(CARD_DDEV(card));
1021 ccw_device_set_offline(CARD_WDEV(card));
1022 ccw_device_set_offline(CARD_RDEV(card));
1023 if (recover_flag == CARD_STATE_RECOVER)
1024 card->state = CARD_STATE_RECOVER;
1025 else
1026 card->state = CARD_STATE_DOWN;
1027 return -ENODEV;
1028}
1029
/* ccwgroup set_online callback: non-recovery online sequence. */
static int qeth_l2_set_online(struct ccwgroup_device *gdev)
{
	return __qeth_l2_set_online(gdev, 0);
}
1034
/*
 * Take the card offline: stop it, then set the three CCW subchannels
 * offline.  A state of UP is remembered as RECOVER so the next online
 * reopens the interface.
 * Returns 0, or -ERESTARTSYS if stopping was interrupted by the user.
 */
static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
					int recovery_mode)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
	int rc = 0, rc2 = 0, rc3 = 0;
	enum qeth_card_states recover_flag;

	QETH_DBF_TEXT(SETUP, 3, "setoffl");
	QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));

	if (card->dev && netif_carrier_ok(card->dev))
		netif_carrier_off(card->dev);
	recover_flag = card->state;
	if (qeth_l2_stop_card(card, recovery_mode) == -ERESTARTSYS) {
		PRINT_WARN("Stopping card %s interrupted by user!\n",
			   CARD_BUS_ID(card));
		return -ERESTARTSYS;
	}
	rc  = ccw_device_set_offline(CARD_DDEV(card));
	rc2 = ccw_device_set_offline(CARD_WDEV(card));
	rc3 = ccw_device_set_offline(CARD_RDEV(card));
	/* report the first failure among the three channels */
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
	if (recover_flag == CARD_STATE_UP)
		card->state = CARD_STATE_RECOVER;
	/* let user_space know that device is offline */
	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
	return 0;
}
1066
/* ccwgroup set_offline callback: non-recovery offline sequence. */
static int qeth_l2_set_offline(struct ccwgroup_device *cgdev)
{
	return __qeth_l2_set_offline(cgdev, 0);
}
1071
/*
 * Recovery thread body: hard-stop the card, take it offline and bring
 * it back online in recovery mode.  Always returns 0; success/failure
 * is only reported via kernel messages.
 */
static int qeth_l2_recover(void *ptr)
{
	struct qeth_card *card;
	int rc = 0;

	card = (struct qeth_card *) ptr;
	QETH_DBF_TEXT(TRACE, 2, "recover1");
	QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_DBF_TEXT(TRACE, 2, "recover2");
	PRINT_WARN("Recovery of device %s started ...\n",
		   CARD_BUS_ID(card));
	/* card may be broken: skip the graceful DELMAC on stop */
	card->use_hard_stop = 1;
	__qeth_l2_set_offline(card->gdev, 1);
	rc = __qeth_l2_set_online(card->gdev, 1);
	/* don't run another scheduled recovery */
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		PRINT_INFO("Device %s successfully recovered!\n",
			   CARD_BUS_ID(card));
	else
		PRINT_INFO("Device %s could not be recovered!\n",
			   CARD_BUS_ID(card));
	return 0;
}
1099
/* Module init: registration with the qeth core happens elsewhere. */
static int __init qeth_l2_init(void)
{
	PRINT_INFO("register layer 2 discipline\n");
	return 0;
}
1105
/* Module exit: nothing to undo beyond the log message. */
static void __exit qeth_l2_exit(void)
{
	PRINT_INFO("unregister layer 2 discipline\n");
}
1110
/*
 * ccwgroup shutdown callback (reboot/halt): quiesce QDIO so the card
 * does not DMA into memory after the kernel is gone.
 */
static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	qeth_qdio_clear_card(card, 0);
	qeth_clear_qdio_buffers(card);
}
1117
/* layer-2 discipline entry points, used by the qeth core driver */
struct ccwgroup_driver qeth_l2_ccwgroup_driver = {
	.probe = qeth_l2_probe_device,
	.remove = qeth_l2_remove_device,
	.set_online = qeth_l2_set_online,
	.set_offline = qeth_l2_set_offline,
	.shutdown = qeth_l2_shutdown,
};
EXPORT_SYMBOL_GPL(qeth_l2_ccwgroup_driver);
1126
/*
 * Send a prepared control buffer on the write channel for an OSN
 * device.  Waits until no other IRQ is pending on the channel, then
 * starts the CCW under the ccwdev lock.  On failure the buffer is
 * released and waiters are woken.
 * Returns the ccw_device_start() result.
 */
static int qeth_osn_send_control_data(struct qeth_card *card, int len,
			   struct qeth_cmd_buffer *iob)
{
	unsigned long flags;
	int rc = 0;

	QETH_DBF_TEXT(TRACE, 5, "osndctrd");

	/* claim the channel: irq_pending acts as a busy flag */
	wait_event(card->wait_q,
		   atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
	qeth_prepare_control_data(card, len, iob);
	QETH_DBF_TEXT(TRACE, 6, "osnoirqp");
	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
			      (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
	if (rc) {
		PRINT_WARN("qeth_osn_send_control_data: "
			   "ccw_device_start rc = %i\n", rc);
		QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
		qeth_release_buffer(iob->channel, iob);
		/* release the channel again on failure */
		atomic_set(&card->write.irq_pending, 0);
		wake_up(&card->wait_q);
	}
	return rc;
}
1153
1154static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
1155 struct qeth_cmd_buffer *iob, int data_len)
1156{
1157 u16 s1, s2;
1158
1159 QETH_DBF_TEXT(TRACE, 4, "osndipa");
1160
1161 qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
1162 s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
1163 s2 = (u16)data_len;
1164 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
1165 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
1166 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
1167 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
1168 return qeth_osn_send_control_data(card, s1, iob);
1169}
1170
/*
 * Public OSN API: send @data_len bytes of caller-provided IPA payload
 * through the card's write channel.  The card must be UP or in
 * SOFTSETUP.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): @data_len is not validated against the command buffer
 * size before the memcpy — confirm callers are trusted / bounded.
 */
int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_card *card;
	int rc;

	QETH_DBF_TEXT(TRACE, 2, "osnsdmc");
	if (!dev)
		return -ENODEV;
	card = netdev_priv(dev);
	if (!card)
		return -ENODEV;
	if ((card->state != CARD_STATE_UP) &&
	    (card->state != CARD_STATE_SOFTSETUP))
		return -ENODEV;
	/* blocks until a command buffer is available */
	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data+IPA_PDU_HEADER_SIZE, data, data_len);
	rc = qeth_osn_send_ipa_cmd(card, iob, data_len);
	return rc;
}
EXPORT_SYMBOL(qeth_osn_assist);
1192
/*
 * Public OSN API: look up the net_device belonging to @read_dev_no and
 * install the caller's assist and data callbacks on its card.
 * Returns 0 on success, -ENODEV if no matching device exists, or
 * -EINVAL if either callback is NULL.
 */
int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
		  int (*assist_cb)(struct net_device *, void *),
		  int (*data_cb)(struct sk_buff *))
{
	struct qeth_card *card;

	QETH_DBF_TEXT(TRACE, 2, "osnreg");
	*dev = qeth_l2_netdev_by_devno(read_dev_no);
	if (*dev == NULL)
		return -ENODEV;
	card = netdev_priv(*dev);
	if (!card)
		return -ENODEV;
	if ((assist_cb == NULL) || (data_cb == NULL))
		return -EINVAL;
	card->osn_info.assist_cb = assist_cb;
	card->osn_info.data_cb = data_cb;
	return 0;
}
EXPORT_SYMBOL(qeth_osn_register);
1213
/*
 * Public OSN API: remove the previously registered OSN callbacks from
 * the card belonging to @dev.  Safe to call with a NULL device.
 */
void qeth_osn_deregister(struct net_device *dev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(TRACE, 2, "osndereg");
	if (!dev)
		return;
	card = netdev_priv(dev);
	if (!card)
		return;
	card->osn_info.assist_cb = NULL;
	card->osn_info.data_cb = NULL;
	return;
}
EXPORT_SYMBOL(qeth_osn_deregister);
1229
1230module_init(qeth_l2_init);
1231module_exit(qeth_l2_exit);
1232MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
1233MODULE_DESCRIPTION("qeth layer 2 discipline");
1234MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
new file mode 100644
index 000000000000..1be353593a59
--- /dev/null
+++ b/drivers/s390/net/qeth_l3.h
@@ -0,0 +1,67 @@
1/*
2 * drivers/s390/net/qeth_l3.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#ifndef __QETH_L3_H__
12#define __QETH_L3_H__
13
14#include "qeth_core.h"
15
16#define QETH_DBF_TXT_BUF qeth_l3_dbf_txt_buf
17DECLARE_PER_CPU(char[256], qeth_l3_dbf_txt_buf);
18
/* One IP address (v4 or v6) tracked for a layer-3 qeth card. */
struct qeth_ipaddr {
	struct list_head entry;
	enum qeth_ip_types type;		/* normal/VIPA/RXIP/... */
	enum qeth_ipa_setdelip_flags set_flags;	/* flags for SETIP */
	enum qeth_ipa_setdelip_flags del_flags;	/* flags for DELIP */
	int is_multicast;
	int users;				/* reference count */
	enum qeth_prot_versions proto;		/* selects u.a4 or u.a6 */
	unsigned char mac[OSA_ADDR_LEN];
	union {
		struct {
			unsigned int addr;
			unsigned int mask;
		} a4;
		struct {
			struct in6_addr addr;
			unsigned int pfxlen;	/* prefix length in bits */
		} a6;
	} u;
};
39
/* IP-address-takeover rule: addresses matching addr/mask_bits. */
struct qeth_ipato_entry {
	struct list_head entry;
	enum qeth_prot_versions proto;
	char addr[16];		/* big enough for v4 (4) and v6 (16) */
	int mask_bits;
};
46
47
48void qeth_l3_ipaddr4_to_string(const __u8 *, char *);
49int qeth_l3_string_to_ipaddr4(const char *, __u8 *);
50void qeth_l3_ipaddr6_to_string(const __u8 *, char *);
51int qeth_l3_string_to_ipaddr6(const char *, __u8 *);
52void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
53int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *);
54int qeth_l3_create_device_attributes(struct device *);
55void qeth_l3_remove_device_attributes(struct device *);
56int qeth_l3_setrouting_v4(struct qeth_card *);
57int qeth_l3_setrouting_v6(struct qeth_card *);
58int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
59void qeth_l3_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions,
60 u8 *, int);
61int qeth_l3_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
62void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
63int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
64void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
65 const u8 *);
66
67#endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
new file mode 100644
index 000000000000..e1bfe56087d6
--- /dev/null
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -0,0 +1,3396 @@
1/*
2 * drivers/s390/net/qeth_l3_main.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/string.h>
14#include <linux/errno.h>
15#include <linux/kernel.h>
16#include <linux/etherdevice.h>
17#include <linux/mii.h>
18#include <linux/ip.h>
19#include <linux/reboot.h>
20#include <linux/inetdevice.h>
21#include <linux/igmp.h>
22
23#include <net/ip.h>
24#include <net/arp.h>
25
26#include <asm/s390_rdev.h>
27
28#include "qeth_l3.h"
29#include "qeth_core_offl.h"
30
31DEFINE_PER_CPU(char[256], qeth_l3_dbf_txt_buf);
32
33static int qeth_l3_set_offline(struct ccwgroup_device *);
34static int qeth_l3_recover(void *);
35static int qeth_l3_stop(struct net_device *);
36static void qeth_l3_set_multicast_list(struct net_device *);
37static int qeth_l3_neigh_setup(struct net_device *, struct neigh_parms *);
38static int qeth_l3_register_addr_entry(struct qeth_card *,
39 struct qeth_ipaddr *);
40static int qeth_l3_deregister_addr_entry(struct qeth_card *,
41 struct qeth_ipaddr *);
42static int __qeth_l3_set_online(struct ccwgroup_device *, int);
43static int __qeth_l3_set_offline(struct ccwgroup_device *, int);
44
45
/* Return 1 if every character of the NUL-terminated buffer is a hex digit. */
static int qeth_l3_isxdigit(char *buf)
{
	for (; *buf; buf++)
		if (!isxdigit(*buf))
			return 0;
	return 1;
}
54
55void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
56{
57 sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]);
58}
59
60int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
61{
62 int count = 0, rc = 0;
63 int in[4];
64 char c;
65
66 rc = sscanf(buf, "%u.%u.%u.%u%c",
67 &in[0], &in[1], &in[2], &in[3], &c);
68 if (rc != 4 && (rc != 5 || c != '\n'))
69 return -EINVAL;
70 for (count = 0; count < 4; count++) {
71 if (in[count] > 255)
72 return -EINVAL;
73 addr[count] = in[count];
74 }
75 return 0;
76}
77
78void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
79{
80 sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
81 ":%02x%02x:%02x%02x:%02x%02x:%02x%02x",
82 addr[0], addr[1], addr[2], addr[3],
83 addr[4], addr[5], addr[6], addr[7],
84 addr[8], addr[9], addr[10], addr[11],
85 addr[12], addr[13], addr[14], addr[15]);
86}
87
/*
 * Parse a textual IPv6 address (hex groups separated by ':', at most one
 * "::" abbreviation, optional trailing '\n') into 16 raw bytes.
 * Returns 0 on success, -EINVAL on malformed input.
 *
 * Groups seen before the "::" are stored from the front of the address;
 * groups after it are collected in in_tmp[] and copied to the tail at the
 * end, so the "::" expands to the required number of zero groups.
 * NOTE(review): each group is written through a __u16 pointer in host
 * byte order — on s390 (big endian) this matches network order; confirm
 * if this code is ever reused elsewhere.
 */
int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
{
	const char *end, *end_tmp, *start;
	__u16 *in;
	char num[5];
	int num2, cnt, out, found, save_cnt;
	unsigned short in_tmp[8] = {0, };

	cnt = out = found = save_cnt = num2 = 0;
	end = start = buf;
	in = (__u16 *) addr;
	memset(in, 0, 16);
	while (*end) {
		end = strchr(start, ':');
		if (end == NULL) {
			/* last group: runs to end of string or newline */
			end = buf + strlen(buf);
			end_tmp = strchr(start, '\n');
			if (end_tmp != NULL)
				end = end_tmp;
			out = 1;
		}
		if ((end - start)) {
			memset(num, 0, 5);
			/* a group is at most 4 hex digits */
			if ((end - start) > 4)
				return -EINVAL;
			memcpy(num, start, end - start);
			if (!qeth_l3_isxdigit(num))
				return -EINVAL;
			sscanf(start, "%x", &num2);
			if (found)
				in_tmp[save_cnt++] = num2;
			else
				in[cnt++] = num2;
			if (out)
				break;
		} else {
			/* empty group: the "::" abbreviation, allowed once */
			if (found)
				return -EINVAL;
			found = 1;
		}
		start = ++end;
	}
	if (cnt + save_cnt > 8)
		return -EINVAL;
	/* place the post-"::" groups at the end of the address */
	cnt = 7;
	while (save_cnt)
		in[cnt--] = in_tmp[--save_cnt];
	return 0;
}
137
138void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
139 char *buf)
140{
141 if (proto == QETH_PROT_IPV4)
142 qeth_l3_ipaddr4_to_string(addr, buf);
143 else if (proto == QETH_PROT_IPV6)
144 qeth_l3_ipaddr6_to_string(addr, buf);
145}
146
147int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
148 __u8 *addr)
149{
150 if (proto == QETH_PROT_IPV4)
151 return qeth_l3_string_to_ipaddr4(buf, addr);
152 else if (proto == QETH_PROT_IPV6)
153 return qeth_l3_string_to_ipaddr6(buf, addr);
154 else
155 return -EINVAL;
156}
157
158static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
159{
160 int i, j;
161 u8 octet;
162
163 for (i = 0; i < len; ++i) {
164 octet = addr[i];
165 for (j = 7; j >= 0; --j) {
166 bits[i*8 + j] = octet & 1;
167 octet >>= 1;
168 }
169 }
170}
171
/*
 * Check whether @addr falls under one of the card's IP-address-takeover
 * (IPATO) rules.  A rule matches when its leading mask_bits bits equal the
 * address; the result can be inverted per protocol via ipato.invert4/6.
 * Returns non-zero when the address is subject to takeover.
 */
static int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
						struct qeth_ipaddr *addr)
{
	struct qeth_ipato_entry *ipatoe;
	u8 addr_bits[128] = {0, };
	u8 ipatoe_bits[128] = {0, };
	int rc = 0;

	if (!card->ipato.enabled)
		return 0;

	/* expand into one byte per bit so prefixes compare with memcmp */
	qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
				(addr->proto == QETH_PROT_IPV4)? 4:16);
	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
		if (addr->proto != ipatoe->proto)
			continue;
		qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
					(ipatoe->proto == QETH_PROT_IPV4) ?
					4 : 16);
		if (addr->proto == QETH_PROT_IPV4)
			rc = !memcmp(addr_bits, ipatoe_bits,
				min(32, ipatoe->mask_bits));
		else
			rc = !memcmp(addr_bits, ipatoe_bits,
				min(128, ipatoe->mask_bits));
		if (rc)
			break;
	}
	/* invert? */
	if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
		rc = !rc;
	else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
		rc = !rc;

	return rc;
}
208
209/*
210 * Add IP to be added to todo list. If there is already an "add todo"
211 * in this list we just incremenent the reference count.
212 * Returns 0 if we just incremented reference count.
213 */
214static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
215 struct qeth_ipaddr *addr, int add)
216{
217 struct qeth_ipaddr *tmp, *t;
218 int found = 0;
219
220 list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
221 if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
222 (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
223 return 0;
224 if ((tmp->proto == QETH_PROT_IPV4) &&
225 (addr->proto == QETH_PROT_IPV4) &&
226 (tmp->type == addr->type) &&
227 (tmp->is_multicast == addr->is_multicast) &&
228 (tmp->u.a4.addr == addr->u.a4.addr) &&
229 (tmp->u.a4.mask == addr->u.a4.mask)) {
230 found = 1;
231 break;
232 }
233 if ((tmp->proto == QETH_PROT_IPV6) &&
234 (addr->proto == QETH_PROT_IPV6) &&
235 (tmp->type == addr->type) &&
236 (tmp->is_multicast == addr->is_multicast) &&
237 (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
238 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
239 sizeof(struct in6_addr)) == 0)) {
240 found = 1;
241 break;
242 }
243 }
244 if (found) {
245 if (addr->users != 0)
246 tmp->users += addr->users;
247 else
248 tmp->users += add ? 1 : -1;
249 if (tmp->users == 0) {
250 list_del(&tmp->entry);
251 kfree(tmp);
252 }
253 return 0;
254 } else {
255 if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
256 list_add(&addr->entry, card->ip_tbd_list);
257 else {
258 if (addr->users == 0)
259 addr->users += add ? 1 : -1;
260 if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
261 qeth_l3_is_addr_covered_by_ipato(card, addr)) {
262 QETH_DBF_TEXT(TRACE, 2, "tkovaddr");
263 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
264 }
265 list_add_tail(&addr->entry, card->ip_tbd_list);
266 }
267 return 1;
268 }
269}
270
271static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
272{
273 unsigned long flags;
274 int rc = 0;
275
276 QETH_DBF_TEXT(TRACE, 4, "delip");
277
278 if (addr->proto == QETH_PROT_IPV4)
279 QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
280 else {
281 QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
282 QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
283 }
284 spin_lock_irqsave(&card->ip_lock, flags);
285 rc = __qeth_l3_insert_ip_todo(card, addr, 0);
286 spin_unlock_irqrestore(&card->ip_lock, flags);
287 return rc;
288}
289
290static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
291{
292 unsigned long flags;
293 int rc = 0;
294
295 QETH_DBF_TEXT(TRACE, 4, "addip");
296 if (addr->proto == QETH_PROT_IPV4)
297 QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
298 else {
299 QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
300 QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
301 }
302 spin_lock_irqsave(&card->ip_lock, flags);
303 rc = __qeth_l3_insert_ip_todo(card, addr, 1);
304 spin_unlock_irqrestore(&card->ip_lock, flags);
305 return rc;
306}
307
308
309static struct qeth_ipaddr *qeth_l3_get_addr_buffer(
310 enum qeth_prot_versions prot)
311{
312 struct qeth_ipaddr *addr;
313
314 addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
315 if (addr == NULL) {
316 PRINT_WARN("Not enough memory to add address\n");
317 return NULL;
318 }
319 addr->type = QETH_IP_TYPE_NORMAL;
320 addr->proto = prot;
321 return addr;
322}
323
324static void qeth_l3_delete_mc_addresses(struct qeth_card *card)
325{
326 struct qeth_ipaddr *iptodo;
327 unsigned long flags;
328
329 QETH_DBF_TEXT(TRACE, 4, "delmc");
330 iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
331 if (!iptodo) {
332 QETH_DBF_TEXT(TRACE, 2, "dmcnomem");
333 return;
334 }
335 iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
336 spin_lock_irqsave(&card->ip_lock, flags);
337 if (!__qeth_l3_insert_ip_todo(card, iptodo, 0))
338 kfree(iptodo);
339 spin_unlock_irqrestore(&card->ip_lock, flags);
340}
341
342/*
343 * Add/remove address to/from card's ip list, i.e. try to add or remove
344 * reference to/from an IP address that is already registered on the card.
345 * Returns:
346 * 0 address was on card and its reference count has been adjusted,
347 * but is still > 0, so nothing has to be done
348 * also returns 0 if card was not on card and the todo was to delete
349 * the address -> there is also nothing to be done
350 * 1 address was not on card and the todo is to add it to the card's ip
351 * list
352 * -1 address was on card and its reference count has been decremented
353 * to <= 0 by the todo -> address must be removed from card
354 */
355static int __qeth_l3_ref_ip_on_card(struct qeth_card *card,
356 struct qeth_ipaddr *todo, struct qeth_ipaddr **__addr)
357{
358 struct qeth_ipaddr *addr;
359 int found = 0;
360
361 list_for_each_entry(addr, &card->ip_list, entry) {
362 if ((addr->proto == QETH_PROT_IPV4) &&
363 (todo->proto == QETH_PROT_IPV4) &&
364 (addr->type == todo->type) &&
365 (addr->u.a4.addr == todo->u.a4.addr) &&
366 (addr->u.a4.mask == todo->u.a4.mask)) {
367 found = 1;
368 break;
369 }
370 if ((addr->proto == QETH_PROT_IPV6) &&
371 (todo->proto == QETH_PROT_IPV6) &&
372 (addr->type == todo->type) &&
373 (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
374 (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
375 sizeof(struct in6_addr)) == 0)) {
376 found = 1;
377 break;
378 }
379 }
380 if (found) {
381 addr->users += todo->users;
382 if (addr->users <= 0) {
383 *__addr = addr;
384 return -1;
385 } else {
386 /* for VIPA and RXIP limit refcount to 1 */
387 if (addr->type != QETH_IP_TYPE_NORMAL)
388 addr->users = 1;
389 return 0;
390 }
391 }
392 if (todo->users > 0) {
393 /* for VIPA and RXIP limit refcount to 1 */
394 if (todo->type != QETH_IP_TYPE_NORMAL)
395 todo->users = 1;
396 return 1;
397 } else
398 return 0;
399}
400
/*
 * Deregister every multicast address on card->ip_list.  The ip_lock (held
 * by the caller, whose irq flags are passed by reference) is dropped
 * around each hardware call; because the list may change while unlocked,
 * the scan restarts from the top after every removal.  Entries that fail
 * to deregister (other than "not found") are put back on the list.
 */
static void __qeth_l3_delete_all_mc(struct qeth_card *card,
					unsigned long *flags)
{
	struct list_head fail_list;
	struct qeth_ipaddr *addr, *tmp;
	int rc;

	INIT_LIST_HEAD(&fail_list);
again:
	list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
		if (addr->is_multicast) {
			list_del(&addr->entry);
			spin_unlock_irqrestore(&card->ip_lock, *flags);
			rc = qeth_l3_deregister_addr_entry(card, addr);
			spin_lock_irqsave(&card->ip_lock, *flags);
			if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND))
				kfree(addr);
			else
				list_add_tail(&addr->entry, &fail_list);
			goto again;
		}
	}
	/* keep the entries we could not deregister */
	list_splice(&fail_list, &card->ip_list);
}
425
/*
 * Work off card->ip_tbd_list: apply every pending add/delete todo to the
 * on-card list (card->ip_list), performing the hardware registration or
 * deregistration as needed.  A fresh empty todo list is installed first so
 * new todos can be queued concurrently; the ip_lock is dropped around each
 * hardware call.
 */
static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
{
	struct list_head *tbd_list;
	struct qeth_ipaddr *todo, *addr;
	unsigned long flags;
	int rc;

	QETH_DBF_TEXT(TRACE, 2, "sdiplist");
	QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));

	spin_lock_irqsave(&card->ip_lock, flags);
	/* detach the current todo list and start a new, empty one */
	tbd_list = card->ip_tbd_list;
	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
	if (!card->ip_tbd_list) {
		QETH_DBF_TEXT(TRACE, 0, "silnomem");
		card->ip_tbd_list = tbd_list;
		spin_unlock_irqrestore(&card->ip_lock, flags);
		return;
	} else
		INIT_LIST_HEAD(card->ip_tbd_list);

	while (!list_empty(tbd_list)) {
		todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
		list_del(&todo->entry);
		if (todo->type == QETH_IP_TYPE_DEL_ALL_MC) {
			__qeth_l3_delete_all_mc(card, &flags);
			kfree(todo);
			continue;
		}
		rc = __qeth_l3_ref_ip_on_card(card, todo, &addr);
		if (rc == 0) {
			/* nothing to be done; only adjusted refcount */
			kfree(todo);
		} else if (rc == 1) {
			/* new entry to be added to on-card list */
			spin_unlock_irqrestore(&card->ip_lock, flags);
			rc = qeth_l3_register_addr_entry(card, todo);
			spin_lock_irqsave(&card->ip_lock, flags);
			if (!rc || (rc == IPA_RC_LAN_OFFLINE))
				list_add_tail(&todo->entry, &card->ip_list);
			else
				kfree(todo);
		} else if (rc == -1) {
			/* on-card entry to be removed */
			list_del_init(&addr->entry);
			spin_unlock_irqrestore(&card->ip_lock, flags);
			rc = qeth_l3_deregister_addr_entry(card, addr);
			spin_lock_irqsave(&card->ip_lock, flags);
			if (!rc || (rc == IPA_RC_PRIMARY_ALREADY_DEFINED))
				kfree(addr);
			else
				list_add_tail(&addr->entry, &card->ip_list);
			kfree(todo);
		}
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
	kfree(tbd_list);
}
484
/*
 * Drop every pending todo and remove all addresses from card->ip_list.
 * @clean:   also deregister each address from the hardware.
 * @recover: keep non-multicast addresses by moving them to the todo list
 *           so they are re-registered after recovery.
 */
static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
					int recover)
{
	struct qeth_ipaddr *addr, *tmp;
	unsigned long flags;

	QETH_DBF_TEXT(TRACE, 4, "clearip");
	spin_lock_irqsave(&card->ip_lock, flags);
	/* clear todo list */
	list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) {
		list_del(&addr->entry);
		kfree(addr);
	}

	while (!list_empty(&card->ip_list)) {
		addr = list_entry(card->ip_list.next,
					struct qeth_ipaddr, entry);
		list_del_init(&addr->entry);
		if (clean) {
			/* drop the lock around the hardware call */
			spin_unlock_irqrestore(&card->ip_lock, flags);
			qeth_l3_deregister_addr_entry(card, addr);
			spin_lock_irqsave(&card->ip_lock, flags);
		}
		if (!recover || addr->is_multicast) {
			kfree(addr);
			continue;
		}
		list_add_tail(&addr->entry, card->ip_tbd_list);
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
}
516
517static int qeth_l3_address_exists_in_list(struct list_head *list,
518 struct qeth_ipaddr *addr, int same_type)
519{
520 struct qeth_ipaddr *tmp;
521
522 list_for_each_entry(tmp, list, entry) {
523 if ((tmp->proto == QETH_PROT_IPV4) &&
524 (addr->proto == QETH_PROT_IPV4) &&
525 ((same_type && (tmp->type == addr->type)) ||
526 (!same_type && (tmp->type != addr->type))) &&
527 (tmp->u.a4.addr == addr->u.a4.addr))
528 return 1;
529
530 if ((tmp->proto == QETH_PROT_IPV6) &&
531 (addr->proto == QETH_PROT_IPV6) &&
532 ((same_type && (tmp->type == addr->type)) ||
533 (!same_type && (tmp->type != addr->type))) &&
534 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
535 sizeof(struct in6_addr)) == 0))
536 return 1;
537
538 }
539 return 0;
540}
541
542static int qeth_l3_send_setdelmc(struct qeth_card *card,
543 struct qeth_ipaddr *addr, int ipacmd)
544{
545 int rc;
546 struct qeth_cmd_buffer *iob;
547 struct qeth_ipa_cmd *cmd;
548
549 QETH_DBF_TEXT(TRACE, 4, "setdelmc");
550
551 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
552 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
553 memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
554 if (addr->proto == QETH_PROT_IPV6)
555 memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
556 sizeof(struct in6_addr));
557 else
558 memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4);
559
560 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
561
562 return rc;
563}
564
565static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len)
566{
567 int i, j;
568 for (i = 0; i < 16; i++) {
569 j = (len) - (i * 8);
570 if (j >= 8)
571 netmask[i] = 0xff;
572 else if (j > 0)
573 netmask[i] = (u8)(0xFF00 >> j);
574 else
575 netmask[i] = 0;
576 }
577}
578
579static int qeth_l3_send_setdelip(struct qeth_card *card,
580 struct qeth_ipaddr *addr, int ipacmd, unsigned int flags)
581{
582 int rc;
583 struct qeth_cmd_buffer *iob;
584 struct qeth_ipa_cmd *cmd;
585 __u8 netmask[16];
586
587 QETH_DBF_TEXT(TRACE, 4, "setdelip");
588 QETH_DBF_TEXT_(TRACE, 4, "flags%02X", flags);
589
590 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
591 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
592 if (addr->proto == QETH_PROT_IPV6) {
593 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
594 sizeof(struct in6_addr));
595 qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen);
596 memcpy(cmd->data.setdelip6.mask, netmask,
597 sizeof(struct in6_addr));
598 cmd->data.setdelip6.flags = flags;
599 } else {
600 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
601 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
602 cmd->data.setdelip4.flags = flags;
603 }
604
605 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
606
607 return rc;
608}
609
610static int qeth_l3_send_setrouting(struct qeth_card *card,
611 enum qeth_routing_types type, enum qeth_prot_versions prot)
612{
613 int rc;
614 struct qeth_ipa_cmd *cmd;
615 struct qeth_cmd_buffer *iob;
616
617 QETH_DBF_TEXT(TRACE, 4, "setroutg");
618 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
619 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
620 cmd->data.setrtg.type = (type);
621 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
622
623 return rc;
624}
625
/*
 * Validate the requested routing type against the card type (IQD vs OSA)
 * and the card's IPA capabilities; fall back to NO_ROUTER with a warning
 * when the combination is unsupported.
 */
static void qeth_l3_correct_routing_type(struct qeth_card *card,
		enum qeth_routing_types *type, enum qeth_prot_versions prot)
{
	if (card->info.type == QETH_CARD_TYPE_IQD) {
		switch (*type) {
		case NO_ROUTER:
		case PRIMARY_CONNECTOR:
		case SECONDARY_CONNECTOR:
		case MULTICAST_ROUTER:
			return;
		default:
			goto out_inval;
		}
	} else {
		switch (*type) {
		case NO_ROUTER:
		case PRIMARY_ROUTER:
		case SECONDARY_ROUTER:
			return;
		case MULTICAST_ROUTER:
			if (qeth_is_ipafunc_supported(card, prot,
						      IPA_OSA_MC_ROUTER))
				return;
			/* fall through: multicast routing not supported */
		default:
			goto out_inval;
		}
	}
out_inval:
	PRINT_WARN("Routing type '%s' not supported for interface %s.\n"
		   "Router status set to 'no router'.\n",
		   ((*type == PRIMARY_ROUTER)? "primary router" :
		    (*type == SECONDARY_ROUTER)? "secondary router" :
		    (*type == PRIMARY_CONNECTOR)? "primary connector" :
		    (*type == SECONDARY_CONNECTOR)? "secondary connector" :
		    (*type == MULTICAST_ROUTER)? "multicast router" :
		    "unknown"),
		   card->dev->name);
	*type = NO_ROUTER;
}
665
666int qeth_l3_setrouting_v4(struct qeth_card *card)
667{
668 int rc;
669
670 QETH_DBF_TEXT(TRACE, 3, "setrtg4");
671
672 qeth_l3_correct_routing_type(card, &card->options.route4.type,
673 QETH_PROT_IPV4);
674
675 rc = qeth_l3_send_setrouting(card, card->options.route4.type,
676 QETH_PROT_IPV4);
677 if (rc) {
678 card->options.route4.type = NO_ROUTER;
679 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
680 "Type set to 'no router'.\n",
681 rc, QETH_CARD_IFNAME(card));
682 }
683 return rc;
684}
685
/*
 * Apply the configured IPv6 routing type; reset to NO_ROUTER on failure.
 * A no-op (returning 0) when IPv6 support is not compiled in or not
 * offered by the card.
 */
int qeth_l3_setrouting_v6(struct qeth_card *card)
{
	int rc = 0;

	QETH_DBF_TEXT(TRACE, 3, "setrtg6");
#ifdef CONFIG_QETH_IPV6

	if (!qeth_is_supported(card, IPA_IPV6))
		return 0;
	qeth_l3_correct_routing_type(card, &card->options.route6.type,
				     QETH_PROT_IPV6);

	rc = qeth_l3_send_setrouting(card, card->options.route6.type,
				     QETH_PROT_IPV6);
	if (rc) {
		card->options.route6.type = NO_ROUTER;
		PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
			   "Type set to 'no router'.\n",
			   rc, QETH_CARD_IFNAME(card));
	}
#endif
	return rc;
}
709
710/*
711 * IP address takeover related functions
712 */
713static void qeth_l3_clear_ipato_list(struct qeth_card *card)
714{
715
716 struct qeth_ipato_entry *ipatoe, *tmp;
717 unsigned long flags;
718
719 spin_lock_irqsave(&card->ip_lock, flags);
720 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
721 list_del(&ipatoe->entry);
722 kfree(ipatoe);
723 }
724 spin_unlock_irqrestore(&card->ip_lock, flags);
725}
726
727int qeth_l3_add_ipato_entry(struct qeth_card *card,
728 struct qeth_ipato_entry *new)
729{
730 struct qeth_ipato_entry *ipatoe;
731 unsigned long flags;
732 int rc = 0;
733
734 QETH_DBF_TEXT(TRACE, 2, "addipato");
735 spin_lock_irqsave(&card->ip_lock, flags);
736 list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
737 if (ipatoe->proto != new->proto)
738 continue;
739 if (!memcmp(ipatoe->addr, new->addr,
740 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
741 (ipatoe->mask_bits == new->mask_bits)) {
742 PRINT_WARN("ipato entry already exists!\n");
743 rc = -EEXIST;
744 break;
745 }
746 }
747 if (!rc)
748 list_add_tail(&new->entry, &card->ipato.entries);
749
750 spin_unlock_irqrestore(&card->ip_lock, flags);
751 return rc;
752}
753
754void qeth_l3_del_ipato_entry(struct qeth_card *card,
755 enum qeth_prot_versions proto, u8 *addr, int mask_bits)
756{
757 struct qeth_ipato_entry *ipatoe, *tmp;
758 unsigned long flags;
759
760 QETH_DBF_TEXT(TRACE, 2, "delipato");
761 spin_lock_irqsave(&card->ip_lock, flags);
762 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
763 if (ipatoe->proto != proto)
764 continue;
765 if (!memcmp(ipatoe->addr, addr,
766 (proto == QETH_PROT_IPV4)? 4:16) &&
767 (ipatoe->mask_bits == mask_bits)) {
768 list_del(&ipatoe->entry);
769 kfree(ipatoe);
770 }
771 }
772 spin_unlock_irqrestore(&card->ip_lock, flags);
773}
774
775/*
776 * VIPA related functions
777 */
778int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
779 const u8 *addr)
780{
781 struct qeth_ipaddr *ipaddr;
782 unsigned long flags;
783 int rc = 0;
784
785 ipaddr = qeth_l3_get_addr_buffer(proto);
786 if (ipaddr) {
787 if (proto == QETH_PROT_IPV4) {
788 QETH_DBF_TEXT(TRACE, 2, "addvipa4");
789 memcpy(&ipaddr->u.a4.addr, addr, 4);
790 ipaddr->u.a4.mask = 0;
791 } else if (proto == QETH_PROT_IPV6) {
792 QETH_DBF_TEXT(TRACE, 2, "addvipa6");
793 memcpy(&ipaddr->u.a6.addr, addr, 16);
794 ipaddr->u.a6.pfxlen = 0;
795 }
796 ipaddr->type = QETH_IP_TYPE_VIPA;
797 ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
798 ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
799 } else
800 return -ENOMEM;
801 spin_lock_irqsave(&card->ip_lock, flags);
802 if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
803 qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
804 rc = -EEXIST;
805 spin_unlock_irqrestore(&card->ip_lock, flags);
806 if (rc) {
807 PRINT_WARN("Cannot add VIPA. Address already exists!\n");
808 return rc;
809 }
810 if (!qeth_l3_add_ip(card, ipaddr))
811 kfree(ipaddr);
812 qeth_l3_set_ip_addr_list(card);
813 return rc;
814}
815
816void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
817 const u8 *addr)
818{
819 struct qeth_ipaddr *ipaddr;
820
821 ipaddr = qeth_l3_get_addr_buffer(proto);
822 if (ipaddr) {
823 if (proto == QETH_PROT_IPV4) {
824 QETH_DBF_TEXT(TRACE, 2, "delvipa4");
825 memcpy(&ipaddr->u.a4.addr, addr, 4);
826 ipaddr->u.a4.mask = 0;
827 } else if (proto == QETH_PROT_IPV6) {
828 QETH_DBF_TEXT(TRACE, 2, "delvipa6");
829 memcpy(&ipaddr->u.a6.addr, addr, 16);
830 ipaddr->u.a6.pfxlen = 0;
831 }
832 ipaddr->type = QETH_IP_TYPE_VIPA;
833 } else
834 return;
835 if (!qeth_l3_delete_ip(card, ipaddr))
836 kfree(ipaddr);
837 qeth_l3_set_ip_addr_list(card);
838}
839
840/*
841 * proxy ARP related functions
842 */
843int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
844 const u8 *addr)
845{
846 struct qeth_ipaddr *ipaddr;
847 unsigned long flags;
848 int rc = 0;
849
850 ipaddr = qeth_l3_get_addr_buffer(proto);
851 if (ipaddr) {
852 if (proto == QETH_PROT_IPV4) {
853 QETH_DBF_TEXT(TRACE, 2, "addrxip4");
854 memcpy(&ipaddr->u.a4.addr, addr, 4);
855 ipaddr->u.a4.mask = 0;
856 } else if (proto == QETH_PROT_IPV6) {
857 QETH_DBF_TEXT(TRACE, 2, "addrxip6");
858 memcpy(&ipaddr->u.a6.addr, addr, 16);
859 ipaddr->u.a6.pfxlen = 0;
860 }
861 ipaddr->type = QETH_IP_TYPE_RXIP;
862 ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
863 ipaddr->del_flags = 0;
864 } else
865 return -ENOMEM;
866 spin_lock_irqsave(&card->ip_lock, flags);
867 if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
868 qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
869 rc = -EEXIST;
870 spin_unlock_irqrestore(&card->ip_lock, flags);
871 if (rc) {
872 PRINT_WARN("Cannot add RXIP. Address already exists!\n");
873 return rc;
874 }
875 if (!qeth_l3_add_ip(card, ipaddr))
876 kfree(ipaddr);
877 qeth_l3_set_ip_addr_list(card);
878 return 0;
879}
880
881void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
882 const u8 *addr)
883{
884 struct qeth_ipaddr *ipaddr;
885
886 ipaddr = qeth_l3_get_addr_buffer(proto);
887 if (ipaddr) {
888 if (proto == QETH_PROT_IPV4) {
889 QETH_DBF_TEXT(TRACE, 2, "addrxip4");
890 memcpy(&ipaddr->u.a4.addr, addr, 4);
891 ipaddr->u.a4.mask = 0;
892 } else if (proto == QETH_PROT_IPV6) {
893 QETH_DBF_TEXT(TRACE, 2, "addrxip6");
894 memcpy(&ipaddr->u.a6.addr, addr, 16);
895 ipaddr->u.a6.pfxlen = 0;
896 }
897 ipaddr->type = QETH_IP_TYPE_RXIP;
898 } else
899 return;
900 if (!qeth_l3_delete_ip(card, ipaddr))
901 kfree(ipaddr);
902 qeth_l3_set_ip_addr_list(card);
903}
904
905static int qeth_l3_register_addr_entry(struct qeth_card *card,
906 struct qeth_ipaddr *addr)
907{
908 char buf[50];
909 int rc = 0;
910 int cnt = 3;
911
912 if (addr->proto == QETH_PROT_IPV4) {
913 QETH_DBF_TEXT(TRACE, 2, "setaddr4");
914 QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int));
915 } else if (addr->proto == QETH_PROT_IPV6) {
916 QETH_DBF_TEXT(TRACE, 2, "setaddr6");
917 QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8);
918 QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8);
919 } else {
920 QETH_DBF_TEXT(TRACE, 2, "setaddr?");
921 QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr));
922 }
923 do {
924 if (addr->is_multicast)
925 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM);
926 else
927 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP,
928 addr->set_flags);
929 if (rc)
930 QETH_DBF_TEXT(TRACE, 2, "failed");
931 } while ((--cnt > 0) && rc);
932 if (rc) {
933 QETH_DBF_TEXT(TRACE, 2, "FAILED");
934 qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
935 PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n",
936 buf, rc, rc);
937 }
938 return rc;
939}
940
941static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
942 struct qeth_ipaddr *addr)
943{
944 int rc = 0;
945
946 if (addr->proto == QETH_PROT_IPV4) {
947 QETH_DBF_TEXT(TRACE, 2, "deladdr4");
948 QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int));
949 } else if (addr->proto == QETH_PROT_IPV6) {
950 QETH_DBF_TEXT(TRACE, 2, "deladdr6");
951 QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8);
952 QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8);
953 } else {
954 QETH_DBF_TEXT(TRACE, 2, "deladdr?");
955 QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr));
956 }
957 if (addr->is_multicast)
958 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM);
959 else
960 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP,
961 addr->del_flags);
962 if (rc) {
963 QETH_DBF_TEXT(TRACE, 2, "failed");
964 /* TODO: re-activate this warning as soon as we have a
965 * clean mirco code
966 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
967 PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
968 buf, rc);
969 */
970 }
971
972 return rc;
973}
974
975static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
976{
977 if (cast_type == RTN_MULTICAST)
978 return QETH_CAST_MULTICAST;
979 if (cast_type == RTN_BROADCAST)
980 return QETH_CAST_BROADCAST;
981 return QETH_CAST_UNICAST;
982}
983
984static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
985{
986 u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
987 if (cast_type == RTN_MULTICAST)
988 return ct | QETH_CAST_MULTICAST;
989 if (cast_type == RTN_ANYCAST)
990 return ct | QETH_CAST_ANYCAST;
991 if (cast_type == RTN_BROADCAST)
992 return ct | QETH_CAST_BROADCAST;
993 return ct | QETH_CAST_UNICAST;
994}
995
996static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command,
997 __u32 mode)
998{
999 int rc;
1000 struct qeth_cmd_buffer *iob;
1001 struct qeth_ipa_cmd *cmd;
1002
1003 QETH_DBF_TEXT(TRACE, 4, "adpmode");
1004
1005 iob = qeth_get_adapter_cmd(card, command,
1006 sizeof(struct qeth_ipacmd_setadpparms));
1007 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1008 cmd->data.setadapterparms.data.mode = mode;
1009 rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
1010 NULL);
1011 return rc;
1012}
1013
/*
 * Configure token-ring (HSTR) adapter modes: broadcast mode and MAC
 * address (canonical/non-canonical) mode.  If SET_BROADCAST_MODE is not
 * supported, only warn about non-default settings and return 0.
 */
static int qeth_l3_setadapter_hstr(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(TRACE, 4, "adphstr");

	if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) {
		rc = qeth_l3_send_setadp_mode(card,
					IPA_SETADP_SET_BROADCAST_MODE,
					card->options.broadcast_mode);
		if (rc)
			PRINT_WARN("couldn't set broadcast mode on "
				   "device %s: x%x\n",
				   CARD_BUS_ID(card), rc);
		rc = qeth_l3_send_setadp_mode(card,
					IPA_SETADP_ALTER_MAC_ADDRESS,
					card->options.macaddr_mode);
		if (rc)
			PRINT_WARN("couldn't set macaddr mode on "
				   "device %s: x%x\n", CARD_BUS_ID(card), rc);
		return rc;
	}
	if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
		PRINT_WARN("set adapter parameters not available "
			   "to set broadcast mode, using ALLRINGS "
			   "on device %s:\n", CARD_BUS_ID(card));
	if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
		PRINT_WARN("set adapter parameters not available "
			   "to set macaddr mode, using NONCANONICAL "
			   "on device %s:\n", CARD_BUS_ID(card));
	return 0;
}
1046
/*
 * Query and apply adapter parameters during setup: read the adapter's
 * capabilities, fetch/alter the MAC address when supported, and apply
 * token-ring modes on HSTR/LANE-TR links.  Returns 0 when SETADAPTERPARMS
 * is not supported at all.
 */
static int qeth_l3_setadapter_parms(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(SETUP, 2, "setadprm");

	if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
		PRINT_WARN("set adapter parameters not supported "
			   "on device %s.\n",
			   CARD_BUS_ID(card));
		QETH_DBF_TEXT(SETUP, 2, " notsupp");
		return 0;
	}
	rc = qeth_query_setadapterparms(card);
	if (rc) {
		PRINT_WARN("couldn't set adapter parameters on device %s: "
			   "x%x\n", CARD_BUS_ID(card), rc);
		return rc;
	}
	if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
		rc = qeth_setadpparms_change_macaddr(card);
		if (rc)
			PRINT_WARN("couldn't get MAC address on "
				   "device %s: x%x\n",
				   CARD_BUS_ID(card), rc);
	}

	if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
	    (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
		rc = qeth_l3_setadapter_hstr(card);

	return rc;
}
1080
/*
 * Default completion callback for SETASSPARMS commands: propagates the
 * assist-specific return code, caches the per-protocol enabled-function
 * masks, and records the checksum mask when an inbound-checksum assist
 * was started.
 */
static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(TRACE, 4, "defadpcb");

	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code == 0) {
		/* surface the assist return code to the generic handler */
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
		if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
	}
	if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
	    cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
		card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
		QETH_DBF_TEXT_(TRACE, 3, "csum:%d", card->info.csum_mask);
	}
	return 0;
}
1103
/*
 * Allocate and pre-fill a SETASSPARMS command buffer for the given assist,
 * sub-command and protocol version.  The assist header length is the 8-byte
 * sub-header plus @len bytes of payload.
 * NOTE(review): qeth_get_ipacmd_buffer() is not checked for NULL here —
 * presumably it cannot fail in this driver; confirm before reuse.
 */
static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
	struct qeth_card *card, enum qeth_ipa_funcs ipa_func, __u16 cmd_code,
	__u16 len, enum qeth_prot_versions prot)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(TRACE, 4, "getasscm");
	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);

	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	cmd->data.setassparms.hdr.assist_no = ipa_func;
	cmd->data.setassparms.hdr.length = 8 + len;
	cmd->data.setassparms.hdr.command_code = cmd_code;
	cmd->data.setassparms.hdr.return_code = 0;
	cmd->data.setassparms.hdr.seq_no = 0;

	return iob;
}
1123
/*
 * Fill the payload of a prepared SETASSPARMS buffer and send it.
 * Payloads up to 32 bits are passed by value in @data; larger payloads
 * are copied from the buffer @data points to.  Returns the send rc.
 */
static int qeth_l3_send_setassparms(struct qeth_card *card,
	struct qeth_cmd_buffer *iob, __u16 len, long data,
	int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
		unsigned long),
	void *reply_param)
{
	int rc;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(TRACE, 4, "sendassp");

	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	if (len <= sizeof(__u32))
		cmd->data.setassparms.data.flags_32bit = (__u32) data;
	else   /* (len > sizeof(__u32)) */
		/* here @data is really a pointer to the payload */
		memcpy(&cmd->data.setassparms.data, (void *) data, len);

	rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
	return rc;
}
1144
1145#ifdef CONFIG_QETH_IPV6
1146static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
1147 enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
1148{
1149 int rc;
1150 struct qeth_cmd_buffer *iob;
1151
1152 QETH_DBF_TEXT(TRACE, 4, "simassp6");
1153 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
1154 0, QETH_PROT_IPV6);
1155 rc = qeth_l3_send_setassparms(card, iob, 0, 0,
1156 qeth_l3_default_setassparms_cb, NULL);
1157 return rc;
1158}
1159#endif
1160
1161static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
1162 enum qeth_ipa_funcs ipa_func, __u16 cmd_code, long data)
1163{
1164 int rc;
1165 int length = 0;
1166 struct qeth_cmd_buffer *iob;
1167
1168 QETH_DBF_TEXT(TRACE, 4, "simassp4");
1169 if (data)
1170 length = sizeof(__u32);
1171 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
1172 length, QETH_PROT_IPV4);
1173 rc = qeth_l3_send_setassparms(card, iob, length, data,
1174 qeth_l3_default_setassparms_cb, NULL);
1175 return rc;
1176}
1177
1178static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
1179{
1180 int rc;
1181
1182 QETH_DBF_TEXT(TRACE, 3, "ipaarp");
1183
1184 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
1185 PRINT_WARN("ARP processing not supported "
1186 "on %s!\n", QETH_CARD_IFNAME(card));
1187 return 0;
1188 }
1189 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
1190 IPA_CMD_ASS_START, 0);
1191 if (rc) {
1192 PRINT_WARN("Could not start ARP processing "
1193 "assist on %s: 0x%x\n",
1194 QETH_CARD_IFNAME(card), rc);
1195 }
1196 return rc;
1197}
1198
/*
 * Start the hardware IP-fragmentation assist.  Returns -EOPNOTSUPP when
 * the card lacks the assist, otherwise the start rc.
 */
static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(TRACE, 3, "ipaipfrg");

	if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
		PRINT_INFO("Hardware IP fragmentation not supported on %s\n",
			   QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}

	rc = qeth_l3_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
					  IPA_CMD_ASS_START, 0);
	if (rc) {
		PRINT_WARN("Could not start Hardware IP fragmentation "
			   "assist on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
	} else
		PRINT_INFO("Hardware IP fragmentation enabled \n");
	return rc;
}
1221
/*
 * Start the inbound source-MAC assist.  Only needed when the fake_ll
 * option is active; otherwise (or when unsupported) returns -EOPNOTSUPP.
 */
static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(TRACE, 3, "stsrcmac");

	/* assist only matters when faking LL headers on receive */
	if (!card->options.fake_ll)
		return -EOPNOTSUPP;

	if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
		PRINT_INFO("Inbound source address not "
			   "supported on %s\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}

	rc = qeth_l3_send_simple_setassparms(card, IPA_SOURCE_MAC,
					  IPA_CMD_ASS_START, 0);
	if (rc)
		PRINT_WARN("Could not start inbound source "
			   "assist on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
	return rc;
}
1245
/*
 * Start the VLAN priority assist.  Returns -EOPNOTSUPP when full VLAN
 * support is missing, otherwise the start rc.
 */
static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
{
	int rc = 0;

	QETH_DBF_TEXT(TRACE, 3, "strtvlan");

	if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
		PRINT_WARN("VLAN not supported on %s\n",
			   QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}

	rc = qeth_l3_send_simple_setassparms(card, IPA_VLAN_PRIO,
					  IPA_CMD_ASS_START, 0);
	if (rc) {
		PRINT_WARN("Could not start vlan "
			   "assist on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
	} else {
		PRINT_INFO("VLAN enabled \n");
	}
	return rc;
}
1269
/*
 * Start the multicast assist and, on success, advertise IFF_MULTICAST
 * on the net_device.  Returns -EOPNOTSUPP when unsupported.
 */
static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(TRACE, 3, "stmcast");

	if (!qeth_is_supported(card, IPA_MULTICASTING)) {
		PRINT_WARN("Multicast not supported on %s\n",
			   QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}

	rc = qeth_l3_send_simple_setassparms(card, IPA_MULTICASTING,
					  IPA_CMD_ASS_START, 0);
	if (rc) {
		PRINT_WARN("Could not start multicast "
			   "assist on %s: rc=%i\n",
			   QETH_CARD_IFNAME(card), rc);
	} else {
		PRINT_INFO("Multicast enabled\n");
		card->dev->flags |= IFF_MULTICAST;
	}
	return rc;
}
1294
/*
 * QIPASSIST reply callback: record the supported/enabled assist masks in
 * the per-protocol options.  Any prot_version other than IPv4 is treated
 * as IPv6 here.  Always returns 0.
 */
static int qeth_l3_query_ipassists_cb(struct qeth_card *card,
	struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(SETUP, 2, "qipasscb");

	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
		card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
	} else {
		card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
		card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
	}
	QETH_DBF_TEXT(SETUP, 2, "suppenbl");
	QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_supported);
	QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_enabled);
	return 0;
}
1315
1316static int qeth_l3_query_ipassists(struct qeth_card *card,
1317 enum qeth_prot_versions prot)
1318{
1319 int rc;
1320 struct qeth_cmd_buffer *iob;
1321
1322 QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
1323 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
1324 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_query_ipassists_cb, NULL);
1325 return rc;
1326}
1327
1328#ifdef CONFIG_QETH_IPV6
/*
 * Software setup of IPv6: query the IPv6 assists, then start the IPv6
 * assist via IPv4 and IPv6 command variants and enable passthrough.
 * IQD (HiperSockets) cards skip the negotiation entirely and are
 * reported as enabled.  Returns 0 on success or the first failing rc.
 */
static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(TRACE, 3, "softipv6");

	/* HiperSockets need no IPv6 assist negotiation */
	if (card->info.type == QETH_CARD_TYPE_IQD)
		goto out;

	rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6);
	if (rc) {
		PRINT_ERR("IPv6 query ipassist failed on %s\n",
			  QETH_CARD_IFNAME(card));
		return rc;
	}
	/* payload value 3 enables IPv6 in the IPv4-framed command */
	rc = qeth_l3_send_simple_setassparms(card, IPA_IPV6,
					  IPA_CMD_ASS_START, 3);
	if (rc) {
		PRINT_WARN("IPv6 start assist (version 4) failed "
			   "on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		return rc;
	}
	rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6,
					       IPA_CMD_ASS_START);
	if (rc) {
		PRINT_WARN("IPV6 start assist (version 6) failed "
			   "on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		return rc;
	}
	rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
					       IPA_CMD_ASS_START);
	if (rc) {
		PRINT_WARN("Could not enable passthrough "
			   "on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		return rc;
	}
out:
	PRINT_INFO("IPV6 enabled \n");
	return 0;
}
1372#endif
1373
1374static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
1375{
1376 int rc = 0;
1377
1378 QETH_DBF_TEXT(TRACE, 3, "strtipv6");
1379
1380 if (!qeth_is_supported(card, IPA_IPV6)) {
1381 PRINT_WARN("IPv6 not supported on %s\n",
1382 QETH_CARD_IFNAME(card));
1383 return 0;
1384 }
1385#ifdef CONFIG_QETH_IPV6
1386 rc = qeth_l3_softsetup_ipv6(card);
1387#endif
1388 return rc ;
1389}
1390
/*
 * Start and configure broadcast filtering.  broadcast_capable is upgraded
 * stepwise (WITH_ECHO after configure, WITHOUT_ECHO after enable) so a
 * partial failure still leaves the best mode reached; IFF_BROADCAST on
 * the net_device is set/cleared accordingly on every exit path.
 */
static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(TRACE, 3, "stbrdcst");
	card->info.broadcast_capable = 0;
	if (!qeth_is_supported(card, IPA_FILTERING)) {
		PRINT_WARN("Broadcast not supported on %s\n",
			   QETH_CARD_IFNAME(card));
		rc = -EOPNOTSUPP;
		goto out;
	}
	rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
					  IPA_CMD_ASS_START, 0);
	if (rc) {
		PRINT_WARN("Could not enable broadcasting filtering "
			   "on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		goto out;
	}

	rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
					  IPA_CMD_ASS_CONFIGURE, 1);
	if (rc) {
		PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		goto out;
	}
	card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
	PRINT_INFO("Broadcast enabled \n");
	rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
					  IPA_CMD_ASS_ENABLE, 1);
	if (rc) {
		PRINT_WARN("Could not set up broadcast echo filtering on "
			   "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc);
		goto out;
	}
	card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
out:
	if (card->info.broadcast_capable)
		card->dev->flags |= IFF_BROADCAST;
	else
		card->dev->flags &= ~IFF_BROADCAST;
	return rc;
}
1436
1437static int qeth_l3_send_checksum_command(struct qeth_card *card)
1438{
1439 int rc;
1440
1441 rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
1442 IPA_CMD_ASS_START, 0);
1443 if (rc) {
1444 PRINT_WARN("Starting Inbound HW Checksumming failed on %s: "
1445 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
1446 QETH_CARD_IFNAME(card), rc);
1447 return rc;
1448 }
1449 rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
1450 IPA_CMD_ASS_ENABLE,
1451 card->info.csum_mask);
1452 if (rc) {
1453 PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: "
1454 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
1455 QETH_CARD_IFNAME(card), rc);
1456 return rc;
1457 }
1458 return 0;
1459}
1460
/*
 * Decide the inbound checksumming mode: honour NO/SW settings as-is,
 * fall back to SW when the hardware assist is missing, otherwise try to
 * enable HW checksumming via qeth_l3_send_checksum_command().
 */
static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
{
	int rc = 0;

	QETH_DBF_TEXT(TRACE, 3, "strtcsum");

	if (card->options.checksum_type == NO_CHECKSUMMING) {
		PRINT_WARN("Using no checksumming on %s.\n",
			   QETH_CARD_IFNAME(card));
		return 0;
	}
	if (card->options.checksum_type == SW_CHECKSUMMING) {
		PRINT_WARN("Using SW checksumming on %s.\n",
			   QETH_CARD_IFNAME(card));
		return 0;
	}
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
		PRINT_WARN("Inbound HW Checksumming not "
			   "supported on %s,\ncontinuing "
			   "using Inbound SW Checksumming\n",
			   QETH_CARD_IFNAME(card));
		/* downgrade the configured mode so RX path uses SW */
		card->options.checksum_type = SW_CHECKSUMMING;
		return 0;
	}
	rc = qeth_l3_send_checksum_command(card);
	if (!rc)
		PRINT_INFO("HW Checksumming (inbound) enabled \n");

	return rc;
}
1491
/*
 * Start the outbound TSO assist.  On any failure, if TSO large-send was
 * configured, fall back to no large-send and strip the TSO/SG features
 * from the net_device.
 */
static int qeth_l3_start_ipa_tso(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(TRACE, 3, "sttso");

	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
		PRINT_WARN("Outbound TSO not supported on %s\n",
			   QETH_CARD_IFNAME(card));
		rc = -EOPNOTSUPP;
	} else {
		rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
					  IPA_CMD_ASS_START, 0);
		if (rc)
			PRINT_WARN("Could not start outbound TSO "
				   "assist on %s: rc=%i\n",
				   QETH_CARD_IFNAME(card), rc);
		else
			PRINT_INFO("Outbound TSO enabled\n");
	}
	/* revoke TSO-dependent features when the assist is unavailable */
	if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) {
		card->options.large_send = QETH_LARGE_SEND_NO;
		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
	}
	return rc;
}
1518
/*
 * Start all layer-3 IP assists in sequence.  Each starter handles its own
 * error reporting and failures are deliberately ignored ("go on") so one
 * missing assist never blocks the rest.  Always returns 0.
 */
static int qeth_l3_start_ipassists(struct qeth_card *card)
{
	QETH_DBF_TEXT(TRACE, 3, "strtipas");
	qeth_l3_start_ipa_arp_processing(card);	/* go on*/
	qeth_l3_start_ipa_ip_fragmentation(card);	/* go on*/
	qeth_l3_start_ipa_source_mac(card);	/* go on*/
	qeth_l3_start_ipa_vlan(card);		/* go on*/
	qeth_l3_start_ipa_multicast(card);		/* go on*/
	qeth_l3_start_ipa_ipv6(card);		/* go on*/
	qeth_l3_start_ipa_broadcast(card);		/* go on*/
	qeth_l3_start_ipa_checksum(card);		/* go on*/
	qeth_l3_start_ipa_tso(card);		/* go on*/
	return 0;
}
1533
/*
 * Return the card-assigned IPv6 unique id via IPA_CMD_DESTROY_ADDR.
 * Ids the card did not hand out (UNIQUE_ID_NOT_BY_CARD) are not
 * returned; in that case -1 is reported.
 */
static int qeth_l3_put_unique_id(struct qeth_card *card)
{

	int rc = 0;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(TRACE, 2, "puniqeid");

	if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
		UNIQUE_ID_NOT_BY_CARD)
		return -1;
	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
				     QETH_PROT_IPV6);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	/* the 16-bit unique id lives in bytes 6..7 of the 8-byte field */
	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
			card->info.unique_id;
	memcpy(&cmd->data.create_destroy_addr.unique_id[0],
	       card->dev->dev_addr, OSA_ADDR_LEN);
	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
	return rc;
}
1556
/*
 * CREATE_ADDR reply callback for IQD cards: on success take the returned
 * unique id bytes as the device MAC; otherwise fall back to a random
 * ethernet address.  Always returns 0.
 */
static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code == 0)
		memcpy(card->dev->dev_addr,
			cmd->data.create_destroy_addr.unique_id, ETH_ALEN);
	else
		random_ether_addr(card->dev->dev_addr);

	return 0;
}
1571
/*
 * Ask an IQD card for its initial MAC address by issuing CREATE_ADDR;
 * the callback stores the result (or a random MAC) on the net_device.
 */
static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
{
	int rc = 0;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(SETUP, 2, "hsrmac");

	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
				     QETH_PROT_IPV6);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	/* pass the current unique id in bytes 6..7 of the id field */
	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
			card->info.unique_id;

	rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb,
				NULL);
	return rc;
}
1590
/*
 * CREATE_ADDR reply callback: store the card-provided 16-bit unique id,
 * or mark a default id (UNIQUE_ID_NOT_BY_CARD) on failure and warn that
 * IPv6 autoconfig may collide across LPARs.  Always returns 0.
 */
static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code == 0)
		card->info.unique_id = *((__u16 *)
				&cmd->data.create_destroy_addr.unique_id[6]);
	else {
		card->info.unique_id =  UNIQUE_ID_IF_CREATE_ADDR_FAILED |
					UNIQUE_ID_NOT_BY_CARD;
		PRINT_WARN("couldn't get a unique id from the card on device "
			   "%s (result=x%x), using default id. ipv6 "
			   "autoconfig on other lpars may lead to duplicate "
			   "ip addresses. please use manually "
			   "configured ones.\n",
			   CARD_BUS_ID(card), cmd->hdr.return_code);
	}
	return 0;
}
1612
/*
 * Obtain an IPv6 unique id from the card via IPA_CMD_CREATE_ADDR.
 * Cards without IPv6 support get a default id (flagged NOT_BY_CARD)
 * and 0 is returned immediately.
 */
static int qeth_l3_get_unique_id(struct qeth_card *card)
{
	int rc = 0;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(SETUP, 2, "guniqeid");

	if (!qeth_is_supported(card, IPA_IPV6)) {
		card->info.unique_id =  UNIQUE_ID_IF_CREATE_ADDR_FAILED |
					UNIQUE_ID_NOT_BY_CARD;
		return 0;
	}

	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
				     QETH_PROT_IPV6);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	/* seed the request with the previously known unique id */
	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
			card->info.unique_id;

	rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL);
	return rc;
}
1636
1637static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
1638 struct net_device *dev)
1639{
1640 if (dev->type == ARPHRD_IEEE802_TR)
1641 ip_tr_mc_map(ipm, mac);
1642 else
1643 ip_eth_mc_map(ipm, mac);
1644}
1645
/*
 * Walk an in_device's IPv4 multicast list and register each group with
 * the card's IP list.  Entries already present are freed again (add_ip
 * returning 0 means "not taken over").
 * NOTE(review): callers appear to hold in4_dev->mc_list_lock — confirm.
 */
static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
{
	struct qeth_ipaddr *ipm;
	struct ip_mc_list *im4;
	char buf[MAX_ADDR_LEN];

	QETH_DBF_TEXT(TRACE, 4, "addmc");
	for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
		qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
		ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
		if (!ipm)
			continue;
		ipm->u.a4.addr = im4->multiaddr;
		memcpy(ipm->mac, buf, OSA_ADDR_LEN);
		ipm->is_multicast = 1;
		if (!qeth_l3_add_ip(card, ipm))
			kfree(ipm);
	}
}
1665
/*
 * Register the IPv4 multicast groups of every active VLAN device on top
 * of this card.  No-op unless the card has full VLAN support and a VLAN
 * group registered.
 */
static void qeth_l3_add_vlan_mc(struct qeth_card *card)
{
	struct in_device *in_dev;
	struct vlan_group *vg;
	int i;

	QETH_DBF_TEXT(TRACE, 4, "addmcvl");
	if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
		return;

	vg = card->vlangrp;
	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
		struct net_device *netdev = vlan_group_get_device(vg, i);
		if (netdev == NULL ||
		    !(netdev->flags & IFF_UP))
			continue;
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			continue;
		/* mc_list is walked under the device's mc_list_lock */
		read_lock(&in_dev->mc_list_lock);
		qeth_l3_add_mc(card, in_dev);
		read_unlock(&in_dev->mc_list_lock);
		in_dev_put(in_dev);
	}
}
1691
/*
 * Collect all IPv4 multicast groups for the card's own device and its
 * VLAN devices into the card's IP list, under the mc_list_lock of the
 * base device.
 */
static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
{
	struct in_device *in4_dev;

	QETH_DBF_TEXT(TRACE, 4, "chkmcv4");
	in4_dev = in_dev_get(card->dev);
	if (in4_dev == NULL)
		return;
	read_lock(&in4_dev->mc_list_lock);
	qeth_l3_add_mc(card, in4_dev);
	qeth_l3_add_vlan_mc(card);
	read_unlock(&in4_dev->mc_list_lock);
	in_dev_put(in4_dev);
}
1706
1707#ifdef CONFIG_QETH_IPV6
/*
 * Walk an inet6_dev's multicast list and register each group with the
 * card's IP list; entries already present are freed again.
 * NOTE(review): callers appear to hold in6_dev->lock — confirm.
 */
static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
{
	struct qeth_ipaddr *ipm;
	struct ifmcaddr6 *im6;
	char buf[MAX_ADDR_LEN];

	QETH_DBF_TEXT(TRACE, 4, "addmc6");
	for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
		ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
		ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
		if (!ipm)
			continue;
		ipm->is_multicast = 1;
		memcpy(ipm->mac, buf, OSA_ADDR_LEN);
		memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
		       sizeof(struct in6_addr));
		if (!qeth_l3_add_ip(card, ipm))
			kfree(ipm);
	}
}
1728
/*
 * Register the IPv6 multicast groups of every active VLAN device on top
 * of this card.  No-op unless the card has full VLAN support and a VLAN
 * group registered.
 */
static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
{
	struct inet6_dev *in_dev;
	struct vlan_group *vg;
	int i;

	QETH_DBF_TEXT(TRACE, 4, "admc6vl");
	if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
		return;

	vg = card->vlangrp;
	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
		struct net_device *netdev = vlan_group_get_device(vg, i);
		if (netdev == NULL ||
		    !(netdev->flags & IFF_UP))
			continue;
		in_dev = in6_dev_get(netdev);
		if (!in_dev)
			continue;
		/* mc_list is walked under the inet6_dev lock (BH-safe) */
		read_lock_bh(&in_dev->lock);
		qeth_l3_add_mc6(card, in_dev);
		read_unlock_bh(&in_dev->lock);
		in6_dev_put(in_dev);
	}
}
1754
/*
 * Collect all IPv6 multicast groups for the card's own device and its
 * VLAN devices into the card's IP list.  Skipped entirely when the card
 * has no IPv6 assist.
 */
static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
{
	struct inet6_dev *in6_dev;

	QETH_DBF_TEXT(TRACE, 4, "chkmcv6");
	if (!qeth_is_supported(card, IPA_IPV6))
		return ;
	in6_dev = in6_dev_get(card->dev);
	if (in6_dev == NULL)
		return;
	read_lock_bh(&in6_dev->lock);
	qeth_l3_add_mc6(card, in6_dev);
	qeth_l3_add_vlan_mc6(card);
	read_unlock_bh(&in6_dev->lock);
	in6_dev_put(in6_dev);
}
1771#endif /* CONFIG_QETH_IPV6 */
1772
/*
 * Remove all IPv4 addresses of a departing VLAN device from the card's
 * IP list.  Addresses not actually removed are freed again.
 * NOTE(review): vlan_group_get_device() can return NULL and in_dev_get()
 * would then dereference a NULL netdev — confirm the caller guarantees
 * a live VLAN device for @vid.
 */
static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
			unsigned short vid)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	struct qeth_ipaddr *addr;

	QETH_DBF_TEXT(TRACE, 4, "frvaddr4");

	in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid));
	if (!in_dev)
		return;
	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
		addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
		if (addr) {
			addr->u.a4.addr = ifa->ifa_address;
			addr->u.a4.mask = ifa->ifa_mask;
			addr->type = QETH_IP_TYPE_NORMAL;
			if (!qeth_l3_delete_ip(card, addr))
				kfree(addr);
		}
	}
	in_dev_put(in_dev);
}
1797
/*
 * Remove all IPv6 addresses of a departing VLAN device from the card's
 * IP list.  Compiles to a no-op without CONFIG_QETH_IPV6.
 * NOTE(review): like the IPv4 variant, this trusts that the VLAN device
 * for @vid still exists — confirm.
 */
static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
			unsigned short vid)
{
#ifdef CONFIG_QETH_IPV6
	struct inet6_dev *in6_dev;
	struct inet6_ifaddr *ifa;
	struct qeth_ipaddr *addr;

	QETH_DBF_TEXT(TRACE, 4, "frvaddr6");

	in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
	if (!in6_dev)
		return;
	for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next) {
		addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
		if (addr) {
			memcpy(&addr->u.a6.addr, &ifa->addr,
			       sizeof(struct in6_addr));
			addr->u.a6.pfxlen = ifa->prefix_len;
			addr->type = QETH_IP_TYPE_NORMAL;
			if (!qeth_l3_delete_ip(card, addr))
				kfree(addr);
		}
	}
	in6_dev_put(in6_dev);
#endif /* CONFIG_QETH_IPV6 */
}
1825
1826static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
1827 unsigned short vid)
1828{
1829 if (!card->vlangrp)
1830 return;
1831 qeth_l3_free_vlan_addresses4(card, vid);
1832 qeth_l3_free_vlan_addresses6(card, vid);
1833}
1834
/*
 * net_device vlan_rx_register hook: record the VLAN group on the card,
 * serialized against other VLAN operations by card->vlanlock.
 */
static void qeth_l3_vlan_rx_register(struct net_device *dev,
			struct vlan_group *grp)
{
	struct qeth_card *card = netdev_priv(dev);
	unsigned long flags;

	QETH_DBF_TEXT(TRACE, 4, "vlanreg");
	spin_lock_irqsave(&card->vlanlock, flags);
	card->vlangrp = grp;
	spin_unlock_irqrestore(&card->vlanlock, flags);
}
1846
1847static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1848{
1849 struct net_device *vlandev;
1850 struct qeth_card *card = (struct qeth_card *) dev->priv;
1851 struct in_device *in_dev;
1852
1853 if (card->info.type == QETH_CARD_TYPE_IQD)
1854 return;
1855
1856 vlandev = vlan_group_get_device(card->vlangrp, vid);
1857 vlandev->neigh_setup = qeth_l3_neigh_setup;
1858
1859 in_dev = in_dev_get(vlandev);
1860#ifdef CONFIG_SYSCTL
1861 neigh_sysctl_unregister(in_dev->arp_parms);
1862#endif
1863 neigh_parms_release(&arp_tbl, in_dev->arp_parms);
1864
1865 in_dev->arp_parms = neigh_parms_alloc(vlandev, &arp_tbl);
1866#ifdef CONFIG_SYSCTL
1867 neigh_sysctl_register(vlandev, in_dev->arp_parms, NET_IPV4,
1868 NET_IPV4_NEIGH, "ipv4", NULL, NULL);
1869#endif
1870 in_dev_put(in_dev);
1871 return;
1872}
1873
/*
 * net_device vlan_rx_kill_vid hook: unregister the VLAN device's IP
 * addresses from the card, clear its slot in the VLAN group (under
 * vlanlock), then refresh the multicast configuration.
 */
static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct qeth_card *card = netdev_priv(dev);
	unsigned long flags;

	QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
	spin_lock_irqsave(&card->vlanlock, flags);
	/* unregister IP addresses of vlan device */
	qeth_l3_free_vlan_addresses(card, vid);
	vlan_group_set_device(card->vlangrp, vid, NULL);
	spin_unlock_irqrestore(&card->vlanlock, flags);
	qeth_l3_set_multicast_list(card->dev);
}
1887
1888static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
1889 struct sk_buff *skb, struct qeth_hdr *hdr)
1890{
1891 unsigned short vlan_id = 0;
1892 __be16 prot;
1893 struct iphdr *ip_hdr;
1894 unsigned char tg_addr[MAX_ADDR_LEN];
1895
1896 if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
1897 prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
1898 ETH_P_IP);
1899 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
1900 case QETH_CAST_MULTICAST:
1901 switch (prot) {
1902#ifdef CONFIG_QETH_IPV6
1903 case __constant_htons(ETH_P_IPV6):
1904 ndisc_mc_map((struct in6_addr *)
1905 skb->data + 24,
1906 tg_addr, card->dev, 0);
1907 break;
1908#endif
1909 case __constant_htons(ETH_P_IP):
1910 ip_hdr = (struct iphdr *)skb->data;
1911 (card->dev->type == ARPHRD_IEEE802_TR) ?
1912 ip_tr_mc_map(ip_hdr->daddr, tg_addr):
1913 ip_eth_mc_map(ip_hdr->daddr, tg_addr);
1914 break;
1915 default:
1916 memcpy(tg_addr, card->dev->broadcast,
1917 card->dev->addr_len);
1918 }
1919 card->stats.multicast++;
1920 skb->pkt_type = PACKET_MULTICAST;
1921 break;
1922 case QETH_CAST_BROADCAST:
1923 memcpy(tg_addr, card->dev->broadcast,
1924 card->dev->addr_len);
1925 card->stats.multicast++;
1926 skb->pkt_type = PACKET_BROADCAST;
1927 break;
1928 case QETH_CAST_UNICAST:
1929 case QETH_CAST_ANYCAST:
1930 case QETH_CAST_NOCAST:
1931 default:
1932 skb->pkt_type = PACKET_HOST;
1933 memcpy(tg_addr, card->dev->dev_addr,
1934 card->dev->addr_len);
1935 }
1936 card->dev->header_ops->create(skb, card->dev, prot, tg_addr,
1937 "FAKELL", card->dev->addr_len);
1938 }
1939
1940#ifdef CONFIG_TR
1941 if (card->dev->type == ARPHRD_IEEE802_TR)
1942 skb->protocol = tr_type_trans(skb, card->dev);
1943 else
1944#endif
1945 skb->protocol = eth_type_trans(skb, card->dev);
1946
1947 if (hdr->hdr.l3.ext_flags &
1948 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
1949 vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
1950 hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
1951 }
1952
1953 skb->ip_summed = card->options.checksum_type;
1954 if (card->options.checksum_type == HW_CHECKSUMMING) {
1955 if ((hdr->hdr.l3.ext_flags &
1956 (QETH_HDR_EXT_CSUM_HDR_REQ |
1957 QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
1958 (QETH_HDR_EXT_CSUM_HDR_REQ |
1959 QETH_HDR_EXT_CSUM_TRANSP_REQ))
1960 skb->ip_summed = CHECKSUM_UNNECESSARY;
1961 else
1962 skb->ip_summed = SW_CHECKSUMMING;
1963 }
1964
1965 return vlan_id;
1966}
1967
1968static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
1969 struct qeth_qdio_buffer *buf, int index)
1970{
1971 struct qdio_buffer_element *element;
1972 struct sk_buff *skb;
1973 struct qeth_hdr *hdr;
1974 int offset;
1975 __u16 vlan_tag = 0;
1976 unsigned int len;
1977
1978 /* get first element of current buffer */
1979 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
1980 offset = 0;
1981 if (card->options.performance_stats)
1982 card->perf_stats.bufs_rec++;
1983 while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element,
1984 &offset, &hdr))) {
1985 skb->dev = card->dev;
1986 /* is device UP ? */
1987 if (!(card->dev->flags & IFF_UP)) {
1988 dev_kfree_skb_any(skb);
1989 continue;
1990 }
1991
1992 switch (hdr->hdr.l3.id) {
1993 case QETH_HEADER_TYPE_LAYER3:
1994 vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr);
1995 len = skb->len;
1996 if (vlan_tag)
1997 if (card->vlangrp)
1998 vlan_hwaccel_rx(skb, card->vlangrp,
1999 vlan_tag);
2000 else {
2001 dev_kfree_skb_any(skb);
2002 continue;
2003 }
2004 else
2005 netif_rx(skb);
2006 break;
2007 default:
2008 dev_kfree_skb_any(skb);
2009 QETH_DBF_TEXT(TRACE, 3, "inbunkno");
2010 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
2011 continue;
2012 }
2013
2014 card->dev->last_rx = jiffies;
2015 card->stats.rx_packets++;
2016 card->stats.rx_bytes += len;
2017 }
2018}
2019
/*
 * Check whether @dev is one of @card's VLAN devices.  Returns
 * QETH_VLAN_CARD when the device sits in the card's VLAN group AND its
 * real device belongs to this card; 0 otherwise.
 */
static int qeth_l3_verify_vlan_dev(struct net_device *dev,
			struct qeth_card *card)
{
	int rc = 0;
	struct vlan_group *vg;
	int i;

	vg = card->vlangrp;
	if (!vg)
		return rc;

	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
		if (vlan_group_get_device(vg, i) == dev) {
			rc = QETH_VLAN_CARD;
			break;
		}
	}

	/* a hit only counts if the underlying real device is this card */
	if (rc && !(netdev_priv(vlan_dev_info(dev)->real_dev) == (void *)card))
		return 0;

	return rc;
}
2043
/*
 * Classify @dev against all registered qeth cards: QETH_REAL_CARD when
 * it is a card's own net_device, QETH_VLAN_CARD when it is a VLAN device
 * on top of one, 0 when it belongs to no card.  Walks the global card
 * list under its rwlock.
 */
static int qeth_l3_verify_dev(struct net_device *dev)
{
	struct qeth_card *card;
	unsigned long flags;
	int rc = 0;

	read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
	list_for_each_entry(card, &qeth_core_card_list.list, list) {
		if (card->dev == dev) {
			rc = QETH_REAL_CARD;
			break;
		}
		rc = qeth_l3_verify_vlan_dev(dev, card);
		if (rc)
			break;
	}
	read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);

	return rc;
}
2064
2065static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
2066{
2067 struct qeth_card *card = NULL;
2068 int rc;
2069
2070 rc = qeth_l3_verify_dev(dev);
2071 if (rc == QETH_REAL_CARD)
2072 card = netdev_priv(dev);
2073 else if (rc == QETH_VLAN_CARD)
2074 card = netdev_priv(vlan_dev_info(dev)->real_dev);
2075 if (card->options.layer2)
2076 card = NULL;
2077 QETH_DBF_TEXT_(TRACE, 4, "%d", rc);
2078 return card ;
2079}
2080
/*
 * Tear the card down through its state machine: UP -> SOFTSETUP ->
 * HARDSETUP -> DOWN, at each stage releasing what that state owns
 * (stoplan, IP lists, unique id, QDIO queues, command buffers).  Each
 * "if (state == ...)" deliberately falls into the next stage.  With
 * use_hard_stop set, the polite stoplan/put-unique-id steps are skipped.
 * Returns -ERESTARTSYS if other worker threads cannot be stopped.
 */
static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
{
	int rc = 0;

	QETH_DBF_TEXT(SETUP, 2, "stopcard");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	qeth_set_allowed_threads(card, 0, 1);
	if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
		return -ERESTARTSYS;
	if (card->read.state == CH_STATE_UP &&
	    card->write.state == CH_STATE_UP &&
	    (card->state == CARD_STATE_UP)) {
		if (recovery_mode)
			qeth_l3_stop(card->dev);
		if (!card->use_hard_stop) {
			rc = qeth_send_stoplan(card);
			if (rc)
				QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		}
		card->state = CARD_STATE_SOFTSETUP;
	}
	if (card->state == CARD_STATE_SOFTSETUP) {
		qeth_l3_clear_ip_list(card, !card->use_hard_stop, 1);
		qeth_clear_ipacmd_list(card);
		card->state = CARD_STATE_HARDSETUP;
	}
	if (card->state == CARD_STATE_HARDSETUP) {
		if (!card->use_hard_stop &&
		    (card->info.type != QETH_CARD_TYPE_IQD)) {
			rc = qeth_l3_put_unique_id(card);
			if (rc)
				QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		}
		qeth_qdio_clear_card(card, 0);
		qeth_clear_qdio_buffers(card);
		qeth_clear_working_pool_list(card);
		card->state = CARD_STATE_DOWN;
	}
	if (card->state == CARD_STATE_DOWN) {
		qeth_clear_cmd_buffers(&card->read);
		qeth_clear_cmd_buffers(&card->write);
	}
	card->use_hard_stop = 0;
	return rc;
}
2127
/*
 * net_device set_multicast_list hook: rebuild the card's multicast IP
 * list from scratch (IPv4 and, if configured, IPv6), push it to the
 * hardware, and refresh promiscuous mode when the adapter supports it.
 */
static void qeth_l3_set_multicast_list(struct net_device *dev)
{
	struct qeth_card *card = netdev_priv(dev);

	QETH_DBF_TEXT(TRACE, 3, "setmulti");
	qeth_l3_delete_mc_addresses(card);
	qeth_l3_add_multicast_ipv4(card);
#ifdef CONFIG_QETH_IPV6
	qeth_l3_add_multicast_ipv6(card);
#endif
	qeth_l3_set_ip_addr_list(card);
	if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
		return;
	qeth_setadp_promisc_mode(card);
}
2143
2144static const char *qeth_l3_arp_get_error_cause(int *rc)
2145{
2146 switch (*rc) {
2147 case QETH_IPA_ARP_RC_FAILED:
2148 *rc = -EIO;
2149 return "operation failed";
2150 case QETH_IPA_ARP_RC_NOTSUPP:
2151 *rc = -EOPNOTSUPP;
2152 return "operation not supported";
2153 case QETH_IPA_ARP_RC_OUT_OF_RANGE:
2154 *rc = -EINVAL;
2155 return "argument out of range";
2156 case QETH_IPA_ARP_RC_Q_NOTSUPP:
2157 *rc = -EOPNOTSUPP;
2158 return "query operation not supported";
2159 case QETH_IPA_ARP_RC_Q_NO_DATA:
2160 *rc = -ENOENT;
2161 return "no query data available";
2162 default:
2163 return "unknown error";
2164 }
2165}
2166
/*
 * Set the maximum number of entries in the card's ARP cache via the
 * ARP assist.  GuestLAN cards and cards without the assist report
 * -EOPNOTSUPP; other failures are translated into errno values by
 * qeth_l3_arp_get_error_cause().
 */
static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
{
	int tmp;
	int rc;

	QETH_DBF_TEXT(TRACE, 3, "arpstnoe");

	/*
	 * currently GuestLAN only supports the ARP assist function
	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
	 * thus we say EOPNOTSUPP for this ARP function
	 */
	if (card->info.guestlan)
		return -EOPNOTSUPP;
	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
		PRINT_WARN("ARP processing not supported "
			   "on %s!\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}
	rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
					  IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
					  no_entries);
	if (rc) {
		/* keep the raw code for the message; rc becomes an errno */
		tmp = rc;
		PRINT_WARN("Could not set number of ARP entries on %s: "
			   "%s (0x%x/%d)\n", QETH_CARD_IFNAME(card),
			   qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
	}
	return rc;
}
2197
2198static void qeth_l3_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
2199 struct qeth_arp_query_data *qdata, int entry_size,
2200 int uentry_size)
2201{
2202 char *entry_ptr;
2203 char *uentry_ptr;
2204 int i;
2205
2206 entry_ptr = (char *)&qdata->data;
2207 uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
2208 for (i = 0; i < qdata->no_entries; ++i) {
2209 /* strip off 32 bytes "media specific information" */
2210 memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
2211 entry_ptr += entry_size;
2212 uentry_ptr += uentry_size;
2213 }
2214}
2215
/*
 * IPA callback for IPA_CMD_ASS_ARP_QUERY_INFO replies.
 *
 * reply->param is a struct qeth_arp_query_info describing a kernel
 * buffer that is later copied to userspace.  Each reply fragment's
 * entries are appended to that buffer.  Returns 1 while more reply
 * fragments are outstanding, 0 when the exchange is finished or on
 * error; errors are signalled by writing an entry count of 0 into
 * the buffer head.
 */
static int qeth_l3_arp_query_cb(struct qeth_card *card,
	struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_arp_query_data *qdata;
	struct qeth_arp_query_info *qinfo;
	int entry_size;
	int uentry_size;
	int i;

	QETH_DBF_TEXT(TRACE, 4, "arpquecb");

	qinfo = (struct qeth_arp_query_info *) reply->param;
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		/* IPA-level error: stop; caller reads rc from the header */
		QETH_DBF_TEXT_(TRACE, 4, "qaer1%i", cmd->hdr.return_code);
		return 0;
	}
	if (cmd->data.setassparms.hdr.return_code) {
		/* propagate the assist-specific error into the IPA header */
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
		QETH_DBF_TEXT_(TRACE, 4, "qaer2%i", cmd->hdr.return_code);
		return 0;
	}
	qdata = &cmd->data.setassparms.data.query_arp;
	/* reply_bits selects the on-wire entry layout (entry5 vs entry7) */
	switch (qdata->reply_bits) {
	case 5:
		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
			uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
		break;
	case 7:
		/* fall through to default */
	default:
		/* tr is the same as eth -> entry7 */
		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
			uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
		break;
	}
	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) <
		qdata->no_entries * uentry_size){
		QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM);
		/* NOTE(review): stores a negative errno into the IPA
		 * return_code field; callers appear to treat any non-zero
		 * value as failure — confirm field width tolerates this */
		cmd->hdr.return_code = -ENOMEM;
		PRINT_WARN("query ARP user space buffer is too small for "
			"the returned number of ARP entries. "
			"Aborting query!\n");
		goto out_error;
	}
	QETH_DBF_TEXT_(TRACE, 4, "anore%i",
		cmd->data.setassparms.hdr.number_of_replies);
	QETH_DBF_TEXT_(TRACE, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
	QETH_DBF_TEXT_(TRACE, 4, "anoen%i", qdata->no_entries);

	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
		/* strip off "media specific information" */
		qeth_l3_copy_arp_entries_stripped(qinfo, qdata, entry_size,
			uentry_size);
	} else
		/*copy entries to user buffer*/
		memcpy(qinfo->udata + qinfo->udata_offset,
			(char *)&qdata->data, qdata->no_entries*uentry_size);

	qinfo->no_entries += qdata->no_entries;
	qinfo->udata_offset += (qdata->no_entries*uentry_size);
	/* check if all replies received ... */
	if (cmd->data.setassparms.hdr.seq_no <
		cmd->data.setassparms.hdr.number_of_replies)
		return 1;
	/* last fragment: write the total entry count into the buffer head */
	memcpy(qinfo->udata, &qinfo->no_entries, 4);
	/* keep STRIP_ENTRIES flag so the user program can distinguish
	 * stripped entries from normal ones */
	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
		qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
	memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
	return 0;
out_error:
	/* report zero entries back to userspace on error */
	i = 0;
	memcpy(qinfo->udata, &i, 4);
	return 0;
}
2297
/*
 * Wrap an ARP assist command into an IPA PDU and send it over the
 * control channel.  The ULP connection token is patched into the PDU
 * destination-address field before transmission; reply_cb/reply_param
 * are forwarded to qeth_send_control_data() for reply handling.
 */
static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card,
		struct qeth_cmd_buffer *iob, int len,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
			unsigned long),
		void *reply_param)
{
	QETH_DBF_TEXT(TRACE, 4, "sendarp");

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
				      reply_cb, reply_param);
}
2312
2313static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
2314{
2315 struct qeth_cmd_buffer *iob;
2316 struct qeth_arp_query_info qinfo = {0, };
2317 int tmp;
2318 int rc;
2319
2320 QETH_DBF_TEXT(TRACE, 3, "arpquery");
2321
2322 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
2323 IPA_ARP_PROCESSING)) {
2324 PRINT_WARN("ARP processing not supported "
2325 "on %s!\n", QETH_CARD_IFNAME(card));
2326 return -EOPNOTSUPP;
2327 }
2328 /* get size of userspace buffer and mask_bits -> 6 bytes */
2329 if (copy_from_user(&qinfo, udata, 6))
2330 return -EFAULT;
2331 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
2332 if (!qinfo.udata)
2333 return -ENOMEM;
2334 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
2335 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
2336 IPA_CMD_ASS_ARP_QUERY_INFO,
2337 sizeof(int), QETH_PROT_IPV4);
2338
2339 rc = qeth_l3_send_ipa_arp_cmd(card, iob,
2340 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
2341 qeth_l3_arp_query_cb, (void *)&qinfo);
2342 if (rc) {
2343 tmp = rc;
2344 PRINT_WARN("Error while querying ARP cache on %s: %s "
2345 "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
2346 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2347 if (copy_to_user(udata, qinfo.udata, 4))
2348 rc = -EFAULT;
2349 } else {
2350 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
2351 rc = -EFAULT;
2352 }
2353 kfree(qinfo.udata);
2354 return rc;
2355}
2356
/*
 * Handle SIOC_QETH_ARP_ADD_ENTRY: add a static entry to the
 * adapter's ARP cache.  Returns 0 on success or a negative errno.
 */
static int qeth_l3_arp_add_entry(struct qeth_card *card,
				struct qeth_arp_cache_entry *entry)
{
	struct qeth_cmd_buffer *iob;
	char buf[16];	/* dotted-quad string for the error message */
	int tmp;
	int rc;

	QETH_DBF_TEXT(TRACE, 3, "arpadent");

	/*
	 * currently GuestLAN only supports the ARP assist function
	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
	 * thus we say EOPNOTSUPP for this ARP function
	 */
	if (card->info.guestlan)
		return -EOPNOTSUPP;
	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
		PRINT_WARN("ARP processing not supported "
			"on %s!\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}

	iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
				IPA_CMD_ASS_ARP_ADD_ENTRY,
				sizeof(struct qeth_arp_cache_entry),
				QETH_PROT_IPV4);
	rc = qeth_l3_send_setassparms(card, iob,
				sizeof(struct qeth_arp_cache_entry),
				(unsigned long) entry,
				qeth_l3_default_setassparms_cb, NULL);
	if (rc) {
		tmp = rc;	/* keep raw IPA code; helper rewrites rc */
		qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
		PRINT_WARN("Could not add ARP entry for address %s on %s: "
			"%s (0x%x/%d)\n",
			buf, QETH_CARD_IFNAME(card),
			qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
	}
	return rc;
}
2398
/*
 * Handle SIOC_QETH_ARP_REMOVE_ENTRY: delete an entry from the
 * adapter's ARP cache.  Only the first 12 bytes of the entry are
 * sent to identify it.  Returns 0 on success or a negative errno.
 */
static int qeth_l3_arp_remove_entry(struct qeth_card *card,
				struct qeth_arp_cache_entry *entry)
{
	struct qeth_cmd_buffer *iob;
	/* first holds the 12-byte entry key, later reused (after a
	 * memset) for the dotted-quad string in the error message */
	char buf[16] = {0, };
	int tmp;
	int rc;

	QETH_DBF_TEXT(TRACE, 3, "arprment");

	/*
	 * currently GuestLAN only supports the ARP assist function
	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
	 * thus we say EOPNOTSUPP for this ARP function
	 */
	if (card->info.guestlan)
		return -EOPNOTSUPP;
	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
		PRINT_WARN("ARP processing not supported "
			"on %s!\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}
	memcpy(buf, entry, 12);
	iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
					  IPA_CMD_ASS_ARP_REMOVE_ENTRY,
					  12,
					  QETH_PROT_IPV4);
	rc = qeth_l3_send_setassparms(card, iob,
				      12, (unsigned long)buf,
				      qeth_l3_default_setassparms_cb, NULL);
	if (rc) {
		tmp = rc;	/* keep raw IPA code; helper rewrites rc */
		memset(buf, 0, 16);
		qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
		PRINT_WARN("Could not delete ARP entry for address %s on %s: "
			"%s (0x%x/%d)\n",
			buf, QETH_CARD_IFNAME(card),
			qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
	}
	return rc;
}
2440
2441static int qeth_l3_arp_flush_cache(struct qeth_card *card)
2442{
2443 int rc;
2444 int tmp;
2445
2446 QETH_DBF_TEXT(TRACE, 3, "arpflush");
2447
2448 /*
2449 * currently GuestLAN only supports the ARP assist function
2450 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
2451 * thus we say EOPNOTSUPP for this ARP function
2452 */
2453 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
2454 return -EOPNOTSUPP;
2455 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
2456 PRINT_WARN("ARP processing not supported "
2457 "on %s!\n", QETH_CARD_IFNAME(card));
2458 return -EOPNOTSUPP;
2459 }
2460 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
2461 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
2462 if (rc) {
2463 tmp = rc;
2464 PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n",
2465 QETH_CARD_IFNAME(card),
2466 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2467 }
2468 return rc;
2469}
2470
2471static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2472{
2473 struct qeth_card *card = netdev_priv(dev);
2474 struct qeth_arp_cache_entry arp_entry;
2475 struct mii_ioctl_data *mii_data;
2476 int rc = 0;
2477
2478 if (!card)
2479 return -ENODEV;
2480
2481 if ((card->state != CARD_STATE_UP) &&
2482 (card->state != CARD_STATE_SOFTSETUP))
2483 return -ENODEV;
2484
2485 switch (cmd) {
2486 case SIOC_QETH_ARP_SET_NO_ENTRIES:
2487 if (!capable(CAP_NET_ADMIN)) {
2488 rc = -EPERM;
2489 break;
2490 }
2491 rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
2492 break;
2493 case SIOC_QETH_ARP_QUERY_INFO:
2494 if (!capable(CAP_NET_ADMIN)) {
2495 rc = -EPERM;
2496 break;
2497 }
2498 rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
2499 break;
2500 case SIOC_QETH_ARP_ADD_ENTRY:
2501 if (!capable(CAP_NET_ADMIN)) {
2502 rc = -EPERM;
2503 break;
2504 }
2505 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
2506 sizeof(struct qeth_arp_cache_entry)))
2507 rc = -EFAULT;
2508 else
2509 rc = qeth_l3_arp_add_entry(card, &arp_entry);
2510 break;
2511 case SIOC_QETH_ARP_REMOVE_ENTRY:
2512 if (!capable(CAP_NET_ADMIN)) {
2513 rc = -EPERM;
2514 break;
2515 }
2516 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
2517 sizeof(struct qeth_arp_cache_entry)))
2518 rc = -EFAULT;
2519 else
2520 rc = qeth_l3_arp_remove_entry(card, &arp_entry);
2521 break;
2522 case SIOC_QETH_ARP_FLUSH_CACHE:
2523 if (!capable(CAP_NET_ADMIN)) {
2524 rc = -EPERM;
2525 break;
2526 }
2527 rc = qeth_l3_arp_flush_cache(card);
2528 break;
2529 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
2530 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
2531 break;
2532 case SIOC_QETH_GET_CARD_TYPE:
2533 if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
2534 !card->info.guestlan)
2535 return 1;
2536 return 0;
2537 break;
2538 case SIOCGMIIPHY:
2539 mii_data = if_mii(rq);
2540 mii_data->phy_id = 0;
2541 break;
2542 case SIOCGMIIREG:
2543 mii_data = if_mii(rq);
2544 if (mii_data->phy_id != 0)
2545 rc = -EINVAL;
2546 else
2547 mii_data->val_out = qeth_mdio_read(dev,
2548 mii_data->phy_id,
2549 mii_data->reg_num);
2550 break;
2551 default:
2552 rc = -EOPNOTSUPP;
2553 }
2554 if (rc)
2555 QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc);
2556 return rc;
2557}
2558
/*
 * Build the qeth layer-3 header in front of an outgoing packet:
 * header type, VLAN tag info, frame length, cast-type flags and the
 * next-hop/destination address for IPv4, IPv6 or passthrough
 * (non-IP) frames.
 */
static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
		struct sk_buff *skb, int ipv, int cast_type)
{
	QETH_DBF_TEXT(TRACE, 6, "fillhdr");

	memset(hdr, 0, sizeof(struct qeth_hdr));
	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
	hdr->hdr.l3.ext_flags = 0;	/* NOTE(review): redundant after memset */

	/*
	 * VLAN tagging: for IPv6 the tag travels inline with the frame
	 * (INCLUDE_VLAN_TAG), for IPv4 it is carried in the QDIO header
	 * (VLAN_FRAME).
	 */
	if (card->vlangrp && vlan_tx_tag_present(skb)) {
		hdr->hdr.l3.ext_flags = (ipv == 4) ?
			QETH_HDR_EXT_VLAN_FRAME :
			QETH_HDR_EXT_INCLUDE_VLAN_TAG;
		hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
	}

	hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
	if (ipv == 4) {
		/* IPv4 */
		hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
		memset(hdr->hdr.l3.dest_addr, 0, 12);
		/* prefer the resolved next hop over the packet's own
		 * destination address */
		if ((skb->dst) && (skb->dst->neighbour)) {
			*((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
				*((u32 *) skb->dst->neighbour->primary_key);
		} else {
			/* fill in destination address used in ip header */
			*((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
				ip_hdr(skb)->daddr;
		}
	} else if (ipv == 6) {
		/* IPv6 */
		hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
		/* HiperSockets do not use passthrough for IPv6 */
		if (card->info.type == QETH_CARD_TYPE_IQD)
			hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
		if ((skb->dst) && (skb->dst->neighbour)) {
			memcpy(hdr->hdr.l3.dest_addr,
			       skb->dst->neighbour->primary_key, 16);
		} else {
			/* fill in destination address used in ip header */
			memcpy(hdr->hdr.l3.dest_addr,
			       &ipv6_hdr(skb)->daddr, 16);
		}
	} else {
		/* passthrough: classify by the destination MAC address */
		if ((skb->dev->type == ARPHRD_IEEE802_TR) &&
			!memcmp(skb->data + sizeof(struct qeth_hdr) +
			sizeof(__u16), skb->dev->broadcast, 6)) {
			/* token ring: MAC sits behind an extra 2-byte field */
			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
						QETH_HDR_PASSTHRU;
		} else if (!memcmp(skb->data + sizeof(struct qeth_hdr),
			    skb->dev->broadcast, 6)) {
			/* broadcast? */
			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
						QETH_HDR_PASSTHRU;
		} else {
			hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
				QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
				QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
		}
	}
}
2624
2625static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2626{
2627 int rc;
2628 u16 *tag;
2629 struct qeth_hdr *hdr = NULL;
2630 int elements_needed = 0;
2631 struct qeth_card *card = netdev_priv(dev);
2632 struct sk_buff *new_skb = NULL;
2633 int ipv = qeth_get_ip_version(skb);
2634 int cast_type = qeth_get_cast_type(card, skb);
2635 struct qeth_qdio_out_q *queue = card->qdio.out_qs
2636 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
2637 int tx_bytes = skb->len;
2638 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
2639 struct qeth_eddp_context *ctx = NULL;
2640
2641 QETH_DBF_TEXT(TRACE, 6, "l3xmit");
2642
2643 if ((card->info.type == QETH_CARD_TYPE_IQD) &&
2644 (skb->protocol != htons(ETH_P_IPV6)) &&
2645 (skb->protocol != htons(ETH_P_IP)))
2646 goto tx_drop;
2647
2648 if ((card->state != CARD_STATE_UP) || !card->lan_online) {
2649 card->stats.tx_carrier_errors++;
2650 goto tx_drop;
2651 }
2652
2653 if ((cast_type == RTN_BROADCAST) &&
2654 (card->info.broadcast_capable == 0))
2655 goto tx_drop;
2656
2657 if (card->options.performance_stats) {
2658 card->perf_stats.outbound_cnt++;
2659 card->perf_stats.outbound_start_time = qeth_get_micros();
2660 }
2661
2662 /* create a clone with writeable headroom */
2663 new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) +
2664 VLAN_HLEN);
2665 if (!new_skb)
2666 goto tx_drop;
2667
2668 if (card->info.type == QETH_CARD_TYPE_IQD) {
2669 skb_pull(new_skb, ETH_HLEN);
2670 } else {
2671 if (new_skb->protocol == htons(ETH_P_IP)) {
2672 if (card->dev->type == ARPHRD_IEEE802_TR)
2673 skb_pull(new_skb, TR_HLEN);
2674 else
2675 skb_pull(new_skb, ETH_HLEN);
2676 }
2677
2678 if (new_skb->protocol == ETH_P_IPV6 && card->vlangrp &&
2679 vlan_tx_tag_present(new_skb)) {
2680 skb_push(new_skb, VLAN_HLEN);
2681 skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
2682 skb_copy_to_linear_data_offset(new_skb, 4,
2683 new_skb->data + 8, 4);
2684 skb_copy_to_linear_data_offset(new_skb, 8,
2685 new_skb->data + 12, 4);
2686 tag = (u16 *)(new_skb->data + 12);
2687 *tag = __constant_htons(ETH_P_8021Q);
2688 *(tag + 1) = htons(vlan_tx_tag_get(new_skb));
2689 VLAN_TX_SKB_CB(new_skb)->magic = 0;
2690 }
2691 }
2692
2693 netif_stop_queue(dev);
2694
2695 if (skb_is_gso(new_skb))
2696 large_send = card->options.large_send;
2697
2698 /* fix hardware limitation: as long as we do not have sbal
2699 * chaining we can not send long frag lists so we temporary
2700 * switch to EDDP
2701 */
2702 if ((large_send == QETH_LARGE_SEND_TSO) &&
2703 ((skb_shinfo(new_skb)->nr_frags + 2) > 16))
2704 large_send = QETH_LARGE_SEND_EDDP;
2705
2706 if ((large_send == QETH_LARGE_SEND_TSO) &&
2707 (cast_type == RTN_UNSPEC)) {
2708 hdr = (struct qeth_hdr *)skb_push(new_skb,
2709 sizeof(struct qeth_hdr_tso));
2710 memset(hdr, 0, sizeof(struct qeth_hdr_tso));
2711 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
2712 qeth_tso_fill_header(card, hdr, new_skb);
2713 elements_needed++;
2714 } else {
2715 hdr = (struct qeth_hdr *)skb_push(new_skb,
2716 sizeof(struct qeth_hdr));
2717 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
2718 }
2719
2720 if (large_send == QETH_LARGE_SEND_EDDP) {
2721 /* new_skb is not owned by a socket so we use skb to get
2722 * the protocol
2723 */
2724 ctx = qeth_eddp_create_context(card, new_skb, hdr,
2725 skb->sk->sk_protocol);
2726 if (ctx == NULL) {
2727 PRINT_WARN("could not create eddp context\n");
2728 goto tx_drop;
2729 }
2730 } else {
2731 int elems = qeth_get_elements_no(card, (void *)hdr, new_skb,
2732 elements_needed);
2733 if (!elems)
2734 goto tx_drop;
2735 elements_needed += elems;
2736 }
2737
2738 if ((large_send == QETH_LARGE_SEND_NO) &&
2739 (new_skb->ip_summed == CHECKSUM_PARTIAL))
2740 qeth_tx_csum(new_skb);
2741
2742 if (card->info.type != QETH_CARD_TYPE_IQD)
2743 rc = qeth_do_send_packet(card, queue, new_skb, hdr,
2744 elements_needed, ctx);
2745 else
2746 rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
2747 elements_needed, ctx);
2748
2749 if (!rc) {
2750 card->stats.tx_packets++;
2751 card->stats.tx_bytes += tx_bytes;
2752 if (new_skb != skb)
2753 dev_kfree_skb_any(skb);
2754 if (card->options.performance_stats) {
2755 if (large_send != QETH_LARGE_SEND_NO) {
2756 card->perf_stats.large_send_bytes += tx_bytes;
2757 card->perf_stats.large_send_cnt++;
2758 }
2759 if (skb_shinfo(new_skb)->nr_frags > 0) {
2760 card->perf_stats.sg_skbs_sent++;
2761 /* nr_frags + skb->data */
2762 card->perf_stats.sg_frags_sent +=
2763 skb_shinfo(new_skb)->nr_frags + 1;
2764 }
2765 }
2766
2767 if (ctx != NULL) {
2768 qeth_eddp_put_context(ctx);
2769 dev_kfree_skb_any(new_skb);
2770 }
2771 } else {
2772 if (ctx != NULL)
2773 qeth_eddp_put_context(ctx);
2774
2775 if (rc == -EBUSY) {
2776 if (new_skb != skb)
2777 dev_kfree_skb_any(new_skb);
2778 return NETDEV_TX_BUSY;
2779 } else
2780 goto tx_drop;
2781 }
2782
2783 netif_wake_queue(dev);
2784 if (card->options.performance_stats)
2785 card->perf_stats.outbound_time += qeth_get_micros() -
2786 card->perf_stats.outbound_start_time;
2787 return rc;
2788
2789tx_drop:
2790 card->stats.tx_dropped++;
2791 card->stats.tx_errors++;
2792 if ((new_skb != skb) && new_skb)
2793 dev_kfree_skb_any(new_skb);
2794 dev_kfree_skb_any(skb);
2795 return NETDEV_TX_OK;
2796}
2797
2798static int qeth_l3_open(struct net_device *dev)
2799{
2800 struct qeth_card *card = netdev_priv(dev);
2801
2802 QETH_DBF_TEXT(TRACE, 4, "qethopen");
2803 if (card->state != CARD_STATE_SOFTSETUP)
2804 return -ENODEV;
2805 card->data.state = CH_STATE_UP;
2806 card->state = CARD_STATE_UP;
2807 card->dev->flags |= IFF_UP;
2808 netif_start_queue(dev);
2809
2810 if (!card->lan_online && netif_carrier_ok(dev))
2811 netif_carrier_off(dev);
2812 return 0;
2813}
2814
2815static int qeth_l3_stop(struct net_device *dev)
2816{
2817 struct qeth_card *card = netdev_priv(dev);
2818
2819 QETH_DBF_TEXT(TRACE, 4, "qethstop");
2820 netif_tx_disable(dev);
2821 card->dev->flags &= ~IFF_UP;
2822 if (card->state == CARD_STATE_UP)
2823 card->state = CARD_STATE_SOFTSETUP;
2824 return 0;
2825}
2826
2827static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev)
2828{
2829 struct qeth_card *card = netdev_priv(dev);
2830
2831 return (card->options.checksum_type == HW_CHECKSUMMING);
2832}
2833
/*
 * ethtool set_rx_csum: switch between hardware and software
 * checksumming.  Changing the mode on a running card triggers a full
 * offline/online cycle of the device.
 */
static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data)
{
	struct qeth_card *card = netdev_priv(dev);
	enum qeth_card_states old_state;
	enum qeth_checksum_types csum_type;

	/* only safe while fully up or fully down */
	if ((card->state != CARD_STATE_UP) &&
	    (card->state != CARD_STATE_DOWN))
		return -EPERM;

	if (data)
		csum_type = HW_CHECKSUMMING;
	else
		csum_type = SW_CHECKSUMMING;

	if (card->options.checksum_type != csum_type) {
		old_state = card->state;
		/* restart the card so the new mode takes effect */
		if (card->state == CARD_STATE_UP)
			__qeth_l3_set_offline(card->gdev, 1);
		card->options.checksum_type = csum_type;
		if (old_state == CARD_STATE_UP)
			__qeth_l3_set_online(card->gdev, 1);
	}
	return 0;
}
2859
2860static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
2861{
2862 struct qeth_card *card = netdev_priv(dev);
2863
2864 if (data) {
2865 if (card->options.large_send == QETH_LARGE_SEND_NO) {
2866 if (card->info.type == QETH_CARD_TYPE_IQD)
2867 card->options.large_send = QETH_LARGE_SEND_EDDP;
2868 else
2869 card->options.large_send = QETH_LARGE_SEND_TSO;
2870 dev->features |= NETIF_F_TSO;
2871 }
2872 } else {
2873 dev->features &= ~NETIF_F_TSO;
2874 card->options.large_send = QETH_LARGE_SEND_NO;
2875 }
2876 return 0;
2877}
2878
/* ethtool operations for layer-3 devices; statistics and driver info
 * come from the qeth core, checksum/TSO toggles are local. */
static struct ethtool_ops qeth_l3_ethtool_ops = {
	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_hw_csum,
	.get_rx_csum = qeth_l3_ethtool_get_rx_csum,
	.set_rx_csum = qeth_l3_ethtool_set_rx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = qeth_l3_ethtool_set_tso,
	.get_strings = qeth_core_get_strings,
	.get_ethtool_stats = qeth_core_get_ethtool_stats,
	.get_stats_count = qeth_core_get_stats_count,
	.get_drvinfo = qeth_core_get_drvinfo,
};
2894
2895/*
2896 * we need NOARP for IPv4 but we want neighbor solicitation for IPv6. Setting
2897 * NOARP on the netdevice is no option because it also turns off neighbor
2898 * solicitation. For IPv4 we install a neighbor_setup function. We don't want
2899 * arp resolution but we want the hard header (packet socket will work
2900 * e.g. tcpdump)
2901 */
2902static int qeth_l3_neigh_setup_noarp(struct neighbour *n)
2903{
2904 n->nud_state = NUD_NOARP;
2905 memcpy(n->ha, "FAKELL", 6);
2906 n->output = n->ops->connected_output;
2907 return 0;
2908}
2909
/* per-device neigh_parms hook: install the NOARP setup for IPv4 only,
 * leaving IPv6 neighbor solicitation untouched */
static int
qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
{
	if (np->tbl->family == AF_INET)
		np->neigh_setup = qeth_l3_neigh_setup_noarp;

	return 0;
}
2918
/*
 * Allocate and register the net_device for a layer-3 card: token
 * ring or ethernet for OSA-Express, "hsi%d" for HiperSockets.  Wires
 * up all netdev callbacks, ethtool ops and VLAN features.
 * Returns 0 on success or a negative errno.
 */
static int qeth_l3_setup_netdev(struct qeth_card *card)
{
	if (card->info.type == QETH_CARD_TYPE_OSAE) {
		if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
		    (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
#ifdef CONFIG_TR
			card->dev = alloc_trdev(0);
#endif
			/* without CONFIG_TR card->dev stays NULL -> -ENODEV */
			if (!card->dev)
				return -ENODEV;
		} else {
			card->dev = alloc_etherdev(0);
			if (!card->dev)
				return -ENODEV;
			card->dev->neigh_setup = qeth_l3_neigh_setup;

			/*IPv6 address autoconfiguration stuff*/
			qeth_l3_get_unique_id(card);
			if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
				card->dev->dev_id = card->info.unique_id &
							 0xffff;
		}
	} else if (card->info.type == QETH_CARD_TYPE_IQD) {
		card->dev = alloc_netdev(0, "hsi%d", ether_setup);
		if (!card->dev)
			return -ENODEV;
		/* HiperSockets resolve addresses without ARP */
		card->dev->flags |= IFF_NOARP;
		qeth_l3_iqd_read_initial_mac(card);
	} else
		return -ENODEV;

	card->dev->hard_start_xmit = qeth_l3_hard_start_xmit;
	card->dev->priv = card;
	card->dev->tx_timeout = &qeth_tx_timeout;
	card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
	card->dev->open = qeth_l3_open;
	card->dev->stop = qeth_l3_stop;
	card->dev->do_ioctl = qeth_l3_do_ioctl;
	card->dev->get_stats = qeth_get_stats;
	card->dev->change_mtu = qeth_change_mtu;
	card->dev->set_multicast_list = qeth_l3_set_multicast_list;
	card->dev->vlan_rx_register = qeth_l3_vlan_rx_register;
	card->dev->vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid;
	card->dev->vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid;
	card->dev->mtu = card->info.initial_mtu;
	/* MAC changes are not supported in layer-3 mode */
	card->dev->set_mac_address = NULL;
	SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
	card->dev->features |=	NETIF_F_HW_VLAN_TX |
				NETIF_F_HW_VLAN_RX |
				NETIF_F_HW_VLAN_FILTER;

	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
	return register_netdev(card->dev);
}
2973
/*
 * QDIO inbound interrupt handler for layer-3 cards.
 *
 * On an activate-check condition a recovery is scheduled; otherwise
 * each signalled buffer is processed (unless it carries a QDIO
 * error), returned to the buffer pool and requeued to the hardware.
 */
static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
		unsigned int status, unsigned int qdio_err,
		unsigned int siga_err, unsigned int queue, int first_element,
		int count, unsigned long card_ptr)
{
	struct net_device *net_dev;
	struct qeth_card *card;
	struct qeth_qdio_buffer *buffer;
	int index;
	int i;

	QETH_DBF_TEXT(TRACE, 6, "qdinput");
	card = (struct qeth_card *) card_ptr;
	net_dev = card->dev;
	if (card->options.performance_stats) {
		card->perf_stats.inbound_cnt++;
		card->perf_stats.inbound_start_time = qeth_get_micros();
	}
	if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
		if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
			/* fatal channel condition: trigger recovery, do not
			 * touch the buffers */
			QETH_DBF_TEXT(TRACE, 1, "qdinchk");
			QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
			QETH_DBF_TEXT_(TRACE, 1, "%04X%04X",
					first_element,  count);
			QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status);
			qeth_schedule_recovery(card);
			return;
		}
	}
	for (i = first_element; i < (first_element + count); ++i) {
		/* the buffer ring wraps at QDIO_MAX_BUFFERS_PER_Q */
		index = i % QDIO_MAX_BUFFERS_PER_Q;
		buffer = &card->qdio.in_q->bufs[index];
		if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) &&
		      qeth_check_qdio_errors(buffer->buffer,
					     qdio_err, siga_err, "qinerr")))
			qeth_l3_process_inbound_buffer(card, buffer, index);
		/* clear buffer and give back to hardware */
		qeth_put_buffer_pool_entry(card, buffer->pool_entry);
		qeth_queue_input_buffer(card, index);
	}
	if (card->options.performance_stats)
		card->perf_stats.inbound_time += qeth_get_micros() -
			card->perf_stats.inbound_start_time;
}
3018
3019static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
3020{
3021 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3022
3023 qeth_l3_create_device_attributes(&gdev->dev);
3024 card->options.layer2 = 0;
3025 card->discipline.input_handler = (qdio_handler_t *)
3026 qeth_l3_qdio_input_handler;
3027 card->discipline.output_handler = (qdio_handler_t *)
3028 qeth_qdio_output_handler;
3029 card->discipline.recover = qeth_l3_recover;
3030 return 0;
3031}
3032
/*
 * ccwgroup remove callback: wait for worker threads, take the card
 * offline if needed, unregister the netdev and drop all IP state.
 * Teardown order matters: threads first, then offline, then netdev.
 */
static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);

	/* wait until no card thread is active anymore */
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);

	if (cgdev->state == CCWGROUP_ONLINE) {
		card->use_hard_stop = 1;
		qeth_l3_set_offline(cgdev);
	}

	if (card->dev) {
		unregister_netdev(card->dev);
		card->dev = NULL;
	}

	qeth_l3_remove_device_attributes(&cgdev->dev);
	qeth_l3_clear_ip_list(card, 0, 0);
	qeth_l3_clear_ipato_list(card);
	return;
}
3054
/*
 * Bring a layer-3 card online: set the three ccw subdevices online,
 * run the hardware and soft setup sequence (startlan, adapter parms,
 * IP assists, routing, QDIO queues) and, in recovery mode, reopen
 * the interface.  Returns 0 on success or a negative errno; on setup
 * failure the subdevices are taken offline again.
 */
static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_card_states recover_flag;

	BUG_ON(!card);
	QETH_DBF_TEXT(SETUP, 2, "setonlin");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
	if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) {
		PRINT_WARN("set_online of card %s interrupted by user!\n",
			   CARD_BUS_ID(card));
		return -ERESTARTSYS;
	}

	recover_flag = card->state;
	/* NOTE(review): a failure here returns -EIO without offlining
	 * subdevices already set online — confirm intended */
	rc = ccw_device_set_online(CARD_RDEV(card));
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		return -EIO;
	}
	rc = ccw_device_set_online(CARD_WDEV(card));
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		return -EIO;
	}
	rc = ccw_device_set_online(CARD_DDEV(card));
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		return -EIO;
	}

	rc = qeth_core_hardsetup_card(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		goto out_remove;
	}

	qeth_l3_query_ipassists(card, QETH_PROT_IPV4);

	/* the netdev is created once and survives recovery cycles */
	if (!card->dev && qeth_l3_setup_netdev(card))
		goto out_remove;

	card->state = CARD_STATE_HARDSETUP;
	qeth_print_status_message(card);

	/* softsetup */
	QETH_DBF_TEXT(SETUP, 2, "softsetp");

	rc = qeth_send_startlan(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		/* 0xe080 means the LAN is offline; keep the card up and
		 * wait for a STARTLAN event from the adapter */
		if (rc == 0xe080) {
			PRINT_WARN("LAN on card %s if offline! "
				   "Waiting for STARTLAN from card.\n",
				   CARD_BUS_ID(card));
			card->lan_online = 0;
		}
		return rc;
	} else
		card->lan_online = 1;
	qeth_set_large_send(card, card->options.large_send);

	/* the following setup steps are best-effort: failures are
	 * traced but do not abort the bring-up */
	rc = qeth_l3_setadapter_parms(card);
	if (rc)
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
	rc = qeth_l3_start_ipassists(card);
	if (rc)
		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
	rc = qeth_l3_setrouting_v4(card);
	if (rc)
		QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
	rc = qeth_l3_setrouting_v6(card);
	if (rc)
		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
	netif_tx_disable(card->dev);

	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
		goto out_remove;
	}
	card->state = CARD_STATE_SOFTSETUP;
	netif_carrier_on(card->dev);

	qeth_set_allowed_threads(card, 0xffffffff, 0);
	if ((recover_flag == CARD_STATE_RECOVER) && recovery_mode) {
		/* reopen the interface that was up before recovery */
		qeth_l3_open(card->dev);
		qeth_l3_set_multicast_list(card->dev);
	}
	/* let user_space know that device is online */
	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
	return 0;
out_remove:
	card->use_hard_stop = 1;
	qeth_l3_stop_card(card, 0);
	ccw_device_set_offline(CARD_DDEV(card));
	ccw_device_set_offline(CARD_WDEV(card));
	ccw_device_set_offline(CARD_RDEV(card));
	if (recover_flag == CARD_STATE_RECOVER)
		card->state = CARD_STATE_RECOVER;
	else
		card->state = CARD_STATE_DOWN;
	return -ENODEV;
}
3162
/* ccwgroup set_online callback: non-recovery bring-up */
static int qeth_l3_set_online(struct ccwgroup_device *gdev)
{
	return __qeth_l3_set_online(gdev, 0);
}
3167
/*
 * Take a layer-3 card offline: stop the card, then set the three ccw
 * subdevices offline.  In recovery the card state is parked at
 * CARD_STATE_RECOVER so set_online can restore the interface.
 * Returns 0 or -ERESTARTSYS if interrupted while stopping.
 */
static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
			int recovery_mode)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
	int rc = 0, rc2 = 0, rc3 = 0;
	enum qeth_card_states recover_flag;

	QETH_DBF_TEXT(SETUP, 3, "setoffl");
	QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));

	if (card->dev && netif_carrier_ok(card->dev))
		netif_carrier_off(card->dev);
	recover_flag = card->state;
	if (qeth_l3_stop_card(card, recovery_mode) == -ERESTARTSYS) {
		PRINT_WARN("Stopping card %s interrupted by user!\n",
			   CARD_BUS_ID(card));
		return -ERESTARTSYS;
	}
	/* offline all three subchannels; report the first error seen */
	rc  = ccw_device_set_offline(CARD_DDEV(card));
	rc2 = ccw_device_set_offline(CARD_WDEV(card));
	rc3 = ccw_device_set_offline(CARD_RDEV(card));
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
	if (recover_flag == CARD_STATE_UP)
		card->state = CARD_STATE_RECOVER;
	/* let user_space know that device is offline */
	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
	return 0;
}
3199
/* ccwgroup set_offline callback: non-recovery shutdown */
static int qeth_l3_set_offline(struct ccwgroup_device *cgdev)
{
	return __qeth_l3_set_offline(cgdev, 0);
}
3204
/*
 * Recovery thread entry point (card->discipline.recover): performs a
 * hard offline/online cycle of the card and clears the recovery
 * thread bits afterwards so no second recovery is started.
 */
static int qeth_l3_recover(void *ptr)
{
	struct qeth_card *card;
	int rc = 0;

	card = (struct qeth_card *) ptr;
	QETH_DBF_TEXT(TRACE, 2, "recover1");
	QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
	/* bail out if another recovery already claimed the thread bit */
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_DBF_TEXT(TRACE, 2, "recover2");
	PRINT_WARN("Recovery of device %s started ...\n",
		   CARD_BUS_ID(card));
	card->use_hard_stop = 1;
	__qeth_l3_set_offline(card->gdev, 1);
	rc = __qeth_l3_set_online(card->gdev, 1);
	/* don't run another scheduled recovery */
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		PRINT_INFO("Device %s successfully recovered!\n",
			   CARD_BUS_ID(card));
	else
		PRINT_INFO("Device %s could not be recovered!\n",
			   CARD_BUS_ID(card));
	return 0;
}
3232
/* ccwgroup shutdown callback: drop IP list and clear QDIO state. */
static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	qeth_l3_clear_ip_list(card, 0, 0);
	qeth_qdio_clear_card(card, 0);
	qeth_clear_qdio_buffers(card);
}
3240
/* ccwgroup driver callbacks for the qeth layer-3 discipline. */
struct ccwgroup_driver qeth_l3_ccwgroup_driver = {
	.probe = qeth_l3_probe_device,
	.remove = qeth_l3_remove_device,
	.set_online = qeth_l3_set_online,
	.set_offline = qeth_l3_set_offline,
	.shutdown = qeth_l3_shutdown,
};
EXPORT_SYMBOL_GPL(qeth_l3_ccwgroup_driver);
3249
3250static int qeth_l3_ip_event(struct notifier_block *this,
3251 unsigned long event, void *ptr)
3252{
3253 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
3254 struct net_device *dev = (struct net_device *)ifa->ifa_dev->dev;
3255 struct qeth_ipaddr *addr;
3256 struct qeth_card *card;
3257
3258 if (dev_net(dev) != &init_net)
3259 return NOTIFY_DONE;
3260
3261 QETH_DBF_TEXT(TRACE, 3, "ipevent");
3262 card = qeth_l3_get_card_from_dev(dev);
3263 if (!card)
3264 return NOTIFY_DONE;
3265
3266 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
3267 if (addr != NULL) {
3268 addr->u.a4.addr = ifa->ifa_address;
3269 addr->u.a4.mask = ifa->ifa_mask;
3270 addr->type = QETH_IP_TYPE_NORMAL;
3271 } else
3272 goto out;
3273
3274 switch (event) {
3275 case NETDEV_UP:
3276 if (!qeth_l3_add_ip(card, addr))
3277 kfree(addr);
3278 break;
3279 case NETDEV_DOWN:
3280 if (!qeth_l3_delete_ip(card, addr))
3281 kfree(addr);
3282 break;
3283 default:
3284 break;
3285 }
3286 qeth_l3_set_ip_addr_list(card);
3287out:
3288 return NOTIFY_DONE;
3289}
3290
3291static struct notifier_block qeth_l3_ip_notifier = {
3292 qeth_l3_ip_event,
3293 NULL,
3294};
3295
3296#ifdef CONFIG_QETH_IPV6
3297/**
3298 * IPv6 event handler
3299 */
3300static int qeth_l3_ip6_event(struct notifier_block *this,
3301 unsigned long event, void *ptr)
3302{
3303 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
3304 struct net_device *dev = (struct net_device *)ifa->idev->dev;
3305 struct qeth_ipaddr *addr;
3306 struct qeth_card *card;
3307
3308 QETH_DBF_TEXT(TRACE, 3, "ip6event");
3309
3310 card = qeth_l3_get_card_from_dev(dev);
3311 if (!card)
3312 return NOTIFY_DONE;
3313 if (!qeth_is_supported(card, IPA_IPV6))
3314 return NOTIFY_DONE;
3315
3316 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
3317 if (addr != NULL) {
3318 memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
3319 addr->u.a6.pfxlen = ifa->prefix_len;
3320 addr->type = QETH_IP_TYPE_NORMAL;
3321 } else
3322 goto out;
3323
3324 switch (event) {
3325 case NETDEV_UP:
3326 if (!qeth_l3_add_ip(card, addr))
3327 kfree(addr);
3328 break;
3329 case NETDEV_DOWN:
3330 if (!qeth_l3_delete_ip(card, addr))
3331 kfree(addr);
3332 break;
3333 default:
3334 break;
3335 }
3336 qeth_l3_set_ip_addr_list(card);
3337out:
3338 return NOTIFY_DONE;
3339}
3340
3341static struct notifier_block qeth_l3_ip6_notifier = {
3342 qeth_l3_ip6_event,
3343 NULL,
3344};
3345#endif
3346
/*
 * Register the IPv4 (and, if configured, IPv6) address notifiers.
 * On IPv6 registration failure the IPv4 notifier is rolled back.
 * Returns 0 on success or a negative errno.
 */
static int qeth_l3_register_notifiers(void)
{
	int rc;

	QETH_DBF_TEXT(TRACE, 5, "regnotif");
	rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
	if (rc)
		return rc;
#ifdef CONFIG_QETH_IPV6
	rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
	if (rc) {
		unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
		return rc;
	}
#else
	PRINT_WARN("layer 3 discipline no IPv6 support\n");
#endif
	return 0;
}
3366
3367static void qeth_l3_unregister_notifiers(void)
3368{
3369
3370 QETH_DBF_TEXT(TRACE, 5, "unregnot");
3371 BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
3372#ifdef CONFIG_QETH_IPV6
3373 BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
3374#endif /* QETH_IPV6 */
3375}
3376
/* Module init: announce the discipline and hook the address notifiers. */
static int __init qeth_l3_init(void)
{
	int rc = 0;

	PRINT_INFO("register layer 3 discipline\n");
	rc = qeth_l3_register_notifiers();
	return rc;
}
3385
/* Module exit: unhook the address notifiers. */
static void __exit qeth_l3_exit(void)
{
	qeth_l3_unregister_notifiers();
	PRINT_INFO("unregister layer 3 discipline\n");
}
3391
3392module_init(qeth_l3_init);
3393module_exit(qeth_l3_exit);
3394MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
3395MODULE_DESCRIPTION("qeth layer 3 discipline");
3396MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
new file mode 100644
index 000000000000..08f51fd902c4
--- /dev/null
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -0,0 +1,1051 @@
1/*
2 * drivers/s390/net/qeth_l3_sys.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#include "qeth_l3.h"
12
13#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
14struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
15
16static const char *qeth_l3_get_checksum_str(struct qeth_card *card)
17{
18 if (card->options.checksum_type == SW_CHECKSUMMING)
19 return "sw";
20 else if (card->options.checksum_type == HW_CHECKSUMMING)
21 return "hw";
22 else
23 return "no";
24}
25
26static ssize_t qeth_l3_dev_route_show(struct qeth_card *card,
27 struct qeth_routing_info *route, char *buf)
28{
29 switch (route->type) {
30 case PRIMARY_ROUTER:
31 return sprintf(buf, "%s\n", "primary router");
32 case SECONDARY_ROUTER:
33 return sprintf(buf, "%s\n", "secondary router");
34 case MULTICAST_ROUTER:
35 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
36 return sprintf(buf, "%s\n", "multicast router+");
37 else
38 return sprintf(buf, "%s\n", "multicast router");
39 case PRIMARY_CONNECTOR:
40 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
41 return sprintf(buf, "%s\n", "primary connector+");
42 else
43 return sprintf(buf, "%s\n", "primary connector");
44 case SECONDARY_CONNECTOR:
45 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
46 return sprintf(buf, "%s\n", "secondary connector+");
47 else
48 return sprintf(buf, "%s\n", "secondary connector");
49 default:
50 return sprintf(buf, "%s\n", "no");
51 }
52}
53
/* sysfs show for "route4": delegates to the protocol-independent helper. */
static ssize_t qeth_l3_dev_route4_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_route_show(card, &card->options.route4, buf);
}
64
65static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
66 struct qeth_routing_info *route, enum qeth_prot_versions prot,
67 const char *buf, size_t count)
68{
69 enum qeth_routing_types old_route_type = route->type;
70 char *tmp;
71 int rc;
72
73 tmp = strsep((char **) &buf, "\n");
74
75 if (!strcmp(tmp, "no_router")) {
76 route->type = NO_ROUTER;
77 } else if (!strcmp(tmp, "primary_connector")) {
78 route->type = PRIMARY_CONNECTOR;
79 } else if (!strcmp(tmp, "secondary_connector")) {
80 route->type = SECONDARY_CONNECTOR;
81 } else if (!strcmp(tmp, "primary_router")) {
82 route->type = PRIMARY_ROUTER;
83 } else if (!strcmp(tmp, "secondary_router")) {
84 route->type = SECONDARY_ROUTER;
85 } else if (!strcmp(tmp, "multicast_router")) {
86 route->type = MULTICAST_ROUTER;
87 } else {
88 PRINT_WARN("Invalid routing type '%s'.\n", tmp);
89 return -EINVAL;
90 }
91 if (((card->state == CARD_STATE_SOFTSETUP) ||
92 (card->state == CARD_STATE_UP)) &&
93 (old_route_type != route->type)) {
94 if (prot == QETH_PROT_IPV4)
95 rc = qeth_l3_setrouting_v4(card);
96 else if (prot == QETH_PROT_IPV6)
97 rc = qeth_l3_setrouting_v6(card);
98 }
99 return count;
100}
101
/* sysfs store for "route4": delegates to the protocol-independent helper. */
static ssize_t qeth_l3_dev_route4_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_route_store(card, &card->options.route4,
				QETH_PROT_IPV4, buf, count);
}

static DEVICE_ATTR(route4, 0644, qeth_l3_dev_route4_show,
			qeth_l3_dev_route4_store);
116
/* sysfs show for "route6": "n/a" when the card lacks IPv6 support. */
static ssize_t qeth_l3_dev_route6_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	if (!qeth_is_supported(card, IPA_IPV6))
		return sprintf(buf, "%s\n", "n/a");

	return qeth_l3_dev_route_show(card, &card->options.route6, buf);
}
130
131static ssize_t qeth_l3_dev_route6_store(struct device *dev,
132 struct device_attribute *attr, const char *buf, size_t count)
133{
134 struct qeth_card *card = dev_get_drvdata(dev);
135
136 if (!card)
137 return -EINVAL;
138
139 if (!qeth_is_supported(card, IPA_IPV6)) {
140 PRINT_WARN("IPv6 not supported for interface %s.\n"
141 "Routing status no changed.\n",
142 QETH_CARD_IFNAME(card));
143 return -ENOTSUPP;
144 }
145
146 return qeth_l3_dev_route_store(card, &card->options.route6,
147 QETH_PROT_IPV6, buf, count);
148}
149
150static DEVICE_ATTR(route6, 0644, qeth_l3_dev_route6_show,
151 qeth_l3_dev_route6_store);
152
/* sysfs show for "fake_broadcast": 0 or 1. */
static ssize_t qeth_l3_dev_fake_broadcast_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0);
}

/* sysfs store for "fake_broadcast": accepts 0 or 1; only allowed while
 * the card is DOWN or in RECOVER state. */
static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	char *tmp;
	int i;

	if (!card)
		return -EINVAL;

	if ((card->state != CARD_STATE_DOWN) &&
	    (card->state != CARD_STATE_RECOVER))
		return -EPERM;

	/* NOTE(review): parses base 16 although only 0/1 are valid —
	 * harmless for those values, but presumably base 10 was meant */
	i = simple_strtoul(buf, &tmp, 16);
	if ((i == 0) || (i == 1))
		card->options.fake_broadcast = i;
	else {
		PRINT_WARN("fake_broadcast: write 0 or 1 to this file!\n");
		return -EINVAL;
	}
	return count;
}

static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show,
		   qeth_l3_dev_fake_broadcast_store);
190
/* sysfs show for "broadcast_mode": only meaningful on token-ring links. */
static ssize_t qeth_l3_dev_broadcast_mode_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
	      (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
		return sprintf(buf, "n/a\n");

	return sprintf(buf, "%s\n", (card->options.broadcast_mode ==
				     QETH_TR_BROADCAST_ALLRINGS)?
		       "all rings":"local");
}
207
208static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev,
209 struct device_attribute *attr, const char *buf, size_t count)
210{
211 struct qeth_card *card = dev_get_drvdata(dev);
212 char *tmp;
213
214 if (!card)
215 return -EINVAL;
216
217 if ((card->state != CARD_STATE_DOWN) &&
218 (card->state != CARD_STATE_RECOVER))
219 return -EPERM;
220
221 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
222 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
223 PRINT_WARN("Device is not a tokenring device!\n");
224 return -EINVAL;
225 }
226
227 tmp = strsep((char **) &buf, "\n");
228
229 if (!strcmp(tmp, "local")) {
230 card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL;
231 return count;
232 } else if (!strcmp(tmp, "all_rings")) {
233 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
234 return count;
235 } else {
236 PRINT_WARN("broadcast_mode: invalid mode %s!\n",
237 tmp);
238 return -EINVAL;
239 }
240 return count;
241}
242
243static DEVICE_ATTR(broadcast_mode, 0644, qeth_l3_dev_broadcast_mode_show,
244 qeth_l3_dev_broadcast_mode_store);
245
/* sysfs show for "canonical_macaddr": token-ring only; 1 if canonical. */
static ssize_t qeth_l3_dev_canonical_macaddr_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
	      (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
		return sprintf(buf, "n/a\n");

	return sprintf(buf, "%i\n", (card->options.macaddr_mode ==
				     QETH_TR_MACADDR_CANONICAL)? 1:0);
}

/* sysfs store for "canonical_macaddr": accepts 0 or 1 on token-ring
 * links while the card is DOWN or in RECOVER state. */
static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	char *tmp;
	int i;

	if (!card)
		return -EINVAL;

	if ((card->state != CARD_STATE_DOWN) &&
	    (card->state != CARD_STATE_RECOVER))
		return -EPERM;

	if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
	      (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
		PRINT_WARN("Device is not a tokenring device!\n");
		return -EINVAL;
	}

	/* NOTE(review): base 16 parse, but only 0/1 are accepted anyway */
	i = simple_strtoul(buf, &tmp, 16);
	if ((i == 0) || (i == 1))
		card->options.macaddr_mode = i?
			QETH_TR_MACADDR_CANONICAL :
			QETH_TR_MACADDR_NONCANONICAL;
	else {
		PRINT_WARN("canonical_macaddr: write 0 or 1 to this file!\n");
		return -EINVAL;
	}
	return count;
}

static DEVICE_ATTR(canonical_macaddr, 0644, qeth_l3_dev_canonical_macaddr_show,
		   qeth_l3_dev_canonical_macaddr_store);
296
/* sysfs show for "checksumming": "sw"/"hw"/"no" + " checksumming". */
static ssize_t qeth_l3_dev_checksum_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return sprintf(buf, "%s checksumming\n",
			qeth_l3_get_checksum_str(card));
}
308
/* sysfs store for "checksumming": accepts sw_/hw_/no_checksumming while
 * the card is DOWN or in RECOVER state. */
static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	char *tmp;

	if (!card)
		return -EINVAL;

	if ((card->state != CARD_STATE_DOWN) &&
	    (card->state != CARD_STATE_RECOVER))
		return -EPERM;

	tmp = strsep((char **) &buf, "\n");
	if (!strcmp(tmp, "sw_checksumming"))
		card->options.checksum_type = SW_CHECKSUMMING;
	else if (!strcmp(tmp, "hw_checksumming"))
		card->options.checksum_type = HW_CHECKSUMMING;
	else if (!strcmp(tmp, "no_checksumming"))
		card->options.checksum_type = NO_CHECKSUMMING;
	else {
		PRINT_WARN("Unknown checksumming type '%s'\n", tmp);
		return -EINVAL;
	}
	return count;
}

static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
		qeth_l3_dev_checksum_store);
338
/* Top-level layer-3 sysfs attributes (no subdirectory). */
static struct attribute *qeth_l3_device_attrs[] = {
	&dev_attr_route4.attr,
	&dev_attr_route6.attr,
	&dev_attr_fake_broadcast.attr,
	&dev_attr_broadcast_mode.attr,
	&dev_attr_canonical_macaddr.attr,
	&dev_attr_checksumming.attr,
	NULL,
};

static struct attribute_group qeth_l3_device_attr_group = {
	.attrs = qeth_l3_device_attrs,
};
352
/* sysfs show for ipa_takeover/enable: 0 or 1. */
static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return sprintf(buf, "%i\n", card->ipato.enabled? 1:0);
}
363
364static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
365 struct device_attribute *attr, const char *buf, size_t count)
366{
367 struct qeth_card *card = dev_get_drvdata(dev);
368 char *tmp;
369
370 if (!card)
371 return -EINVAL;
372
373 if ((card->state != CARD_STATE_DOWN) &&
374 (card->state != CARD_STATE_RECOVER))
375 return -EPERM;
376
377 tmp = strsep((char **) &buf, "\n");
378 if (!strcmp(tmp, "toggle")) {
379 card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
380 } else if (!strcmp(tmp, "1")) {
381 card->ipato.enabled = 1;
382 } else if (!strcmp(tmp, "0")) {
383 card->ipato.enabled = 0;
384 } else {
385 PRINT_WARN("ipato_enable: write 0, 1 or 'toggle' to "
386 "this file\n");
387 return -EINVAL;
388 }
389 return count;
390}
391
392static QETH_DEVICE_ATTR(ipato_enable, enable, 0644,
393 qeth_l3_dev_ipato_enable_show,
394 qeth_l3_dev_ipato_enable_store);
395
/* sysfs show for ipa_takeover/invert4: 0 or 1. */
static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return sprintf(buf, "%i\n", card->ipato.invert4? 1:0);
}

/* sysfs store for ipa_takeover/invert4: accepts "0", "1" or "toggle".
 * NOTE(review): unlike ipato_enable_store there is no card-state check
 * here — presumably intentional; confirm. */
static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	char *tmp;

	if (!card)
		return -EINVAL;

	tmp = strsep((char **) &buf, "\n");
	if (!strcmp(tmp, "toggle")) {
		card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
	} else if (!strcmp(tmp, "1")) {
		card->ipato.invert4 = 1;
	} else if (!strcmp(tmp, "0")) {
		card->ipato.invert4 = 0;
	} else {
		PRINT_WARN("ipato_invert4: write 0, 1 or 'toggle' to "
			   "this file\n");
		return -EINVAL;
	}
	return count;
}

static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
			qeth_l3_dev_ipato_invert4_show,
			qeth_l3_dev_ipato_invert4_store);
435
/*
 * Render all IPA-takeover entries of one protocol as "<addr>/<mask>\n"
 * lines into the sysfs page buffer, stopping before PAGE_SIZE would
 * overflow.  Returns the number of bytes written.
 */
static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
			enum qeth_prot_versions proto)
{
	struct qeth_ipato_entry *ipatoe;
	unsigned long flags;
	char addr_str[40];
	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
	int i = 0;

	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
	/* add strlen for "/<mask>\n" */
	entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
	spin_lock_irqsave(&card->ip_lock, flags);
	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
		if (ipatoe->proto != proto)
			continue;
		/* String must not be longer than PAGE_SIZE. So we check if
		 * string length gets near PAGE_SIZE. Then we can safely display
		 * the next IPv6 address (worst case, compared to IPv4) */
		if ((PAGE_SIZE - i) <= entry_len)
			break;
		qeth_l3_ipaddr_to_string(proto, ipatoe->addr, addr_str);
		i += snprintf(buf + i, PAGE_SIZE - i,
			      "%s/%i\n", addr_str, ipatoe->mask_bits);
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
	i += snprintf(buf + i, PAGE_SIZE - i, "\n");

	return i;
}

/* sysfs show for ipa_takeover/add4. */
static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
}
477
/*
 * Parse "<ip addr>/<mask bits>" into a binary address and mask length.
 * The address part is bounded to < 40 chars (buffer is zeroed, so the
 * strncpy result stays NUL-terminated).  Returns 0 or -EINVAL.
 */
static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto,
		  u8 *addr, int *mask_bits)
{
	const char *start, *end;
	char *tmp;
	char buffer[40] = {0, };

	start = buf;
	/* get address string */
	end = strchr(start, '/');
	if (!end || (end - start >= 40)) {
		PRINT_WARN("Invalid format for ipato_addx/delx. "
			   "Use <ip addr>/<mask bits>\n");
		return -EINVAL;
	}
	strncpy(buffer, start, end - start);
	if (qeth_l3_string_to_ipaddr(buffer, proto, addr)) {
		PRINT_WARN("Invalid IP address format!\n");
		return -EINVAL;
	}
	start = end + 1;
	*mask_bits = simple_strtoul(start, &tmp, 10);
	if (!strlen(start) ||
	    (tmp == start) ||
	    (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) {
		PRINT_WARN("Invalid mask bits for ipato_addx/delx !\n");
		return -EINVAL;
	}
	return 0;
}
508
/*
 * Parse and add one IPA-takeover entry.  Ownership of the allocated
 * entry passes to qeth_l3_add_ipato_entry() on success; on failure it
 * is freed here.
 */
static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
			 struct qeth_card *card, enum qeth_prot_versions proto)
{
	struct qeth_ipato_entry *ipatoe;
	u8 addr[16];
	int mask_bits;
	int rc;

	rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
	if (rc)
		return rc;

	ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL);
	if (!ipatoe) {
		PRINT_WARN("No memory to allocate ipato entry\n");
		return -ENOMEM;
	}
	ipatoe->proto = proto;
	memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
	ipatoe->mask_bits = mask_bits;

	rc = qeth_l3_add_ipato_entry(card, ipatoe);
	if (rc) {
		kfree(ipatoe);
		return rc;
	}

	return count;
}

/* sysfs store for ipa_takeover/add4. */
static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4);
}

static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
			qeth_l3_dev_ipato_add4_show,
			qeth_l3_dev_ipato_add4_store);
553
/* Parse and delete one IPA-takeover entry (deletion result is not
 * reported back to the writer). */
static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
			 struct qeth_card *card, enum qeth_prot_versions proto)
{
	u8 addr[16];
	int mask_bits;
	int rc;

	rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
	if (rc)
		return rc;

	qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);

	return count;
}

/* sysfs store for ipa_takeover/del4. */
static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4);
}

static QETH_DEVICE_ATTR(ipato_del4, del4, 0200, NULL,
			qeth_l3_dev_ipato_del4_store);
583
/* sysfs show for ipa_takeover/invert6: 0 or 1. */
static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return sprintf(buf, "%i\n", card->ipato.invert6? 1:0);
}

/* sysfs store for ipa_takeover/invert6: accepts "0", "1" or "toggle". */
static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	char *tmp;

	if (!card)
		return -EINVAL;

	tmp = strsep((char **) &buf, "\n");
	if (!strcmp(tmp, "toggle")) {
		card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
	} else if (!strcmp(tmp, "1")) {
		card->ipato.invert6 = 1;
	} else if (!strcmp(tmp, "0")) {
		card->ipato.invert6 = 0;
	} else {
		PRINT_WARN("ipato_invert6: write 0, 1 or 'toggle' to "
			   "this file\n");
		return -EINVAL;
	}
	return count;
}

static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644,
			qeth_l3_dev_ipato_invert6_show,
			qeth_l3_dev_ipato_invert6_store);
622
623
/* sysfs show for ipa_takeover/add6. */
static ssize_t qeth_l3_dev_ipato_add6_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV6);
}

/* sysfs store for ipa_takeover/add6. */
static ssize_t qeth_l3_dev_ipato_add6_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6);
}

static QETH_DEVICE_ATTR(ipato_add6, add6, 0644,
			qeth_l3_dev_ipato_add6_show,
			qeth_l3_dev_ipato_add6_store);

/* sysfs store for ipa_takeover/del6. */
static ssize_t qeth_l3_dev_ipato_del6_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6);
}

static QETH_DEVICE_ATTR(ipato_del6, del6, 0200, NULL,
			qeth_l3_dev_ipato_del6_store);
663
/* Attributes exposed under the "ipa_takeover" sysfs subdirectory. */
static struct attribute *qeth_ipato_device_attrs[] = {
	&dev_attr_ipato_enable.attr,
	&dev_attr_ipato_invert4.attr,
	&dev_attr_ipato_add4.attr,
	&dev_attr_ipato_del4.attr,
	&dev_attr_ipato_invert6.attr,
	&dev_attr_ipato_add6.attr,
	&dev_attr_ipato_del6.attr,
	NULL,
};

static struct attribute_group qeth_device_ipato_group = {
	.name = "ipa_takeover",
	.attrs = qeth_ipato_device_attrs,
};
679
/*
 * Render all VIPA addresses of one protocol into the sysfs page
 * buffer, one address per line, stopping before PAGE_SIZE overflows.
 */
static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
			enum qeth_prot_versions proto)
{
	struct qeth_ipaddr *ipaddr;
	char addr_str[40];
	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
	unsigned long flags;
	int i = 0;

	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
	entry_len += 2; /* \n + terminator */
	spin_lock_irqsave(&card->ip_lock, flags);
	list_for_each_entry(ipaddr, &card->ip_list, entry) {
		if (ipaddr->proto != proto)
			continue;
		if (ipaddr->type != QETH_IP_TYPE_VIPA)
			continue;
		/* String must not be longer than PAGE_SIZE. So we check if
		 * string length gets near PAGE_SIZE. Then we can safely display
		 * the next IPv6 address (worst case, compared to IPv4) */
		if ((PAGE_SIZE - i) <= entry_len)
			break;
		qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
			addr_str);
		i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
	i += snprintf(buf + i, PAGE_SIZE - i, "\n");

	return i;
}

/* sysfs show for vipa/add4. */
static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
}

/* Validate and convert a textual VIPA address; 0 on success. */
static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto,
		 u8 *addr)
{
	if (qeth_l3_string_to_ipaddr(buf, proto, addr)) {
		PRINT_WARN("Invalid IP address format!\n");
		return -EINVAL;
	}
	return 0;
}
732
/* Parse and register one VIPA address for the given protocol. */
static ssize_t qeth_l3_dev_vipa_add_store(const char *buf, size_t count,
			 struct qeth_card *card, enum qeth_prot_versions proto)
{
	u8 addr[16] = {0, };
	int rc;

	rc = qeth_l3_parse_vipae(buf, proto, addr);
	if (rc)
		return rc;

	rc = qeth_l3_add_vipa(card, proto, addr);
	if (rc)
		return rc;

	return count;
}

/* sysfs store for vipa/add4. */
static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4);
}

static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
			qeth_l3_dev_vipa_add4_show,
			qeth_l3_dev_vipa_add4_store);

/* Parse and remove one VIPA address for the given protocol. */
static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
			 struct qeth_card *card, enum qeth_prot_versions proto)
{
	u8 addr[16];
	int rc;

	rc = qeth_l3_parse_vipae(buf, proto, addr);
	if (rc)
		return rc;

	qeth_l3_del_vipa(card, proto, addr);

	return count;
}

/* sysfs store for vipa/del4. */
static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
}

static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
			qeth_l3_dev_vipa_del4_store);
793
/* sysfs show for vipa/add6. */
static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV6);
}

/* sysfs store for vipa/add6. */
static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
}

static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
			qeth_l3_dev_vipa_add6_show,
			qeth_l3_dev_vipa_add6_store);

/* sysfs store for vipa/del6. */
static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
}

static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL,
			qeth_l3_dev_vipa_del6_store);
833
/* Attributes exposed under the "vipa" sysfs subdirectory. */
static struct attribute *qeth_vipa_device_attrs[] = {
	&dev_attr_vipa_add4.attr,
	&dev_attr_vipa_del4.attr,
	&dev_attr_vipa_add6.attr,
	&dev_attr_vipa_del6.attr,
	NULL,
};

static struct attribute_group qeth_device_vipa_group = {
	.name = "vipa",
	.attrs = qeth_vipa_device_attrs,
};
846
/*
 * Render all RXIP addresses of one protocol into the sysfs page
 * buffer, one address per line, stopping before PAGE_SIZE overflows.
 */
static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
		       enum qeth_prot_versions proto)
{
	struct qeth_ipaddr *ipaddr;
	char addr_str[40];
	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
	unsigned long flags;
	int i = 0;

	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
	entry_len += 2; /* \n + terminator */
	spin_lock_irqsave(&card->ip_lock, flags);
	list_for_each_entry(ipaddr, &card->ip_list, entry) {
		if (ipaddr->proto != proto)
			continue;
		if (ipaddr->type != QETH_IP_TYPE_RXIP)
			continue;
		/* String must not be longer than PAGE_SIZE. So we check if
		 * string length gets near PAGE_SIZE. Then we can safely display
		 * the next IPv6 address (worst case, compared to IPv4) */
		if ((PAGE_SIZE - i) <= entry_len)
			break;
		qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
			addr_str);
		i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
	i += snprintf(buf + i, PAGE_SIZE - i, "\n");

	return i;
}

/* sysfs show for rxip/add4. */
static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
}

/* Validate and convert a textual RXIP address; 0 on success. */
static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
		 u8 *addr)
{
	if (qeth_l3_string_to_ipaddr(buf, proto, addr)) {
		PRINT_WARN("Invalid IP address format!\n");
		return -EINVAL;
	}
	return 0;
}
899
/* Parse and register one RXIP address for the given protocol. */
static ssize_t qeth_l3_dev_rxip_add_store(const char *buf, size_t count,
			struct qeth_card *card, enum qeth_prot_versions proto)
{
	u8 addr[16] = {0, };
	int rc;

	rc = qeth_l3_parse_rxipe(buf, proto, addr);
	if (rc)
		return rc;

	rc = qeth_l3_add_rxip(card, proto, addr);
	if (rc)
		return rc;

	return count;
}

/* sysfs store for rxip/add4. */
static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
}

static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
			qeth_l3_dev_rxip_add4_show,
			qeth_l3_dev_rxip_add4_store);

/* Parse and remove one RXIP address for the given protocol. */
static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
			struct qeth_card *card, enum qeth_prot_versions proto)
{
	u8 addr[16];
	int rc;

	rc = qeth_l3_parse_rxipe(buf, proto, addr);
	if (rc)
		return rc;

	qeth_l3_del_rxip(card, proto, addr);

	return count;
}

/* sysfs store for rxip/del4. */
static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4);
}

static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
			qeth_l3_dev_rxip_del4_store);
960
/* sysfs show for rxip/add6. */
static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV6);
}

/* sysfs store for rxip/add6. */
static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6);
}

static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
			qeth_l3_dev_rxip_add6_show,
			qeth_l3_dev_rxip_add6_store);

/* sysfs store for rxip/del6. */
static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6);
}

static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL,
			qeth_l3_dev_rxip_del6_store);
1000
/* Attributes exposed under the "rxip" sysfs subdirectory. */
static struct attribute *qeth_rxip_device_attrs[] = {
	&dev_attr_rxip_add4.attr,
	&dev_attr_rxip_del4.attr,
	&dev_attr_rxip_add6.attr,
	&dev_attr_rxip_del6.attr,
	NULL,
};

static struct attribute_group qeth_device_rxip_group = {
	.name = "rxip",
	.attrs = qeth_rxip_device_attrs,
};
1013
1014int qeth_l3_create_device_attributes(struct device *dev)
1015{
1016 int ret;
1017
1018 ret = sysfs_create_group(&dev->kobj, &qeth_l3_device_attr_group);
1019 if (ret)
1020 return ret;
1021
1022 ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group);
1023 if (ret) {
1024 sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
1025 return ret;
1026 }
1027
1028 ret = sysfs_create_group(&dev->kobj, &qeth_device_vipa_group);
1029 if (ret) {
1030 sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
1031 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1032 return ret;
1033 }
1034
1035 ret = sysfs_create_group(&dev->kobj, &qeth_device_rxip_group);
1036 if (ret) {
1037 sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
1038 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1039 sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
1040 return ret;
1041 }
1042 return 0;
1043}
1044
/* Remove all sysfs groups created by qeth_l3_create_device_attributes(). */
void qeth_l3_remove_device_attributes(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
	sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
	sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
	sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
}
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
deleted file mode 100644
index 62606ce26e55..000000000000
--- a/drivers/s390/net/qeth_main.c
+++ /dev/null
@@ -1,8956 +0,0 @@
1/*
2 * linux/drivers/s390/net/qeth_main.c
3 *
4 * Linux on zSeries OSA Express and HiperSockets support
5 *
6 * Copyright 2000,2003 IBM Corporation
7 *
8 * Author(s): Original Code written by
9 * Utz Bacher (utz.bacher@de.ibm.com)
10 * Rewritten by
11 * Frank Pavlic (fpavlic@de.ibm.com) and
12 * Thomas Spatzier <tspat@de.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29
30#include <linux/module.h>
31#include <linux/moduleparam.h>
32#include <linux/string.h>
33#include <linux/errno.h>
34#include <linux/mm.h>
35#include <linux/ip.h>
36#include <linux/inetdevice.h>
37#include <linux/netdevice.h>
38#include <linux/sched.h>
39#include <linux/workqueue.h>
40#include <linux/kernel.h>
41#include <linux/slab.h>
42#include <linux/interrupt.h>
43#include <linux/tcp.h>
44#include <linux/icmp.h>
45#include <linux/skbuff.h>
46#include <linux/in.h>
47#include <linux/igmp.h>
48#include <linux/init.h>
49#include <linux/reboot.h>
50#include <linux/mii.h>
51#include <linux/rcupdate.h>
52#include <linux/ethtool.h>
53
54#include <net/arp.h>
55#include <net/ip.h>
56#include <net/route.h>
57
58#include <asm/ebcdic.h>
59#include <asm/io.h>
60#include <asm/qeth.h>
61#include <asm/timex.h>
62#include <asm/semaphore.h>
63#include <asm/uaccess.h>
64#include <asm/s390_rdev.h>
65
66#include "qeth.h"
67#include "qeth_mpc.h"
68#include "qeth_fs.h"
69#include "qeth_eddp.h"
70#include "qeth_tso.h"
71
72static const char *version = "qeth S/390 OSA-Express driver";
73
74/**
75 * Debug Facility Stuff
76 */
77static debug_info_t *qeth_dbf_setup = NULL;
78static debug_info_t *qeth_dbf_data = NULL;
79static debug_info_t *qeth_dbf_misc = NULL;
80static debug_info_t *qeth_dbf_control = NULL;
81debug_info_t *qeth_dbf_trace = NULL;
82static debug_info_t *qeth_dbf_sense = NULL;
83static debug_info_t *qeth_dbf_qerr = NULL;
84
85DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf);
86
87static struct lock_class_key qdio_out_skb_queue_key;
88
89/**
90 * some more definitions and declarations
91 */
92static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;
93
94/* list of our cards */
95struct qeth_card_list_struct qeth_card_list;
96/*process list want to be notified*/
97spinlock_t qeth_notify_lock;
98struct list_head qeth_notify_list;
99
100static void qeth_send_control_data_cb(struct qeth_channel *,
101 struct qeth_cmd_buffer *);
102
103/**
104 * here we go with function implementation
105 */
106static void
107qeth_init_qdio_info(struct qeth_card *card);
108
109static int
110qeth_init_qdio_queues(struct qeth_card *card);
111
112static int
113qeth_alloc_qdio_buffers(struct qeth_card *card);
114
115static void
116qeth_free_qdio_buffers(struct qeth_card *);
117
118static void
119qeth_clear_qdio_buffers(struct qeth_card *);
120
121static void
122qeth_clear_ip_list(struct qeth_card *, int, int);
123
124static void
125qeth_clear_ipacmd_list(struct qeth_card *);
126
127static int
128qeth_qdio_clear_card(struct qeth_card *, int);
129
130static void
131qeth_clear_working_pool_list(struct qeth_card *);
132
133static void
134qeth_clear_cmd_buffers(struct qeth_channel *);
135
136static int
137qeth_stop(struct net_device *);
138
139static void
140qeth_clear_ipato_list(struct qeth_card *);
141
142static int
143qeth_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
144
145static void
146qeth_irq_tasklet(unsigned long);
147
148static int
149qeth_set_online(struct ccwgroup_device *);
150
151static int
152__qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode);
153
154static struct qeth_ipaddr *
155qeth_get_addr_buffer(enum qeth_prot_versions);
156
157static void
158qeth_set_multicast_list(struct net_device *);
159
160static void
161qeth_setadp_promisc_mode(struct qeth_card *);
162
163static int
164qeth_hard_header_parse(const struct sk_buff *skb, unsigned char *haddr);
165
/* Send the registered signal to every task on qeth_notify_list,
 * under qeth_notify_lock. Called after a card went offline. */
166static void
167qeth_notify_processes(void)
168{
169 /* notify all registered processes */
170 struct qeth_notify_list_struct *n_entry;
171
172 QETH_DBF_TEXT(trace,3,"procnoti");
173 spin_lock(&qeth_notify_lock);
174 list_for_each_entry(n_entry, &qeth_notify_list, list) {
175 send_sig(n_entry->signum, n_entry->task, 1);
176 }
177 spin_unlock(&qeth_notify_lock);
178
179}
/* Remove task @p from the notification list and free its entry.
 * Silently succeeds (returns 0) even if @p was never registered. */
180int
181qeth_notifier_unregister(struct task_struct *p)
182{
183 struct qeth_notify_list_struct *n_entry, *tmp;
184
185 QETH_DBF_TEXT(trace, 2, "notunreg");
186 spin_lock(&qeth_notify_lock);
187 list_for_each_entry_safe(n_entry, tmp, &qeth_notify_list, list) {
188 if (n_entry->task == p) {
189 list_del(&n_entry->list);
190 kfree(n_entry);
191 goto out;
192 }
193 }
194out:
195 spin_unlock(&qeth_notify_lock);
196 return 0;
197}
/*
 * Register task @p to be signalled with @signum on card state changes.
 * If @p is already registered only its signal number is updated.
 * Returns 0 on success, -ENOMEM if the list entry cannot be allocated.
 *
 * NOTE(review): the lock is dropped between the duplicate check and the
 * list_add below, so two concurrent registrations of the same task could
 * both miss the check and insert duplicate entries — confirm callers
 * serialize registration.
 */
198int
199qeth_notifier_register(struct task_struct *p, int signum)
200{
201 struct qeth_notify_list_struct *n_entry;
202
203 /* check first if entry already exists */
204 spin_lock(&qeth_notify_lock);
205 list_for_each_entry(n_entry, &qeth_notify_list, list) {
206 if (n_entry->task == p) {
207 n_entry->signum = signum;
208 spin_unlock(&qeth_notify_lock);
209 return 0;
210 }
211 }
212 spin_unlock(&qeth_notify_lock);
213
214 /* NOTE(review): cast of kmalloc() return is redundant in C */
215 n_entry = (struct qeth_notify_list_struct *)
216 kmalloc(sizeof(struct qeth_notify_list_struct),GFP_KERNEL);
217 if (!n_entry)
218 return -ENOMEM;
219 n_entry->task = p;
220 n_entry->signum = signum;
221 spin_lock(&qeth_notify_lock);
222 list_add(&n_entry->list,&qeth_notify_list);
223 spin_unlock(&qeth_notify_lock);
224 return 0;
225}
225
226
227/**
228 * free channel command buffers
229 */
/* Free the command buffer data of every iob slot on @channel.
 * Safe on partially set-up channels: kfree(NULL) is a no-op for
 * slots whose allocation never happened. */
230static void
231qeth_clean_channel(struct qeth_channel *channel)
232{
233 int cnt;
234
235 QETH_DBF_TEXT(setup, 2, "freech");
236 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
237 kfree(channel->iob[cnt].data);
238}
239
240/**
241 * free card
242 */
/* Release everything owned by @card: channel command buffers, the
 * net_device (if allocated), IP and IPATO lists, the to-be-done IP
 * list head, QDIO buffers, and finally the card structure itself. */
243static void
244qeth_free_card(struct qeth_card *card)
245{
246
247 QETH_DBF_TEXT(setup, 2, "freecrd");
248 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
249 qeth_clean_channel(&card->read);
250 qeth_clean_channel(&card->write);
251 if (card->dev)
252 free_netdev(card->dev);
253 qeth_clear_ip_list(card, 0, 0);
254 qeth_clear_ipato_list(card);
255 kfree(card->ip_tbd_list);
256 qeth_free_qdio_buffers(card);
257 kfree(card);
258}
259
260/**
261 * alloc memory for command buffer per channel
262 */
/*
 * Allocate and initialize the per-channel command buffers and the
 * channel's bookkeeping (buffer indices, irq_pending flag, iob lock,
 * wait queue, tasklet). Buffers are GFP_DMA since they are handed to
 * channel I/O. On partial allocation failure all buffers allocated so
 * far are freed and -ENOMEM is returned; returns 0 on success.
 */
263static int
264qeth_setup_channel(struct qeth_channel *channel)
265{
266 int cnt;
267
268 QETH_DBF_TEXT(setup, 2, "setupch");
269 for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
270 channel->iob[cnt].data = (char *)
271 kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
272 if (channel->iob[cnt].data == NULL)
273 break;
274 channel->iob[cnt].state = BUF_STATE_FREE;
275 channel->iob[cnt].channel = channel;
276 channel->iob[cnt].callback = qeth_send_control_data_cb;
277 channel->iob[cnt].rc = 0;
278 }
279 /* loop broke early -> unwind the buffers already allocated */
280 if (cnt < QETH_CMD_BUFFER_NO) {
281 while (cnt-- > 0)
282 kfree(channel->iob[cnt].data);
283 return -ENOMEM;
284 }
285 channel->buf_no = 0;
286 channel->io_buf_no = 0;
287 atomic_set(&channel->irq_pending, 0);
288 spin_lock_init(&channel->iob_lock);
289
290 init_waitqueue_head(&channel->wait_q);
291 channel->irq_tasklet.data = (unsigned long) channel;
292 channel->irq_tasklet.func = qeth_irq_tasklet;
293 return 0;
294}
294
295/**
296 * alloc memory for card structure
297 */
/*
 * Allocate a zeroed card structure (GFP_DMA) and set up the read and
 * write channels' command buffers. Returns NULL on any allocation
 * failure, with everything allocated so far released again.
 */
298static struct qeth_card *
299qeth_alloc_card(void)
300{
301 struct qeth_card *card;
302
303 QETH_DBF_TEXT(setup, 2, "alloccrd");
304 card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
305 if (!card)
306 return NULL;
307 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
308 if (qeth_setup_channel(&card->read)) {
309 kfree(card);
310 return NULL;
311 }
312 if (qeth_setup_channel(&card->write)) {
313 /* write channel failed -> undo the read channel as well */
314 qeth_clean_channel(&card->read);
315 kfree(card);
316 return NULL;
317 }
318 return card;
319}
319
/*
 * Check whether the irb passed to the interrupt handler is actually an
 * ERR_PTR-encoded error. Returns 0 for a valid irb, otherwise the
 * negative error code. For a timeout during a read-configuration-data
 * request (QETH_RCD_PARM) the data channel is marked down and waiters
 * are woken so the RCD caller can give up.
 */
320static long
321__qeth_check_irb_error(struct ccw_device *cdev, unsigned long intparm,
322 struct irb *irb)
323{
324 if (!IS_ERR(irb))
325 return 0;
326
327 switch (PTR_ERR(irb)) {
328 case -EIO:
329 PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
330 QETH_DBF_TEXT(trace, 2, "ckirberr");
331 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
332 break;
333 case -ETIMEDOUT:
334 PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
335 QETH_DBF_TEXT(trace, 2, "ckirberr");
336 QETH_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
337 if (intparm == QETH_RCD_PARM) {
338 struct qeth_card *card = CARD_FROM_CDEV(cdev);
339
340 if (card && (card->data.ccwdev == cdev)) {
341 card->data.state = CH_STATE_DOWN;
342 wake_up(&card->wait_q);
343 }
344 }
345 break;
346 default:
347 PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
348 cdev->dev.bus_id);
349 QETH_DBF_TEXT(trace, 2, "ckirberr");
350 QETH_DBF_TEXT(trace, 2, " rc???");
351 }
352 return PTR_ERR(irb);
353}
354
/*
 * Classify channel/device status in @irb. Returns 1 when the condition
 * is serious enough that the caller should trigger card recovery
 * (channel checks, resetting event, 0xaf/0xfe sense, generic unit
 * check), 0 when it is benign (command reject, all-zero sense).
 */
355static int
356qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
357{
358 int dstat,cstat;
359 char *sense;
360
361 sense = (char *) irb->ecw;
362 cstat = irb->scsw.cstat;
363 dstat = irb->scsw.dstat;
364
365 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
366 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
367 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
368 QETH_DBF_TEXT(trace,2, "CGENCHK");
369 PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ",
370 cdev->dev.bus_id, dstat, cstat);
371 HEXDUMP16(WARN, "irb: ", irb);
372 HEXDUMP16(WARN, "irb: ", ((char *) irb) + 32);
373 return 1;
374 }
375
376 if (dstat & DEV_STAT_UNIT_CHECK) {
377 if (sense[SENSE_RESETTING_EVENT_BYTE] &
378 SENSE_RESETTING_EVENT_FLAG) {
379 QETH_DBF_TEXT(trace,2,"REVIND");
380 return 1;
381 }
382 if (sense[SENSE_COMMAND_REJECT_BYTE] &
383 SENSE_COMMAND_REJECT_FLAG) {
384 QETH_DBF_TEXT(trace,2,"CMDREJi");
385 return 0;
386 }
387 /* 0xaf/0xfe sense: adapter fatal error */
388 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
389 QETH_DBF_TEXT(trace,2,"AFFE");
390 return 1;
391 }
392 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
393 QETH_DBF_TEXT(trace,2,"ZEROSEN");
394 return 0;
395 }
396 QETH_DBF_TEXT(trace,2,"DGENCHK");
397 return 1;
398 }
399 return 0;
400}
400static int qeth_issue_next_read(struct qeth_card *);
401
402/**
403 * interrupt handler
404 */
/*
 * CCW interrupt handler shared by the read, write and data channels.
 * Identifies the channel from @cdev, updates channel state for
 * clear/halt functions, classifies errors (possibly scheduling card
 * recovery), marks the completed command buffer, restarts the next
 * read on the read channel, and runs the irq tasklet inline.
 * The "goto out" exits additionally wake card->wait_q; the plain
 * "return" exits deliberately do not.
 */
405static void
406qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
407{
408 int rc;
409 int cstat,dstat;
410 struct qeth_cmd_buffer *buffer;
411 struct qeth_channel *channel;
412 struct qeth_card *card;
413
414 QETH_DBF_TEXT(trace,5,"irq");
415
416 if (__qeth_check_irb_error(cdev, intparm, irb))
417 return;
418 cstat = irb->scsw.cstat;
419 dstat = irb->scsw.dstat;
420
421 card = CARD_FROM_CDEV(cdev);
422 if (!card)
423 return;
424
425 if (card->read.ccwdev == cdev){
426 channel = &card->read;
427 QETH_DBF_TEXT(trace,5,"read");
428 } else if (card->write.ccwdev == cdev) {
429 channel = &card->write;
430 QETH_DBF_TEXT(trace,5,"write");
431 } else {
432 channel = &card->data;
433 QETH_DBF_TEXT(trace,5,"data");
434 }
435 atomic_set(&channel->irq_pending, 0);
436
437 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC))
438 channel->state = CH_STATE_STOPPED;
439
440 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC))
441 channel->state = CH_STATE_HALTED;
442
443 /* let's wake up immediately on data channel */
444 if ((channel == &card->data) && (intparm != 0) &&
445 (intparm != QETH_RCD_PARM))
446 goto out;
447
448 if (intparm == QETH_CLEAR_CHANNEL_PARM) {
449 QETH_DBF_TEXT(trace, 6, "clrchpar");
450 /* we don't have to handle this further */
451 intparm = 0;
452 }
453 if (intparm == QETH_HALT_CHANNEL_PARM) {
454 QETH_DBF_TEXT(trace, 6, "hltchpar");
455 /* we don't have to handle this further */
456 intparm = 0;
457 }
458 if ((dstat & DEV_STAT_UNIT_EXCEP) ||
459 (dstat & DEV_STAT_UNIT_CHECK) ||
460 (cstat)) {
461 if (irb->esw.esw0.erw.cons) {
462 /* TODO: we should make this s390dbf */
463 PRINT_WARN("sense data available on channel %s.\n",
464 CHANNEL_ID(channel));
465 PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat);
466 HEXDUMP16(WARN,"irb: ",irb);
467 HEXDUMP16(WARN,"sense data: ",irb->ecw);
468 }
469 if (intparm == QETH_RCD_PARM) {
470 channel->state = CH_STATE_DOWN;
471 goto out;
472 }
473 rc = qeth_get_problem(cdev,irb);
474 if (rc) {
475 qeth_schedule_recovery(card);
476 goto out;
477 }
478 }
479
480 if (intparm == QETH_RCD_PARM) {
481 channel->state = CH_STATE_RCD_DONE;
482 goto out;
483 }
484 /* non-zero intparm is the kernel address of the command buffer */
485 if (intparm) {
486 buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
487 buffer->state = BUF_STATE_PROCESSED;
488 }
489 if (channel == &card->data)
490 return;
491
492 if (channel == &card->read &&
493 channel->state == CH_STATE_UP)
494 qeth_issue_next_read(card);
495
496 qeth_irq_tasklet((unsigned long)channel);
497 return;
498out:
499 wake_up(&card->wait_q);
500}
500
501/**
502 * tasklet function scheduled from irq handler
503 */
/*
 * Tasklet scheduled (or called inline) from qeth_irq: walk the channel's
 * command buffer ring from buf_no, invoke the callback of every buffer
 * in BUF_STATE_PROCESSED state, advance the ring index, and wake any
 * waiters on the card.
 */
504static void
505qeth_irq_tasklet(unsigned long data)
506{
507 struct qeth_card *card;
508 struct qeth_channel *channel;
509 struct qeth_cmd_buffer *iob;
510 __u8 index;
511
512 QETH_DBF_TEXT(trace,5,"irqtlet");
513 channel = (struct qeth_channel *) data;
514 iob = channel->iob;
515 index = channel->buf_no;
516 card = CARD_FROM_CDEV(channel->ccwdev);
517 while (iob[index].state == BUF_STATE_PROCESSED) {
518 if (iob[index].callback !=NULL) {
519 iob[index].callback(channel,iob + index);
520 }
521 index = (index + 1) % QETH_CMD_BUFFER_NO;
522 }
523 channel->buf_no = index;
524 wake_up(&card->wait_q);
525}
526
527static int qeth_stop_card(struct qeth_card *, int);
528
/*
 * Take the card offline: drop carrier, stop the card (abort with
 * -ERESTARTSYS if interrupted by a signal), then set all three ccw
 * devices offline. The first failing set_offline rc is reported to the
 * debug feature only; the function itself returns 0 in that case.
 * A card that was UP is left in RECOVER state so a later online
 * transition restores its configuration. Registered processes are
 * notified at the end.
 */
529static int
530__qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
531{
532 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
533 int rc = 0, rc2 = 0, rc3 = 0;
534 enum qeth_card_states recover_flag;
535
536 QETH_DBF_TEXT(setup, 3, "setoffl");
537 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
538
539 if (card->dev && netif_carrier_ok(card->dev))
540 netif_carrier_off(card->dev);
541 recover_flag = card->state;
542 if (qeth_stop_card(card, recovery_mode) == -ERESTARTSYS){
543 PRINT_WARN("Stopping card %s interrupted by user!\n",
544 CARD_BUS_ID(card));
545 return -ERESTARTSYS;
546 }
547 rc = ccw_device_set_offline(CARD_DDEV(card));
548 rc2 = ccw_device_set_offline(CARD_WDEV(card));
549 rc3 = ccw_device_set_offline(CARD_RDEV(card));
550 /* keep the first non-zero rc for the debug log */
551 if (!rc)
552 rc = (rc2) ? rc2 : rc3;
553 if (rc)
554 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
555 if (recover_flag == CARD_STATE_UP)
556 card->state = CARD_STATE_RECOVER;
557 qeth_notify_processes();
558 return 0;
559}
559
/* ccwgroup "set offline" entry point: normal (non-recovery) path. */
560static int
561qeth_set_offline(struct ccwgroup_device *cgdev)
562{
563 return __qeth_set_offline(cgdev, 0);
564}
565
566static int
567qeth_threads_running(struct qeth_card *card, unsigned long threads);
568
569
/*
 * ccwgroup removal callback: wait for all card threads to finish, take
 * the card offline (hard stop) if still online, unlink it from the
 * global card list, unregister its net_device, remove sysfs attributes
 * and free the card. Drops the device reference taken at probe time.
 */
570static void
571qeth_remove_device(struct ccwgroup_device *cgdev)
572{
573 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
574 unsigned long flags;
575
576 QETH_DBF_TEXT(setup, 3, "rmdev");
577 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
578
579 if (!card)
580 return;
581
582 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
583
584 if (cgdev->state == CCWGROUP_ONLINE){
585 card->use_hard_stop = 1;
586 qeth_set_offline(cgdev);
587 }
588 /* remove from our internal list */
589 write_lock_irqsave(&qeth_card_list.rwlock, flags);
590 list_del(&card->list);
591 write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
592 if (card->dev)
593 unregister_netdev(card->dev);
594 qeth_remove_device_attributes(&cgdev->dev);
595 qeth_free_card(card);
596 cgdev->dev.driver_data = NULL;
597 put_device(&cgdev->dev);
598}
599
600static int
601qeth_register_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
602static int
603qeth_deregister_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
604
605/**
606 * Add/remove address to/from card's ip list, i.e. try to add or remove
607 * reference to/from an IP address that is already registered on the card.
608 * Returns:
609 * 0 address was on card and its reference count has been adjusted,
610 * but is still > 0, so nothing has to be done
611 * also returns 0 if address was not on card and the todo was to delete
612 * the address -> there is also nothing to be done
613 * 1 address was not on card and the todo is to add it to the card's ip
614 * list
615 * -1 address was on card and its reference count has been decremented
616 * to <= 0 by the todo -> address must be removed from card
617 */
/* See the comment block above for the 0 / 1 / -1 return contract.
 * Matching is by MAC in layer-2 mode, otherwise by protocol, type and
 * address (+mask for IPv4, +prefix length for IPv6). Must be called
 * with card->ip_lock held. */
618static int
619__qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
620 struct qeth_ipaddr **__addr)
621{
622 struct qeth_ipaddr *addr;
623 int found = 0;
624
625 list_for_each_entry(addr, &card->ip_list, entry) {
626 if (card->options.layer2) {
627 if ((addr->type == todo->type) &&
628 (memcmp(&addr->mac, &todo->mac,
629 OSA_ADDR_LEN) == 0)) {
630 found = 1;
631 break;
632 }
633 continue;
634 }
635 if ((addr->proto == QETH_PROT_IPV4) &&
636 (todo->proto == QETH_PROT_IPV4) &&
637 (addr->type == todo->type) &&
638 (addr->u.a4.addr == todo->u.a4.addr) &&
639 (addr->u.a4.mask == todo->u.a4.mask)) {
640 found = 1;
641 break;
642 }
643 if ((addr->proto == QETH_PROT_IPV6) &&
644 (todo->proto == QETH_PROT_IPV6) &&
645 (addr->type == todo->type) &&
646 (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
647 (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
648 sizeof(struct in6_addr)) == 0)) {
649 found = 1;
650 break;
651 }
652 }
653 if (found) {
654 /* todo->users may be negative (delete requests) */
655 addr->users += todo->users;
656 if (addr->users <= 0){
657 *__addr = addr;
658 return -1;
659 } else {
660 /* for VIPA and RXIP limit refcount to 1 */
661 if (addr->type != QETH_IP_TYPE_NORMAL)
662 addr->users = 1;
663 return 0;
664 }
665 }
666 if (todo->users > 0) {
667 /* for VIPA and RXIP limit refcount to 1 */
668 if (todo->type != QETH_IP_TYPE_NORMAL)
669 todo->users = 1;
670 return 1;
671 } else
672 return 0;
673}
673
/*
 * Return 1 if @addr is present in @list, matching by protocol and
 * address. @same_type selects whether entries must have the same
 * (non-zero) or a different (zero) address type to count as a match.
 */
674static int
675__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
676 int same_type)
677{
678 struct qeth_ipaddr *tmp;
679
680 list_for_each_entry(tmp, list, entry) {
681 if ((tmp->proto == QETH_PROT_IPV4) &&
682 (addr->proto == QETH_PROT_IPV4) &&
683 ((same_type && (tmp->type == addr->type)) ||
684 (!same_type && (tmp->type != addr->type)) ) &&
685 (tmp->u.a4.addr == addr->u.a4.addr) ){
686 return 1;
687 }
688 if ((tmp->proto == QETH_PROT_IPV6) &&
689 (addr->proto == QETH_PROT_IPV6) &&
690 ((same_type && (tmp->type == addr->type)) ||
691 (!same_type && (tmp->type != addr->type)) ) &&
692 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
693 sizeof(struct in6_addr)) == 0) ) {
694 return 1;
695 }
696 }
697 return 0;
698}
699
700/*
701 * Add IP to be added to todo list. If there is already an "add todo"
702 * in this list we just increment the reference count.
703 * Returns 0 if we just incremented reference count.
704 */
/*
 * Queue an add (@add != 0) or delete request for @addr on the card's
 * to-be-done list. If a matching todo entry already exists its user
 * count is merged (and the entry dropped when the count reaches zero)
 * and 0 is returned — the caller keeps ownership of @addr. Otherwise
 * @addr itself is linked into the list and 1 is returned — ownership
 * transfers to the list. Must be called with card->ip_lock held.
 */
705static int
706__qeth_insert_ip_todo(struct qeth_card *card, struct qeth_ipaddr *addr, int add)
707{
708 struct qeth_ipaddr *tmp, *t;
709 int found = 0;
710
711 list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
712 /* one pending "delete all multicast" entry is enough */
713 if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
714 (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
715 return 0;
716 if (card->options.layer2) {
717 if ((tmp->type == addr->type) &&
718 (tmp->is_multicast == addr->is_multicast) &&
719 (memcmp(&tmp->mac, &addr->mac,
720 OSA_ADDR_LEN) == 0)) {
721 found = 1;
722 break;
723 }
724 continue;
725 }
726 if ((tmp->proto == QETH_PROT_IPV4) &&
727 (addr->proto == QETH_PROT_IPV4) &&
728 (tmp->type == addr->type) &&
729 (tmp->is_multicast == addr->is_multicast) &&
730 (tmp->u.a4.addr == addr->u.a4.addr) &&
731 (tmp->u.a4.mask == addr->u.a4.mask)) {
732 found = 1;
733 break;
734 }
735 if ((tmp->proto == QETH_PROT_IPV6) &&
736 (addr->proto == QETH_PROT_IPV6) &&
737 (tmp->type == addr->type) &&
738 (tmp->is_multicast == addr->is_multicast) &&
739 (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
740 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
741 sizeof(struct in6_addr)) == 0)) {
742 found = 1;
743 break;
744 }
745 }
746 if (found){
747 if (addr->users != 0)
748 tmp->users += addr->users;
749 else
750 tmp->users += add? 1:-1;
751 if (tmp->users == 0) {
752 /* add and delete cancelled out; drop the todo */
753 list_del(&tmp->entry);
754 kfree(tmp);
755 }
756 return 0;
757 } else {
758 if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
759 list_add(&addr->entry, card->ip_tbd_list);
760 else {
761 if (addr->users == 0)
762 addr->users += add? 1:-1;
763 if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
764 qeth_is_addr_covered_by_ipato(card, addr)){
765 QETH_DBF_TEXT(trace, 2, "tkovaddr");
766 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
767 }
768 list_add_tail(&addr->entry, card->ip_tbd_list);
769 }
770 return 1;
771 }
772}
771
772/**
773 * Remove IP address from list
774 */
/* Queue a delete request for @addr (logs the address, then inserts a
 * delete todo under ip_lock). Returns the __qeth_insert_ip_todo()
 * ownership result: 1 = @addr was taken over by the todo list. */
775static int
776qeth_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
777{
778 unsigned long flags;
779 int rc = 0;
780
781 QETH_DBF_TEXT(trace, 4, "delip");
782
783 if (card->options.layer2)
784 QETH_DBF_HEX(trace, 4, &addr->mac, 6);
785 else if (addr->proto == QETH_PROT_IPV4)
786 QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4);
787 else {
788 QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8);
789 QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8);
790 }
791 spin_lock_irqsave(&card->ip_lock, flags);
792 rc = __qeth_insert_ip_todo(card, addr, 0);
793 spin_unlock_irqrestore(&card->ip_lock, flags);
794 return rc;
795}
796
/* Queue an add request for @addr; mirror image of qeth_delete_ip().
 * Returns 1 when @addr was taken over by the todo list. */
797static int
798qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
799{
800 unsigned long flags;
801 int rc = 0;
802
803 QETH_DBF_TEXT(trace, 4, "addip");
804 if (card->options.layer2)
805 QETH_DBF_HEX(trace, 4, &addr->mac, 6);
806 else if (addr->proto == QETH_PROT_IPV4)
807 QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4);
808 else {
809 QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8);
810 QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8);
811 }
812 spin_lock_irqsave(&card->ip_lock, flags);
813 rc = __qeth_insert_ip_todo(card, addr, 1);
814 spin_unlock_irqrestore(&card->ip_lock, flags);
815 return rc;
816}
817
/*
 * Deregister and free all multicast addresses on the card's ip_list.
 * Called with ip_lock held (flags from the caller's irqsave); the lock
 * is dropped around the hardware deregistration call. Because the list
 * may change while unlocked, the scan restarts from the head after
 * every successful removal. On deregistration failure the entry is
 * re-linked and the scan continues.
 */
818static void
819__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags)
820{
821 struct qeth_ipaddr *addr, *tmp;
822 int rc;
823again:
824 list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
825 if (addr->is_multicast) {
826 list_del(&addr->entry);
827 spin_unlock_irqrestore(&card->ip_lock, *flags);
828 rc = qeth_deregister_addr_entry(card, addr);
829 spin_lock_irqsave(&card->ip_lock, *flags);
830 if (!rc) {
831 kfree(addr);
832 goto again;
833 } else
834 list_add(&addr->entry, &card->ip_list);
835 }
836 }
837}
838
/*
 * Apply all pending todos from card->ip_tbd_list to the hardware and to
 * card->ip_list. The todo list is swapped against a freshly allocated
 * empty one so new todos can queue up while this runs; register/
 * deregister calls are made with ip_lock dropped. Per-todo outcome from
 * __qeth_ref_ip_on_card(): 0 = refcount-only change, 1 = register new
 * address, -1 = deregister existing address. If the replacement list
 * head cannot be allocated the function bails out and leaves the todos
 * queued for a later attempt.
 */
839static void
840qeth_set_ip_addr_list(struct qeth_card *card)
841{
842 struct list_head *tbd_list;
843 struct qeth_ipaddr *todo, *addr;
844 unsigned long flags;
845 int rc;
846
847 QETH_DBF_TEXT(trace, 2, "sdiplist");
848 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
849
850 spin_lock_irqsave(&card->ip_lock, flags);
851 tbd_list = card->ip_tbd_list;
852 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
853 if (!card->ip_tbd_list) {
854 QETH_DBF_TEXT(trace, 0, "silnomem");
855 card->ip_tbd_list = tbd_list;
856 spin_unlock_irqrestore(&card->ip_lock, flags);
857 return;
858 } else
859 INIT_LIST_HEAD(card->ip_tbd_list);
860
861 while (!list_empty(tbd_list)){
862 todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
863 list_del(&todo->entry);
864 if (todo->type == QETH_IP_TYPE_DEL_ALL_MC){
865 __qeth_delete_all_mc(card, &flags);
866 kfree(todo);
867 continue;
868 }
869 rc = __qeth_ref_ip_on_card(card, todo, &addr);
870 if (rc == 0) {
871 /* nothing to be done; only adjusted refcount */
872 kfree(todo);
873 } else if (rc == 1) {
874 /* new entry to be added to on-card list */
875 spin_unlock_irqrestore(&card->ip_lock, flags);
876 rc = qeth_register_addr_entry(card, todo);
877 spin_lock_irqsave(&card->ip_lock, flags);
878 if (!rc)
879 list_add_tail(&todo->entry, &card->ip_list);
880 else
881 kfree(todo);
882 } else if (rc == -1) {
883 /* on-card entry to be removed */
884 list_del_init(&addr->entry);
885 spin_unlock_irqrestore(&card->ip_lock, flags);
886 rc = qeth_deregister_addr_entry(card, addr);
887 spin_lock_irqsave(&card->ip_lock, flags);
888 if (!rc)
889 kfree(addr);
890 else
891 list_add_tail(&addr->entry, &card->ip_list);
892 kfree(todo);
893 }
894 }
895 spin_unlock_irqrestore(&card->ip_lock, flags);
896 kfree(tbd_list);
897}
898
899static void qeth_delete_mc_addresses(struct qeth_card *);
900static void qeth_add_multicast_ipv4(struct qeth_card *);
901static void qeth_layer2_add_multicast(struct qeth_card *);
902#ifdef CONFIG_QETH_IPV6
903static void qeth_add_multicast_ipv6(struct qeth_card *);
904#endif
905
/* Mark @thread as requested to start. Fails with -EPERM when the
 * thread is not allowed or a start is already pending. */
906static int
907qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread)
908{
909 unsigned long flags;
910
911 spin_lock_irqsave(&card->thread_mask_lock, flags);
912 if ( !(card->thread_allowed_mask & thread) ||
913 (card->thread_start_mask & thread) ) {
914 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
915 return -EPERM;
916 }
917 card->thread_start_mask |= thread;
918 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
919 return 0;
920}
921
/* Clear @thread's pending-start bit and wake waiters on the card. */
922static void
923qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
924{
925 unsigned long flags;
926
927 spin_lock_irqsave(&card->thread_mask_lock, flags);
928 card->thread_start_mask &= ~thread;
929 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
930 wake_up(&card->wait_q);
931}
932
/* Clear @thread's running bit and wake waiters (e.g. remove_device
 * waiting for all threads to finish). */
933static void
934qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
935{
936 unsigned long flags;
937
938 spin_lock_irqsave(&card->thread_mask_lock, flags);
939 card->thread_running_mask &= ~thread;
940 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
941 wake_up(&card->wait_q);
942}
943
/*
 * Try to transition @thread from "start requested" to "running".
 * Returns 1 on success (start bit consumed, running bit set),
 * -EPERM when the thread is not allowed or already running,
 * 0 when no start was requested (caller keeps waiting).
 */
944static int
945__qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
946{
947 unsigned long flags;
948 int rc = 0;
949
950 spin_lock_irqsave(&card->thread_mask_lock, flags);
951 if (card->thread_start_mask & thread){
952 if ((card->thread_allowed_mask & thread) &&
953 !(card->thread_running_mask & thread)){
954 rc = 1;
955 card->thread_start_mask &= ~thread;
956 card->thread_running_mask |= thread;
957 } else
958 rc = -EPERM;
959 }
960 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
961 return rc;
962}
963
/* Block until __qeth_do_run_thread() yields a decision (>= 0 would
 * never hold for -EPERM, so waiting ends on 0 or 1). Returns that
 * result: 1 = proceed, -EPERM = give up. */
964static int
965qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
966{
967 int rc = 0;
968
969 wait_event(card->wait_q,
970 (rc = __qeth_do_run_thread(card, thread)) >= 0);
971 return rc;
972}
973
/*
 * Recovery kernel thread: force the card offline with a hard stop and
 * bring it back online in recovery mode. Clears its own start/running
 * bits afterwards so no additional scheduled recovery runs, and logs
 * the outcome. Always returns 0 (thread exit code).
 */
974static int
975qeth_recover(void *ptr)
976{
977 struct qeth_card *card;
978 int rc = 0;
979
980 card = (struct qeth_card *) ptr;
981 daemonize("qeth_recover");
982 QETH_DBF_TEXT(trace,2,"recover1");
983 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
984 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
985 return 0;
986 QETH_DBF_TEXT(trace,2,"recover2");
987 PRINT_WARN("Recovery of device %s started ...\n",
988 CARD_BUS_ID(card));
989 card->use_hard_stop = 1;
990 __qeth_set_offline(card->gdev,1);
991 rc = __qeth_set_online(card->gdev,1);
992 /* don't run another scheduled recovery */
993 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
994 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
995 if (!rc)
996 PRINT_INFO("Device %s successfully recovered!\n",
997 CARD_BUS_ID(card));
998 else
999 PRINT_INFO("Device %s could not be recovered!\n",
1000 CARD_BUS_ID(card));
1001 return 0;
1002}
1003
/* Request asynchronous card recovery: set the recover thread's start
 * bit and, if that succeeded, schedule the thread-starter work item.
 * Safe to call from interrupt context (see qeth_irq). */
1004void
1005qeth_schedule_recovery(struct qeth_card *card)
1006{
1007 QETH_DBF_TEXT(trace,2,"startrec");
1008 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
1009 schedule_work(&card->kernel_thread_starter);
1010}
1011
/* Return non-zero if a start of @thread is pending (start mask bit
 * set); logs all three thread masks to the trace area. */
1012static int
1013qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1014{
1015 unsigned long flags;
1016 int rc = 0;
1017
1018 spin_lock_irqsave(&card->thread_mask_lock, flags);
1019 QETH_DBF_TEXT_(trace, 4, " %02x%02x%02x",
1020 (u8) card->thread_start_mask,
1021 (u8) card->thread_allowed_mask,
1022 (u8) card->thread_running_mask);
1023 rc = (card->thread_start_mask & thread);
1024 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1025 return rc;
1026}
1027
/* Work item that actually spawns pending card threads. Does nothing
 * while both control channels are down; currently the only thread
 * spawned here is the recovery thread. */
1028static void
1029qeth_start_kernel_thread(struct work_struct *work)
1030{
1031 struct qeth_card *card = container_of(work, struct qeth_card, kernel_thread_starter);
1032 QETH_DBF_TEXT(trace , 2, "strthrd");
1033
1034 if (card->read.state != CH_STATE_UP &&
1035 card->write.state != CH_STATE_UP)
1036 return;
1037 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
1038 kernel_thread(qeth_recover, (void *) card, SIGCHLD);
1039}
1040
1041
/* Set the default option values on a fresh card: no routing, default
 * checksumming, token-ring broadcast/MAC-address modes, layer 2 only
 * for OSN cards, statistics off.
 * NOTE(review): "intial" in the name is a historic misspelling of
 * "initial"; kept as-is since it is referenced by qeth_setup_card(). */
1042static void
1043qeth_set_intial_options(struct qeth_card *card)
1044{
1045 card->options.route4.type = NO_ROUTER;
1046#ifdef CONFIG_QETH_IPV6
1047 card->options.route6.type = NO_ROUTER;
1048#endif /* QETH_IPV6 */
1049 card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
1050 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
1051 card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
1052 card->options.fake_broadcast = 0;
1053 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1054 card->options.fake_ll = 0;
1055 if (card->info.type == QETH_CARD_TYPE_OSN)
1056 card->options.layer2 = 1;
1057 else
1058 card->options.layer2 = 0;
1059 card->options.performance_stats = 0;
1060 card->options.rx_sg_cb = QETH_RX_SG_CB;
1061}
1062
1063/**
1064 * initialize channels ,card and all state machines
1065 */
/*
 * Initialize all card state to its DOWN/empty defaults: channel and
 * card states, locks, thread masks, the work item, IP and IPATO lists,
 * option defaults, and the QDIO bookkeeping. Returns 0 on success or
 * -ENOMEM if the to-be-done IP list head cannot be allocated.
 */
1066static int
1067qeth_setup_card(struct qeth_card *card)
1068{
1069
1070 QETH_DBF_TEXT(setup, 2, "setupcrd");
1071 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
1072
1073 card->read.state = CH_STATE_DOWN;
1074 card->write.state = CH_STATE_DOWN;
1075 card->data.state = CH_STATE_DOWN;
1076 card->state = CARD_STATE_DOWN;
1077 card->lan_online = 0;
1078 card->use_hard_stop = 0;
1079 card->dev = NULL;
1080#ifdef CONFIG_QETH_VLAN
1081 spin_lock_init(&card->vlanlock);
1082 card->vlangrp = NULL;
1083#endif
1084 spin_lock_init(&card->lock);
1085 spin_lock_init(&card->ip_lock);
1086 spin_lock_init(&card->thread_mask_lock);
1087 card->thread_start_mask = 0;
1088 card->thread_allowed_mask = 0;
1089 card->thread_running_mask = 0;
1090 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1091 INIT_LIST_HEAD(&card->ip_list);
1092 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1093 if (!card->ip_tbd_list) {
1094 QETH_DBF_TEXT(setup, 0, "iptbdnom");
1095 return -ENOMEM;
1096 }
1097 INIT_LIST_HEAD(card->ip_tbd_list);
1098 INIT_LIST_HEAD(&card->cmd_waiter_list);
1099 init_waitqueue_head(&card->wait_q);
1100 /* initial options */
1101 qeth_set_intial_options(card);
1102 /* IP address takeover */
1103 INIT_LIST_HEAD(&card->ipato.entries);
1104 card->ipato.enabled = 0;
1105 card->ipato.invert4 = 0;
1106 card->ipato.invert6 = 0;
1107 /* init QDIO stuff */
1108 qeth_init_qdio_info(card);
1109 return 0;
1110}
1111
/*
 * Inspect the channel-path descriptor of the data device to detect
 * hardware restricted to a single output queue (CHPP field bit 6 set).
 * Returns non-zero for such "1920" devices; returns 0 when the
 * descriptor cannot be obtained.
 */
1112static int
1113is_1920_device (struct qeth_card *card)
1114{
1115 int single_queue = 0;
1116 struct ccw_device *ccwdev;
1117 struct channelPath_dsc {
1118 u8 flags;
1119 u8 lsn;
1120 u8 desc;
1121 u8 chpid;
1122 u8 swla;
1123 u8 zeroes;
1124 u8 chla;
1125 u8 chpp;
1126 } *chp_dsc;
1127
1128 QETH_DBF_TEXT(setup, 2, "chk_1920");
1129
1130 ccwdev = card->data.ccwdev;
1131 chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
1132 if (chp_dsc != NULL) {
1133 /* CHPP field bit 6 == 1 -> single queue */
1134 single_queue = ((chp_dsc->chpp & 0x02) == 0x02);
1135 kfree(chp_dsc);
1136 }
1137 QETH_DBF_TEXT_(setup, 2, "rc:%x", single_queue);
1138 return single_queue;
1139}
1140
/*
 * Match the read device's type/model against the known_devices table to
 * set card type, output queue count and multicast handling; falls back
 * to a single queue on hardware without priority queueing support.
 * Returns 0 on a match, -ENOENT for an unknown device.
 */
1141static int
1142qeth_determine_card_type(struct qeth_card *card)
1143{
1144 int i = 0;
1145
1146 QETH_DBF_TEXT(setup, 2, "detcdtyp");
1147
1148 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1149 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1150 while (known_devices[i][4]) {
1151 if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
1152 (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
1153 card->info.type = known_devices[i][4];
1154 card->qdio.no_out_queues = known_devices[i][8];
1155 card->info.is_multicast_different = known_devices[i][9];
1156 if (is_1920_device(card)) {
1157 PRINT_INFO("Priority Queueing not able "
1158 "due to hardware limitations!\n");
1159 card->qdio.no_out_queues = 1;
1160 card->qdio.default_out_queue = 0;
1161 }
1162 return 0;
1163 }
1164 i++;
1165 }
1166 card->info.type = QETH_CARD_TYPE_UNKNOWN;
1167 PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card));
1168 return -ENOENT;
1169}
1170
1171static int
1172qeth_probe_device(struct ccwgroup_device *gdev)
1173{
1174 struct qeth_card *card;
1175 struct device *dev;
1176 unsigned long flags;
1177 int rc;
1178
1179 QETH_DBF_TEXT(setup, 2, "probedev");
1180
1181 dev = &gdev->dev;
1182 if (!get_device(dev))
1183 return -ENODEV;
1184
1185 QETH_DBF_TEXT_(setup, 2, "%s", gdev->dev.bus_id);
1186
1187 card = qeth_alloc_card();
1188 if (!card) {
1189 put_device(dev);
1190 QETH_DBF_TEXT_(setup, 2, "1err%d", -ENOMEM);
1191 return -ENOMEM;
1192 }
1193 card->read.ccwdev = gdev->cdev[0];
1194 card->write.ccwdev = gdev->cdev[1];
1195 card->data.ccwdev = gdev->cdev[2];
1196 gdev->dev.driver_data = card;
1197 card->gdev = gdev;
1198 gdev->cdev[0]->handler = qeth_irq;
1199 gdev->cdev[1]->handler = qeth_irq;
1200 gdev->cdev[2]->handler = qeth_irq;
1201
1202 if ((rc = qeth_determine_card_type(card))){
1203 PRINT_WARN("%s: not a valid card type\n", __func__);
1204 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
1205 put_device(dev);
1206 qeth_free_card(card);
1207 return rc;
1208 }
1209 if ((rc = qeth_setup_card(card))){
1210 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1211 put_device(dev);
1212 qeth_free_card(card);
1213 return rc;
1214 }
1215 rc = qeth_create_device_attributes(dev);
1216 if (rc) {
1217 put_device(dev);
1218 qeth_free_card(card);
1219 return rc;
1220 }
1221 /* insert into our internal list */
1222 write_lock_irqsave(&qeth_card_list.rwlock, flags);
1223 list_add_tail(&card->list, &qeth_card_list.list);
1224 write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
1225 return rc;
1226}
1227
1228
/*
 * Read the device's configuration data (RCD) via the data channel.
 * On success *buffer points to a kzalloc'ed copy of the RCD record
 * (caller must kfree it) and *length holds its size; on failure
 * both are cleared and a negative errno is returned.
 */
static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
		int *length)
{
	struct ciw *ciw;
	char *rcd_buf;
	int ret;
	struct qeth_channel *channel = &card->data;
	unsigned long flags;

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	/* GFP_DMA: the CCW data address is a 31-bit physical address */
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf)
		return -ENOMEM;

	/* build the single-CCW channel program and start it with timeout */
	channel->ccw.cmd_code = ciw->cmd;
	channel->ccw.cda = (__u32) __pa (rcd_buf);
	channel->ccw.count = ciw->count;
	channel->ccw.flags = CCW_FLAG_SLI;
	channel->state = CH_STATE_RCD;
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
				       QETH_RCD_PARM, LPM_ANYPATH, 0,
				       QETH_RCD_TIMEOUT);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	if (!ret)
		/* the irq handler moves the channel to RCD_DONE or DOWN */
		wait_event(card->wait_q,
			   (channel->state == CH_STATE_RCD_DONE ||
			    channel->state == CH_STATE_DOWN));
	if (channel->state == CH_STATE_DOWN)
		ret = -EIO;
	else
		channel->state = CH_STATE_DOWN;
	if (ret) {
		kfree(rcd_buf);
		*buffer = NULL;
		*length = 0;
	} else {
		*length = ciw->count;
		*buffer = rcd_buf;
	}
	return ret;
}
1276
/*
 * Extract unit addressing information (CHPID, unit address, CULA)
 * from the device's configuration data, and detect a z/VM guest LAN
 * device by the EBCDIC string "VM" at offset 0x10 of the record.
 */
static int
qeth_get_unitaddr(struct qeth_card *card)
{
	int length;
	char *prcd;
	int rc;

	QETH_DBF_TEXT(setup, 2, "getunit");
	rc = qeth_read_conf_data(card, (void **) &prcd, &length);
	if (rc) {
		PRINT_ERR("qeth_read_conf_data for device %s returned %i\n",
			  CARD_DDEV_ID(card), rc);
		return rc;
	}
	/* fixed byte offsets within the RCD record */
	card->info.chpid = prcd[30];
	card->info.unit_addr2 = prcd[31];
	card->info.cula = prcd[63];
	card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
			       (prcd[0x11] == _ascebc['M']));
	kfree(prcd);
	return 0;
}
1299
/*
 * Preset the well-known write-side handshake tokens; the read-side
 * (_r) tokens are filled in later from the adapter's replies.
 */
static void
qeth_init_tokens(struct qeth_card *card)
{
	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;
}
1309
1310static inline __u16
1311raw_devno_from_bus_id(char *id)
1312{
1313 id += (strlen(id) - 4);
1314 return (__u16) simple_strtoul(id, &id, 16);
1315}
/**
 * setup channel
 *
 * Prepare the channel's CCW for a transfer of @len bytes at @iob:
 * the read channel gets a copy of READ_CCW, the write channel a copy
 * of WRITE_CCW, then count and physical data address are filled in.
 */
static void
qeth_setup_ccw(struct qeth_channel *channel,unsigned char *iob, __u32 len)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(trace, 4, "setupccw");
	card = CARD_FROM_CDEV(channel->ccwdev);
	if (channel == &card->read)
		memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
	else
		memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
	channel->ccw.count = len;
	channel->ccw.cda = (__u32) __pa(iob);
}
1333
1334/**
1335 * get free buffer for ccws (IDX activation, lancmds,ipassists...)
1336 */
1337static struct qeth_cmd_buffer *
1338__qeth_get_buffer(struct qeth_channel *channel)
1339{
1340 __u8 index;
1341
1342 QETH_DBF_TEXT(trace, 6, "getbuff");
1343 index = channel->io_buf_no;
1344 do {
1345 if (channel->iob[index].state == BUF_STATE_FREE) {
1346 channel->iob[index].state = BUF_STATE_LOCKED;
1347 channel->io_buf_no = (channel->io_buf_no + 1) %
1348 QETH_CMD_BUFFER_NO;
1349 memset(channel->iob[index].data, 0, QETH_BUFSIZE);
1350 return channel->iob + index;
1351 }
1352 index = (index + 1) % QETH_CMD_BUFFER_NO;
1353 } while(index != channel->io_buf_no);
1354
1355 return NULL;
1356}
1357
/**
 * release command buffer
 *
 * Reset @iob to its pristine free state -- zeroed data, the default
 * control-data callback, cleared rc -- under the channel's iob_lock
 * so concurrent allocators see a consistent buffer.
 */
static void
qeth_release_buffer(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
{
	unsigned long flags;

	QETH_DBF_TEXT(trace, 6, "relbuff");
	spin_lock_irqsave(&channel->iob_lock, flags);
	memset(iob->data, 0, QETH_BUFSIZE);
	iob->state = BUF_STATE_FREE;
	iob->callback = qeth_send_control_data_cb;
	iob->rc = 0;
	spin_unlock_irqrestore(&channel->iob_lock, flags);
}
1374
/*
 * Locked wrapper around __qeth_get_buffer(); returns NULL when all
 * command buffers of the channel are currently in use.
 */
static struct qeth_cmd_buffer *
qeth_get_buffer(struct qeth_channel *channel)
{
	struct qeth_cmd_buffer *buffer = NULL;
	unsigned long flags;

	spin_lock_irqsave(&channel->iob_lock, flags);
	buffer = __qeth_get_buffer(channel);
	spin_unlock_irqrestore(&channel->iob_lock, flags);
	return buffer;
}
1386
/*
 * Sleep on the channel's wait queue until a command buffer becomes
 * available; unlike qeth_get_buffer() this never returns NULL.
 */
static struct qeth_cmd_buffer *
qeth_wait_for_buffer(struct qeth_channel *channel)
{
	struct qeth_cmd_buffer *buffer;
	wait_event(channel->wait_q,
		   ((buffer = qeth_get_buffer(channel)) != NULL));
	return buffer;
}
1395
1396static void
1397qeth_clear_cmd_buffers(struct qeth_channel *channel)
1398{
1399 int cnt;
1400
1401 for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1402 qeth_release_buffer(channel,&channel->iob[cnt]);
1403 channel->buf_no = 0;
1404 channel->io_buf_no = 0;
1405}
1406
/**
 * start IDX for read and write channel
 *
 * Second phase of IDX activation: post a read on the channel and
 * wait (up to QETH_TIMEOUT) for the reply that moves the channel to
 * CH_STATE_UP.  @idx_reply_cb is invoked from the irq path to parse
 * the answer.  Returns 0 on success, -ETIME on timeout, or the
 * ccw_device_start error code.
 */
static int
qeth_idx_activate_get_answer(struct qeth_channel *channel,
			     void (*idx_reply_cb)(struct qeth_channel *,
						  struct qeth_cmd_buffer *))
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;
	int rc;
	struct qeth_card *card;

	QETH_DBF_TEXT(setup, 2, "idxanswr");
	card = CARD_FROM_CDEV(channel->ccwdev);
	iob = qeth_get_buffer(channel);
	iob->callback = idx_reply_cb;
	memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
	channel->ccw.count = QETH_BUFSIZE;
	channel->ccw.cda = (__u32) __pa(iob->data);

	/* claim the channel: wait until no interrupt is pending on it */
	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(setup, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      &channel->ccw,(addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc) {
		PRINT_ERR("qeth: Error2 in activating channel rc=%d\n",rc);
		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
		/* release the channel claim and wake other claimants */
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			 channel->state == CH_STATE_UP, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_UP){
		rc = -ETIME;
		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
		qeth_clear_cmd_buffers(channel);
	} else
		rc = 0;
	return rc;
}
1455
/*
 * First phase of IDX activation: build the IDX ACTIVATE request
 * (READ or WRITE variant depending on @channel), start it, and wait
 * for the channel to reach CH_STATE_ACTIVATING before fetching the
 * adapter's answer via qeth_idx_activate_get_answer().
 */
static int
qeth_idx_activate_channel(struct qeth_channel *channel,
			  void (*idx_reply_cb)(struct qeth_channel *,
					       struct qeth_cmd_buffer *))
{
	struct qeth_card *card;
	struct qeth_cmd_buffer *iob;
	unsigned long flags;
	__u16 temp;
	int rc;

	card = CARD_FROM_CDEV(channel->ccwdev);

	QETH_DBF_TEXT(setup, 2, "idxactch");

	iob = qeth_get_buffer(channel);
	iob->callback = idx_reply_cb;
	memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
	channel->ccw.count = IDX_ACTIVATE_SIZE;
	channel->ccw.cda = (__u32) __pa(iob->data);
	if (channel == &card->write) {
		memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
		/* only the write channel consumes a sequence number */
		card->seqno.trans_hdr++;
	} else {
		memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	}
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w,QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level,sizeof(__u16));
	/* identify the qdio (data) device to the adapter */
	temp = raw_devno_from_bus_id(CARD_DDEV_ID(card));
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);

	/* claim the channel: wait until no interrupt is pending on it */
	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(setup, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      &channel->ccw,(addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc) {
		PRINT_ERR("qeth: Error1 in activating channel. rc=%d\n",rc);
		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_ACTIVATING) {
		PRINT_WARN("qeth: IDX activate timed out!\n");
		QETH_DBF_TEXT_(setup, 2, "2err%d", -ETIME);
		qeth_clear_cmd_buffers(channel);
		return -ETIME;
	}
	return qeth_idx_activate_get_answer(channel,idx_reply_cb);
}
1522
/*
 * Map our own function level to the value the peer is expected to
 * report back during IDX activation.
 */
static int
qeth_peer_func_level(int level)
{
	int low_byte = level & 0xff;

	if (low_byte == 8)
		return low_byte + 0x400;
	if (((level >> 8) & 3) == 1)
		return low_byte + 0x200;
	return level;
}
1532
/*
 * IRQ-path callback for the IDX ACTIVATE reply on the write channel.
 * Checks for a positive response and a matching peer function level
 * before declaring the channel CH_STATE_UP; the buffer is always
 * released.
 */
static void
qeth_idx_write_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	__u16 temp;

	QETH_DBF_TEXT(setup ,2, "idxwrcb");

	if (channel->state == CH_STATE_DOWN) {
		/* channel not yet activated: record activation progress */
		channel->state = CH_STATE_ACTIVATING;
		goto out;
	}
	card = CARD_FROM_CDEV(channel->ccwdev);

	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
		/* cause 0x19: adapter dedicated to another host */
		if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
			PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
				  "adapter exclusively used by another host\n",
				  CARD_WDEV_ID(card));
		else
			PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
				  "negative reply\n", CARD_WDEV_ID(card));
		goto out;
	}
	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	/* bit 0x0100 of the reported level is ignored for the match */
	if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
		PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
			   "function level mismatch "
			   "(sent: 0x%x, received: 0x%x)\n",
			   CARD_WDEV_ID(card), card->info.func_level, temp);
		goto out;
	}
	channel->state = CH_STATE_UP;
out:
	qeth_release_buffer(channel, iob);
}
1569
1570static int
1571qeth_check_idx_response(unsigned char *buffer)
1572{
1573 if (!buffer)
1574 return 0;
1575
1576 QETH_DBF_HEX(control, 2, buffer, QETH_DBF_CONTROL_LEN);
1577 if ((buffer[2] & 0xc0) == 0xc0) {
1578 PRINT_WARN("received an IDX TERMINATE "
1579 "with cause code 0x%02x%s\n",
1580 buffer[4],
1581 ((buffer[4] == 0x22) ?
1582 " -- try another portname" : ""));
1583 QETH_DBF_TEXT(trace, 2, "ckidxres");
1584 QETH_DBF_TEXT(trace, 2, " idxterm");
1585 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
1586 return -EIO;
1587 }
1588 return 0;
1589}
1590
/*
 * IRQ-path callback for the IDX ACTIVATE reply on the read channel.
 * Validates the reply, then stores the issuer token and microcode
 * level from the answer and moves the channel to CH_STATE_UP.  The
 * buffer is always released.
 */
static void
qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	__u16 temp;

	QETH_DBF_TEXT(setup , 2, "idxrdcb");
	if (channel->state == CH_STATE_DOWN) {
		/* channel not yet activated: record activation progress */
		channel->state = CH_STATE_ACTIVATING;
		goto out;
	}

	card = CARD_FROM_CDEV(channel->ccwdev);
	if (qeth_check_idx_response(iob->data)) {
		goto out;
	}
	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
		/* cause 0x19: adapter dedicated to another host */
		if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
			PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
				  "adapter exclusively used by another host\n",
				  CARD_RDEV_ID(card));
		else
			PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
				  "negative reply\n", CARD_RDEV_ID(card));
		goto out;
	}

/**
 * temporary fix for microcode bug
 * to revert it,replace OR by AND
 */
	if ( (!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
	     (card->info.type == QETH_CARD_TYPE_OSAE) )
		card->info.portname_required = 1;

	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (temp != qeth_peer_func_level(card->info.func_level)) {
		PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
			   "level mismatch (sent: 0x%x, received: 0x%x)\n",
			   CARD_RDEV_ID(card), card->info.func_level, temp);
		goto out;
	}
	/* remember the peer's token and microcode level for later use */
	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
	channel->state = CH_STATE_UP;
out:
	qeth_release_buffer(channel,iob);
}
1642
/*
 * Re-arm the read channel with a fresh command buffer so the next
 * message from the adapter can be received.  On failure to start
 * the CCW, recovery is scheduled.  Returns 0 or a negative errno.
 */
static int
qeth_issue_next_read(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(trace,5,"issnxrd");
	if (card->read.state != CH_STATE_UP)
		return -EIO;
	iob = qeth_get_buffer(&card->read);
	if (!iob) {
		PRINT_WARN("issue_next_read failed: no iob available!\n");
		return -ENOMEM;
	}
	qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
	QETH_DBF_TEXT(trace, 6, "noirqpnd");
	rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
			      (addr_t) iob, 0, 0);
	if (rc) {
		PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc);
		atomic_set(&card->read.irq_pending, 0);
		qeth_schedule_recovery(card);
		wake_up(&card->wait_q);
	}
	return rc;
}
1669
1670static struct qeth_reply *
1671qeth_alloc_reply(struct qeth_card *card)
1672{
1673 struct qeth_reply *reply;
1674
1675 reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
1676 if (reply){
1677 atomic_set(&reply->refcnt, 1);
1678 atomic_set(&reply->received, 0);
1679 reply->card = card;
1680 };
1681 return reply;
1682}
1683
/* Take an additional reference on @reply. */
static void
qeth_get_reply(struct qeth_reply *reply)
{
	WARN_ON(atomic_read(&reply->refcnt) <= 0);
	atomic_inc(&reply->refcnt);
}
1690
/* Drop a reference on @reply; frees it when the last one is gone. */
static void
qeth_put_reply(struct qeth_reply *reply)
{
	WARN_ON(atomic_read(&reply->refcnt) <= 0);
	if (atomic_dec_and_test(&reply->refcnt))
		kfree(reply);
}
1698
1699static void
1700qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, struct qeth_card *card)
1701{
1702 int rc;
1703 int com;
1704 char * ipa_name;
1705
1706 com = cmd->hdr.command;
1707 rc = cmd->hdr.return_code;
1708 ipa_name = qeth_get_ipa_cmd_name(com);
1709
1710 PRINT_ERR("%s(x%X) for %s returned x%X \"%s\"\n", ipa_name, com,
1711 QETH_CARD_IFNAME(card), rc, qeth_get_ipa_msg(rc));
1712}
1713
1714static struct qeth_ipa_cmd *
1715qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1716{
1717 struct qeth_ipa_cmd *cmd = NULL;
1718
1719 QETH_DBF_TEXT(trace,5,"chkipad");
1720 if (IS_IPA(iob->data)){
1721 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
1722 if (IS_IPA_REPLY(cmd)) {
1723 if (cmd->hdr.return_code)
1724 qeth_issue_ipa_msg(cmd, card);
1725 return cmd;
1726 }
1727 else {
1728 switch (cmd->hdr.command) {
1729 case IPA_CMD_STOPLAN:
1730 PRINT_WARN("Link failure on %s (CHPID 0x%X) - "
1731 "there is a network problem or "
1732 "someone pulled the cable or "
1733 "disabled the port.\n",
1734 QETH_CARD_IFNAME(card),
1735 card->info.chpid);
1736 card->lan_online = 0;
1737 if (card->dev && netif_carrier_ok(card->dev))
1738 netif_carrier_off(card->dev);
1739 return NULL;
1740 case IPA_CMD_STARTLAN:
1741 PRINT_INFO("Link reestablished on %s "
1742 "(CHPID 0x%X). Scheduling "
1743 "IP address reset.\n",
1744 QETH_CARD_IFNAME(card),
1745 card->info.chpid);
1746 netif_carrier_on(card->dev);
1747 qeth_schedule_recovery(card);
1748 return NULL;
1749 case IPA_CMD_MODCCID:
1750 return cmd;
1751 case IPA_CMD_REGISTER_LOCAL_ADDR:
1752 QETH_DBF_TEXT(trace,3, "irla");
1753 break;
1754 case IPA_CMD_UNREGISTER_LOCAL_ADDR:
1755 QETH_DBF_TEXT(trace,3, "urla");
1756 break;
1757 default:
1758 PRINT_WARN("Received data is IPA "
1759 "but not a reply!\n");
1760 break;
1761 }
1762 }
1763 }
1764 return cmd;
1765}
1766
/**
 * wake all waiting ipa commands
 *
 * Fail every pending reply with -EIO and wake its waiter; used when
 * the control connection breaks down.  Each reply is pinned with an
 * extra reference across the wake-up.
 */
static void
qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_reply *reply, *r;
	unsigned long flags;

	QETH_DBF_TEXT(trace, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
		qeth_get_reply(reply);
		reply->rc = -EIO;
		atomic_inc(&reply->received);
		list_del_init(&reply->list);
		wake_up(&reply->wait_q);
		qeth_put_reply(reply);
	}
	spin_unlock_irqrestore(&card->lock, flags);
}
1789
/*
 * Channel callback for inbound control data: match the buffer
 * against the pending replies on cmd_waiter_list (by IPA sequence
 * number, or the reserved IDX seqno), run the reply callback and
 * wake the waiter.  Always records the peer's PDU ack sequence
 * number and releases the buffer.
 */
static void
qeth_send_control_data_cb(struct qeth_channel *channel,
			  struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	struct qeth_reply *reply, *r;
	struct qeth_ipa_cmd *cmd;
	unsigned long flags;
	int keep_reply;

	QETH_DBF_TEXT(trace,4,"sndctlcb");

	card = CARD_FROM_CDEV(channel->ccwdev);
	if (qeth_check_idx_response(iob->data)) {
		/* IDX TERMINATE: fail all waiters, trigger recovery */
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
		goto out;
	}

	cmd = qeth_check_ipa_data(card, iob);
	if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
		goto out;
	/*in case of OSN : check if cmd is set */
	if (card->info.type == QETH_CARD_TYPE_OSN &&
	    cmd &&
	    cmd->hdr.command != IPA_CMD_STARTLAN &&
	    card->osn_info.assist_cb != NULL) {
		card->osn_info.assist_cb(card->dev, cmd);
		goto out;
	}

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
		if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
		    ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
			qeth_get_reply(reply);
			list_del_init(&reply->list);
			/* run the callback without card->lock held */
			spin_unlock_irqrestore(&card->lock, flags);
			keep_reply = 0;
			if (reply->callback != NULL) {
				if (cmd) {
					reply->offset = (__u16)((char*)cmd -
						(char *)iob->data);
					keep_reply = reply->callback(card,
							reply,
							(unsigned long)cmd);
				} else
					keep_reply = reply->callback(card,
							reply,
							(unsigned long)iob);
			}
			if (cmd)
				reply->rc = (u16) cmd->hdr.return_code;
			else if (iob->rc)
				reply->rc = iob->rc;
			if (keep_reply) {
				/* callback expects more data: re-queue */
				spin_lock_irqsave(&card->lock, flags);
				list_add_tail(&reply->list,
					      &card->cmd_waiter_list);
				spin_unlock_irqrestore(&card->lock, flags);
			} else {
				atomic_inc(&reply->received);
				wake_up(&reply->wait_q);
			}
			qeth_put_reply(reply);
			goto out;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	qeth_release_buffer(channel,iob);
}
1865
/*
 * Fill in the transport and PDU sequence numbers of a control
 * command and set up the write channel CCW for it.  Advances the
 * card's trans_hdr and pdu_hdr sequence counters.
 */
static void
qeth_prepare_control_data(struct qeth_card *card, int len,
			  struct qeth_cmd_buffer *iob)
{
	qeth_setup_ccw(&card->write,iob->data,len);
	/* once sent, the buffer only needs to be returned to the pool */
	iob->callback = qeth_release_buffer;

	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
	       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.trans_hdr++;
	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
	QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
}
1883
1884static int
1885qeth_send_control_data(struct qeth_card *card, int len,
1886 struct qeth_cmd_buffer *iob,
1887 int (*reply_cb)
1888 (struct qeth_card *, struct qeth_reply*, unsigned long),
1889 void *reply_param)
1890
1891{
1892 int rc;
1893 unsigned long flags;
1894 struct qeth_reply *reply = NULL;
1895 unsigned long timeout;
1896
1897 QETH_DBF_TEXT(trace, 2, "sendctl");
1898
1899 reply = qeth_alloc_reply(card);
1900 if (!reply) {
1901 PRINT_WARN("Could no alloc qeth_reply!\n");
1902 return -ENOMEM;
1903 }
1904 reply->callback = reply_cb;
1905 reply->param = reply_param;
1906 if (card->state == CARD_STATE_DOWN)
1907 reply->seqno = QETH_IDX_COMMAND_SEQNO;
1908 else
1909 reply->seqno = card->seqno.ipa++;
1910 init_waitqueue_head(&reply->wait_q);
1911 spin_lock_irqsave(&card->lock, flags);
1912 list_add_tail(&reply->list, &card->cmd_waiter_list);
1913 spin_unlock_irqrestore(&card->lock, flags);
1914 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1915
1916 while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
1917 qeth_prepare_control_data(card, len, iob);
1918
1919 if (IS_IPA(iob->data))
1920 timeout = jiffies + QETH_IPA_TIMEOUT;
1921 else
1922 timeout = jiffies + QETH_TIMEOUT;
1923
1924 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1925 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1926 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1927 (addr_t) iob, 0, 0);
1928 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1929 if (rc){
1930 PRINT_WARN("qeth_send_control_data: "
1931 "ccw_device_start rc = %i\n", rc);
1932 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
1933 spin_lock_irqsave(&card->lock, flags);
1934 list_del_init(&reply->list);
1935 qeth_put_reply(reply);
1936 spin_unlock_irqrestore(&card->lock, flags);
1937 qeth_release_buffer(iob->channel, iob);
1938 atomic_set(&card->write.irq_pending, 0);
1939 wake_up(&card->wait_q);
1940 return rc;
1941 }
1942 while (!atomic_read(&reply->received)) {
1943 if (time_after(jiffies, timeout)) {
1944 spin_lock_irqsave(&reply->card->lock, flags);
1945 list_del_init(&reply->list);
1946 spin_unlock_irqrestore(&reply->card->lock, flags);
1947 reply->rc = -ETIME;
1948 atomic_inc(&reply->received);
1949 wake_up(&reply->wait_q);
1950 }
1951 cpu_relax();
1952 };
1953 rc = reply->rc;
1954 qeth_put_reply(reply);
1955 return rc;
1956}
1957
/*
 * Send control data for an OSN device.  Unlike
 * qeth_send_control_data() no reply waiter is queued here --
 * NOTE(review): answers appear to be consumed through the OSN
 * assist callback path; confirm against the OSN support code.
 */
static int
qeth_osn_send_control_data(struct qeth_card *card, int len,
			   struct qeth_cmd_buffer *iob)
{
	unsigned long flags;
	int rc = 0;

	QETH_DBF_TEXT(trace, 5, "osndctrd");

	/* claim the write channel */
	wait_event(card->wait_q,
		   atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
	qeth_prepare_control_data(card, len, iob);
	QETH_DBF_TEXT(trace, 6, "osnoirqp");
	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
			      (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
	if (rc){
		PRINT_WARN("qeth_osn_send_control_data: "
			   "ccw_device_start rc = %i\n", rc);
		QETH_DBF_TEXT_(trace, 2, " err%d", rc);
		qeth_release_buffer(iob->channel, iob);
		atomic_set(&card->write.irq_pending, 0);
		wake_up(&card->wait_q);
	}
	return rc;
}
1985
/*
 * Copy the generic IPA PDU header into @iob and patch in the
 * protocol discipline byte and the ULP connection token.
 */
static inline void
qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		     char prot_type)
{
	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data),&prot_type,1);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
}
1995
1996static int
1997qeth_osn_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
1998 int data_len)
1999{
2000 u16 s1, s2;
2001
2002 QETH_DBF_TEXT(trace,4,"osndipa");
2003
2004 qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
2005 s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
2006 s2 = (u16)data_len;
2007 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
2008 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
2009 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
2010 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
2011 return qeth_osn_send_control_data(card, s1, iob);
2012}
2013
2014static int
2015qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2016 int (*reply_cb)
2017 (struct qeth_card *,struct qeth_reply*, unsigned long),
2018 void *reply_param)
2019{
2020 int rc;
2021 char prot_type;
2022
2023 QETH_DBF_TEXT(trace,4,"sendipa");
2024
2025 if (card->options.layer2)
2026 if (card->info.type == QETH_CARD_TYPE_OSN)
2027 prot_type = QETH_PROT_OSN2;
2028 else
2029 prot_type = QETH_PROT_LAYER2;
2030 else
2031 prot_type = QETH_PROT_TCPIP;
2032 qeth_prepare_ipa_cmd(card,iob,prot_type);
2033 rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob,
2034 reply_cb, reply_param);
2035 return rc;
2036}
2037
2038
/*
 * Reply callback for CM_ENABLE: store the filter token the adapter
 * assigned to us.
 */
static int
qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		  unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(setup, 2, "cmenblcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
	return 0;
}
2054
/*
 * Issue the CM_ENABLE handshake command, offering our issuer and
 * filter write tokens; the reply callback stores the returned
 * filter token.
 */
static int
qeth_cm_enable(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(setup,2,"cmenable");

	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

	rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
				    qeth_cm_enable_cb, NULL);
	return rc;
}
2074
/*
 * Reply callback for CM_SETUP: store the connection token the
 * adapter assigned to us.
 */
static int
qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		 unsigned long data)
{

	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(setup, 2, "cmsetpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
	return 0;
}
2091
/*
 * Issue the CM_SETUP handshake command using the tokens obtained
 * from CM_ENABLE; the reply callback stores the connection token.
 */
static int
qeth_cm_setup(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(setup,2,"cmsetup");

	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
				    qeth_cm_setup_cb, NULL);
	return rc;

}
2113
/*
 * Reply callback for ULP_ENABLE: store the ULP filter token and
 * derive MTU settings and the link type from the answer.  A frame
 * size that maps to no valid MTU fails the command with -EINVAL.
 */
static int
qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		   unsigned long data)
{

	__u16 mtu, framesize;
	__u16 len;
	__u8 link_type;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(setup, 2, "ulpenacb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (qeth_get_mtu_out_of_mpc(card->info.type)) {
		/* this card type negotiates the MTU via the MPC answer */
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
		if (!mtu) {
			iob->rc = -EINVAL;
			QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
			return 0;
		}
		card->info.max_mtu = mtu;
		card->info.initial_mtu = mtu;
		card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
	} else {
		/* otherwise fall back to per-card-type defaults */
		card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
		card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
		card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	}

	/* the link type is only present in sufficiently long answers */
	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
		card->info.link_type = link_type;
	} else
		card->info.link_type = 0;
	QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
	return 0;
}
2157
/*
 * Issue the ULP_ENABLE command, announcing port number, protocol
 * discipline and portname; the reply callback stores the filter
 * token and the negotiated MTU/link type.
 */
static int
qeth_ulp_enable(struct qeth_card *card)
{
	int rc;
	char prot_type;
	struct qeth_cmd_buffer *iob;

	/*FIXME: trace view callbacks*/
	QETH_DBF_TEXT(setup,2,"ulpenabl");

	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);

	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
		(__u8) card->info.portno;
	/* protocol discipline: OSN2 / layer-2 / TCP-IP (layer 3) */
	if (card->options.layer2)
		if (card->info.type == QETH_CARD_TYPE_OSN)
			prot_type = QETH_PROT_OSN2;
		else
			prot_type = QETH_PROT_LAYER2;
	else
		prot_type = QETH_PROT_TCPIP;

	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data),&prot_type,1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
	       card->info.portname, 9);
	rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
				    qeth_ulp_enable_cb, NULL);
	return rc;

}
2193
/*
 * Reply callback for ULP_SETUP: store the ULP connection token the
 * adapter assigned to us.
 */
static int
qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		  unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(setup, 2, "ulpstpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
	return 0;
}
2209
/*
 * Issue the ULP_SETUP command, passing the tokens from earlier
 * handshake steps plus the data device's number and real address;
 * the reply callback stores the ULP connection token.
 */
static int
qeth_ulp_setup(struct qeth_card *card)
{
	int rc;
	__u16 temp;
	struct qeth_cmd_buffer *iob;
	struct ccw_dev_id dev_id;

	QETH_DBF_TEXT(setup,2,"ulpsetup");

	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

	/* identify the qdio (data) device to the adapter */
	ccw_device_get_id(CARD_DDEV(card), &dev_id);
	memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
	rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
				    qeth_ulp_setup_cb, NULL);
	return rc;
}
2238
2239static inline int
2240qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2241 unsigned int siga_error, const char *dbftext)
2242{
2243 if (qdio_error || siga_error) {
2244 QETH_DBF_TEXT(trace, 2, dbftext);
2245 QETH_DBF_TEXT(qerr, 2, dbftext);
2246 QETH_DBF_TEXT_(qerr, 2, " F15=%02X",
2247 buf->element[15].flags & 0xff);
2248 QETH_DBF_TEXT_(qerr, 2, " F14=%02X",
2249 buf->element[14].flags & 0xff);
2250 QETH_DBF_TEXT_(qerr, 2, " qerr=%X", qdio_error);
2251 QETH_DBF_TEXT_(qerr, 2, " serr=%X", siga_error);
2252 return 1;
2253 }
2254 return 0;
2255}
2256
2257static struct sk_buff *
2258qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
2259{
2260 struct sk_buff* skb;
2261 int add_len;
2262
2263 add_len = 0;
2264 if (hdr->hdr.osn.id == QETH_HEADER_TYPE_OSN)
2265 add_len = sizeof(struct qeth_hdr);
2266#ifdef CONFIG_QETH_VLAN
2267 else
2268 add_len = VLAN_HLEN;
2269#endif
2270 skb = dev_alloc_skb(length + add_len);
2271 if (skb && add_len)
2272 skb_reserve(skb, add_len);
2273 return skb;
2274}
2275
/*
 * Attach the payload of one qdio buffer element to a scatter-gather skb.
 * On the first fragment (*pfrag == 0) the skb itself is allocated here;
 * a small prefix of the data is copied into the linear part because the
 * upper layers expect some data there, the rest is referenced as a page
 * fragment.  Later fragments are attached page-reference only.
 * Each page attached via skb_fill_page_desc gets an extra reference
 * (get_page) which the network stack drops when the skb is freed.
 * Returns 0 on success, -ENOMEM if the skb allocation fails.
 */
static inline int
qeth_create_skb_frag(struct qdio_buffer_element *element,
		     struct sk_buff **pskb,
		     int offset, int *pfrag, int data_len)
{
	struct page *page = virt_to_page(element->addr);
	if (*pfrag == 0) {
		/* the upper protocol layers assume that there is data in the
		 * skb itself. Copy a small amount (64 bytes) to make them
		 * happy. */
		*pskb = dev_alloc_skb(64 + QETH_FAKE_LL_LEN_ETH);
		if (!(*pskb))
			return -ENOMEM;
		/* headroom for a possible fake link-layer header */
		skb_reserve(*pskb, QETH_FAKE_LL_LEN_ETH);
		if (data_len <= 64) {
			/* everything fits into the linear part, no frag */
			memcpy(skb_put(*pskb, data_len), element->addr + offset,
				data_len);
		} else {
			get_page(page);
			memcpy(skb_put(*pskb, 64), element->addr + offset, 64);
			skb_fill_page_desc(*pskb, *pfrag, page, offset + 64,
					   data_len - 64);
			/* account the non-linear bytes manually */
			(*pskb)->data_len += data_len - 64;
			(*pskb)->len += data_len - 64;
			(*pskb)->truesize += data_len - 64;
		}
	} else {
		get_page(page);
		skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len);
		(*pskb)->data_len += data_len;
		(*pskb)->len += data_len;
		(*pskb)->truesize += data_len;
	}
	(*pfrag)++;
	return 0;
}
2312
2313static inline struct qeth_buffer_pool_entry *
2314qeth_find_free_buffer_pool_entry(struct qeth_card *card)
2315{
2316 struct list_head *plh;
2317 struct qeth_buffer_pool_entry *entry;
2318 int i, free;
2319 struct page *page;
2320
2321 if (list_empty(&card->qdio.in_buf_pool.entry_list))
2322 return NULL;
2323
2324 list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
2325 entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
2326 free = 1;
2327 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2328 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2329 free = 0;
2330 break;
2331 }
2332 }
2333 if (free) {
2334 list_del_init(&entry->list);
2335 return entry;
2336 }
2337 }
2338
2339 /* no free buffer in pool so take first one and swap pages */
2340 entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2341 struct qeth_buffer_pool_entry, list);
2342 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2343 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2344 page = alloc_page(GFP_ATOMIC|GFP_DMA);
2345 if (!page) {
2346 return NULL;
2347 } else {
2348 free_page((unsigned long)entry->elements[i]);
2349 entry->elements[i] = page_address(page);
2350 if (card->options.performance_stats)
2351 card->perf_stats.sg_alloc_page_rx++;
2352 }
2353 }
2354 }
2355 list_del_init(&entry->list);
2356 return entry;
2357}
2358
/*
 * Extract the next received packet from an inbound qdio buffer and
 * return it as an skb.  *__element/*__offset track the current read
 * position across calls and are advanced past the consumed data.
 * *hdr is set to the qeth header preceding the packet.  Large frames
 * may be built as scatter-gather skbs (page fragments) instead of being
 * copied.  Returns NULL when the buffer is exhausted, on a zero-length
 * header, on a truncated frame or on allocation failure.
 */
static struct sk_buff *
qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
		  struct qdio_buffer_element **__element, int *__offset,
		  struct qeth_hdr **hdr)
{
	struct qdio_buffer_element *element = *__element;
	int offset = *__offset;
	struct sk_buff *skb = NULL;
	int skb_len;
	void *data_ptr;
	int data_len;
	int use_rx_sg = 0;
	int frag = 0;

	QETH_DBF_TEXT(trace,6,"nextskb");
	/* qeth_hdr must not cross element boundaries */
	if (element->length < offset + sizeof(struct qeth_hdr)){
		if (qeth_is_last_sbale(element))
			return NULL;
		element++;
		offset = 0;
		if (element->length < sizeof(struct qeth_hdr))
			return NULL;
	}
	*hdr = element->addr + offset;

	offset += sizeof(struct qeth_hdr);
	/* the packet length sits at a discipline-dependent place in the
	 * qeth header */
	if (card->options.layer2)
		if (card->info.type == QETH_CARD_TYPE_OSN)
			skb_len = (*hdr)->hdr.osn.pdu_length;
		else
			skb_len = (*hdr)->hdr.l2.pkt_length;
	else
		skb_len = (*hdr)->hdr.l3.length;

	if (!skb_len)
		return NULL;
	/* frames at or above rx_sg_cb go into a fragmented skb, unless the
	 * card is an OSN device or is in forced skb-allocation mode */
	if ((skb_len >= card->options.rx_sg_cb) &&
	    (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
	    (!atomic_read(&card->force_alloc_skb))) {
		use_rx_sg = 1;
	} else {
		if (card->options.fake_ll) {
			/* reserve headroom for the fake link-layer header */
			if (card->dev->type == ARPHRD_IEEE802_TR) {
				if (!(skb = qeth_get_skb(skb_len +
						QETH_FAKE_LL_LEN_TR, *hdr)))
					goto no_mem;
				skb_reserve(skb, QETH_FAKE_LL_LEN_TR);
			} else {
				if (!(skb = qeth_get_skb(skb_len +
						QETH_FAKE_LL_LEN_ETH, *hdr)))
					goto no_mem;
				skb_reserve(skb, QETH_FAKE_LL_LEN_ETH);
			}
		} else {
			skb = qeth_get_skb(skb_len, *hdr);
			if (!skb)
				goto no_mem;
		}
	}

	data_ptr = element->addr + offset;
	/* gather skb_len bytes, walking buffer elements as needed */
	while (skb_len) {
		data_len = min(skb_len, (int)(element->length - offset));
		if (data_len) {
			if (use_rx_sg) {
				if (qeth_create_skb_frag(element, &skb, offset,
				    &frag, data_len))
					goto no_mem;
			} else {
				memcpy(skb_put(skb, data_len), data_ptr,
				       data_len);
			}
		}
		skb_len -= data_len;
		if (skb_len){
			/* data continues past the last element -> the frame
			 * is truncated; drop it and count an rx error */
			if (qeth_is_last_sbale(element)){
				QETH_DBF_TEXT(trace,4,"unexeob");
				QETH_DBF_TEXT_(trace,4,"%s",CARD_BUS_ID(card));
				QETH_DBF_TEXT(qerr,2,"unexeob");
				QETH_DBF_TEXT_(qerr,2,"%s",CARD_BUS_ID(card));
				QETH_DBF_HEX(misc,4,buffer,sizeof(*buffer));
				dev_kfree_skb_any(skb);
				card->stats.rx_errors++;
				return NULL;
			}
			element++;
			offset = 0;
			data_ptr = element->addr;
		} else {
			offset += data_len;
		}
	}
	*__element = element;
	*__offset = offset;
	if (use_rx_sg && card->options.performance_stats) {
		card->perf_stats.sg_skbs_rx++;
		card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
	}
	return skb;
no_mem:
	if (net_ratelimit()){
		PRINT_WARN("No memory for packet received on %s.\n",
			   QETH_CARD_IFNAME(card));
		QETH_DBF_TEXT(trace,2,"noskbmem");
		QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
	}
	card->stats.rx_dropped++;
	return NULL;
}
2469
/*
 * qeth variant of eth_type_trans(): classify a received frame
 * (pkt_type) and determine its protocol id.  For token-ring link types
 * the regular tr_type_trans() is used instead.
 */
static __be16
qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct qeth_card *card;
	struct ethhdr *eth;

	QETH_DBF_TEXT(trace,6,"typtrans");

	card = (struct qeth_card *)dev->priv;
#ifdef CONFIG_TR
	if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
	    (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
		return tr_type_trans(skb,dev);
#endif /* CONFIG_TR */
	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN );
	eth = eth_hdr(skb);

	/* group bit set -> broadcast or multicast destination */
	if (*eth->h_dest & 1) {
		if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	} else if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
		skb->pkt_type = PACKET_OTHERHOST;

	/* proto values >= 1536 are Ethernet II protocol ids; smaller
	 * values are 802.3 lengths, 0xFFFF marks a raw 802.3 frame */
	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;
	if (*(unsigned short *) (skb->data) == 0xFFFF)
		return htons(ETH_P_802_3);
	return htons(ETH_P_802_2);
}
2502
/*
 * Build a fake token-ring link-layer header in the skb's reserved
 * headroom.  The destination MAC is derived from pkt_type/protocol
 * (multicast mapping or the device address); the source MAC comes from
 * the qeth header if the adapter supplied one.  An LLC SNAP header is
 * synthesized in place of the routing control field.
 */
static void
qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
			   struct qeth_hdr *hdr)
{
	struct trh_hdr *fake_hdr;
	struct trllc *fake_llc;
	struct iphdr *ip_hdr;

	QETH_DBF_TEXT(trace,5,"skbfktr");
	skb_set_mac_header(skb, (int)-QETH_FAKE_LL_LEN_TR);
	/* this is a fake token-ring header */
	fake_hdr = tr_hdr(skb);

	/* the destination MAC address */
	switch (skb->pkt_type){
	case PACKET_MULTICAST:
		switch (skb->protocol){
#ifdef CONFIG_QETH_IPV6
		case __constant_htons(ETH_P_IPV6):
			ndisc_mc_map((struct in6_addr *)
				     skb->data + QETH_FAKE_LL_V6_ADDR_POS,
				     fake_hdr->daddr, card->dev, 0);
			break;
#endif /* CONFIG_QETH_IPV6 */
		case __constant_htons(ETH_P_IP):
			ip_hdr = (struct iphdr *)skb->data;
			ip_tr_mc_map(ip_hdr->daddr, fake_hdr->daddr);
			break;
		default:
			memcpy(fake_hdr->daddr, card->dev->dev_addr, TR_ALEN);
		}
		break;
	case PACKET_BROADCAST:
		memset(fake_hdr->daddr, 0xff, TR_ALEN);
		break;
	default:
		memcpy(fake_hdr->daddr, card->dev->dev_addr, TR_ALEN);
	}
	/* the source MAC address */
	if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
		memcpy(fake_hdr->saddr, &hdr->hdr.l3.dest_addr[2], TR_ALEN);
	else
		memset(fake_hdr->saddr, 0, TR_ALEN);
	fake_hdr->rcf=0;
	/* overlay an LLC SNAP header on the routing control field */
	fake_llc = (struct trllc*)&(fake_hdr->rcf);
	fake_llc->dsap = EXTENDED_SAP;
	fake_llc->ssap = EXTENDED_SAP;
	fake_llc->llc = UI_CMD;
	fake_llc->protid[0] = 0;
	fake_llc->protid[1] = 0;
	fake_llc->protid[2] = 0;
	fake_llc->ethertype = ETH_P_IP;
}
2556
/*
 * Build a fake ethernet link-layer header in the skb's reserved
 * headroom (ethernet counterpart of qeth_rebuild_skb_fake_ll_tr).
 * Destination MAC from pkt_type/protocol, source MAC from the qeth
 * header if the adapter supplied one, protocol from skb->protocol.
 */
static void
qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb,
			     struct qeth_hdr *hdr)
{
	struct ethhdr *fake_hdr;
	struct iphdr *ip_hdr;

	QETH_DBF_TEXT(trace,5,"skbfketh");
	skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_ETH);
	/* this is a fake ethernet header */
	fake_hdr = eth_hdr(skb);

	/* the destination MAC address */
	switch (skb->pkt_type){
	case PACKET_MULTICAST:
		switch (skb->protocol){
#ifdef CONFIG_QETH_IPV6
		case __constant_htons(ETH_P_IPV6):
			ndisc_mc_map((struct in6_addr *)
				     skb->data + QETH_FAKE_LL_V6_ADDR_POS,
				     fake_hdr->h_dest, card->dev, 0);
			break;
#endif /* CONFIG_QETH_IPV6 */
		case __constant_htons(ETH_P_IP):
			ip_hdr = (struct iphdr *)skb->data;
			ip_eth_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
			break;
		default:
			memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
		}
		break;
	case PACKET_BROADCAST:
		memset(fake_hdr->h_dest, 0xff, ETH_ALEN);
		break;
	default:
		memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
	}
	/* the source MAC address */
	if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
		memcpy(fake_hdr->h_source, &hdr->hdr.l3.dest_addr[2], ETH_ALEN);
	else
		memset(fake_hdr->h_source, 0, ETH_ALEN);
	/* the protocol */
	fake_hdr->h_proto = skb->protocol;
}
2602
2603static inline void
2604qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
2605 struct qeth_hdr *hdr)
2606{
2607 if (card->dev->type == ARPHRD_IEEE802_TR)
2608 qeth_rebuild_skb_fake_ll_tr(card, skb, hdr);
2609 else
2610 qeth_rebuild_skb_fake_ll_eth(card, skb, hdr);
2611}
2612
2613static inline void
2614qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2615 struct qeth_hdr *hdr)
2616{
2617 skb->pkt_type = PACKET_HOST;
2618 skb->protocol = qeth_type_trans(skb, skb->dev);
2619 if (card->options.checksum_type == NO_CHECKSUMMING)
2620 skb->ip_summed = CHECKSUM_UNNECESSARY;
2621 else
2622 skb->ip_summed = CHECKSUM_NONE;
2623 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
2624}
2625
/*
 * Rebuild metadata for a layer-3 received skb: protocol, pkt_type,
 * optional fake link-layer header and checksum state.  Returns the VLAN
 * id carried in the qeth header, or 0 for untagged/passthru frames.
 */
static __u16
qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
		 struct qeth_hdr *hdr)
{
	unsigned short vlan_id = 0;
#ifdef CONFIG_QETH_IPV6
	if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
		/* passthru frames already carry a real link-layer header */
		skb->pkt_type = PACKET_HOST;
		skb->protocol = qeth_type_trans(skb, card->dev);
		return 0;
	}
#endif /* CONFIG_QETH_IPV6 */
	skb->protocol = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
			      ETH_P_IP);
	switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK){
	case QETH_CAST_UNICAST:
		skb->pkt_type = PACKET_HOST;
		break;
	case QETH_CAST_MULTICAST:
		skb->pkt_type = PACKET_MULTICAST;
		card->stats.multicast++;
		break;
	case QETH_CAST_BROADCAST:
		skb->pkt_type = PACKET_BROADCAST;
		card->stats.multicast++;
		break;
	case QETH_CAST_ANYCAST:
	case QETH_CAST_NOCAST:
	default:
		skb->pkt_type = PACKET_HOST;
	}

	/* the VLAN id is either in the dedicated header field or appended
	 * behind the destination address */
	if (hdr->hdr.l3.ext_flags &
	    (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
		vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
			hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
	}

	if (card->options.fake_ll)
		qeth_rebuild_skb_fake_ll(card, skb, hdr);
	else
		skb_reset_mac_header(skb);
	skb->ip_summed = card->options.checksum_type;
	if (card->options.checksum_type == HW_CHECKSUMMING){
		/* only trust the hardware when both the header and the
		 * transport checksum have been verified by the adapter */
		if ( (hdr->hdr.l3.ext_flags &
		      (QETH_HDR_EXT_CSUM_HDR_REQ |
		       QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
		     (QETH_HDR_EXT_CSUM_HDR_REQ |
		      QETH_HDR_EXT_CSUM_TRANSP_REQ) )
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = SW_CHECKSUMMING;
	}
	return vlan_id;
}
2681
2682static void
2683qeth_process_inbound_buffer(struct qeth_card *card,
2684 struct qeth_qdio_buffer *buf, int index)
2685{
2686 struct qdio_buffer_element *element;
2687 struct sk_buff *skb;
2688 struct qeth_hdr *hdr;
2689 int offset;
2690 int rxrc;
2691 __u16 vlan_tag = 0;
2692
2693 /* get first element of current buffer */
2694 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
2695 offset = 0;
2696 if (card->options.performance_stats)
2697 card->perf_stats.bufs_rec++;
2698 while((skb = qeth_get_next_skb(card, buf->buffer, &element,
2699 &offset, &hdr))) {
2700 skb->dev = card->dev;
2701 if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
2702 qeth_layer2_rebuild_skb(card, skb, hdr);
2703 else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
2704 vlan_tag = qeth_rebuild_skb(card, skb, hdr);
2705 else if (hdr->hdr.osn.id == QETH_HEADER_TYPE_OSN) {
2706 skb_push(skb, sizeof(struct qeth_hdr));
2707 skb_copy_to_linear_data(skb, hdr,
2708 sizeof(struct qeth_hdr));
2709 } else { /* unknown header type */
2710 dev_kfree_skb_any(skb);
2711 QETH_DBF_TEXT(trace, 3, "inbunkno");
2712 QETH_DBF_HEX(control, 3, hdr, QETH_DBF_CONTROL_LEN);
2713 continue;
2714 }
2715 /* is device UP ? */
2716 if (!(card->dev->flags & IFF_UP)){
2717 dev_kfree_skb_any(skb);
2718 continue;
2719 }
2720 if (card->info.type == QETH_CARD_TYPE_OSN)
2721 rxrc = card->osn_info.data_cb(skb);
2722 else
2723#ifdef CONFIG_QETH_VLAN
2724 if (vlan_tag)
2725 if (card->vlangrp)
2726 vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag);
2727 else {
2728 dev_kfree_skb_any(skb);
2729 continue;
2730 }
2731 else
2732#endif
2733 rxrc = netif_rx(skb);
2734 card->dev->last_rx = jiffies;
2735 card->stats.rx_packets++;
2736 card->stats.rx_bytes += skb->len;
2737 }
2738}
2739
2740static int
2741qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2742{
2743 struct qeth_buffer_pool_entry *pool_entry;
2744 int i;
2745
2746 pool_entry = qeth_find_free_buffer_pool_entry(card);
2747 if (!pool_entry)
2748 return 1;
2749 /*
2750 * since the buffer is accessed only from the input_tasklet
2751 * there shouldn't be a need to synchronize; also, since we use
2752 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off
2753 * buffers
2754 */
2755 BUG_ON(!pool_entry);
2756
2757 buf->pool_entry = pool_entry;
2758 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
2759 buf->buffer->element[i].length = PAGE_SIZE;
2760 buf->buffer->element[i].addr = pool_entry->elements[i];
2761 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2762 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
2763 else
2764 buf->buffer->element[i].flags = 0;
2765 }
2766 buf->state = QETH_QDIO_BUF_EMPTY;
2767 return 0;
2768}
2769
2770static void
2771qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2772 struct qeth_qdio_out_buffer *buf)
2773{
2774 int i;
2775 struct sk_buff *skb;
2776
2777 /* is PCI flag set on buffer? */
2778 if (buf->buffer->element[0].flags & 0x40)
2779 atomic_dec(&queue->set_pci_flags_count);
2780
2781 while ((skb = skb_dequeue(&buf->skb_list))){
2782 atomic_dec(&skb->users);
2783 dev_kfree_skb_any(skb);
2784 }
2785 qeth_eddp_buf_release_contexts(buf);
2786 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i){
2787 buf->buffer->element[i].length = 0;
2788 buf->buffer->element[i].addr = NULL;
2789 buf->buffer->element[i].flags = 0;
2790 }
2791 buf->next_element_to_fill = 0;
2792 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
2793}
2794
/*
 * Refill and requeue inbound buffers to the hardware.  Buffers are only
 * given back once QETH_IN_BUF_REQUEUE_THRESHOLD of them have been
 * consumed, to keep the number of SIGA operations down.  A refill
 * shortage switches the card into forced skb-allocation mode for the
 * next few rounds.
 */
static void
qeth_queue_input_buffer(struct qeth_card *card, int index)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	int count;
	int i;
	int rc;
	int newcount = 0;

	QETH_DBF_TEXT(trace,6,"queinbuf");
	/* number of buffers consumed since the last requeue, taking ring
	 * wrap-around into account */
	count = (index < queue->next_buf_to_init)?
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init - index) :
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)){
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are in memory shortage so we switch back to
			   traditional skb allocation and drop packages */
			if (!atomic_read(&card->force_alloc_skb) &&
			    net_ratelimit())
				PRINT_WARN("Switch to alloc skb\n");
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			/* refill succeeded: count down towards re-enabling
			 * scatter-gather reception */
			if ((atomic_read(&card->force_alloc_skb) == 1) &&
			    net_ratelimit())
				PRINT_WARN("Switch to sg\n");
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		/*
		 * according to old code it should be avoided to requeue all
		 * 128 buffers in order to benefit from PCI avoidance.
		 * this function keeps at least one buffer (the buffer at
		 * 'index') un-requeued -> this buffer is the first buffer that
		 * will be requeued the next time
		 */
		if (card->options.performance_stats) {
			card->perf_stats.inbound_do_qdio_cnt++;
			card->perf_stats.inbound_do_qdio_start_time =
				qeth_get_micros();
		}
		rc = do_QDIO(CARD_DDEV(card),
			     QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
			     0, queue->next_buf_to_init, count, NULL);
		if (card->options.performance_stats)
			card->perf_stats.inbound_do_qdio_time +=
				qeth_get_micros() -
				card->perf_stats.inbound_do_qdio_start_time;
		if (rc){
			PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
				   "return %i (device %s).\n",
				   rc, CARD_DDEV_ID(card));
			QETH_DBF_TEXT(trace,2,"qinberr");
			QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
		}
		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
					  QDIO_MAX_BUFFERS_PER_Q;
	}
}
2867
2868static inline void
2869qeth_put_buffer_pool_entry(struct qeth_card *card,
2870 struct qeth_buffer_pool_entry *entry)
2871{
2872 QETH_DBF_TEXT(trace, 6, "ptbfplen");
2873 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
2874}
2875
/*
 * QDIO inbound interrupt handler: processes 'count' filled buffers
 * starting at 'first_element' and hands each one back to the hardware.
 * An activate-check condition schedules a card recovery instead.
 */
static void
qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
			unsigned int qdio_err, unsigned int siga_err,
			unsigned int queue, int first_element, int count,
			unsigned long card_ptr)
{
	struct net_device *net_dev;
	struct qeth_card *card;
	struct qeth_qdio_buffer *buffer;
	int index;
	int i;

	QETH_DBF_TEXT(trace, 6, "qdinput");
	card = (struct qeth_card *) card_ptr;
	net_dev = card->dev;
	if (card->options.performance_stats) {
		card->perf_stats.inbound_cnt++;
		card->perf_stats.inbound_start_time = qeth_get_micros();
	}
	if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
		if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
			QETH_DBF_TEXT(trace, 1,"qdinchk");
			QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
			QETH_DBF_TEXT_(trace,1,"%04X%04X",first_element,count);
			QETH_DBF_TEXT_(trace,1,"%04X%04X", queue, status);
			qeth_schedule_recovery(card);
			return;
		}
	}
	for (i = first_element; i < (first_element + count); ++i) {
		index = i % QDIO_MAX_BUFFERS_PER_Q;
		buffer = &card->qdio.in_q->bufs[index];
		/* skip packet processing for buffers with qdio/siga errors */
		if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) &&
		      qeth_check_qdio_errors(buffer->buffer,
					     qdio_err, siga_err,"qinerr")))
			qeth_process_inbound_buffer(card, buffer, index);
		/* clear buffer and give back to hardware */
		qeth_put_buffer_pool_entry(card, buffer->pool_entry);
		qeth_queue_input_buffer(card, index);
	}
	if (card->options.performance_stats)
		card->perf_stats.inbound_time += qeth_get_micros() -
			card->perf_stats.inbound_start_time;
}
2920
/*
 * Map the error state of a transmitted buffer onto a QETH_SEND_ERROR_*
 * code.  'cc' is the SIGA condition code (lower two bits of siga_err);
 * sbalf15 is the adapter status byte of the last SBAL element.
 * QETH_SEND_ERROR_KICK_IT tells the caller to trigger a recovery.
 */
static int
qeth_handle_send_error(struct qeth_card *card,
		       struct qeth_qdio_out_buffer *buffer,
		       unsigned int qdio_err, unsigned int siga_err)
{
	int sbalf15 = buffer->buffer->element[15].flags & 0xff;
	int cc = siga_err & 3;

	QETH_DBF_TEXT(trace, 6, "hdsnderr");
	qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr");
	switch (cc) {
	case 0:
		/* SIGA succeeded; a qdio error still means the frame was
		 * not delivered */
		if (qdio_err){
			QETH_DBF_TEXT(trace, 1,"lnkfail");
			QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
			QETH_DBF_TEXT_(trace,1,"%04x %02x",
				       (u16)qdio_err, (u8)sbalf15);
			return QETH_SEND_ERROR_LINK_FAILURE;
		}
		return QETH_SEND_ERROR_NONE;
	case 2:
		if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
			QETH_DBF_TEXT(trace, 1, "SIGAcc2B");
			QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
			return QETH_SEND_ERROR_KICK_IT;
		}
		/* sbalf15 values 15..31 indicate a retryable condition */
		if ((sbalf15 >= 15) && (sbalf15 <= 31))
			return QETH_SEND_ERROR_RETRY;
		return QETH_SEND_ERROR_LINK_FAILURE;
		/* look at qdio_error and sbalf 15 */
	case 1:
		QETH_DBF_TEXT(trace, 1, "SIGAcc1");
		QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
		return QETH_SEND_ERROR_LINK_FAILURE;
	case 3:
	default:
		QETH_DBF_TEXT(trace, 1, "SIGAcc3");
		QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
		return QETH_SEND_ERROR_KICK_IT;
	}
}
2962
/*
 * Hand 'count' primed outbound buffers starting at 'index' to the
 * hardware via do_QDIO.  For non-IQD cards a PCI request flag (0x40 on
 * the first SBAL element) may be set so the adapter raises an interrupt
 * later; this keeps packing buffers from getting stuck.  A do_QDIO
 * failure schedules a card recovery.
 */
void
qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
		   int index, int count)
{
	struct qeth_qdio_out_buffer *buf;
	int rc;
	int i;
	unsigned int qdio_flags;

	QETH_DBF_TEXT(trace, 6, "flushbuf");

	for (i = index; i < index + count; ++i) {
		buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
		/* mark the last filled element for the hardware */
		buf->buffer->element[buf->next_element_to_fill - 1].flags |=
				SBAL_FLAGS_LAST_ENTRY;

		/* IQD cards never use the PCI flag mechanism */
		if (queue->card->info.type == QETH_CARD_TYPE_IQD)
			continue;

		if (!queue->do_pack){
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)){
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].flags |= 0x40;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)){
				/*
				 * there's no outstanding PCI any more, so we
				 * have to request a PCI to be sure that the PCI
				 * will wake at some time in the future then we
				 * can flush packed buffers that might still be
				 * hanging around, which can happen if no
				 * further send was requested by the stack
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].flags |= 0x40;
			}
		}
	}

	queue->card->dev->trans_start = jiffies;
	if (queue->card->options.performance_stats) {
		queue->card->perf_stats.outbound_do_qdio_cnt++;
		queue->card->perf_stats.outbound_do_qdio_start_time =
			qeth_get_micros();
	}
	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
	if (under_int)
		qdio_flags |= QDIO_FLAG_UNDER_INTERRUPT;
	if (atomic_read(&queue->set_pci_flags_count))
		qdio_flags |= QDIO_FLAG_PCI_OUT;
	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
		     queue->queue_no, index, count, NULL);
	if (queue->card->options.performance_stats)
		queue->card->perf_stats.outbound_do_qdio_time +=
			qeth_get_micros() -
			queue->card->perf_stats.outbound_do_qdio_start_time;
	if (rc){
		QETH_DBF_TEXT(trace, 2, "flushbuf");
		QETH_DBF_TEXT_(trace, 2, " err%d", rc);
		QETH_DBF_TEXT_(trace, 2, "%s", CARD_DDEV_ID(queue->card));
		queue->card->stats.tx_errors += count;
		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
		return;
	}
	atomic_add(count, &queue->used_buffers);
	if (queue->card->options.performance_stats)
		queue->card->perf_stats.bufs_sent += count;
}
3039
3040/*
3041 * Switched to packing state if the number of used buffers on a queue
3042 * reaches a certain limit.
3043 */
3044static void
3045qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3046{
3047 if (!queue->do_pack) {
3048 if (atomic_read(&queue->used_buffers)
3049 >= QETH_HIGH_WATERMARK_PACK){
3050 /* switch non-PACKING -> PACKING */
3051 QETH_DBF_TEXT(trace, 6, "np->pack");
3052 if (queue->card->options.performance_stats)
3053 queue->card->perf_stats.sc_dp_p++;
3054 queue->do_pack = 1;
3055 }
3056 }
3057}
3058
3059/*
3060 * Switches from packing to non-packing mode. If there is a packing
3061 * buffer on the queue this buffer will be prepared to be flushed.
3062 * In that case 1 is returned to inform the caller. If no buffer
3063 * has to be flushed, zero is returned.
3064 */
3065static int
3066qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3067{
3068 struct qeth_qdio_out_buffer *buffer;
3069 int flush_count = 0;
3070
3071 if (queue->do_pack) {
3072 if (atomic_read(&queue->used_buffers)
3073 <= QETH_LOW_WATERMARK_PACK) {
3074 /* switch PACKING -> non-PACKING */
3075 QETH_DBF_TEXT(trace, 6, "pack->np");
3076 if (queue->card->options.performance_stats)
3077 queue->card->perf_stats.sc_p_dp++;
3078 queue->do_pack = 0;
3079 /* flush packing buffers */
3080 buffer = &queue->bufs[queue->next_buf_to_fill];
3081 if ((atomic_read(&buffer->state) ==
3082 QETH_QDIO_BUF_EMPTY) &&
3083 (buffer->next_element_to_fill > 0)) {
3084 atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
3085 flush_count++;
3086 queue->next_buf_to_fill =
3087 (queue->next_buf_to_fill + 1) %
3088 QDIO_MAX_BUFFERS_PER_Q;
3089 }
3090 }
3091 }
3092 return flush_count;
3093}
3094
3095/*
3096 * Called to flush a packing buffer if no more pci flags are on the queue.
3097 * Checks if there is a packing buffer and prepares it to be flushed.
3098 * In that case returns 1, otherwise zero.
3099 */
3100static int
3101qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
3102{
3103 struct qeth_qdio_out_buffer *buffer;
3104
3105 buffer = &queue->bufs[queue->next_buf_to_fill];
3106 if((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3107 (buffer->next_element_to_fill > 0)){
3108 /* it's a packing buffer */
3109 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3110 queue->next_buf_to_fill =
3111 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
3112 return 1;
3113 }
3114 return 0;
3115}
3116
/*
 * Periodic housekeeping on an outbound queue: possibly leave packing
 * mode, and make sure a pending packing buffer gets flushed when no
 * PCI request is outstanding.  Only acts if it can atomically take the
 * queue lock (state xchg), i.e. if do_send_packet is not active.
 */
static void
qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	int index;
	int flush_cnt = 0;
	int q_was_packing = 0;

	/*
	 * check if we have to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)){
		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
				QETH_OUT_Q_UNLOCKED) {
			/*
			 * If we get in here, there was no action in
			 * do_send_packet. So, we check if there is a
			 * packing buffer to be flushed here.
			 */
			netif_stop_queue(queue->card->dev);
			index = queue->next_buf_to_fill;
			q_was_packing = queue->do_pack;
			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
			if (!flush_cnt &&
			    !atomic_read(&queue->set_pci_flags_count))
				flush_cnt +=
					qeth_flush_buffers_on_no_pci(queue);
			if (queue->card->options.performance_stats &&
			    q_was_packing)
				queue->card->perf_stats.bufs_sent_pack +=
					flush_cnt;
			if (flush_cnt)
				qeth_flush_buffers(queue, 1, index, flush_cnt);
			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		}
	}
}
3155
/*
 * QDIO outbound interrupt handler: releases 'count' completed buffers
 * starting at 'first_element' and restarts the transmit queue.  An
 * activate-check condition or a KICK_IT send error triggers a card
 * recovery instead.
 */
static void
qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
			unsigned int qdio_error, unsigned int siga_error,
			unsigned int __queue, int first_element, int count,
			unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *) card_ptr;
	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
	struct qeth_qdio_out_buffer *buffer;
	int i;

	QETH_DBF_TEXT(trace, 6, "qdouhdl");
	if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
		if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
			QETH_DBF_TEXT(trace, 2, "achkcond");
			QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card));
			QETH_DBF_TEXT_(trace, 2, "%08x", status);
			netif_stop_queue(card->dev);
			qeth_schedule_recovery(card);
			return;
		}
	}
	if (card->options.performance_stats) {
		card->perf_stats.outbound_handler_cnt++;
		card->perf_stats.outbound_handler_start_time =
			qeth_get_micros();
	}
	for(i = first_element; i < (first_element + count); ++i){
		buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
		/*we only handle the KICK_IT error by doing a recovery */
		if (qeth_handle_send_error(card, buffer,
					   qdio_error, siga_error)
				== QETH_SEND_ERROR_KICK_IT){
			netif_stop_queue(card->dev);
			qeth_schedule_recovery(card);
			return;
		}
		qeth_clear_output_buffer(queue, buffer);
	}
	atomic_sub(count, &queue->used_buffers);
	/* check if we need to do something on this outbound queue */
	if (card->info.type != QETH_CARD_TYPE_IQD)
		qeth_check_outbound_queue(queue);

	netif_wake_queue(queue->card->dev);
	if (card->options.performance_stats)
		card->perf_stats.outbound_handler_time += qeth_get_micros() -
			card->perf_stats.outbound_handler_start_time;
}
3205
3206static void
3207qeth_create_qib_param_field(struct qeth_card *card, char *param_field)
3208{
3209
3210 param_field[0] = _ascebc['P'];
3211 param_field[1] = _ascebc['C'];
3212 param_field[2] = _ascebc['I'];
3213 param_field[3] = _ascebc['T'];
3214 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
3215 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
3216 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
3217}
3218
3219static void
3220qeth_create_qib_param_field_blkt(struct qeth_card *card, char *param_field)
3221{
3222 param_field[16] = _ascebc['B'];
3223 param_field[17] = _ascebc['L'];
3224 param_field[18] = _ascebc['K'];
3225 param_field[19] = _ascebc['T'];
3226 *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
3227 *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
3228 *((unsigned int *) (&param_field[28])) = card->info.blkt.inter_packet_jumbo;
3229}
3230
3231static void
3232qeth_initialize_working_pool_list(struct qeth_card *card)
3233{
3234 struct qeth_buffer_pool_entry *entry;
3235
3236 QETH_DBF_TEXT(trace,5,"inwrklst");
3237
3238 list_for_each_entry(entry,
3239 &card->qdio.init_pool.entry_list, init_list) {
3240 qeth_put_buffer_pool_entry(card,entry);
3241 }
3242}
3243
3244static void
3245qeth_clear_working_pool_list(struct qeth_card *card)
3246{
3247 struct qeth_buffer_pool_entry *pool_entry, *tmp;
3248
3249 QETH_DBF_TEXT(trace,5,"clwrklst");
3250 list_for_each_entry_safe(pool_entry, tmp,
3251 &card->qdio.in_buf_pool.entry_list, list){
3252 list_del(&pool_entry->list);
3253 }
3254}
3255
3256static void
3257qeth_free_buffer_pool(struct qeth_card *card)
3258{
3259 struct qeth_buffer_pool_entry *pool_entry, *tmp;
3260 int i=0;
3261 QETH_DBF_TEXT(trace,5,"freepool");
3262 list_for_each_entry_safe(pool_entry, tmp,
3263 &card->qdio.init_pool.entry_list, init_list){
3264 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
3265 free_page((unsigned long)pool_entry->elements[i]);
3266 list_del(&pool_entry->init_list);
3267 kfree(pool_entry);
3268 }
3269}
3270
3271static int
3272qeth_alloc_buffer_pool(struct qeth_card *card)
3273{
3274 struct qeth_buffer_pool_entry *pool_entry;
3275 void *ptr;
3276 int i, j;
3277
3278 QETH_DBF_TEXT(trace,5,"alocpool");
3279 for (i = 0; i < card->qdio.init_pool.buf_count; ++i){
3280 pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
3281 if (!pool_entry){
3282 qeth_free_buffer_pool(card);
3283 return -ENOMEM;
3284 }
3285 for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
3286 ptr = (void *) __get_free_page(GFP_KERNEL|GFP_DMA);
3287 if (!ptr) {
3288 while (j > 0)
3289 free_page((unsigned long)
3290 pool_entry->elements[--j]);
3291 kfree(pool_entry);
3292 qeth_free_buffer_pool(card);
3293 return -ENOMEM;
3294 }
3295 pool_entry->elements[j] = ptr;
3296 }
3297 list_add(&pool_entry->init_list,
3298 &card->qdio.init_pool.entry_list);
3299 }
3300 return 0;
3301}
3302
/*
 * Resize the inbound buffer pool to 'bufcnt' entries by dropping the
 * old pool and allocating a fresh one.  Only permitted while the card
 * is DOWN or in RECOVER state.  Returns 0 on success, -EPERM when the
 * card is active, or -ENOMEM from the re-allocation.
 */
int
qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
	QETH_DBF_TEXT(trace, 2, "realcbp");

	if ((card->state != CARD_STATE_DOWN) &&
	    (card->state != CARD_STATE_RECOVER))
		return -EPERM;

	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
	qeth_clear_working_pool_list(card);
	qeth_free_buffer_pool(card);
	card->qdio.in_buf_pool.buf_count = bufcnt;
	card->qdio.init_pool.buf_count = bufcnt;
	return qeth_alloc_buffer_pool(card);
}
3319
3320static int
3321qeth_alloc_qdio_buffers(struct qeth_card *card)
3322{
3323 int i, j;
3324
3325 QETH_DBF_TEXT(setup, 2, "allcqdbf");
3326
3327 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
3328 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
3329 return 0;
3330
3331 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
3332 GFP_KERNEL|GFP_DMA);
3333 if (!card->qdio.in_q)
3334 goto out_nomem;
3335 QETH_DBF_TEXT(setup, 2, "inq");
3336 QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *));
3337 memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
3338 /* give inbound qeth_qdio_buffers their qdio_buffers */
3339 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3340 card->qdio.in_q->bufs[i].buffer =
3341 &card->qdio.in_q->qdio_bufs[i];
3342 /* inbound buffer pool */
3343 if (qeth_alloc_buffer_pool(card))
3344 goto out_freeinq;
3345 /* outbound */
3346 card->qdio.out_qs =
3347 kmalloc(card->qdio.no_out_queues *
3348 sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
3349 if (!card->qdio.out_qs)
3350 goto out_freepool;
3351 for (i = 0; i < card->qdio.no_out_queues; ++i) {
3352 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
3353 GFP_KERNEL|GFP_DMA);
3354 if (!card->qdio.out_qs[i])
3355 goto out_freeoutq;
3356 QETH_DBF_TEXT_(setup, 2, "outq %i", i);
3357 QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
3358 memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
3359 card->qdio.out_qs[i]->queue_no = i;
3360 /* give outbound qeth_qdio_buffers their qdio_buffers */
3361 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
3362 card->qdio.out_qs[i]->bufs[j].buffer =
3363 &card->qdio.out_qs[i]->qdio_bufs[j];
3364 skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
3365 skb_list);
3366 lockdep_set_class(
3367 &card->qdio.out_qs[i]->bufs[j].skb_list.lock,
3368 &qdio_out_skb_queue_key);
3369 INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
3370 }
3371 }
3372 return 0;
3373
3374out_freeoutq:
3375 while (i > 0)
3376 kfree(card->qdio.out_qs[--i]);
3377 kfree(card->qdio.out_qs);
3378 card->qdio.out_qs = NULL;
3379out_freepool:
3380 qeth_free_buffer_pool(card);
3381out_freeinq:
3382 kfree(card->qdio.in_q);
3383 card->qdio.in_q = NULL;
3384out_nomem:
3385 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
3386 return -ENOMEM;
3387}
3388
3389static void
3390qeth_free_qdio_buffers(struct qeth_card *card)
3391{
3392 int i, j;
3393
3394 QETH_DBF_TEXT(trace, 2, "freeqdbf");
3395 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
3396 QETH_QDIO_UNINITIALIZED)
3397 return;
3398 kfree(card->qdio.in_q);
3399 card->qdio.in_q = NULL;
3400 /* inbound buffer pool */
3401 qeth_free_buffer_pool(card);
3402 /* free outbound qdio_qs */
3403 if (card->qdio.out_qs) {
3404 for (i = 0; i < card->qdio.no_out_queues; ++i) {
3405 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
3406 qeth_clear_output_buffer(card->qdio.out_qs[i],
3407 &card->qdio.out_qs[i]->bufs[j]);
3408 kfree(card->qdio.out_qs[i]);
3409 }
3410 kfree(card->qdio.out_qs);
3411 card->qdio.out_qs = NULL;
3412 }
3413}
3414
3415static void
3416qeth_clear_qdio_buffers(struct qeth_card *card)
3417{
3418 int i, j;
3419
3420 QETH_DBF_TEXT(trace, 2, "clearqdbf");
3421 /* clear outbound buffers to free skbs */
3422 for (i = 0; i < card->qdio.no_out_queues; ++i)
3423 if (card->qdio.out_qs && card->qdio.out_qs[i]) {
3424 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
3425 qeth_clear_output_buffer(card->qdio.out_qs[i],
3426 &card->qdio.out_qs[i]->bufs[j]);
3427 }
3428}
3429
/*
 * Reset the card's QDIO bookkeeping to its pristine defaults:
 * state UNINITIALIZED, default inbound buffer size/count, and empty
 * buffer pool lists.
 */
static void
qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_DBF_TEXT(setup, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	/* inbound */
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	/* both pool lists start out empty */
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}
3442
/*
 * Prepare the previously allocated queues for operation: seed the
 * inbound queue with empty buffers and hand them to the hardware via
 * do_QDIO(), then reset every outbound queue to an empty, unlocked
 * state.  Returns 0 on success or the do_QDIO/qdio_synchronize error.
 */
static int
qeth_init_qdio_queues(struct qeth_card *card)
{
	int i, j;
	int rc;

	QETH_DBF_TEXT(setup, 2, "initqdqs");

	/* inbound queue */
	memset(card->qdio.in_q->qdio_bufs, 0,
	       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
	qeth_initialize_working_pool_list(card);
	/*give only as many buffers to hardware as we have buffer pool entries*/
	for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
		qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
	card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1;
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
		     card->qdio.in_buf_pool.buf_count - 1, NULL);
	if (rc) {
		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
		return rc;
	}
	rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
	if (rc) {
		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
		return rc;
	}
	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i){
		memset(card->qdio.out_qs[i]->qdio_bufs, 0,
		       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
		/* drop any stale skbs from a previous run */
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
			qeth_clear_output_buffer(card->qdio.out_qs[i],
					&card->qdio.out_qs[i]->bufs[j]);
		}
		card->qdio.out_qs[i]->card = card;
		card->qdio.out_qs[i]->next_buf_to_fill = 0;
		card->qdio.out_qs[i]->do_pack = 0;
		atomic_set(&card->qdio.out_qs[i]->used_buffers,0);
		atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
		atomic_set(&card->qdio.out_qs[i]->state,
			   QETH_OUT_Q_UNLOCKED);
	}
	return 0;
}
3488
3489static int
3490qeth_qdio_establish(struct qeth_card *card)
3491{
3492 struct qdio_initialize init_data;
3493 char *qib_param_field;
3494 struct qdio_buffer **in_sbal_ptrs;
3495 struct qdio_buffer **out_sbal_ptrs;
3496 int i, j, k;
3497 int rc = 0;
3498
3499 QETH_DBF_TEXT(setup, 2, "qdioest");
3500
3501 qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
3502 GFP_KERNEL);
3503 if (!qib_param_field)
3504 return -ENOMEM;
3505
3506 qeth_create_qib_param_field(card, qib_param_field);
3507 qeth_create_qib_param_field_blkt(card, qib_param_field);
3508
3509 in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
3510 GFP_KERNEL);
3511 if (!in_sbal_ptrs) {
3512 kfree(qib_param_field);
3513 return -ENOMEM;
3514 }
3515 for(i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3516 in_sbal_ptrs[i] = (struct qdio_buffer *)
3517 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
3518
3519 out_sbal_ptrs =
3520 kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
3521 sizeof(void *), GFP_KERNEL);
3522 if (!out_sbal_ptrs) {
3523 kfree(in_sbal_ptrs);
3524 kfree(qib_param_field);
3525 return -ENOMEM;
3526 }
3527 for(i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
3528 for(j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k){
3529 out_sbal_ptrs[k] = (struct qdio_buffer *)
3530 virt_to_phys(card->qdio.out_qs[i]->
3531 bufs[j].buffer);
3532 }
3533
3534 memset(&init_data, 0, sizeof(struct qdio_initialize));
3535 init_data.cdev = CARD_DDEV(card);
3536 init_data.q_format = qeth_get_qdio_q_format(card);
3537 init_data.qib_param_field_format = 0;
3538 init_data.qib_param_field = qib_param_field;
3539 init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
3540 init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
3541 init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
3542 init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
3543 init_data.no_input_qs = 1;
3544 init_data.no_output_qs = card->qdio.no_out_queues;
3545 init_data.input_handler = (qdio_handler_t *)
3546 qeth_qdio_input_handler;
3547 init_data.output_handler = (qdio_handler_t *)
3548 qeth_qdio_output_handler;
3549 init_data.int_parm = (unsigned long) card;
3550 init_data.flags = QDIO_INBOUND_0COPY_SBALS |
3551 QDIO_OUTBOUND_0COPY_SBALS |
3552 QDIO_USE_OUTBOUND_PCIS;
3553 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3554 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3555
3556 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
3557 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED)
3558 if ((rc = qdio_initialize(&init_data)))
3559 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
3560
3561 kfree(out_sbal_ptrs);
3562 kfree(in_sbal_ptrs);
3563 kfree(qib_param_field);
3564 return rc;
3565}
3566
3567static int
3568qeth_qdio_activate(struct qeth_card *card)
3569{
3570 QETH_DBF_TEXT(setup,3,"qdioact");
3571 return qdio_activate(CARD_DDEV(card), 0);
3572}
3573
3574static int
3575qeth_clear_channel(struct qeth_channel *channel)
3576{
3577 unsigned long flags;
3578 struct qeth_card *card;
3579 int rc;
3580
3581 QETH_DBF_TEXT(trace,3,"clearch");
3582 card = CARD_FROM_CDEV(channel->ccwdev);
3583 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3584 rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
3585 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3586
3587 if (rc)
3588 return rc;
3589 rc = wait_event_interruptible_timeout(card->wait_q,
3590 channel->state==CH_STATE_STOPPED, QETH_TIMEOUT);
3591 if (rc == -ERESTARTSYS)
3592 return rc;
3593 if (channel->state != CH_STATE_STOPPED)
3594 return -ETIME;
3595 channel->state = CH_STATE_DOWN;
3596 return 0;
3597}
3598
3599static int
3600qeth_halt_channel(struct qeth_channel *channel)
3601{
3602 unsigned long flags;
3603 struct qeth_card *card;
3604 int rc;
3605
3606 QETH_DBF_TEXT(trace,3,"haltch");
3607 card = CARD_FROM_CDEV(channel->ccwdev);
3608 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3609 rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
3610 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3611
3612 if (rc)
3613 return rc;
3614 rc = wait_event_interruptible_timeout(card->wait_q,
3615 channel->state==CH_STATE_HALTED, QETH_TIMEOUT);
3616 if (rc == -ERESTARTSYS)
3617 return rc;
3618 if (channel->state != CH_STATE_HALTED)
3619 return -ETIME;
3620 return 0;
3621}
3622
3623static int
3624qeth_halt_channels(struct qeth_card *card)
3625{
3626 int rc1 = 0, rc2=0, rc3 = 0;
3627
3628 QETH_DBF_TEXT(trace,3,"haltchs");
3629 rc1 = qeth_halt_channel(&card->read);
3630 rc2 = qeth_halt_channel(&card->write);
3631 rc3 = qeth_halt_channel(&card->data);
3632 if (rc1)
3633 return rc1;
3634 if (rc2)
3635 return rc2;
3636 return rc3;
3637}
3638static int
3639qeth_clear_channels(struct qeth_card *card)
3640{
3641 int rc1 = 0, rc2=0, rc3 = 0;
3642
3643 QETH_DBF_TEXT(trace,3,"clearchs");
3644 rc1 = qeth_clear_channel(&card->read);
3645 rc2 = qeth_clear_channel(&card->write);
3646 rc3 = qeth_clear_channel(&card->data);
3647 if (rc1)
3648 return rc1;
3649 if (rc2)
3650 return rc2;
3651 return rc3;
3652}
3653
3654static int
3655qeth_clear_halt_card(struct qeth_card *card, int halt)
3656{
3657 int rc = 0;
3658
3659 QETH_DBF_TEXT(trace,3,"clhacrd");
3660 QETH_DBF_HEX(trace, 3, &card, sizeof(void *));
3661
3662 if (halt)
3663 rc = qeth_halt_channels(card);
3664 if (rc)
3665 return rc;
3666 return qeth_clear_channels(card);
3667}
3668
/*
 * Tear down the QDIO subsystem of the card and clear (optionally
 * after halting) its channels.  The ESTABLISHED -> CLEANING cmpxchg
 * ensures only one caller performs the qdio_cleanup(); a concurrent
 * caller seeing CLEANING returns immediately.  The card is always
 * left in CARD_STATE_DOWN.
 */
static int
qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_DBF_TEXT(trace,3,"qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		/* we won the race: clean up the established queues.
		 * IQD devices are cleaned via HALT, others via CLEAR */
		if ((rc = qdio_cleanup(CARD_DDEV(card),
			(card->info.type == QETH_CARD_TYPE_IQD) ?
			QDIO_FLAG_CLEANUP_USING_HALT :
			QDIO_FLAG_CLEANUP_USING_CLEAR)))
			QETH_DBF_TEXT_(trace, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		/* another thread is already cleaning up */
		return rc;
	default:
		break;
	}
	if ((rc = qeth_clear_halt_card(card, use_halt)))
		QETH_DBF_TEXT_(trace, 3, "2err%d", rc);
	card->state = CARD_STATE_DOWN;
	return rc;
}
3695
3696static int
3697qeth_dm_act(struct qeth_card *card)
3698{
3699 int rc;
3700 struct qeth_cmd_buffer *iob;
3701
3702 QETH_DBF_TEXT(setup,2,"dmact");
3703
3704 iob = qeth_wait_for_buffer(&card->write);
3705 memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
3706
3707 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
3708 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
3709 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
3710 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3711 rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
3712 return rc;
3713}
3714
3715static int
3716qeth_mpc_initialize(struct qeth_card *card)
3717{
3718 int rc;
3719
3720 QETH_DBF_TEXT(setup,2,"mpcinit");
3721
3722 if ((rc = qeth_issue_next_read(card))){
3723 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3724 return rc;
3725 }
3726 if ((rc = qeth_cm_enable(card))){
3727 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3728 goto out_qdio;
3729 }
3730 if ((rc = qeth_cm_setup(card))){
3731 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
3732 goto out_qdio;
3733 }
3734 if ((rc = qeth_ulp_enable(card))){
3735 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
3736 goto out_qdio;
3737 }
3738 if ((rc = qeth_ulp_setup(card))){
3739 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3740 goto out_qdio;
3741 }
3742 if ((rc = qeth_alloc_qdio_buffers(card))){
3743 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3744 goto out_qdio;
3745 }
3746 if ((rc = qeth_qdio_establish(card))){
3747 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
3748 qeth_free_qdio_buffers(card);
3749 goto out_qdio;
3750 }
3751 if ((rc = qeth_qdio_activate(card))){
3752 QETH_DBF_TEXT_(setup, 2, "7err%d", rc);
3753 goto out_qdio;
3754 }
3755 if ((rc = qeth_dm_act(card))){
3756 QETH_DBF_TEXT_(setup, 2, "8err%d", rc);
3757 goto out_qdio;
3758 }
3759
3760 return 0;
3761out_qdio:
3762 qeth_qdio_clear_card(card, card->info.type!=QETH_CARD_TYPE_IQD);
3763 return rc;
3764}
3765
3766static struct net_device *
3767qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype)
3768{
3769 struct net_device *dev = NULL;
3770
3771 switch (type) {
3772 case QETH_CARD_TYPE_OSAE:
3773 switch (linktype) {
3774 case QETH_LINK_TYPE_LANE_TR:
3775 case QETH_LINK_TYPE_HSTR:
3776#ifdef CONFIG_TR
3777 dev = alloc_trdev(0);
3778#endif /* CONFIG_TR */
3779 break;
3780 default:
3781 dev = alloc_etherdev(0);
3782 }
3783 break;
3784 case QETH_CARD_TYPE_IQD:
3785 dev = alloc_netdev(0, "hsi%d", ether_setup);
3786 break;
3787 case QETH_CARD_TYPE_OSN:
3788 dev = alloc_netdev(0, "osn%d", ether_setup);
3789 break;
3790 default:
3791 dev = alloc_etherdev(0);
3792 }
3793 return dev;
3794}
3795
3796/*hard_header fake function; used in case fake_ll is set */
3797static int
3798qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
3799 unsigned short type, const void *daddr, const void *saddr,
3800 unsigned len)
3801{
3802 if(dev->type == ARPHRD_IEEE802_TR){
3803 struct trh_hdr *hdr;
3804 hdr = (struct trh_hdr *)skb_push(skb, QETH_FAKE_LL_LEN_TR);
3805 memcpy(hdr->saddr, dev->dev_addr, TR_ALEN);
3806 memcpy(hdr->daddr, "FAKELL", TR_ALEN);
3807 return QETH_FAKE_LL_LEN_TR;
3808
3809 } else {
3810 struct ethhdr *hdr;
3811 hdr = (struct ethhdr *)skb_push(skb, QETH_FAKE_LL_LEN_ETH);
3812 memcpy(hdr->h_source, dev->dev_addr, ETH_ALEN);
3813 memcpy(hdr->h_dest, "FAKELL", ETH_ALEN);
3814 if (type != ETH_P_802_3)
3815 hdr->h_proto = htons(type);
3816 else
3817 hdr->h_proto = htons(len);
3818 return QETH_FAKE_LL_LEN_ETH;
3819
3820 }
3821}
3822
/* header_ops installed when fake_ll is enabled: create synthesizes a
 * dummy link-level header via qeth_fake_header() */
static const struct header_ops qeth_fake_ops = {
	.create = qeth_fake_header,
	.parse = qeth_hard_header_parse,
};
3827
3828static int
3829qeth_send_packet(struct qeth_card *, struct sk_buff *);
3830
3831static int
3832qeth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3833{
3834 int rc;
3835 struct qeth_card *card;
3836
3837 QETH_DBF_TEXT(trace, 6, "hrdstxmi");
3838 card = (struct qeth_card *)dev->priv;
3839 if (skb==NULL) {
3840 card->stats.tx_dropped++;
3841 card->stats.tx_errors++;
3842 /* return OK; otherwise ksoftirqd goes to 100% */
3843 return NETDEV_TX_OK;
3844 }
3845 if ((card->state != CARD_STATE_UP) || !card->lan_online) {
3846 card->stats.tx_dropped++;
3847 card->stats.tx_errors++;
3848 card->stats.tx_carrier_errors++;
3849 dev_kfree_skb_any(skb);
3850 /* return OK; otherwise ksoftirqd goes to 100% */
3851 return NETDEV_TX_OK;
3852 }
3853 if (card->options.performance_stats) {
3854 card->perf_stats.outbound_cnt++;
3855 card->perf_stats.outbound_start_time = qeth_get_micros();
3856 }
3857 netif_stop_queue(dev);
3858 if ((rc = qeth_send_packet(card, skb))) {
3859 if (rc == -EBUSY) {
3860 return NETDEV_TX_BUSY;
3861 } else {
3862 card->stats.tx_errors++;
3863 card->stats.tx_dropped++;
3864 dev_kfree_skb_any(skb);
3865 /*set to OK; otherwise ksoftirqd goes to 100% */
3866 rc = NETDEV_TX_OK;
3867 }
3868 }
3869 netif_wake_queue(dev);
3870 if (card->options.performance_stats)
3871 card->perf_stats.outbound_time += qeth_get_micros() -
3872 card->perf_stats.outbound_start_time;
3873 return rc;
3874}
3875
/*
 * Check whether 'dev' is one of the VLAN devices stacked on 'card'.
 * Returns QETH_VLAN_CARD when dev is in the card's vlan group AND
 * really sits on top of this card's net_device, otherwise 0.
 * Without CONFIG_QETH_VLAN this always returns 0.
 */
static int
qeth_verify_vlan_dev(struct net_device *dev, struct qeth_card *card)
{
	int rc = 0;
#ifdef CONFIG_QETH_VLAN
	struct vlan_group *vg;
	int i;

	if (!(vg = card->vlangrp))
		return rc;

	/* scan every possible VLAN id of the group for this device */
	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){
		if (vlan_group_get_device(vg, i) == dev){
			rc = QETH_VLAN_CARD;
			break;
		}
	}
	/* reject a match whose underlying real device is a different card */
	if (rc && !(vlan_dev_info(dev)->real_dev->priv == (void *)card))
		return 0;

#endif
	return rc;
}
3899
3900static int
3901qeth_verify_dev(struct net_device *dev)
3902{
3903 struct qeth_card *card;
3904 unsigned long flags;
3905 int rc = 0;
3906
3907 read_lock_irqsave(&qeth_card_list.rwlock, flags);
3908 list_for_each_entry(card, &qeth_card_list.list, list){
3909 if (card->dev == dev){
3910 rc = QETH_REAL_CARD;
3911 break;
3912 }
3913 rc = qeth_verify_vlan_dev(dev, card);
3914 if (rc)
3915 break;
3916 }
3917 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
3918
3919 return rc;
3920}
3921
3922static struct qeth_card *
3923qeth_get_card_from_dev(struct net_device *dev)
3924{
3925 struct qeth_card *card = NULL;
3926 int rc;
3927
3928 rc = qeth_verify_dev(dev);
3929 if (rc == QETH_REAL_CARD)
3930 card = (struct qeth_card *)dev->priv;
3931 else if (rc == QETH_VLAN_CARD)
3932 card = (struct qeth_card *)
3933 vlan_dev_info(dev)->real_dev->priv;
3934
3935 QETH_DBF_TEXT_(trace, 4, "%d", rc);
3936 return card ;
3937}
3938
3939static void
3940qeth_tx_timeout(struct net_device *dev)
3941{
3942 struct qeth_card *card;
3943
3944 card = (struct qeth_card *) dev->priv;
3945 card->stats.tx_errors++;
3946 qeth_schedule_recovery(card);
3947}
3948
3949static int
3950qeth_open(struct net_device *dev)
3951{
3952 struct qeth_card *card;
3953
3954 QETH_DBF_TEXT(trace, 4, "qethopen");
3955
3956 card = (struct qeth_card *) dev->priv;
3957
3958 if (card->state != CARD_STATE_SOFTSETUP)
3959 return -ENODEV;
3960
3961 if ( (card->info.type != QETH_CARD_TYPE_OSN) &&
3962 (card->options.layer2) &&
3963 (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
3964 QETH_DBF_TEXT(trace,4,"nomacadr");
3965 return -EPERM;
3966 }
3967 card->data.state = CH_STATE_UP;
3968 card->state = CARD_STATE_UP;
3969 card->dev->flags |= IFF_UP;
3970 netif_start_queue(dev);
3971
3972 if (!card->lan_online && netif_carrier_ok(dev))
3973 netif_carrier_off(dev);
3974 return 0;
3975}
3976
3977static int
3978qeth_stop(struct net_device *dev)
3979{
3980 struct qeth_card *card;
3981
3982 QETH_DBF_TEXT(trace, 4, "qethstop");
3983
3984 card = (struct qeth_card *) dev->priv;
3985
3986 netif_tx_disable(dev);
3987 card->dev->flags &= ~IFF_UP;
3988 if (card->state == CARD_STATE_UP)
3989 card->state = CARD_STATE_SOFTSETUP;
3990 return 0;
3991}
3992
3993static int
3994qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3995{
3996 int cast_type = RTN_UNSPEC;
3997
3998 if (card->info.type == QETH_CARD_TYPE_OSN)
3999 return cast_type;
4000
4001 if (skb->dst && skb->dst->neighbour){
4002 cast_type = skb->dst->neighbour->type;
4003 if ((cast_type == RTN_BROADCAST) ||
4004 (cast_type == RTN_MULTICAST) ||
4005 (cast_type == RTN_ANYCAST))
4006 return cast_type;
4007 else
4008 return RTN_UNSPEC;
4009 }
4010 /* try something else */
4011 if (skb->protocol == ETH_P_IPV6)
4012 return (skb_network_header(skb)[24] == 0xff) ?
4013 RTN_MULTICAST : 0;
4014 else if (skb->protocol == ETH_P_IP)
4015 return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
4016 RTN_MULTICAST : 0;
4017 /* ... */
4018 if (!memcmp(skb->data, skb->dev->broadcast, 6))
4019 return RTN_BROADCAST;
4020 else {
4021 u16 hdr_mac;
4022
4023 hdr_mac = *((u16 *)skb->data);
4024 /* tr multicast? */
4025 switch (card->info.link_type) {
4026 case QETH_LINK_TYPE_HSTR:
4027 case QETH_LINK_TYPE_LANE_TR:
4028 if ((hdr_mac == QETH_TR_MAC_NC) ||
4029 (hdr_mac == QETH_TR_MAC_C))
4030 return RTN_MULTICAST;
4031 break;
4032 /* eth or so multicast? */
4033 default:
4034 if ((hdr_mac == QETH_ETH_MAC_V4) ||
4035 (hdr_mac == QETH_ETH_MAC_V6))
4036 return RTN_MULTICAST;
4037 }
4038 }
4039 return cast_type;
4040}
4041
/*
 * Select the outbound queue for a packet.  Non-IP traffic on OSA
 * cards and all traffic on devices with fewer than 4 queues use the
 * default queue.  On 4-queue devices, multicast may be pinned to a
 * dedicated queue, and IPv4 packets can be spread by TOS flags or IP
 * precedence, depending on the configured priority-queueing scheme.
 */
static int
qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
			int ipv, int cast_type)
{
	if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
		return card->qdio.default_out_queue;
	switch (card->qdio.no_out_queues) {
	case 4:
		if (cast_type && card->info.is_multicast_different)
			return card->info.is_multicast_different &
				(card->qdio.no_out_queues - 1);
		if (card->qdio.do_prio_queueing && (ipv == 4)) {
			const u8 tos = ip_hdr(skb)->tos;

			if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){
				/* map TOS flags to queue 3 (lowest
				 * priority) down to queue 0 (highest) */
				if (tos & IP_TOS_NOTIMPORTANT)
					return 3;
				if (tos & IP_TOS_HIGHRELIABILITY)
					return 2;
				if (tos & IP_TOS_HIGHTHROUGHPUT)
					return 1;
				if (tos & IP_TOS_LOWDELAY)
					return 0;
			}
			if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC)
				/* precedence 0..3 maps to queue 3..0 */
				return 3 - (tos >> 6);
		} else if (card->qdio.do_prio_queueing && (ipv == 6)) {
			/* TODO: IPv6!!! */
		}
		return card->qdio.default_out_queue;
	case 1: /* fallthrough for single-out-queue 1920-device */
	default:
		return card->qdio.default_out_queue;
	}
}
4077
4078static inline int
4079qeth_get_ip_version(struct sk_buff *skb)
4080{
4081 switch (skb->protocol) {
4082 case ETH_P_IPV6:
4083 return 6;
4084 case ETH_P_IP:
4085 return 4;
4086 default:
4087 return 0;
4088 }
4089}
4090
/*
 * Insert an inline 802.1Q VLAN tag into the frame when required (IPv6
 * or layer2 frames only; for layer3 IPv4 the tag travels in the QDIO
 * header instead), then reserve room for the qeth transport header in
 * front of the data.  Returns a pointer to the new qeth_hdr, or NULL
 * when the headroom could not be provided.
 */
static struct qeth_hdr *
__qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv)
{
#ifdef CONFIG_QETH_VLAN
	u16 *tag;
	if (card->vlangrp && vlan_tx_tag_present(skb) &&
	    ((ipv == 6) || card->options.layer2) ) {
		/*
		 * Move the mac addresses (6 bytes src, 6 bytes dest)
		 * to the beginning of the new header.  We are using three
		 * memcpys instead of one memmove to save cycles.
		 */
		skb_push(skb, VLAN_HLEN);
		skb_copy_to_linear_data(skb, skb->data + 4, 4);
		skb_copy_to_linear_data_offset(skb, 4, skb->data + 8, 4);
		skb_copy_to_linear_data_offset(skb, 8, skb->data + 12, 4);
		tag = (u16 *)(skb->data + 12);
		/*
		 * first two bytes  = ETH_P_8021Q (0x8100)
		 * second two bytes = VLANID
		 */
		*tag = __constant_htons(ETH_P_8021Q);
		*(tag + 1) = htons(vlan_tx_tag_get(skb));
	}
#endif
	return ((struct qeth_hdr *)
		qeth_push_skb(card, skb, sizeof(struct qeth_hdr)));
}
4119
/* Release new_skb only when it is a copy, never the caller's original. */
static void
__qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb)
{
	if (new_skb == orig_skb)
		return;
	dev_kfree_skb_any(new_skb);
}
4126
4127static struct sk_buff *
4128qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
4129 struct qeth_hdr **hdr, int ipv)
4130{
4131 struct sk_buff *new_skb, *new_skb2;
4132
4133 QETH_DBF_TEXT(trace, 6, "prepskb");
4134 new_skb = skb;
4135 new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC);
4136 if (!new_skb)
4137 return NULL;
4138 new_skb2 = qeth_realloc_headroom(card, new_skb,
4139 sizeof(struct qeth_hdr));
4140 if (!new_skb2) {
4141 __qeth_free_new_skb(skb, new_skb);
4142 return NULL;
4143 }
4144 if (new_skb != skb)
4145 __qeth_free_new_skb(new_skb2, new_skb);
4146 new_skb = new_skb2;
4147 *hdr = __qeth_prepare_skb(card, new_skb, ipv);
4148 if (*hdr == NULL) {
4149 __qeth_free_new_skb(skb, new_skb);
4150 return NULL;
4151 }
4152 return new_skb;
4153}
4154
4155static inline u8
4156qeth_get_qeth_hdr_flags4(int cast_type)
4157{
4158 if (cast_type == RTN_MULTICAST)
4159 return QETH_CAST_MULTICAST;
4160 if (cast_type == RTN_BROADCAST)
4161 return QETH_CAST_BROADCAST;
4162 return QETH_CAST_UNICAST;
4163}
4164
4165static inline u8
4166qeth_get_qeth_hdr_flags6(int cast_type)
4167{
4168 u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
4169 if (cast_type == RTN_MULTICAST)
4170 return ct | QETH_CAST_MULTICAST;
4171 if (cast_type == RTN_ANYCAST)
4172 return ct | QETH_CAST_ANYCAST;
4173 if (cast_type == RTN_BROADCAST)
4174 return ct | QETH_CAST_BROADCAST;
4175 return ct | QETH_CAST_UNICAST;
4176}
4177
4178static void
4179qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
4180 struct sk_buff *skb)
4181{
4182 __u16 hdr_mac;
4183
4184 if (!memcmp(skb->data+QETH_HEADER_SIZE,
4185 skb->dev->broadcast,6)) { /* broadcast? */
4186 *(__u32 *)hdr->hdr.l2.flags |=
4187 QETH_LAYER2_FLAG_BROADCAST << 8;
4188 return;
4189 }
4190 hdr_mac=*((__u16*)skb->data);
4191 /* tr multicast? */
4192 switch (card->info.link_type) {
4193 case QETH_LINK_TYPE_HSTR:
4194 case QETH_LINK_TYPE_LANE_TR:
4195 if ((hdr_mac == QETH_TR_MAC_NC) ||
4196 (hdr_mac == QETH_TR_MAC_C) )
4197 *(__u32 *)hdr->hdr.l2.flags |=
4198 QETH_LAYER2_FLAG_MULTICAST << 8;
4199 else
4200 *(__u32 *)hdr->hdr.l2.flags |=
4201 QETH_LAYER2_FLAG_UNICAST << 8;
4202 break;
4203 /* eth or so multicast? */
4204 default:
4205 if ( (hdr_mac==QETH_ETH_MAC_V4) ||
4206 (hdr_mac==QETH_ETH_MAC_V6) )
4207 *(__u32 *)hdr->hdr.l2.flags |=
4208 QETH_LAYER2_FLAG_MULTICAST << 8;
4209 else
4210 *(__u32 *)hdr->hdr.l2.flags |=
4211 QETH_LAYER2_FLAG_UNICAST << 8;
4212 }
4213}
4214
4215static void
4216qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
4217 struct sk_buff *skb, int cast_type)
4218{
4219 memset(hdr, 0, sizeof(struct qeth_hdr));
4220 hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
4221
4222 /* set byte 0 to "0x02" and byte 3 to casting flags */
4223 if (cast_type==RTN_MULTICAST)
4224 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_MULTICAST << 8;
4225 else if (cast_type==RTN_BROADCAST)
4226 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_BROADCAST << 8;
4227 else
4228 qeth_layer2_get_packet_type(card, hdr, skb);
4229
4230 hdr->hdr.l2.pkt_length = skb->len-QETH_HEADER_SIZE;
4231#ifdef CONFIG_QETH_VLAN
4232 /* VSWITCH relies on the VLAN
4233 * information to be present in
4234 * the QDIO header */
4235 if ((card->vlangrp != NULL) &&
4236 vlan_tx_tag_present(skb)) {
4237 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_VLAN << 8;
4238 hdr->hdr.l2.vlan_id = vlan_tx_tag_get(skb);
4239 }
4240#endif
4241}
4242
/*
 * Build the qeth transport header for an outgoing frame.  Layer2
 * cards delegate to qeth_layer2_fill_header(); for layer3 the cast
 * flags, payload length, optional VLAN data and the destination
 * address (next hop if available, else the IP header destination)
 * are filled in.  Non-IP frames use passthrough mode.
 */
void
qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
		 struct sk_buff *skb, int ipv, int cast_type)
{
	QETH_DBF_TEXT(trace, 6, "fillhdr");

	memset(hdr, 0, sizeof(struct qeth_hdr));
	if (card->options.layer2) {
		qeth_layer2_fill_header(card, hdr, skb, cast_type);
		return;
	}
	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
	hdr->hdr.l3.ext_flags = 0;
#ifdef CONFIG_QETH_VLAN
	/*
	 * before we're going to overwrite this location with next hop ip.
	 * v6 uses passthrough, v4 sets the tag in the QDIO header.
	 */
	if (card->vlangrp && vlan_tx_tag_present(skb)) {
		hdr->hdr.l3.ext_flags = (ipv == 4) ?
			QETH_HDR_EXT_VLAN_FRAME :
			QETH_HDR_EXT_INCLUDE_VLAN_TAG;
		hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
	}
#endif /* CONFIG_QETH_VLAN */
	hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
	if (ipv == 4) { /* IPv4 */
		hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags4(cast_type);
		memset(hdr->hdr.l3.dest_addr, 0, 12);
		/* prefer the routing next hop over the IP destination */
		if ((skb->dst) && (skb->dst->neighbour)) {
			*((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
				*((u32 *) skb->dst->neighbour->primary_key);
		} else {
			/* fill in destination address used in ip header */
			*((u32 *)(&hdr->hdr.l3.dest_addr[12])) =
				ip_hdr(skb)->daddr;
		}
	} else if (ipv == 6) { /* IPv6 or passthru */
		hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type);
		/* prefer the routing next hop over the IP destination */
		if ((skb->dst) && (skb->dst->neighbour)) {
			memcpy(hdr->hdr.l3.dest_addr,
			       skb->dst->neighbour->primary_key, 16);
		} else {
			/* fill in destination address used in ip header */
			memcpy(hdr->hdr.l3.dest_addr,
			       &ipv6_hdr(skb)->daddr, 16);
		}
	} else { /* passthrough */
		/* token ring frames have a 2-byte field before the
		 * destination MAC, hence the extra sizeof(__u16) offset */
		if((skb->dev->type == ARPHRD_IEEE802_TR) &&
		    !memcmp(skb->data + sizeof(struct qeth_hdr) +
		    sizeof(__u16), skb->dev->broadcast, 6)) {
			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
						QETH_HDR_PASSTHRU;
		} else if (!memcmp(skb->data + sizeof(struct qeth_hdr),
			    skb->dev->broadcast, 6)) { /* broadcast? */
			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
						QETH_HDR_PASSTHRU;
		} else {
			hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
				QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
				QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
		}
	}
}
4307
4308static void
4309__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
4310 int is_tso, int *next_element_to_fill)
4311{
4312 int length = skb->len;
4313 int length_here;
4314 int element;
4315 char *data;
4316 int first_lap ;
4317
4318 element = *next_element_to_fill;
4319 data = skb->data;
4320 first_lap = (is_tso == 0 ? 1 : 0);
4321
4322 while (length > 0) {
4323 /* length_here is the remaining amount of data in this page */
4324 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
4325 if (length < length_here)
4326 length_here = length;
4327
4328 buffer->element[element].addr = data;
4329 buffer->element[element].length = length_here;
4330 length -= length_here;
4331 if (!length) {
4332 if (first_lap)
4333 buffer->element[element].flags = 0;
4334 else
4335 buffer->element[element].flags =
4336 SBAL_FLAGS_LAST_FRAG;
4337 } else {
4338 if (first_lap)
4339 buffer->element[element].flags =
4340 SBAL_FLAGS_FIRST_FRAG;
4341 else
4342 buffer->element[element].flags =
4343 SBAL_FLAGS_MIDDLE_FRAG;
4344 }
4345 data += length_here;
4346 element++;
4347 first_lap = 0;
4348 }
4349 *next_element_to_fill = element;
4350}
4351
/*
 * Fill one output buffer with the given skb.  The skb is queued on the
 * buffer's skb_list (with an extra reference taken) so it can be freed
 * after transmission completes.  For a TSO skb the TSO/protocol header
 * gets an SBAL element of its own before the payload is mapped.
 *
 * Returns 1 if the buffer was set to PRIMED (caller must flush it),
 * 0 if the buffer stays open for further packing.
 */
static int
qeth_fill_buffer(struct qeth_qdio_out_q *queue,
		 struct qeth_qdio_out_buffer *buf,
		 struct sk_buff *skb)
{
	struct qdio_buffer *buffer;
	struct qeth_hdr_tso *hdr;
	int flush_cnt = 0, hdr_len, large_send = 0;

	QETH_DBF_TEXT(trace, 6, "qdfillbf");

	buffer = buf->buffer;
	/* take our own reference; dropped when the buffer is cleaned up */
	atomic_inc(&skb->users);
	skb_queue_tail(&buf->skb_list, skb);

	hdr = (struct qeth_hdr_tso *) skb->data;
	/*check first on TSO ....*/
	if (hdr->hdr.hdr.l3.id == QETH_HEADER_TYPE_TSO) {
		int element = buf->next_element_to_fill;

		hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
		/*fill first buffer entry only with header information */
		buffer->element[element].addr = skb->data;
		buffer->element[element].length = hdr_len;
		buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
		buf->next_element_to_fill++;
		/* advance past the header so the fill helpers only see the
		 * payload; large_send suppresses another FIRST_FRAG flag */
		skb->data += hdr_len;
		skb->len -= hdr_len;
		large_send = 1;
	}
	if (skb_shinfo(skb)->nr_frags == 0)
		__qeth_fill_buffer(skb, buffer, large_send,
				   (int *)&buf->next_element_to_fill);
	else
		__qeth_fill_buffer_frag(skb, buffer, large_send,
					(int *)&buf->next_element_to_fill);

	if (!queue->do_pack) {
		QETH_DBF_TEXT(trace, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
		flush_cnt = 1;
	} else {
		QETH_DBF_TEXT(trace, 6, "fillbfpa");
		if (queue->card->options.performance_stats)
			queue->card->perf_stats.skbs_sent_pack++;
		if (buf->next_element_to_fill >=
				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/*
			 * packed buffer if full -> set state PRIMED
			 * -> will be flushed
			 */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt = 1;
		}
	}
	return flush_cnt;
}
4410
/*
 * Fast (non-packing) transmit path: claim the queue lock, take the next
 * empty output buffer, fill it and flush it right away.  Returns 0 on
 * success or -EBUSY if the next buffer is still in use (or the EDDP
 * context does not fit into the following free buffers).
 */
static int
qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
			 struct sk_buff *skb, struct qeth_hdr *hdr,
			 int elements_needed,
			 struct qeth_eddp_context *ctx)
{
	struct qeth_qdio_out_buffer *buffer;
	int buffers_needed = 0;
	int flush_cnt = 0;
	int index;

	QETH_DBF_TEXT(trace, 6, "dosndpfa");

	/* spin until we get the queue ... */
	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
	/* ... now we've got the queue */
	index = queue->next_buf_to_fill;
	buffer = &queue->bufs[queue->next_buf_to_fill];
	/*
	 * check if buffer is empty to make sure that we do not 'overtake'
	 * ourselves and try to fill a buffer that is already primed
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		goto out;
	if (ctx == NULL)
		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
					  QDIO_MAX_BUFFERS_PER_Q;
	else {
		/* an EDDP context may span several buffers */
		buffers_needed = qeth_eddp_check_buffers_for_context(queue,ctx);
		if (buffers_needed < 0)
			goto out;
		queue->next_buf_to_fill =
			(queue->next_buf_to_fill + buffers_needed) %
			QDIO_MAX_BUFFERS_PER_Q;
	}
	/* NOTE(review): the queue lock is dropped before the buffer is
	 * filled; presumably safe because next_buf_to_fill was already
	 * advanced past the claimed buffer(s) -- confirm */
	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
	if (ctx == NULL) {
		qeth_fill_buffer(queue, buffer, skb);
		qeth_flush_buffers(queue, 0, index, 1);
	} else {
		flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
		WARN_ON(buffers_needed != flush_cnt);
		qeth_flush_buffers(queue, 0, index, flush_cnt);
	}
	return 0;
out:
	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
	return -EBUSY;
}
4461
4462static int
4463qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4464 struct sk_buff *skb, struct qeth_hdr *hdr,
4465 int elements_needed, struct qeth_eddp_context *ctx)
4466{
4467 struct qeth_qdio_out_buffer *buffer;
4468 int start_index;
4469 int flush_count = 0;
4470 int do_pack = 0;
4471 int tmp;
4472 int rc = 0;
4473
4474 QETH_DBF_TEXT(trace, 6, "dosndpkt");
4475
4476 /* spin until we get the queue ... */
4477 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
4478 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
4479 start_index = queue->next_buf_to_fill;
4480 buffer = &queue->bufs[queue->next_buf_to_fill];
4481 /*
4482 * check if buffer is empty to make sure that we do not 'overtake'
4483 * ourselves and try to fill a buffer that is already primed
4484 */
4485 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
4486 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4487 return -EBUSY;
4488 }
4489 /* check if we need to switch packing state of this queue */
4490 qeth_switch_to_packing_if_needed(queue);
4491 if (queue->do_pack){
4492 do_pack = 1;
4493 if (ctx == NULL) {
4494 /* does packet fit in current buffer? */
4495 if((QETH_MAX_BUFFER_ELEMENTS(card) -
4496 buffer->next_element_to_fill) < elements_needed){
4497 /* ... no -> set state PRIMED */
4498 atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
4499 flush_count++;
4500 queue->next_buf_to_fill =
4501 (queue->next_buf_to_fill + 1) %
4502 QDIO_MAX_BUFFERS_PER_Q;
4503 buffer = &queue->bufs[queue->next_buf_to_fill];
4504 /* we did a step forward, so check buffer state
4505 * again */
4506 if (atomic_read(&buffer->state) !=
4507 QETH_QDIO_BUF_EMPTY){
4508 qeth_flush_buffers(queue, 0, start_index, flush_count);
4509 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4510 return -EBUSY;
4511 }
4512 }
4513 } else {
4514 /* check if we have enough elements (including following
4515 * free buffers) to handle eddp context */
4516 if (qeth_eddp_check_buffers_for_context(queue,ctx) < 0){
4517 if (net_ratelimit())
4518 PRINT_WARN("eddp tx_dropped 1\n");
4519 rc = -EBUSY;
4520 goto out;
4521 }
4522 }
4523 }
4524 if (ctx == NULL)
4525 tmp = qeth_fill_buffer(queue, buffer, skb);
4526 else {
4527 tmp = qeth_eddp_fill_buffer(queue,ctx,queue->next_buf_to_fill);
4528 if (tmp < 0) {
4529 printk("eddp tx_dropped 2\n");
4530 rc = - EBUSY;
4531 goto out;
4532 }
4533 }
4534 queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
4535 QDIO_MAX_BUFFERS_PER_Q;
4536 flush_count += tmp;
4537out:
4538 if (flush_count)
4539 qeth_flush_buffers(queue, 0, start_index, flush_count);
4540 else if (!atomic_read(&queue->set_pci_flags_count))
4541 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
4542 /*
4543 * queue->state will go from LOCKED -> UNLOCKED or from
4544 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
4545 * (switch packing state or flush buffer to get another pci flag out).
4546 * In that case we will enter this loop
4547 */
4548 while (atomic_dec_return(&queue->state)){
4549 flush_count = 0;
4550 start_index = queue->next_buf_to_fill;
4551 /* check if we can go back to non-packing state */
4552 flush_count += qeth_switch_to_nonpacking_if_needed(queue);
4553 /*
4554 * check if we need to flush a packing buffer to get a pci
4555 * flag out on the queue
4556 */
4557 if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
4558 flush_count += qeth_flush_buffers_on_no_pci(queue);
4559 if (flush_count)
4560 qeth_flush_buffers(queue, 0, start_index, flush_count);
4561 }
4562 /* at this point the queue is UNLOCKED again */
4563 if (queue->card->options.performance_stats && do_pack)
4564 queue->card->perf_stats.bufs_sent_pack += flush_count;
4565
4566 return rc;
4567}
4568
4569static int
4570qeth_get_elements_no(struct qeth_card *card, void *hdr,
4571 struct sk_buff *skb, int elems)
4572{
4573 int elements_needed = 0;
4574
4575 if (skb_shinfo(skb)->nr_frags > 0)
4576 elements_needed = (skb_shinfo(skb)->nr_frags + 1);
4577 if (elements_needed == 0)
4578 elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
4579 + skb->len) >> PAGE_SHIFT);
4580 if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)){
4581 PRINT_ERR("Invalid size of IP packet "
4582 "(Number=%d / Length=%d). Discarded.\n",
4583 (elements_needed+elems), skb->len);
4584 return 0;
4585 }
4586 return elements_needed;
4587}
4588
4589static void qeth_tx_csum(struct sk_buff *skb)
4590{
4591 int tlen;
4592
4593 if (skb->protocol == htons(ETH_P_IP)) {
4594 tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);
4595 switch (ip_hdr(skb)->protocol) {
4596 case IPPROTO_TCP:
4597 tcp_hdr(skb)->check = 0;
4598 tcp_hdr(skb)->check = csum_tcpudp_magic(
4599 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
4600 tlen, ip_hdr(skb)->protocol,
4601 skb_checksum(skb, skb_transport_offset(skb),
4602 tlen, 0));
4603 break;
4604 case IPPROTO_UDP:
4605 udp_hdr(skb)->check = 0;
4606 udp_hdr(skb)->check = csum_tcpudp_magic(
4607 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
4608 tlen, ip_hdr(skb)->protocol,
4609 skb_checksum(skb, skb_transport_offset(skb),
4610 tlen, 0));
4611 break;
4612 }
4613 } else if (skb->protocol == htons(ETH_P_IPV6)) {
4614 switch (ipv6_hdr(skb)->nexthdr) {
4615 case IPPROTO_TCP:
4616 tcp_hdr(skb)->check = 0;
4617 tcp_hdr(skb)->check = csum_ipv6_magic(
4618 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
4619 ipv6_hdr(skb)->payload_len,
4620 ipv6_hdr(skb)->nexthdr,
4621 skb_checksum(skb, skb_transport_offset(skb),
4622 ipv6_hdr(skb)->payload_len, 0));
4623 break;
4624 case IPPROTO_UDP:
4625 udp_hdr(skb)->check = 0;
4626 udp_hdr(skb)->check = csum_ipv6_magic(
4627 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
4628 ipv6_hdr(skb)->payload_len,
4629 ipv6_hdr(skb)->nexthdr,
4630 skb_checksum(skb, skb_transport_offset(skb),
4631 ipv6_hdr(skb)->payload_len, 0));
4632 break;
4633 }
4634 }
4635}
4636
/*
 * Central transmit routine: classify the skb, prepare headers (fake-LL
 * stripping, TSO, EDDP or plain qeth header), hand the result to the
 * packing or fast send path and maintain tx statistics.  Returns 0 on
 * success or a negative errno; on failure all skb copies created here
 * are freed and tx_dropped is incremented.
 */
static int
qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
{
	int ipv = 0;
	int cast_type;
	struct qeth_qdio_out_q *queue;
	struct qeth_hdr *hdr = NULL;
	int elements_needed = 0;
	enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
	struct qeth_eddp_context *ctx = NULL;
	/* snapshot length/frag info now; header handling below mutates skb */
	int tx_bytes = skb->len;
	unsigned short nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned short tso_size = skb_shinfo(skb)->gso_size;
	struct sk_buff *new_skb, *new_skb2;
	int rc;

	QETH_DBF_TEXT(trace, 6, "sendpkt");

	new_skb = skb;
	/* OSN devices do not transmit IPv6 */
	if ((card->info.type == QETH_CARD_TYPE_OSN) &&
	    (skb->protocol == htons(ETH_P_IPV6)))
		return -EPERM;
	cast_type = qeth_get_cast_type(card, skb);
	if ((cast_type == RTN_BROADCAST) &&
	    (card->info.broadcast_capable == 0))
		return -EPERM;
	/* NOTE(review): the priority queue is chosen with ipv still 0;
	 * ipv is only computed below for layer-3 mode -- confirm intended */
	queue = card->qdio.out_qs
		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
	if (!card->options.layer2) {
		ipv = qeth_get_ip_version(skb);
		/* strip the fake link-layer header added for the stack */
		if ((card->dev->header_ops == &qeth_fake_ops) && ipv) {
			new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC);
			if (!new_skb)
				return -ENOMEM;
			if(card->dev->type == ARPHRD_IEEE802_TR){
				skb_pull(new_skb, QETH_FAKE_LL_LEN_TR);
			} else {
				skb_pull(new_skb, QETH_FAKE_LL_LEN_ETH);
			}
		}
	}
	if (skb_is_gso(skb))
		large_send = card->options.large_send;
	/* check on OSN device*/
	if (card->info.type == QETH_CARD_TYPE_OSN)
		hdr = (struct qeth_hdr *)new_skb->data;
	/*are we able to do TSO ? */
	if ((large_send == QETH_LARGE_SEND_TSO) &&
	    (cast_type == RTN_UNSPEC)) {
		rc = qeth_tso_prepare_packet(card, new_skb, ipv, cast_type);
		if (rc) {
			__qeth_free_new_skb(skb, new_skb);
			return rc;
		}
		/* the TSO header consumes one extra SBAL element */
		elements_needed++;
	} else if (card->info.type != QETH_CARD_TYPE_OSN) {
		/* prepend a plain qeth header (may reallocate the skb) */
		new_skb2 = qeth_prepare_skb(card, new_skb, &hdr, ipv);
		if (!new_skb2) {
			__qeth_free_new_skb(skb, new_skb);
			return -EINVAL;
		}
		if (new_skb != skb)
			__qeth_free_new_skb(new_skb2, new_skb);
		new_skb = new_skb2;
		qeth_fill_header(card, hdr, new_skb, ipv, cast_type);
	}
	if (large_send == QETH_LARGE_SEND_EDDP) {
		/* EDDP: segmentation contexts instead of direct buffers */
		ctx = qeth_eddp_create_context(card, new_skb, hdr,
					       skb->sk->sk_protocol);
		if (ctx == NULL) {
			__qeth_free_new_skb(skb, new_skb);
			PRINT_WARN("could not create eddp context\n");
			return -EINVAL;
		}
	} else {
		/* verify the packet fits into one buffer's element limit */
		int elems = qeth_get_elements_no(card,(void*) hdr, new_skb,
						 elements_needed);
		if (!elems) {
			__qeth_free_new_skb(skb, new_skb);
			return -EINVAL;
		}
		elements_needed += elems;
	}

	/* hardware will not checksum for us -> do it in software */
	if ((large_send == QETH_LARGE_SEND_NO) &&
	    (skb->ip_summed == CHECKSUM_PARTIAL))
		qeth_tx_csum(new_skb);

	if (card->info.type != QETH_CARD_TYPE_IQD)
		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
					 elements_needed, ctx);
	else {
		/* IQD in layer-3 mode carries IP traffic only */
		if ((!card->options.layer2) &&
		    (ipv == 0)) {
			__qeth_free_new_skb(skb, new_skb);
			return -EPERM;
		}
		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
					      elements_needed, ctx);
	}
	if (!rc) {
		card->stats.tx_packets++;
		card->stats.tx_bytes += tx_bytes;
		if (new_skb != skb)
			dev_kfree_skb_any(skb);
		if (card->options.performance_stats) {
			if (tso_size &&
			    !(large_send == QETH_LARGE_SEND_NO)) {
				card->perf_stats.large_send_bytes += tx_bytes;
				card->perf_stats.large_send_cnt++;
			}
			if (nr_frags > 0) {
				card->perf_stats.sg_skbs_sent++;
				/* nr_frags + skb->data */
				card->perf_stats.sg_frags_sent +=
					nr_frags + 1;
			}
		}
	} else {
		card->stats.tx_dropped++;
		__qeth_free_new_skb(skb, new_skb);
	}
	if (ctx != NULL) {
		/* drop creator's reference */
		qeth_eddp_put_context(ctx);
		/* free skb; it's not referenced by a buffer */
		if (!rc)
			dev_kfree_skb_any(new_skb);
	}
	return rc;
}
4768
4769static int
4770qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4771{
4772 struct qeth_card *card = (struct qeth_card *) dev->priv;
4773 int rc = 0;
4774
4775 switch(regnum){
4776 case MII_BMCR: /* Basic mode control register */
4777 rc = BMCR_FULLDPLX;
4778 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&&
4779 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4780 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
4781 rc |= BMCR_SPEED100;
4782 break;
4783 case MII_BMSR: /* Basic mode status register */
4784 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4785 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4786 BMSR_100BASE4;
4787 break;
4788 case MII_PHYSID1: /* PHYS ID 1 */
4789 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4790 dev->dev_addr[2];
4791 rc = (rc >> 5) & 0xFFFF;
4792 break;
4793 case MII_PHYSID2: /* PHYS ID 2 */
4794 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4795 break;
4796 case MII_ADVERTISE: /* Advertisement control reg */
4797 rc = ADVERTISE_ALL;
4798 break;
4799 case MII_LPA: /* Link partner ability reg */
4800 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4801 LPA_100BASE4 | LPA_LPACK;
4802 break;
4803 case MII_EXPANSION: /* Expansion register */
4804 break;
4805 case MII_DCOUNTER: /* disconnect counter */
4806 break;
4807 case MII_FCSCOUNTER: /* false carrier counter */
4808 break;
4809 case MII_NWAYTEST: /* N-way auto-neg test register */
4810 break;
4811 case MII_RERRCOUNTER: /* rx error counter */
4812 rc = card->stats.rx_errors;
4813 break;
4814 case MII_SREVISION: /* silicon revision */
4815 break;
4816 case MII_RESV1: /* reserved 1 */
4817 break;
4818 case MII_LBRERROR: /* loopback, rx, bypass error */
4819 break;
4820 case MII_PHYADDR: /* physical address */
4821 break;
4822 case MII_RESV2: /* reserved 2 */
4823 break;
4824 case MII_TPISTATUS: /* TPI status for 10mbps */
4825 break;
4826 case MII_NCONFIG: /* network interface config */
4827 break;
4828 default:
4829 break;
4830 }
4831 return rc;
4832}
4833
4834
4835static const char *
4836qeth_arp_get_error_cause(int *rc)
4837{
4838 switch (*rc) {
4839 case QETH_IPA_ARP_RC_FAILED:
4840 *rc = -EIO;
4841 return "operation failed";
4842 case QETH_IPA_ARP_RC_NOTSUPP:
4843 *rc = -EOPNOTSUPP;
4844 return "operation not supported";
4845 case QETH_IPA_ARP_RC_OUT_OF_RANGE:
4846 *rc = -EINVAL;
4847 return "argument out of range";
4848 case QETH_IPA_ARP_RC_Q_NOTSUPP:
4849 *rc = -EOPNOTSUPP;
4850 return "query operation not supported";
4851 case QETH_IPA_ARP_RC_Q_NO_DATA:
4852 *rc = -ENOENT;
4853 return "no query data available";
4854 default:
4855 return "unknown error";
4856 }
4857}
4858
4859static int
4860qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs,
4861 __u16, long);
4862
4863static int
4864qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
4865{
4866 int tmp;
4867 int rc;
4868
4869 QETH_DBF_TEXT(trace,3,"arpstnoe");
4870
4871 /*
4872 * currently GuestLAN only supports the ARP assist function
4873 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
4874 * thus we say EOPNOTSUPP for this ARP function
4875 */
4876 if (card->info.guestlan)
4877 return -EOPNOTSUPP;
4878 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4879 PRINT_WARN("ARP processing not supported "
4880 "on %s!\n", QETH_CARD_IFNAME(card));
4881 return -EOPNOTSUPP;
4882 }
4883 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
4884 IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
4885 no_entries);
4886 if (rc) {
4887 tmp = rc;
4888 PRINT_WARN("Could not set number of ARP entries on %s: "
4889 "%s (0x%x/%d)\n",
4890 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
4891 tmp, tmp);
4892 }
4893 return rc;
4894}
4895
4896static void
4897qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
4898 struct qeth_arp_query_data *qdata,
4899 int entry_size, int uentry_size)
4900{
4901 char *entry_ptr;
4902 char *uentry_ptr;
4903 int i;
4904
4905 entry_ptr = (char *)&qdata->data;
4906 uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
4907 for (i = 0; i < qdata->no_entries; ++i){
4908 /* strip off 32 bytes "media specific information" */
4909 memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
4910 entry_ptr += entry_size;
4911 uentry_ptr += uentry_size;
4912 }
4913}
4914
/*
 * IPA callback for IPA_CMD_ASS_ARP_QUERY_INFO: appends the ARP entries
 * of each reply part to the staging buffer in qinfo (reply->param).
 * Returns 1 while more reply parts are expected, 0 when done or on
 * error (errors are signalled via cmd->hdr.return_code and an entry
 * count of 0 in the staging buffer).
 */
static int
qeth_arp_query_cb(struct qeth_card *card, struct qeth_reply *reply,
		  unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_arp_query_data *qdata;
	struct qeth_arp_query_info *qinfo;
	int entry_size;
	int uentry_size;
	int i;

	QETH_DBF_TEXT(trace,4,"arpquecb");

	qinfo = (struct qeth_arp_query_info *) reply->param;
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_DBF_TEXT_(trace,4,"qaer1%i", cmd->hdr.return_code);
		return 0;
	}
	if (cmd->data.setassparms.hdr.return_code) {
		/* propagate the assist-level error to the command level */
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
		QETH_DBF_TEXT_(trace,4,"qaer2%i", cmd->hdr.return_code);
		return 0;
	}
	qdata = &cmd->data.setassparms.data.query_arp;
	/* entry_size: on-wire size; uentry_size: size copied to the user
	 * (smaller when the media-specific prefix is stripped) */
	switch(qdata->reply_bits){
	case 5:
		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
			uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
		break;
	case 7:
		/* fall through to default */
	default:
		/* tr is the same as eth -> entry7 */
		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
			uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
		break;
	}
	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) <
			qdata->no_entries * uentry_size){
		QETH_DBF_TEXT_(trace, 4, "qaer3%i", -ENOMEM);
		cmd->hdr.return_code = -ENOMEM;
		PRINT_WARN("query ARP user space buffer is too small for "
			   "the returned number of ARP entries. "
			   "Aborting query!\n");
		goto out_error;
	}
	QETH_DBF_TEXT_(trace, 4, "anore%i",
		       cmd->data.setassparms.hdr.number_of_replies);
	QETH_DBF_TEXT_(trace, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
	QETH_DBF_TEXT_(trace, 4, "anoen%i", qdata->no_entries);

	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
		/* strip off "media specific information" */
		qeth_copy_arp_entries_stripped(qinfo, qdata, entry_size,
					       uentry_size);
	} else
		/*copy entries to user buffer*/
		memcpy(qinfo->udata + qinfo->udata_offset,
		       (char *)&qdata->data, qdata->no_entries*uentry_size);

	qinfo->no_entries += qdata->no_entries;
	qinfo->udata_offset += (qdata->no_entries*uentry_size);
	/* check if all replies received ... */
	if (cmd->data.setassparms.hdr.seq_no <
			cmd->data.setassparms.hdr.number_of_replies)
		return 1;
	/* first 4 bytes of the staging buffer carry the entry count */
	memcpy(qinfo->udata, &qinfo->no_entries, 4);
	/* keep STRIP_ENTRIES flag so the user program can distinguish
	 * stripped entries from normal ones */
	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
		qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
	memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET,&qdata->reply_bits,2);
	return 0;
out_error:
	i = 0;
	/* report zero entries to the user on error */
	memcpy(qinfo->udata, &i, 4);
	return 0;
}
4997
/*
 * Send an ARP assist command of @len payload bytes.  The PDU header
 * template is copied into the buffer first and the ULP connection
 * token is then patched into its destination-address field, so the
 * two memcpys must stay in this order.  Returns the result of
 * qeth_send_control_data().
 */
static int
qeth_send_ipa_arp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		      int len, int (*reply_cb)(struct qeth_card *,
					       struct qeth_reply *,
					       unsigned long),
		      void *reply_param)
{
	QETH_DBF_TEXT(trace,4,"sendarp");

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
				      reply_cb, reply_param);
}
5013
/*
 * Send an SNMP adapter command of @len payload bytes.  Like
 * qeth_send_ipa_arp_cmd(), but additionally patches the total and
 * per-PDU length fields of the IPA_PDU_HEADER template, which must
 * happen after the template has been copied into the buffer.
 */
static int
qeth_send_ipa_snmp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		       int len, int (*reply_cb)(struct qeth_card *,
						struct qeth_reply *,
						unsigned long),
		       void *reply_param)
{
	u16 s1, s2;

	QETH_DBF_TEXT(trace,4,"sendsnmp");

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	/* adjust PDU length fields in IPA_PDU_HEADER */
	/* (the (u32) casts are immediately truncated to the u16 locals,
	 * which is what the 2-byte memcpys below expect) */
	s1 = (u32) IPA_PDU_HEADER_SIZE + len;
	s2 = (u32) len;
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
				      reply_cb, reply_param);
}
5038
5039static struct qeth_cmd_buffer *
5040qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs,
5041 __u16, __u16, enum qeth_prot_versions);
/*
 * SIOC ioctl backend: query the adapter's ARP cache and copy the
 * entries to the userspace buffer at @udata.  @udata starts with the
 * buffer length and mask bits (6 bytes), which are read here; the
 * result (entry count, mask, entries) is written back on completion.
 * Returns 0 or a negative errno.
 */
static int
qeth_arp_query(struct qeth_card *card, char __user *udata)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_arp_query_info qinfo = {0, };
	int tmp;
	int rc;

	QETH_DBF_TEXT(trace,3,"arpquery");

	if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
			       IPA_ARP_PROCESSING)) {
		PRINT_WARN("ARP processing not supported "
			   "on %s!\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}
	/* get size of userspace buffer and mask_bits -> 6 bytes */
	if (copy_from_user(&qinfo, udata, 6))
		return -EFAULT;
	/* NOTE(review): udata_len is user-controlled and not validated
	 * beyond the kzalloc failure check -- confirm a sanity bound is
	 * not needed here */
	if (!(qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL)))
		return -ENOMEM;
	qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
	/* NOTE(review): iob is used without a NULL check -- verify
	 * qeth_get_setassparms_cmd() cannot fail */
	iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
				       IPA_CMD_ASS_ARP_QUERY_INFO,
				       sizeof(int),QETH_PROT_IPV4);

	rc = qeth_send_ipa_arp_cmd(card, iob,
				   QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
				   qeth_arp_query_cb, (void *)&qinfo);
	if (rc) {
		tmp = rc;
		PRINT_WARN("Error while querying ARP cache on %s: %s "
			   "(0x%x/%d)\n",
			   QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
			   tmp, tmp);
		/* still hand back the (zero) entry count on failure */
		if (copy_to_user(udata, qinfo.udata, 4))
			rc = -EFAULT;
	} else {
		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
			rc = -EFAULT;
	}
	kfree(qinfo.udata);
	return rc;
}
5086
5087/**
5088 * SNMP command callback
5089 */
5090static int
5091qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply,
5092 unsigned long sdata)
5093{
5094 struct qeth_ipa_cmd *cmd;
5095 struct qeth_arp_query_info *qinfo;
5096 struct qeth_snmp_cmd *snmp;
5097 unsigned char *data;
5098 __u16 data_len;
5099
5100 QETH_DBF_TEXT(trace,3,"snpcmdcb");
5101
5102 cmd = (struct qeth_ipa_cmd *) sdata;
5103 data = (unsigned char *)((char *)cmd - reply->offset);
5104 qinfo = (struct qeth_arp_query_info *) reply->param;
5105 snmp = &cmd->data.setadapterparms.data.snmp;
5106
5107 if (cmd->hdr.return_code) {
5108 QETH_DBF_TEXT_(trace,4,"scer1%i", cmd->hdr.return_code);
5109 return 0;
5110 }
5111 if (cmd->data.setadapterparms.hdr.return_code) {
5112 cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
5113 QETH_DBF_TEXT_(trace,4,"scer2%i", cmd->hdr.return_code);
5114 return 0;
5115 }
5116 data_len = *((__u16*)QETH_IPA_PDU_LEN_PDU1(data));
5117 if (cmd->data.setadapterparms.hdr.seq_no == 1)
5118 data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
5119 else
5120 data_len -= (__u16)((char*)&snmp->request - (char *)cmd);
5121
5122 /* check if there is enough room in userspace */
5123 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
5124 QETH_DBF_TEXT_(trace, 4, "scer3%i", -ENOMEM);
5125 cmd->hdr.return_code = -ENOMEM;
5126 return 0;
5127 }
5128 QETH_DBF_TEXT_(trace, 4, "snore%i",
5129 cmd->data.setadapterparms.hdr.used_total);
5130 QETH_DBF_TEXT_(trace, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no);
5131 /*copy entries to user buffer*/
5132 if (cmd->data.setadapterparms.hdr.seq_no == 1) {
5133 memcpy(qinfo->udata + qinfo->udata_offset,
5134 (char *)snmp,
5135 data_len + offsetof(struct qeth_snmp_cmd,data));
5136 qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
5137 } else {
5138 memcpy(qinfo->udata + qinfo->udata_offset,
5139 (char *)&snmp->request, data_len);
5140 }
5141 qinfo->udata_offset += data_len;
5142 /* check if all replies received ... */
5143 QETH_DBF_TEXT_(trace, 4, "srtot%i",
5144 cmd->data.setadapterparms.hdr.used_total);
5145 QETH_DBF_TEXT_(trace, 4, "srseq%i",
5146 cmd->data.setadapterparms.hdr.seq_no);
5147 if (cmd->data.setadapterparms.hdr.seq_no <
5148 cmd->data.setadapterparms.hdr.used_total)
5149 return 1;
5150 return 0;
5151}
5152
5153static struct qeth_cmd_buffer *
5154qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds,
5155 enum qeth_prot_versions );
5156
5157static struct qeth_cmd_buffer *
5158qeth_get_adapter_cmd(struct qeth_card *card, __u32 command, __u32 cmdlen)
5159{
5160 struct qeth_cmd_buffer *iob;
5161 struct qeth_ipa_cmd *cmd;
5162
5163 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETADAPTERPARMS,
5164 QETH_PROT_IPV4);
5165 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5166 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
5167 cmd->data.setadapterparms.hdr.command_code = command;
5168 cmd->data.setadapterparms.hdr.used_total = 1;
5169 cmd->data.setadapterparms.hdr.seq_no = 1;
5170
5171 return iob;
5172}
5173
5174/**
5175 * function to send SNMP commands to OSA-E card
5176 */
5177static int
5178qeth_snmp_command(struct qeth_card *card, char __user *udata)
5179{
5180 struct qeth_cmd_buffer *iob;
5181 struct qeth_ipa_cmd *cmd;
5182 struct qeth_snmp_ureq *ureq;
5183 int req_len;
5184 struct qeth_arp_query_info qinfo = {0, };
5185 int rc = 0;
5186
5187 QETH_DBF_TEXT(trace,3,"snmpcmd");
5188
5189 if (card->info.guestlan)
5190 return -EOPNOTSUPP;
5191
5192 if ((!qeth_adp_supported(card,IPA_SETADP_SET_SNMP_CONTROL)) &&
5193 (!card->options.layer2) ) {
5194 PRINT_WARN("SNMP Query MIBS not supported "
5195 "on %s!\n", QETH_CARD_IFNAME(card));
5196 return -EOPNOTSUPP;
5197 }
5198 /* skip 4 bytes (data_len struct member) to get req_len */
5199 if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
5200 return -EFAULT;
5201 ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
5202 if (!ureq) {
5203 QETH_DBF_TEXT(trace, 2, "snmpnome");
5204 return -ENOMEM;
5205 }
5206 if (copy_from_user(ureq, udata,
5207 req_len+sizeof(struct qeth_snmp_ureq_hdr))){
5208 kfree(ureq);
5209 return -EFAULT;
5210 }
5211 qinfo.udata_len = ureq->hdr.data_len;
5212 if (!(qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL))){
5213 kfree(ureq);
5214 return -ENOMEM;
5215 }
5216 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
5217
5218 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
5219 QETH_SNMP_SETADP_CMDLENGTH + req_len);
5220 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5221 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
5222 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
5223 qeth_snmp_command_cb, (void *)&qinfo);
5224 if (rc)
5225 PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
5226 QETH_CARD_IFNAME(card), rc);
5227 else {
5228 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
5229 rc = -EFAULT;
5230 }
5231
5232 kfree(ureq);
5233 kfree(qinfo.udata);
5234 return rc;
5235}
5236
5237static int
5238qeth_default_setassparms_cb(struct qeth_card *, struct qeth_reply *,
5239 unsigned long);
5240
5241static int
5242qeth_default_setadapterparms_cb(struct qeth_card *card,
5243 struct qeth_reply *reply,
5244 unsigned long data);
5245static int
5246qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *,
5247 __u16, long,
5248 int (*reply_cb)
5249 (struct qeth_card *, struct qeth_reply *, unsigned long),
5250 void *reply_param);
5251
5252static int
5253qeth_arp_add_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
5254{
5255 struct qeth_cmd_buffer *iob;
5256 char buf[16];
5257 int tmp;
5258 int rc;
5259
5260 QETH_DBF_TEXT(trace,3,"arpadent");
5261
5262 /*
5263 * currently GuestLAN only supports the ARP assist function
5264 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
5265 * thus we say EOPNOTSUPP for this ARP function
5266 */
5267 if (card->info.guestlan)
5268 return -EOPNOTSUPP;
5269 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
5270 PRINT_WARN("ARP processing not supported "
5271 "on %s!\n", QETH_CARD_IFNAME(card));
5272 return -EOPNOTSUPP;
5273 }
5274
5275 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
5276 IPA_CMD_ASS_ARP_ADD_ENTRY,
5277 sizeof(struct qeth_arp_cache_entry),
5278 QETH_PROT_IPV4);
5279 rc = qeth_send_setassparms(card, iob,
5280 sizeof(struct qeth_arp_cache_entry),
5281 (unsigned long) entry,
5282 qeth_default_setassparms_cb, NULL);
5283 if (rc) {
5284 tmp = rc;
5285 qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
5286 PRINT_WARN("Could not add ARP entry for address %s on %s: "
5287 "%s (0x%x/%d)\n",
5288 buf, QETH_CARD_IFNAME(card),
5289 qeth_arp_get_error_cause(&rc), tmp, tmp);
5290 }
5291 return rc;
5292}
5293
/*
 * qeth_arp_remove_entry() - remove a static entry from the adapter's
 * ARP cache via IPA_CMD_ASS_ARP_REMOVE_ENTRY.  Only the first 12 bytes
 * of @entry (the lookup key) are sent to the card.
 * Returns 0 on success, -EOPNOTSUPP when ARP processing is unavailable,
 * or the translated IPA error code.
 */
static int
qeth_arp_remove_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
{
	struct qeth_cmd_buffer *iob;
	char buf[16] = {0, };
	int tmp;
	int rc;

	QETH_DBF_TEXT(trace,3,"arprment");

	/*
	 * currently GuestLAN only supports the ARP assist function
	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
	 * thus we say EOPNOTSUPP for this ARP function
	 */
	if (card->info.guestlan)
		return -EOPNOTSUPP;
	if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
		PRINT_WARN("ARP processing not supported "
			   "on %s!\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}
	/* only the leading 12 bytes of the entry identify it to the card */
	memcpy(buf, entry, 12);
	iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
				       IPA_CMD_ASS_ARP_REMOVE_ENTRY,
				       12,
				       QETH_PROT_IPV4);
	rc = qeth_send_setassparms(card, iob,
				   12, (unsigned long)buf,
				   qeth_default_setassparms_cb, NULL);
	if (rc) {
		/* save rc: qeth_arp_get_error_cause() may rewrite it */
		tmp = rc;
		/* reuse buf for the printable address */
		memset(buf, 0, 16);
		qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
		PRINT_WARN("Could not delete ARP entry for address %s on %s: "
			   "%s (0x%x/%d)\n",
			   buf, QETH_CARD_IFNAME(card),
			   qeth_arp_get_error_cause(&rc), tmp, tmp);
	}
	return rc;
}
5335
/*
 * qeth_arp_flush_cache() - ask the adapter to discard its entire ARP
 * cache (IPA_CMD_ASS_ARP_FLUSH_CACHE).
 * Returns 0 on success, -EOPNOTSUPP for GuestLAN/HiperSockets cards or
 * when ARP processing is unavailable, otherwise the translated IPA
 * error code.
 */
static int
qeth_arp_flush_cache(struct qeth_card *card)
{
	int rc;
	int tmp;

	QETH_DBF_TEXT(trace,3,"arpflush");

	/*
	 * currently GuestLAN only supports the ARP assist function
	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
	 * thus we say EOPNOTSUPP for this ARP function
	 */
	if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
		return -EOPNOTSUPP;
	if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
		PRINT_WARN("ARP processing not supported "
			   "on %s!\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}
	rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
					  IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
	if (rc){
		/* save rc: qeth_arp_get_error_cause() may rewrite it */
		tmp = rc;
		PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n",
			QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
			tmp, tmp);
	}
	return rc;
}
5366
/*
 * qeth_do_ioctl() - net_device ioctl handler.
 * Handles the private SIOC_QETH_* ARP / SNMP / card-type requests and
 * the standard SIOCGMIIPHY / SIOCGMIIREG MII ioctls.  The ARP
 * subcommands require CAP_NET_ADMIN and are rejected in layer-2 mode.
 * Returns 0 or a negative errno; SIOC_QETH_GET_CARD_TYPE instead
 * returns 1 for a real (non-GuestLAN) OSAE card and 0 otherwise.
 */
static int
qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = (struct qeth_card *)dev->priv;
	struct qeth_arp_cache_entry arp_entry;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	if (!card)
		return -ENODEV;

	/* card must be at least soft-setup to talk to the adapter */
	if ((card->state != CARD_STATE_UP) &&
            (card->state != CARD_STATE_SOFTSETUP))
		return -ENODEV;

	/* OSN devices are driven through the OSN API, not ioctls */
	if (card->info.type == QETH_CARD_TYPE_OSN)
		return -EPERM;

	switch (cmd){
	case SIOC_QETH_ARP_SET_NO_ENTRIES:
		if ( !capable(CAP_NET_ADMIN) ||
		     (card->options.layer2) ) {
			rc = -EPERM;
			break;
		}
		rc = qeth_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
		break;
	case SIOC_QETH_ARP_QUERY_INFO:
		if ( !capable(CAP_NET_ADMIN) ||
		     (card->options.layer2) ) {
			rc = -EPERM;
			break;
		}
		rc = qeth_arp_query(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_ARP_ADD_ENTRY:
		if ( !capable(CAP_NET_ADMIN) ||
		     (card->options.layer2) ) {
			rc = -EPERM;
			break;
		}
		if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
				   sizeof(struct qeth_arp_cache_entry)))
			rc = -EFAULT;
		else
			rc = qeth_arp_add_entry(card, &arp_entry);
		break;
	case SIOC_QETH_ARP_REMOVE_ENTRY:
		if ( !capable(CAP_NET_ADMIN) ||
		     (card->options.layer2) ) {
			rc = -EPERM;
			break;
		}
		if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
				   sizeof(struct qeth_arp_cache_entry)))
			rc = -EFAULT;
		else
			rc = qeth_arp_remove_entry(card, &arp_entry);
		break;
	case SIOC_QETH_ARP_FLUSH_CACHE:
		if ( !capable(CAP_NET_ADMIN) ||
		     (card->options.layer2) ) {
			rc = -EPERM;
			break;
		}
		rc = qeth_arp_flush_cache(card);
		break;
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		/* 1 = real OSA Express card, 0 = anything else */
		if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
		    !card->info.guestlan)
			return 1;
		return 0;
		break;
	case SIOCGMIIPHY:
		/* only a single pseudo-PHY, id 0, is exposed */
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,mii_data->phy_id,
							   mii_data->reg_num);
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_DBF_TEXT_(trace, 2, "ioce%d", rc);
	return rc;
}
5462
5463static struct net_device_stats *
5464qeth_get_stats(struct net_device *dev)
5465{
5466 struct qeth_card *card;
5467
5468 card = (struct qeth_card *) (dev->priv);
5469
5470 QETH_DBF_TEXT(trace,5,"getstat");
5471
5472 return &card->stats;
5473}
5474
5475static int
5476qeth_change_mtu(struct net_device *dev, int new_mtu)
5477{
5478 struct qeth_card *card;
5479 char dbf_text[15];
5480
5481 card = (struct qeth_card *) (dev->priv);
5482
5483 QETH_DBF_TEXT(trace,4,"chgmtu");
5484 sprintf(dbf_text, "%8x", new_mtu);
5485 QETH_DBF_TEXT(trace,4,dbf_text);
5486
5487 if (new_mtu < 64)
5488 return -EINVAL;
5489 if (new_mtu > 65535)
5490 return -EINVAL;
5491 if ((!qeth_is_supported(card,IPA_IP_FRAGMENTATION)) &&
5492 (!qeth_mtu_is_valid(card, new_mtu)))
5493 return -EINVAL;
5494 dev->mtu = new_mtu;
5495 return 0;
5496}
5497
5498#ifdef CONFIG_QETH_VLAN
5499static void
5500qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5501{
5502 struct qeth_card *card;
5503 unsigned long flags;
5504
5505 QETH_DBF_TEXT(trace,4,"vlanreg");
5506
5507 card = (struct qeth_card *) dev->priv;
5508 spin_lock_irqsave(&card->vlanlock, flags);
5509 card->vlangrp = grp;
5510 spin_unlock_irqrestore(&card->vlanlock, flags);
5511}
5512
/*
 * qeth_free_vlan_buffer() - free every queued tx skb tagged with @vid
 * in one outbound qdio buffer; all other skbs are requeued in their
 * original order via a temporary list.
 */
static void
qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf,
		      unsigned short vid)
{
	int i;
	struct sk_buff *skb;
	struct sk_buff_head tmp_list;

	skb_queue_head_init(&tmp_list);
	/* separate lockdep class: tmp_list is nested inside buf->skb_list */
	lockdep_set_class(&tmp_list.lock, &qdio_out_skb_queue_key);
	for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
		while ((skb = skb_dequeue(&buf->skb_list))){
			if (vlan_tx_tag_present(skb) &&
			    (vlan_tx_tag_get(skb) == vid)) {
				/* NOTE(review): the atomic_dec drops what
				 * appears to be a reference taken when the
				 * skb was queued for tx — confirm against
				 * the fill-buffer path */
				atomic_dec(&skb->users);
				dev_kfree_skb(skb);
			} else
				skb_queue_tail(&tmp_list, skb);
		}
	}
	/* put the surviving skbs back onto the buffer's list */
	while ((skb = skb_dequeue(&tmp_list)))
		skb_queue_tail(&buf->skb_list, skb);
}
5536
5537static void
5538qeth_free_vlan_skbs(struct qeth_card *card, unsigned short vid)
5539{
5540 int i, j;
5541
5542 QETH_DBF_TEXT(trace, 4, "frvlskbs");
5543 for (i = 0; i < card->qdio.no_out_queues; ++i){
5544 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
5545 qeth_free_vlan_buffer(card, &card->qdio.
5546 out_qs[i]->bufs[j], vid);
5547 }
5548}
5549
/*
 * qeth_free_vlan_addresses4() - queue deregistration of every IPv4
 * address configured on the VLAN device @vid before the device goes
 * away.  The in_device is looked up under RCU.
 */
static void
qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	struct qeth_ipaddr *addr;

	QETH_DBF_TEXT(trace, 4, "frvaddr4");

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(vlan_group_get_device(card->vlangrp, vid));
	if (!in_dev)
		goto out;
	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
		addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
		if (addr){
			addr->u.a4.addr = ifa->ifa_address;
			addr->u.a4.mask = ifa->ifa_mask;
			addr->type = QETH_IP_TYPE_NORMAL;
			/* qeth_delete_ip() takes ownership on success;
			 * on failure we must free the buffer ourselves */
			if (!qeth_delete_ip(card, addr))
				kfree(addr);
		}
	}
out:
	rcu_read_unlock();
}
5576
/*
 * qeth_free_vlan_addresses6() - IPv6 counterpart of
 * qeth_free_vlan_addresses4(): queue deregistration of all IPv6
 * addresses of the VLAN device @vid.  No-op without CONFIG_QETH_IPV6.
 */
static void
qeth_free_vlan_addresses6(struct qeth_card *card, unsigned short vid)
{
#ifdef CONFIG_QETH_IPV6
	struct inet6_dev *in6_dev;
	struct inet6_ifaddr *ifa;
	struct qeth_ipaddr *addr;

	QETH_DBF_TEXT(trace, 4, "frvaddr6");

	/* in6_dev_get() takes a reference; released via in6_dev_put() */
	in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
	if (!in6_dev)
		return;
	for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){
		addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
		if (addr){
			memcpy(&addr->u.a6.addr, &ifa->addr,
			       sizeof(struct in6_addr));
			addr->u.a6.pfxlen = ifa->prefix_len;
			addr->type = QETH_IP_TYPE_NORMAL;
			/* qeth_delete_ip() takes ownership on success */
			if (!qeth_delete_ip(card, addr))
				kfree(addr);
		}
	}
	in6_dev_put(in6_dev);
#endif /* CONFIG_QETH_IPV6 */
}
5604
5605static void
5606qeth_free_vlan_addresses(struct qeth_card *card, unsigned short vid)
5607{
5608 if (card->options.layer2 || !card->vlangrp)
5609 return;
5610 qeth_free_vlan_addresses4(card, vid);
5611 qeth_free_vlan_addresses6(card, vid);
5612}
5613
/*
 * qeth_layer2_send_setdelvlan_cb() - reply callback for layer-2
 * SETVLAN/DELVLAN commands.  Errors are only logged; the operation is
 * never retried and 0 is always returned so processing continues.
 */
static int
qeth_layer2_send_setdelvlan_cb(struct qeth_card *card,
			       struct qeth_reply *reply,
			       unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(trace, 2, "L2sdvcb");
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. "
			  "Continuing\n",cmd->data.setdelvlan.vlan_id,
			  QETH_CARD_IFNAME(card), cmd->hdr.return_code);
		QETH_DBF_TEXT_(trace, 2, "L2VL%4x", cmd->hdr.command);
		QETH_DBF_TEXT_(trace, 2, "L2%s", CARD_BUS_ID(card));
		QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code);
	}
	return 0;
}
5633
5634static int
5635qeth_layer2_send_setdelvlan(struct qeth_card *card, __u16 i,
5636 enum qeth_ipa_cmds ipacmd)
5637{
5638 struct qeth_ipa_cmd *cmd;
5639 struct qeth_cmd_buffer *iob;
5640
5641 QETH_DBF_TEXT_(trace, 4, "L2sdv%x",ipacmd);
5642 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
5643 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5644 cmd->data.setdelvlan.vlan_id = i;
5645 return qeth_send_ipa_cmd(card, iob,
5646 qeth_layer2_send_setdelvlan_cb, NULL);
5647}
5648
5649static void
5650qeth_layer2_process_vlans(struct qeth_card *card, int clear)
5651{
5652 unsigned short i;
5653
5654 QETH_DBF_TEXT(trace, 3, "L2prcvln");
5655
5656 if (!card->vlangrp)
5657 return;
5658 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5659 if (vlan_group_get_device(card->vlangrp, i) == NULL)
5660 continue;
5661 if (clear)
5662 qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN);
5663 else
5664 qeth_layer2_send_setdelvlan(card, i, IPA_CMD_SETVLAN);
5665 }
5666}
5667
5668/*add_vid is layer 2 used only ....*/
5669static void
5670qeth_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
5671{
5672 struct qeth_card *card;
5673
5674 QETH_DBF_TEXT_(trace, 4, "aid:%d", vid);
5675
5676 card = (struct qeth_card *) dev->priv;
5677 if (!card->options.layer2)
5678 return;
5679 qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
5680}
5681
/*
 * qeth_vlan_rx_kill_vid() - 8021q hook for removing a VLAN id; used in
 * both layer-2 and layer-3 mode.  Frees pending tx skbs for the VLAN,
 * drops its IP addresses, detaches the VLAN device from the group and,
 * in layer-2 mode, deregisters the id on the card.
 */
static void
qeth_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct qeth_card *card;
	unsigned long flags;

	QETH_DBF_TEXT_(trace, 4, "kid:%d", vid);

	card = (struct qeth_card *) dev->priv;
	/* free all skbs for the vlan device */
	qeth_free_vlan_skbs(card, vid);
	spin_lock_irqsave(&card->vlanlock, flags);
	/* unregister IP addresses of vlan device */
	qeth_free_vlan_addresses(card, vid);
	vlan_group_set_device(card->vlangrp, vid, NULL);
	spin_unlock_irqrestore(&card->vlanlock, flags);
	if (card->options.layer2)
		qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
	/* refresh multicast registrations now that the VLAN is gone */
	qeth_set_multicast_list(card->dev);
}
5703#endif
5704/**
5705 * Examine hardware response to SET_PROMISC_MODE
5706 */
5707static int
5708qeth_setadp_promisc_mode_cb(struct qeth_card *card,
5709 struct qeth_reply *reply,
5710 unsigned long data)
5711{
5712 struct qeth_ipa_cmd *cmd;
5713 struct qeth_ipacmd_setadpparms *setparms;
5714
5715 QETH_DBF_TEXT(trace,4,"prmadpcb");
5716
5717 cmd = (struct qeth_ipa_cmd *) data;
5718 setparms = &(cmd->data.setadapterparms);
5719
5720 qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
5721 if (cmd->hdr.return_code) {
5722 QETH_DBF_TEXT_(trace,4,"prmrc%2.2x",cmd->hdr.return_code);
5723 setparms->data.mode = SET_PROMISC_MODE_OFF;
5724 }
5725 card->info.promisc_mode = setparms->data.mode;
5726 return 0;
5727}
5728/*
5729 * Set promiscuous mode (on or off) (SET_PROMISC_MODE command)
5730 */
5731static void
5732qeth_setadp_promisc_mode(struct qeth_card *card)
5733{
5734 enum qeth_ipa_promisc_modes mode;
5735 struct net_device *dev = card->dev;
5736 struct qeth_cmd_buffer *iob;
5737 struct qeth_ipa_cmd *cmd;
5738
5739 QETH_DBF_TEXT(trace, 4, "setprom");
5740
5741 if (((dev->flags & IFF_PROMISC) &&
5742 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
5743 (!(dev->flags & IFF_PROMISC) &&
5744 (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
5745 return;
5746 mode = SET_PROMISC_MODE_OFF;
5747 if (dev->flags & IFF_PROMISC)
5748 mode = SET_PROMISC_MODE_ON;
5749 QETH_DBF_TEXT_(trace, 4, "mode:%x", mode);
5750
5751 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
5752 sizeof(struct qeth_ipacmd_setadpparms));
5753 cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
5754 cmd->data.setadapterparms.data.mode = mode;
5755 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
5756}
5757
5758/**
5759 * set multicast address on card
5760 */
5761static void
5762qeth_set_multicast_list(struct net_device *dev)
5763{
5764 struct qeth_card *card = (struct qeth_card *) dev->priv;
5765
5766 if (card->info.type == QETH_CARD_TYPE_OSN)
5767 return ;
5768
5769 QETH_DBF_TEXT(trace, 3, "setmulti");
5770 qeth_delete_mc_addresses(card);
5771 if (card->options.layer2) {
5772 qeth_layer2_add_multicast(card);
5773 goto out;
5774 }
5775 qeth_add_multicast_ipv4(card);
5776#ifdef CONFIG_QETH_IPV6
5777 qeth_add_multicast_ipv6(card);
5778#endif
5779out:
5780 qeth_set_ip_addr_list(card);
5781 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
5782 return;
5783 qeth_setadp_promisc_mode(card);
5784}
5785
/*
 * qeth_neigh_setup() - net_device neigh_setup hook; no per-device
 * neighbour parameters are needed, so this always succeeds.
 */
static int
qeth_neigh_setup(struct net_device *dev, struct neigh_parms *np)
{
	return 0;
}
5791
5792static void
5793qeth_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev)
5794{
5795 if (dev->type == ARPHRD_IEEE802_TR)
5796 ip_tr_mc_map(ipm, mac);
5797 else
5798 ip_eth_mc_map(ipm, mac);
5799}
5800
5801static struct qeth_ipaddr *
5802qeth_get_addr_buffer(enum qeth_prot_versions prot)
5803{
5804 struct qeth_ipaddr *addr;
5805
5806 addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
5807 if (addr == NULL) {
5808 PRINT_WARN("Not enough memory to add address\n");
5809 return NULL;
5810 }
5811 addr->type = QETH_IP_TYPE_NORMAL;
5812 addr->proto = prot;
5813 return addr;
5814}
5815
/*
 * qeth_osn_assist() - OSN API entry point: copy @data_len bytes of
 * caller-supplied command data into a write buffer and send it to the
 * card as an OSN IPA command.
 * Returns 0 on success, -ENODEV when the device/card is absent or not
 * (soft-)set up, otherwise the send result.
 */
int
qeth_osn_assist(struct net_device *dev,
		void *data,
		int data_len)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_card *card;
	int rc;

	QETH_DBF_TEXT(trace, 2, "osnsdmc");
	if (!dev)
		return -ENODEV;
	card = (struct qeth_card *)dev->priv;
	if (!card)
		return -ENODEV;
	if ((card->state != CARD_STATE_UP) &&
	    (card->state != CARD_STATE_SOFTSETUP))
		return -ENODEV;
	/* may sleep until a write buffer becomes available */
	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data+IPA_PDU_HEADER_SIZE, data, data_len);
	rc = qeth_osn_send_ipa_cmd(card, iob, data_len);
	return rc;
}
5839
/*
 * qeth_netdev_by_devno() - find the net_device whose read channel has
 * the 2-byte device number @read_dev_no.  The last four hex digits of
 * each card's read-channel bus id are parsed and compared.
 * Returns the matching net_device or NULL.
 */
static struct net_device *
qeth_netdev_by_devno(unsigned char *read_dev_no)
{
	struct qeth_card *card;
	struct net_device *ndev;
	unsigned char *readno;
	__u16 temp_dev_no, card_dev_no;
	char *endp;
	unsigned long flags;

	ndev = NULL;
	memcpy(&temp_dev_no, read_dev_no, 2);
	read_lock_irqsave(&qeth_card_list.rwlock, flags);
	list_for_each_entry(card, &qeth_card_list.list, list) {
		/* bus id ends in the 4-hex-digit device number */
		readno = CARD_RDEV_ID(card);
		readno += (strlen(readno) - 4);
		card_dev_no = simple_strtoul(readno, &endp, 16);
		if (card_dev_no == temp_dev_no) {
			ndev = card->dev;
			break;
		}
	}
	read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
	return ndev;
}
5865
5866int
5867qeth_osn_register(unsigned char *read_dev_no,
5868 struct net_device **dev,
5869 int (*assist_cb)(struct net_device *, void *),
5870 int (*data_cb)(struct sk_buff *))
5871{
5872 struct qeth_card * card;
5873
5874 QETH_DBF_TEXT(trace, 2, "osnreg");
5875 *dev = qeth_netdev_by_devno(read_dev_no);
5876 if (*dev == NULL)
5877 return -ENODEV;
5878 card = (struct qeth_card *)(*dev)->priv;
5879 if (!card)
5880 return -ENODEV;
5881 if ((assist_cb == NULL) || (data_cb == NULL))
5882 return -EINVAL;
5883 card->osn_info.assist_cb = assist_cb;
5884 card->osn_info.data_cb = data_cb;
5885 return 0;
5886}
5887
5888void
5889qeth_osn_deregister(struct net_device * dev)
5890{
5891 struct qeth_card *card;
5892
5893 QETH_DBF_TEXT(trace, 2, "osndereg");
5894 if (!dev)
5895 return;
5896 card = (struct qeth_card *)dev->priv;
5897 if (!card)
5898 return;
5899 card->osn_info.assist_cb = NULL;
5900 card->osn_info.data_cb = NULL;
5901 return;
5902}
5903
/*
 * qeth_delete_mc_addresses() - queue a DEL_ALL_MC todo entry so that
 * all currently registered multicast addresses are removed the next
 * time the address list is processed.
 */
static void
qeth_delete_mc_addresses(struct qeth_card *card)
{
	struct qeth_ipaddr *iptodo;
	unsigned long flags;

	QETH_DBF_TEXT(trace,4,"delmc");
	iptodo = qeth_get_addr_buffer(QETH_PROT_IPV4);
	if (!iptodo) {
		QETH_DBF_TEXT(trace, 2, "dmcnomem");
		return;
	}
	iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
	spin_lock_irqsave(&card->ip_lock, flags);
	/* __qeth_insert_ip_todo() takes ownership on success */
	if (!__qeth_insert_ip_todo(card, iptodo, 0))
		kfree(iptodo);
	spin_unlock_irqrestore(&card->ip_lock, flags);
}
5922
/*
 * qeth_add_mc() - queue registration of every IPv4 multicast membership
 * of @in4_dev, mapping each group address to its link-layer multicast
 * MAC.  Caller holds in4_dev->mc_list_lock.
 */
static void
qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev)
{
	struct qeth_ipaddr *ipm;
	struct ip_mc_list *im4;
	char buf[MAX_ADDR_LEN];

	QETH_DBF_TEXT(trace,4,"addmc");
	for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
		qeth_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
		ipm = qeth_get_addr_buffer(QETH_PROT_IPV4);
		if (!ipm)
			continue;
		ipm->u.a4.addr = im4->multiaddr;
		memcpy(ipm->mac,buf,OSA_ADDR_LEN);
		ipm->is_multicast = 1;
		/* qeth_add_ip() takes ownership on success */
		if (!qeth_add_ip(card,ipm))
			kfree(ipm);
	}
}
5943
/*
 * qeth_add_vlan_mc() - queue IPv4 multicast registrations for every
 * VLAN device that is UP on this card.  Skipped when VLANs are not
 * usable (layer 3 without full-VLAN support) or no group is registered.
 */
static inline void
qeth_add_vlan_mc(struct qeth_card *card)
{
#ifdef CONFIG_QETH_VLAN
	struct in_device *in_dev;
	struct vlan_group *vg;
	int i;

	QETH_DBF_TEXT(trace,4,"addmcvl");
	if ( ((card->options.layer2 == 0) &&
	      (!qeth_is_supported(card,IPA_FULL_VLAN))) ||
	     (card->vlangrp == NULL) )
		return ;

	vg = card->vlangrp;
	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
		struct net_device *netdev = vlan_group_get_device(vg, i);
		if (netdev == NULL ||
		    !(netdev->flags & IFF_UP))
			continue;
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			continue;
		/* mc_list must not change while we walk it */
		read_lock(&in_dev->mc_list_lock);
		qeth_add_mc(card,in_dev);
		read_unlock(&in_dev->mc_list_lock);
		in_dev_put(in_dev);
	}
#endif
}
5974
/*
 * qeth_add_multicast_ipv4() - queue registration of the IPv4 multicast
 * memberships of the card's own device and of all its VLAN devices.
 */
static void
qeth_add_multicast_ipv4(struct qeth_card *card)
{
	struct in_device *in4_dev;

	QETH_DBF_TEXT(trace,4,"chkmcv4");
	in4_dev = in_dev_get(card->dev);
	if (in4_dev == NULL)
		return;
	read_lock(&in4_dev->mc_list_lock);
	qeth_add_mc(card, in4_dev);
	qeth_add_vlan_mc(card);
	read_unlock(&in4_dev->mc_list_lock);
	in_dev_put(in4_dev);
}
5990
/*
 * qeth_layer2_add_multicast() - layer-2 mode: queue registration of
 * every MAC on the device's multicast list as a group MAC.
 */
static void
qeth_layer2_add_multicast(struct qeth_card *card)
{
	struct qeth_ipaddr *ipm;
	struct dev_mc_list *dm;

	QETH_DBF_TEXT(trace,4,"L2addmc");
	for (dm = card->dev->mc_list; dm; dm = dm->next) {
		ipm = qeth_get_addr_buffer(QETH_PROT_IPV4);
		if (!ipm)
			continue;
		memcpy(ipm->mac,dm->dmi_addr,MAX_ADDR_LEN);
		ipm->is_multicast = 1;
		/* qeth_add_ip() takes ownership on success */
		if (!qeth_add_ip(card, ipm))
			kfree(ipm);
	}
}
6008
6009#ifdef CONFIG_QETH_IPV6
/*
 * qeth_add_mc6() - queue registration of every IPv6 multicast
 * membership of @in6_dev with its mapped link-layer multicast MAC.
 * Caller holds in6_dev->lock.
 */
static void
qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
{
	struct qeth_ipaddr *ipm;
	struct ifmcaddr6 *im6;
	char buf[MAX_ADDR_LEN];

	QETH_DBF_TEXT(trace,4,"addmc6");
	for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
		ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
		ipm = qeth_get_addr_buffer(QETH_PROT_IPV6);
		if (!ipm)
			continue;
		ipm->is_multicast = 1;
		memcpy(ipm->mac,buf,OSA_ADDR_LEN);
		memcpy(&ipm->u.a6.addr,&im6->mca_addr.s6_addr,
		       sizeof(struct in6_addr));
		/* qeth_add_ip() takes ownership on success */
		if (!qeth_add_ip(card,ipm))
			kfree(ipm);
	}
}
6031
/*
 * qeth_add_vlan_mc6() - IPv6 counterpart of qeth_add_vlan_mc(): queue
 * multicast registrations for every VLAN device that is UP.
 */
static inline void
qeth_add_vlan_mc6(struct qeth_card *card)
{
#ifdef CONFIG_QETH_VLAN
	struct inet6_dev *in_dev;
	struct vlan_group *vg;
	int i;

	QETH_DBF_TEXT(trace,4,"admc6vl");
	if ( ((card->options.layer2 == 0) &&
	      (!qeth_is_supported(card,IPA_FULL_VLAN))) ||
	     (card->vlangrp == NULL))
		return ;

	vg = card->vlangrp;
	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
		struct net_device *netdev = vlan_group_get_device(vg, i);
		if (netdev == NULL ||
		    !(netdev->flags & IFF_UP))
			continue;
		in_dev = in6_dev_get(netdev);
		if (!in_dev)
			continue;
		/* hold the inet6_dev lock while walking its mc_list */
		read_lock_bh(&in_dev->lock);
		qeth_add_mc6(card,in_dev);
		read_unlock_bh(&in_dev->lock);
		in6_dev_put(in_dev);
	}
#endif /* CONFIG_QETH_VLAN */
}
6062
/*
 * qeth_add_multicast_ipv6() - queue registration of IPv6 multicast
 * memberships of the card's device and its VLAN devices; no-op when
 * the card lacks IPv6 support.
 */
static void
qeth_add_multicast_ipv6(struct qeth_card *card)
{
	struct inet6_dev *in6_dev;

	QETH_DBF_TEXT(trace,4,"chkmcv6");
	if (!qeth_is_supported(card, IPA_IPV6))
		return ;
	in6_dev = in6_dev_get(card->dev);
	if (in6_dev == NULL)
		return;
	read_lock_bh(&in6_dev->lock);
	qeth_add_mc6(card, in6_dev);
	qeth_add_vlan_mc6(card);
	read_unlock_bh(&in6_dev->lock);
	in6_dev_put(in6_dev);
}
6080#endif /* CONFIG_QETH_IPV6 */
6081
6082static int
6083qeth_layer2_send_setdelmac(struct qeth_card *card, __u8 *mac,
6084 enum qeth_ipa_cmds ipacmd,
6085 int (*reply_cb) (struct qeth_card *,
6086 struct qeth_reply*,
6087 unsigned long))
6088{
6089 struct qeth_ipa_cmd *cmd;
6090 struct qeth_cmd_buffer *iob;
6091
6092 QETH_DBF_TEXT(trace, 2, "L2sdmac");
6093 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
6094 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6095 cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
6096 memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
6097 return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
6098}
6099
/*
 * qeth_layer2_send_setgroupmac_cb() - reply callback for SETGMAC.
 * Return code 0x2005 ("already registered", seen on couple/uncouple)
 * is downgraded to success; other errors are only logged.  Always
 * returns 0.
 */
static int
qeth_layer2_send_setgroupmac_cb(struct qeth_card *card,
				struct qeth_reply *reply,
				unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	__u8 *mac;

	QETH_DBF_TEXT(trace, 2, "L2Sgmacb");
	cmd = (struct qeth_ipa_cmd *) data;
	mac = &cmd->data.setdelmac.mac[0];
	/* MAC already registered, needed in couple/uncouple case */
	if (cmd->hdr.return_code == 0x2005) {
		PRINT_WARN("Group MAC %02x:%02x:%02x:%02x:%02x:%02x " \
			  "already existing on %s \n",
			  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
			  QETH_CARD_IFNAME(card));
		cmd->hdr.return_code = 0;
	}
	if (cmd->hdr.return_code)
		PRINT_ERR("Could not set group MAC " \
			  "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
			  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
			  QETH_CARD_IFNAME(card),cmd->hdr.return_code);
	return 0;
}
6126
/*
 * qeth_layer2_send_setgroupmac() - register a group (multicast) MAC on
 * the adapter via IPA_CMD_SETGMAC.
 */
static int
qeth_layer2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
{
	QETH_DBF_TEXT(trace, 2, "L2Sgmac");
	return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
					  qeth_layer2_send_setgroupmac_cb);
}
6134
/*
 * qeth_layer2_send_delgroupmac_cb() - reply callback for DELGMAC;
 * failures are only logged.  Always returns 0.
 */
static int
qeth_layer2_send_delgroupmac_cb(struct qeth_card *card,
				struct qeth_reply *reply,
				unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	__u8 *mac;

	QETH_DBF_TEXT(trace, 2, "L2Dgmacb");
	cmd = (struct qeth_ipa_cmd *) data;
	mac = &cmd->data.setdelmac.mac[0];
	if (cmd->hdr.return_code)
		PRINT_ERR("Could not delete group MAC " \
			  "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
			  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
			  QETH_CARD_IFNAME(card), cmd->hdr.return_code);
	return 0;
}
6153
/*
 * qeth_layer2_send_delgroupmac() - deregister a group (multicast) MAC
 * from the adapter via IPA_CMD_DELGMAC.
 */
static int
qeth_layer2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
{
	QETH_DBF_TEXT(trace, 2, "L2Dgmac");
	return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
					  qeth_layer2_send_delgroupmac_cb);
}
6161
/*
 * qeth_layer2_send_setmac_cb() - reply callback for SETVMAC.  On
 * success the registered MAC is copied into the net_device and the
 * REGISTERED flag is set; on failure the flag is cleared and the
 * command's return code is rewritten to -EIO for the caller.
 */
static int
qeth_layer2_send_setmac_cb(struct qeth_card *card,
			   struct qeth_reply *reply,
			   unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(trace, 2, "L2Smaccb");
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_DBF_TEXT_(trace, 2, "L2er%x", cmd->hdr.return_code);
		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
		cmd->hdr.return_code = -EIO;
	} else {
		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
		memcpy(card->dev->dev_addr,cmd->data.setdelmac.mac,
		       OSA_ADDR_LEN);
		PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
			   "successfully registered on device %s\n",
			   card->dev->dev_addr[0], card->dev->dev_addr[1],
			   card->dev->dev_addr[2], card->dev->dev_addr[3],
			   card->dev->dev_addr[4], card->dev->dev_addr[5],
			   card->dev->name);
	}
	return 0;
}
6188
/*
 * qeth_layer2_send_setmac() - register @mac as the device's unicast MAC
 * on the adapter via IPA_CMD_SETVMAC.
 */
static int
qeth_layer2_send_setmac(struct qeth_card *card, __u8 *mac)
{
	QETH_DBF_TEXT(trace, 2, "L2Setmac");
	return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
					  qeth_layer2_send_setmac_cb);
}
6196
6197static int
6198qeth_layer2_send_delmac_cb(struct qeth_card *card,
6199 struct qeth_reply *reply,
6200 unsigned long data)
6201{
6202 struct qeth_ipa_cmd *cmd;
6203
6204 QETH_DBF_TEXT(trace, 2, "L2Dmaccb");
6205 cmd = (struct qeth_ipa_cmd *) data;
6206 if (cmd->hdr.return_code) {
6207 QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code);
6208 cmd->hdr.return_code = -EIO;
6209 return 0;
6210 }
6211 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
6212
6213 return 0;
6214}
/*
 * qeth_layer2_send_delmac() - deregister the device's unicast MAC via
 * IPA_CMD_DELVMAC; succeeds immediately when no MAC is registered.
 */
static int
qeth_layer2_send_delmac(struct qeth_card *card, __u8 *mac)
{
	QETH_DBF_TEXT(trace, 2, "L2Delmac");
	if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
		return 0;
	return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
					  qeth_layer2_send_delmac_cb);
}
6224
/*
 * qeth_layer2_set_mac_address() - net_device set_mac_address hook.
 * Only supported on real cards in layer-2 mode (and not on OSN
 * devices).  Deregisters the current MAC on the adapter, then
 * registers the new one; the setmac callback updates dev->dev_addr.
 * Returns 0 or a negative errno / -EIO-translated IPA error.
 */
static int
qeth_layer2_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct qeth_card *card;
	int rc = 0;

	QETH_DBF_TEXT(trace, 3, "setmac");

	if (qeth_verify_dev(dev) != QETH_REAL_CARD) {
		QETH_DBF_TEXT(trace, 3, "setmcINV");
		return -EOPNOTSUPP;
	}
	card = (struct qeth_card *) dev->priv;

	if (!card->options.layer2) {
		PRINT_WARN("Setting MAC address on %s is not supported "
			   "in Layer 3 mode.\n", dev->name);
		QETH_DBF_TEXT(trace, 3, "setmcLY3");
		return -EOPNOTSUPP;
	}
	if (card->info.type == QETH_CARD_TYPE_OSN) {
		PRINT_WARN("Setting MAC address on %s is not supported.\n",
			   dev->name);
		QETH_DBF_TEXT(trace, 3, "setmcOSN");
		return -EOPNOTSUPP;
	}
	QETH_DBF_TEXT_(trace, 3, "%s", CARD_BUS_ID(card));
	QETH_DBF_HEX(trace, 3, addr->sa_data, OSA_ADDR_LEN);
	/* drop the old registration before installing the new MAC */
	rc = qeth_layer2_send_delmac(card, &card->dev->dev_addr[0]);
	if (!rc)
		rc = qeth_layer2_send_setmac(card, addr->sa_data);
	return rc;
}
6259
6260static void
6261qeth_fill_ipacmd_header(struct qeth_card *card, struct qeth_ipa_cmd *cmd,
6262 __u8 command, enum qeth_prot_versions prot)
6263{
6264 memset(cmd, 0, sizeof (struct qeth_ipa_cmd));
6265 cmd->hdr.command = command;
6266 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
6267 cmd->hdr.seqno = card->seqno.ipa;
6268 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
6269 cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
6270 if (card->options.layer2)
6271 cmd->hdr.prim_version_no = 2;
6272 else
6273 cmd->hdr.prim_version_no = 1;
6274 cmd->hdr.param_count = 1;
6275 cmd->hdr.prot_version = prot;
6276 cmd->hdr.ipa_supported = 0;
6277 cmd->hdr.ipa_enabled = 0;
6278}
6279
6280static struct qeth_cmd_buffer *
6281qeth_get_ipacmd_buffer(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
6282 enum qeth_prot_versions prot)
6283{
6284 struct qeth_cmd_buffer *iob;
6285 struct qeth_ipa_cmd *cmd;
6286
6287 iob = qeth_wait_for_buffer(&card->write);
6288 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6289 qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
6290
6291 return iob;
6292}
6293
/*
 * qeth_send_setdelmc() - send a SETIPM/DELIPM command registering or
 * deregistering the multicast address @addr (MAC plus IPv4 or IPv6
 * group address) on the card.  Returns the command-send result.
 */
static int
qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(trace,4,"setdelmc");

	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	memcpy(&cmd->data.setdelipm.mac,addr->mac, OSA_ADDR_LEN);
	if (addr->proto == QETH_PROT_IPV6)
		memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
		       sizeof(struct in6_addr));
	else
		memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr,4);

	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);

	return rc;
}
6316static void
6317qeth_fill_netmask(u8 *netmask, unsigned int len)
6318{
6319 int i,j;
6320 for (i=0;i<16;i++) {
6321 j=(len)-(i*8);
6322 if (j >= 8)
6323 netmask[i] = 0xff;
6324 else if (j > 0)
6325 netmask[i] = (u8)(0xFF00>>j);
6326 else
6327 netmask[i] = 0;
6328 }
6329}
6330
/*
 * qeth_send_setdelip() - send a SETIP/DELIP command registering or
 * deregistering the unicast address @addr (with its netmask and the
 * given command @flags) on the card.  For IPv6 the prefix length is
 * expanded into a full 16-byte mask first.
 */
static int
qeth_send_setdelip(struct qeth_card *card, struct qeth_ipaddr *addr,
		   int ipacmd, unsigned int flags)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	__u8 netmask[16];

	QETH_DBF_TEXT(trace,4,"setdelip");
	QETH_DBF_TEXT_(trace,4,"flags%02X", flags);

	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	if (addr->proto == QETH_PROT_IPV6) {
		memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
		       sizeof(struct in6_addr));
		/* convert prefix length into an explicit mask */
		qeth_fill_netmask(netmask,addr->u.a6.pfxlen);
		memcpy(cmd->data.setdelip6.mask, netmask,
		       sizeof(struct in6_addr));
		cmd->data.setdelip6.flags = flags;
	} else {
		memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
		memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
		cmd->data.setdelip4.flags = flags;
	}

	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);

	return rc;
}
6362
/*
 * qeth_layer2_register_addr_entry() - layer-2 registration: only group
 * (multicast) MACs are registered on the card; unicast entries are
 * handled elsewhere and succeed trivially here.
 */
static int
qeth_layer2_register_addr_entry(struct qeth_card *card,
				struct qeth_ipaddr *addr)
{
	if (!addr->is_multicast)
		return 0;
	QETH_DBF_TEXT(trace, 2, "setgmac");
	QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
	return qeth_layer2_send_setgroupmac(card, &addr->mac[0]);
}
6373
/*
 * qeth_layer2_deregister_addr_entry() - layer-2 deregistration
 * counterpart: removes a group MAC; non-multicast entries are a no-op.
 */
static int
qeth_layer2_deregister_addr_entry(struct qeth_card *card,
				  struct qeth_ipaddr *addr)
{
	if (!addr->is_multicast)
		return 0;
	QETH_DBF_TEXT(trace, 2, "delgmac");
	QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
	return qeth_layer2_send_delgroupmac(card, &addr->mac[0]);
}
6384
/*
 * qeth_layer3_register_addr_entry() - layer-3 registration of @addr on
 * the card: SETIPM for multicast, SETIP for unicast.  The command is
 * retried up to three times before the failure is reported.
 */
static int
qeth_layer3_register_addr_entry(struct qeth_card *card,
				struct qeth_ipaddr *addr)
{
	char buf[50];
	int rc;
	int cnt = 3;	/* number of send attempts */

	if (addr->proto == QETH_PROT_IPV4) {
		QETH_DBF_TEXT(trace, 2,"setaddr4");
		QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
	} else if (addr->proto == QETH_PROT_IPV6) {
		QETH_DBF_TEXT(trace, 2, "setaddr6");
		QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
		QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
	} else {
		QETH_DBF_TEXT(trace, 2, "setaddr?");
		QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
	}
	do {
		if (addr->is_multicast)
			rc =  qeth_send_setdelmc(card, addr, IPA_CMD_SETIPM);
		else
			rc = qeth_send_setdelip(card, addr, IPA_CMD_SETIP,
					addr->set_flags);
		if (rc)
			QETH_DBF_TEXT(trace, 2, "failed");
	} while ((--cnt > 0) && rc);
	if (rc){
		QETH_DBF_TEXT(trace, 2, "FAILED");
		qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
		PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n",
			   buf, rc, rc);
	}
	return rc;
}
6421
6422static int
6423qeth_layer3_deregister_addr_entry(struct qeth_card *card,
6424 struct qeth_ipaddr *addr)
6425{
6426 //char buf[50];
6427 int rc;
6428
6429 if (addr->proto == QETH_PROT_IPV4) {
6430 QETH_DBF_TEXT(trace, 2,"deladdr4");
6431 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
6432 } else if (addr->proto == QETH_PROT_IPV6) {
6433 QETH_DBF_TEXT(trace, 2, "deladdr6");
6434 QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
6435 QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
6436 } else {
6437 QETH_DBF_TEXT(trace, 2, "deladdr?");
6438 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
6439 }
6440 if (addr->is_multicast)
6441 rc = qeth_send_setdelmc(card, addr, IPA_CMD_DELIPM);
6442 else
6443 rc = qeth_send_setdelip(card, addr, IPA_CMD_DELIP,
6444 addr->del_flags);
6445 if (rc) {
6446 QETH_DBF_TEXT(trace, 2, "failed");
6447 /* TODO: re-activate this warning as soon as we have a
6448 * clean mirco code
6449 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
6450 PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
6451 buf, rc);
6452 */
6453 }
6454 return rc;
6455}
6456
6457static int
6458qeth_register_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
6459{
6460 if (card->options.layer2)
6461 return qeth_layer2_register_addr_entry(card, addr);
6462
6463 return qeth_layer3_register_addr_entry(card, addr);
6464}
6465
6466static int
6467qeth_deregister_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
6468{
6469 if (card->options.layer2)
6470 return qeth_layer2_deregister_addr_entry(card, addr);
6471
6472 return qeth_layer3_deregister_addr_entry(card, addr);
6473}
6474
6475static u32
6476qeth_ethtool_get_tx_csum(struct net_device *dev)
6477{
6478 return (dev->features & NETIF_F_HW_CSUM) != 0;
6479}
6480
6481static int
6482qeth_ethtool_set_tx_csum(struct net_device *dev, u32 data)
6483{
6484 if (data)
6485 dev->features |= NETIF_F_HW_CSUM;
6486 else
6487 dev->features &= ~NETIF_F_HW_CSUM;
6488
6489 return 0;
6490}
6491
6492static u32
6493qeth_ethtool_get_rx_csum(struct net_device *dev)
6494{
6495 struct qeth_card *card = (struct qeth_card *)dev->priv;
6496
6497 return (card->options.checksum_type == HW_CHECKSUMMING);
6498}
6499
6500static int
6501qeth_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6502{
6503 struct qeth_card *card = (struct qeth_card *)dev->priv;
6504
6505 if ((card->state != CARD_STATE_DOWN) &&
6506 (card->state != CARD_STATE_RECOVER))
6507 return -EPERM;
6508 if (data)
6509 card->options.checksum_type = HW_CHECKSUMMING;
6510 else
6511 card->options.checksum_type = SW_CHECKSUMMING;
6512 return 0;
6513}
6514
6515static u32
6516qeth_ethtool_get_sg(struct net_device *dev)
6517{
6518 struct qeth_card *card = (struct qeth_card *)dev->priv;
6519
6520 return ((card->options.large_send != QETH_LARGE_SEND_NO) &&
6521 (dev->features & NETIF_F_SG));
6522}
6523
6524static int
6525qeth_ethtool_set_sg(struct net_device *dev, u32 data)
6526{
6527 struct qeth_card *card = (struct qeth_card *)dev->priv;
6528
6529 if (data) {
6530 if (card->options.large_send != QETH_LARGE_SEND_NO)
6531 dev->features |= NETIF_F_SG;
6532 else {
6533 dev->features &= ~NETIF_F_SG;
6534 return -EINVAL;
6535 }
6536 } else
6537 dev->features &= ~NETIF_F_SG;
6538 return 0;
6539}
6540
6541static u32
6542qeth_ethtool_get_tso(struct net_device *dev)
6543{
6544 struct qeth_card *card = (struct qeth_card *)dev->priv;
6545
6546 return ((card->options.large_send != QETH_LARGE_SEND_NO) &&
6547 (dev->features & NETIF_F_TSO));
6548}
6549
6550static int
6551qeth_ethtool_set_tso(struct net_device *dev, u32 data)
6552{
6553 struct qeth_card *card = (struct qeth_card *)dev->priv;
6554
6555 if (data) {
6556 if (card->options.large_send != QETH_LARGE_SEND_NO)
6557 dev->features |= NETIF_F_TSO;
6558 else {
6559 dev->features &= ~NETIF_F_TSO;
6560 return -EINVAL;
6561 }
6562 } else
6563 dev->features &= ~NETIF_F_TSO;
6564 return 0;
6565}
6566
6567static struct ethtool_ops qeth_ethtool_ops = {
6568 .get_tx_csum = qeth_ethtool_get_tx_csum,
6569 .set_tx_csum = qeth_ethtool_set_tx_csum,
6570 .get_rx_csum = qeth_ethtool_get_rx_csum,
6571 .set_rx_csum = qeth_ethtool_set_rx_csum,
6572 .get_sg = qeth_ethtool_get_sg,
6573 .set_sg = qeth_ethtool_set_sg,
6574 .get_tso = qeth_ethtool_get_tso,
6575 .set_tso = qeth_ethtool_set_tso,
6576};
6577
/*
 * header_ops->parse: extract the source hardware address of an skb into
 * haddr.  Only token ring (ARPHRD_IEEE802_TR) devices carry a parsable
 * header here; layer-2 mode and fake_ll decide whether one is present.
 * Returns the copied length (ETH_ALEN) or 0 if no address is available.
 */
static int
qeth_hard_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct qeth_card *card;
	const struct ethhdr *eth;
	struct net_device *dev = skb->dev;

	if (dev->type != ARPHRD_IEEE802_TR)
		return 0;

	card = qeth_get_card_from_dev(dev);
	if (card->options.layer2)
		goto haveheader;
#ifdef CONFIG_QETH_IPV6
	/* cause of the manipulated arp constructor and the ARP
	   flag for OSAE devices we have some nasty exceptions */
	if (card->info.type == QETH_CARD_TYPE_OSAE) {
		if (!card->options.fake_ll) {
			/* NOTE(review): skb->protocol is network byte order
			 * while ETH_P_IPV6/ETH_P_IP are host-order constants;
			 * a htons() looks missing here - confirm intent. */
			if ((skb->pkt_type==PACKET_OUTGOING) &&
			    (skb->protocol==ETH_P_IPV6))
				goto haveheader;
			else
				return 0;
		} else {
			if ((skb->pkt_type==PACKET_OUTGOING) &&
			    (skb->protocol==ETH_P_IP))
				return 0;
			else
				goto haveheader;
		}
	}
#endif
	if (!card->options.fake_ll)
		return 0;
haveheader:
	eth = eth_hdr(skb);
	memcpy(haddr, eth->h_source, ETH_ALEN);
	return ETH_ALEN;
}
6617
/* header_ops for IFF_NOARP devices: no header is built, only .parse is
 * provided so the source address can still be extracted on receive. */
static const struct header_ops qeth_null_ops = {
	.parse = qeth_hard_header_parse,
};
6621
/*
 * net_device->init hook: wire up all netdev callbacks, header handling,
 * flags and MTU for a freshly allocated qeth device.
 */
static int
qeth_netdev_init(struct net_device *dev)
{
	struct qeth_card *card;

	card = (struct qeth_card *) dev->priv;

	QETH_DBF_TEXT(trace,3,"initdev");

	dev->tx_timeout = &qeth_tx_timeout;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
	dev->open = qeth_open;
	dev->stop = qeth_stop;
	dev->hard_start_xmit = qeth_hard_start_xmit;
	dev->do_ioctl = qeth_do_ioctl;
	dev->get_stats = qeth_get_stats;
	dev->change_mtu = qeth_change_mtu;
	dev->neigh_setup = qeth_neigh_setup;
	dev->set_multicast_list = qeth_set_multicast_list;
#ifdef CONFIG_QETH_VLAN
	dev->vlan_rx_register = qeth_vlan_rx_register;
	dev->vlan_rx_kill_vid = qeth_vlan_rx_kill_vid;
	dev->vlan_rx_add_vid = qeth_vlan_rx_add_vid;
#endif
	/* NOARP devices build no link-layer header: parse-only ops */
	if (qeth_get_netdev_flags(card) & IFF_NOARP)
		dev->header_ops = &qeth_null_ops;

#ifdef CONFIG_QETH_IPV6
	/*IPv6 address autoconfiguration stuff*/
	if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
		card->dev->dev_id = card->info.unique_id & 0xffff;
#endif
	/* fake_ll overrides the null ops with a fabricated LL header */
	if (card->options.fake_ll &&
		(qeth_get_netdev_flags(card) & IFF_NOARP))
			dev->header_ops = &qeth_fake_ops;

	dev->set_mac_address = qeth_layer2_set_mac_address;
	dev->flags |= qeth_get_netdev_flags(card);
	if ((card->options.fake_broadcast) ||
	    (card->info.broadcast_capable))
		dev->flags |= IFF_BROADCAST;
	dev->hard_header_len =
			qeth_get_hlen(card->info.link_type) + card->options.add_hhlen;
	dev->addr_len = OSA_ADDR_LEN;
	dev->mtu = card->info.initial_mtu;
	/* OSN devices do not support the ethtool operations */
	if (card->info.type != QETH_CARD_TYPE_OSN)
		SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops);
	return 0;
}
6671
6672static void
6673qeth_init_func_level(struct qeth_card *card)
6674{
6675 if (card->ipato.enabled) {
6676 if (card->info.type == QETH_CARD_TYPE_IQD)
6677 card->info.func_level =
6678 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
6679 else
6680 card->info.func_level =
6681 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
6682 } else {
6683 if (card->info.type == QETH_CARD_TYPE_IQD)
6684 /*FIXME:why do we have same values for dis and ena for osae??? */
6685 card->info.func_level =
6686 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
6687 else
6688 card->info.func_level =
6689 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
6690 }
6691}
6692
6693/**
6694 * hardsetup card, initialize MPC and QDIO stuff
6695 */
6696static int
6697qeth_hardsetup_card(struct qeth_card *card)
6698{
6699 int retries = 3;
6700 int rc;
6701
6702 QETH_DBF_TEXT(setup, 2, "hrdsetup");
6703
6704 atomic_set(&card->force_alloc_skb, 0);
6705retry:
6706 if (retries < 3){
6707 PRINT_WARN("Retrying to do IDX activates.\n");
6708 ccw_device_set_offline(CARD_DDEV(card));
6709 ccw_device_set_offline(CARD_WDEV(card));
6710 ccw_device_set_offline(CARD_RDEV(card));
6711 ccw_device_set_online(CARD_RDEV(card));
6712 ccw_device_set_online(CARD_WDEV(card));
6713 ccw_device_set_online(CARD_DDEV(card));
6714 }
6715 rc = qeth_qdio_clear_card(card,card->info.type!=QETH_CARD_TYPE_IQD);
6716 if (rc == -ERESTARTSYS) {
6717 QETH_DBF_TEXT(setup, 2, "break1");
6718 return rc;
6719 } else if (rc) {
6720 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6721 if (--retries < 0)
6722 goto out;
6723 else
6724 goto retry;
6725 }
6726 if ((rc = qeth_get_unitaddr(card))){
6727 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
6728 return rc;
6729 }
6730 qeth_init_tokens(card);
6731 qeth_init_func_level(card);
6732 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
6733 if (rc == -ERESTARTSYS) {
6734 QETH_DBF_TEXT(setup, 2, "break2");
6735 return rc;
6736 } else if (rc) {
6737 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
6738 if (--retries < 0)
6739 goto out;
6740 else
6741 goto retry;
6742 }
6743 rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
6744 if (rc == -ERESTARTSYS) {
6745 QETH_DBF_TEXT(setup, 2, "break3");
6746 return rc;
6747 } else if (rc) {
6748 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
6749 if (--retries < 0)
6750 goto out;
6751 else
6752 goto retry;
6753 }
6754 if ((rc = qeth_mpc_initialize(card))){
6755 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
6756 goto out;
6757 }
6758 /*network device will be recovered*/
6759 if (card->dev) {
6760 card->dev->header_ops = card->orig_header_ops;
6761 if (card->options.fake_ll &&
6762 (qeth_get_netdev_flags(card) & IFF_NOARP))
6763 card->dev->header_ops = &qeth_fake_ops;
6764 return 0;
6765 }
6766 /* at first set_online allocate netdev */
6767 card->dev = qeth_get_netdevice(card->info.type,
6768 card->info.link_type);
6769 if (!card->dev){
6770 qeth_qdio_clear_card(card, card->info.type !=
6771 QETH_CARD_TYPE_IQD);
6772 rc = -ENODEV;
6773 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
6774 goto out;
6775 }
6776 card->dev->priv = card;
6777 card->orig_header_ops = card->dev->header_ops;
6778 card->dev->type = qeth_get_arphdr_type(card->info.type,
6779 card->info.link_type);
6780 card->dev->init = qeth_netdev_init;
6781 return 0;
6782out:
6783 PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc);
6784 return rc;
6785}
6786
/*
 * Default reply callback for SETASSPARMS commands: propagate the
 * assist-specific return code into the main IPA header, mirror the
 * enabled-assist masks into the card options, and latch the inbound
 * checksum mask the card reports at assist start.
 */
static int
qeth_default_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply,
			    unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(trace,4,"defadpcb");

	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code == 0){
		/* surface the sub-command return code to the caller */
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
#ifdef CONFIG_QETH_IPV6
		if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
#endif
	}
	if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
	    cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
		/* card reports which checksum types it can offload */
		card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
		QETH_DBF_TEXT_(trace, 3, "csum:%d", card->info.csum_mask);
	}
	return 0;
}
6812
6813static int
6814qeth_default_setadapterparms_cb(struct qeth_card *card,
6815 struct qeth_reply *reply,
6816 unsigned long data)
6817{
6818 struct qeth_ipa_cmd *cmd;
6819
6820 QETH_DBF_TEXT(trace,4,"defadpcb");
6821
6822 cmd = (struct qeth_ipa_cmd *) data;
6823 if (cmd->hdr.return_code == 0)
6824 cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
6825 return 0;
6826}
6827
6828
6829
6830static int
6831qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply,
6832 unsigned long data)
6833{
6834 struct qeth_ipa_cmd *cmd;
6835
6836 QETH_DBF_TEXT(trace,3,"quyadpcb");
6837
6838 cmd = (struct qeth_ipa_cmd *) data;
6839 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f)
6840 card->info.link_type =
6841 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
6842 card->options.adp.supported_funcs =
6843 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
6844 return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
6845}
6846
6847static int
6848qeth_query_setadapterparms(struct qeth_card *card)
6849{
6850 int rc;
6851 struct qeth_cmd_buffer *iob;
6852
6853 QETH_DBF_TEXT(trace,3,"queryadp");
6854 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
6855 sizeof(struct qeth_ipacmd_setadpparms));
6856 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
6857 return rc;
6858}
6859
/*
 * Reply callback for ALTER_MAC_ADDRESS (READ_MAC): copy the MAC address
 * reported by the adapter into the net_device, unless layer-2 mode has
 * already read one (QETH_LAYER2_MAC_READ).
 */
static int
qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
				   struct qeth_reply *reply,
				   unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(trace,4,"chgmaccb");

	cmd = (struct qeth_ipa_cmd *) data;
	if (!card->options.layer2 ||
	    !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
		memcpy(card->dev->dev_addr,
		       &cmd->data.setadapterparms.data.change_addr.addr,
		       OSA_ADDR_LEN);
		card->info.mac_bits |= QETH_LAYER2_MAC_READ;
	}
	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
	return 0;
}
6880
6881static int
6882qeth_setadpparms_change_macaddr(struct qeth_card *card)
6883{
6884 int rc;
6885 struct qeth_cmd_buffer *iob;
6886 struct qeth_ipa_cmd *cmd;
6887
6888 QETH_DBF_TEXT(trace,4,"chgmac");
6889
6890 iob = qeth_get_adapter_cmd(card,IPA_SETADP_ALTER_MAC_ADDRESS,
6891 sizeof(struct qeth_ipacmd_setadpparms));
6892 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6893 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
6894 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
6895 memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
6896 card->dev->dev_addr, OSA_ADDR_LEN);
6897 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
6898 NULL);
6899 return rc;
6900}
6901
6902static int
6903qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode)
6904{
6905 int rc;
6906 struct qeth_cmd_buffer *iob;
6907 struct qeth_ipa_cmd *cmd;
6908
6909 QETH_DBF_TEXT(trace,4,"adpmode");
6910
6911 iob = qeth_get_adapter_cmd(card, command,
6912 sizeof(struct qeth_ipacmd_setadpparms));
6913 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6914 cmd->data.setadapterparms.data.mode = mode;
6915 rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
6916 NULL);
6917 return rc;
6918}
6919
/*
 * Configure token-ring (HSTR) specific adapter modes: broadcast mode
 * and MAC address (canonical/non-canonical) mode.  If the adapter does
 * not support SET_BROADCAST_MODE, only warn about non-default settings.
 * Note: when both setadp calls run, only the second rc is returned.
 */
static int
qeth_setadapter_hstr(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(trace,4,"adphstr");

	if (qeth_adp_supported(card,IPA_SETADP_SET_BROADCAST_MODE)) {
		rc = qeth_send_setadp_mode(card, IPA_SETADP_SET_BROADCAST_MODE,
					   card->options.broadcast_mode);
		if (rc)
			PRINT_WARN("couldn't set broadcast mode on "
				   "device %s: x%x\n",
				   CARD_BUS_ID(card), rc);
		rc = qeth_send_setadp_mode(card, IPA_SETADP_ALTER_MAC_ADDRESS,
					   card->options.macaddr_mode);
		if (rc)
			PRINT_WARN("couldn't set macaddr mode on "
				   "device %s: x%x\n", CARD_BUS_ID(card), rc);
		return rc;
	}
	if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
		PRINT_WARN("set adapter parameters not available "
			   "to set broadcast mode, using ALLRINGS "
			   "on device %s:\n", CARD_BUS_ID(card));
	if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
		PRINT_WARN("set adapter parameters not available "
			   "to set macaddr mode, using NONCANONICAL "
			   "on device %s:\n", CARD_BUS_ID(card));
	return 0;
}
6951
/*
 * Negotiate adapter parameters: query supported sub-commands, read the
 * adapter MAC address and - for token ring links - set HSTR modes.
 * Missing SETADAPTERPARMS support is not an error (returns 0).
 */
static int
qeth_setadapter_parms(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(setup, 2, "setadprm");

	if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)){
		PRINT_WARN("set adapter parameters not supported "
			   "on device %s.\n",
			   CARD_BUS_ID(card));
		QETH_DBF_TEXT(setup, 2, " notsupp");
		return 0;
	}
	rc = qeth_query_setadapterparms(card);
	if (rc) {
		PRINT_WARN("couldn't set adapter parameters on device %s: "
			   "x%x\n", CARD_BUS_ID(card), rc);
		return rc;
	}
	if (qeth_adp_supported(card,IPA_SETADP_ALTER_MAC_ADDRESS)) {
		/* failure only warned; rc may still be returned below */
		rc = qeth_setadpparms_change_macaddr(card);
		if (rc)
			PRINT_WARN("couldn't get MAC address on "
				   "device %s: x%x\n",
				   CARD_BUS_ID(card), rc);
	}

	if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
	    (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
		rc = qeth_setadapter_hstr(card);

	return rc;
}
6986
/*
 * Layer-2 discipline initialization: query adapter parameters, read the
 * adapter MAC address and register it with the card.  A failing SETMAC
 * is only traced - the function deliberately still returns 0.
 */
static int
qeth_layer2_initialize(struct qeth_card *card)
{
	int rc = 0;


	QETH_DBF_TEXT(setup, 2, "doL2init");
	QETH_DBF_TEXT_(setup, 2, "doL2%s", CARD_BUS_ID(card));

	rc = qeth_query_setadapterparms(card);
	if (rc) {
		/* non-fatal: continue and try to read the MAC anyway */
		PRINT_WARN("could not query adapter parameters on device %s: "
			   "x%x\n", CARD_BUS_ID(card), rc);
	}

	rc = qeth_setadpparms_change_macaddr(card);
	if (rc) {
		PRINT_WARN("couldn't get MAC address on "
			   "device %s: x%x\n",
			   CARD_BUS_ID(card), rc);
		QETH_DBF_TEXT_(setup, 2,"1err%d",rc);
		return rc;
	}
	QETH_DBF_HEX(setup,2, card->dev->dev_addr, OSA_ADDR_LEN);

	rc = qeth_layer2_send_setmac(card, &card->dev->dev_addr[0]);
	if (rc)
		QETH_DBF_TEXT_(setup, 2,"2err%d",rc);
	return 0;
}
7017
7018
7019static int
7020qeth_send_startstoplan(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
7021 enum qeth_prot_versions prot)
7022{
7023 int rc;
7024 struct qeth_cmd_buffer *iob;
7025
7026 iob = qeth_get_ipacmd_buffer(card,ipacmd,prot);
7027 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
7028
7029 return rc;
7030}
7031
7032static int
7033qeth_send_startlan(struct qeth_card *card, enum qeth_prot_versions prot)
7034{
7035 int rc;
7036
7037 QETH_DBF_TEXT_(setup, 2, "strtlan%i", prot);
7038
7039 rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, prot);
7040 return rc;
7041}
7042
7043static int
7044qeth_send_stoplan(struct qeth_card *card)
7045{
7046 int rc = 0;
7047
7048 /*
7049 * TODO: according to the IPA format document page 14,
7050 * TCP/IP (we!) never issue a STOPLAN
7051 * is this right ?!?
7052 */
7053 QETH_DBF_TEXT(trace, 2, "stoplan");
7054
7055 rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, QETH_PROT_IPV4);
7056 return rc;
7057}
7058
/*
 * Reply callback for QIPASSIST: record the supported and enabled IPA
 * assist masks per protocol version in the card options.
 */
static int
qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply,
			unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(setup, 2, "qipasscb");

	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
		card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
		/* Disable IPV6 support hard coded for Hipersockets */
		if(card->info.type == QETH_CARD_TYPE_IQD)
			card->options.ipa4.supported_funcs &= ~IPA_IPV6;
	} else {
#ifdef CONFIG_QETH_IPV6
		card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
		card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
#endif
	}
	QETH_DBF_TEXT(setup, 2, "suppenbl");
	QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_supported);
	QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_enabled);
	return 0;
}
7085
7086static int
7087qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
7088{
7089 int rc;
7090 struct qeth_cmd_buffer *iob;
7091
7092 QETH_DBF_TEXT_(setup, 2, "qipassi%i", prot);
7093 if (card->options.layer2) {
7094 QETH_DBF_TEXT(setup, 2, "noprmly2");
7095 return -EPERM;
7096 }
7097
7098 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_QIPASSIST,prot);
7099 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
7100 return rc;
7101}
7102
/*
 * Allocate a SETASSPARMS command buffer and fill in its sub-header.
 * @len is the size of the assist-specific payload; the wire length
 * additionally covers the 8-byte sub-header.
 */
static struct qeth_cmd_buffer *
qeth_get_setassparms_cmd(struct qeth_card *card, enum qeth_ipa_funcs ipa_func,
			 __u16 cmd_code, __u16 len,
			 enum qeth_prot_versions prot)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(trace,4,"getasscm");
	iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETASSPARMS,prot);

	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	cmd->data.setassparms.hdr.assist_no = ipa_func;
	cmd->data.setassparms.hdr.length = 8 + len;	/* 8 = sub-header */
	cmd->data.setassparms.hdr.command_code = cmd_code;
	cmd->data.setassparms.hdr.return_code = 0;
	cmd->data.setassparms.hdr.seq_no = 0;

	return iob;
}
7123
/*
 * Send a prepared SETASSPARMS command.  @data doubles as either an
 * immediate 32-bit value (len <= 4) or a pointer to a payload buffer
 * of @len bytes that is copied into the command.
 */
static int
qeth_send_setassparms(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		      __u16 len, long data,
		      int (*reply_cb)
		      (struct qeth_card *,struct qeth_reply *,unsigned long),
		      void *reply_param)
{
	int rc;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(trace,4,"sendassp");

	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	if (len <= sizeof(__u32))
		cmd->data.setassparms.data.flags_32bit = (__u32) data;
	else /* (len > sizeof(__u32)) */
		memcpy(&cmd->data.setassparms.data, (void *) data, len);

	rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
	return rc;
}
7145
#ifdef CONFIG_QETH_IPV6
/* Send a payload-less SETASSPARMS command for IPv6. */
static int
qeth_send_simple_setassparms_ipv6(struct qeth_card *card,
				  enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
{
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(trace, 4, "simassp6");
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
				       0, QETH_PROT_IPV6);
	return qeth_send_setassparms(card, iob, 0, 0,
				     qeth_default_setassparms_cb, NULL);
}
#endif
7163
7164static int
7165qeth_send_simple_setassparms(struct qeth_card *card,
7166 enum qeth_ipa_funcs ipa_func,
7167 __u16 cmd_code, long data)
7168{
7169 int rc;
7170 int length = 0;
7171 struct qeth_cmd_buffer *iob;
7172
7173 QETH_DBF_TEXT(trace,4,"simassp4");
7174 if (data)
7175 length = sizeof(__u32);
7176 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
7177 length, QETH_PROT_IPV4);
7178 rc = qeth_send_setassparms(card, iob, length, data,
7179 qeth_default_setassparms_cb, NULL);
7180 return rc;
7181}
7182
7183static int
7184qeth_start_ipa_arp_processing(struct qeth_card *card)
7185{
7186 int rc;
7187
7188 QETH_DBF_TEXT(trace,3,"ipaarp");
7189
7190 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
7191 PRINT_WARN("ARP processing not supported "
7192 "on %s!\n", QETH_CARD_IFNAME(card));
7193 return 0;
7194 }
7195 rc = qeth_send_simple_setassparms(card,IPA_ARP_PROCESSING,
7196 IPA_CMD_ASS_START, 0);
7197 if (rc) {
7198 PRINT_WARN("Could not start ARP processing "
7199 "assist on %s: 0x%x\n",
7200 QETH_CARD_IFNAME(card), rc);
7201 }
7202 return rc;
7203}
7204
7205static int
7206qeth_start_ipa_ip_fragmentation(struct qeth_card *card)
7207{
7208 int rc;
7209
7210 QETH_DBF_TEXT(trace,3,"ipaipfrg");
7211
7212 if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
7213 PRINT_INFO("Hardware IP fragmentation not supported on %s\n",
7214 QETH_CARD_IFNAME(card));
7215 return -EOPNOTSUPP;
7216 }
7217
7218 rc = qeth_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
7219 IPA_CMD_ASS_START, 0);
7220 if (rc) {
7221 PRINT_WARN("Could not start Hardware IP fragmentation "
7222 "assist on %s: 0x%x\n",
7223 QETH_CARD_IFNAME(card), rc);
7224 } else
7225 PRINT_INFO("Hardware IP fragmentation enabled \n");
7226 return rc;
7227}
7228
7229static int
7230qeth_start_ipa_source_mac(struct qeth_card *card)
7231{
7232 int rc;
7233
7234 QETH_DBF_TEXT(trace,3,"stsrcmac");
7235
7236 if (!card->options.fake_ll)
7237 return -EOPNOTSUPP;
7238
7239 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
7240 PRINT_INFO("Inbound source address not "
7241 "supported on %s\n", QETH_CARD_IFNAME(card));
7242 return -EOPNOTSUPP;
7243 }
7244
7245 rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
7246 IPA_CMD_ASS_START, 0);
7247 if (rc)
7248 PRINT_WARN("Could not start inbound source "
7249 "assist on %s: 0x%x\n",
7250 QETH_CARD_IFNAME(card), rc);
7251 return rc;
7252}
7253
/*
 * Start the VLAN priority assist and, on success, advertise the HW
 * VLAN features on the net_device.  A no-op without CONFIG_QETH_VLAN.
 */
static int
qeth_start_ipa_vlan(struct qeth_card *card)
{
	int rc = 0;

	QETH_DBF_TEXT(trace,3,"strtvlan");

#ifdef CONFIG_QETH_VLAN
	if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
		PRINT_WARN("VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}

	rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
					  IPA_CMD_ASS_START,0);
	if (rc) {
		PRINT_WARN("Could not start vlan "
			   "assist on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
	} else {
		PRINT_INFO("VLAN enabled \n");
		/* card can filter, insert and strip VLAN tags itself */
		card->dev->features |=
			NETIF_F_HW_VLAN_FILTER |
			NETIF_F_HW_VLAN_TX |
			NETIF_F_HW_VLAN_RX;
	}
#endif /* QETH_VLAN */
	return rc;
}
7283
7284static int
7285qeth_start_ipa_multicast(struct qeth_card *card)
7286{
7287 int rc;
7288
7289 QETH_DBF_TEXT(trace,3,"stmcast");
7290
7291 if (!qeth_is_supported(card, IPA_MULTICASTING)) {
7292 PRINT_WARN("Multicast not supported on %s\n",
7293 QETH_CARD_IFNAME(card));
7294 return -EOPNOTSUPP;
7295 }
7296
7297 rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
7298 IPA_CMD_ASS_START,0);
7299 if (rc) {
7300 PRINT_WARN("Could not start multicast "
7301 "assist on %s: rc=%i\n",
7302 QETH_CARD_IFNAME(card), rc);
7303 } else {
7304 PRINT_INFO("Multicast enabled\n");
7305 card->dev->flags |= IFF_MULTICAST;
7306 }
7307 return rc;
7308}
7309
#ifdef CONFIG_QETH_IPV6
/*
 * Bring up IPv6 support step by step: start the IPv6 LAN, query the
 * IPv6 assists, then start the IPv6 assist via both the IPv4 and IPv6
 * command paths and finally enable passthrough.  Stops at the first
 * failing step and returns its return code.
 */
static int
qeth_softsetup_ipv6(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(trace,3,"softipv6");

	rc = qeth_send_startlan(card, QETH_PROT_IPV6);
	if (rc) {
		PRINT_ERR("IPv6 startlan failed on %s\n",
			  QETH_CARD_IFNAME(card));
		return rc;
	}
	rc = qeth_query_ipassists(card,QETH_PROT_IPV6);
	if (rc) {
		PRINT_ERR("IPv6 query ipassist failed on %s\n",
			  QETH_CARD_IFNAME(card));
		return rc;
	}
	/* start the IPv6 assist through the IPv4 command path;
	 * the value 3 is the assist-specific parameter sent along */
	rc = qeth_send_simple_setassparms(card, IPA_IPV6,
					  IPA_CMD_ASS_START, 3);
	if (rc) {
		PRINT_WARN("IPv6 start assist (version 4) failed "
			   "on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		return rc;
	}
	rc = qeth_send_simple_setassparms_ipv6(card, IPA_IPV6,
					       IPA_CMD_ASS_START);
	if (rc) {
		PRINT_WARN("IPV6 start assist (version 6) failed "
			   "on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		return rc;
	}
	rc = qeth_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
					       IPA_CMD_ASS_START);
	if (rc) {
		PRINT_WARN("Could not enable passthrough "
			   "on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		return rc;
	}
	PRINT_INFO("IPV6 enabled \n");
	return 0;
}

#endif
7359
/* Start IPv6 support if compiled in and supported by the card. */
static int
qeth_start_ipa_ipv6(struct qeth_card *card)
{
	int rc = 0;
#ifdef CONFIG_QETH_IPV6
	QETH_DBF_TEXT(trace, 3, "strtipv6");

	if (!qeth_is_supported(card, IPA_IPV6)) {
		PRINT_WARN("IPv6 not supported on %s\n",
			   QETH_CARD_IFNAME(card));
		return 0;
	}
	rc = qeth_softsetup_ipv6(card);
#endif
	return rc;
}
7376
/*
 * Start and configure broadcast filtering.  broadcast_capable is
 * promoted stepwise (0 -> WITH_ECHO -> WITHOUT_ECHO) as each command
 * succeeds, so a partial setup still leaves a usable mode; on exit the
 * IFF_BROADCAST flag mirrors whatever capability was reached.
 */
static int
qeth_start_ipa_broadcast(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(trace,3,"stbrdcst");
	card->info.broadcast_capable = 0;
	if (!qeth_is_supported(card, IPA_FILTERING)) {
		PRINT_WARN("Broadcast not supported on %s\n",
			   QETH_CARD_IFNAME(card));
		rc = -EOPNOTSUPP;
		goto out;
	}
	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
					  IPA_CMD_ASS_START, 0);
	if (rc) {
		PRINT_WARN("Could not enable broadcasting filtering "
			   "on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		goto out;
	}

	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
					  IPA_CMD_ASS_CONFIGURE, 1);
	if (rc) {
		PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		goto out;
	}
	card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
	PRINT_INFO("Broadcast enabled \n");
	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
					  IPA_CMD_ASS_ENABLE, 1);
	if (rc) {
		PRINT_WARN("Could not set up broadcast echo filtering on "
			   "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc);
		goto out;
	}
	card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
out:
	/* reflect the reached capability in the device flags */
	if (card->info.broadcast_capable)
		card->dev->flags |= IFF_BROADCAST;
	else
		card->dev->flags &= ~IFF_BROADCAST;
	return rc;
}
7423
7424static int
7425qeth_send_checksum_command(struct qeth_card *card)
7426{
7427 int rc;
7428
7429 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
7430 IPA_CMD_ASS_START, 0);
7431 if (rc) {
7432 PRINT_WARN("Starting Inbound HW Checksumming failed on %s: "
7433 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
7434 QETH_CARD_IFNAME(card), rc);
7435 return rc;
7436 }
7437 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
7438 IPA_CMD_ASS_ENABLE,
7439 card->info.csum_mask);
7440 if (rc) {
7441 PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: "
7442 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
7443 QETH_CARD_IFNAME(card), rc);
7444 return rc;
7445 }
7446 return 0;
7447}
7448
7449static int
7450qeth_start_ipa_checksum(struct qeth_card *card)
7451{
7452 int rc = 0;
7453
7454 QETH_DBF_TEXT(trace,3,"strtcsum");
7455
7456 if (card->options.checksum_type == NO_CHECKSUMMING) {
7457 PRINT_WARN("Using no checksumming on %s.\n",
7458 QETH_CARD_IFNAME(card));
7459 return 0;
7460 }
7461 if (card->options.checksum_type == SW_CHECKSUMMING) {
7462 PRINT_WARN("Using SW checksumming on %s.\n",
7463 QETH_CARD_IFNAME(card));
7464 return 0;
7465 }
7466 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
7467 PRINT_WARN("Inbound HW Checksumming not "
7468 "supported on %s,\ncontinuing "
7469 "using Inbound SW Checksumming\n",
7470 QETH_CARD_IFNAME(card));
7471 card->options.checksum_type = SW_CHECKSUMMING;
7472 return 0;
7473 }
7474 rc = qeth_send_checksum_command(card);
7475 if (!rc) {
7476 PRINT_INFO("HW Checksumming (inbound) enabled \n");
7477 }
7478 return rc;
7479}
7480
7481static int
7482qeth_start_ipa_tso(struct qeth_card *card)
7483{
7484 int rc;
7485
7486 QETH_DBF_TEXT(trace,3,"sttso");
7487
7488 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
7489 PRINT_WARN("Outbound TSO not supported on %s\n",
7490 QETH_CARD_IFNAME(card));
7491 rc = -EOPNOTSUPP;
7492 } else {
7493 rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
7494 IPA_CMD_ASS_START,0);
7495 if (rc)
7496 PRINT_WARN("Could not start outbound TSO "
7497 "assist on %s: rc=%i\n",
7498 QETH_CARD_IFNAME(card), rc);
7499 else
7500 PRINT_INFO("Outbound TSO enabled\n");
7501 }
7502 if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)){
7503 card->options.large_send = QETH_LARGE_SEND_NO;
7504 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
7505 NETIF_F_HW_CSUM);
7506 }
7507 return rc;
7508}
7509
/*
 * Kick off all IPA assists.  Each helper logs its own failure; we
 * deliberately continue with the next assist regardless of errors,
 * which is why every return value is ignored ("go on").
 */
static int
qeth_start_ipassists(struct qeth_card *card)
{
	QETH_DBF_TEXT(trace,3,"strtipas");
	qeth_start_ipa_arp_processing(card);	/* go on*/
	qeth_start_ipa_ip_fragmentation(card); 	/* go on*/
	qeth_start_ipa_source_mac(card);	/* go on*/
	qeth_start_ipa_vlan(card);		/* go on*/
	qeth_start_ipa_multicast(card);		/* go on*/
	qeth_start_ipa_ipv6(card);		/* go on*/
	qeth_start_ipa_broadcast(card);		/* go on*/
	qeth_start_ipa_checksum(card);		/* go on*/
	qeth_start_ipa_tso(card);		/* go on*/
	return 0;
}
7525
7526static int
7527qeth_send_setrouting(struct qeth_card *card, enum qeth_routing_types type,
7528 enum qeth_prot_versions prot)
7529{
7530 int rc;
7531 struct qeth_ipa_cmd *cmd;
7532 struct qeth_cmd_buffer *iob;
7533
7534 QETH_DBF_TEXT(trace,4,"setroutg");
7535 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
7536 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7537 cmd->data.setrtg.type = (type);
7538 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
7539
7540 return rc;
7541
7542}
7543
/*
 * Validate the configured routing type against the card type (IQD
 * supports connector/multicast-router roles, OSA supports the router
 * roles and - if the IPA function is present - multicast router).
 * Invalid combinations are reset to NO_ROUTER with a warning.
 */
static void
qeth_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type,
			  enum qeth_prot_versions prot)
{
	if (card->info.type == QETH_CARD_TYPE_IQD) {
		switch (*type) {
		case NO_ROUTER:
		case PRIMARY_CONNECTOR:
		case SECONDARY_CONNECTOR:
		case MULTICAST_ROUTER:
			return;
		default:
			goto out_inval;
		}
	} else {
		switch (*type) {
		case NO_ROUTER:
		case PRIMARY_ROUTER:
		case SECONDARY_ROUTER:
			return;
		case MULTICAST_ROUTER:
			if (qeth_is_ipafunc_supported(card, prot,
						      IPA_OSA_MC_ROUTER))
				return;
			/* fallthrough: unsupported multicast router */
		default:
			goto out_inval;
		}
	}
out_inval:
	PRINT_WARN("Routing type '%s' not supported for interface %s.\n"
		   "Router status set to 'no router'.\n",
		   ((*type == PRIMARY_ROUTER)? "primary router" :
		    (*type == SECONDARY_ROUTER)? "secondary router" :
		    (*type == PRIMARY_CONNECTOR)? "primary connector" :
		    (*type == SECONDARY_CONNECTOR)? "secondary connector" :
		    (*type == MULTICAST_ROUTER)? "multicast router" :
		    "unknown"),
		   card->dev->name);
	*type = NO_ROUTER;
}
7584
7585int
7586qeth_setrouting_v4(struct qeth_card *card)
7587{
7588 int rc;
7589
7590 QETH_DBF_TEXT(trace,3,"setrtg4");
7591
7592 qeth_correct_routing_type(card, &card->options.route4.type,
7593 QETH_PROT_IPV4);
7594
7595 rc = qeth_send_setrouting(card, card->options.route4.type,
7596 QETH_PROT_IPV4);
7597 if (rc) {
7598 card->options.route4.type = NO_ROUTER;
7599 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
7600 "Type set to 'no router'.\n",
7601 rc, QETH_CARD_IFNAME(card));
7602 }
7603 return rc;
7604}
7605
/*
 * Apply the configured IPv6 routing type to the card. Without
 * CONFIG_QETH_IPV6, or if the card lacks the IPA_IPV6 assist, this is
 * a no-op returning 0. On a SETRTG failure the option falls back to
 * NO_ROUTER and the error is returned.
 */
int
qeth_setrouting_v6(struct qeth_card *card)
{
	int rc = 0;

	QETH_DBF_TEXT(trace,3,"setrtg6");
#ifdef CONFIG_QETH_IPV6

	if (!qeth_is_supported(card, IPA_IPV6))
		return 0;
	qeth_correct_routing_type(card, &card->options.route6.type,
				  QETH_PROT_IPV6);

	rc = qeth_send_setrouting(card, card->options.route6.type,
				  QETH_PROT_IPV6);
	if (rc) {
		card->options.route6.type = NO_ROUTER;
		PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
			   "Type set to 'no router'.\n",
			   rc, QETH_CARD_IFNAME(card));
	}
#endif
	return rc;
}
7630
/*
 * Change the large_send (segmentation offload) mode of the card and
 * adjust the netdevice feature flags accordingly. While the interface
 * is up, its tx queue is stopped around the update and woken again
 * afterwards. Returns 0 on success or -EOPNOTSUPP if TSO was
 * requested but the card does not support it (large_send is then
 * reset to QETH_LARGE_SEND_NO).
 */
int
qeth_set_large_send(struct qeth_card *card, enum qeth_large_send_types type)
{
	int rc = 0;

	/* no netdevice allocated yet: just record the option */
	if (card->dev == NULL) {
		card->options.large_send = type;
		return 0;
	}
	if (card->state == CARD_STATE_UP)
		netif_tx_disable(card->dev);
	card->options.large_send = type;
	switch (card->options.large_send) {
	case QETH_LARGE_SEND_EDDP:
		card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
					NETIF_F_HW_CSUM;
		break;
	case QETH_LARGE_SEND_TSO:
		if (qeth_is_supported(card, IPA_OUTBOUND_TSO)){
			card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
						NETIF_F_HW_CSUM;
		} else {
			PRINT_WARN("TSO not supported on %s. "
				   "large_send set to 'no'.\n",
				   card->dev->name);
			card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
						 NETIF_F_HW_CSUM);
			card->options.large_send = QETH_LARGE_SEND_NO;
			rc = -EOPNOTSUPP;
		}
		break;
	default: /* includes QETH_LARGE_SEND_NO */
		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
					 NETIF_F_HW_CSUM);
		break;
	}
	if (card->state == CARD_STATE_UP)
		netif_wake_queue(card->dev);
	return rc;
}
7671
7672/*
7673 * softsetup card: init IPA stuff
7674 */
7675static int
7676qeth_softsetup_card(struct qeth_card *card)
7677{
7678 int rc;
7679
7680 QETH_DBF_TEXT(setup, 2, "softsetp");
7681
7682 if ((rc = qeth_send_startlan(card, QETH_PROT_IPV4))){
7683 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
7684 if (rc == 0xe080){
7685 PRINT_WARN("LAN on card %s if offline! "
7686 "Waiting for STARTLAN from card.\n",
7687 CARD_BUS_ID(card));
7688 card->lan_online = 0;
7689 }
7690 return rc;
7691 } else
7692 card->lan_online = 1;
7693 if (card->info.type==QETH_CARD_TYPE_OSN)
7694 goto out;
7695 qeth_set_large_send(card, card->options.large_send);
7696 if (card->options.layer2) {
7697 card->dev->features |=
7698 NETIF_F_HW_VLAN_FILTER |
7699 NETIF_F_HW_VLAN_TX |
7700 NETIF_F_HW_VLAN_RX;
7701 card->dev->flags|=IFF_MULTICAST|IFF_BROADCAST;
7702 card->info.broadcast_capable=1;
7703 if ((rc = qeth_layer2_initialize(card))) {
7704 QETH_DBF_TEXT_(setup, 2, "L2err%d", rc);
7705 return rc;
7706 }
7707#ifdef CONFIG_QETH_VLAN
7708 qeth_layer2_process_vlans(card, 0);
7709#endif
7710 goto out;
7711 }
7712 if ((rc = qeth_setadapter_parms(card)))
7713 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7714 if ((rc = qeth_start_ipassists(card)))
7715 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
7716 if ((rc = qeth_setrouting_v4(card)))
7717 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
7718 if ((rc = qeth_setrouting_v6(card)))
7719 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
7720out:
7721 netif_tx_disable(card->dev);
7722 return 0;
7723}
7724
#ifdef CONFIG_QETH_IPV6
/*
 * Callback for the CREATE_ADDR command sent by qeth_get_unique_id():
 * on success store the card-provided unique id (taken from bytes 6/7
 * of the returned address); otherwise fall back to a default id and
 * warn that IPv6 autoconf may produce duplicate addresses across
 * LPARs.
 */
static int
qeth_get_unique_id_cb(struct qeth_card *card, struct qeth_reply *reply,
		      unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code == 0)
		card->info.unique_id = *((__u16 *)
				&cmd->data.create_destroy_addr.unique_id[6]);
	else {
		card->info.unique_id =  UNIQUE_ID_IF_CREATE_ADDR_FAILED |
					UNIQUE_ID_NOT_BY_CARD;
		PRINT_WARN("couldn't get a unique id from the card on device "
			   "%s (result=x%x), using default id. ipv6 "
			   "autoconfig on other lpars may lead to duplicate "
			   "ip addresses. please use manually "
			   "configured ones.\n",
			   CARD_BUS_ID(card), cmd->hdr.return_code);
	}
	return 0;
}
#endif
7749
/*
 * Give a card-assigned unique id back to the card via DESTROY_ADDR.
 * If the id was not obtained from the card (UNIQUE_ID_NOT_BY_CARD is
 * set) nothing is sent and -1 is returned. Without CONFIG_QETH_IPV6
 * only the default id is (re)set and 0 returned.
 */
static int
qeth_put_unique_id(struct qeth_card *card)
{

	int rc = 0;
#ifdef CONFIG_QETH_IPV6
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(trace,2,"puniqeid");

	if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
		UNIQUE_ID_NOT_BY_CARD)
		return -1;
	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
				     QETH_PROT_IPV6);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
			card->info.unique_id;
	memcpy(&cmd->data.create_destroy_addr.unique_id[0],
	       card->dev->dev_addr, OSA_ADDR_LEN);
	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
#else
	card->info.unique_id =  UNIQUE_ID_IF_CREATE_ADDR_FAILED |
				UNIQUE_ID_NOT_BY_CARD;
#endif
	return rc;
}
7778
/**
 * Clear IP List
 *
 * Drop all entries from the to-do list, then walk the active IP list:
 * with 'clean' set each address is deregistered from the card (the
 * ip_lock is dropped around the command and retaken); with 'recover'
 * set, non-multicast addresses are moved back onto the to-do list so
 * a later recovery can re-register them. Everything else is freed.
 */
static void
qeth_clear_ip_list(struct qeth_card *card, int clean, int recover)
{
	struct qeth_ipaddr *addr, *tmp;
	unsigned long flags;

	QETH_DBF_TEXT(trace,4,"clearip");
	spin_lock_irqsave(&card->ip_lock, flags);
	/* clear todo list */
	list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry){
		list_del(&addr->entry);
		kfree(addr);
	}

	while (!list_empty(&card->ip_list)) {
		addr = list_entry(card->ip_list.next,
				  struct qeth_ipaddr, entry);
		list_del_init(&addr->entry);
		if (clean) {
			/* must not hold ip_lock while talking to the card */
			spin_unlock_irqrestore(&card->ip_lock, flags);
			qeth_deregister_addr_entry(card, addr);
			spin_lock_irqsave(&card->ip_lock, flags);
		}
		if (!recover || addr->is_multicast) {
			kfree(addr);
			continue;
		}
		list_add_tail(&addr->entry, card->ip_tbd_list);
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
}
7813
7814static void
7815qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
7816 int clear_start_mask)
7817{
7818 unsigned long flags;
7819
7820 spin_lock_irqsave(&card->thread_mask_lock, flags);
7821 card->thread_allowed_mask = threads;
7822 if (clear_start_mask)
7823 card->thread_start_mask &= threads;
7824 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
7825 wake_up(&card->wait_q);
7826}
7827
7828static int
7829qeth_threads_running(struct qeth_card *card, unsigned long threads)
7830{
7831 unsigned long flags;
7832 int rc = 0;
7833
7834 spin_lock_irqsave(&card->thread_mask_lock, flags);
7835 rc = (card->thread_running_mask & threads);
7836 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
7837 return rc;
7838}
7839
7840static int
7841qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
7842{
7843 return wait_event_interruptible(card->wait_q,
7844 qeth_threads_running(card, threads) == 0);
7845}
7846
/*
 * Take the card down, stepping its state machine from UP through
 * SOFTSETUP and HARDSETUP to DOWN and releasing the resources of each
 * state (netdevice closed, IP/ipacmd lists cleared, qdio queues and
 * buffer pools cleared, command buffers cleared). With use_hard_stop
 * unset a graceful stop is attempted first (delmac/stoplan, returning
 * the unique id). Returns -ERESTARTSYS if waiting for running worker
 * threads was interrupted; otherwise the last error recorded during
 * teardown (0 on full success) - intermediate errors are also logged
 * to the setup dbf area.
 */
static int
qeth_stop_card(struct qeth_card *card, int recovery_mode)
{
	int rc = 0;

	QETH_DBF_TEXT(setup ,2,"stopcard");
	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));

	/* forbid all threads, then wait for everything but recovery */
	qeth_set_allowed_threads(card, 0, 1);
	if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
		return -ERESTARTSYS;
	if (card->read.state == CH_STATE_UP &&
	    card->write.state == CH_STATE_UP &&
	    (card->state == CARD_STATE_UP)) {
		if (recovery_mode &&
		    card->info.type != QETH_CARD_TYPE_OSN) {
			qeth_stop(card->dev);
		} else {
			rtnl_lock();
			dev_close(card->dev);
			rtnl_unlock();
		}
		if (!card->use_hard_stop) {
			__u8 *mac = &card->dev->dev_addr[0];
			rc = qeth_layer2_send_delmac(card, mac);
			QETH_DBF_TEXT_(setup, 2, "Lerr%d", rc);
			if ((rc = qeth_send_stoplan(card)))
				QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
		}
		card->state = CARD_STATE_SOFTSETUP;
	}
	if (card->state == CARD_STATE_SOFTSETUP) {
#ifdef CONFIG_QETH_VLAN
		if (card->options.layer2)
			qeth_layer2_process_vlans(card, 1);
#endif
		qeth_clear_ip_list(card, !card->use_hard_stop, 1);
		qeth_clear_ipacmd_list(card);
		card->state = CARD_STATE_HARDSETUP;
	}
	if (card->state == CARD_STATE_HARDSETUP) {
		if ((!card->use_hard_stop) &&
		    (!card->options.layer2))
			if ((rc = qeth_put_unique_id(card)))
				QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
		qeth_qdio_clear_card(card, 0);
		qeth_clear_qdio_buffers(card);
		qeth_clear_working_pool_list(card);
		card->state = CARD_STATE_DOWN;
	}
	if (card->state == CARD_STATE_DOWN) {
		qeth_clear_cmd_buffers(&card->read);
		qeth_clear_cmd_buffers(&card->write);
	}
	card->use_hard_stop = 0;
	return rc;
}
7904
7905
/*
 * Ask the card for a unique id (used for IPv6 autoconfiguration) via
 * CREATE_ADDR; the reply is handled in qeth_get_unique_id_cb(). If
 * IPv6 is not compiled in or the card lacks the IPA_IPV6 assist, a
 * default id marked UNIQUE_ID_NOT_BY_CARD is used and 0 is returned.
 */
static int
qeth_get_unique_id(struct qeth_card *card)
{
	int rc = 0;
#ifdef CONFIG_QETH_IPV6
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(setup, 2, "guniqeid");

	if (!qeth_is_supported(card,IPA_IPV6)) {
		card->info.unique_id =  UNIQUE_ID_IF_CREATE_ADDR_FAILED |
					UNIQUE_ID_NOT_BY_CARD;
		return 0;
	}

	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
				     QETH_PROT_IPV6);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
			card->info.unique_id;

	rc = qeth_send_ipa_cmd(card, iob, qeth_get_unique_id_cb, NULL);
#else
	card->info.unique_id =  UNIQUE_ID_IF_CREATE_ADDR_FAILED |
				UNIQUE_ID_NOT_BY_CARD;
#endif
	return rc;
}
/*
 * Log the device triplet, card name/level and link type together with
 * the portname, which is converted from EBCDIC to ASCII for display.
 * NOTE(review): assumes portname[1..8] holds the 8 EBCDIC portname
 * characters and that the string copied from portname + 1 fits into
 * dbf_text[15] - confirm against the setters of card->info.portname.
 */
static void
qeth_print_status_with_portname(struct qeth_card *card)
{
	char dbf_text[15];
	int i;

	sprintf(dbf_text, "%s", card->info.portname + 1);
	/* convert each EBCDIC portname character to ASCII */
	for (i = 0; i < 8; i++)
		dbf_text[i] =
			(char) _ebcasc[(__u8) dbf_text[i]];
	dbf_text[8] = 0;
	printk("qeth: Device %s/%s/%s is a%s card%s%s%s\n"
	       "with link type %s (portname: %s)\n",
	       CARD_RDEV_ID(card),
	       CARD_WDEV_ID(card),
	       CARD_DDEV_ID(card),
	       qeth_get_cardname(card),
	       (card->info.mcl_level[0]) ? " (level: " : "",
	       (card->info.mcl_level[0]) ? card->info.mcl_level : "",
	       (card->info.mcl_level[0]) ? ")" : "",
	       qeth_get_cardname_short(card),
	       dbf_text);

}
7959
7960static void
7961qeth_print_status_no_portname(struct qeth_card *card)
7962{
7963 if (card->info.portname[0])
7964 printk("qeth: Device %s/%s/%s is a%s "
7965 "card%s%s%s\nwith link type %s "
7966 "(no portname needed by interface).\n",
7967 CARD_RDEV_ID(card),
7968 CARD_WDEV_ID(card),
7969 CARD_DDEV_ID(card),
7970 qeth_get_cardname(card),
7971 (card->info.mcl_level[0]) ? " (level: " : "",
7972 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7973 (card->info.mcl_level[0]) ? ")" : "",
7974 qeth_get_cardname_short(card));
7975 else
7976 printk("qeth: Device %s/%s/%s is a%s "
7977 "card%s%s%s\nwith link type %s.\n",
7978 CARD_RDEV_ID(card),
7979 CARD_WDEV_ID(card),
7980 CARD_DDEV_ID(card),
7981 qeth_get_cardname(card),
7982 (card->info.mcl_level[0]) ? " (level: " : "",
7983 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7984 (card->info.mcl_level[0]) ? ")" : "",
7985 qeth_get_cardname_short(card));
7986}
7987
/*
 * Print the card status line, first normalizing info.mcl_level:
 * for OSAE cards reporting a binary level, render the two level bytes
 * as hex in place; for guest-LAN (and fallthrough OSAE/IQD) cards,
 * convert the four EBCDIC level characters; otherwise clear it.
 * Then dispatch to the with/without-portname printer.
 */
static void
qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSAE:
		/* VM will use a non-zero first character
		 * to indicate a HiperSockets like reporting
		 * of the level OSA sets the first character to zero
		 * */
		if (!card->info.mcl_level[0]) {
			sprintf(card->info.mcl_level,"%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);

			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
			break;
		}
		/* fallthrough */
	case QETH_CARD_TYPE_IQD:
		if (card->info.guestlan) {
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	if (card->info.portname_required)
		qeth_print_status_with_portname(card);
	else
		qeth_print_status_no_portname(card);
}
8027
8028static int
8029qeth_register_netdev(struct qeth_card *card)
8030{
8031 QETH_DBF_TEXT(setup, 3, "regnetd");
8032 if (card->dev->reg_state != NETREG_UNINITIALIZED)
8033 return 0;
8034 /* sysfs magic */
8035 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
8036 return register_netdev(card->dev);
8037}
8038
8039static void
8040qeth_start_again(struct qeth_card *card, int recovery_mode)
8041{
8042 QETH_DBF_TEXT(setup ,2, "startag");
8043
8044 if (recovery_mode &&
8045 card->info.type != QETH_CARD_TYPE_OSN) {
8046 qeth_open(card->dev);
8047 } else {
8048 rtnl_lock();
8049 dev_open(card->dev);
8050 rtnl_unlock();
8051 }
8052 /* this also sets saved unicast addresses */
8053 qeth_set_multicast_list(card->dev);
8054}
8055
8056
/* Layer 2 specific stuff */
/* If the option currently EQUALS 'value', warn that it is unsupported
 * with layer-2 functionality and reset it to 'reset_value'. */
#define IGNORE_PARAM_EQ(option,value,reset_value,msg) \
        if (card->options.option == value) { \
                PRINT_ERR("%s not supported with layer 2 " \
                          "functionality, ignoring option on read" \
                          "channel device %s .\n",msg,CARD_RDEV_ID(card)); \
                card->options.option = reset_value; \
        }
/* If the option currently DIFFERS from 'value', warn that it is
 * unsupported with layer-2 functionality and reset it to
 * 'reset_value'. */
#define IGNORE_PARAM_NEQ(option,value,reset_value,msg) \
        if (card->options.option != value) { \
                PRINT_ERR("%s not supported with layer 2 " \
                          "functionality, ignoring option on read" \
                          "channel device %s .\n",msg,CARD_RDEV_ID(card)); \
                card->options.option = reset_value; \
        }
8072
8073
/*
 * For layer-2 mode, reset every option that only makes sense in
 * layer-3 mode back to its default, warning once per option. IQD
 * devices cannot do layer 2 at all, so the layer2 option itself is
 * dropped there. No-op for layer-3 setups and OSN devices.
 */
static void qeth_make_parameters_consistent(struct qeth_card *card)
{

	if (card->options.layer2 == 0)
		return;
	if (card->info.type == QETH_CARD_TYPE_OSN)
		return;
	if (card->info.type == QETH_CARD_TYPE_IQD) {
       		PRINT_ERR("Device %s does not support layer 2 functionality." \
	               	  " Ignoring layer2 option.\n",CARD_BUS_ID(card));
       		card->options.layer2 = 0;
		return;
	}
       	IGNORE_PARAM_NEQ(route4.type, NO_ROUTER, NO_ROUTER,
               	         "Routing options are");
#ifdef CONFIG_QETH_IPV6
       	IGNORE_PARAM_NEQ(route6.type, NO_ROUTER, NO_ROUTER,
                	 "Routing options are");
#endif
       	IGNORE_PARAM_EQ(checksum_type, HW_CHECKSUMMING,
               	        QETH_CHECKSUM_DEFAULT,
                	"Checksumming options are");
       	IGNORE_PARAM_NEQ(broadcast_mode, QETH_TR_BROADCAST_ALLRINGS,
               	         QETH_TR_BROADCAST_ALLRINGS,
                	 "Broadcast mode options are");
       	IGNORE_PARAM_NEQ(macaddr_mode, QETH_TR_MACADDR_NONCANONICAL,
               	         QETH_TR_MACADDR_NONCANONICAL,
                	 "Canonical MAC addr options are");
       	IGNORE_PARAM_NEQ(fake_broadcast, 0, 0,
			 "Broadcast faking options are");
       	IGNORE_PARAM_NEQ(add_hhlen, DEFAULT_ADD_HHLEN,
			 DEFAULT_ADD_HHLEN,"Option add_hhlen is");
        IGNORE_PARAM_NEQ(fake_ll, 0, 0,"Option fake_ll is");
}
8108
8109
/*
 * Bring a qeth ccwgroup device online: set the three CCW subchannels
 * online, hard-set up the card, query IPA assists and the IPv6 unique
 * id, register the netdevice, soft-set up the card and initialize the
 * qdio queues. recovery_mode selects the fast reopen path in
 * qeth_start_again(). On failure after the subchannels came online
 * the card is stopped again, the subchannels are set offline and
 * -ENODEV is returned (-EIO if setting the subchannels online itself
 * failed); a previous RECOVER state is preserved.
 */
static int
__qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode)
{
	struct qeth_card *card = gdev->dev.driver_data;
	int rc = 0;
	enum qeth_card_states recover_flag;

	BUG_ON(!card);
	QETH_DBF_TEXT(setup ,2, "setonlin");
	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));

	qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
	if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)){
		PRINT_WARN("set_online of card %s interrupted by user!\n",
			   CARD_BUS_ID(card));
		return -ERESTARTSYS;
	}

	recover_flag = card->state;
	if ((rc = ccw_device_set_online(CARD_RDEV(card))) ||
	    (rc = ccw_device_set_online(CARD_WDEV(card))) ||
	    (rc = ccw_device_set_online(CARD_DDEV(card)))){
		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
		return -EIO;
	}

	qeth_make_parameters_consistent(card);

	if ((rc = qeth_hardsetup_card(card))){
		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
		goto out_remove;
	}
	card->state = CARD_STATE_HARDSETUP;

	if (!(rc = qeth_query_ipassists(card,QETH_PROT_IPV4)))
		rc = qeth_get_unique_id(card);

	/* ipassists/unique-id errors are only fatal for layer 3 */
	if (rc && card->options.layer2 == 0) {
		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
		goto out_remove;
	}
	qeth_print_status_message(card);
	if ((rc = qeth_register_netdev(card))){
		QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
		goto out_remove;
	}
	if ((rc = qeth_softsetup_card(card))){
		QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
		goto out_remove;
	}

	if ((rc = qeth_init_qdio_queues(card))){
		QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
		goto out_remove;
	}
	card->state = CARD_STATE_SOFTSETUP;
	netif_carrier_on(card->dev);

	qeth_set_allowed_threads(card, 0xffffffff, 0);
	if (recover_flag == CARD_STATE_RECOVER)
		qeth_start_again(card, recovery_mode);
	qeth_notify_processes();
	return 0;
out_remove:
	card->use_hard_stop = 1;
	qeth_stop_card(card, 0);
	ccw_device_set_offline(CARD_DDEV(card));
	ccw_device_set_offline(CARD_WDEV(card));
	ccw_device_set_offline(CARD_RDEV(card));
	if (recover_flag == CARD_STATE_RECOVER)
		card->state = CARD_STATE_RECOVER;
	else
		card->state = CARD_STATE_DOWN;
	return -ENODEV;
}
8185
/* ccwgroup set_online callback: bring the card online (non-recovery). */
static int
qeth_set_online(struct ccwgroup_device *gdev)
{
	return __qeth_set_online(gdev, 0);
}
8191
/* CCW device types claimed by qeth; driver_info carries the card type
 * (OSA Express, HiperSockets/IQD, OSN). */
static struct ccw_device_id qeth_ids[] = {
	{CCW_DEVICE(0x1731, 0x01), .driver_info = QETH_CARD_TYPE_OSAE},
	{CCW_DEVICE(0x1731, 0x05), .driver_info = QETH_CARD_TYPE_IQD},
	{CCW_DEVICE(0x1731, 0x06), .driver_info = QETH_CARD_TYPE_OSN},
	{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);

/* driver-level root device (initialized elsewhere) */
struct device *qeth_root_dev = NULL;

/* ccwgroup driver: a qeth card is a group of CCW devices with
 * online/offline handled through __qeth_set_online()/set_offline */
struct ccwgroup_driver qeth_ccwgroup_driver = {
	.owner = THIS_MODULE,
	.name = "qeth",
	.driver_id = 0xD8C5E3C8,
	.probe = qeth_probe_device,
	.remove = qeth_remove_device,
	.set_online = qeth_set_online,
	.set_offline = qeth_set_offline,
};

/* plain ccw driver for the individual member subchannels; probe and
 * remove are delegated to the ccwgroup helpers */
struct ccw_driver qeth_ccw_driver = {
	.name = "qeth",
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};
8218
8219
/*
 * Unregister every s390 debug-feature (dbf) area that was
 * successfully registered; safe to call with partially registered
 * views (NULL pointers are skipped).
 */
static void
qeth_unregister_dbf_views(void)
{
	if (qeth_dbf_setup)
		debug_unregister(qeth_dbf_setup);
	if (qeth_dbf_qerr)
		debug_unregister(qeth_dbf_qerr);
	if (qeth_dbf_sense)
		debug_unregister(qeth_dbf_sense);
	if (qeth_dbf_misc)
		debug_unregister(qeth_dbf_misc);
	if (qeth_dbf_data)
		debug_unregister(qeth_dbf_data);
	if (qeth_dbf_control)
		debug_unregister(qeth_dbf_control);
	if (qeth_dbf_trace)
		debug_unregister(qeth_dbf_trace);
}
/*
 * Register all s390 debug-feature (dbf) areas used by qeth and attach
 * a hex/ascii view with the configured level to each. If any
 * registration fails, everything registered so far is torn down and
 * -ENOMEM is returned.
 */
static int
qeth_register_dbf_views(void)
{
	qeth_dbf_setup = debug_register(QETH_DBF_SETUP_NAME,
					QETH_DBF_SETUP_PAGES,
					QETH_DBF_SETUP_NR_AREAS,
					QETH_DBF_SETUP_LEN);
	qeth_dbf_misc = debug_register(QETH_DBF_MISC_NAME,
				       QETH_DBF_MISC_PAGES,
				       QETH_DBF_MISC_NR_AREAS,
				       QETH_DBF_MISC_LEN);
	qeth_dbf_data = debug_register(QETH_DBF_DATA_NAME,
				       QETH_DBF_DATA_PAGES,
				       QETH_DBF_DATA_NR_AREAS,
				       QETH_DBF_DATA_LEN);
	qeth_dbf_control = debug_register(QETH_DBF_CONTROL_NAME,
					  QETH_DBF_CONTROL_PAGES,
					  QETH_DBF_CONTROL_NR_AREAS,
					  QETH_DBF_CONTROL_LEN);
	qeth_dbf_sense = debug_register(QETH_DBF_SENSE_NAME,
					QETH_DBF_SENSE_PAGES,
					QETH_DBF_SENSE_NR_AREAS,
					QETH_DBF_SENSE_LEN);
	qeth_dbf_qerr = debug_register(QETH_DBF_QERR_NAME,
				       QETH_DBF_QERR_PAGES,
				       QETH_DBF_QERR_NR_AREAS,
				       QETH_DBF_QERR_LEN);
	qeth_dbf_trace = debug_register(QETH_DBF_TRACE_NAME,
					QETH_DBF_TRACE_PAGES,
					QETH_DBF_TRACE_NR_AREAS,
					QETH_DBF_TRACE_LEN);

	if ((qeth_dbf_setup == NULL) || (qeth_dbf_misc == NULL)    ||
	    (qeth_dbf_data == NULL)  || (qeth_dbf_control == NULL) ||
	    (qeth_dbf_sense == NULL) || (qeth_dbf_qerr == NULL)    ||
	    (qeth_dbf_trace == NULL)) {
		qeth_unregister_dbf_views();
		return -ENOMEM;
	}
	debug_register_view(qeth_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_setup, QETH_DBF_SETUP_LEVEL);

	debug_register_view(qeth_dbf_misc, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_misc, QETH_DBF_MISC_LEVEL);

	debug_register_view(qeth_dbf_data, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_data, QETH_DBF_DATA_LEVEL);

	debug_register_view(qeth_dbf_control, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_control, QETH_DBF_CONTROL_LEVEL);

	debug_register_view(qeth_dbf_sense, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_sense, QETH_DBF_SENSE_LEVEL);

	debug_register_view(qeth_dbf_qerr, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_qerr, QETH_DBF_QERR_LEVEL);

	debug_register_view(qeth_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_trace, QETH_DBF_TRACE_LEVEL);

	return 0;
}
8300
#ifdef CONFIG_QETH_IPV6
extern struct neigh_table arp_tbl;
/* neigh_ops instance installed on qeth neighbours; filled from the
 * template below (all output paths go straight to dev_queue_xmit) */
static struct neigh_ops *arp_direct_ops;
/* the previous arp_tbl constructor; used as fallback for devices that
 * are not qeth layer-3 devices */
static int (*qeth_old_arp_constructor) (struct neighbour *);

/* template: bypass ARP resolution, transmit directly */
static struct neigh_ops arp_direct_ops_template = {
	.family = AF_INET,
	.solicit = NULL,
	.error_report = NULL,
	.output = dev_queue_xmit,
	.connected_output = dev_queue_xmit,
	.hh_output = dev_queue_xmit,
	.queue_xmit = dev_queue_xmit
};
8315
/*
 * Replacement arp_tbl constructor: for qeth layer-3 devices that do
 * not use fake headers, mark the neighbour NUD_NOARP and wire it to
 * arp_direct_ops so packets go straight to dev_queue_xmit without ARP
 * resolution. All other devices are handed to the saved original
 * constructor.
 */
static int
qeth_arp_constructor(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;
	struct in_device *in_dev;
	struct neigh_parms *parms;
	struct qeth_card *card;

	card = qeth_get_card_from_dev(dev);
	if (card == NULL)
		goto out;
	if((card->options.layer2) ||
	   (card->dev->header_ops == &qeth_fake_ops))
		goto out;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev == NULL) {
		rcu_read_unlock();
		return -EINVAL;
	}

	/* replace the neighbour's parms with the device's arp parms */
	parms = in_dev->arp_parms;
	__neigh_parms_put(neigh->parms);
	neigh->parms = neigh_parms_clone(parms);
	rcu_read_unlock();

	neigh->type = inet_addr_type(&init_net, *(__be32 *) neigh->primary_key);
	neigh->nud_state = NUD_NOARP;
	neigh->ops = arp_direct_ops;
	neigh->output = neigh->ops->queue_xmit;
	return 0;
out:
	return qeth_old_arp_constructor(neigh);
}
#endif  /*CONFIG_QETH_IPV6*/
8352
8353/*
8354 * IP address takeover related functions
8355 */
8356static void
8357qeth_clear_ipato_list(struct qeth_card *card)
8358{
8359 struct qeth_ipato_entry *ipatoe, *tmp;
8360 unsigned long flags;
8361
8362 spin_lock_irqsave(&card->ip_lock, flags);
8363 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
8364 list_del(&ipatoe->entry);
8365 kfree(ipatoe);
8366 }
8367 spin_unlock_irqrestore(&card->ip_lock, flags);
8368}
8369
8370int
8371qeth_add_ipato_entry(struct qeth_card *card, struct qeth_ipato_entry *new)
8372{
8373 struct qeth_ipato_entry *ipatoe;
8374 unsigned long flags;
8375 int rc = 0;
8376
8377 QETH_DBF_TEXT(trace, 2, "addipato");
8378 spin_lock_irqsave(&card->ip_lock, flags);
8379 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
8380 if (ipatoe->proto != new->proto)
8381 continue;
8382 if (!memcmp(ipatoe->addr, new->addr,
8383 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
8384 (ipatoe->mask_bits == new->mask_bits)){
8385 PRINT_WARN("ipato entry already exists!\n");
8386 rc = -EEXIST;
8387 break;
8388 }
8389 }
8390 if (!rc) {
8391 list_add_tail(&new->entry, &card->ipato.entries);
8392 }
8393 spin_unlock_irqrestore(&card->ip_lock, flags);
8394 return rc;
8395}
8396
8397void
8398qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto,
8399 u8 *addr, int mask_bits)
8400{
8401 struct qeth_ipato_entry *ipatoe, *tmp;
8402 unsigned long flags;
8403
8404 QETH_DBF_TEXT(trace, 2, "delipato");
8405 spin_lock_irqsave(&card->ip_lock, flags);
8406 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry){
8407 if (ipatoe->proto != proto)
8408 continue;
8409 if (!memcmp(ipatoe->addr, addr,
8410 (proto == QETH_PROT_IPV4)? 4:16) &&
8411 (ipatoe->mask_bits == mask_bits)){
8412 list_del(&ipatoe->entry);
8413 kfree(ipatoe);
8414 }
8415 }
8416 spin_unlock_irqrestore(&card->ip_lock, flags);
8417}
8418
8419static void
8420qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
8421{
8422 int i, j;
8423 u8 octet;
8424
8425 for (i = 0; i < len; ++i){
8426 octet = addr[i];
8427 for (j = 7; j >= 0; --j){
8428 bits[i*8 + j] = octet & 1;
8429 octet >>= 1;
8430 }
8431 }
8432}
8433
/*
 * Check whether 'addr' is covered by one of the configured IPATO
 * (IP-address takeover) ranges: entry and candidate are expanded to
 * bit arrays and compared up to the entry's mask_bits. The
 * per-protocol invert flags negate the overall result. Returns 0
 * immediately if takeover is disabled; otherwise non-zero means
 * "covered".
 */
static int
qeth_is_addr_covered_by_ipato(struct qeth_card *card, struct qeth_ipaddr *addr)
{
	struct qeth_ipato_entry *ipatoe;
	u8 addr_bits[128] = {0, };
	u8 ipatoe_bits[128] = {0, };
	int rc = 0;

	if (!card->ipato.enabled)
		return 0;

	qeth_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
				  (addr->proto == QETH_PROT_IPV4)? 4:16);
	list_for_each_entry(ipatoe, &card->ipato.entries, entry){
		if (addr->proto != ipatoe->proto)
			continue;
		qeth_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
					  (ipatoe->proto==QETH_PROT_IPV4) ?
					  4:16);
		if (addr->proto == QETH_PROT_IPV4)
			rc = !memcmp(addr_bits, ipatoe_bits,
				     min(32, ipatoe->mask_bits));
		else
			rc = !memcmp(addr_bits, ipatoe_bits,
				     min(128, ipatoe->mask_bits));
		if (rc)
			break;
	}
	/* invert? */
	if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
		rc = !rc;
	else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
		rc = !rc;

	return rc;
}
8470
8471/*
8472 * VIPA related functions
8473 */
/*
 * Register a VIPA (virtual IP address) on the card. Returns -ENOMEM
 * if no address buffer could be allocated, -EEXIST if the address is
 * already on the active or to-do list, 0 otherwise (the address is
 * queued and the list processing kicked off).
 */
int
qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
	      const u8 *addr)
{
	struct qeth_ipaddr *ipaddr;
	unsigned long flags;
	int rc = 0;

	ipaddr = qeth_get_addr_buffer(proto);
	if (ipaddr){
		if (proto == QETH_PROT_IPV4){
			QETH_DBF_TEXT(trace, 2, "addvipa4");
			memcpy(&ipaddr->u.a4.addr, addr, 4);
			ipaddr->u.a4.mask = 0;
#ifdef CONFIG_QETH_IPV6
		} else if (proto == QETH_PROT_IPV6){
			QETH_DBF_TEXT(trace, 2, "addvipa6");
			memcpy(&ipaddr->u.a6.addr, addr, 16);
			ipaddr->u.a6.pfxlen = 0;
#endif
		}
		ipaddr->type = QETH_IP_TYPE_VIPA;
		ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
		ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
	} else
		return -ENOMEM;
	spin_lock_irqsave(&card->ip_lock, flags);
	if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
	    __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
		rc = -EEXIST;
	spin_unlock_irqrestore(&card->ip_lock, flags);
	if (rc){
		PRINT_WARN("Cannot add VIPA. Address already exists!\n");
		return rc;
	}
	/* qeth_add_ip() takes ownership on success */
	if (!qeth_add_ip(card, ipaddr))
		kfree(ipaddr);
	qeth_set_ip_addr_list(card);
	return rc;
}
8514
8515void
8516qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
8517 const u8 *addr)
8518{
8519 struct qeth_ipaddr *ipaddr;
8520
8521 ipaddr = qeth_get_addr_buffer(proto);
8522 if (ipaddr){
8523 if (proto == QETH_PROT_IPV4){
8524 QETH_DBF_TEXT(trace, 2, "delvipa4");
8525 memcpy(&ipaddr->u.a4.addr, addr, 4);
8526 ipaddr->u.a4.mask = 0;
8527#ifdef CONFIG_QETH_IPV6
8528 } else if (proto == QETH_PROT_IPV6){
8529 QETH_DBF_TEXT(trace, 2, "delvipa6");
8530 memcpy(&ipaddr->u.a6.addr, addr, 16);
8531 ipaddr->u.a6.pfxlen = 0;
8532#endif
8533 }
8534 ipaddr->type = QETH_IP_TYPE_VIPA;
8535 } else
8536 return;
8537 if (!qeth_delete_ip(card, ipaddr))
8538 kfree(ipaddr);
8539 qeth_set_ip_addr_list(card);
8540}
8541
8542/*
8543 * proxy ARP related functions
8544 */
8545int
8546qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
8547 const u8 *addr)
8548{
8549 struct qeth_ipaddr *ipaddr;
8550 unsigned long flags;
8551 int rc = 0;
8552
8553 ipaddr = qeth_get_addr_buffer(proto);
8554 if (ipaddr){
8555 if (proto == QETH_PROT_IPV4){
8556 QETH_DBF_TEXT(trace, 2, "addrxip4");
8557 memcpy(&ipaddr->u.a4.addr, addr, 4);
8558 ipaddr->u.a4.mask = 0;
8559#ifdef CONFIG_QETH_IPV6
8560 } else if (proto == QETH_PROT_IPV6){
8561 QETH_DBF_TEXT(trace, 2, "addrxip6");
8562 memcpy(&ipaddr->u.a6.addr, addr, 16);
8563 ipaddr->u.a6.pfxlen = 0;
8564#endif
8565 }
8566 ipaddr->type = QETH_IP_TYPE_RXIP;
8567 ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
8568 ipaddr->del_flags = 0;
8569 } else
8570 return -ENOMEM;
8571 spin_lock_irqsave(&card->ip_lock, flags);
8572 if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
8573 __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
8574 rc = -EEXIST;
8575 spin_unlock_irqrestore(&card->ip_lock, flags);
8576 if (rc){
8577 PRINT_WARN("Cannot add RXIP. Address already exists!\n");
8578 return rc;
8579 }
8580 if (!qeth_add_ip(card, ipaddr))
8581 kfree(ipaddr);
8582 qeth_set_ip_addr_list(card);
8583 return 0;
8584}
8585
8586void
8587qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
8588 const u8 *addr)
8589{
8590 struct qeth_ipaddr *ipaddr;
8591
8592 ipaddr = qeth_get_addr_buffer(proto);
8593 if (ipaddr){
8594 if (proto == QETH_PROT_IPV4){
8595 QETH_DBF_TEXT(trace, 2, "addrxip4");
8596 memcpy(&ipaddr->u.a4.addr, addr, 4);
8597 ipaddr->u.a4.mask = 0;
8598#ifdef CONFIG_QETH_IPV6
8599 } else if (proto == QETH_PROT_IPV6){
8600 QETH_DBF_TEXT(trace, 2, "addrxip6");
8601 memcpy(&ipaddr->u.a6.addr, addr, 16);
8602 ipaddr->u.a6.pfxlen = 0;
8603#endif
8604 }
8605 ipaddr->type = QETH_IP_TYPE_RXIP;
8606 } else
8607 return;
8608 if (!qeth_delete_ip(card, ipaddr))
8609 kfree(ipaddr);
8610 qeth_set_ip_addr_list(card);
8611}
8612
8613/**
8614 * IP event handler
8615 */
8616static int
8617qeth_ip_event(struct notifier_block *this,
8618 unsigned long event,void *ptr)
8619{
8620 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
8621 struct net_device *dev =(struct net_device *) ifa->ifa_dev->dev;
8622 struct qeth_ipaddr *addr;
8623 struct qeth_card *card;
8624
8625 QETH_DBF_TEXT(trace,3,"ipevent");
8626 card = qeth_get_card_from_dev(dev);
8627 if (!card)
8628 return NOTIFY_DONE;
8629 if (card->options.layer2)
8630 return NOTIFY_DONE;
8631
8632 addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
8633 if (addr != NULL) {
8634 addr->u.a4.addr = ifa->ifa_address;
8635 addr->u.a4.mask = ifa->ifa_mask;
8636 addr->type = QETH_IP_TYPE_NORMAL;
8637 } else
8638 goto out;
8639
8640 switch(event) {
8641 case NETDEV_UP:
8642 if (!qeth_add_ip(card, addr))
8643 kfree(addr);
8644 break;
8645 case NETDEV_DOWN:
8646 if (!qeth_delete_ip(card, addr))
8647 kfree(addr);
8648 break;
8649 default:
8650 break;
8651 }
8652 qeth_set_ip_addr_list(card);
8653out:
8654 return NOTIFY_DONE;
8655}
8656
8657static struct notifier_block qeth_ip_notifier = {
8658 qeth_ip_event,
8659 NULL,
8660};
8661
8662#ifdef CONFIG_QETH_IPV6
8663/**
8664 * IPv6 event handler
8665 */
8666static int
8667qeth_ip6_event(struct notifier_block *this,
8668 unsigned long event,void *ptr)
8669{
8670
8671 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
8672 struct net_device *dev = (struct net_device *)ifa->idev->dev;
8673 struct qeth_ipaddr *addr;
8674 struct qeth_card *card;
8675
8676 QETH_DBF_TEXT(trace,3,"ip6event");
8677
8678 card = qeth_get_card_from_dev(dev);
8679 if (!card)
8680 return NOTIFY_DONE;
8681 if (!qeth_is_supported(card, IPA_IPV6))
8682 return NOTIFY_DONE;
8683
8684 addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
8685 if (addr != NULL) {
8686 memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
8687 addr->u.a6.pfxlen = ifa->prefix_len;
8688 addr->type = QETH_IP_TYPE_NORMAL;
8689 } else
8690 goto out;
8691
8692 switch(event) {
8693 case NETDEV_UP:
8694 if (!qeth_add_ip(card, addr))
8695 kfree(addr);
8696 break;
8697 case NETDEV_DOWN:
8698 if (!qeth_delete_ip(card, addr))
8699 kfree(addr);
8700 break;
8701 default:
8702 break;
8703 }
8704 qeth_set_ip_addr_list(card);
8705out:
8706 return NOTIFY_DONE;
8707}
8708
8709static struct notifier_block qeth_ip6_notifier = {
8710 qeth_ip6_event,
8711 NULL,
8712};
8713#endif
8714
/* per-device reboot handler: soft-stop one card (clear its IP list and
 * its qdio queues) so the machine can reboot cleanly */
static int
__qeth_reboot_event_card(struct device *dev, void *data)
{
	/* use the drvdata accessor instead of poking dev->driver_data */
	struct qeth_card *card = dev_get_drvdata(dev);

	qeth_clear_ip_list(card, 0, 0);
	qeth_qdio_clear_card(card, 0);
	qeth_clear_qdio_buffers(card);
	return 0;
}
8726
8727static int
8728qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
8729{
8730 int ret;
8731
8732 ret = driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL,
8733 __qeth_reboot_event_card);
8734 return ret ? NOTIFY_BAD : NOTIFY_DONE;
8735}
8736
8737
8738static struct notifier_block qeth_reboot_notifier = {
8739 qeth_reboot_event,
8740 NULL,
8741};
8742
8743static int
8744qeth_register_notifiers(void)
8745{
8746 int r;
8747
8748 QETH_DBF_TEXT(trace,5,"regnotif");
8749 if ((r = register_reboot_notifier(&qeth_reboot_notifier)))
8750 return r;
8751 if ((r = register_inetaddr_notifier(&qeth_ip_notifier)))
8752 goto out_reboot;
8753#ifdef CONFIG_QETH_IPV6
8754 if ((r = register_inet6addr_notifier(&qeth_ip6_notifier)))
8755 goto out_ipv4;
8756#endif
8757 return 0;
8758
8759#ifdef CONFIG_QETH_IPV6
8760out_ipv4:
8761 unregister_inetaddr_notifier(&qeth_ip_notifier);
8762#endif
8763out_reboot:
8764 unregister_reboot_notifier(&qeth_reboot_notifier);
8765 return r;
8766}
8767
8768/**
8769 * unregister all event notifiers
8770 */
8771static void
8772qeth_unregister_notifiers(void)
8773{
8774
8775 QETH_DBF_TEXT(trace,5,"unregnot");
8776 BUG_ON(unregister_reboot_notifier(&qeth_reboot_notifier));
8777 BUG_ON(unregister_inetaddr_notifier(&qeth_ip_notifier));
8778#ifdef CONFIG_QETH_IPV6
8779 BUG_ON(unregister_inet6addr_notifier(&qeth_ip6_notifier));
8780#endif /* QETH_IPV6 */
8781
8782}
8783
8784#ifdef CONFIG_QETH_IPV6
8785static int
8786qeth_ipv6_init(void)
8787{
8788 qeth_old_arp_constructor = arp_tbl.constructor;
8789 write_lock_bh(&arp_tbl.lock);
8790 arp_tbl.constructor = qeth_arp_constructor;
8791 write_unlock_bh(&arp_tbl.lock);
8792
8793 arp_direct_ops = (struct neigh_ops*)
8794 kmalloc(sizeof(struct neigh_ops), GFP_KERNEL);
8795 if (!arp_direct_ops)
8796 return -ENOMEM;
8797
8798 memcpy(arp_direct_ops, &arp_direct_ops_template,
8799 sizeof(struct neigh_ops));
8800
8801 return 0;
8802}
8803
8804static void
8805qeth_ipv6_uninit(void)
8806{
8807 write_lock_bh(&arp_tbl.lock);
8808 arp_tbl.constructor = qeth_old_arp_constructor;
8809 write_unlock_bh(&arp_tbl.lock);
8810 kfree(arp_direct_ops);
8811}
8812#endif /* CONFIG_QETH_IPV6 */
8813
8814static void
8815qeth_sysfs_unregister(void)
8816{
8817 s390_root_dev_unregister(qeth_root_dev);
8818 qeth_remove_driver_attributes();
8819 ccw_driver_unregister(&qeth_ccw_driver);
8820 ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
8821}
8822
8823/**
8824 * register qeth at sysfs
8825 */
8826static int
8827qeth_sysfs_register(void)
8828{
8829 int rc;
8830
8831 rc = ccwgroup_driver_register(&qeth_ccwgroup_driver);
8832 if (rc)
8833 goto out;
8834
8835 rc = ccw_driver_register(&qeth_ccw_driver);
8836 if (rc)
8837 goto out_ccw_driver;
8838
8839 rc = qeth_create_driver_attributes();
8840 if (rc)
8841 goto out_qeth_attr;
8842
8843 qeth_root_dev = s390_root_dev_register("qeth");
8844 rc = IS_ERR(qeth_root_dev) ? PTR_ERR(qeth_root_dev) : 0;
8845 if (!rc)
8846 goto out;
8847
8848 qeth_remove_driver_attributes();
8849out_qeth_attr:
8850 ccw_driver_unregister(&qeth_ccw_driver);
8851out_ccw_driver:
8852 ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
8853out:
8854 return rc;
8855}
8856
8857/***
8858 * init function
8859 */
8860static int __init
8861qeth_init(void)
8862{
8863 int rc;
8864
8865 PRINT_INFO("loading %s\n", version);
8866
8867 INIT_LIST_HEAD(&qeth_card_list.list);
8868 INIT_LIST_HEAD(&qeth_notify_list);
8869 spin_lock_init(&qeth_notify_lock);
8870 rwlock_init(&qeth_card_list.rwlock);
8871
8872 rc = qeth_register_dbf_views();
8873 if (rc)
8874 goto out_err;
8875
8876 rc = qeth_sysfs_register();
8877 if (rc)
8878 goto out_dbf;
8879
8880#ifdef CONFIG_QETH_IPV6
8881 rc = qeth_ipv6_init();
8882 if (rc) {
8883 PRINT_ERR("Out of memory during ipv6 init code = %d\n", rc);
8884 goto out_sysfs;
8885 }
8886#endif /* QETH_IPV6 */
8887 rc = qeth_register_notifiers();
8888 if (rc)
8889 goto out_ipv6;
8890 rc = qeth_create_procfs_entries();
8891 if (rc)
8892 goto out_notifiers;
8893
8894 return rc;
8895
8896out_notifiers:
8897 qeth_unregister_notifiers();
8898out_ipv6:
8899#ifdef CONFIG_QETH_IPV6
8900 qeth_ipv6_uninit();
8901out_sysfs:
8902#endif /* QETH_IPV6 */
8903 qeth_sysfs_unregister();
8904out_dbf:
8905 qeth_unregister_dbf_views();
8906out_err:
8907 PRINT_ERR("Initialization failed with code %d\n", rc);
8908 return rc;
8909}
8910
8911static void
8912__exit qeth_exit(void)
8913{
8914 struct qeth_card *card, *tmp;
8915 unsigned long flags;
8916
8917 QETH_DBF_TEXT(trace,1, "cleanup.");
8918
8919 /*
8920 * Weed would not need to clean up our devices here, because the
8921 * common device layer calls qeth_remove_device for each device
8922 * as soon as we unregister our driver (done in qeth_sysfs_unregister).
8923 * But we do cleanup here so we can do a "soft" shutdown of our cards.
8924 * qeth_remove_device called by the common device layer would otherwise
8925 * do a "hard" shutdown (card->use_hard_stop is set to one in
8926 * qeth_remove_device).
8927 */
8928again:
8929 read_lock_irqsave(&qeth_card_list.rwlock, flags);
8930 list_for_each_entry_safe(card, tmp, &qeth_card_list.list, list){
8931 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
8932 qeth_set_offline(card->gdev);
8933 qeth_remove_device(card->gdev);
8934 goto again;
8935 }
8936 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
8937#ifdef CONFIG_QETH_IPV6
8938 qeth_ipv6_uninit();
8939#endif
8940 qeth_unregister_notifiers();
8941 qeth_remove_procfs_entries();
8942 qeth_sysfs_unregister();
8943 qeth_unregister_dbf_views();
8944 printk("qeth: removed\n");
8945}
8946
8947EXPORT_SYMBOL(qeth_osn_register);
8948EXPORT_SYMBOL(qeth_osn_deregister);
8949EXPORT_SYMBOL(qeth_osn_assist);
8950module_init(qeth_init);
8951module_exit(qeth_exit);
8952MODULE_AUTHOR("Frank Pavlic <fpavlic@de.ibm.com>");
8953MODULE_DESCRIPTION("Linux on zSeries OSA Express and HiperSockets support\n" \
8954 "Copyright 2000,2003 IBM Corporation\n");
8955
8956MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_mpc.c b/drivers/s390/net/qeth_mpc.c
deleted file mode 100644
index f29a4bc4f6f2..000000000000
--- a/drivers/s390/net/qeth_mpc.c
+++ /dev/null
@@ -1,269 +0,0 @@
1/*
2 * linux/drivers/s390/net/qeth_mpc.c
3 *
4 * Linux on zSeries OSA Express and HiperSockets support
5 *
6 * Copyright 2000,2003 IBM Corporation
7 * Author(s): Frank Pavlic <fpavlic@de.ibm.com>
8 * Thomas Spatzier <tspat@de.ibm.com>
9 *
10 */
11#include <asm/cio.h>
12#include "qeth_mpc.h"
13
14unsigned char IDX_ACTIVATE_READ[]={
15 0x00,0x00,0x80,0x00, 0x00,0x00,0x00,0x00,
16 0x19,0x01,0x01,0x80, 0x00,0x00,0x00,0x00,
17 0x00,0x00,0x00,0x00, 0x00,0x00,0xc8,0xc1,
18 0xd3,0xd3,0xd6,0xd3, 0xc5,0x40,0x00,0x00,
19 0x00,0x00
20};
21
22unsigned char IDX_ACTIVATE_WRITE[]={
23 0x00,0x00,0x80,0x00, 0x00,0x00,0x00,0x00,
24 0x15,0x01,0x01,0x80, 0x00,0x00,0x00,0x00,
25 0xff,0xff,0x00,0x00, 0x00,0x00,0xc8,0xc1,
26 0xd3,0xd3,0xd6,0xd3, 0xc5,0x40,0x00,0x00,
27 0x00,0x00
28};
29
30unsigned char CM_ENABLE[]={
31 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x01,
32 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x63,
33 0x10,0x00,0x00,0x01,
34 0x00,0x00,0x00,0x00,
35 0x81,0x7e,0x00,0x01, 0x00,0x00,0x00,0x00,
36 0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x23,
37 0x00,0x00,0x23,0x05, 0x00,0x00,0x00,0x00,
38 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
39 0x01,0x00,0x00,0x23, 0x00,0x00,0x00,0x40,
40 0x00,0x0c,0x41,0x02, 0x00,0x17,0x00,0x00,
41 0x00,0x00,0x00,0x00,
42 0x00,0x0b,0x04,0x01,
43 0x7e,0x04,0x05,0x00, 0x01,0x01,0x0f,
44 0x00,
45 0x0c,0x04,0x02,0xff, 0xff,0xff,0xff,0xff,
46 0xff,0xff,0xff
47};
48
49unsigned char CM_SETUP[]={
50 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x02,
51 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x64,
52 0x10,0x00,0x00,0x01,
53 0x00,0x00,0x00,0x00,
54 0x81,0x7e,0x00,0x01, 0x00,0x00,0x00,0x00,
55 0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x24,
56 0x00,0x00,0x24,0x05, 0x00,0x00,0x00,0x00,
57 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
58 0x01,0x00,0x00,0x24, 0x00,0x00,0x00,0x40,
59 0x00,0x0c,0x41,0x04, 0x00,0x18,0x00,0x00,
60 0x00,0x00,0x00,0x00,
61 0x00,0x09,0x04,0x04,
62 0x05,0x00,0x01,0x01, 0x11,
63 0x00,0x09,0x04,
64 0x05,0x05,0x00,0x00, 0x00,0x00,
65 0x00,0x06,
66 0x04,0x06,0xc8,0x00
67};
68
69unsigned char ULP_ENABLE[]={
70 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x03,
71 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x6b,
72 0x10,0x00,0x00,0x01,
73 0x00,0x00,0x00,0x00,
74 0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x01,
75 0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x2b,
76 0x00,0x00,0x2b,0x05, 0x20,0x01,0x00,0x00,
77 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
78 0x01,0x00,0x00,0x2b, 0x00,0x00,0x00,0x40,
79 0x00,0x0c,0x41,0x02, 0x00,0x1f,0x00,0x00,
80 0x00,0x00,0x00,0x00,
81 0x00,0x0b,0x04,0x01,
82 0x03,0x04,0x05,0x00, 0x01,0x01,0x12,
83 0x00,
84 0x14,0x04,0x0a,0x00, 0x20,0x00,0x00,0xff,
85 0xff,0x00,0x08,0xc8, 0xe8,0xc4,0xf1,0xc7,
86 0xf1,0x00,0x00
87};
88
89unsigned char ULP_SETUP[]={
90 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x04,
91 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x6c,
92 0x10,0x00,0x00,0x01,
93 0x00,0x00,0x00,0x00,
94 0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x02,
95 0x00,0x00,0x00,0x01, 0x00,0x24,0x00,0x2c,
96 0x00,0x00,0x2c,0x05, 0x20,0x01,0x00,0x00,
97 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
98 0x01,0x00,0x00,0x2c, 0x00,0x00,0x00,0x40,
99 0x00,0x0c,0x41,0x04, 0x00,0x20,0x00,0x00,
100 0x00,0x00,0x00,0x00,
101 0x00,0x09,0x04,0x04,
102 0x05,0x00,0x01,0x01, 0x14,
103 0x00,0x09,0x04,
104 0x05,0x05,0x30,0x01, 0x00,0x00,
105 0x00,0x06,
106 0x04,0x06,0x40,0x00,
107 0x00,0x08,0x04,0x0b,
108 0x00,0x00,0x00,0x00
109};
110
111unsigned char DM_ACT[]={
112 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x05,
113 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x55,
114 0x10,0x00,0x00,0x01,
115 0x00,0x00,0x00,0x00,
116 0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x03,
117 0x00,0x00,0x00,0x02, 0x00,0x24,0x00,0x15,
118 0x00,0x00,0x2c,0x05, 0x20,0x01,0x00,0x00,
119 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
120 0x01,0x00,0x00,0x15, 0x00,0x00,0x00,0x40,
121 0x00,0x0c,0x43,0x60, 0x00,0x09,0x00,0x00,
122 0x00,0x00,0x00,0x00,
123 0x00,0x09,0x04,0x04,
124 0x05,0x40,0x01,0x01, 0x00
125};
126
127unsigned char IPA_PDU_HEADER[]={
128 0x00,0xe0,0x00,0x00, 0x77,0x77,0x77,0x77,
129 0x00,0x00,0x00,0x14, 0x00,0x00,
130 (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd))/256,
131 (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd))%256,
132 0x10,0x00,0x00,0x01, 0x00,0x00,0x00,0x00,
133 0xc1,0x03,0x00,0x01, 0x00,0x00,0x00,0x00,
134 0x00,0x00,0x00,0x00, 0x00,0x24,
135 sizeof(struct qeth_ipa_cmd)/256,
136 sizeof(struct qeth_ipa_cmd)%256,
137 0x00,
138 sizeof(struct qeth_ipa_cmd)/256,
139 sizeof(struct qeth_ipa_cmd)%256,
140 0x05,
141 0x77,0x77,0x77,0x77,
142 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
143 0x01,0x00,
144 sizeof(struct qeth_ipa_cmd)/256,
145 sizeof(struct qeth_ipa_cmd)%256,
146 0x00,0x00,0x00,0x40,
147};
148
149unsigned char WRITE_CCW[]={
150 0x01,CCW_FLAG_SLI,0,0,
151 0,0,0,0
152};
153
154unsigned char READ_CCW[]={
155 0x02,CCW_FLAG_SLI,0,0,
156 0,0,0,0
157};
158
159
160struct ipa_rc_msg {
161 enum qeth_ipa_return_codes rc;
162 char *msg;
163};
164
165static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
166 {IPA_RC_SUCCESS, "success"},
167 {IPA_RC_NOTSUPP, "Command not supported"},
168 {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
169 {IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"},
170 {IPA_RC_UNSUPPORTED_COMMAND, "Command not supported"},
171 {IPA_RC_DUP_IPV6_REMOTE,"ipv6 address already registered remote"},
172 {IPA_RC_DUP_IPV6_HOME, "ipv6 address already registered"},
173 {IPA_RC_UNREGISTERED_ADDR, "Address not registered"},
174 {IPA_RC_NO_ID_AVAILABLE, "No identifiers available"},
175 {IPA_RC_ID_NOT_FOUND, "Identifier not found"},
176 {IPA_RC_INVALID_IP_VERSION, "IP version incorrect"},
177 {IPA_RC_LAN_FRAME_MISMATCH, "LAN and frame mismatch"},
178 {IPA_RC_L2_UNSUPPORTED_CMD, "Unsupported layer 2 command"},
179 {IPA_RC_L2_DUP_MAC, "Duplicate MAC address"},
180 {IPA_RC_L2_ADDR_TABLE_FULL, "Layer2 address table full"},
181 {IPA_RC_L2_DUP_LAYER3_MAC, "Duplicate with layer 3 MAC"},
182 {IPA_RC_L2_GMAC_NOT_FOUND, "GMAC not found"},
183 {IPA_RC_L2_MAC_NOT_FOUND, "L2 mac address not found"},
184 {IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"},
185 {IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"},
186 {IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"},
187 {IPA_RC_DATA_MISMATCH, "Data field mismatch (v4/v6 mixed)"},
188 {IPA_RC_INVALID_MTU_SIZE, "Invalid MTU size"},
189 {IPA_RC_INVALID_LANTYPE, "Invalid LAN type"},
190 {IPA_RC_INVALID_LANNUM, "Invalid LAN num"},
191 {IPA_RC_DUPLICATE_IP_ADDRESS, "Address already registered"},
192 {IPA_RC_IP_ADDR_TABLE_FULL, "IP address table full"},
193 {IPA_RC_LAN_PORT_STATE_ERROR, "LAN port state error"},
194 {IPA_RC_SETIP_NO_STARTLAN, "Setip no startlan received"},
195 {IPA_RC_SETIP_ALREADY_RECEIVED, "Setip already received"},
196 {IPA_RC_IP_ADDR_ALREADY_USED, "IP address already in use on LAN"},
197 {IPA_RC_MULTICAST_FULL, "No task available, multicast full"},
198 {IPA_RC_SETIP_INVALID_VERSION, "SETIP invalid IP version"},
199 {IPA_RC_UNSUPPORTED_SUBCMD, "Unsupported assist subcommand"},
200 {IPA_RC_ARP_ASSIST_NO_ENABLE, "Only partial success, no enable"},
201 {IPA_RC_PRIMARY_ALREADY_DEFINED,"Primary already defined"},
202 {IPA_RC_SECOND_ALREADY_DEFINED, "Secondary already defined"},
203 {IPA_RC_INVALID_SETRTG_INDICATOR,"Invalid SETRTG indicator"},
204 {IPA_RC_MC_ADDR_ALREADY_DEFINED,"Multicast address already defined"},
205 {IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"},
206 {IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"},
207 {IPA_RC_FFFF, "Unknown Error"}
208};
209
210
211
212char *
213qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
214{
215 int x = 0;
216 qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) /
217 sizeof(struct ipa_rc_msg) - 1].rc = rc;
218 while(qeth_ipa_rc_msg[x].rc != rc)
219 x++;
220 return qeth_ipa_rc_msg[x].msg;
221}
222
223
224struct ipa_cmd_names {
225 enum qeth_ipa_cmds cmd;
226 char *name;
227};
228
229static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
230 {IPA_CMD_STARTLAN, "startlan"},
231 {IPA_CMD_STOPLAN, "stoplan"},
232 {IPA_CMD_SETVMAC, "setvmac"},
233 {IPA_CMD_DELVMAC, "delvmca"},
234 {IPA_CMD_SETGMAC, "setgmac"},
235 {IPA_CMD_DELGMAC, "delgmac"},
236 {IPA_CMD_SETVLAN, "setvlan"},
237 {IPA_CMD_DELVLAN, "delvlan"},
238 {IPA_CMD_SETCCID, "setccid"},
239 {IPA_CMD_DELCCID, "delccid"},
240 {IPA_CMD_MODCCID, "setip"},
241 {IPA_CMD_SETIP, "setip"},
242 {IPA_CMD_QIPASSIST, "qipassist"},
243 {IPA_CMD_SETASSPARMS, "setassparms"},
244 {IPA_CMD_SETIPM, "setipm"},
245 {IPA_CMD_DELIPM, "delipm"},
246 {IPA_CMD_SETRTG, "setrtg"},
247 {IPA_CMD_DELIP, "delip"},
248 {IPA_CMD_SETADAPTERPARMS, "setadapterparms"},
249 {IPA_CMD_SET_DIAG_ASS, "set_diag_ass"},
250 {IPA_CMD_CREATE_ADDR, "create_addr"},
251 {IPA_CMD_DESTROY_ADDR, "destroy_addr"},
252 {IPA_CMD_REGISTER_LOCAL_ADDR, "register_local_addr"},
253 {IPA_CMD_UNREGISTER_LOCAL_ADDR, "unregister_local_addr"},
254 {IPA_CMD_UNKNOWN, "unknown"},
255};
256
257char *
258qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
259{
260 int x = 0;
261 qeth_ipa_cmd_names[
262 sizeof(qeth_ipa_cmd_names)/
263 sizeof(struct ipa_cmd_names)-1].cmd = cmd;
264 while(qeth_ipa_cmd_names[x].cmd != cmd)
265 x++;
266 return qeth_ipa_cmd_names[x].name;
267}
268
269
diff --git a/drivers/s390/net/qeth_proc.c b/drivers/s390/net/qeth_proc.c
deleted file mode 100644
index 46ecd03a597e..000000000000
--- a/drivers/s390/net/qeth_proc.c
+++ /dev/null
@@ -1,316 +0,0 @@
1/*
2 *
 3 * linux/drivers/s390/net/qeth_proc.c
4 *
5 * Linux on zSeries OSA Express and HiperSockets support
6 * This file contains code related to procfs.
7 *
8 * Copyright 2000,2003 IBM Corporation
9 *
10 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
11 *
12 */
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/proc_fs.h>
16#include <linux/seq_file.h>
17#include <linux/list.h>
18#include <linux/rwsem.h>
19
20#include "qeth.h"
21#include "qeth_mpc.h"
22#include "qeth_fs.h"
23
24/***** /proc/qeth *****/
25#define QETH_PROCFILE_NAME "qeth"
26static struct proc_dir_entry *qeth_procfile;
27
28static int
29qeth_procfile_seq_match(struct device *dev, void *data)
30{
31 return(dev ? 1 : 0);
32}
33
34static void *
35qeth_procfile_seq_start(struct seq_file *s, loff_t *offset)
36{
37 struct device *dev = NULL;
38 loff_t nr = 0;
39
40 if (*offset == 0)
41 return SEQ_START_TOKEN;
42 while (1) {
43 dev = driver_find_device(&qeth_ccwgroup_driver.driver, dev,
44 NULL, qeth_procfile_seq_match);
45 if (++nr == *offset)
46 break;
47 put_device(dev);
48 }
49 return dev;
50}
51
/* seq_file stop: nothing to release here */
static void
qeth_procfile_seq_stop(struct seq_file *s, void *it)
{
}
56
57static void *
58qeth_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset)
59{
60 struct device *prev, *next;
61
62 if (it == SEQ_START_TOKEN)
63 prev = NULL;
64 else
65 prev = (struct device *) it;
66 next = driver_find_device(&qeth_ccwgroup_driver.driver,
67 prev, NULL, qeth_procfile_seq_match);
68 (*offset)++;
69 return (void *) next;
70}
71
72static inline const char *
73qeth_get_router_str(struct qeth_card *card, int ipv)
74{
75 enum qeth_routing_types routing_type = NO_ROUTER;
76
77 if (ipv == 4) {
78 routing_type = card->options.route4.type;
79 } else {
80#ifdef CONFIG_QETH_IPV6
81 routing_type = card->options.route6.type;
82#else
83 return "n/a";
84#endif /* CONFIG_QETH_IPV6 */
85 }
86
87 switch (routing_type){
88 case PRIMARY_ROUTER:
89 return "pri";
90 case SECONDARY_ROUTER:
91 return "sec";
92 case MULTICAST_ROUTER:
93 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
94 return "mc+";
95 return "mc";
96 case PRIMARY_CONNECTOR:
97 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
98 return "p+c";
99 return "p.c";
100 case SECONDARY_CONNECTOR:
101 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
102 return "s+c";
103 return "s.c";
104 default: /* NO_ROUTER */
105 return "no";
106 }
107}
108
109static int
110qeth_procfile_seq_show(struct seq_file *s, void *it)
111{
112 struct device *device;
113 struct qeth_card *card;
114 char tmp[12]; /* for qeth_get_prioq_str */
115
116 if (it == SEQ_START_TOKEN){
117 seq_printf(s, "devices CHPID interface "
118 "cardtype port chksum prio-q'ing rtr4 "
119 "rtr6 fsz cnt\n");
120 seq_printf(s, "-------------------------- ----- ---------- "
121 "-------------- ---- ------ ---------- ---- "
122 "---- ----- -----\n");
123 } else {
124 device = (struct device *) it;
125 card = device->driver_data;
126 seq_printf(s, "%s/%s/%s x%02X %-10s %-14s %-4i ",
127 CARD_RDEV_ID(card),
128 CARD_WDEV_ID(card),
129 CARD_DDEV_ID(card),
130 card->info.chpid,
131 QETH_CARD_IFNAME(card),
132 qeth_get_cardname_short(card),
133 card->info.portno);
134 if (card->lan_online)
135 seq_printf(s, "%-6s %-10s %-4s %-4s %-5s %-5i\n",
136 qeth_get_checksum_str(card),
137 qeth_get_prioq_str(card, tmp),
138 qeth_get_router_str(card, 4),
139 qeth_get_router_str(card, 6),
140 qeth_get_bufsize_str(card),
141 card->qdio.in_buf_pool.buf_count);
142 else
143 seq_printf(s, " +++ LAN OFFLINE +++\n");
144 put_device(device);
145 }
146 return 0;
147}
148
149static const struct seq_operations qeth_procfile_seq_ops = {
150 .start = qeth_procfile_seq_start,
151 .stop = qeth_procfile_seq_stop,
152 .next = qeth_procfile_seq_next,
153 .show = qeth_procfile_seq_show,
154};
155
156static int
157qeth_procfile_open(struct inode *inode, struct file *file)
158{
159 return seq_open(file, &qeth_procfile_seq_ops);
160}
161
162static const struct file_operations qeth_procfile_fops = {
163 .owner = THIS_MODULE,
164 .open = qeth_procfile_open,
165 .read = seq_read,
166 .llseek = seq_lseek,
167 .release = seq_release,
168};
169
170/***** /proc/qeth_perf *****/
171#define QETH_PERF_PROCFILE_NAME "qeth_perf"
172static struct proc_dir_entry *qeth_perf_procfile;
173
174static int
175qeth_perf_procfile_seq_show(struct seq_file *s, void *it)
176{
177 struct device *device;
178 struct qeth_card *card;
179
180
181 if (it == SEQ_START_TOKEN)
182 return 0;
183
184 device = (struct device *) it;
185 card = device->driver_data;
186 seq_printf(s, "For card with devnos %s/%s/%s (%s):\n",
187 CARD_RDEV_ID(card),
188 CARD_WDEV_ID(card),
189 CARD_DDEV_ID(card),
190 QETH_CARD_IFNAME(card)
191 );
192 if (!card->options.performance_stats)
193 seq_printf(s, "Performance statistics are deactivated.\n");
194 seq_printf(s, " Skb's/buffers received : %lu/%u\n"
195 " Skb's/buffers sent : %lu/%u\n\n",
196 card->stats.rx_packets -
197 card->perf_stats.initial_rx_packets,
198 card->perf_stats.bufs_rec,
199 card->stats.tx_packets -
200 card->perf_stats.initial_tx_packets,
201 card->perf_stats.bufs_sent
202 );
203 seq_printf(s, " Skb's/buffers sent without packing : %lu/%u\n"
204 " Skb's/buffers sent with packing : %u/%u\n\n",
205 card->stats.tx_packets - card->perf_stats.initial_tx_packets
206 - card->perf_stats.skbs_sent_pack,
207 card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack,
208 card->perf_stats.skbs_sent_pack,
209 card->perf_stats.bufs_sent_pack
210 );
211 seq_printf(s, " Skbs sent in SG mode : %u\n"
212 " Skb fragments sent in SG mode : %u\n\n",
213 card->perf_stats.sg_skbs_sent,
214 card->perf_stats.sg_frags_sent);
215 seq_printf(s, " Skbs received in SG mode : %u\n"
216 " Skb fragments received in SG mode : %u\n"
217 " Page allocations for rx SG mode : %u\n\n",
218 card->perf_stats.sg_skbs_rx,
219 card->perf_stats.sg_frags_rx,
220 card->perf_stats.sg_alloc_page_rx);
221 seq_printf(s, " large_send tx (in Kbytes) : %u\n"
222 " large_send count : %u\n\n",
223 card->perf_stats.large_send_bytes >> 10,
224 card->perf_stats.large_send_cnt);
225 seq_printf(s, " Packing state changes no pkg.->packing : %u/%u\n"
226 " Watermarks L/H : %i/%i\n"
227 " Current buffer usage (outbound q's) : "
228 "%i/%i/%i/%i\n\n",
229 card->perf_stats.sc_dp_p, card->perf_stats.sc_p_dp,
230 QETH_LOW_WATERMARK_PACK, QETH_HIGH_WATERMARK_PACK,
231 atomic_read(&card->qdio.out_qs[0]->used_buffers),
232 (card->qdio.no_out_queues > 1)?
233 atomic_read(&card->qdio.out_qs[1]->used_buffers)
234 : 0,
235 (card->qdio.no_out_queues > 2)?
236 atomic_read(&card->qdio.out_qs[2]->used_buffers)
237 : 0,
238 (card->qdio.no_out_queues > 3)?
239 atomic_read(&card->qdio.out_qs[3]->used_buffers)
240 : 0
241 );
242 seq_printf(s, " Inbound handler time (in us) : %u\n"
243 " Inbound handler count : %u\n"
244 " Inbound do_QDIO time (in us) : %u\n"
245 " Inbound do_QDIO count : %u\n\n"
246 " Outbound handler time (in us) : %u\n"
247 " Outbound handler count : %u\n\n"
248 " Outbound time (in us, incl QDIO) : %u\n"
249 " Outbound count : %u\n"
250 " Outbound do_QDIO time (in us) : %u\n"
251 " Outbound do_QDIO count : %u\n\n",
252 card->perf_stats.inbound_time,
253 card->perf_stats.inbound_cnt,
254 card->perf_stats.inbound_do_qdio_time,
255 card->perf_stats.inbound_do_qdio_cnt,
256 card->perf_stats.outbound_handler_time,
257 card->perf_stats.outbound_handler_cnt,
258 card->perf_stats.outbound_time,
259 card->perf_stats.outbound_cnt,
260 card->perf_stats.outbound_do_qdio_time,
261 card->perf_stats.outbound_do_qdio_cnt
262 );
263 put_device(device);
264 return 0;
265}
266
267static const struct seq_operations qeth_perf_procfile_seq_ops = {
268 .start = qeth_procfile_seq_start,
269 .stop = qeth_procfile_seq_stop,
270 .next = qeth_procfile_seq_next,
271 .show = qeth_perf_procfile_seq_show,
272};
273
274static int
275qeth_perf_procfile_open(struct inode *inode, struct file *file)
276{
277 return seq_open(file, &qeth_perf_procfile_seq_ops);
278}
279
280static const struct file_operations qeth_perf_procfile_fops = {
281 .owner = THIS_MODULE,
282 .open = qeth_perf_procfile_open,
283 .read = seq_read,
284 .llseek = seq_lseek,
285 .release = seq_release,
286};
287
288int __init
289qeth_create_procfs_entries(void)
290{
291 qeth_procfile = create_proc_entry(QETH_PROCFILE_NAME,
292 S_IFREG | 0444, NULL);
293 if (qeth_procfile)
294 qeth_procfile->proc_fops = &qeth_procfile_fops;
295
296 qeth_perf_procfile = create_proc_entry(QETH_PERF_PROCFILE_NAME,
297 S_IFREG | 0444, NULL);
298 if (qeth_perf_procfile)
299 qeth_perf_procfile->proc_fops = &qeth_perf_procfile_fops;
300
301 if (qeth_procfile &&
302 qeth_perf_procfile)
303 return 0;
304 else
305 return -ENOMEM;
306}
307
308void __exit
309qeth_remove_procfs_entries(void)
310{
311 if (qeth_procfile)
312 remove_proc_entry(QETH_PROCFILE_NAME, NULL);
313 if (qeth_perf_procfile)
314 remove_proc_entry(QETH_PERF_PROCFILE_NAME, NULL);
315}
316
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
deleted file mode 100644
index 2cc3f3a0e393..000000000000
--- a/drivers/s390/net/qeth_sys.c
+++ /dev/null
@@ -1,1858 +0,0 @@
1/*
2 *
3 * linux/drivers/s390/net/qeth_sys.c
4 *
5 * Linux on zSeries OSA Express and HiperSockets support
6 * This file contains code related to sysfs.
7 *
8 * Copyright 2000,2003 IBM Corporation
9 *
10 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
11 * Frank Pavlic <fpavlic@de.ibm.com>
12 *
13 */
14#include <linux/list.h>
15#include <linux/rwsem.h>
16
17#include <asm/ebcdic.h>
18
19#include "qeth.h"
20#include "qeth_mpc.h"
21#include "qeth_fs.h"
22
23/*****************************************************************************/
24/* */
25/* /sys-fs stuff UNDER DEVELOPMENT !!! */
26/* */
27/*****************************************************************************/
28//low/high watermark
29
30static ssize_t
31qeth_dev_state_show(struct device *dev, struct device_attribute *attr, char *buf)
32{
33 struct qeth_card *card = dev->driver_data;
34 if (!card)
35 return -EINVAL;
36
37 switch (card->state) {
38 case CARD_STATE_DOWN:
39 return sprintf(buf, "DOWN\n");
40 case CARD_STATE_HARDSETUP:
41 return sprintf(buf, "HARDSETUP\n");
42 case CARD_STATE_SOFTSETUP:
43 return sprintf(buf, "SOFTSETUP\n");
44 case CARD_STATE_UP:
45 if (card->lan_online)
46 return sprintf(buf, "UP (LAN ONLINE)\n");
47 else
48 return sprintf(buf, "UP (LAN OFFLINE)\n");
49 case CARD_STATE_RECOVER:
50 return sprintf(buf, "RECOVER\n");
51 default:
52 return sprintf(buf, "UNKNOWN\n");
53 }
54}
55
56static DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL);
57
58static ssize_t
59qeth_dev_chpid_show(struct device *dev, struct device_attribute *attr, char *buf)
60{
61 struct qeth_card *card = dev->driver_data;
62 if (!card)
63 return -EINVAL;
64
65 return sprintf(buf, "%02X\n", card->info.chpid);
66}
67
68static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL);
69
70static ssize_t
71qeth_dev_if_name_show(struct device *dev, struct device_attribute *attr, char *buf)
72{
73 struct qeth_card *card = dev->driver_data;
74 if (!card)
75 return -EINVAL;
76 return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
77}
78
79static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
80
81static ssize_t
82qeth_dev_card_type_show(struct device *dev, struct device_attribute *attr, char *buf)
83{
84 struct qeth_card *card = dev->driver_data;
85 if (!card)
86 return -EINVAL;
87
88 return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
89}
90
91static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL);
92
93static ssize_t
94qeth_dev_portno_show(struct device *dev, struct device_attribute *attr, char *buf)
95{
96 struct qeth_card *card = dev->driver_data;
97 if (!card)
98 return -EINVAL;
99
100 return sprintf(buf, "%i\n", card->info.portno);
101}
102
103static ssize_t
104qeth_dev_portno_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
105{
106 struct qeth_card *card = dev->driver_data;
107 char *tmp;
108 unsigned int portno;
109
110 if (!card)
111 return -EINVAL;
112
113 if ((card->state != CARD_STATE_DOWN) &&
114 (card->state != CARD_STATE_RECOVER))
115 return -EPERM;
116
117 portno = simple_strtoul(buf, &tmp, 16);
118 if (portno > MAX_PORTNO){
119 PRINT_WARN("portno 0x%X is out of range\n", portno);
120 return -EINVAL;
121 }
122
123 card->info.portno = portno;
124 return count;
125}
126
127static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
128
129static ssize_t
130qeth_dev_portname_show(struct device *dev, struct device_attribute *attr, char *buf)
131{
132 struct qeth_card *card = dev->driver_data;
133 char portname[9] = {0, };
134
135 if (!card)
136 return -EINVAL;
137
138 if (card->info.portname_required) {
139 memcpy(portname, card->info.portname + 1, 8);
140 EBCASC(portname, 8);
141 return sprintf(buf, "%s\n", portname);
142 } else
143 return sprintf(buf, "no portname required\n");
144}
145
146static ssize_t
147qeth_dev_portname_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
148{
149 struct qeth_card *card = dev->driver_data;
150 char *tmp;
151 int i;
152
153 if (!card)
154 return -EINVAL;
155
156 if ((card->state != CARD_STATE_DOWN) &&
157 (card->state != CARD_STATE_RECOVER))
158 return -EPERM;
159
160 tmp = strsep((char **) &buf, "\n");
161 if ((strlen(tmp) > 8) || (strlen(tmp) == 0))
162 return -EINVAL;
163
164 card->info.portname[0] = strlen(tmp);
165 /* for beauty reasons */
166 for (i = 1; i < 9; i++)
167 card->info.portname[i] = ' ';
168 strcpy(card->info.portname + 1, tmp);
169 ASCEBC(card->info.portname + 1, 8);
170
171 return count;
172}
173
174static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show,
175 qeth_dev_portname_store);
176
177static ssize_t
178qeth_dev_checksum_show(struct device *dev, struct device_attribute *attr, char *buf)
179{
180 struct qeth_card *card = dev->driver_data;
181
182 if (!card)
183 return -EINVAL;
184
185 return sprintf(buf, "%s checksumming\n", qeth_get_checksum_str(card));
186}
187
188static ssize_t
189qeth_dev_checksum_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
190{
191 struct qeth_card *card = dev->driver_data;
192 char *tmp;
193
194 if (!card)
195 return -EINVAL;
196
197 if ((card->state != CARD_STATE_DOWN) &&
198 (card->state != CARD_STATE_RECOVER))
199 return -EPERM;
200
201 tmp = strsep((char **) &buf, "\n");
202 if (!strcmp(tmp, "sw_checksumming"))
203 card->options.checksum_type = SW_CHECKSUMMING;
204 else if (!strcmp(tmp, "hw_checksumming"))
205 card->options.checksum_type = HW_CHECKSUMMING;
206 else if (!strcmp(tmp, "no_checksumming"))
207 card->options.checksum_type = NO_CHECKSUMMING;
208 else {
209 PRINT_WARN("Unknown checksumming type '%s'\n", tmp);
210 return -EINVAL;
211 }
212 return count;
213}
214
215static DEVICE_ATTR(checksumming, 0644, qeth_dev_checksum_show,
216 qeth_dev_checksum_store);
217
218static ssize_t
219qeth_dev_prioqing_show(struct device *dev, struct device_attribute *attr, char *buf)
220{
221 struct qeth_card *card = dev->driver_data;
222
223 if (!card)
224 return -EINVAL;
225
226 switch (card->qdio.do_prio_queueing) {
227 case QETH_PRIO_Q_ING_PREC:
228 return sprintf(buf, "%s\n", "by precedence");
229 case QETH_PRIO_Q_ING_TOS:
230 return sprintf(buf, "%s\n", "by type of service");
231 default:
232 return sprintf(buf, "always queue %i\n",
233 card->qdio.default_out_queue);
234 }
235}
236
237static ssize_t
238qeth_dev_prioqing_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
239{
240 struct qeth_card *card = dev->driver_data;
241 char *tmp;
242
243 if (!card)
244 return -EINVAL;
245
246 if ((card->state != CARD_STATE_DOWN) &&
247 (card->state != CARD_STATE_RECOVER))
248 return -EPERM;
249
250 /* check if 1920 devices are supported ,
251 * if though we have to permit priority queueing
252 */
253 if (card->qdio.no_out_queues == 1) {
254 PRINT_WARN("Priority queueing disabled due "
255 "to hardware limitations!\n");
256 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
257 return -EPERM;
258 }
259
260 tmp = strsep((char **) &buf, "\n");
261 if (!strcmp(tmp, "prio_queueing_prec"))
262 card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
263 else if (!strcmp(tmp, "prio_queueing_tos"))
264 card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
265 else if (!strcmp(tmp, "no_prio_queueing:0")) {
266 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
267 card->qdio.default_out_queue = 0;
268 } else if (!strcmp(tmp, "no_prio_queueing:1")) {
269 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
270 card->qdio.default_out_queue = 1;
271 } else if (!strcmp(tmp, "no_prio_queueing:2")) {
272 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
273 card->qdio.default_out_queue = 2;
274 } else if (!strcmp(tmp, "no_prio_queueing:3")) {
275 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
276 card->qdio.default_out_queue = 3;
277 } else if (!strcmp(tmp, "no_prio_queueing")) {
278 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
279 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
280 } else {
281 PRINT_WARN("Unknown queueing type '%s'\n", tmp);
282 return -EINVAL;
283 }
284 return count;
285}
286
287static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show,
288 qeth_dev_prioqing_store);
289
290static ssize_t
291qeth_dev_bufcnt_show(struct device *dev, struct device_attribute *attr, char *buf)
292{
293 struct qeth_card *card = dev->driver_data;
294
295 if (!card)
296 return -EINVAL;
297
298 return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
299}
300
301static ssize_t
302qeth_dev_bufcnt_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
303{
304 struct qeth_card *card = dev->driver_data;
305 char *tmp;
306 int cnt, old_cnt;
307 int rc;
308
309 if (!card)
310 return -EINVAL;
311
312 if ((card->state != CARD_STATE_DOWN) &&
313 (card->state != CARD_STATE_RECOVER))
314 return -EPERM;
315
316 old_cnt = card->qdio.in_buf_pool.buf_count;
317 cnt = simple_strtoul(buf, &tmp, 10);
318 cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
319 ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
320 if (old_cnt != cnt) {
321 if ((rc = qeth_realloc_buffer_pool(card, cnt)))
322 PRINT_WARN("Error (%d) while setting "
323 "buffer count.\n", rc);
324 }
325 return count;
326}
327
328static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
329 qeth_dev_bufcnt_store);
330
331static ssize_t
332qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route,
333 char *buf)
334{
335 switch (route->type) {
336 case PRIMARY_ROUTER:
337 return sprintf(buf, "%s\n", "primary router");
338 case SECONDARY_ROUTER:
339 return sprintf(buf, "%s\n", "secondary router");
340 case MULTICAST_ROUTER:
341 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
342 return sprintf(buf, "%s\n", "multicast router+");
343 else
344 return sprintf(buf, "%s\n", "multicast router");
345 case PRIMARY_CONNECTOR:
346 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
347 return sprintf(buf, "%s\n", "primary connector+");
348 else
349 return sprintf(buf, "%s\n", "primary connector");
350 case SECONDARY_CONNECTOR:
351 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
352 return sprintf(buf, "%s\n", "secondary connector+");
353 else
354 return sprintf(buf, "%s\n", "secondary connector");
355 default:
356 return sprintf(buf, "%s\n", "no");
357 }
358}
359
360static ssize_t
361qeth_dev_route4_show(struct device *dev, struct device_attribute *attr, char *buf)
362{
363 struct qeth_card *card = dev->driver_data;
364
365 if (!card)
366 return -EINVAL;
367
368 return qeth_dev_route_show(card, &card->options.route4, buf);
369}
370
371static ssize_t
372qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route,
373 enum qeth_prot_versions prot, const char *buf, size_t count)
374{
375 enum qeth_routing_types old_route_type = route->type;
376 char *tmp;
377 int rc;
378
379 tmp = strsep((char **) &buf, "\n");
380
381 if (!strcmp(tmp, "no_router")){
382 route->type = NO_ROUTER;
383 } else if (!strcmp(tmp, "primary_connector")) {
384 route->type = PRIMARY_CONNECTOR;
385 } else if (!strcmp(tmp, "secondary_connector")) {
386 route->type = SECONDARY_CONNECTOR;
387 } else if (!strcmp(tmp, "primary_router")) {
388 route->type = PRIMARY_ROUTER;
389 } else if (!strcmp(tmp, "secondary_router")) {
390 route->type = SECONDARY_ROUTER;
391 } else if (!strcmp(tmp, "multicast_router")) {
392 route->type = MULTICAST_ROUTER;
393 } else {
394 PRINT_WARN("Invalid routing type '%s'.\n", tmp);
395 return -EINVAL;
396 }
397 if (((card->state == CARD_STATE_SOFTSETUP) ||
398 (card->state == CARD_STATE_UP)) &&
399 (old_route_type != route->type)){
400 if (prot == QETH_PROT_IPV4)
401 rc = qeth_setrouting_v4(card);
402 else if (prot == QETH_PROT_IPV6)
403 rc = qeth_setrouting_v6(card);
404 }
405 return count;
406}
407
408static ssize_t
409qeth_dev_route4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
410{
411 struct qeth_card *card = dev->driver_data;
412
413 if (!card)
414 return -EINVAL;
415
416 return qeth_dev_route_store(card, &card->options.route4,
417 QETH_PROT_IPV4, buf, count);
418}
419
420static DEVICE_ATTR(route4, 0644, qeth_dev_route4_show, qeth_dev_route4_store);
421
422#ifdef CONFIG_QETH_IPV6
423static ssize_t
424qeth_dev_route6_show(struct device *dev, struct device_attribute *attr, char *buf)
425{
426 struct qeth_card *card = dev->driver_data;
427
428 if (!card)
429 return -EINVAL;
430
431 if (!qeth_is_supported(card, IPA_IPV6))
432 return sprintf(buf, "%s\n", "n/a");
433
434 return qeth_dev_route_show(card, &card->options.route6, buf);
435}
436
437static ssize_t
438qeth_dev_route6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
439{
440 struct qeth_card *card = dev->driver_data;
441
442 if (!card)
443 return -EINVAL;
444
445 if (!qeth_is_supported(card, IPA_IPV6)){
446 PRINT_WARN("IPv6 not supported for interface %s.\n"
447 "Routing status no changed.\n",
448 QETH_CARD_IFNAME(card));
449 return -ENOTSUPP;
450 }
451
452 return qeth_dev_route_store(card, &card->options.route6,
453 QETH_PROT_IPV6, buf, count);
454}
455
456static DEVICE_ATTR(route6, 0644, qeth_dev_route6_show, qeth_dev_route6_store);
457#endif
458
459static ssize_t
460qeth_dev_add_hhlen_show(struct device *dev, struct device_attribute *attr, char *buf)
461{
462 struct qeth_card *card = dev->driver_data;
463
464 if (!card)
465 return -EINVAL;
466
467 return sprintf(buf, "%i\n", card->options.add_hhlen);
468}
469
470static ssize_t
471qeth_dev_add_hhlen_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
472{
473 struct qeth_card *card = dev->driver_data;
474 char *tmp;
475 int i;
476
477 if (!card)
478 return -EINVAL;
479
480 if ((card->state != CARD_STATE_DOWN) &&
481 (card->state != CARD_STATE_RECOVER))
482 return -EPERM;
483
484 i = simple_strtoul(buf, &tmp, 10);
485 if ((i < 0) || (i > MAX_ADD_HHLEN)) {
486 PRINT_WARN("add_hhlen out of range\n");
487 return -EINVAL;
488 }
489 card->options.add_hhlen = i;
490
491 return count;
492}
493
494static DEVICE_ATTR(add_hhlen, 0644, qeth_dev_add_hhlen_show,
495 qeth_dev_add_hhlen_store);
496
497static ssize_t
498qeth_dev_fake_ll_show(struct device *dev, struct device_attribute *attr, char *buf)
499{
500 struct qeth_card *card = dev->driver_data;
501
502 if (!card)
503 return -EINVAL;
504
505 return sprintf(buf, "%i\n", card->options.fake_ll? 1:0);
506}
507
508static ssize_t
509qeth_dev_fake_ll_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
510{
511 struct qeth_card *card = dev->driver_data;
512 char *tmp;
513 int i;
514
515 if (!card)
516 return -EINVAL;
517
518 if ((card->state != CARD_STATE_DOWN) &&
519 (card->state != CARD_STATE_RECOVER))
520 return -EPERM;
521
522 i = simple_strtoul(buf, &tmp, 16);
523 if ((i != 0) && (i != 1)) {
524 PRINT_WARN("fake_ll: write 0 or 1 to this file!\n");
525 return -EINVAL;
526 }
527 card->options.fake_ll = i;
528 return count;
529}
530
531static DEVICE_ATTR(fake_ll, 0644, qeth_dev_fake_ll_show,
532 qeth_dev_fake_ll_store);
533
534static ssize_t
535qeth_dev_fake_broadcast_show(struct device *dev, struct device_attribute *attr, char *buf)
536{
537 struct qeth_card *card = dev->driver_data;
538
539 if (!card)
540 return -EINVAL;
541
542 return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0);
543}
544
545static ssize_t
546qeth_dev_fake_broadcast_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
547{
548 struct qeth_card *card = dev->driver_data;
549 char *tmp;
550 int i;
551
552 if (!card)
553 return -EINVAL;
554
555 if ((card->state != CARD_STATE_DOWN) &&
556 (card->state != CARD_STATE_RECOVER))
557 return -EPERM;
558
559 i = simple_strtoul(buf, &tmp, 16);
560 if ((i == 0) || (i == 1))
561 card->options.fake_broadcast = i;
562 else {
563 PRINT_WARN("fake_broadcast: write 0 or 1 to this file!\n");
564 return -EINVAL;
565 }
566 return count;
567}
568
569static DEVICE_ATTR(fake_broadcast, 0644, qeth_dev_fake_broadcast_show,
570 qeth_dev_fake_broadcast_store);
571
572static ssize_t
573qeth_dev_recover_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
574{
575 struct qeth_card *card = dev->driver_data;
576 char *tmp;
577 int i;
578
579 if (!card)
580 return -EINVAL;
581
582 if (card->state != CARD_STATE_UP)
583 return -EPERM;
584
585 i = simple_strtoul(buf, &tmp, 16);
586 if (i == 1)
587 qeth_schedule_recovery(card);
588
589 return count;
590}
591
592static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
593
594static ssize_t
595qeth_dev_broadcast_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
596{
597 struct qeth_card *card = dev->driver_data;
598
599 if (!card)
600 return -EINVAL;
601
602 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
603 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
604 return sprintf(buf, "n/a\n");
605
606 return sprintf(buf, "%s\n", (card->options.broadcast_mode ==
607 QETH_TR_BROADCAST_ALLRINGS)?
608 "all rings":"local");
609}
610
611static ssize_t
612qeth_dev_broadcast_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
613{
614 struct qeth_card *card = dev->driver_data;
615 char *tmp;
616
617 if (!card)
618 return -EINVAL;
619
620 if ((card->state != CARD_STATE_DOWN) &&
621 (card->state != CARD_STATE_RECOVER))
622 return -EPERM;
623
624 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
625 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))){
626 PRINT_WARN("Device is not a tokenring device!\n");
627 return -EINVAL;
628 }
629
630 tmp = strsep((char **) &buf, "\n");
631
632 if (!strcmp(tmp, "local")){
633 card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL;
634 return count;
635 } else if (!strcmp(tmp, "all_rings")) {
636 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
637 return count;
638 } else {
639 PRINT_WARN("broadcast_mode: invalid mode %s!\n",
640 tmp);
641 return -EINVAL;
642 }
643 return count;
644}
645
646static DEVICE_ATTR(broadcast_mode, 0644, qeth_dev_broadcast_mode_show,
647 qeth_dev_broadcast_mode_store);
648
649static ssize_t
650qeth_dev_canonical_macaddr_show(struct device *dev, struct device_attribute *attr, char *buf)
651{
652 struct qeth_card *card = dev->driver_data;
653
654 if (!card)
655 return -EINVAL;
656
657 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
658 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
659 return sprintf(buf, "n/a\n");
660
661 return sprintf(buf, "%i\n", (card->options.macaddr_mode ==
662 QETH_TR_MACADDR_CANONICAL)? 1:0);
663}
664
665static ssize_t
666qeth_dev_canonical_macaddr_store(struct device *dev, struct device_attribute *attr, const char *buf,
667 size_t count)
668{
669 struct qeth_card *card = dev->driver_data;
670 char *tmp;
671 int i;
672
673 if (!card)
674 return -EINVAL;
675
676 if ((card->state != CARD_STATE_DOWN) &&
677 (card->state != CARD_STATE_RECOVER))
678 return -EPERM;
679
680 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
681 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))){
682 PRINT_WARN("Device is not a tokenring device!\n");
683 return -EINVAL;
684 }
685
686 i = simple_strtoul(buf, &tmp, 16);
687 if ((i == 0) || (i == 1))
688 card->options.macaddr_mode = i?
689 QETH_TR_MACADDR_CANONICAL :
690 QETH_TR_MACADDR_NONCANONICAL;
691 else {
692 PRINT_WARN("canonical_macaddr: write 0 or 1 to this file!\n");
693 return -EINVAL;
694 }
695 return count;
696}
697
698static DEVICE_ATTR(canonical_macaddr, 0644, qeth_dev_canonical_macaddr_show,
699 qeth_dev_canonical_macaddr_store);
700
701static ssize_t
702qeth_dev_layer2_show(struct device *dev, struct device_attribute *attr, char *buf)
703{
704 struct qeth_card *card = dev->driver_data;
705
706 if (!card)
707 return -EINVAL;
708
709 return sprintf(buf, "%i\n", card->options.layer2 ? 1:0);
710}
711
712static ssize_t
713qeth_dev_layer2_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
714{
715 struct qeth_card *card = dev->driver_data;
716 char *tmp;
717 int i;
718
719 if (!card)
720 return -EINVAL;
721 if (card->info.type == QETH_CARD_TYPE_IQD) {
722 PRINT_WARN("Layer2 on Hipersockets is not supported! \n");
723 return -EPERM;
724 }
725
726 if (((card->state != CARD_STATE_DOWN) &&
727 (card->state != CARD_STATE_RECOVER)))
728 return -EPERM;
729
730 i = simple_strtoul(buf, &tmp, 16);
731 if ((i == 0) || (i == 1))
732 card->options.layer2 = i;
733 else {
734 PRINT_WARN("layer2: write 0 or 1 to this file!\n");
735 return -EINVAL;
736 }
737 return count;
738}
739
740static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
741 qeth_dev_layer2_store);
742
743static ssize_t
744qeth_dev_performance_stats_show(struct device *dev, struct device_attribute *attr, char *buf)
745{
746 struct qeth_card *card = dev->driver_data;
747
748 if (!card)
749 return -EINVAL;
750
751 return sprintf(buf, "%i\n", card->options.performance_stats ? 1:0);
752}
753
754static ssize_t
755qeth_dev_performance_stats_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
756{
757 struct qeth_card *card = dev->driver_data;
758 char *tmp;
759 int i;
760
761 if (!card)
762 return -EINVAL;
763
764 i = simple_strtoul(buf, &tmp, 16);
765 if ((i == 0) || (i == 1)) {
766 if (i == card->options.performance_stats)
767 return count;
768 card->options.performance_stats = i;
769 if (i == 0)
770 memset(&card->perf_stats, 0,
771 sizeof(struct qeth_perf_stats));
772 card->perf_stats.initial_rx_packets = card->stats.rx_packets;
773 card->perf_stats.initial_tx_packets = card->stats.tx_packets;
774 } else {
775 PRINT_WARN("performance_stats: write 0 or 1 to this file!\n");
776 return -EINVAL;
777 }
778 return count;
779}
780
781static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show,
782 qeth_dev_performance_stats_store);
783
784static ssize_t
785qeth_dev_large_send_show(struct device *dev, struct device_attribute *attr, char *buf)
786{
787 struct qeth_card *card = dev->driver_data;
788
789 if (!card)
790 return -EINVAL;
791
792 switch (card->options.large_send) {
793 case QETH_LARGE_SEND_NO:
794 return sprintf(buf, "%s\n", "no");
795 case QETH_LARGE_SEND_EDDP:
796 return sprintf(buf, "%s\n", "EDDP");
797 case QETH_LARGE_SEND_TSO:
798 return sprintf(buf, "%s\n", "TSO");
799 default:
800 return sprintf(buf, "%s\n", "N/A");
801 }
802}
803
804static ssize_t
805qeth_dev_large_send_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
806{
807 struct qeth_card *card = dev->driver_data;
808 enum qeth_large_send_types type;
809 int rc = 0;
810 char *tmp;
811
812 if (!card)
813 return -EINVAL;
814 tmp = strsep((char **) &buf, "\n");
815 if (!strcmp(tmp, "no")){
816 type = QETH_LARGE_SEND_NO;
817 } else if (!strcmp(tmp, "EDDP")) {
818 type = QETH_LARGE_SEND_EDDP;
819 } else if (!strcmp(tmp, "TSO")) {
820 type = QETH_LARGE_SEND_TSO;
821 } else {
822 PRINT_WARN("large_send: invalid mode %s!\n", tmp);
823 return -EINVAL;
824 }
825 if (card->options.large_send == type)
826 return count;
827 if ((rc = qeth_set_large_send(card, type)))
828 return rc;
829 return count;
830}
831
832static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show,
833 qeth_dev_large_send_store);
834
835static ssize_t
836qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value )
837{
838
839 if (!card)
840 return -EINVAL;
841
842 return sprintf(buf, "%i\n", value);
843}
844
845static ssize_t
846qeth_dev_blkt_store(struct qeth_card *card, const char *buf, size_t count,
847 int *value, int max_value)
848{
849 char *tmp;
850 int i;
851
852 if (!card)
853 return -EINVAL;
854
855 if ((card->state != CARD_STATE_DOWN) &&
856 (card->state != CARD_STATE_RECOVER))
857 return -EPERM;
858
859 i = simple_strtoul(buf, &tmp, 10);
860 if (i <= max_value) {
861 *value = i;
862 } else {
863 PRINT_WARN("blkt total time: write values between"
864 " 0 and %d to this file!\n", max_value);
865 return -EINVAL;
866 }
867 return count;
868}
869
870static ssize_t
871qeth_dev_blkt_total_show(struct device *dev, struct device_attribute *attr, char *buf)
872{
873 struct qeth_card *card = dev->driver_data;
874
875 return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total);
876}
877
878
879static ssize_t
880qeth_dev_blkt_total_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
881{
882 struct qeth_card *card = dev->driver_data;
883
884 return qeth_dev_blkt_store(card, buf, count,
885 &card->info.blkt.time_total,1000);
886}
887
888
889
890static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show,
891 qeth_dev_blkt_total_store);
892
893static ssize_t
894qeth_dev_blkt_inter_show(struct device *dev, struct device_attribute *attr, char *buf)
895{
896 struct qeth_card *card = dev->driver_data;
897
898 return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet);
899}
900
901
902static ssize_t
903qeth_dev_blkt_inter_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
904{
905 struct qeth_card *card = dev->driver_data;
906
907 return qeth_dev_blkt_store(card, buf, count,
908 &card->info.blkt.inter_packet,100);
909}
910
911static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
912 qeth_dev_blkt_inter_store);
913
914static ssize_t
915qeth_dev_blkt_inter_jumbo_show(struct device *dev, struct device_attribute *attr, char *buf)
916{
917 struct qeth_card *card = dev->driver_data;
918
919 return qeth_dev_blkt_show(buf, card,
920 card->info.blkt.inter_packet_jumbo);
921}
922
923
924static ssize_t
925qeth_dev_blkt_inter_jumbo_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
926{
927 struct qeth_card *card = dev->driver_data;
928
929 return qeth_dev_blkt_store(card, buf, count,
930 &card->info.blkt.inter_packet_jumbo,100);
931}
932
933static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
934 qeth_dev_blkt_inter_jumbo_store);
935
936static struct device_attribute * qeth_blkt_device_attrs[] = {
937 &dev_attr_total,
938 &dev_attr_inter,
939 &dev_attr_inter_jumbo,
940 NULL,
941};
942
943static struct attribute_group qeth_device_blkt_group = {
944 .name = "blkt",
945 .attrs = (struct attribute **)qeth_blkt_device_attrs,
946};
947
948static struct device_attribute * qeth_device_attrs[] = {
949 &dev_attr_state,
950 &dev_attr_chpid,
951 &dev_attr_if_name,
952 &dev_attr_card_type,
953 &dev_attr_portno,
954 &dev_attr_portname,
955 &dev_attr_checksumming,
956 &dev_attr_priority_queueing,
957 &dev_attr_buffer_count,
958 &dev_attr_route4,
959#ifdef CONFIG_QETH_IPV6
960 &dev_attr_route6,
961#endif
962 &dev_attr_add_hhlen,
963 &dev_attr_fake_ll,
964 &dev_attr_fake_broadcast,
965 &dev_attr_recover,
966 &dev_attr_broadcast_mode,
967 &dev_attr_canonical_macaddr,
968 &dev_attr_layer2,
969 &dev_attr_large_send,
970 &dev_attr_performance_stats,
971 NULL,
972};
973
974static struct attribute_group qeth_device_attr_group = {
975 .attrs = (struct attribute **)qeth_device_attrs,
976};
977
978static struct device_attribute * qeth_osn_device_attrs[] = {
979 &dev_attr_state,
980 &dev_attr_chpid,
981 &dev_attr_if_name,
982 &dev_attr_card_type,
983 &dev_attr_buffer_count,
984 &dev_attr_recover,
985 NULL,
986};
987
988static struct attribute_group qeth_osn_device_attr_group = {
989 .attrs = (struct attribute **)qeth_osn_device_attrs,
990};
991
992#define QETH_DEVICE_ATTR(_id,_name,_mode,_show,_store) \
993struct device_attribute dev_attr_##_id = { \
994 .attr = {.name=__stringify(_name), .mode=_mode, },\
995 .show = _show, \
996 .store = _store, \
997};
998
999static int
1000qeth_check_layer2(struct qeth_card *card)
1001{
1002 if (card->options.layer2)
1003 return -EPERM;
1004 return 0;
1005}
1006
1007
1008static ssize_t
1009qeth_dev_ipato_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
1010{
1011 struct qeth_card *card = dev->driver_data;
1012
1013 if (!card)
1014 return -EINVAL;
1015
1016 if (qeth_check_layer2(card))
1017 return -EPERM;
1018 return sprintf(buf, "%i\n", card->ipato.enabled? 1:0);
1019}
1020
1021static ssize_t
1022qeth_dev_ipato_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1023{
1024 struct qeth_card *card = dev->driver_data;
1025 char *tmp;
1026
1027 if (!card)
1028 return -EINVAL;
1029
1030 if ((card->state != CARD_STATE_DOWN) &&
1031 (card->state != CARD_STATE_RECOVER))
1032 return -EPERM;
1033
1034 if (qeth_check_layer2(card))
1035 return -EPERM;
1036
1037 tmp = strsep((char **) &buf, "\n");
1038 if (!strcmp(tmp, "toggle")){
1039 card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
1040 } else if (!strcmp(tmp, "1")){
1041 card->ipato.enabled = 1;
1042 } else if (!strcmp(tmp, "0")){
1043 card->ipato.enabled = 0;
1044 } else {
1045 PRINT_WARN("ipato_enable: write 0, 1 or 'toggle' to "
1046 "this file\n");
1047 return -EINVAL;
1048 }
1049 return count;
1050}
1051
1052static QETH_DEVICE_ATTR(ipato_enable, enable, 0644,
1053 qeth_dev_ipato_enable_show,
1054 qeth_dev_ipato_enable_store);
1055
1056static ssize_t
1057qeth_dev_ipato_invert4_show(struct device *dev, struct device_attribute *attr, char *buf)
1058{
1059 struct qeth_card *card = dev->driver_data;
1060
1061 if (!card)
1062 return -EINVAL;
1063
1064 if (qeth_check_layer2(card))
1065 return -EPERM;
1066
1067 return sprintf(buf, "%i\n", card->ipato.invert4? 1:0);
1068}
1069
1070static ssize_t
1071qeth_dev_ipato_invert4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1072{
1073 struct qeth_card *card = dev->driver_data;
1074 char *tmp;
1075
1076 if (!card)
1077 return -EINVAL;
1078
1079 if (qeth_check_layer2(card))
1080 return -EPERM;
1081
1082 tmp = strsep((char **) &buf, "\n");
1083 if (!strcmp(tmp, "toggle")){
1084 card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
1085 } else if (!strcmp(tmp, "1")){
1086 card->ipato.invert4 = 1;
1087 } else if (!strcmp(tmp, "0")){
1088 card->ipato.invert4 = 0;
1089 } else {
1090 PRINT_WARN("ipato_invert4: write 0, 1 or 'toggle' to "
1091 "this file\n");
1092 return -EINVAL;
1093 }
1094 return count;
1095}
1096
1097static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
1098 qeth_dev_ipato_invert4_show,
1099 qeth_dev_ipato_invert4_store);
1100
1101static ssize_t
1102qeth_dev_ipato_add_show(char *buf, struct qeth_card *card,
1103 enum qeth_prot_versions proto)
1104{
1105 struct qeth_ipato_entry *ipatoe;
1106 unsigned long flags;
1107 char addr_str[40];
1108 int entry_len; /* length of 1 entry string, differs between v4 and v6 */
1109 int i = 0;
1110
1111 if (qeth_check_layer2(card))
1112 return -EPERM;
1113
1114 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
1115 /* add strlen for "/<mask>\n" */
1116 entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
1117 spin_lock_irqsave(&card->ip_lock, flags);
1118 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
1119 if (ipatoe->proto != proto)
1120 continue;
1121 /* String must not be longer than PAGE_SIZE. So we check if
1122 * string length gets near PAGE_SIZE. Then we can savely display
1123 * the next IPv6 address (worst case, compared to IPv4) */
1124 if ((PAGE_SIZE - i) <= entry_len)
1125 break;
1126 qeth_ipaddr_to_string(proto, ipatoe->addr, addr_str);
1127 i += snprintf(buf + i, PAGE_SIZE - i,
1128 "%s/%i\n", addr_str, ipatoe->mask_bits);
1129 }
1130 spin_unlock_irqrestore(&card->ip_lock, flags);
1131 i += snprintf(buf + i, PAGE_SIZE - i, "\n");
1132
1133 return i;
1134}
1135
1136static ssize_t
1137qeth_dev_ipato_add4_show(struct device *dev, struct device_attribute *attr, char *buf)
1138{
1139 struct qeth_card *card = dev->driver_data;
1140
1141 if (!card)
1142 return -EINVAL;
1143
1144 return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
1145}
1146
/*
 * Parse an "<ip addr>/<mask bits>" IPA-takeover entry string.
 * On success, *addr holds the binary address and *mask_bits the prefix
 * length; returns 0, or -EINVAL on any format error.
 */
static int
qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto,
		  u8 *addr, int *mask_bits)
{
	const char *start, *end;
	char *tmp;
	char buffer[40] = {0, };

	start = buf;
	/* get address string */
	end = strchr(start, '/');
	if (!end || (end - start >= 40)){
		PRINT_WARN("Invalid format for ipato_addx/delx. "
			   "Use <ip addr>/<mask bits>\n");
		return -EINVAL;
	}
	/* buffer is pre-zeroed and end - start < 40, so the copy stays
	 * NUL-terminated despite strncpy's usual pitfall */
	strncpy(buffer, start, end - start);
	if (qeth_string_to_ipaddr(buffer, proto, addr)){
		PRINT_WARN("Invalid IP address format!\n");
		return -EINVAL;
	}
	start = end + 1;
	*mask_bits = simple_strtoul(start, &tmp, 10);
	/* NOTE(review): trailing characters after the digits (e.g. the
	 * newline sysfs writes carry) are deliberately tolerated; only
	 * "no digits" or an out-of-range prefix length is rejected. */
	if (!strlen(start) ||
	    (tmp == start) ||
	    (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) {
		PRINT_WARN("Invalid mask bits for ipato_addx/delx !\n");
		return -EINVAL;
	}
	return 0;
}
1178
/*
 * Add one IPA-takeover entry ("<addr>/<mask>") for the given protocol.
 * Allocates the entry and hands ownership to qeth_add_ipato_entry();
 * on rejection (error return) the entry is freed again here.
 * Returns count on success or a negative errno.
 */
static ssize_t
qeth_dev_ipato_add_store(const char *buf, size_t count,
			 struct qeth_card *card, enum qeth_prot_versions proto)
{
	struct qeth_ipato_entry *ipatoe;
	u8 addr[16];
	int mask_bits;
	int rc;

	if (qeth_check_layer2(card))
		return -EPERM;
	if ((rc = qeth_parse_ipatoe(buf, proto, addr, &mask_bits)))
		return rc;

	if (!(ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL))){
		PRINT_WARN("No memory to allocate ipato entry\n");
		return -ENOMEM;
	}
	ipatoe->proto = proto;
	/* copy only as many address bytes as the protocol uses */
	memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
	ipatoe->mask_bits = mask_bits;

	if ((rc = qeth_add_ipato_entry(card, ipatoe))){
		/* entry was not queued -- ownership stays here, free it */
		kfree(ipatoe);
		return rc;
	}

	return count;
}
1208
/* sysfs 'store' wrapper: add an IPv4 IPA-takeover entry. */
static ssize_t
qeth_dev_ipato_add4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev->driver_data;

	if (!card)
		return -EINVAL;

	return qeth_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4);
}

static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
			qeth_dev_ipato_add4_show,
			qeth_dev_ipato_add4_store);

/*
 * Delete one IPA-takeover entry ("<addr>/<mask>") for the given
 * protocol.  Returns count on success or a negative errno.
 */
static ssize_t
qeth_dev_ipato_del_store(const char *buf, size_t count,
			 struct qeth_card *card, enum qeth_prot_versions proto)
{
	u8 addr[16];
	int mask_bits;
	int rc;

	if (qeth_check_layer2(card))
		return -EPERM;
	if ((rc = qeth_parse_ipatoe(buf, proto, addr, &mask_bits)))
		return rc;

	qeth_del_ipato_entry(card, proto, addr, mask_bits);

	return count;
}

/* sysfs 'store' wrapper: delete an IPv4 IPA-takeover entry. */
static ssize_t
qeth_dev_ipato_del4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev->driver_data;

	if (!card)
		return -EINVAL;

	return qeth_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4);
}

static QETH_DEVICE_ATTR(ipato_del4, del4, 0200, NULL,
			qeth_dev_ipato_del4_store);
1255
1256#ifdef CONFIG_QETH_IPV6
/* sysfs 'show': current IPv6 IPA-takeover invert flag (0 or 1). */
static ssize_t
qeth_dev_ipato_invert6_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev->driver_data;

	if (!card)
		return -EINVAL;

	if (qeth_check_layer2(card))
		return -EPERM;

	return sprintf(buf, "%i\n", card->ipato.invert6? 1:0);
}

/* sysfs 'store': accepts "0", "1" or "toggle" for the IPv6 invert flag. */
static ssize_t
qeth_dev_ipato_invert6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev->driver_data;
	char *tmp;

	if (!card)
		return -EINVAL;

	if (qeth_check_layer2(card))
		return -EPERM;

	/* strip the trailing newline sysfs writes usually carry */
	tmp = strsep((char **) &buf, "\n");
	if (!strcmp(tmp, "toggle")){
		card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
	} else if (!strcmp(tmp, "1")){
		card->ipato.invert6 = 1;
	} else if (!strcmp(tmp, "0")){
		card->ipato.invert6 = 0;
	} else {
		PRINT_WARN("ipato_invert6: write 0, 1 or 'toggle' to "
			   "this file\n");
		return -EINVAL;
	}
	return count;
}

static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644,
			qeth_dev_ipato_invert6_show,
			qeth_dev_ipato_invert6_store);
1301
1302
/* sysfs 'show' wrapper: list the IPv6 IPA-takeover entries. */
static ssize_t
qeth_dev_ipato_add6_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev->driver_data;

	if (!card)
		return -EINVAL;

	return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV6);
}

/* sysfs 'store' wrapper: add an IPv6 IPA-takeover entry. */
static ssize_t
qeth_dev_ipato_add6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev->driver_data;

	if (!card)
		return -EINVAL;

	return qeth_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6);
}

static QETH_DEVICE_ATTR(ipato_add6, add6, 0644,
			qeth_dev_ipato_add6_show,
			qeth_dev_ipato_add6_store);

/* sysfs 'store' wrapper: delete an IPv6 IPA-takeover entry. */
static ssize_t
qeth_dev_ipato_del6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev->driver_data;

	if (!card)
		return -EINVAL;

	return qeth_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6);
}

static QETH_DEVICE_ATTR(ipato_del6, del6, 0200, NULL,
			qeth_dev_ipato_del6_store);
1342#endif /* CONFIG_QETH_IPV6 */
1343
/* Attributes exposed in the per-device "ipa_takeover" sysfs group. */
static struct device_attribute * qeth_ipato_device_attrs[] = {
	&dev_attr_ipato_enable,
	&dev_attr_ipato_invert4,
	&dev_attr_ipato_add4,
	&dev_attr_ipato_del4,
#ifdef CONFIG_QETH_IPV6
	&dev_attr_ipato_invert6,
	&dev_attr_ipato_add6,
	&dev_attr_ipato_del6,
#endif
	NULL,
};

static struct attribute_group qeth_device_ipato_group = {
	.name = "ipa_takeover",
	.attrs = (struct attribute **)qeth_ipato_device_attrs,
};
1361
/*
 * List the configured VIPA addresses of one IP version into a sysfs
 * buffer, one address per line.  Only ip_list entries of type
 * QETH_IP_TYPE_VIPA are shown.  Returns bytes written or -EPERM.
 */
static ssize_t
qeth_dev_vipa_add_show(char *buf, struct qeth_card *card,
		       enum qeth_prot_versions proto)
{
	struct qeth_ipaddr *ipaddr;
	char addr_str[40];
	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
	unsigned long flags;
	int i = 0;

	if (qeth_check_layer2(card))
		return -EPERM;

	/* worst-case printed address length: 12 for v4, 40 for v6 */
	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
	entry_len += 2; /* \n + terminator */
	spin_lock_irqsave(&card->ip_lock, flags);
	list_for_each_entry(ipaddr, &card->ip_list, entry){
		if (ipaddr->proto != proto)
			continue;
		if (ipaddr->type != QETH_IP_TYPE_VIPA)
			continue;
		/* String must not be longer than PAGE_SIZE. So we check if
		 * string length gets near PAGE_SIZE. Then we can savely display
		 * the next IPv6 address (worst case, compared to IPv4) */
		if ((PAGE_SIZE - i) <= entry_len)
			break;
		qeth_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str);
		i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
	/* trailing blank line terminates the listing */
	i += snprintf(buf + i, PAGE_SIZE - i, "\n");

	return i;
}
1396
/* sysfs 'show' wrapper: list the IPv4 VIPA addresses. */
static ssize_t
qeth_dev_vipa_add4_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev->driver_data;

	if (!card)
		return -EINVAL;

	return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
}

/* Parse a plain IP address string into binary form; 0 or -EINVAL. */
static int
qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto,
		 u8 *addr)
{
	if (qeth_string_to_ipaddr(buf, proto, addr)){
		PRINT_WARN("Invalid IP address format!\n");
		return -EINVAL;
	}
	return 0;
}

/*
 * Add a VIPA address for the given protocol.
 * Returns count on success or a negative errno.
 */
static ssize_t
qeth_dev_vipa_add_store(const char *buf, size_t count,
			struct qeth_card *card, enum qeth_prot_versions proto)
{
	u8 addr[16] = {0, };
	int rc;

	if (qeth_check_layer2(card))
		return -EPERM;
	if ((rc = qeth_parse_vipae(buf, proto, addr)))
		return rc;

	if ((rc = qeth_add_vipa(card, proto, addr)))
		return rc;

	return count;
}

/* sysfs 'store' wrapper: add an IPv4 VIPA. */
static ssize_t
qeth_dev_vipa_add4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev->driver_data;

	if (!card)
		return -EINVAL;

	return qeth_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4);
}

static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
			qeth_dev_vipa_add4_show,
			qeth_dev_vipa_add4_store);
1451
/*
 * Delete a VIPA address for the given protocol.
 * Returns count on success or a negative errno.
 */
static ssize_t
qeth_dev_vipa_del_store(const char *buf, size_t count,
			struct qeth_card *card, enum qeth_prot_versions proto)
{
	u8 addr[16];
	int rc;

	if (qeth_check_layer2(card))
		return -EPERM;
	if ((rc = qeth_parse_vipae(buf, proto, addr)))
		return rc;

	qeth_del_vipa(card, proto, addr);

	return count;
}

/* sysfs 'store' wrapper: delete an IPv4 VIPA. */
static ssize_t
qeth_dev_vipa_del4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev->driver_data;

	if (!card)
		return -EINVAL;

	return qeth_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
}

static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
			qeth_dev_vipa_del4_store);
1482
1483#ifdef CONFIG_QETH_IPV6
/* sysfs 'show' wrapper: list the IPv6 VIPA addresses. */
static ssize_t
qeth_dev_vipa_add6_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev->driver_data;

	if (!card)
		return -EINVAL;

	return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV6);
}

/* sysfs 'store' wrapper: add an IPv6 VIPA. */
static ssize_t
qeth_dev_vipa_add6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev->driver_data;

	if (!card)
		return -EINVAL;

	return qeth_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
}

static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
			qeth_dev_vipa_add6_show,
			qeth_dev_vipa_add6_store);
1509
1510static ssize_t
1511qeth_dev_vipa_del6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1512{
1513 struct qeth_card *card = dev->driver_data;
1514
1515 if (!card)
1516 return -EINVAL;
1517
1518 if (qeth_check_layer2(card))
1519 return -EPERM;
1520
1521 return qeth_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
1522}
1523
1524static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL,
1525 qeth_dev_vipa_del6_store);
1526#endif /* CONFIG_QETH_IPV6 */
1527
1528static struct device_attribute * qeth_vipa_device_attrs[] = {
1529 &dev_attr_vipa_add4,
1530 &dev_attr_vipa_del4,
1531#ifdef CONFIG_QETH_IPV6
1532 &dev_attr_vipa_add6,
1533 &dev_attr_vipa_del6,
1534#endif
1535 NULL,
1536};
1537
1538static struct attribute_group qeth_device_vipa_group = {
1539 .name = "vipa",
1540 .attrs = (struct attribute **)qeth_vipa_device_attrs,
1541};
1542
/*
 * List the configured RXIP addresses of one IP version into a sysfs
 * buffer, one address per line.  Only ip_list entries of type
 * QETH_IP_TYPE_RXIP are shown.  Returns bytes written or -EPERM.
 */
static ssize_t
qeth_dev_rxip_add_show(char *buf, struct qeth_card *card,
		       enum qeth_prot_versions proto)
{
	struct qeth_ipaddr *ipaddr;
	char addr_str[40];
	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
	unsigned long flags;
	int i = 0;

	if (qeth_check_layer2(card))
		return -EPERM;

	/* worst-case printed address length: 12 for v4, 40 for v6 */
	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
	entry_len += 2; /* \n + terminator */
	spin_lock_irqsave(&card->ip_lock, flags);
	list_for_each_entry(ipaddr, &card->ip_list, entry){
		if (ipaddr->proto != proto)
			continue;
		if (ipaddr->type != QETH_IP_TYPE_RXIP)
			continue;
		/* String must not be longer than PAGE_SIZE. So we check if
		 * string length gets near PAGE_SIZE. Then we can savely display
		 * the next IPv6 address (worst case, compared to IPv4) */
		if ((PAGE_SIZE - i) <= entry_len)
			break;
		qeth_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str);
		i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
	/* trailing blank line terminates the listing */
	i += snprintf(buf + i, PAGE_SIZE - i, "\n");

	return i;
}
1577
/* sysfs 'show' wrapper: list the IPv4 RXIP addresses. */
static ssize_t
qeth_dev_rxip_add4_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev->driver_data;

	if (!card)
		return -EINVAL;

	return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
}

/* Parse a plain IP address string into binary form; 0 or -EINVAL. */
static int
qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto,
		 u8 *addr)
{
	if (qeth_string_to_ipaddr(buf, proto, addr)){
		PRINT_WARN("Invalid IP address format!\n");
		return -EINVAL;
	}
	return 0;
}

/*
 * Add an RXIP address for the given protocol.
 * Returns count on success or a negative errno.
 */
static ssize_t
qeth_dev_rxip_add_store(const char *buf, size_t count,
			struct qeth_card *card, enum qeth_prot_versions proto)
{
	u8 addr[16] = {0, };
	int rc;

	if (qeth_check_layer2(card))
		return -EPERM;
	if ((rc = qeth_parse_rxipe(buf, proto, addr)))
		return rc;

	if ((rc = qeth_add_rxip(card, proto, addr)))
		return rc;

	return count;
}

/* sysfs 'store' wrapper: add an IPv4 RXIP address. */
static ssize_t
qeth_dev_rxip_add4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev->driver_data;

	if (!card)
		return -EINVAL;

	return qeth_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
}

static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
			qeth_dev_rxip_add4_show,
			qeth_dev_rxip_add4_store);
1632
/*
 * Delete an RXIP address for the given protocol.
 * Returns count on success or a negative errno.
 */
static ssize_t
qeth_dev_rxip_del_store(const char *buf, size_t count,
			struct qeth_card *card, enum qeth_prot_versions proto)
{
	u8 addr[16];
	int rc;

	if (qeth_check_layer2(card))
		return -EPERM;
	if ((rc = qeth_parse_rxipe(buf, proto, addr)))
		return rc;

	qeth_del_rxip(card, proto, addr);

	return count;
}

/* sysfs 'store' wrapper: delete an IPv4 RXIP address. */
static ssize_t
qeth_dev_rxip_del4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev->driver_data;

	if (!card)
		return -EINVAL;

	return qeth_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4);
}

static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
			qeth_dev_rxip_del4_store);
1663
1664#ifdef CONFIG_QETH_IPV6
/* sysfs 'show' wrapper: list the IPv6 RXIP addresses. */
static ssize_t
qeth_dev_rxip_add6_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev->driver_data;

	if (!card)
		return -EINVAL;

	return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV6);
}

/* sysfs 'store' wrapper: add an IPv6 RXIP address. */
static ssize_t
qeth_dev_rxip_add6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev->driver_data;

	if (!card)
		return -EINVAL;

	return qeth_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6);
}

static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
			qeth_dev_rxip_add6_show,
			qeth_dev_rxip_add6_store);

/* sysfs 'store' wrapper: delete an IPv6 RXIP address. */
static ssize_t
qeth_dev_rxip_del6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev->driver_data;

	if (!card)
		return -EINVAL;

	return qeth_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6);
}

static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL,
			qeth_dev_rxip_del6_store);
1704#endif /* CONFIG_QETH_IPV6 */
1705
/* Attributes exposed in the per-device "rxip" sysfs group. */
static struct device_attribute * qeth_rxip_device_attrs[] = {
	&dev_attr_rxip_add4,
	&dev_attr_rxip_del4,
#ifdef CONFIG_QETH_IPV6
	&dev_attr_rxip_add6,
	&dev_attr_rxip_del6,
#endif
	NULL,
};

static struct attribute_group qeth_device_rxip_group = {
	.name = "rxip",
	.attrs = (struct attribute **)qeth_rxip_device_attrs,
};
1720
1721int
1722qeth_create_device_attributes(struct device *dev)
1723{
1724 int ret;
1725 struct qeth_card *card = dev->driver_data;
1726
1727 if (card->info.type == QETH_CARD_TYPE_OSN)
1728 return sysfs_create_group(&dev->kobj,
1729 &qeth_osn_device_attr_group);
1730
1731 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group)))
1732 return ret;
1733 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group))){
1734 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
1735 return ret;
1736 }
1737 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_vipa_group))){
1738 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
1739 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1740 return ret;
1741 }
1742 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_rxip_group))){
1743 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
1744 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1745 sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
1746 return ret;
1747 }
1748 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group))){
1749 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
1750 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1751 sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
1752 sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
1753 return ret;
1754 }
1755 return 0;
1756}
1757
/*
 * Remove the sysfs attribute groups created by
 * qeth_create_device_attributes() for this device.
 */
void
qeth_remove_device_attributes(struct device *dev)
{
	struct qeth_card *card = dev->driver_data;

	/* OSN devices only ever had the OSN group */
	if (card->info.type == QETH_CARD_TYPE_OSN) {
		sysfs_remove_group(&dev->kobj, &qeth_osn_device_attr_group);
		return;
	}
	sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
	sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
	sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
	sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
	sysfs_remove_group(&dev->kobj, &qeth_device_blkt_group);
}
1773
1774/**********************/
1775/* DRIVER ATTRIBUTES */
1776/**********************/
1777static ssize_t
1778qeth_driver_group_store(struct device_driver *ddrv, const char *buf,
1779 size_t count)
1780{
1781 const char *start, *end;
1782 char bus_ids[3][BUS_ID_SIZE], *argv[3];
1783 int i;
1784 int err;
1785
1786 start = buf;
1787 for (i = 0; i < 3; i++) {
1788 static const char delim[] = { ',', ',', '\n' };
1789 int len;
1790
1791 if (!(end = strchr(start, delim[i])))
1792 return -EINVAL;
1793 len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start);
1794 strncpy(bus_ids[i], start, len);
1795 bus_ids[i][len] = '\0';
1796 start = end + 1;
1797 argv[i] = bus_ids[i];
1798 }
1799 err = ccwgroup_create(qeth_root_dev, qeth_ccwgroup_driver.driver_id,
1800 &qeth_ccw_driver, 3, argv);
1801 if (err)
1802 return err;
1803 else
1804 return count;
1805}
1806
1807
1808static DRIVER_ATTR(group, 0200, NULL, qeth_driver_group_store);
1809
/*
 * sysfs driver attribute 'notifier_register': register the writing
 * process for qeth notifications with the given signal number, or
 * unregister it when "unregister" is written.
 */
static ssize_t
qeth_driver_notifier_register_store(struct device_driver *ddrv, const char *buf,
				    size_t count)
{
	int rc;
	int signum;
	char *tmp, *tmp2;

	/* strip the trailing newline sysfs writes usually carry */
	tmp = strsep((char **) &buf, "\n");
	if (!strncmp(tmp, "unregister", 10)){
		if ((rc = qeth_notifier_unregister(current)))
			return rc;
		return count;
	}

	signum = simple_strtoul(tmp, &tmp2, 10);
	/* NOTE(review): simple_strtoul returns unsigned, so signum < 0 can
	 * only arise from overflow; accepted range is 0..32 */
	if ((signum < 0) || (signum > 32)){
		PRINT_WARN("Signal number %d is out of range\n", signum);
		return -EINVAL;
	}
	if ((rc = qeth_notifier_register(current, signum)))
		return rc;

	return count;
}

static DRIVER_ATTR(notifier_register, 0200, NULL,
		   qeth_driver_notifier_register_store);
1838
/* Create the driver-level sysfs files ("group", "notifier_register"). */
int
qeth_create_driver_attributes(void)
{
	int rc;

	if ((rc = driver_create_file(&qeth_ccwgroup_driver.driver,
				     &driver_attr_group)))
		return rc;
	/* NOTE(review): if this second call fails, the "group" file is left
	 * behind; presumably the caller tears down via the remove path --
	 * confirm against the driver init error handling. */
	return driver_create_file(&qeth_ccwgroup_driver.driver,
				  &driver_attr_notifier_register);
}

/* Remove the driver-level sysfs files again. */
void
qeth_remove_driver_attributes(void)
{
	driver_remove_file(&qeth_ccwgroup_driver.driver,
			   &driver_attr_group);
	driver_remove_file(&qeth_ccwgroup_driver.driver,
			   &driver_attr_notifier_register);
}
diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h
deleted file mode 100644
index c20e923cf9ad..000000000000
--- a/drivers/s390/net/qeth_tso.h
+++ /dev/null
@@ -1,148 +0,0 @@
1/*
2 * linux/drivers/s390/net/qeth_tso.h
3 *
4 * Header file for qeth TCP Segmentation Offload support.
5 *
6 * Copyright 2004 IBM Corporation
7 *
8 * Author(s): Frank Pavlic <fpavlic@de.ibm.com>
9 *
10 */
11#ifndef __QETH_TSO_H__
12#define __QETH_TSO_H__
13
14#include <linux/skbuff.h>
15#include <linux/tcp.h>
16#include <linux/ip.h>
17#include <linux/ipv6.h>
18#include <net/ip6_checksum.h>
19#include "qeth.h"
20#include "qeth_mpc.h"
21
22
/*
 * Push room for a TSO header in front of the skb data.
 * Returns a pointer to the new header area (NULL when qeth_push_skb
 * cannot provide the headroom).
 */
static inline struct qeth_hdr_tso *
qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
{
	QETH_DBF_TEXT(trace, 5, "tsoprsk");
	return qeth_push_skb(card, *skb, sizeof(struct qeth_hdr_tso));
}
29
/**
 * fill header for a TSO packet
 *
 * The qeth_hdr_tso sits at skb->data (pushed by qeth_tso_prepare_skb).
 * Fixed extension-header fields are set, then the per-packet values
 * (MSS, L3/L4 header length, payload length) are derived from the skb.
 */
static inline void
qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
{
	struct qeth_hdr_tso *hdr;
	struct tcphdr *tcph;
	struct iphdr *iph;

	QETH_DBF_TEXT(trace, 5, "tsofhdr");

	hdr = (struct qeth_hdr_tso *) skb->data;
	iph = ip_hdr(skb);
	tcph = tcp_hdr(skb);
	/*fix header to TSO values ...*/
	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
	/*set values which are fix for the first approach ...*/
	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
	hdr->ext.imb_hdr_no = 1;
	hdr->ext.hdr_type = 1;
	hdr->ext.hdr_version = 1;
	hdr->ext.hdr_len = 28;
	/*insert non-fix values */
	hdr->ext.mss = skb_shinfo(skb)->gso_size;
	/* combined IP + TCP header length of the datagram */
	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
				       sizeof(struct qeth_hdr_tso));
}
59
60/**
61 * change some header values as requested by hardware
62 */
63static inline void
64qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
65{
66 struct iphdr *iph = ip_hdr(skb);
67 struct ipv6hdr *ip6h = ipv6_hdr(skb);
68 struct tcphdr *tcph = tcp_hdr(skb);
69
70 tcph->check = 0;
71 if (skb->protocol == ETH_P_IPV6) {
72 ip6h->payload_len = 0;
73 tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
74 0, IPPROTO_TCP, 0);
75 return;
76 }
77 /*OSA want us to set these values ...*/
78 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
79 0, IPPROTO_TCP, 0);
80 iph->tot_len = 0;
81 iph->check = 0;
82}
83
/*
 * Prepare an outgoing skb for TSO: push and zero the TSO header, fill
 * the generic qdio header part and the TSO extension, then adjust the
 * TCP/IP headers for the hardware.  Returns 0 or -ENOMEM.
 */
static inline int
qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
			int ipv, int cast_type)
{
	struct qeth_hdr_tso *hdr;

	QETH_DBF_TEXT(trace, 5, "tsoprep");

	hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
	if (hdr == NULL) {
		QETH_DBF_TEXT(trace, 4, "tsoperr");
		return -ENOMEM;
	}
	memset(hdr, 0, sizeof(struct qeth_hdr_tso));
	/*fill first 32 bytes of qdio header as used
	 *FIXME: TSO has two struct members
	 *	 with different names but same size
	 * */
	qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
	qeth_tso_fill_header(card, skb);
	qeth_tso_set_tcpip_header(card, skb);
	return 0;
}
107
/*
 * Map a (possibly fragmented) skb into consecutive qdio buffer
 * elements: first the linear part (if non-empty), then every page
 * fragment.  SBAL flags mark first/middle/last fragments; for TSO the
 * header element already carried FIRST_FRAG, so the data starts as
 * MIDDLE_FRAG.  *next_element_to_fill is advanced past the elements
 * used.
 */
static inline void
__qeth_fill_buffer_frag(struct sk_buff *skb, struct qdio_buffer *buffer,
			int is_tso, int *next_element_to_fill)
{
	struct skb_frag_struct *frag;
	int fragno;
	unsigned long addr;
	int element, cnt, dlen;

	fragno = skb_shinfo(skb)->nr_frags;
	element = *next_element_to_fill;
	dlen = 0;

	if (is_tso)
		buffer->element[element].flags =
			SBAL_FLAGS_MIDDLE_FRAG;
	else
		buffer->element[element].flags =
			SBAL_FLAGS_FIRST_FRAG;
	/* linear (non-paged) part of the skb, if any */
	if ( (dlen = (skb->len - skb->data_len)) ) {
		buffer->element[element].addr = skb->data;
		buffer->element[element].length = dlen;
		element++;
	}
	for (cnt = 0; cnt < fragno; cnt++) {
		frag = &skb_shinfo(skb)->frags[cnt];
		/* physical address of the fragment's data within its page */
		addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
			frag->page_offset;
		buffer->element[element].addr = (char *)addr;
		buffer->element[element].length = frag->size;
		if (cnt < (fragno - 1))
			buffer->element[element].flags =
				SBAL_FLAGS_MIDDLE_FRAG;
		else
			buffer->element[element].flags =
				SBAL_FLAGS_LAST_FRAG;
		element++;
	}
	*next_element_to_fill = element;
}
148#endif /* __QETH_TSO_H__ */