Diffstat (limited to 'drivers/s390/net')
-rw-r--r--  drivers/s390/net/Kconfig | 84
-rw-r--r--  drivers/s390/net/Makefile | 12
-rw-r--r--  drivers/s390/net/ctcdbug.c | 80
-rw-r--r--  drivers/s390/net/ctcdbug.h | 125
-rw-r--r--  drivers/s390/net/ctcm_dbug.c | 67
-rw-r--r--  drivers/s390/net/ctcm_dbug.h | 158
-rw-r--r--  drivers/s390/net/ctcm_fsms.c | 2347
-rw-r--r--  drivers/s390/net/ctcm_fsms.h | 359
-rw-r--r--  drivers/s390/net/ctcm_main.c | 1772
-rw-r--r--  drivers/s390/net/ctcm_main.h | 287
-rw-r--r--  drivers/s390/net/ctcm_mpc.c | 2472
-rw-r--r--  drivers/s390/net/ctcm_mpc.h | 239
-rw-r--r--  drivers/s390/net/ctcm_sysfs.c | 210
-rw-r--r--  drivers/s390/net/ctcmain.c | 3062
-rw-r--r--  drivers/s390/net/ctcmain.h | 270
-rw-r--r--  drivers/s390/net/qeth_core.h (renamed from drivers/s390/net/qeth.h) | 811
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 4540
-rw-r--r--  drivers/s390/net/qeth_core_mpc.c | 266
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h (renamed from drivers/s390/net/qeth_mpc.h) | 143
-rw-r--r--  drivers/s390/net/qeth_core_offl.c (renamed from drivers/s390/net/qeth_eddp.c) | 287
-rw-r--r--  drivers/s390/net/qeth_core_offl.h (renamed from drivers/s390/net/qeth_eddp.h) | 50
-rw-r--r--  drivers/s390/net/qeth_core_sys.c | 651
-rw-r--r--  drivers/s390/net/qeth_fs.h | 168
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 1242
-rw-r--r--  drivers/s390/net/qeth_l3.h | 76
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 3391
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c | 1051
-rw-r--r--  drivers/s390/net/qeth_main.c | 8959
-rw-r--r--  drivers/s390/net/qeth_mpc.c | 269
-rw-r--r--  drivers/s390/net/qeth_proc.c | 316
-rw-r--r--  drivers/s390/net/qeth_sys.c | 1858
-rw-r--r--  drivers/s390/net/qeth_tso.h | 148
32 files changed, 19678 insertions, 16092 deletions
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index eada69dec4fe..a7745c82b4ae 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -5,22 +5,25 @@ config LCS
 	tristate "Lan Channel Station Interface"
 	depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI)
 	help
-	  Select this option if you want to use LCS networking on IBM S/390
-	  or zSeries. This device driver supports Token Ring (IEEE 802.5),
+	  Select this option if you want to use LCS networking on IBM System z.
+	  This device driver supports Token Ring (IEEE 802.5),
 	  FDDI (IEEE 802.7) and Ethernet.
-	  This option is also available as a module which will be
-	  called lcs.ko. If you do not know what it is, it's safe to say "Y".
+	  To compile as a module, choose M. The module name is lcs.ko.
+	  If you do not know what it is, it's safe to choose Y.
 
-config CTC
-	tristate "CTC device support"
+config CTCM
+	tristate "CTC and MPC SNA device support"
 	depends on CCW && NETDEVICES
 	help
-	  Select this option if you want to use channel-to-channel networking
-	  on IBM S/390 or zSeries. This device driver supports real CTC
-	  coupling using ESCON. It also supports virtual CTCs when running
-	  under VM. It will use the channel device configuration if this is
-	  available. This option is also available as a module which will be
-	  called ctc.ko. If you do not know what it is, it's safe to say "Y".
+	  Select this option if you want to use channel-to-channel
+	  point-to-point networking on IBM System z.
+	  This device driver supports real CTC coupling using ESCON.
+	  It also supports virtual CTCs when running under VM.
+	  This driver also supports channel-to-channel MPC SNA devices.
+	  MPC is an SNA protocol device used by Communication Server for Linux.
+	  To compile as a module, choose M. The module name is ctcm.ko.
+	  To compile into the kernel, choose Y.
+	  If you do not need any channel-to-channel connection, choose N.
 
 config NETIUCV
 	tristate "IUCV network device support (VM only)"
@@ -29,9 +32,9 @@ config NETIUCV
 	  Select this option if you want to use inter-user communication
 	  vehicle networking under VM or VIF. It enables a fast communication
 	  link between VM guests. Using ifconfig a point-to-point connection
-	  can be established to the Linux for zSeries and S7390 system
-	  running on the other VM guest. This option is also available
-	  as a module which will be called netiucv.ko. If unsure, say "Y".
+	  can be established to the Linux on IBM System z
+	  running on the other VM guest. To compile as a module, choose M.
+	  The module name is netiucv.ko. If unsure, choose Y.
 
 config SMSGIUCV
 	tristate "IUCV special message support (VM only)"
@@ -47,43 +50,46 @@ config CLAW
 	  This driver supports channel attached CLAW devices.
 	  CLAW is Common Link Access for Workstation. Common devices
 	  that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices.
-	  To compile as a module choose M here: The module will be called
-	  claw.ko to compile into the kernel choose Y
+	  To compile as a module, choose M. The module name is claw.ko.
+	  To compile into the kernel, choose Y.
 
 config QETH
 	tristate "Gigabit Ethernet device support"
 	depends on CCW && NETDEVICES && IP_MULTICAST && QDIO
 	help
-	  This driver supports the IBM S/390 and zSeries OSA Express adapters
+	  This driver supports the IBM System z OSA Express adapters
 	  in QDIO mode (all media types), HiperSockets interfaces and VM GuestLAN
 	  interfaces in QDIO and HIPER mode.
 
 	  For details please refer to the documentation provided by IBM at
-	  <http://www10.software.ibm.com/developerworks/opensource/linux390>
+	  <http://www.ibm.com/developerworks/linux/linux390>
 
-	  To compile this driver as a module, choose M here: the
-	  module will be called qeth.ko.
+	  To compile this driver as a module, choose M.
+	  The module name is qeth.ko.
 
+config QETH_L2
+	tristate "qeth layer 2 device support"
+	depends on QETH
+	help
+	  Select this option to be able to run qeth devices in layer 2 mode.
+	  To compile as a module, choose M. The module name is qeth_l2.ko.
+	  If unsure, choose y.
 
-comment "Gigabit Ethernet default settings"
-	depends on QETH
+config QETH_L3
+	tristate "qeth layer 3 device support"
+	depends on QETH
+	help
+	  Select this option to be able to run qeth devices in layer 3 mode.
+	  To compile as a module choose M. The module name is qeth_l3.ko.
+	  If unsure, choose Y.
 
 config QETH_IPV6
-	bool "IPv6 support for gigabit ethernet"
-	depends on (QETH = IPV6) || (QETH && IPV6 = 'y')
-	help
-	  If CONFIG_QETH is switched on, this option will include IPv6
-	  support in the qeth device driver.
-
-config QETH_VLAN
-	bool "VLAN support for gigabit ethernet"
-	depends on (QETH = VLAN_8021Q) || (QETH && VLAN_8021Q = 'y')
-	help
-	  If CONFIG_QETH is switched on, this option will include IEEE
-	  802.1q VLAN support in the qeth device driver.
+	bool
+	depends on (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
+	default y
 
 config CCWGROUP
 	tristate
-	default (LCS || CTC || QETH)
+	default (LCS || CTCM || QETH)
 
 endmenu
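With this change QETH_IPV6 becomes a silent bool that is defaulted to y whenever the QETH_L3/IPV6 combination permits it; C code consumes such an option only through the generated CONFIG_ macro. A minimal sketch of that consumption pattern, assuming a made-up helper name (the real call sites are not shown in this excerpt):

/*
 * Sketch only: how a silent Kconfig bool like QETH_IPV6 surfaces in C.
 * qeth_l3_ipv6_supported() is a hypothetical name, not from this patch.
 */
static inline int qeth_l3_ipv6_supported(void)
{
#ifdef CONFIG_QETH_IPV6
	return 1;	/* QETH_L3 and IPV6 lined up, option defaulted to y */
#else
	return 0;
#endif
}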
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index bbe3ab2e93d9..6382c04d2bdf 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -2,13 +2,15 @@
 # S/390 network devices
 #
 
-ctc-objs := ctcmain.o ctcdbug.o
-
+ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o
+obj-$(CONFIG_CTCM) += ctcm.o fsm.o cu3088.o
 obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
 obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
-obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
 obj-$(CONFIG_LCS) += lcs.o cu3088.o
 obj-$(CONFIG_CLAW) += claw.o cu3088.o
-qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o
-qeth-$(CONFIG_PROC_FS) += qeth_proc.o
+qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o qeth_core_offl.o
 obj-$(CONFIG_QETH) += qeth.o
+qeth_l2-y += qeth_l2_main.o
+obj-$(CONFIG_QETH_L2) += qeth_l2.o
+qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o
+obj-$(CONFIG_QETH_L3) += qeth_l3.o
diff --git a/drivers/s390/net/ctcdbug.c b/drivers/s390/net/ctcdbug.c
deleted file mode 100644
index e6e72deb36b5..000000000000
--- a/drivers/s390/net/ctcdbug.c
+++ /dev/null
@@ -1,80 +0,0 @@
1/*
2 *
3 * linux/drivers/s390/net/ctcdbug.c
4 *
5 * CTC / ESCON network driver - s390 dbf exploit.
6 *
7 * Copyright 2000,2003 IBM Corporation
8 *
9 * Author(s): Original Code written by
10 * Peter Tiedemann (ptiedem@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include "ctcdbug.h"
28
29/**
30 * Debug Facility Stuff
31 */
32debug_info_t *ctc_dbf_setup = NULL;
33debug_info_t *ctc_dbf_data = NULL;
34debug_info_t *ctc_dbf_trace = NULL;
35
36DEFINE_PER_CPU(char[256], ctc_dbf_txt_buf);
37
38void
39ctc_unregister_dbf_views(void)
40{
41 if (ctc_dbf_setup)
42 debug_unregister(ctc_dbf_setup);
43 if (ctc_dbf_data)
44 debug_unregister(ctc_dbf_data);
45 if (ctc_dbf_trace)
46 debug_unregister(ctc_dbf_trace);
47}
48int
49ctc_register_dbf_views(void)
50{
51 ctc_dbf_setup = debug_register(CTC_DBF_SETUP_NAME,
52 CTC_DBF_SETUP_PAGES,
53 CTC_DBF_SETUP_NR_AREAS,
54 CTC_DBF_SETUP_LEN);
55 ctc_dbf_data = debug_register(CTC_DBF_DATA_NAME,
56 CTC_DBF_DATA_PAGES,
57 CTC_DBF_DATA_NR_AREAS,
58 CTC_DBF_DATA_LEN);
59 ctc_dbf_trace = debug_register(CTC_DBF_TRACE_NAME,
60 CTC_DBF_TRACE_PAGES,
61 CTC_DBF_TRACE_NR_AREAS,
62 CTC_DBF_TRACE_LEN);
63
64 if ((ctc_dbf_setup == NULL) || (ctc_dbf_data == NULL) ||
65 (ctc_dbf_trace == NULL)) {
66 ctc_unregister_dbf_views();
67 return -ENOMEM;
68 }
69 debug_register_view(ctc_dbf_setup, &debug_hex_ascii_view);
70 debug_set_level(ctc_dbf_setup, CTC_DBF_SETUP_LEVEL);
71
72 debug_register_view(ctc_dbf_data, &debug_hex_ascii_view);
73 debug_set_level(ctc_dbf_data, CTC_DBF_DATA_LEVEL);
74
75 debug_register_view(ctc_dbf_trace, &debug_hex_ascii_view);
76 debug_set_level(ctc_dbf_trace, CTC_DBF_TRACE_LEVEL);
77
78 return 0;
79}
80
diff --git a/drivers/s390/net/ctcdbug.h b/drivers/s390/net/ctcdbug.h
deleted file mode 100644
index 413925ee23d1..000000000000
--- a/drivers/s390/net/ctcdbug.h
+++ /dev/null
@@ -1,125 +0,0 @@
1/*
2 *
3 * linux/drivers/s390/net/ctcdbug.h
4 *
5 * CTC / ESCON network driver - s390 dbf exploit.
6 *
7 * Copyright 2000,2003 IBM Corporation
8 *
9 * Author(s): Original Code written by
10 * Peter Tiedemann (ptiedem@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26#ifndef _CTCDBUG_H_
27#define _CTCDBUG_H_
28
29#include <asm/debug.h>
30#include "ctcmain.h"
31/**
32 * Debug Facility stuff
33 */
34#define CTC_DBF_SETUP_NAME "ctc_setup"
35#define CTC_DBF_SETUP_LEN 16
36#define CTC_DBF_SETUP_PAGES 8
37#define CTC_DBF_SETUP_NR_AREAS 1
38#define CTC_DBF_SETUP_LEVEL 3
39
40#define CTC_DBF_DATA_NAME "ctc_data"
41#define CTC_DBF_DATA_LEN 128
42#define CTC_DBF_DATA_PAGES 8
43#define CTC_DBF_DATA_NR_AREAS 1
44#define CTC_DBF_DATA_LEVEL 3
45
46#define CTC_DBF_TRACE_NAME "ctc_trace"
47#define CTC_DBF_TRACE_LEN 16
48#define CTC_DBF_TRACE_PAGES 4
49#define CTC_DBF_TRACE_NR_AREAS 2
50#define CTC_DBF_TRACE_LEVEL 3
51
52#define DBF_TEXT(name,level,text) \
53 do { \
54 debug_text_event(ctc_dbf_##name,level,text); \
55 } while (0)
56
57#define DBF_HEX(name,level,addr,len) \
58 do { \
59 debug_event(ctc_dbf_##name,level,(void*)(addr),len); \
60 } while (0)
61
62DECLARE_PER_CPU(char[256], ctc_dbf_txt_buf);
63extern debug_info_t *ctc_dbf_setup;
64extern debug_info_t *ctc_dbf_data;
65extern debug_info_t *ctc_dbf_trace;
66
67
68#define DBF_TEXT_(name,level,text...) \
69 do { \
70 char* ctc_dbf_txt_buf = get_cpu_var(ctc_dbf_txt_buf); \
71 sprintf(ctc_dbf_txt_buf, text); \
72 debug_text_event(ctc_dbf_##name,level,ctc_dbf_txt_buf); \
73 put_cpu_var(ctc_dbf_txt_buf); \
74 } while (0)
75
76#define DBF_SPRINTF(name,level,text...) \
77 do { \
78 debug_sprintf_event(ctc_dbf_trace, level, ##text ); \
79 debug_sprintf_event(ctc_dbf_trace, level, text ); \
80 } while (0)
81
82
83int ctc_register_dbf_views(void);
84
85void ctc_unregister_dbf_views(void);
86
87/**
88 * some more debug stuff
89 */
90
91#define HEXDUMP16(importance,header,ptr) \
92PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
93 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
94 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
95 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
96 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
97 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
98 *(((char*)ptr)+12),*(((char*)ptr)+13), \
99 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
100PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
101 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
102 *(((char*)ptr)+16),*(((char*)ptr)+17), \
103 *(((char*)ptr)+18),*(((char*)ptr)+19), \
104 *(((char*)ptr)+20),*(((char*)ptr)+21), \
105 *(((char*)ptr)+22),*(((char*)ptr)+23), \
106 *(((char*)ptr)+24),*(((char*)ptr)+25), \
107 *(((char*)ptr)+26),*(((char*)ptr)+27), \
108 *(((char*)ptr)+28),*(((char*)ptr)+29), \
109 *(((char*)ptr)+30),*(((char*)ptr)+31));
110
111static inline void
112hex_dump(unsigned char *buf, size_t len)
113{
114 size_t i;
115
116 for (i = 0; i < len; i++) {
117 if (i && !(i % 16))
118 printk("\n");
119 printk("%02x ", *(buf + i));
120 }
121 printk("\n");
122}
123
124
125#endif
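For contrast with the replacement interface introduced below, call sites of the removed macros looked like the following. The fragment is illustrative only, not taken from the patch; the area names (setup, data, trace) are the lower-case tokens this header pastes onto ctc_dbf_:

/* Hypothetical example function; not part of the removed code. */
static void ctc_dbf_example(void)
{
	unsigned char buf[16] = { 0 };

	DBF_TEXT(trace, 3, "chstart");		/* fixed string into ctc_trace */
	DBF_TEXT_(setup, 2, "start failed, rc=%d", -EBUSY); /* via per-cpu buffer */
	DBF_HEX(data, 3, buf, sizeof(buf));	/* raw hex dump into ctc_data */
}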
diff --git a/drivers/s390/net/ctcm_dbug.c b/drivers/s390/net/ctcm_dbug.c
new file mode 100644
index 000000000000..8eb25d00b2e7
--- /dev/null
+++ b/drivers/s390/net/ctcm_dbug.c
@@ -0,0 +1,67 @@
1/*
2 * drivers/s390/net/ctcm_dbug.c
3 *
4 * Copyright IBM Corp. 2001, 2007
5 * Authors: Peter Tiedemann (ptiedem@de.ibm.com)
6 *
7 */
8
9#include <linux/stddef.h>
10#include <linux/kernel.h>
11#include <linux/errno.h>
12#include <linux/slab.h>
13#include <linux/ctype.h>
14#include <linux/sysctl.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/fs.h>
18#include <linux/debugfs.h>
19#include "ctcm_dbug.h"
20
21/*
22 * Debug Facility Stuff
23 */
24
25DEFINE_PER_CPU(char[256], ctcm_dbf_txt_buf);
26
27struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS] = {
28 [CTCM_DBF_SETUP] = {"ctc_setup", 8, 1, 64, 5, NULL},
29 [CTCM_DBF_ERROR] = {"ctc_error", 8, 1, 64, 3, NULL},
30 [CTCM_DBF_TRACE] = {"ctc_trace", 8, 1, 64, 3, NULL},
31 [CTCM_DBF_MPC_SETUP] = {"mpc_setup", 8, 1, 64, 5, NULL},
32 [CTCM_DBF_MPC_ERROR] = {"mpc_error", 8, 1, 64, 3, NULL},
33 [CTCM_DBF_MPC_TRACE] = {"mpc_trace", 8, 1, 64, 3, NULL},
34};
35
36void ctcm_unregister_dbf_views(void)
37{
38 int x;
39 for (x = 0; x < CTCM_DBF_INFOS; x++) {
40 debug_unregister(ctcm_dbf[x].id);
41 ctcm_dbf[x].id = NULL;
42 }
43}
44
45int ctcm_register_dbf_views(void)
46{
47 int x;
48 for (x = 0; x < CTCM_DBF_INFOS; x++) {
49 /* register the areas */
50 ctcm_dbf[x].id = debug_register(ctcm_dbf[x].name,
51 ctcm_dbf[x].pages,
52 ctcm_dbf[x].areas,
53 ctcm_dbf[x].len);
54 if (ctcm_dbf[x].id == NULL) {
55 ctcm_unregister_dbf_views();
56 return -ENOMEM;
57 }
58
59 /* register a view */
60 debug_register_view(ctcm_dbf[x].id, &debug_hex_ascii_view);
61 /* set a passing level */
62 debug_set_level(ctcm_dbf[x].id, ctcm_dbf[x].level);
63 }
64
65 return 0;
66}
67
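ctcm_register_dbf_views() and ctcm_unregister_dbf_views() are meant to bracket the driver's lifetime. Note that the unregister path is safe even after a partial registration, since it simply walks the whole array (the error path above relies on that). A minimal sketch of the wiring, with illustrative function names; the real call sites live in ctcm_main.c, which is not part of this hunk:

#include <linux/init.h>
#include <linux/module.h>
#include "ctcm_dbug.h"

/* Sketch only: ctcm_example_init/_exit are hypothetical names. */
static int __init ctcm_example_init(void)
{
	int ret = ctcm_register_dbf_views();
	if (ret)
		return ret;	/* -ENOMEM: one of the debug_register() calls failed */
	/* ... register the ccwgroup driver etc. ... */
	return 0;
}

static void __exit ctcm_example_exit(void)
{
	/* ... unregister the ccwgroup driver ... */
	ctcm_unregister_dbf_views();
}

module_init(ctcm_example_init);
module_exit(ctcm_example_exit);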
diff --git a/drivers/s390/net/ctcm_dbug.h b/drivers/s390/net/ctcm_dbug.h
new file mode 100644
index 000000000000..fdff34fe59a2
--- /dev/null
+++ b/drivers/s390/net/ctcm_dbug.h
@@ -0,0 +1,158 @@
1/*
2 * drivers/s390/net/ctcm_dbug.h
3 *
4 * Copyright IBM Corp. 2001, 2007
5 * Authors: Peter Tiedemann (ptiedem@de.ibm.com)
6 *
7 */
8
9#ifndef _CTCM_DBUG_H_
10#define _CTCM_DBUG_H_
11
12/*
13 * Debug Facility stuff
14 */
15
16#include <asm/debug.h>
17
18#ifdef DEBUG
19 #define do_debug 1
20#else
21 #define do_debug 0
22#endif
23#ifdef DEBUGDATA
24 #define do_debug_data 1
25#else
26 #define do_debug_data 0
27#endif
28#ifdef DEBUGCCW
29 #define do_debug_ccw 1
30#else
31 #define do_debug_ccw 0
32#endif
33
34/* define dbf debug levels similar to kernel msg levels */
35#define CTC_DBF_ALWAYS 0 /* always print this */
36#define CTC_DBF_EMERG 0 /* system is unusable */
37#define CTC_DBF_ALERT 1 /* action must be taken immediately */
38#define CTC_DBF_CRIT 2 /* critical conditions */
39#define CTC_DBF_ERROR 3 /* error conditions */
40#define CTC_DBF_WARN 4 /* warning conditions */
41#define CTC_DBF_NOTICE 5 /* normal but significant condition */
42#define CTC_DBF_INFO 5 /* informational */
43#define CTC_DBF_DEBUG 6 /* debug-level messages */
44
45DECLARE_PER_CPU(char[256], ctcm_dbf_txt_buf);
46
47enum ctcm_dbf_names {
48 CTCM_DBF_SETUP,
49 CTCM_DBF_ERROR,
50 CTCM_DBF_TRACE,
51 CTCM_DBF_MPC_SETUP,
52 CTCM_DBF_MPC_ERROR,
53 CTCM_DBF_MPC_TRACE,
54 CTCM_DBF_INFOS /* must be last element */
55};
56
57struct ctcm_dbf_info {
58 char name[DEBUG_MAX_NAME_LEN];
59 int pages;
60 int areas;
61 int len;
62 int level;
63 debug_info_t *id;
64};
65
66extern struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS];
67
68int ctcm_register_dbf_views(void);
69void ctcm_unregister_dbf_views(void);
70
71static inline const char *strtail(const char *s, int n)
72{
73 int l = strlen(s);
74 return (l > n) ? s + (l - n) : s;
75}
76
77/* sort out levels early to avoid unnecessary sprintfs */
78static inline int ctcm_dbf_passes(debug_info_t *dbf_grp, int level)
79{
80 return (dbf_grp->level >= level);
81}
82
83#define CTCM_FUNTAIL strtail((char *)__func__, 16)
84
85#define CTCM_DBF_TEXT(name, level, text) \
86 do { \
87 debug_text_event(ctcm_dbf[CTCM_DBF_##name].id, level, text); \
88 } while (0)
89
90#define CTCM_DBF_HEX(name, level, addr, len) \
91 do { \
92 debug_event(ctcm_dbf[CTCM_DBF_##name].id, \
93 level, (void *)(addr), len); \
94 } while (0)
95
96#define CTCM_DBF_TEXT_(name, level, text...) \
97 do { \
98 if (ctcm_dbf_passes(ctcm_dbf[CTCM_DBF_##name].id, level)) { \
99 char *ctcm_dbf_txt_buf = \
100 get_cpu_var(ctcm_dbf_txt_buf); \
101 sprintf(ctcm_dbf_txt_buf, text); \
102 debug_text_event(ctcm_dbf[CTCM_DBF_##name].id, \
103 level, ctcm_dbf_txt_buf); \
104 put_cpu_var(ctcm_dbf_txt_buf); \
105 } \
106 } while (0)
107
108/*
109 * cat : one of {SETUP, MPC_SETUP, TRACE, MPC_TRACE, ERROR, MPC_ERROR}, pasted onto CTCM_DBF_.
110 * dev : netdevice with valid name field.
111 * text: any text string.
112 */
113#define CTCM_DBF_DEV_NAME(cat, dev, text) \
114 do { \
115 CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%s) : %s", \
116 CTCM_FUNTAIL, dev->name, text); \
117 } while (0)
118
119#define MPC_DBF_DEV_NAME(cat, dev, text) \
120 do { \
121 CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%s) : %s", \
122 CTCM_FUNTAIL, dev->name, text); \
123 } while (0)
124
125#define CTCMY_DBF_DEV_NAME(cat, dev, text) \
126 do { \
127 if (IS_MPCDEV(dev)) \
128 MPC_DBF_DEV_NAME(cat, dev, text); \
129 else \
130 CTCM_DBF_DEV_NAME(cat, dev, text); \
131 } while (0)
132
133/*
134 * cat : one of {SETUP, MPC_SETUP, TRACE, MPC_TRACE, ERROR, MPC_ERROR}, pasted onto CTCM_DBF_.
135 * dev : netdevice.
136 * text: any text string.
137 */
138#define CTCM_DBF_DEV(cat, dev, text) \
139 do { \
140 CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%p) : %s", \
141 CTCM_FUNTAIL, dev, text); \
142 } while (0)
143
144#define MPC_DBF_DEV(cat, dev, text) \
145 do { \
146 CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%p) : %s", \
147 CTCM_FUNTAIL, dev, text); \
148 } while (0)
149
150#define CTCMY_DBF_DEV(cat, dev, text) \
151 do { \
152 if (IS_MPCDEV(dev)) \
153 MPC_DBF_DEV(cat, dev, text); \
154 else \
155 CTCM_DBF_DEV(cat, dev, text); \
156 } while (0)
157
158#endif
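The area token is pasted onto CTCM_DBF_ by these macros, so call sites name the area in upper case and pick a level from the table above. An illustrative fragment (the function and the ch/dev/rc locals are assumptions for the example; struct channel comes from ctcm_main.h):

/* Hypothetical example of the call-site pattern; not from the patch. */
static void ctcm_dbf_example(struct channel *ch, struct net_device *dev, int rc)
{
	CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, "txdone");
	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
		       "%s: unexpected rc %d", CTCM_FUNTAIL, rc);
	CTCM_DBF_HEX(SETUP, CTC_DBF_INFO, ch->ccw, sizeof(struct ccw1) * 8);
	CTCM_DBF_DEV_NAME(SETUP, dev, "channel started");
}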
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
new file mode 100644
index 000000000000..2a106f3a076d
--- /dev/null
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -0,0 +1,2347 @@
1/*
2 * drivers/s390/net/ctcm_fsms.c
3 *
4 * Copyright IBM Corp. 2001, 2007
5 * Authors: Fritz Elfert (felfert@millenux.com)
6 * Peter Tiedemann (ptiedem@de.ibm.com)
7 * MPC additions :
8 * Belinda Thompson (belindat@us.ibm.com)
9 * Andy Richter (richtera@us.ibm.com)
10 */
11
12#undef DEBUG
13#undef DEBUGDATA
14#undef DEBUGCCW
15
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/errno.h>
21#include <linux/types.h>
22#include <linux/interrupt.h>
23#include <linux/timer.h>
24#include <linux/bitops.h>
25
26#include <linux/signal.h>
27#include <linux/string.h>
28
29#include <linux/ip.h>
30#include <linux/if_arp.h>
31#include <linux/tcp.h>
32#include <linux/skbuff.h>
33#include <linux/ctype.h>
34#include <net/dst.h>
35
36#include <linux/io.h>
37#include <asm/ccwdev.h>
38#include <asm/ccwgroup.h>
39#include <linux/uaccess.h>
40
41#include <asm/idals.h>
42
43#include "fsm.h"
44#include "cu3088.h"
45
46#include "ctcm_dbug.h"
47#include "ctcm_main.h"
48#include "ctcm_fsms.h"
49
50const char *dev_state_names[] = {
51 [DEV_STATE_STOPPED] = "Stopped",
52 [DEV_STATE_STARTWAIT_RXTX] = "StartWait RXTX",
53 [DEV_STATE_STARTWAIT_RX] = "StartWait RX",
54 [DEV_STATE_STARTWAIT_TX] = "StartWait TX",
55 [DEV_STATE_STOPWAIT_RXTX] = "StopWait RXTX",
56 [DEV_STATE_STOPWAIT_RX] = "StopWait RX",
57 [DEV_STATE_STOPWAIT_TX] = "StopWait TX",
58 [DEV_STATE_RUNNING] = "Running",
59};
60
61const char *dev_event_names[] = {
62 [DEV_EVENT_START] = "Start",
63 [DEV_EVENT_STOP] = "Stop",
64 [DEV_EVENT_RXUP] = "RX up",
65 [DEV_EVENT_TXUP] = "TX up",
66 [DEV_EVENT_RXDOWN] = "RX down",
67 [DEV_EVENT_TXDOWN] = "TX down",
68 [DEV_EVENT_RESTART] = "Restart",
69};
70
71const char *ctc_ch_event_names[] = {
72 [CTC_EVENT_IO_SUCCESS] = "ccw_device success",
73 [CTC_EVENT_IO_EBUSY] = "ccw_device busy",
74 [CTC_EVENT_IO_ENODEV] = "ccw_device enodev",
75 [CTC_EVENT_IO_UNKNOWN] = "ccw_device unknown",
76 [CTC_EVENT_ATTNBUSY] = "Status ATTN & BUSY",
77 [CTC_EVENT_ATTN] = "Status ATTN",
78 [CTC_EVENT_BUSY] = "Status BUSY",
79 [CTC_EVENT_UC_RCRESET] = "Unit check remote reset",
80 [CTC_EVENT_UC_RSRESET] = "Unit check remote system reset",
81 [CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
82 [CTC_EVENT_UC_TXPARITY] = "Unit check TX parity",
83 [CTC_EVENT_UC_HWFAIL] = "Unit check Hardware failure",
84 [CTC_EVENT_UC_RXPARITY] = "Unit check RX parity",
85 [CTC_EVENT_UC_ZERO] = "Unit check ZERO",
86 [CTC_EVENT_UC_UNKNOWN] = "Unit check Unknown",
87 [CTC_EVENT_SC_UNKNOWN] = "SubChannel check Unknown",
88 [CTC_EVENT_MC_FAIL] = "Machine check failure",
89 [CTC_EVENT_MC_GOOD] = "Machine check operational",
90 [CTC_EVENT_IRQ] = "IRQ normal",
91 [CTC_EVENT_FINSTAT] = "IRQ final",
92 [CTC_EVENT_TIMER] = "Timer",
93 [CTC_EVENT_START] = "Start",
94 [CTC_EVENT_STOP] = "Stop",
95 /*
96 * additional MPC events
97 */
98 [CTC_EVENT_SEND_XID] = "XID Exchange",
99 [CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
100};
101
102const char *ctc_ch_state_names[] = {
103 [CTC_STATE_IDLE] = "Idle",
104 [CTC_STATE_STOPPED] = "Stopped",
105 [CTC_STATE_STARTWAIT] = "StartWait",
106 [CTC_STATE_STARTRETRY] = "StartRetry",
107 [CTC_STATE_SETUPWAIT] = "SetupWait",
108 [CTC_STATE_RXINIT] = "RX init",
109 [CTC_STATE_TXINIT] = "TX init",
110 [CTC_STATE_RX] = "RX",
111 [CTC_STATE_TX] = "TX",
112 [CTC_STATE_RXIDLE] = "RX idle",
113 [CTC_STATE_TXIDLE] = "TX idle",
114 [CTC_STATE_RXERR] = "RX error",
115 [CTC_STATE_TXERR] = "TX error",
116 [CTC_STATE_TERM] = "Terminating",
117 [CTC_STATE_DTERM] = "Restarting",
118 [CTC_STATE_NOTOP] = "Not operational",
119 /*
120 * additional MPC states
121 */
122 [CH_XID0_PENDING] = "Pending XID0 Start",
123 [CH_XID0_INPROGRESS] = "In XID0 Negotiations ",
124 [CH_XID7_PENDING] = "Pending XID7 P1 Start",
125 [CH_XID7_PENDING1] = "Active XID7 P1 Exchange ",
126 [CH_XID7_PENDING2] = "Pending XID7 P2 Start ",
127 [CH_XID7_PENDING3] = "Active XID7 P2 Exchange ",
128 [CH_XID7_PENDING4] = "XID7 Complete - Pending READY ",
129};
130
131static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);
132
133/*
134 * ----- static ctcm actions for channel statemachine -----
135 *
136*/
137static void chx_txdone(fsm_instance *fi, int event, void *arg);
138static void chx_rx(fsm_instance *fi, int event, void *arg);
139static void chx_rxidle(fsm_instance *fi, int event, void *arg);
140static void chx_firstio(fsm_instance *fi, int event, void *arg);
141static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
142static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
143static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
144static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
145static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
146static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
147static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
148static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
149static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
150static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
151static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
152static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
153static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
154static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
155
156/*
157 * ----- static ctcmpc actions for ctcmpc channel statemachine -----
158 *
159*/
160static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
161static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
162static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
163/* shared :
164static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
165static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
166static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
167static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
168static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
169static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
170static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
171static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
172static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
173static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
174static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
175static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
176static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
177static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
178*/
179static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
180static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
181static void ctcmpc_chx_resend(fsm_instance *, int, void *);
182static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
183
184/**
185 * Check return code of a preceding ccw_device call, halt_IO etc...
186 *
187 * ch : The channel, the error belongs to.
188 * Returns the error code (!= 0) to inspect.
189 */
190void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
191{
192 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
193 "ccw error %s (%s): %04x\n", ch->id, msg, rc);
194 switch (rc) {
195 case -EBUSY:
196 ctcm_pr_warn("%s (%s): Busy !\n", ch->id, msg);
197 fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
198 break;
199 case -ENODEV:
200 ctcm_pr_emerg("%s (%s): Invalid device called for IO\n",
201 ch->id, msg);
202 fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
203 break;
204 default:
205 ctcm_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
206 ch->id, msg, rc);
207 fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
208 }
209}
210
211void ctcm_purge_skb_queue(struct sk_buff_head *q)
212{
213 struct sk_buff *skb;
214
215 CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);
216
217 while ((skb = skb_dequeue(q))) {
218 atomic_dec(&skb->users);
219 dev_kfree_skb_any(skb);
220 }
221}
222
223/**
224 * NOP action for statemachines
225 */
226static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
227{
228}
229
230/*
231 * Actions for channel - statemachines.
232 */
233
234/**
235 * Normal data has been sent. Free the corresponding
236 * skb (it's in io_queue), reset dev->tbusy and
237 * revert to idle state.
238 *
239 * fi An instance of a channel statemachine.
240 * event The event, just happened.
241 * arg Generic pointer, casted from channel * upon call.
242 */
243static void chx_txdone(fsm_instance *fi, int event, void *arg)
244{
245 struct channel *ch = arg;
246 struct net_device *dev = ch->netdev;
247 struct ctcm_priv *priv = dev->priv;
248 struct sk_buff *skb;
249 int first = 1;
250 int i;
251 unsigned long duration;
252 struct timespec done_stamp = current_kernel_time(); /* xtime */
253
254 duration =
255 (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
256 (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
257 if (duration > ch->prof.tx_time)
258 ch->prof.tx_time = duration;
259
260 if (ch->irb->scsw.count != 0)
261 ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n",
262 dev->name, ch->irb->scsw.count);
263 fsm_deltimer(&ch->timer);
264 while ((skb = skb_dequeue(&ch->io_queue))) {
265 priv->stats.tx_packets++;
266 priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
267 if (first) {
268 priv->stats.tx_bytes += 2;
269 first = 0;
270 }
271 atomic_dec(&skb->users);
272 dev_kfree_skb_irq(skb);
273 }
274 spin_lock(&ch->collect_lock);
275 clear_normalized_cda(&ch->ccw[4]);
276 if (ch->collect_len > 0) {
277 int rc;
278
279 if (ctcm_checkalloc_buffer(ch)) {
280 spin_unlock(&ch->collect_lock);
281 return;
282 }
283 ch->trans_skb->data = ch->trans_skb_data;
284 skb_reset_tail_pointer(ch->trans_skb);
285 ch->trans_skb->len = 0;
286 if (ch->prof.maxmulti < (ch->collect_len + 2))
287 ch->prof.maxmulti = ch->collect_len + 2;
288 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
289 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
290 *((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
291 i = 0;
292 while ((skb = skb_dequeue(&ch->collect_queue))) {
293 skb_copy_from_linear_data(skb,
294 skb_put(ch->trans_skb, skb->len), skb->len);
295 priv->stats.tx_packets++;
296 priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
297 atomic_dec(&skb->users);
298 dev_kfree_skb_irq(skb);
299 i++;
300 }
301 ch->collect_len = 0;
302 spin_unlock(&ch->collect_lock);
303 ch->ccw[1].count = ch->trans_skb->len;
304 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
305 ch->prof.send_stamp = current_kernel_time(); /* xtime */
306 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
307 (unsigned long)ch, 0xff, 0);
308 ch->prof.doios_multi++;
309 if (rc != 0) {
310 priv->stats.tx_dropped += i;
311 priv->stats.tx_errors += i;
312 fsm_deltimer(&ch->timer);
313 ctcm_ccw_check_rc(ch, rc, "chained TX");
314 }
315 } else {
316 spin_unlock(&ch->collect_lock);
317 fsm_newstate(fi, CTC_STATE_TXIDLE);
318 }
319 ctcm_clear_busy_do(dev);
320}
321
322/**
323 * Initial data is sent.
324 * Notify device statemachine that we are up and
325 * running.
326 *
327 * fi An instance of a channel statemachine.
328 * event The event, just happened.
329 * arg Generic pointer, casted from channel * upon call.
330 */
331void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
332{
333 struct channel *ch = arg;
334 struct net_device *dev = ch->netdev;
335 struct ctcm_priv *priv = dev->priv;
336
337 CTCM_DBF_TEXT(TRACE, 6, __FUNCTION__);
338 fsm_deltimer(&ch->timer);
339 fsm_newstate(fi, CTC_STATE_TXIDLE);
340 fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
341}
342
343/**
344 * Got normal data, check for sanity, queue it up, allocate new buffer
345 * trigger bottom half, and initiate next read.
346 *
347 * fi An instance of a channel statemachine.
348 * event The event, just happened.
349 * arg Generic pointer, casted from channel * upon call.
350 */
351static void chx_rx(fsm_instance *fi, int event, void *arg)
352{
353 struct channel *ch = arg;
354 struct net_device *dev = ch->netdev;
355 struct ctcm_priv *priv = dev->priv;
356 int len = ch->max_bufsize - ch->irb->scsw.count;
357 struct sk_buff *skb = ch->trans_skb;
358 __u16 block_len = *((__u16 *)skb->data);
359 int check_len;
360 int rc;
361
362 fsm_deltimer(&ch->timer);
363 if (len < 8) {
364 ctcm_pr_debug("%s: got packet with length %d < 8\n",
365 dev->name, len);
366 priv->stats.rx_dropped++;
367 priv->stats.rx_length_errors++;
368 goto again;
369 }
370 if (len > ch->max_bufsize) {
371 ctcm_pr_debug("%s: got packet with length %d > %d\n",
372 dev->name, len, ch->max_bufsize);
373 priv->stats.rx_dropped++;
374 priv->stats.rx_length_errors++;
375 goto again;
376 }
377
378 /*
379 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
380 */
381 switch (ch->protocol) {
382 case CTCM_PROTO_S390:
383 case CTCM_PROTO_OS390:
384 check_len = block_len + 2;
385 break;
386 default:
387 check_len = block_len;
388 break;
389 }
390 if ((len < block_len) || (len > check_len)) {
391 ctcm_pr_debug("%s: got block length %d != rx length %d\n",
392 dev->name, block_len, len);
393 if (do_debug)
394 ctcmpc_dump_skb(skb, 0);
395
396 *((__u16 *)skb->data) = len;
397 priv->stats.rx_dropped++;
398 priv->stats.rx_length_errors++;
399 goto again;
400 }
401 block_len -= 2;
402 if (block_len > 0) {
403 *((__u16 *)skb->data) = block_len;
404 ctcm_unpack_skb(ch, skb);
405 }
406 again:
407 skb->data = ch->trans_skb_data;
408 skb_reset_tail_pointer(skb);
409 skb->len = 0;
410 if (ctcm_checkalloc_buffer(ch))
411 return;
412 ch->ccw[1].count = ch->max_bufsize;
413 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
414 (unsigned long)ch, 0xff, 0);
415 if (rc != 0)
416 ctcm_ccw_check_rc(ch, rc, "normal RX");
417}
418
419/**
420 * Initialize connection by sending a __u16 of value 0.
421 *
422 * fi An instance of a channel statemachine.
423 * event The event, just happened.
424 * arg Generic pointer, casted from channel * upon call.
425 */
426static void chx_firstio(fsm_instance *fi, int event, void *arg)
427{
428 struct channel *ch = arg;
429 int rc;
430
431 CTCM_DBF_TEXT(TRACE, 6, __FUNCTION__);
432
433 if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
434 ctcm_pr_debug("%s: remote side issued READ?, init.\n", ch->id);
435 fsm_deltimer(&ch->timer);
436 if (ctcm_checkalloc_buffer(ch))
437 return;
438 if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
439 (ch->protocol == CTCM_PROTO_OS390)) {
440 /* OS/390 resp. z/OS */
441 if (CHANNEL_DIRECTION(ch->flags) == READ) {
442 *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
443 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
444 CTC_EVENT_TIMER, ch);
445 chx_rxidle(fi, event, arg);
446 } else {
447 struct net_device *dev = ch->netdev;
448 struct ctcm_priv *priv = dev->priv;
449 fsm_newstate(fi, CTC_STATE_TXIDLE);
450 fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
451 }
452 return;
453 }
454
455 /*
456 * Don't set up a timer for receiving the initial RX frame
457 * if in compatibility mode, since VM TCP delays the initial
458 * frame until it has some data to send.
459 */
460 if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
461 (ch->protocol != CTCM_PROTO_S390))
462 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
463
464 *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
465 ch->ccw[1].count = 2; /* Transfer only length */
466
467 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
468 ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
469 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
470 (unsigned long)ch, 0xff, 0);
471 if (rc != 0) {
472 fsm_deltimer(&ch->timer);
473 fsm_newstate(fi, CTC_STATE_SETUPWAIT);
474 ctcm_ccw_check_rc(ch, rc, "init IO");
475 }
476 /*
477 * Since we don't set up a timer in compatibility mode, we
478 * also signal RX channel up immediately. This enables us
479 * to send packets early which in turn usually triggers some
480 * reply from VM TCP which brings up the RX channel to its
481 * final state.
482 */
483 if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
484 (ch->protocol == CTCM_PROTO_S390)) {
485 struct net_device *dev = ch->netdev;
486 struct ctcm_priv *priv = dev->priv;
487 fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
488 }
489}
490
491/**
492 * Got initial data, check it. If OK,
493 * notify device statemachine that we are up and
494 * running.
495 *
496 * fi An instance of a channel statemachine.
497 * event The event, just happened.
498 * arg Generic pointer, casted from channel * upon call.
499 */
500static void chx_rxidle(fsm_instance *fi, int event, void *arg)
501{
502 struct channel *ch = arg;
503 struct net_device *dev = ch->netdev;
504 struct ctcm_priv *priv = dev->priv;
505 __u16 buflen;
506 int rc;
507
508 CTCM_DBF_TEXT(TRACE, 6, __FUNCTION__);
509 fsm_deltimer(&ch->timer);
510 buflen = *((__u16 *)ch->trans_skb->data);
511 if (do_debug)
512 ctcm_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
513
514 if (buflen >= CTCM_INITIAL_BLOCKLEN) {
515 if (ctcm_checkalloc_buffer(ch))
516 return;
517 ch->ccw[1].count = ch->max_bufsize;
518 fsm_newstate(fi, CTC_STATE_RXIDLE);
519 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
520 (unsigned long)ch, 0xff, 0);
521 if (rc != 0) {
522 fsm_newstate(fi, CTC_STATE_RXINIT);
523 ctcm_ccw_check_rc(ch, rc, "initial RX");
524 } else
525 fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
526 } else {
527 if (do_debug)
528 ctcm_pr_debug("%s: Initial RX count %d not %d\n",
529 dev->name, buflen, CTCM_INITIAL_BLOCKLEN);
530 chx_firstio(fi, event, arg);
531 }
532}
533
534/**
535 * Set channel into extended mode.
536 *
537 * fi An instance of a channel statemachine.
538 * event The event, just happened.
539 * arg Generic pointer, casted from channel * upon call.
540 */
541static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
542{
543 struct channel *ch = arg;
544 int rc;
545 unsigned long saveflags = 0;
546 int timeout = CTCM_TIME_5_SEC;
547
548 fsm_deltimer(&ch->timer);
549 if (IS_MPC(ch)) {
550 timeout = 1500;
551 if (do_debug)
552 ctcm_pr_debug("ctcm enter: %s(): cp=%i ch=0x%p id=%s\n",
553 __FUNCTION__, smp_processor_id(), ch, ch->id);
554 }
555 fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
556 fsm_newstate(fi, CTC_STATE_SETUPWAIT);
557 if (do_debug_ccw && IS_MPC(ch))
558 ctcmpc_dumpit((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);
559
560 if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */
561 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
562 /* Such conditional locking is nondeterministic in a
563 * static view. => ignore sparse warnings here. */
564
565 rc = ccw_device_start(ch->cdev, &ch->ccw[6],
566 (unsigned long)ch, 0xff, 0);
567 if (event == CTC_EVENT_TIMER) /* see above comments */
568 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
569 if (rc != 0) {
570 fsm_deltimer(&ch->timer);
571 fsm_newstate(fi, CTC_STATE_STARTWAIT);
572 ctcm_ccw_check_rc(ch, rc, "set Mode");
573 } else
574 ch->retry = 0;
575}
576
577/**
578 * Setup channel.
579 *
580 * fi An instance of a channel statemachine.
581 * event The event, just happened.
582 * arg Generic pointer, casted from channel * upon call.
583 */
584static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
585{
586 struct channel *ch = arg;
587 int rc;
588 struct net_device *dev;
589 unsigned long saveflags;
590
591 CTCM_DBF_TEXT(TRACE, 5, __FUNCTION__);
592 if (ch == NULL) {
593 ctcm_pr_warn("chx_start ch=NULL\n");
594 return;
595 }
596 if (ch->netdev == NULL) {
597 ctcm_pr_warn("chx_start dev=NULL, id=%s\n", ch->id);
598 return;
599 }
600 dev = ch->netdev;
601
602 if (do_debug)
603 ctcm_pr_debug("%s: %s channel start\n", dev->name,
604 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
605
606 if (ch->trans_skb != NULL) {
607 clear_normalized_cda(&ch->ccw[1]);
608 dev_kfree_skb(ch->trans_skb);
609 ch->trans_skb = NULL;
610 }
611 if (CHANNEL_DIRECTION(ch->flags) == READ) {
612 ch->ccw[1].cmd_code = CCW_CMD_READ;
613 ch->ccw[1].flags = CCW_FLAG_SLI;
614 ch->ccw[1].count = 0;
615 } else {
616 ch->ccw[1].cmd_code = CCW_CMD_WRITE;
617 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
618 ch->ccw[1].count = 0;
619 }
620 if (ctcm_checkalloc_buffer(ch)) {
621 ctcm_pr_notice("%s: %s trans_skb allocation delayed "
622 "until first transfer\n", dev->name,
623 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
624 }
625
626 ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
627 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
628 ch->ccw[0].count = 0;
629 ch->ccw[0].cda = 0;
630 ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */
631 ch->ccw[2].flags = CCW_FLAG_SLI;
632 ch->ccw[2].count = 0;
633 ch->ccw[2].cda = 0;
634 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
635 ch->ccw[4].cda = 0;
636 ch->ccw[4].flags &= ~CCW_FLAG_IDA;
637
638 fsm_newstate(fi, CTC_STATE_STARTWAIT);
639 fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
640 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
641 rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
642 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
643 if (rc != 0) {
644 if (rc != -EBUSY)
645 fsm_deltimer(&ch->timer);
646 ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
647 }
648}
649
650/**
651 * Shutdown a channel.
652 *
653 * fi An instance of a channel statemachine.
654 * event The event, just happened.
655 * arg Generic pointer, casted from channel * upon call.
656 */
657static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
658{
659 struct channel *ch = arg;
660 unsigned long saveflags = 0;
661 int rc;
662 int oldstate;
663
664 CTCM_DBF_TEXT(TRACE, 2, __FUNCTION__);
665 fsm_deltimer(&ch->timer);
666 if (IS_MPC(ch))
667 fsm_deltimer(&ch->sweep_timer);
668
669 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
670
671 if (event == CTC_EVENT_STOP) /* only for STOP not yet locked */
672 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
673 /* Such conditional locking is nondeterministic in a
674 * static view. => ignore sparse warnings here. */
675 oldstate = fsm_getstate(fi);
676 fsm_newstate(fi, CTC_STATE_TERM);
677 rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
678
679 if (event == CTC_EVENT_STOP)
680 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
681 /* see remark above about conditional locking */
682
683 if (rc != 0 && rc != -EBUSY) {
684 fsm_deltimer(&ch->timer);
685 if (event != CTC_EVENT_STOP) {
686 fsm_newstate(fi, oldstate);
687 ctcm_ccw_check_rc(ch, rc, (char *)__FUNCTION__);
688 }
689 }
690}
691
692/**
693 * Cleanup helper for chx_fail and chx_stopped
694 * clean up the channel's queue and notify the interface statemachine.
695 *
696 * fi An instance of a channel statemachine.
697 * state The next state (depending on caller).
698 * ch The channel to operate on.
699 */
700static void ctcm_chx_cleanup(fsm_instance *fi, int state,
701 struct channel *ch)
702{
703 struct net_device *dev = ch->netdev;
704 struct ctcm_priv *priv = dev->priv;
705
706 CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);
707
708 fsm_deltimer(&ch->timer);
709 if (IS_MPC(ch))
710 fsm_deltimer(&ch->sweep_timer);
711
712 fsm_newstate(fi, state);
713 if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
714 clear_normalized_cda(&ch->ccw[1]);
715 dev_kfree_skb_any(ch->trans_skb);
716 ch->trans_skb = NULL;
717 }
718
719 ch->th_seg = 0x00;
720 ch->th_seq_num = 0x00;
721 if (CHANNEL_DIRECTION(ch->flags) == READ) {
722 skb_queue_purge(&ch->io_queue);
723 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
724 } else {
725 ctcm_purge_skb_queue(&ch->io_queue);
726 if (IS_MPC(ch))
727 ctcm_purge_skb_queue(&ch->sweep_queue);
728 spin_lock(&ch->collect_lock);
729 ctcm_purge_skb_queue(&ch->collect_queue);
730 ch->collect_len = 0;
731 spin_unlock(&ch->collect_lock);
732 fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
733 }
734}
735
736/**
737 * A channel has successfully been halted.
738 * Clean up its queue and notify the interface statemachine.
739 *
740 * fi An instance of a channel statemachine.
741 * event The event, just happened.
742 * arg Generic pointer, casted from channel * upon call.
743 */
744static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
745{
746 CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);
747 ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
748}
749
750/**
751 * A stop command from device statemachine arrived and we are in
752 * not operational mode. Set state to stopped.
753 *
754 * fi An instance of a channel statemachine.
755 * event The event, just happened.
756 * arg Generic pointer, casted from channel * upon call.
757 */
758static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
759{
760 fsm_newstate(fi, CTC_STATE_STOPPED);
761}
762
763/**
764 * A machine check for no path, not-operational status or a gone device
765 * has happened.
766 * Cleanup queue and notify interface statemachine.
767 *
768 * fi An instance of a channel statemachine.
769 * event The event, just happened.
770 * arg Generic pointer, casted from channel * upon call.
771 */
772static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
773{
774 CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);
775 ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
776}
777
778/**
779 * Handle error during setup of channel.
780 *
781 * fi An instance of a channel statemachine.
782 * event The event, just happened.
783 * arg Generic pointer, casted from channel * upon call.
784 */
785static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
786{
787 struct channel *ch = arg;
788 struct net_device *dev = ch->netdev;
789 struct ctcm_priv *priv = dev->priv;
790
791 /*
792 * Special case: Got UC_RCRESET on setmode.
793 * This means that remote side isn't setup. In this case
794 * simply retry after some 10 secs...
795 */
796 if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
797 ((event == CTC_EVENT_UC_RCRESET) ||
798 (event == CTC_EVENT_UC_RSRESET))) {
799 fsm_newstate(fi, CTC_STATE_STARTRETRY);
800 fsm_deltimer(&ch->timer);
801 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
802 if (!IS_MPC(ch) && (CHANNEL_DIRECTION(ch->flags) == READ)) {
803 int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
804 if (rc != 0)
805 ctcm_ccw_check_rc(ch, rc,
806 "HaltIO in chx_setuperr");
807 }
808 return;
809 }
810
811 CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
812 "%s : %s error during %s channel setup state=%s\n",
813 dev->name, ctc_ch_event_names[event],
814 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
815 fsm_getstate_str(fi));
816
817 if (CHANNEL_DIRECTION(ch->flags) == READ) {
818 fsm_newstate(fi, CTC_STATE_RXERR);
819 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
820 } else {
821 fsm_newstate(fi, CTC_STATE_TXERR);
822 fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
823 }
824}
825
826/**
827 * Restart a channel after an error.
828 *
829 * fi An instance of a channel statemachine.
830 * event The event, just happened.
831 * arg Generic pointer, casted from channel * upon call.
832 */
833static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
834{
835 struct channel *ch = arg;
836 struct net_device *dev = ch->netdev;
837 unsigned long saveflags = 0;
838 int oldstate;
839 int rc;
840
841 CTCM_DBF_TEXT(TRACE, CTC_DBF_NOTICE, __FUNCTION__);
842 fsm_deltimer(&ch->timer);
843 ctcm_pr_debug("%s: %s channel restart\n", dev->name,
844 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
845 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
846 oldstate = fsm_getstate(fi);
847 fsm_newstate(fi, CTC_STATE_STARTWAIT);
848 if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */
849 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
850 /* Such conditional locking is a known problem for
851 * sparse because it is nondeterministic in a static view.
852 * Warnings should be ignored here. */
853 rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
854 if (event == CTC_EVENT_TIMER)
855 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
856 if (rc != 0) {
857 if (rc != -EBUSY) {
858 fsm_deltimer(&ch->timer);
859 fsm_newstate(fi, oldstate);
860 }
861 ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
862 }
863}
864
865/**
866 * Handle error during RX initial handshake (exchange of
867 * 0-length block header)
868 *
869 * fi An instance of a channel statemachine.
870 * event The event, just happened.
871 * arg Generic pointer, casted from channel * upon call.
872 */
873static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
874{
875 struct channel *ch = arg;
876 struct net_device *dev = ch->netdev;
877 struct ctcm_priv *priv = dev->priv;
878
879 CTCM_DBF_TEXT(SETUP, 3, __FUNCTION__);
880 if (event == CTC_EVENT_TIMER) {
881 if (!IS_MPCDEV(dev))
882 /* TODO : check if MPC deletes timer somewhere */
883 fsm_deltimer(&ch->timer);
884 ctcm_pr_debug("%s: Timeout during RX init handshake\n",
885 dev->name);
886 if (ch->retry++ < 3)
887 ctcm_chx_restart(fi, event, arg);
888 else {
889 fsm_newstate(fi, CTC_STATE_RXERR);
890 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
891 }
892 } else
893 ctcm_pr_warn("%s: Error during RX init handshake\n", dev->name);
894}
895
896/**
897 * Notify device statemachine if we gave up initialization
898 * of RX channel.
899 *
900 * fi An instance of a channel statemachine.
901 * event The event, just happened.
902 * arg Generic pointer, casted from channel * upon call.
903 */
904static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
905{
906 struct channel *ch = arg;
907 struct net_device *dev = ch->netdev;
908 struct ctcm_priv *priv = dev->priv;
909
910 CTCM_DBF_TEXT(SETUP, 3, __FUNCTION__);
911 fsm_newstate(fi, CTC_STATE_RXERR);
912 ctcm_pr_warn("%s: RX busy. Initialization failed\n", dev->name);
913 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
914}
915
916/**
917 * Handle RX Unit check remote reset (remote disconnected)
918 *
919 * fi An instance of a channel statemachine.
920 * event The event, just happened.
921 * arg Generic pointer, casted from channel * upon call.
922 */
923static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
924{
925 struct channel *ch = arg;
926 struct channel *ch2;
927 struct net_device *dev = ch->netdev;
928 struct ctcm_priv *priv = dev->priv;
929
930 CTCM_DBF_DEV_NAME(TRACE, dev, "Got remote disconnect, re-initializing");
931 fsm_deltimer(&ch->timer);
932 if (do_debug)
933 ctcm_pr_debug("%s: Got remote disconnect, "
934 "re-initializing ...\n", dev->name);
935 /*
936 * Notify device statemachine
937 */
938 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
939 fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
940
941 fsm_newstate(fi, CTC_STATE_DTERM);
942 ch2 = priv->channel[WRITE];
943 fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
944
945 ccw_device_halt(ch->cdev, (unsigned long)ch);
946 ccw_device_halt(ch2->cdev, (unsigned long)ch2);
947}
948
949/**
950 * Handle error during TX channel initialization.
951 *
952 * fi An instance of a channel statemachine.
953 * event The event, just happened.
954 * arg Generic pointer, casted from channel * upon call.
955 */
956static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
957{
958 struct channel *ch = arg;
959 struct net_device *dev = ch->netdev;
960 struct ctcm_priv *priv = dev->priv;
961
962 if (event == CTC_EVENT_TIMER) {
963 fsm_deltimer(&ch->timer);
964 CTCM_DBF_DEV_NAME(ERROR, dev,
965 "Timeout during TX init handshake");
966 if (ch->retry++ < 3)
967 ctcm_chx_restart(fi, event, arg);
968 else {
969 fsm_newstate(fi, CTC_STATE_TXERR);
970 fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
971 }
972 } else {
973 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
974 "%s : %s error during channel setup state=%s",
975 dev->name, ctc_ch_event_names[event],
976 fsm_getstate_str(fi));
977
978 ctcm_pr_warn("%s: Error during TX init handshake\n", dev->name);
979 }
980}
981
982/**
983 * Handle TX timeout by retrying operation.
984 *
985 * fi An instance of a channel statemachine.
986 * event The event, just happened.
987 * arg Generic pointer, casted from channel * upon call.
988 */
989static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
990{
991 struct channel *ch = arg;
992 struct net_device *dev = ch->netdev;
993 struct ctcm_priv *priv = dev->priv;
994 struct sk_buff *skb;
995
996 if (do_debug)
997 ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
998 __FUNCTION__, smp_processor_id(), ch, ch->id);
999
1000 fsm_deltimer(&ch->timer);
1001 if (ch->retry++ > 3) {
1002 struct mpc_group *gptr = priv->mpcg;
1003 ctcm_pr_debug("%s: TX retry failed, restarting channel\n",
1004 dev->name);
1005 fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1006 /* call restart if not MPC or if MPC and mpcg fsm is ready.
1007 use gptr as mpc indicator */
1008 if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
1009 ctcm_chx_restart(fi, event, arg);
1010 goto done;
1011 }
1012
1013 ctcm_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
1014 skb = skb_peek(&ch->io_queue);
1015 if (skb) {
1016 int rc = 0;
1017 unsigned long saveflags = 0;
1018 clear_normalized_cda(&ch->ccw[4]);
1019 ch->ccw[4].count = skb->len;
1020 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1021 ctcm_pr_debug("%s: IDAL alloc failed, chan restart\n",
1022 dev->name);
1023 fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1024 ctcm_chx_restart(fi, event, arg);
1025 goto done;
1026 }
1027 fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
1028 if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */
1029 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1030 /* Such conditional locking is a known problem for
1031 * sparse because it is nondeterministic in a static view.
1032 * Warnings should be ignored here. */
1033 if (do_debug_ccw)
1034 ctcmpc_dumpit((char *)&ch->ccw[3],
1035 sizeof(struct ccw1) * 3);
1036
1037 rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1038 (unsigned long)ch, 0xff, 0);
1039 if (event == CTC_EVENT_TIMER)
1040 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1041 saveflags);
1042 if (rc != 0) {
1043 fsm_deltimer(&ch->timer);
1044 ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
1045 ctcm_purge_skb_queue(&ch->io_queue);
1046 }
1047 }
1048done:
1049 return;
1050}
1051
1052/**
1053 * Handle fatal errors during an I/O command.
1054 *
1055 * fi An instance of a channel statemachine.
1056 * event The event, just happened.
1057 * arg Generic pointer, casted from channel * upon call.
1058 */
1059static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
1060{
1061 struct channel *ch = arg;
1062 struct net_device *dev = ch->netdev;
1063 struct ctcm_priv *priv = dev->priv;
1064
1065 CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);
1066 fsm_deltimer(&ch->timer);
1067 ctcm_pr_warn("%s %s : unrecoverable channel error\n",
1068 CTC_DRIVER_NAME, dev->name);
1069 if (IS_MPC(ch)) {
1070 priv->stats.tx_dropped++;
1071 priv->stats.tx_errors++;
1072 }
1073
1074 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1075 ctcm_pr_debug("%s: RX I/O error\n", dev->name);
1076 fsm_newstate(fi, CTC_STATE_RXERR);
1077 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
1078 } else {
1079 ctcm_pr_debug("%s: TX I/O error\n", dev->name);
1080 fsm_newstate(fi, CTC_STATE_TXERR);
1081 fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1082 }
1083}
1084
1085/*
1086 * The ctcm statemachine for a channel.
1087 */
1088const fsm_node ch_fsm[] = {
1089 { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
1090 { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
1091 { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
1092 { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1093
1094 { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
1095 { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
1096 { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
1097 { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1098 { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },
1099
1100 { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1101 { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
1102 { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1103 { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
1104 { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1105 { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1106
1107 { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
1108 { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
1109 { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_action_nop },
1110 { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1111
1112 { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1113 { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
1114 { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, chx_firstio },
1115 { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1116 { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1117 { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
1118 { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1119 { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1120
1121 { CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1122 { CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
1123 { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, chx_rxidle },
1124 { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
1125 { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
1126 { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
1127 { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
1128 { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1129 { CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, chx_firstio },
1130 { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1131
1132 { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
1133 { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
1134 { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, chx_rx },
1135 { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
1136 { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1137 { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1138 { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, chx_rx },
1139
1140 { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1141 { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
1142 { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
1143 { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
1144 { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
1145 { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
1146 { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1147 { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1148
1149 { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
1150 { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
1151 { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, chx_firstio },
1152 { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
1153 { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
1154 { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1155 { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1156
1157 { CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
1158 { CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
1159 { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
1160 { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
1161 { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
1162 { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1163
1164 { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
1165 { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
1166 { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1167 { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
1168 { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
1169 { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1170
1171 { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
1172 { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
1173 { CTC_STATE_TX, CTC_EVENT_FINSTAT, chx_txdone },
1174 { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_txretry },
1175 { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_txretry },
1176 { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
1177 { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1178 { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1179
1180 { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
1181 { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
1182 { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1183 { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1184};
1185
1186int ch_fsm_len = ARRAY_SIZE(ch_fsm);
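/*
 * Illustration only (not code from this hunk): a channel binds this
 * table to an fsm_instance via init_fsm() from the local fsm.h. The
 * actual call site is in ctcm_main.c; the sketch below assumes the
 * fsm.h API and typical arguments, it is not quoted driver code.
 *
 *	ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
 *			ctc_ch_event_names, CTC_NR_STATES, CTC_NR_EVENTS,
 *			ch_fsm, ch_fsm_len, GFP_KERNEL);
 *
 * fsm_event() then looks up the current (state, event) pair in this
 * table and invokes the registered action; pairs without an entry
 * are ignored.
 */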
1187
1188/*
1189 * MPC actions for the mpc channel statemachine.
1190 * Handling of the MPC protocol requires an extra
1191 * statemachine and actions, which are prefixed ctcmpc_ .
1192 * The ctc_ch_states and ctc_ch_state_names,
1193 * ctc_ch_events and ctc_ch_event_names share the ctcm definitions,
1194 * which are expanded by some additional elements.
1195 */
1196
1197/*
1198 * Actions for mpc channel statemachine.
1199 */
1200
1201/**
 1202 * Normal data has been sent. Free the corresponding
1203 * skb (it's in io_queue), reset dev->tbusy and
1204 * revert to idle state.
1205 *
1206 * fi An instance of a channel statemachine.
 1207 * event The event that just happened.
 1208 * arg Generic pointer, cast from channel * upon call.
1209 */
1210static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
1211{
1212 struct channel *ch = arg;
1213 struct net_device *dev = ch->netdev;
1214 struct ctcm_priv *priv = dev->priv;
1215 struct mpc_group *grp = priv->mpcg;
1216 struct sk_buff *skb;
1217 int first = 1;
1218 int i;
1219 struct timespec done_stamp;
1220 __u32 data_space;
1221 unsigned long duration;
1222 struct sk_buff *peekskb;
1223 int rc;
1224 struct th_header *header;
1225 struct pdu *p_header;
1226
1227 if (do_debug)
1228 ctcm_pr_debug("%s cp:%i enter: %s()\n",
1229 dev->name, smp_processor_id(), __FUNCTION__);
1230
1231 done_stamp = current_kernel_time(); /* xtime */
1232 duration = (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000
1233 + (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
1234 if (duration > ch->prof.tx_time)
1235 ch->prof.tx_time = duration;
1236
1237 if (ch->irb->scsw.count != 0)
1238 ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n",
1239 dev->name, ch->irb->scsw.count);
1240 fsm_deltimer(&ch->timer);
1241 while ((skb = skb_dequeue(&ch->io_queue))) {
1242 priv->stats.tx_packets++;
1243 priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
1244 if (first) {
1245 priv->stats.tx_bytes += 2;
1246 first = 0;
1247 }
1248 atomic_dec(&skb->users);
1249 dev_kfree_skb_irq(skb);
1250 }
1251 spin_lock(&ch->collect_lock);
1252 clear_normalized_cda(&ch->ccw[4]);
1253
1254 if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
1255 spin_unlock(&ch->collect_lock);
1256 fsm_newstate(fi, CTC_STATE_TXIDLE);
1257 goto done;
1258 }
1259
1260 if (ctcm_checkalloc_buffer(ch)) {
1261 spin_unlock(&ch->collect_lock);
1262 goto done;
1263 }
1264 ch->trans_skb->data = ch->trans_skb_data;
1265 skb_reset_tail_pointer(ch->trans_skb);
1266 ch->trans_skb->len = 0;
1267 if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
1268 ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
1269 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
1270 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
1271 i = 0;
1272
1273 if (do_debug_data)
1274 ctcm_pr_debug("ctcmpc: %s() building "
1275 "trans_skb from collect_q \n", __FUNCTION__);
1276
1277 data_space = grp->group_max_buflen - TH_HEADER_LENGTH;
1278
1279 if (do_debug_data)
1280 ctcm_pr_debug("ctcmpc: %s() building trans_skb from collect_q"
1281 " data_space:%04x\n", __FUNCTION__, data_space);
1282 p_header = NULL;
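	/*
	 * Each skb on collect_queue already starts with its PDU header.
	 * The loop below copies the queued skbs back-to-back into
	 * trans_skb and patches the pdu_flag of each copied header
	 * (0x60 for SNAP frames, 0x20 otherwise); the last PDU of the
	 * chain gets PDU_LAST set after the loop.
	 */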
1283 while ((skb = skb_dequeue(&ch->collect_queue))) {
1284 memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
1285 p_header = (struct pdu *)
1286 (skb_tail_pointer(ch->trans_skb) - skb->len);
1287 p_header->pdu_flag = 0x00;
1288 if (skb->protocol == ntohs(ETH_P_SNAP))
1289 p_header->pdu_flag |= 0x60;
1290 else
1291 p_header->pdu_flag |= 0x20;
1292
1293 if (do_debug_data) {
1294 ctcm_pr_debug("ctcmpc: %s()trans_skb len:%04x \n",
1295 __FUNCTION__, ch->trans_skb->len);
1296 ctcm_pr_debug("ctcmpc: %s() pdu header and data"
1297 " for up to 32 bytes sent to vtam\n",
1298 __FUNCTION__);
1299 ctcmpc_dumpit((char *)p_header,
1300 min_t(int, skb->len, 32));
1301 }
1302 ch->collect_len -= skb->len;
1303 data_space -= skb->len;
1304 priv->stats.tx_packets++;
1305 priv->stats.tx_bytes += skb->len;
1306 atomic_dec(&skb->users);
1307 dev_kfree_skb_any(skb);
 1308		peekskb = skb_peek(&ch->collect_queue); /* NULL once drained */
 1309		if (!peekskb || peekskb->len > data_space)
 1310			break;
1311 i++;
1312 }
1313 /* p_header points to the last one we handled */
1314 if (p_header)
1315 p_header->pdu_flag |= PDU_LAST; /*Say it's the last one*/
1316 header = kzalloc(TH_HEADER_LENGTH, gfp_type());
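	/* gfp_type() is a ctcm helper (see ctcm_main.h) that is expected
	 * to pick GFP_ATOMIC in interrupt context and GFP_KERNEL
	 * otherwise, keeping this allocation safe on the IRQ path. */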
1317
1318 if (!header) {
1319 printk(KERN_WARNING "ctcmpc: OUT OF MEMORY IN %s()"
1320 ": Data Lost \n", __FUNCTION__);
1321 spin_unlock(&ch->collect_lock);
1322 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1323 goto done;
1324 }
1325
1326 header->th_ch_flag = TH_HAS_PDU; /* Normal data */
1327 ch->th_seq_num++;
1328 header->th_seq_num = ch->th_seq_num;
1329
1330 if (do_debug_data)
1331 ctcm_pr_debug("%s: ToVTAM_th_seq= %08x\n" ,
1332 __FUNCTION__, ch->th_seq_num);
1333
1334 memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
1335 TH_HEADER_LENGTH); /* put the TH on the packet */
1336
1337 kfree(header);
1338
1339 if (do_debug_data) {
1340 ctcm_pr_debug("ctcmpc: %s()trans_skb len:%04x \n",
1341 __FUNCTION__, ch->trans_skb->len);
1342
1343 ctcm_pr_debug("ctcmpc: %s() up-to-50 bytes of trans_skb "
1344 "data to vtam from collect_q\n", __FUNCTION__);
1345 ctcmpc_dumpit((char *)ch->trans_skb->data,
1346 min_t(int, ch->trans_skb->len, 50));
1347 }
1348
1349 spin_unlock(&ch->collect_lock);
1350 clear_normalized_cda(&ch->ccw[1]);
1351 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
1352 dev_kfree_skb_any(ch->trans_skb);
1353 ch->trans_skb = NULL;
1354 printk(KERN_WARNING
1355 "ctcmpc: %s()CCW failure - data lost\n",
1356 __FUNCTION__);
1357 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1358 return;
1359 }
1360 ch->ccw[1].count = ch->trans_skb->len;
1361 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
1362 ch->prof.send_stamp = current_kernel_time(); /* xtime */
1363 if (do_debug_ccw)
1364 ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
1365 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1366 (unsigned long)ch, 0xff, 0);
1367 ch->prof.doios_multi++;
1368 if (rc != 0) {
1369 priv->stats.tx_dropped += i;
1370 priv->stats.tx_errors += i;
1371 fsm_deltimer(&ch->timer);
1372 ctcm_ccw_check_rc(ch, rc, "chained TX");
1373 }
1374done:
1375 ctcm_clear_busy(dev);
1376 ctcm_pr_debug("ctcmpc exit: %s %s()\n", dev->name, __FUNCTION__);
1377 return;
1378}
1379
1380/**
 1381 * Got normal data: check it for sanity, queue it up, allocate a new
 1382 * buffer, trigger the bottom half, and initiate the next read.
1383 *
1384 * fi An instance of a channel statemachine.
 1385 * event The event that just happened.
 1386 * arg Generic pointer, cast from channel * upon call.
1387 */
1388static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
1389{
1390 struct channel *ch = arg;
1391 struct net_device *dev = ch->netdev;
1392 struct ctcm_priv *priv = dev->priv;
1393 struct mpc_group *grp = priv->mpcg;
1394 struct sk_buff *skb = ch->trans_skb;
1395 struct sk_buff *new_skb;
1396 unsigned long saveflags = 0; /* avoids compiler warning */
1397 int len = ch->max_bufsize - ch->irb->scsw.count;
1398
1399 if (do_debug_data) {
1400 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx %s cp:%i %s\n",
1401 dev->name, smp_processor_id(), ch->id);
1402 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx: maxbuf: %04x "
1403 "len: %04x\n", ch->max_bufsize, len);
1404 }
1405 fsm_deltimer(&ch->timer);
1406
1407 if (skb == NULL) {
1408 ctcm_pr_debug("ctcmpc exit: %s() TRANS_SKB = NULL \n",
1409 __FUNCTION__);
1410 goto again;
1411 }
1412
1413 if (len < TH_HEADER_LENGTH) {
1414 ctcm_pr_info("%s: got packet with invalid length %d\n",
1415 dev->name, len);
1416 priv->stats.rx_dropped++;
1417 priv->stats.rx_length_errors++;
1418 } else {
 1419		/* a valid TH header is required here, else the MPC connection fails */
1420 __u32 block_len = len;
1421 len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
1422 new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);
1423
1424 if (new_skb == NULL) {
1425 printk(KERN_INFO "ctcmpc:%s() NEW_SKB = NULL\n",
1426 __FUNCTION__);
1427 printk(KERN_WARNING "ctcmpc: %s() MEMORY ALLOC FAILED"
1428 " - DATA LOST - MPC FAILED\n",
1429 __FUNCTION__);
1430 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1431 goto again;
1432 }
1433 switch (fsm_getstate(grp->fsm)) {
1434 case MPCG_STATE_RESET:
1435 case MPCG_STATE_INOP:
1436 dev_kfree_skb_any(new_skb);
1437 break;
1438 case MPCG_STATE_FLOWC:
1439 case MPCG_STATE_READY:
1440 memcpy(skb_put(new_skb, block_len),
1441 skb->data, block_len);
1442 skb_queue_tail(&ch->io_queue, new_skb);
1443 tasklet_schedule(&ch->ch_tasklet);
1444 break;
1445 default:
1446 memcpy(skb_put(new_skb, len), skb->data, len);
1447 skb_queue_tail(&ch->io_queue, new_skb);
1448 tasklet_hi_schedule(&ch->ch_tasklet);
1449 break;
1450 }
1451 }
1452
1453again:
1454 switch (fsm_getstate(grp->fsm)) {
1455 int rc, dolock;
1456 case MPCG_STATE_FLOWC:
1457 case MPCG_STATE_READY:
1458 if (ctcm_checkalloc_buffer(ch))
1459 break;
1460 ch->trans_skb->data = ch->trans_skb_data;
1461 skb_reset_tail_pointer(ch->trans_skb);
1462 ch->trans_skb->len = 0;
1463 ch->ccw[1].count = ch->max_bufsize;
1464 if (do_debug_ccw)
1465 ctcmpc_dumpit((char *)&ch->ccw[0],
1466 sizeof(struct ccw1) * 3);
1467 dolock = !in_irq();
1468 if (dolock)
1469 spin_lock_irqsave(
1470 get_ccwdev_lock(ch->cdev), saveflags);
1471 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1472 (unsigned long)ch, 0xff, 0);
1473 if (dolock) /* see remark about conditional locking */
1474 spin_unlock_irqrestore(
1475 get_ccwdev_lock(ch->cdev), saveflags);
1476 if (rc != 0)
1477 ctcm_ccw_check_rc(ch, rc, "normal RX");
1478 default:
1479 break;
1480 }
1481
1482 if (do_debug)
1483 ctcm_pr_debug("ctcmpc exit : %s %s(): ch=0x%p id=%s\n",
1484 dev->name, __FUNCTION__, ch, ch->id);
1485
1486}
1487
1488/**
1489 * Initialize connection by sending a __u16 of value 0.
1490 *
1491 * fi An instance of a channel statemachine.
 1492 * event The event that just happened.
 1493 * arg Generic pointer, cast from channel * upon call.
1494 */
1495static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
1496{
1497 struct channel *ch = arg;
1498 struct net_device *dev = ch->netdev;
1499 struct ctcm_priv *priv = dev->priv;
1500
1501 if (do_debug) {
1502 struct mpc_group *gptr = priv->mpcg;
1503 ctcm_pr_debug("ctcmpc enter: %s(): ch=0x%p id=%s\n",
1504 __FUNCTION__, ch, ch->id);
1505 ctcm_pr_debug("%s() %s chstate:%i grpstate:%i chprotocol:%i\n",
1506 __FUNCTION__, ch->id, fsm_getstate(fi),
1507 fsm_getstate(gptr->fsm), ch->protocol);
1508 }
1509 if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
1510 MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");
1511
1512 fsm_deltimer(&ch->timer);
1513 if (ctcm_checkalloc_buffer(ch))
1514 goto done;
1515
1516 switch (fsm_getstate(fi)) {
1517 case CTC_STATE_STARTRETRY:
1518 case CTC_STATE_SETUPWAIT:
1519 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1520 ctcmpc_chx_rxidle(fi, event, arg);
1521 } else {
1522 fsm_newstate(fi, CTC_STATE_TXIDLE);
1523 fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
1524 }
1525 goto done;
1526 default:
1527 break;
 1528	}
1529
1530 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
1531 ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
1532
1533done:
1534 if (do_debug)
1535 ctcm_pr_debug("ctcmpc exit : %s(): ch=0x%p id=%s\n",
1536 __FUNCTION__, ch, ch->id);
1537 return;
1538}
1539
1540/**
1541 * Got initial data, check it. If OK,
1542 * notify device statemachine that we are up and
1543 * running.
1544 *
1545 * fi An instance of a channel statemachine.
 1546 * event The event that just happened.
 1547 * arg Generic pointer, cast from channel * upon call.
1548 */
1549void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
1550{
1551 struct channel *ch = arg;
1552 struct net_device *dev = ch->netdev;
1553 struct ctcm_priv *priv = dev->priv;
1554 struct mpc_group *grp = priv->mpcg;
1555 int rc;
1556 unsigned long saveflags = 0; /* avoids compiler warning */
1557
1558 fsm_deltimer(&ch->timer);
1559 ctcm_pr_debug("%s cp:%i enter: %s()\n",
1560 dev->name, smp_processor_id(), __FUNCTION__);
1561 if (do_debug)
1562 ctcm_pr_debug("%s() %s chstate:%i grpstate:%i\n",
1563 __FUNCTION__, ch->id,
1564 fsm_getstate(fi), fsm_getstate(grp->fsm));
1565
1566 fsm_newstate(fi, CTC_STATE_RXIDLE);
1567 /* XID processing complete */
1568
1569 switch (fsm_getstate(grp->fsm)) {
1570 case MPCG_STATE_FLOWC:
1571 case MPCG_STATE_READY:
1572 if (ctcm_checkalloc_buffer(ch))
1573 goto done;
1574 ch->trans_skb->data = ch->trans_skb_data;
1575 skb_reset_tail_pointer(ch->trans_skb);
1576 ch->trans_skb->len = 0;
1577 ch->ccw[1].count = ch->max_bufsize;
1578 if (do_debug_ccw)
1579 ctcmpc_dumpit((char *)&ch->ccw[0],
1580 sizeof(struct ccw1) * 3);
1581 if (event == CTC_EVENT_START)
1582 /* see remark about conditional locking */
1583 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1584 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1585 (unsigned long)ch, 0xff, 0);
1586 if (event == CTC_EVENT_START)
1587 spin_unlock_irqrestore(
1588 get_ccwdev_lock(ch->cdev), saveflags);
1589 if (rc != 0) {
1590 fsm_newstate(fi, CTC_STATE_RXINIT);
1591 ctcm_ccw_check_rc(ch, rc, "initial RX");
1592 goto done;
1593 }
1594 break;
1595 default:
1596 break;
1597 }
1598
1599 fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
1600done:
1601 if (do_debug)
1602 ctcm_pr_debug("ctcmpc exit: %s %s()\n",
1603 dev->name, __FUNCTION__);
1604 return;
1605}
1606
1607/*
1608 * ctcmpc channel FSM action
1609 * called from several points in ctcmpc_ch_fsm
1610 * ctcmpc only
1611 */
1612static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
1613{
1614 struct channel *ch = arg;
1615 struct net_device *dev = ch->netdev;
1616 struct ctcm_priv *priv = dev->priv;
1617 struct mpc_group *grp = priv->mpcg;
1618
1619 if (do_debug) {
1620 ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s"
1621 "GrpState:%s ChState:%s\n",
1622 __FUNCTION__, smp_processor_id(), ch, ch->id,
1623 fsm_getstate_str(grp->fsm),
1624 fsm_getstate_str(ch->fsm));
1625 }
1626
1627 switch (fsm_getstate(grp->fsm)) {
1628 case MPCG_STATE_XID2INITW:
 1629		/* OK: start y-side XID exchanges */
1630 if (!ch->in_mpcgroup)
1631 break;
1632 if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) {
1633 fsm_deltimer(&grp->timer);
1634 fsm_addtimer(&grp->timer,
1635 MPC_XID_TIMEOUT_VALUE,
1636 MPCG_EVENT_TIMER, dev);
1637 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1638
1639 } else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
 1640			/* ATTN received before XID0 was processed by the bh */
1641 fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1642 break;
1643 case MPCG_STATE_XID2INITX:
1644 case MPCG_STATE_XID0IOWAIT:
1645 case MPCG_STATE_XID0IOWAIX:
 1646		/* ATTN received before XID0 was processed on this channel,
 1647		   but the group is in the middle of XID0 processing */
1648 if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
1649 fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1650 break;
1651 case MPCG_STATE_XID7INITW:
1652 case MPCG_STATE_XID7INITX:
1653 case MPCG_STATE_XID7INITI:
1654 case MPCG_STATE_XID7INITZ:
1655 switch (fsm_getstate(ch->fsm)) {
1656 case CH_XID7_PENDING:
1657 fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1658 break;
1659 case CH_XID7_PENDING2:
1660 fsm_newstate(ch->fsm, CH_XID7_PENDING3);
1661 break;
1662 }
1663 fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
1664 break;
1665 }
1666
1667 if (do_debug)
1668 ctcm_pr_debug("ctcmpc exit : %s(): cp=%i ch=0x%p id=%s\n",
1669 __FUNCTION__, smp_processor_id(), ch, ch->id);
1670 return;
1671
1672}
1673
1674/*
1675 * ctcmpc channel FSM action
1676 * called from one point in ctcmpc_ch_fsm
1677 * ctcmpc only
1678 */
1679static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
1680{
1681 struct channel *ch = arg;
1682 struct net_device *dev = ch->netdev;
1683 struct ctcm_priv *priv = dev->priv;
1684 struct mpc_group *grp = priv->mpcg;
1685
1686 ctcm_pr_debug("ctcmpc enter: %s %s() %s \nGrpState:%s ChState:%s\n",
1687 dev->name,
1688 __FUNCTION__, ch->id,
1689 fsm_getstate_str(grp->fsm),
1690 fsm_getstate_str(ch->fsm));
1691
1692 fsm_deltimer(&ch->timer);
1693
1694 switch (fsm_getstate(grp->fsm)) {
1695 case MPCG_STATE_XID0IOWAIT:
 1696		/* VTAM wants to be primary; start y-side XID exchanges. */
 1697		/* Only one attn-busy is received at a time, so the state */
 1698		/* must not change on each occurrence. */
1699 grp->changed_side = 1;
1700 fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
1701 break;
1702 case MPCG_STATE_XID2INITW:
1703 if (grp->changed_side == 1) {
1704 grp->changed_side = 2;
1705 break;
1706 }
1707 /* process began via call to establish_conn */
1708 /* so must report failure instead of reverting */
1709 /* back to ready-for-xid passive state */
1710 if (grp->estconnfunc)
1711 goto done;
1712 /* this attnbusy is NOT the result of xside xid */
1713 /* collisions so yside must have been triggered */
1714 /* by an ATTN that was not intended to start XID */
1715 /* processing. Revert back to ready-for-xid and */
1716 /* wait for ATTN interrupt to signal xid start */
1717 if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
 1718			fsm_newstate(ch->fsm, CH_XID0_PENDING);
1719 fsm_deltimer(&grp->timer);
1720 goto done;
1721 }
1722 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1723 goto done;
1724 case MPCG_STATE_XID2INITX:
 1725		/* XID2 was received before ATTN Busy for the second
 1726		   channel. Send the y-side XID for the second channel.
 1727		 */
1728 if (grp->changed_side == 1) {
1729 grp->changed_side = 2;
1730 break;
 1731		} /* else fall through */
1732 case MPCG_STATE_XID0IOWAIX:
1733 case MPCG_STATE_XID7INITW:
1734 case MPCG_STATE_XID7INITX:
1735 case MPCG_STATE_XID7INITI:
1736 case MPCG_STATE_XID7INITZ:
1737 default:
 1738		/* Multiple attn-busy interrupts indicate that the sides */
 1739		/* are too far out of sync and are certainly not part of */
 1740		/* valid MPC group negotiations. */
1741 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1742 goto done;
1743 }
1744
1745 if (grp->changed_side == 1) {
1746 fsm_deltimer(&grp->timer);
1747 fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
1748 MPCG_EVENT_TIMER, dev);
1749 }
1750 if (ch->in_mpcgroup)
1751 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1752 else
1753 printk(KERN_WARNING "ctcmpc: %s() Not all channels have"
1754 " been added to group\n", __FUNCTION__);
1755
1756done:
1757 if (do_debug)
1758 ctcm_pr_debug("ctcmpc exit : %s()%s ch=0x%p id=%s\n",
1759 __FUNCTION__, dev->name, ch, ch->id);
1760
1761 return;
1762
1763}
1764
1765/*
1766 * ctcmpc channel FSM action
1767 * called from several points in ctcmpc_ch_fsm
1768 * ctcmpc only
1769 */
1770static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
1771{
1772 struct channel *ch = arg;
1773 struct net_device *dev = ch->netdev;
1774 struct ctcm_priv *priv = dev->priv;
1775 struct mpc_group *grp = priv->mpcg;
1776
1777 ctcm_pr_debug("ctcmpc enter: %s %s() %s \nGrpState:%s ChState:%s\n",
1778 dev->name, __FUNCTION__, ch->id,
1779 fsm_getstate_str(grp->fsm),
1780 fsm_getstate_str(ch->fsm));
1781
1782 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1783
1784 return;
1785}
1786
1787/*
1788 * ctcmpc channel FSM action
1789 * called from several points in ctcmpc_ch_fsm
1790 * ctcmpc only
1791 */
1792static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
1793{
1794 struct channel *ach = arg;
1795 struct net_device *dev = ach->netdev;
1796 struct ctcm_priv *priv = dev->priv;
1797 struct mpc_group *grp = priv->mpcg;
1798 struct channel *wch = priv->channel[WRITE];
1799 struct channel *rch = priv->channel[READ];
1800 struct sk_buff *skb;
1801 struct th_sweep *header;
1802 int rc = 0;
1803 unsigned long saveflags = 0;
1804
1805 if (do_debug)
1806 ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
1807 __FUNCTION__, smp_processor_id(), ach, ach->id);
1808
1809 if (grp->in_sweep == 0)
1810 goto done;
1811
1812 if (do_debug_data) {
1813 ctcm_pr_debug("ctcmpc: %s() 1: ToVTAM_th_seq= %08x\n" ,
1814 __FUNCTION__, wch->th_seq_num);
1815 ctcm_pr_debug("ctcmpc: %s() 1: FromVTAM_th_seq= %08x\n" ,
1816 __FUNCTION__, rch->th_seq_num);
1817 }
1818
1819 if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
1820 /* give the previous IO time to complete */
1821 fsm_addtimer(&wch->sweep_timer,
1822 200, CTC_EVENT_RSWEEP_TIMER, wch);
1823 goto done;
1824 }
1825
1826 skb = skb_dequeue(&wch->sweep_queue);
1827 if (!skb)
1828 goto done;
1829
1830 if (set_normalized_cda(&wch->ccw[4], skb->data)) {
1831 grp->in_sweep = 0;
1832 ctcm_clear_busy_do(dev);
1833 dev_kfree_skb_any(skb);
1834 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1835 goto done;
1836 } else {
1837 atomic_inc(&skb->users);
1838 skb_queue_tail(&wch->io_queue, skb);
1839 }
1840
1841 /* send out the sweep */
1842 wch->ccw[4].count = skb->len;
1843
1844 header = (struct th_sweep *)skb->data;
1845 switch (header->th.th_ch_flag) {
1846 case TH_SWEEP_REQ:
1847 grp->sweep_req_pend_num--;
1848 break;
1849 case TH_SWEEP_RESP:
1850 grp->sweep_rsp_pend_num--;
1851 break;
1852 }
1853
1854 header->sw.th_last_seq = wch->th_seq_num;
1855
1856 if (do_debug_ccw)
1857 ctcmpc_dumpit((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
1858
1859 ctcm_pr_debug("ctcmpc: %s() sweep packet\n", __FUNCTION__);
1860 ctcmpc_dumpit((char *)header, TH_SWEEP_LENGTH);
1861
1862 fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
1863 fsm_newstate(wch->fsm, CTC_STATE_TX);
1864
1865 spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
1866 wch->prof.send_stamp = current_kernel_time(); /* xtime */
1867 rc = ccw_device_start(wch->cdev, &wch->ccw[3],
1868 (unsigned long) wch, 0xff, 0);
1869 spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);
1870
1871 if ((grp->sweep_req_pend_num == 0) &&
1872 (grp->sweep_rsp_pend_num == 0)) {
1873 grp->in_sweep = 0;
1874 rch->th_seq_num = 0x00;
1875 wch->th_seq_num = 0x00;
1876 ctcm_clear_busy_do(dev);
1877 }
1878
1879 if (do_debug_data) {
1880 ctcm_pr_debug("ctcmpc: %s()2: ToVTAM_th_seq= %08x\n" ,
1881 __FUNCTION__, wch->th_seq_num);
1882 ctcm_pr_debug("ctcmpc: %s()2: FromVTAM_th_seq= %08x\n" ,
1883 __FUNCTION__, rch->th_seq_num);
1884 }
1885
1886 if (rc != 0)
1887 ctcm_ccw_check_rc(wch, rc, "send sweep");
1888
1889done:
1890 if (do_debug)
1891 ctcm_pr_debug("ctcmpc exit: %s() %s\n", __FUNCTION__, ach->id);
1892 return;
1893}
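/*
 * Illustration only: the sweep frame sent above is a TH header plus a
 * sweep extension. Assuming the layout from ctcm_mpc.h (an assumption,
 * that header is not shown in this hunk), it is roughly:
 *
 *	struct th_sweep {
 *		struct th_header th;		/+ th_ch_flag = TH_SWEEP_REQ/RESP +/
 *		struct th_sweep_header sw;	/+ carries th_last_seq +/
 *	};
 *
 * th_last_seq announces the last TH sequence number written, so both
 * sides can reset their sequence counters consistently once all
 * pending sweep requests and responses have drained.
 */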
1894
1895
1896/*
1897 * The ctcmpc statemachine for a channel.
1898 */
1899
1900const fsm_node ctcmpc_ch_fsm[] = {
1901 { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
1902 { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
1903 { CTC_STATE_STOPPED, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1904 { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
1905 { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1906
1907 { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
1908 { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
1909 { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
1910 { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1911 { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },
1912 { CTC_STATE_NOTOP, CTC_EVENT_UC_RCRESET, ctcm_chx_stop },
1913 { CTC_STATE_NOTOP, CTC_EVENT_UC_RSRESET, ctcm_chx_stop },
1914 { CTC_STATE_NOTOP, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1915
1916 { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1917 { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
1918 { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1919 { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
1920 { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1921 { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1922
1923 { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
1924 { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
1925 { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1926 { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1927 { CTC_STATE_STARTRETRY, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1928
1929 { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1930 { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
1931 { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
1932 { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1933 { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1934 { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
1935 { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1936 { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1937
1938 { CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1939 { CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
1940 { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, ctcmpc_chx_rxidle },
1941 { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
1942 { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
1943 { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
1944 { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
1945 { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1946 { CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, ctcmpc_chx_firstio },
1947 { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1948
1949 { CH_XID0_PENDING, CTC_EVENT_FINSTAT, ctcm_action_nop },
1950 { CH_XID0_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1951 { CH_XID0_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
1952 { CH_XID0_PENDING, CTC_EVENT_START, ctcm_action_nop },
1953 { CH_XID0_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1954 { CH_XID0_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1955 { CH_XID0_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1956 { CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1958 { CH_XID0_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1959
1960 { CH_XID0_INPROGRESS, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1961 { CH_XID0_INPROGRESS, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1962 { CH_XID0_INPROGRESS, CTC_EVENT_STOP, ctcm_chx_haltio },
1963 { CH_XID0_INPROGRESS, CTC_EVENT_START, ctcm_action_nop },
1964 { CH_XID0_INPROGRESS, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1965 { CH_XID0_INPROGRESS, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1966 { CH_XID0_INPROGRESS, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1967 { CH_XID0_INPROGRESS, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1968 { CH_XID0_INPROGRESS, CTC_EVENT_ATTNBUSY, ctcmpc_chx_attnbusy },
1969 { CH_XID0_INPROGRESS, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1970 { CH_XID0_INPROGRESS, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1971
1972 { CH_XID7_PENDING, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1973 { CH_XID7_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1974 { CH_XID7_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
1975 { CH_XID7_PENDING, CTC_EVENT_START, ctcm_action_nop },
1976 { CH_XID7_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1977 { CH_XID7_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1978 { CH_XID7_PENDING, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1979 { CH_XID7_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1980 { CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1982 { CH_XID7_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1983 { CH_XID7_PENDING, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1984 { CH_XID7_PENDING, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1985
1986 { CH_XID7_PENDING1, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1987 { CH_XID7_PENDING1, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1988 { CH_XID7_PENDING1, CTC_EVENT_STOP, ctcm_chx_haltio },
1989 { CH_XID7_PENDING1, CTC_EVENT_START, ctcm_action_nop },
1990 { CH_XID7_PENDING1, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1991 { CH_XID7_PENDING1, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1992 { CH_XID7_PENDING1, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1993 { CH_XID7_PENDING1, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1994 { CH_XID7_PENDING1, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1995 { CH_XID7_PENDING1, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1996 { CH_XID7_PENDING1, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1997 { CH_XID7_PENDING1, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1998
1999 { CH_XID7_PENDING2, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
2000 { CH_XID7_PENDING2, CTC_EVENT_ATTN, ctcmpc_chx_attn },
2001 { CH_XID7_PENDING2, CTC_EVENT_STOP, ctcm_chx_haltio },
2002 { CH_XID7_PENDING2, CTC_EVENT_START, ctcm_action_nop },
2003 { CH_XID7_PENDING2, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2004 { CH_XID7_PENDING2, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2005 { CH_XID7_PENDING2, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
2006 { CH_XID7_PENDING2, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
2007 { CH_XID7_PENDING2, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
2008 { CH_XID7_PENDING2, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
2009 { CH_XID7_PENDING2, CTC_EVENT_TIMER, ctcmpc_chx_resend },
2010 { CH_XID7_PENDING2, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
2011
2012 { CH_XID7_PENDING3, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
2013 { CH_XID7_PENDING3, CTC_EVENT_ATTN, ctcmpc_chx_attn },
2014 { CH_XID7_PENDING3, CTC_EVENT_STOP, ctcm_chx_haltio },
2015 { CH_XID7_PENDING3, CTC_EVENT_START, ctcm_action_nop },
2016 { CH_XID7_PENDING3, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2017 { CH_XID7_PENDING3, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2018 { CH_XID7_PENDING3, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
2019 { CH_XID7_PENDING3, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
2020 { CH_XID7_PENDING3, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
2021 { CH_XID7_PENDING3, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
2022 { CH_XID7_PENDING3, CTC_EVENT_TIMER, ctcmpc_chx_resend },
2023 { CH_XID7_PENDING3, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
2024
2025 { CH_XID7_PENDING4, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
2026 { CH_XID7_PENDING4, CTC_EVENT_ATTN, ctcmpc_chx_attn },
2027 { CH_XID7_PENDING4, CTC_EVENT_STOP, ctcm_chx_haltio },
2028 { CH_XID7_PENDING4, CTC_EVENT_START, ctcm_action_nop },
2029 { CH_XID7_PENDING4, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2030 { CH_XID7_PENDING4, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2031 { CH_XID7_PENDING4, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
2032 { CH_XID7_PENDING4, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
2033 { CH_XID7_PENDING4, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
2034 { CH_XID7_PENDING4, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
2035 { CH_XID7_PENDING4, CTC_EVENT_TIMER, ctcmpc_chx_resend },
2036 { CH_XID7_PENDING4, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
2037
2038 { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
2039 { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
2040 { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
2041 { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
2042 { CTC_STATE_RXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
2043 { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2044 { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2045 { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
2046
2047 { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
2048 { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
2049 { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
2050 { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
2051 { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
2052 { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
2053 { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2054 { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2055 { CTC_STATE_TXINIT, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
2056
2057 { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
2058 { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
2059 { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
2060 { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
2061 { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
2062 { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2063 { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2064 { CTC_STATE_TXIDLE, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
2065
2066 { CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
2067 { CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
2068 { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
2069 { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
2070 { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
2071 { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2072 { CTC_STATE_TERM, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
2073 { CTC_STATE_TERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2074
2075 { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
2076 { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
2077 { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
2078 { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
2079 { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
2080 { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2081 { CTC_STATE_DTERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2082
2083 { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
2084 { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
2085 { CTC_STATE_TX, CTC_EVENT_FINSTAT, ctcmpc_chx_txdone },
2086 { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
2087 { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
2088 { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
2089 { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2090 { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2091 { CTC_STATE_TX, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
2092 { CTC_STATE_TX, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
2093
2094 { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
2095 { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
2096 { CTC_STATE_TXERR, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2097 { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2098 { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2099};
2100
2101int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
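/*
 * Illustration only (an assumption based on the fsm.h API): an MPC
 * channel binds the expanded table with the expanded counts, e.g.
 *
 *	ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
 *			ctc_ch_event_names, CTC_MPC_NR_STATES,
 *			CTC_MPC_NR_EVENTS, ctcmpc_ch_fsm,
 *			mpc_ch_fsm_len, GFP_KERNEL);
 *
 * Passing CTC_MPC_NR_STATES/CTC_MPC_NR_EVENTS matters: the XID states
 * and the MPC-only events used above lie beyond the base CTC_NR_STATES
 * and CTC_NR_EVENTS ranges.
 */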
2102
2103/*
2104 * Actions for interface - statemachine.
2105 */
2106
2107/**
2108 * Startup channels by sending CTC_EVENT_START to each channel.
2109 *
2110 * fi An instance of an interface statemachine.
 2111 * event The event that just happened.
 2112 * arg Generic pointer, cast from struct net_device * upon call.
2113 */
2114static void dev_action_start(fsm_instance *fi, int event, void *arg)
2115{
2116 struct net_device *dev = arg;
2117 struct ctcm_priv *priv = dev->priv;
2118 int direction;
2119
2120 CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2121
2122 fsm_deltimer(&priv->restart_timer);
2123 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2124 if (IS_MPC(priv))
2125 priv->mpcg->channels_terminating = 0;
2126 for (direction = READ; direction <= WRITE; direction++) {
2127 struct channel *ch = priv->channel[direction];
2128 fsm_event(ch->fsm, CTC_EVENT_START, ch);
2129 }
2130}
2131
2132/**
2133 * Shutdown channels by sending CTC_EVENT_STOP to each channel.
2134 *
2135 * fi An instance of an interface statemachine.
 2136 * event The event that just happened.
 2137 * arg Generic pointer, cast from struct net_device * upon call.
2138 */
2139static void dev_action_stop(fsm_instance *fi, int event, void *arg)
2140{
2141 int direction;
2142 struct net_device *dev = arg;
2143 struct ctcm_priv *priv = dev->priv;
2144
2145 CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2146
2147 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2148 for (direction = READ; direction <= WRITE; direction++) {
2149 struct channel *ch = priv->channel[direction];
2150 fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
2151 ch->th_seq_num = 0x00;
2152 if (do_debug)
2153 ctcm_pr_debug("ctcm: %s() CH_th_seq= %08x\n",
2154 __FUNCTION__, ch->th_seq_num);
2155 }
2156 if (IS_MPC(priv))
2157 fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
2158}
2159
2160static void dev_action_restart(fsm_instance *fi, int event, void *arg)
2161{
2162 int restart_timer;
2163 struct net_device *dev = arg;
2164 struct ctcm_priv *priv = dev->priv;
2165
2166 CTCMY_DBF_DEV_NAME(TRACE, dev, "");
2167
2168 if (IS_MPC(priv)) {
2169 ctcm_pr_info("ctcm: %s Restarting Device and "
2170 "MPC Group in 5 seconds\n",
2171 dev->name);
2172 restart_timer = CTCM_TIME_1_SEC;
2173 } else {
2174 ctcm_pr_info("%s: Restarting\n", dev->name);
2175 restart_timer = CTCM_TIME_5_SEC;
2176 }
2177
2178 dev_action_stop(fi, event, arg);
2179 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
2180 if (IS_MPC(priv))
2181 fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
2182
 2183	/* Going back into the start sequence too quickly can */
 2184	/* result in the other side becoming unreachable, due */
 2185	/* to the sense data reported when the I/O is aborted. */
2186 fsm_addtimer(&priv->restart_timer, restart_timer,
2187 DEV_EVENT_START, dev);
2188}
2189
2190/**
2191 * Called from channel statemachine
2192 * when a channel is up and running.
2193 *
2194 * fi An instance of an interface statemachine.
 2195 * event The event that just happened.
 2196 * arg Generic pointer, cast from struct net_device * upon call.
2197 */
2198static void dev_action_chup(fsm_instance *fi, int event, void *arg)
2199{
2200 struct net_device *dev = arg;
2201 struct ctcm_priv *priv = dev->priv;
2202
2203 CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2204
2205 switch (fsm_getstate(fi)) {
2206 case DEV_STATE_STARTWAIT_RXTX:
2207 if (event == DEV_EVENT_RXUP)
2208 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2209 else
2210 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2211 break;
2212 case DEV_STATE_STARTWAIT_RX:
2213 if (event == DEV_EVENT_RXUP) {
2214 fsm_newstate(fi, DEV_STATE_RUNNING);
2215 ctcm_pr_info("%s: connected with remote side\n",
2216 dev->name);
2217 ctcm_clear_busy(dev);
2218 }
2219 break;
2220 case DEV_STATE_STARTWAIT_TX:
2221 if (event == DEV_EVENT_TXUP) {
2222 fsm_newstate(fi, DEV_STATE_RUNNING);
2223 ctcm_pr_info("%s: connected with remote side\n",
2224 dev->name);
2225 ctcm_clear_busy(dev);
2226 }
2227 break;
2228 case DEV_STATE_STOPWAIT_TX:
2229 if (event == DEV_EVENT_RXUP)
2230 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2231 break;
2232 case DEV_STATE_STOPWAIT_RX:
2233 if (event == DEV_EVENT_TXUP)
2234 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2235 break;
2236 }
2237
2238 if (IS_MPC(priv)) {
2239 if (event == DEV_EVENT_RXUP)
2240 mpc_channel_action(priv->channel[READ],
2241 READ, MPC_CHANNEL_ADD);
2242 else
2243 mpc_channel_action(priv->channel[WRITE],
2244 WRITE, MPC_CHANNEL_ADD);
2245 }
2246}
2247
2248/**
 2249 * Called from channel statemachine
 2250 * when a channel has been shut down.
 2251 *
 2252 * fi An instance of an interface statemachine.
 2253 * event The event that just happened.
 2254 * arg Generic pointer, cast from struct net_device * upon call.
2255 */
2256static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
2257{
2259 struct net_device *dev = arg;
2260 struct ctcm_priv *priv = dev->priv;
2261
2262 CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2263
2264 switch (fsm_getstate(fi)) {
2265 case DEV_STATE_RUNNING:
2266 if (event == DEV_EVENT_TXDOWN)
2267 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2268 else
2269 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2270 break;
2271 case DEV_STATE_STARTWAIT_RX:
2272 if (event == DEV_EVENT_TXDOWN)
2273 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2274 break;
2275 case DEV_STATE_STARTWAIT_TX:
2276 if (event == DEV_EVENT_RXDOWN)
2277 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2278 break;
2279 case DEV_STATE_STOPWAIT_RXTX:
2280 if (event == DEV_EVENT_TXDOWN)
2281 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2282 else
2283 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2284 break;
2285 case DEV_STATE_STOPWAIT_RX:
2286 if (event == DEV_EVENT_RXDOWN)
2287 fsm_newstate(fi, DEV_STATE_STOPPED);
2288 break;
2289 case DEV_STATE_STOPWAIT_TX:
2290 if (event == DEV_EVENT_TXDOWN)
2291 fsm_newstate(fi, DEV_STATE_STOPPED);
2292 break;
2293 }
2294 if (IS_MPC(priv)) {
2295 if (event == DEV_EVENT_RXDOWN)
2296 mpc_channel_action(priv->channel[READ],
2297 READ, MPC_CHANNEL_REMOVE);
2298 else
2299 mpc_channel_action(priv->channel[WRITE],
2300 WRITE, MPC_CHANNEL_REMOVE);
2301 }
2302}
2303
2304const fsm_node dev_fsm[] = {
2305 { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
2306 { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2307 { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2308 { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2309 { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2310 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2311 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2312 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2313 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2314 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2315 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2316 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2317 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2318 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2319 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2320 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2321 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2322 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2323 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2324 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2325 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2326 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2327 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2328 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2329 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2330 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2331 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2332 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2333 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2334 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2335 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2336 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2337 { DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2338 { DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2339 { DEV_STATE_RUNNING, DEV_EVENT_TXUP, ctcm_action_nop },
2340 { DEV_STATE_RUNNING, DEV_EVENT_RXUP, ctcm_action_nop },
2341 { DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
2342};
2343
2344int dev_fsm_len = ARRAY_SIZE(dev_fsm);
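/*
 * Illustration only (the real call site is in ctcm_main.c and the
 * instance name here is an assumption): the device fsm is set up once
 * per interface, e.g.
 *
 *	priv->fsm = init_fsm("ctcmdev", dev_state_names,
 *			dev_event_names, CTCM_NR_DEV_STATES,
 *			CTCM_NR_DEV_EVENTS, dev_fsm, dev_fsm_len,
 *			GFP_KERNEL);
 *
 * It is then driven by the DEV_EVENT_* posts from the channel actions
 * above, e.g. fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev).
 */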
2345
2346/* --- This is the END my friend --- */
2347
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
new file mode 100644
index 000000000000..2326aba9807a
--- /dev/null
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -0,0 +1,359 @@
1/*
2 * drivers/s390/net/ctcm_fsms.h
3 *
4 * Copyright IBM Corp. 2001, 2007
5 * Authors: Fritz Elfert (felfert@millenux.com)
6 * Peter Tiedemann (ptiedem@de.ibm.com)
7 * MPC additions :
8 * Belinda Thompson (belindat@us.ibm.com)
9 * Andy Richter (richtera@us.ibm.com)
10 */
11#ifndef _CTCM_FSMS_H_
12#define _CTCM_FSMS_H_
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/kernel.h>
17#include <linux/slab.h>
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/interrupt.h>
21#include <linux/timer.h>
22#include <linux/bitops.h>
23
24#include <linux/signal.h>
25#include <linux/string.h>
26
27#include <linux/ip.h>
28#include <linux/if_arp.h>
29#include <linux/tcp.h>
30#include <linux/skbuff.h>
31#include <linux/ctype.h>
32#include <net/dst.h>
33
34#include <linux/io.h>
35#include <asm/ccwdev.h>
36#include <asm/ccwgroup.h>
37#include <linux/uaccess.h>
38
39#include <asm/idals.h>
40
41#include "fsm.h"
42#include "cu3088.h"
43#include "ctcm_main.h"
44
45/*
46 * Definitions for the channel statemachine(s) for ctc and ctcmpc
47 *
48 * To allow better kerntyping, prefix-less definitions for channel states
 49 * and channel events have been replaced:
50 * ch_event... -> ctc_ch_event...
51 * CH_EVENT... -> CTC_EVENT...
52 * ch_state... -> ctc_ch_state...
53 * CH_STATE... -> CTC_STATE...
54 */
55/*
56 * Events of the channel statemachine(s) for ctc and ctcmpc
57 */
58enum ctc_ch_events {
59 /*
60 * Events, representing return code of
61 * I/O operations (ccw_device_start, ccw_device_halt et al.)
62 */
63 CTC_EVENT_IO_SUCCESS,
64 CTC_EVENT_IO_EBUSY,
65 CTC_EVENT_IO_ENODEV,
66 CTC_EVENT_IO_UNKNOWN,
67
68 CTC_EVENT_ATTNBUSY,
69 CTC_EVENT_ATTN,
70 CTC_EVENT_BUSY,
71 /*
72 * Events, representing unit-check
73 */
74 CTC_EVENT_UC_RCRESET,
75 CTC_EVENT_UC_RSRESET,
76 CTC_EVENT_UC_TXTIMEOUT,
77 CTC_EVENT_UC_TXPARITY,
78 CTC_EVENT_UC_HWFAIL,
79 CTC_EVENT_UC_RXPARITY,
80 CTC_EVENT_UC_ZERO,
81 CTC_EVENT_UC_UNKNOWN,
82 /*
83 * Events, representing subchannel-check
84 */
85 CTC_EVENT_SC_UNKNOWN,
86 /*
87 * Events, representing machine checks
88 */
89 CTC_EVENT_MC_FAIL,
90 CTC_EVENT_MC_GOOD,
91 /*
92 * Event, representing normal IRQ
93 */
94 CTC_EVENT_IRQ,
95 CTC_EVENT_FINSTAT,
96 /*
97 * Event, representing timer expiry.
98 */
99 CTC_EVENT_TIMER,
100 /*
101 * Events, representing commands from upper levels.
102 */
103 CTC_EVENT_START,
104 CTC_EVENT_STOP,
105 CTC_NR_EVENTS,
106 /*
107 * additional MPC events
108 */
109 CTC_EVENT_SEND_XID = CTC_NR_EVENTS,
110 CTC_EVENT_RSWEEP_TIMER,
111 /*
 112	 * MUST always be the last element!
113 */
114 CTC_MPC_NR_EVENTS,
115};
116
117/*
118 * States of the channel statemachine(s) for ctc and ctcmpc.
119 */
120enum ctc_ch_states {
121 /*
122 * Channel not assigned to any device,
123 * initial state, direction invalid
124 */
125 CTC_STATE_IDLE,
126 /*
127 * Channel assigned but not operating
128 */
129 CTC_STATE_STOPPED,
130 CTC_STATE_STARTWAIT,
131 CTC_STATE_STARTRETRY,
132 CTC_STATE_SETUPWAIT,
133 CTC_STATE_RXINIT,
134 CTC_STATE_TXINIT,
135 CTC_STATE_RX,
136 CTC_STATE_TX,
137 CTC_STATE_RXIDLE,
138 CTC_STATE_TXIDLE,
139 CTC_STATE_RXERR,
140 CTC_STATE_TXERR,
141 CTC_STATE_TERM,
142 CTC_STATE_DTERM,
143 CTC_STATE_NOTOP,
144 CTC_NR_STATES, /* MUST be the last element of non-expanded states */
145 /*
146 * additional MPC states
147 */
148 CH_XID0_PENDING = CTC_NR_STATES,
149 CH_XID0_INPROGRESS,
150 CH_XID7_PENDING,
151 CH_XID7_PENDING1,
152 CH_XID7_PENDING2,
153 CH_XID7_PENDING3,
154 CH_XID7_PENDING4,
155 CTC_MPC_NR_STATES, /* MUST be the last element of expanded mpc states */
156};
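/*
 * Note: the ctc_ch_state_names and ctc_ch_event_names arrays declared
 * below must stay index-aligned with these enums. The base ctcm
 * channel fsm is sized with CTC_NR_STATES/CTC_NR_EVENTS, while the
 * ctcmpc fsm uses the expanded CTC_MPC_NR_STATES/CTC_MPC_NR_EVENTS so
 * that the XID states and MPC-only events are valid indices.
 */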
157
158extern const char *ctc_ch_event_names[];
159
160extern const char *ctc_ch_state_names[];
161
162void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg);
163void ctcm_purge_skb_queue(struct sk_buff_head *q);
164void fsm_action_nop(fsm_instance *fi, int event, void *arg);
165
166/*
167 * ----- non-static actions for ctcm channel statemachine -----
168 *
169 */
170void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg);
171
172/*
173 * ----- FSM (state/event/action) of the ctcm channel statemachine -----
174 */
175extern const fsm_node ch_fsm[];
176extern int ch_fsm_len;
177
178
179/*
180 * ----- non-static actions for ctcmpc channel statemachine ----
181 *
182 */
183/* shared:
184void ctcm_chx_txidle(fsm_instance * fi, int event, void *arg);
185 */
186void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg);
187
188/*
189 * ----- FSM (state/event/action) of the ctcmpc channel statemachine -----
190 */
191extern const fsm_node ctcmpc_ch_fsm[];
192extern int mpc_ch_fsm_len;
193
194/*
195 * Definitions for the device interface statemachine for ctc and mpc
196 */
197
198/*
199 * States of the device interface statemachine.
200 */
201enum dev_states {
202 DEV_STATE_STOPPED,
203 DEV_STATE_STARTWAIT_RXTX,
204 DEV_STATE_STARTWAIT_RX,
205 DEV_STATE_STARTWAIT_TX,
206 DEV_STATE_STOPWAIT_RXTX,
207 DEV_STATE_STOPWAIT_RX,
208 DEV_STATE_STOPWAIT_TX,
209 DEV_STATE_RUNNING,
210 /*
 211	 * MUST always be the last element!
212 */
213 CTCM_NR_DEV_STATES
214};
215
216extern const char *dev_state_names[];
217
218/*
219 * Events of the device interface statemachine.
220 * ctcm and ctcmpc
221 */
222enum dev_events {
223 DEV_EVENT_START,
224 DEV_EVENT_STOP,
225 DEV_EVENT_RXUP,
226 DEV_EVENT_TXUP,
227 DEV_EVENT_RXDOWN,
228 DEV_EVENT_TXDOWN,
229 DEV_EVENT_RESTART,
230 /*
 231	 * MUST always be the last element!
232 */
233 CTCM_NR_DEV_EVENTS
234};
235
236extern const char *dev_event_names[];
237
238/*
239 * Actions for the device interface statemachine.
240 * ctc and ctcmpc
241 */
242/*
243static void dev_action_start(fsm_instance * fi, int event, void *arg);
244static void dev_action_stop(fsm_instance * fi, int event, void *arg);
245static void dev_action_restart(fsm_instance *fi, int event, void *arg);
246static void dev_action_chup(fsm_instance * fi, int event, void *arg);
247static void dev_action_chdown(fsm_instance * fi, int event, void *arg);
248*/
249
250/*
251 * The (state/event/action) fsm table of the device interface statemachine.
252 * ctcm and ctcmpc
253 */
254extern const fsm_node dev_fsm[];
255extern int dev_fsm_len;
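/*
 * The device statemachine is driven from the channel statemachine,
 * e.g. (as in ctcm_fsms.c):
 *
 *	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
 *
 * once a read channel is up, and DEV_EVENT_RXDOWN/DEV_EVENT_TXDOWN
 * when a channel fails.
 */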
256
257
258/*
259 * Definitions for the MPC Group statemachine
260 */
261
262/*
263 * MPC Group Station FSM States
264
265State Name When In This State
266====================== =======================================
267MPCG_STATE_RESET Initial State When Driver Loaded
268 We receive and send NOTHING
269
270MPCG_STATE_INOP INOP Received.
271 Group level non-recoverable error
272
273MPCG_STATE_READY XID exchanges for at least 1 write and
274 1 read channel have completed.
275 Group is ready for data transfer.
276
277States from ctc_mpc_alloc_channel
278==============================================================
279MPCG_STATE_XID2INITW Awaiting XID2(0) Initiation
280 ATTN from other side will start
281 XID negotiations.
282 Y-side protocol only.
283
284MPCG_STATE_XID2INITX XID2(0) negotiations are in progress.
285 At least 1, but not all, XID2(0)'s
286 have been received from partner.
287
288MPCG_STATE_XID7INITW XID2(0) complete
289 No XID2(7)'s have yet been received.
290 XID2(7) negotiations pending.
291
292MPCG_STATE_XID7INITX XID2(7) negotiations in progress.
293 At least 1, but not all, XID2(7)'s
294 have been received from partner.
295
296MPCG_STATE_XID7INITF XID2(7) negotiations complete.
297 Transitioning to READY.
298
299MPCG_STATE_READY Ready for Data Transfer.
300
301
302States from ctc_mpc_establish_connectivity call
303==============================================================
304MPCG_STATE_XID0IOWAIT Initiating XID2(0) negotiations.
305 X-side protocol only.
306 ATTN-BUSY from other side will convert
307 this to Y-side protocol and the
308 ctc_mpc_alloc_channel flow will begin.
309
310MPCG_STATE_XID0IOWAIX XID2(0) negotiations are in progress.
311 At least 1, but not all, XID2(0)'s
312 have been received from partner.
313
314MPCG_STATE_XID7INITI XID2(0) complete
315 No XID2(7)'s have yet been received.
316 XID2(7) negotiations pending.
317
318MPCG_STATE_XID7INITZ XID2(7) negotiations in progress.
319 At least 1, but not all, XID2(7)'s
320 have been received from partner.
321
322MPCG_STATE_XID7INITF XID2(7) negotiations complete.
323 Transitioning to READY.
324
325MPCG_STATE_READY Ready for Data Transfer.
326
327*/
328
329enum mpcg_events {
330 MPCG_EVENT_INOP,
331 MPCG_EVENT_DISCONC,
332 MPCG_EVENT_XID0DO,
333 MPCG_EVENT_XID2,
334 MPCG_EVENT_XID2DONE,
335 MPCG_EVENT_XID7DONE,
336 MPCG_EVENT_TIMER,
337 MPCG_EVENT_DOIO,
338 MPCG_NR_EVENTS,
339};
340
341enum mpcg_states {
342 MPCG_STATE_RESET,
343 MPCG_STATE_INOP,
344 MPCG_STATE_XID2INITW,
345 MPCG_STATE_XID2INITX,
346 MPCG_STATE_XID7INITW,
347 MPCG_STATE_XID7INITX,
348 MPCG_STATE_XID0IOWAIT,
349 MPCG_STATE_XID0IOWAIX,
350 MPCG_STATE_XID7INITI,
351 MPCG_STATE_XID7INITZ,
352 MPCG_STATE_XID7INITF,
353 MPCG_STATE_FLOWC,
354 MPCG_STATE_READY,
355 MPCG_NR_STATES,
356};
357
358#endif
359/* --- This is the END my friend --- */
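
A minimal sketch of how the enums above plug into the shared fsm engine. The fsm_node layout (condition state, condition event, action function) is assumed from this driver's fsm.h, and the single table entry is illustrative, not the real dev_fsm:

static void dev_action_start(fsm_instance *fi, int event, void *arg);

/* one (state, event) -> action binding; dev_fsm is an array of these */
static const fsm_node dev_fsm_sketch[] = {
	{ DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
};

/* dispatch, e.g. from ctcm_open():
 *	fsm_event(priv->fsm, DEV_EVENT_START, dev);
 * looks up (current state, event) in the table and invokes the
 * bound action as action(fi, DEV_EVENT_START, dev).
 */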
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
new file mode 100644
index 000000000000..d52843da4f55
--- /dev/null
+++ b/drivers/s390/net/ctcm_main.c
@@ -0,0 +1,1772 @@
1/*
2 * drivers/s390/net/ctcm_main.c
3 *
4 * Copyright IBM Corp. 2001, 2007
5 * Author(s):
6 * Original CTC driver(s):
7 * Fritz Elfert (felfert@millenux.com)
8 * Dieter Wellerdiek (wel@de.ibm.com)
9 * Martin Schwidefsky (schwidefsky@de.ibm.com)
10 * Denis Joseph Barrow (barrow_dj@yahoo.com)
11 * Jochen Roehrig (roehrig@de.ibm.com)
12 * Cornelia Huck <cornelia.huck@de.ibm.com>
13 * MPC additions:
14 * Belinda Thompson (belindat@us.ibm.com)
15 * Andy Richter (richtera@us.ibm.com)
16 * Revived by:
17 * Peter Tiedemann (ptiedem@de.ibm.com)
18 */
19
20#undef DEBUG
21#undef DEBUGDATA
22#undef DEBUGCCW
23
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/errno.h>
29#include <linux/types.h>
30#include <linux/interrupt.h>
31#include <linux/timer.h>
32#include <linux/bitops.h>
33
34#include <linux/signal.h>
35#include <linux/string.h>
36
37#include <linux/ip.h>
38#include <linux/if_arp.h>
39#include <linux/tcp.h>
40#include <linux/skbuff.h>
41#include <linux/ctype.h>
42#include <net/dst.h>
43
44#include <linux/io.h>
45#include <asm/ccwdev.h>
46#include <asm/ccwgroup.h>
47#include <linux/uaccess.h>
48
49#include <asm/idals.h>
50
51#include "cu3088.h"
52#include "ctcm_fsms.h"
53#include "ctcm_main.h"
54
55/* Some common global variables */
56
57/*
58 * Linked list of all detected channels.
59 */
60struct channel *channels;
61
62/**
 63 * Unpack a just-received skb and hand it over to
 64 * the upper layers.
65 *
66 * ch The channel where this skb has been received.
67 * pskb The received skb.
68 */
69void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
70{
71 struct net_device *dev = ch->netdev;
72 struct ctcm_priv *priv = dev->priv;
73 __u16 len = *((__u16 *) pskb->data);
74
75 skb_put(pskb, 2 + LL_HEADER_LENGTH);
76 skb_pull(pskb, 2);
77 pskb->dev = dev;
78 pskb->ip_summed = CHECKSUM_UNNECESSARY;
79 while (len > 0) {
80 struct sk_buff *skb;
81 int skblen;
82 struct ll_header *header = (struct ll_header *)pskb->data;
83
84 skb_pull(pskb, LL_HEADER_LENGTH);
85 if ((ch->protocol == CTCM_PROTO_S390) &&
86 (header->type != ETH_P_IP)) {
87
88 if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
89 /*
 90 * Check the packet type only if we stick
 91 * strictly to the S/390 (OS/390) protocol,
 92 * which supports IP only; otherwise any
 93 * packet type is allowed.
94 */
95 ctcm_pr_warn("%s Illegal packet type 0x%04x "
96 "received, dropping\n",
97 dev->name, header->type);
98 ch->logflags |= LOG_FLAG_ILLEGALPKT;
99 }
100
101 priv->stats.rx_dropped++;
102 priv->stats.rx_frame_errors++;
103 return;
104 }
105 pskb->protocol = ntohs(header->type);
106 if (header->length <= LL_HEADER_LENGTH) {
107 if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
108 ctcm_pr_warn(
109 "%s Illegal packet size %d "
110 "received (MTU=%d blocklen=%d), "
111 "dropping\n", dev->name, header->length,
112 dev->mtu, len);
113 ch->logflags |= LOG_FLAG_ILLEGALSIZE;
114 }
115
116 priv->stats.rx_dropped++;
117 priv->stats.rx_length_errors++;
118 return;
119 }
120 header->length -= LL_HEADER_LENGTH;
121 len -= LL_HEADER_LENGTH;
122 if ((header->length > skb_tailroom(pskb)) ||
123 (header->length > len)) {
124 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
125 ctcm_pr_warn(
126 "%s Illegal packet size %d (beyond the"
127 " end of received data), dropping\n",
128 dev->name, header->length);
129 ch->logflags |= LOG_FLAG_OVERRUN;
130 }
131
132 priv->stats.rx_dropped++;
133 priv->stats.rx_length_errors++;
134 return;
135 }
136 skb_put(pskb, header->length);
137 skb_reset_mac_header(pskb);
138 len -= header->length;
139 skb = dev_alloc_skb(pskb->len);
140 if (!skb) {
141 if (!(ch->logflags & LOG_FLAG_NOMEM)) {
142 ctcm_pr_warn(
143 "%s Out of memory in ctcm_unpack_skb\n",
144 dev->name);
145 ch->logflags |= LOG_FLAG_NOMEM;
146 }
147 priv->stats.rx_dropped++;
148 return;
149 }
150 skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
151 pskb->len);
152 skb_reset_mac_header(skb);
153 skb->dev = pskb->dev;
154 skb->protocol = pskb->protocol;
155 pskb->ip_summed = CHECKSUM_UNNECESSARY;
156 skblen = skb->len;
157 /*
158 * reset logflags
159 */
160 ch->logflags = 0;
161 priv->stats.rx_packets++;
162 priv->stats.rx_bytes += skblen;
163 netif_rx_ni(skb);
164 dev->last_rx = jiffies;
165 if (len > 0) {
166 skb_pull(pskb, header->length);
167 if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
168 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
169 CTCM_DBF_DEV_NAME(TRACE, dev,
170 "Overrun in ctcm_unpack_skb");
171 ch->logflags |= LOG_FLAG_OVERRUN;
172 }
173 return;
174 }
175 skb_put(pskb, LL_HEADER_LENGTH);
176 }
177 }
178}
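
For reference, the framing that ctcm_unpack_skb() undoes is exactly what ctcm_transmit_skb() below builds: a 2-byte block length followed by one or more (ll_header, payload) pairs. A minimal sender-side sketch, lifted from the transmit path; the helper name is hypothetical:

/* hypothetical helper mirroring the framing in ctcm_transmit_skb() */
static void ctcm_pack_ll_frame(struct sk_buff *skb)
{
	struct ll_header header;
	__u16 block_len;

	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);

	/* prepend the block length; +2 accounts for the field itself */
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;
}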
179
180/**
181 * Release a specific channel in the channel list.
182 *
183 * ch Pointer to channel struct to be released.
184 */
185static void channel_free(struct channel *ch)
186{
187 CTCM_DBF_TEXT(TRACE, 2, __FUNCTION__);
188 ch->flags &= ~CHANNEL_FLAGS_INUSE;
189 fsm_newstate(ch->fsm, CTC_STATE_IDLE);
190}
191
192/**
 193 * Remove a specific channel from the channel list.
 194 *
 195 * ch Pointer to channel struct to be removed.
196 */
197static void channel_remove(struct channel *ch)
198{
199 struct channel **c = &channels;
200 char chid[CTCM_ID_SIZE+1];
201 int ok = 0;
202
203 if (ch == NULL)
204 return;
205 else
206 strncpy(chid, ch->id, CTCM_ID_SIZE);
207
208 channel_free(ch);
209 while (*c) {
210 if (*c == ch) {
211 *c = ch->next;
212 fsm_deltimer(&ch->timer);
213 if (IS_MPC(ch))
214 fsm_deltimer(&ch->sweep_timer);
215
216 kfree_fsm(ch->fsm);
217 clear_normalized_cda(&ch->ccw[4]);
218 if (ch->trans_skb != NULL) {
219 clear_normalized_cda(&ch->ccw[1]);
220 dev_kfree_skb_any(ch->trans_skb);
221 }
222 if (IS_MPC(ch)) {
223 tasklet_kill(&ch->ch_tasklet);
224 tasklet_kill(&ch->ch_disc_tasklet);
225 kfree(ch->discontact_th);
226 }
227 kfree(ch->ccw);
228 kfree(ch->irb);
229 kfree(ch);
230 ok = 1;
231 break;
232 }
233 c = &((*c)->next);
234 }
235
236 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s) %s", CTCM_FUNTAIL,
237 chid, ok ? "OK" : "failed");
238}
239
240/**
241 * Get a specific channel from the channel list.
242 *
243 * type Type of channel we are interested in.
244 * id Id of channel we are interested in.
245 * direction Direction we want to use this channel for.
246 *
247 * returns Pointer to a channel or NULL if no matching channel available.
248 */
249static struct channel *channel_get(enum channel_types type,
250 char *id, int direction)
251{
252 struct channel *ch = channels;
253
254 if (do_debug) {
255 char buf[64];
256 sprintf(buf, "%s(%d, %s, %d)\n",
257 CTCM_FUNTAIL, type, id, direction);
258 CTCM_DBF_TEXT(TRACE, CTC_DBF_INFO, buf);
259 }
260 while (ch && (strncmp(ch->id, id, CTCM_ID_SIZE) || (ch->type != type)))
261 ch = ch->next;
262 if (!ch) {
263 char buf[64];
264 sprintf(buf, "%s(%d, %s, %d) not found in channel list\n",
265 CTCM_FUNTAIL, type, id, direction);
266 CTCM_DBF_TEXT(ERROR, CTC_DBF_ERROR, buf);
267 } else {
268 if (ch->flags & CHANNEL_FLAGS_INUSE)
269 ch = NULL;
270 else {
271 ch->flags |= CHANNEL_FLAGS_INUSE;
272 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
273 ch->flags |= (direction == WRITE)
274 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
275 fsm_newstate(ch->fsm, CTC_STATE_STOPPED);
276 }
277 }
278 return ch;
279}
280
281static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb)
282{
283 if (!IS_ERR(irb))
284 return 0;
285
286 CTCM_DBF_TEXT_(ERROR, CTC_DBF_WARN, "irb error %ld on device %s\n",
287 PTR_ERR(irb), cdev->dev.bus_id);
288
289 switch (PTR_ERR(irb)) {
290 case -EIO:
291 ctcm_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
292 break;
293 case -ETIMEDOUT:
294 ctcm_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
295 break;
296 default:
297 ctcm_pr_warn("unknown error %ld on device %s\n",
298 PTR_ERR(irb), cdev->dev.bus_id);
299 }
300 return PTR_ERR(irb);
301}
302
303
304/**
305 * Check sense of a unit check.
306 *
307 * ch The channel, the sense code belongs to.
308 * sense The sense code to inspect.
309 */
310static inline void ccw_unit_check(struct channel *ch, unsigned char sense)
311{
312 CTCM_DBF_TEXT(TRACE, 5, __FUNCTION__);
313 if (sense & SNS0_INTERVENTION_REQ) {
314 if (sense & 0x01) {
315 ctcm_pr_debug("%s: Interface disc. or Sel. reset "
316 "(remote)\n", ch->id);
317 fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch);
318 } else {
319 ctcm_pr_debug("%s: System reset (remote)\n", ch->id);
320 fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch);
321 }
322 } else if (sense & SNS0_EQUIPMENT_CHECK) {
323 if (sense & SNS0_BUS_OUT_CHECK) {
324 ctcm_pr_warn("%s: Hardware malfunction (remote)\n",
325 ch->id);
326 fsm_event(ch->fsm, CTC_EVENT_UC_HWFAIL, ch);
327 } else {
328 ctcm_pr_warn("%s: Read-data parity error (remote)\n",
329 ch->id);
330 fsm_event(ch->fsm, CTC_EVENT_UC_RXPARITY, ch);
331 }
332 } else if (sense & SNS0_BUS_OUT_CHECK) {
333 if (sense & 0x04) {
 334 ctcm_pr_warn("%s: Data-streaming timeout\n", ch->id);
335 fsm_event(ch->fsm, CTC_EVENT_UC_TXTIMEOUT, ch);
336 } else {
337 ctcm_pr_warn("%s: Data-transfer parity error\n",
338 ch->id);
339 fsm_event(ch->fsm, CTC_EVENT_UC_TXPARITY, ch);
340 }
341 } else if (sense & SNS0_CMD_REJECT) {
342 ctcm_pr_warn("%s: Command reject\n", ch->id);
343 } else if (sense == 0) {
344 ctcm_pr_debug("%s: Unit check ZERO\n", ch->id);
345 fsm_event(ch->fsm, CTC_EVENT_UC_ZERO, ch);
346 } else {
347 ctcm_pr_warn("%s: Unit Check with sense code: %02x\n",
348 ch->id, sense);
349 fsm_event(ch->fsm, CTC_EVENT_UC_UNKNOWN, ch);
350 }
351}
352
353int ctcm_ch_alloc_buffer(struct channel *ch)
354{
355 CTCM_DBF_TEXT(TRACE, 5, __FUNCTION__);
356
357 clear_normalized_cda(&ch->ccw[1]);
358 ch->trans_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC | GFP_DMA);
359 if (ch->trans_skb == NULL) {
360 ctcm_pr_warn("%s: Couldn't alloc %s trans_skb\n",
361 ch->id,
362 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
363 return -ENOMEM;
364 }
365
366 ch->ccw[1].count = ch->max_bufsize;
367 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
368 dev_kfree_skb(ch->trans_skb);
369 ch->trans_skb = NULL;
370 ctcm_pr_warn("%s: set_normalized_cda for %s "
371 "trans_skb failed, dropping packets\n",
372 ch->id,
373 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
374 return -ENOMEM;
375 }
376
377 ch->ccw[1].count = 0;
378 ch->trans_skb_data = ch->trans_skb->data;
379 ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
380 return 0;
381}
382
383/*
384 * Interface API for upper network layers
385 */
386
387/**
388 * Open an interface.
389 * Called from generic network layer when ifconfig up is run.
390 *
391 * dev Pointer to interface struct.
392 *
 393 * returns 0 (this function never fails).
394 */
395int ctcm_open(struct net_device *dev)
396{
397 struct ctcm_priv *priv = dev->priv;
398
399 CTCMY_DBF_DEV_NAME(SETUP, dev, "");
400 if (!IS_MPC(priv))
401 fsm_event(priv->fsm, DEV_EVENT_START, dev);
402 return 0;
403}
404
405/**
406 * Close an interface.
407 * Called from generic network layer when ifconfig down is run.
408 *
409 * dev Pointer to interface struct.
410 *
 411 * returns 0 (this function never fails).
412 */
413int ctcm_close(struct net_device *dev)
414{
415 struct ctcm_priv *priv = dev->priv;
416
417 CTCMY_DBF_DEV_NAME(SETUP, dev, "");
418 if (!IS_MPC(priv))
419 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
420 return 0;
421}
422
423
424/**
425 * Transmit a packet.
426 * This is a helper function for ctcm_tx().
427 *
428 * ch Channel to be used for sending.
429 * skb Pointer to struct sk_buff of packet to send.
430 * The linklevel header has already been set up
431 * by ctcm_tx().
432 *
 433 * returns 0 on success, -ERRNO on failure.
434 */
435static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
436{
437 unsigned long saveflags;
438 struct ll_header header;
439 int rc = 0;
440 __u16 block_len;
441 int ccw_idx;
442 struct sk_buff *nskb;
443 unsigned long hi;
444
445 /* we need to acquire the lock for testing the state
446 * otherwise we can have an IRQ changing the state to
447 * TXIDLE after the test but before acquiring the lock.
448 */
449 spin_lock_irqsave(&ch->collect_lock, saveflags);
450 if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
451 int l = skb->len + LL_HEADER_LENGTH;
452
453 if (ch->collect_len + l > ch->max_bufsize - 2) {
454 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
455 return -EBUSY;
456 } else {
457 atomic_inc(&skb->users);
458 header.length = l;
459 header.type = skb->protocol;
460 header.unused = 0;
461 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
462 LL_HEADER_LENGTH);
463 skb_queue_tail(&ch->collect_queue, skb);
464 ch->collect_len += l;
465 }
466 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
467 goto done;
468 }
469 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
470 /*
 471 * Protect skb against being freed by upper
 472 * layers.
473 */
474 atomic_inc(&skb->users);
475 ch->prof.txlen += skb->len;
476 header.length = skb->len + LL_HEADER_LENGTH;
477 header.type = skb->protocol;
478 header.unused = 0;
479 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
480 block_len = skb->len + 2;
481 *((__u16 *)skb_push(skb, 2)) = block_len;
482
483 /*
484 * IDAL support in CTCM is broken, so we have to
485 * care about skb's above 2G ourselves.
486 */
487 hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
488 if (hi) {
489 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
490 if (!nskb) {
491 atomic_dec(&skb->users);
492 skb_pull(skb, LL_HEADER_LENGTH + 2);
493 ctcm_clear_busy(ch->netdev);
494 return -ENOMEM;
495 } else {
496 memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
497 atomic_inc(&nskb->users);
498 atomic_dec(&skb->users);
499 dev_kfree_skb_irq(skb);
500 skb = nskb;
501 }
502 }
503
504 ch->ccw[4].count = block_len;
505 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
506 /*
507 * idal allocation failed, try via copying to
508 * trans_skb. trans_skb usually has a pre-allocated
509 * idal.
510 */
511 if (ctcm_checkalloc_buffer(ch)) {
512 /*
513 * Remove our header. It gets added
514 * again on retransmit.
515 */
516 atomic_dec(&skb->users);
517 skb_pull(skb, LL_HEADER_LENGTH + 2);
518 ctcm_clear_busy(ch->netdev);
519 return -EBUSY;
520 }
521
522 skb_reset_tail_pointer(ch->trans_skb);
523 ch->trans_skb->len = 0;
524 ch->ccw[1].count = skb->len;
525 skb_copy_from_linear_data(skb,
526 skb_put(ch->trans_skb, skb->len), skb->len);
527 atomic_dec(&skb->users);
528 dev_kfree_skb_irq(skb);
529 ccw_idx = 0;
530 } else {
531 skb_queue_tail(&ch->io_queue, skb);
532 ccw_idx = 3;
533 }
534 ch->retry = 0;
535 fsm_newstate(ch->fsm, CTC_STATE_TX);
536 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
537 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
538 ch->prof.send_stamp = current_kernel_time(); /* xtime */
539 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
540 (unsigned long)ch, 0xff, 0);
541 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
542 if (ccw_idx == 3)
543 ch->prof.doios_single++;
544 if (rc != 0) {
545 fsm_deltimer(&ch->timer);
546 ctcm_ccw_check_rc(ch, rc, "single skb TX");
547 if (ccw_idx == 3)
548 skb_dequeue_tail(&ch->io_queue);
549 /*
550 * Remove our header. It gets added
551 * again on retransmit.
552 */
553 skb_pull(skb, LL_HEADER_LENGTH + 2);
554 } else if (ccw_idx == 0) {
555 struct net_device *dev = ch->netdev;
556 struct ctcm_priv *priv = dev->priv;
557 priv->stats.tx_packets++;
558 priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
559 }
560done:
561 ctcm_clear_busy(ch->netdev);
562 return rc;
563}
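
A note on the >> 31 test above: with IDAL support broken, the CCW data address must fit in 31 bits, so any buffer whose tail lies at or beyond the 2 GiB line is first copied into a GFP_DMA skb. A worked example, using LL_HEADER_LENGTH = 6 (three __u16 fields, per ctcm_main.h):

/*
 * hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
 *
 * tail = 0x7FFFFF00 (just below 2 GiB): (0x7FFFFF00 + 6) >> 31 == 0
 *	-> skb is used in place
 * tail = 0x80000000 (at 2 GiB):	 (0x80000000 + 6) >> 31 == 1
 *	-> data is copied to a fresh GFP_ATOMIC | GFP_DMA skb below 2 GiB
 */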
564
565static void ctcmpc_send_sweep_req(struct channel *rch)
566{
567 struct net_device *dev = rch->netdev;
568 struct ctcm_priv *priv;
569 struct mpc_group *grp;
570 struct th_sweep *header;
571 struct sk_buff *sweep_skb;
572 struct channel *ch;
573 int rc = 0;
574
575 priv = dev->priv;
576 grp = priv->mpcg;
577 ch = priv->channel[WRITE];
578
579 if (do_debug)
580 MPC_DBF_DEV_NAME(TRACE, dev, ch->id);
581
 582 /* sweep processing is not complete until request and response */
 583 /* have completed for all read channels in the group */
584 if (grp->in_sweep == 0) {
585 grp->in_sweep = 1;
586 grp->sweep_rsp_pend_num = grp->active_channels[READ];
587 grp->sweep_req_pend_num = grp->active_channels[READ];
588 }
589
590 sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
591
592 if (sweep_skb == NULL) {
593 printk(KERN_INFO "Couldn't alloc sweep_skb\n");
594 rc = -ENOMEM;
595 goto done;
596 }
597
598 header = kmalloc(TH_SWEEP_LENGTH, gfp_type());
599
600 if (!header) {
601 dev_kfree_skb_any(sweep_skb);
602 rc = -ENOMEM;
603 goto done;
604 }
605
 606 header->th.th_seg = 0x00;
607 header->th.th_ch_flag = TH_SWEEP_REQ; /* 0x0f */
608 header->th.th_blk_flag = 0x00;
609 header->th.th_is_xid = 0x00;
610 header->th.th_seq_num = 0x00;
611 header->sw.th_last_seq = ch->th_seq_num;
612
613 memcpy(skb_put(sweep_skb, TH_SWEEP_LENGTH), header, TH_SWEEP_LENGTH);
614
615 kfree(header);
616
617 dev->trans_start = jiffies;
618 skb_queue_tail(&ch->sweep_queue, sweep_skb);
619
620 fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
621
622 return;
623
624done:
625 if (rc != 0) {
626 grp->in_sweep = 0;
627 ctcm_clear_busy(dev);
628 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
629 }
630
631 return;
632}
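
The sweep request built above is a small control frame; the sketch below is read off the field assignments in the function. The peer's sweep response decrements grp->sweep_rsp_pend_num, and normal transmission resumes once all read channels have answered:

/*
 * Sweep request as assembled above (TH_SWEEP_LENGTH bytes):
 *
 *	th.th_ch_flag			= TH_SWEEP_REQ (0x0f)
 *	th.th_seg/blk_flag/is_xid/seq	= 0
 *	sw.th_last_seq			= ch->th_seq_num
 *					  (last TH sequence number sent)
 *
 * grp->in_sweep stays set until request and response have completed
 * for every read channel in the group.
 */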
633
634/*
635 * MPC mode version of transmit_skb
636 */
637static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
638{
639 struct pdu *p_header;
640 struct net_device *dev = ch->netdev;
641 struct ctcm_priv *priv = dev->priv;
642 struct mpc_group *grp = priv->mpcg;
643 struct th_header *header;
644 struct sk_buff *nskb;
645 int rc = 0;
646 int ccw_idx;
647 unsigned long hi;
648 unsigned long saveflags = 0; /* avoids compiler warning */
649 __u16 block_len;
650
651 if (do_debug)
652 ctcm_pr_debug(
653 "ctcm enter: %s(): %s cp=%i ch=0x%p id=%s state=%s\n",
654 __FUNCTION__, dev->name, smp_processor_id(), ch,
655 ch->id, fsm_getstate_str(ch->fsm));
656
657 if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
658 spin_lock_irqsave(&ch->collect_lock, saveflags);
659 atomic_inc(&skb->users);
660 p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
661
662 if (!p_header) {
663 printk(KERN_WARNING "ctcm: OUT OF MEMORY IN %s():"
664 " Data Lost \n", __FUNCTION__);
665
666 atomic_dec(&skb->users);
667 dev_kfree_skb_any(skb);
668 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
669 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
670 goto done;
671 }
672
673 p_header->pdu_offset = skb->len;
674 p_header->pdu_proto = 0x01;
675 p_header->pdu_flag = 0x00;
676 if (skb->protocol == ntohs(ETH_P_SNAP)) {
677 p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
678 } else {
679 p_header->pdu_flag |= PDU_FIRST;
680 }
681 p_header->pdu_seq = 0;
682 memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header,
683 PDU_HEADER_LENGTH);
684
685 if (do_debug_data) {
686 ctcm_pr_debug("ctcm: %s() Putting on collect_q"
687 " - skb len: %04x \n", __FUNCTION__, skb->len);
688 ctcm_pr_debug("ctcm: %s() pdu header and data"
689 " for up to 32 bytes\n", __FUNCTION__);
690 ctcmpc_dump32((char *)skb->data, skb->len);
691 }
692
693 skb_queue_tail(&ch->collect_queue, skb);
694 ch->collect_len += skb->len;
695 kfree(p_header);
696
697 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
698 goto done;
699 }
700
701 /*
 702 * Protect skb against being freed by upper
 703 * layers.
704 */
705 atomic_inc(&skb->users);
706
707 block_len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
708 /*
709 * IDAL support in CTCM is broken, so we have to
710 * care about skb's above 2G ourselves.
711 */
712 hi = ((unsigned long)skb->tail + TH_HEADER_LENGTH) >> 31;
713 if (hi) {
714 nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
715 if (!nskb) {
716 printk(KERN_WARNING "ctcm: %s() OUT OF MEMORY"
717 "- Data Lost \n", __FUNCTION__);
718 atomic_dec(&skb->users);
719 dev_kfree_skb_any(skb);
720 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
721 goto done;
722 } else {
723 memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
724 atomic_inc(&nskb->users);
725 atomic_dec(&skb->users);
726 dev_kfree_skb_irq(skb);
727 skb = nskb;
728 }
729 }
730
731 p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
732
733 if (!p_header) {
734 printk(KERN_WARNING "ctcm: %s() OUT OF MEMORY"
735 ": Data Lost \n", __FUNCTION__);
736
737 atomic_dec(&skb->users);
738 dev_kfree_skb_any(skb);
739 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
740 goto done;
741 }
742
743 p_header->pdu_offset = skb->len;
744 p_header->pdu_proto = 0x01;
745 p_header->pdu_flag = 0x00;
746 p_header->pdu_seq = 0;
747 if (skb->protocol == ntohs(ETH_P_SNAP)) {
748 p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
749 } else {
750 p_header->pdu_flag |= PDU_FIRST;
751 }
752 memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header, PDU_HEADER_LENGTH);
753
754 kfree(p_header);
755
756 if (ch->collect_len > 0) {
757 spin_lock_irqsave(&ch->collect_lock, saveflags);
758 skb_queue_tail(&ch->collect_queue, skb);
759 ch->collect_len += skb->len;
760 skb = skb_dequeue(&ch->collect_queue);
761 ch->collect_len -= skb->len;
762 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
763 }
764
765 p_header = (struct pdu *)skb->data;
766 p_header->pdu_flag |= PDU_LAST;
767
768 ch->prof.txlen += skb->len - PDU_HEADER_LENGTH;
769
770 header = kmalloc(TH_HEADER_LENGTH, gfp_type());
771
772 if (!header) {
773 printk(KERN_WARNING "ctcm: %s() OUT OF MEMORY: Data Lost \n",
774 __FUNCTION__);
775 atomic_dec(&skb->users);
776 dev_kfree_skb_any(skb);
777 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
778 goto done;
779 }
780
781 header->th_seg = 0x00;
782 header->th_ch_flag = TH_HAS_PDU; /* Normal data */
783 header->th_blk_flag = 0x00;
784 header->th_is_xid = 0x00; /* Just data here */
785 ch->th_seq_num++;
786 header->th_seq_num = ch->th_seq_num;
787
788 if (do_debug_data)
789 ctcm_pr_debug("ctcm: %s() ToVTAM_th_seq= %08x\n" ,
790 __FUNCTION__, ch->th_seq_num);
791
792 /* put the TH on the packet */
793 memcpy(skb_push(skb, TH_HEADER_LENGTH), header, TH_HEADER_LENGTH);
794
795 kfree(header);
796
797 if (do_debug_data) {
798 ctcm_pr_debug("ctcm: %s(): skb len: %04x \n",
799 __FUNCTION__, skb->len);
800 ctcm_pr_debug("ctcm: %s(): pdu header and data for up to 32 "
801 "bytes sent to vtam\n", __FUNCTION__);
802 ctcmpc_dump32((char *)skb->data, skb->len);
803 }
804
805 ch->ccw[4].count = skb->len;
806 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
807 /*
808 * idal allocation failed, try via copying to
809 * trans_skb. trans_skb usually has a pre-allocated
810 * idal.
811 */
812 if (ctcm_checkalloc_buffer(ch)) {
813 /*
814 * Remove our header. It gets added
815 * again on retransmit.
816 */
817 atomic_dec(&skb->users);
818 dev_kfree_skb_any(skb);
819 printk(KERN_WARNING "ctcm: %s()OUT OF MEMORY:"
820 " Data Lost \n", __FUNCTION__);
821 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
822 goto done;
823 }
824
825 skb_reset_tail_pointer(ch->trans_skb);
826 ch->trans_skb->len = 0;
827 ch->ccw[1].count = skb->len;
828 memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
829 atomic_dec(&skb->users);
830 dev_kfree_skb_irq(skb);
831 ccw_idx = 0;
832 if (do_debug_data) {
833 ctcm_pr_debug("ctcm: %s() TRANS skb len: %d \n",
834 __FUNCTION__, ch->trans_skb->len);
835 ctcm_pr_debug("ctcm: %s up to 32 bytes of data"
836 " sent to vtam\n", __FUNCTION__);
837 ctcmpc_dump32((char *)ch->trans_skb->data,
838 ch->trans_skb->len);
839 }
840 } else {
841 skb_queue_tail(&ch->io_queue, skb);
842 ccw_idx = 3;
843 }
844 ch->retry = 0;
845 fsm_newstate(ch->fsm, CTC_STATE_TX);
846 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
847
848 if (do_debug_ccw)
849 ctcmpc_dumpit((char *)&ch->ccw[ccw_idx],
850 sizeof(struct ccw1) * 3);
851
852 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
853 ch->prof.send_stamp = current_kernel_time(); /* xtime */
854 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
855 (unsigned long)ch, 0xff, 0);
856 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
857 if (ccw_idx == 3)
858 ch->prof.doios_single++;
859 if (rc != 0) {
860 fsm_deltimer(&ch->timer);
861 ctcm_ccw_check_rc(ch, rc, "single skb TX");
862 if (ccw_idx == 3)
863 skb_dequeue_tail(&ch->io_queue);
864 } else if (ccw_idx == 0) {
865 priv->stats.tx_packets++;
866 priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
867 }
 868 if (ch->th_seq_num > 0xf0000000) /* arbitrary threshold, ~4 billion */
869 ctcmpc_send_sweep_req(ch);
870
871done:
872 if (do_debug)
873 ctcm_pr_debug("ctcm exit: %s %s()\n", dev->name, __FUNCTION__);
874 return 0;
875}
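
Putting the pieces together, the on-wire MPC frame produced above looks as follows; the layout is read off the skb_push() calls, and the sweep threshold comes from the check at the end of the function:

/*
 * MPC frame layout built by ctcmpc_transmit_skb():
 *
 *	+------------------+-------------------+-----------+
 *	| struct th_header | struct pdu        | payload   |
 *	| th_ch_flag =     | PDU_FIRST (and    | (skb data)|
 *	| TH_HAS_PDU,      | PDU_LAST on the   |           |
 *	| th_seq_num++     | final PDU)        |           |
 *	+------------------+-------------------+-----------+
 *
 * Once th_seq_num exceeds 0xf0000000, a sweep request is queued to
 * resynchronize sequence numbers with the peer.
 */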
876
877/**
878 * Start transmission of a packet.
879 * Called from generic network device layer.
880 *
881 * skb Pointer to buffer containing the packet.
882 * dev Pointer to interface struct.
883 *
884 * returns 0 if packet consumed, !0 if packet rejected.
885 * Note: If we return !0, then the packet is free'd by
886 * the generic network layer.
887 */
888/* first merge version - leaving both functions separated */
889static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
890{
891 int rc = 0;
892 struct ctcm_priv *priv;
893
894 CTCM_DBF_TEXT(TRACE, 5, __FUNCTION__);
895 priv = dev->priv;
896
897 if (skb == NULL) {
898 ctcm_pr_warn("%s: NULL sk_buff passed\n", dev->name);
899 priv->stats.tx_dropped++;
900 return 0;
901 }
902 if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
903 ctcm_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
904 dev->name, LL_HEADER_LENGTH + 2);
905 dev_kfree_skb(skb);
906 priv->stats.tx_dropped++;
907 return 0;
908 }
909
910 /*
911 * If channels are not running, try to restart them
912 * and throw away packet.
913 */
914 if (fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) {
915 fsm_event(priv->fsm, DEV_EVENT_START, dev);
916 dev_kfree_skb(skb);
917 priv->stats.tx_dropped++;
918 priv->stats.tx_errors++;
919 priv->stats.tx_carrier_errors++;
920 return 0;
921 }
922
923 if (ctcm_test_and_set_busy(dev))
924 return -EBUSY;
925
926 dev->trans_start = jiffies;
927 if (ctcm_transmit_skb(priv->channel[WRITE], skb) != 0)
928 rc = 1;
929 return rc;
930}
931
932/* unmerged MPC variant of ctcm_tx */
933static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
934{
935 int len = 0;
936 struct ctcm_priv *priv = NULL;
937 struct mpc_group *grp = NULL;
938 struct sk_buff *newskb = NULL;
939
940 if (do_debug)
941 ctcm_pr_debug("ctcmpc enter: %s(): skb:%0lx\n",
942 __FUNCTION__, (unsigned long)skb);
943
944 CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
945 "ctcmpc enter: %s(): skb:%0lx\n",
946 __FUNCTION__, (unsigned long)skb);
947
948 priv = dev->priv;
949 grp = priv->mpcg;
950 /*
951 * Some sanity checks ...
952 */
953 if (skb == NULL) {
954 ctcm_pr_warn("ctcmpc: %s: NULL sk_buff passed\n", dev->name);
955 priv->stats.tx_dropped++;
956 goto done;
957 }
958 if (skb_headroom(skb) < (TH_HEADER_LENGTH + PDU_HEADER_LENGTH)) {
959 CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_WARN,
960 "%s: Got sk_buff with head room < %ld bytes\n",
961 dev->name, TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
962
963 if (do_debug_data)
964 ctcmpc_dump32((char *)skb->data, skb->len);
965
966 len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
967 newskb = __dev_alloc_skb(len, gfp_type() | GFP_DMA);
968
969 if (!newskb) {
970 printk(KERN_WARNING "ctcmpc: %s() OUT OF MEMORY-"
971 "Data Lost\n",
972 __FUNCTION__);
973
974 dev_kfree_skb_any(skb);
975 priv->stats.tx_dropped++;
976 priv->stats.tx_errors++;
977 priv->stats.tx_carrier_errors++;
978 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
979 goto done;
980 }
981 newskb->protocol = skb->protocol;
982 skb_reserve(newskb, TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
983 memcpy(skb_put(newskb, skb->len), skb->data, skb->len);
984 dev_kfree_skb_any(skb);
985 skb = newskb;
986 }
987
988 /*
989 * If channels are not running,
990 * notify anybody about a link failure and throw
991 * away packet.
992 */
993 if ((fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) ||
994 (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
995 dev_kfree_skb_any(skb);
996 printk(KERN_INFO "ctcmpc: %s() DATA RCVD - MPC GROUP "
997 "NOT ACTIVE - DROPPED\n",
998 __FUNCTION__);
999 priv->stats.tx_dropped++;
1000 priv->stats.tx_errors++;
1001 priv->stats.tx_carrier_errors++;
1002 goto done;
1003 }
1004
1005 if (ctcm_test_and_set_busy(dev)) {
1006 printk(KERN_WARNING "%s:DEVICE ERR - UNRECOVERABLE DATA LOSS\n",
1007 __FUNCTION__);
1008 dev_kfree_skb_any(skb);
1009 priv->stats.tx_dropped++;
1010 priv->stats.tx_errors++;
1011 priv->stats.tx_carrier_errors++;
1012 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1013 goto done;
1014 }
1015
1016 dev->trans_start = jiffies;
1017 if (ctcmpc_transmit_skb(priv->channel[WRITE], skb) != 0) {
1018 printk(KERN_WARNING "ctcmpc: %s() DEVICE ERROR"
1019 ": Data Lost \n",
1020 __FUNCTION__);
1021 printk(KERN_WARNING "ctcmpc: %s() DEVICE ERROR"
1022 " - UNRECOVERABLE DATA LOSS\n",
1023 __FUNCTION__);
1024 dev_kfree_skb_any(skb);
1025 priv->stats.tx_dropped++;
1026 priv->stats.tx_errors++;
1027 priv->stats.tx_carrier_errors++;
1028 ctcm_clear_busy(dev);
1029 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1030 goto done;
1031 }
1032 ctcm_clear_busy(dev);
1033done:
1034 if (do_debug)
1035 MPC_DBF_DEV_NAME(TRACE, dev, "exit");
1036
1037 return 0; /* handle freeing of skb here */
1038}
1039
1040
1041/**
1042 * Sets MTU of an interface.
1043 *
1044 * dev Pointer to interface struct.
1045 * new_mtu The new MTU to use for this interface.
1046 *
1047 * returns 0 on success, -EINVAL if MTU is out of valid range.
1048 * (valid range is 576 .. 65527). If VM is on the
1049 * remote side, the maximum MTU is 32760; however,
1050 * this is not checked here.
1051 */
1052static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
1053{
1054 struct ctcm_priv *priv;
1055 int max_bufsize;
1056
1057 CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__);
1058
1059 if (new_mtu < 576 || new_mtu > 65527)
1060 return -EINVAL;
1061
1062 priv = dev->priv;
1063 max_bufsize = priv->channel[READ]->max_bufsize;
1064
1065 if (IS_MPC(priv)) {
1066 if (new_mtu > max_bufsize - TH_HEADER_LENGTH)
1067 return -EINVAL;
1068 dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
1069 } else {
1070 if (new_mtu > max_bufsize - LL_HEADER_LENGTH - 2)
1071 return -EINVAL;
1072 dev->hard_header_len = LL_HEADER_LENGTH + 2;
1073 }
1074 dev->mtu = new_mtu;
1075 return 0;
1076}
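
A quick worked example of the bounds above, using the buffer-size defaults from ctcm_main.h and LL_HEADER_LENGTH = 6 (three __u16 fields):

/*
 * non-MPC, max_bufsize = CTCM_BUFSIZE_DEFAULT = 32768:
 *	largest accepted MTU = 32768 - 6 - 2 = 32760
 *	(matches the VM limit quoted in the comment above)
 *
 * MPC, max_bufsize = MPC_BUFSIZE_DEFAULT = 65535:
 *	largest accepted MTU = 65535 - TH_HEADER_LENGTH
 */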
1077
1078/**
1079 * Returns interface statistics of a device.
1080 *
1081 * dev Pointer to interface struct.
1082 *
1083 * returns Pointer to stats struct of this interface.
1084 */
1085static struct net_device_stats *ctcm_stats(struct net_device *dev)
1086{
1087 return &((struct ctcm_priv *)dev->priv)->stats;
1088}
1089
1090
1091static void ctcm_netdev_unregister(struct net_device *dev)
1092{
1093 CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__);
1094 if (!dev)
1095 return;
1096 unregister_netdev(dev);
1097}
1098
1099static int ctcm_netdev_register(struct net_device *dev)
1100{
1101 CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__);
1102 return register_netdev(dev);
1103}
1104
1105static void ctcm_free_netdevice(struct net_device *dev)
1106{
1107 struct ctcm_priv *priv;
1108 struct mpc_group *grp;
1109
1110 CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__);
1111
1112 if (!dev)
1113 return;
1114 priv = dev->priv;
1115 if (priv) {
1116 grp = priv->mpcg;
1117 if (grp) {
1118 if (grp->fsm)
1119 kfree_fsm(grp->fsm);
1120 if (grp->xid_skb)
1121 dev_kfree_skb(grp->xid_skb);
1122 if (grp->rcvd_xid_skb)
1123 dev_kfree_skb(grp->rcvd_xid_skb);
1124 tasklet_kill(&grp->mpc_tasklet2);
1125 kfree(grp);
1126 priv->mpcg = NULL;
1127 }
1128 if (priv->fsm) {
1129 kfree_fsm(priv->fsm);
1130 priv->fsm = NULL;
1131 }
1132 kfree(priv->xid);
1133 priv->xid = NULL;
1134 /*
1135 * Note: kfree(priv); is done in "opposite" function of
1136 * allocator function probe_device which is remove_device.
1137 */
1138 }
1139#ifdef MODULE
1140 free_netdev(dev);
1141#endif
1142}
1143
1144struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);
1145
1146static void ctcm_dev_setup(struct net_device *dev)
1147{
1148 dev->open = ctcm_open;
1149 dev->stop = ctcm_close;
1150 dev->get_stats = ctcm_stats;
1151 dev->change_mtu = ctcm_change_mtu;
1152 dev->type = ARPHRD_SLIP;
1153 dev->tx_queue_len = 100;
1154 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1155}
1156
1157/*
1158 * Initialize everything of the net device except the name and the
1159 * channel structs.
1160 */
1161static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
1162{
1163 struct net_device *dev;
1164 struct mpc_group *grp;
1165 if (!priv)
1166 return NULL;
1167
1168 if (IS_MPC(priv))
1169 dev = alloc_netdev(0, MPC_DEVICE_GENE, ctcm_dev_setup);
1170 else
1171 dev = alloc_netdev(0, CTC_DEVICE_GENE, ctcm_dev_setup);
1172
1173 if (!dev) {
1174 ctcm_pr_err("%s: Out of memory\n", __FUNCTION__);
1175 return NULL;
1176 }
1177 dev->priv = priv;
1178 priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names,
1179 CTCM_NR_DEV_STATES, CTCM_NR_DEV_EVENTS,
1180 dev_fsm, dev_fsm_len, GFP_KERNEL);
1181 if (priv->fsm == NULL) {
1182 CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
1183 kfree(dev);
1184 return NULL;
1185 }
1186 fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
1187 fsm_settimer(priv->fsm, &priv->restart_timer);
1188
1189 if (IS_MPC(priv)) {
1190 /* MPC Group Initializations */
1191 grp = ctcmpc_init_mpc_group(priv);
1192 if (grp == NULL) {
1193 MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
1194 kfree(dev);
1195 return NULL;
1196 }
1197 tasklet_init(&grp->mpc_tasklet2,
1198 mpc_group_ready, (unsigned long)dev);
1199 dev->mtu = MPC_BUFSIZE_DEFAULT -
1200 TH_HEADER_LENGTH - PDU_HEADER_LENGTH;
1201
1202 dev->hard_start_xmit = ctcmpc_tx;
1203 dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
1204 priv->buffer_size = MPC_BUFSIZE_DEFAULT;
1205 } else {
1206 dev->mtu = CTCM_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
1207 dev->hard_start_xmit = ctcm_tx;
1208 dev->hard_header_len = LL_HEADER_LENGTH + 2;
1209 }
1210
1211 CTCMY_DBF_DEV(SETUP, dev, "finished");
1212 return dev;
1213}
1214
1215/**
1216 * Main IRQ handler.
1217 *
1218 * cdev The ccw_device the interrupt is for.
1219 * intparm interruption parameter.
1220 * irb interruption response block.
1221 */
1222static void ctcm_irq_handler(struct ccw_device *cdev,
1223 unsigned long intparm, struct irb *irb)
1224{
1225 struct channel *ch;
1226 struct net_device *dev;
1227 struct ctcm_priv *priv;
1228 struct ccwgroup_device *cgdev;
1229
1230 CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __FUNCTION__);
1231 if (ctcm_check_irb_error(cdev, irb))
1232 return;
1233
1234 cgdev = dev_get_drvdata(&cdev->dev);
1235
1236 /* Check for unsolicited interrupts. */
1237 if (cgdev == NULL) {
1238 ctcm_pr_warn("ctcm: Got unsolicited irq: %s c-%02x d-%02x\n",
1239 cdev->dev.bus_id, irb->scsw.cstat,
1240 irb->scsw.dstat);
1241 return;
1242 }
1243
1244 priv = dev_get_drvdata(&cgdev->dev);
1245
1246 /* Try to extract channel from driver data. */
1247 if (priv->channel[READ]->cdev == cdev)
1248 ch = priv->channel[READ];
1249 else if (priv->channel[WRITE]->cdev == cdev)
1250 ch = priv->channel[WRITE];
1251 else {
1252 ctcm_pr_err("ctcm: Can't determine channel for interrupt, "
1253 "device %s\n", cdev->dev.bus_id);
1254 return;
1255 }
1256
1257 dev = (struct net_device *)(ch->netdev);
1258 if (dev == NULL) {
1259 ctcm_pr_crit("ctcm: %s dev=NULL bus_id=%s, ch=0x%p\n",
1260 __FUNCTION__, cdev->dev.bus_id, ch);
1261 return;
1262 }
1263
1264 if (do_debug)
1265 ctcm_pr_debug("%s: interrupt for device: %s "
1266 "received c-%02x d-%02x\n",
1267 dev->name,
1268 ch->id,
1269 irb->scsw.cstat,
1270 irb->scsw.dstat);
1271
1272 /* Copy interruption response block. */
1273 memcpy(ch->irb, irb, sizeof(struct irb));
1274
1275 /* Check for good subchannel return code, otherwise error message */
1276 if (irb->scsw.cstat) {
1277 fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch);
1278 ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n",
1279 dev->name, ch->id, irb->scsw.cstat,
1280 irb->scsw.dstat);
1281 return;
1282 }
1283
1284 /* Check the reason-code of a unit check */
1285 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
1286 ccw_unit_check(ch, irb->ecw[0]);
1287 return;
1288 }
1289 if (irb->scsw.dstat & DEV_STAT_BUSY) {
1290 if (irb->scsw.dstat & DEV_STAT_ATTENTION)
1291 fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch);
1292 else
1293 fsm_event(ch->fsm, CTC_EVENT_BUSY, ch);
1294 return;
1295 }
1296 if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
1297 fsm_event(ch->fsm, CTC_EVENT_ATTN, ch);
1298 return;
1299 }
1300 if ((irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
1301 (irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
1302 (irb->scsw.stctl ==
1303 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
1304 fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch);
1305 else
1306 fsm_event(ch->fsm, CTC_EVENT_IRQ, ch);
1307
1308}
1309
1310/**
1311 * Add ctcm specific attributes.
1312 * Add ctcm private data.
1313 *
1314 * cgdev pointer to ccwgroup_device just added
1315 *
1316 * returns 0 on success, !0 on failure.
1317 */
1318static int ctcm_probe_device(struct ccwgroup_device *cgdev)
1319{
1320 struct ctcm_priv *priv;
1321 int rc;
1322
1323 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s %p", __FUNCTION__, cgdev);
1324
1325 if (!get_device(&cgdev->dev))
1326 return -ENODEV;
1327
1328 priv = kzalloc(sizeof(struct ctcm_priv), GFP_KERNEL);
1329 if (!priv) {
1330 ctcm_pr_err("%s: Out of memory\n", __FUNCTION__);
1331 put_device(&cgdev->dev);
1332 return -ENOMEM;
1333 }
1334
1335 rc = ctcm_add_files(&cgdev->dev);
1336 if (rc) {
1337 kfree(priv);
1338 put_device(&cgdev->dev);
1339 return rc;
1340 }
1341 priv->buffer_size = CTCM_BUFSIZE_DEFAULT;
1342 cgdev->cdev[0]->handler = ctcm_irq_handler;
1343 cgdev->cdev[1]->handler = ctcm_irq_handler;
1344 dev_set_drvdata(&cgdev->dev, priv);
1345
1346 return 0;
1347}
1348
1349/**
1350 * Add a new channel to the list of channels.
1351 * Keeps the channel list sorted.
1352 *
1353 * cdev The ccw_device to be added.
1354 * type The type class of the new channel.
1355 * priv Points to the private data of the ccwgroup_device.
1356 *
1357 * returns 0 on success, !0 on error.
1358 */
1359static int add_channel(struct ccw_device *cdev, enum channel_types type,
1360 struct ctcm_priv *priv)
1361{
1362 struct channel **c = &channels;
1363 struct channel *ch;
1364 int ccw_num;
1365 int rc = 0;
1366
1367 CTCM_DBF_TEXT(TRACE, 2, __FUNCTION__);
1368 ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
1369 if (ch == NULL)
1370 goto nomem_return;
1371
1372 ch->protocol = priv->protocol;
1373 if (IS_MPC(priv)) {
1374 ch->discontact_th = (struct th_header *)
1375 kzalloc(TH_HEADER_LENGTH, gfp_type());
1376 if (ch->discontact_th == NULL)
1377 goto nomem_return;
1378
1379 ch->discontact_th->th_blk_flag = TH_DISCONTACT;
1380 tasklet_init(&ch->ch_disc_tasklet,
1381 mpc_action_send_discontact, (unsigned long)ch);
1382
1383 tasklet_init(&ch->ch_tasklet, ctcmpc_bh, (unsigned long)ch);
1384 ch->max_bufsize = (MPC_BUFSIZE_DEFAULT - 35);
1385 ccw_num = 17;
1386 } else
1387 ccw_num = 8;
1388
1389 ch->ccw = (struct ccw1 *)
1390 kzalloc(ccw_num * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
1391 if (ch->ccw == NULL)
1392 goto nomem_return;
1393
1394 ch->cdev = cdev;
1395 snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", cdev->dev.bus_id);
1396 ch->type = type;
1397
1398 /**
1399 * "static" ccws are used in the following way:
1400 *
1401 * ccw[0..2] (Channel program for generic I/O):
1402 * 0: prepare
1403 * 1: read or write (depending on direction) with fixed
1404 * buffer (idal allocated once when buffer is allocated)
1405 * 2: nop
1406 * ccw[3..5] (Channel program for direct write of packets)
1407 * 3: prepare
1408 * 4: write (idal allocated on every write).
1409 * 5: nop
1410 * ccw[6..7] (Channel program for initial channel setup):
1411 * 6: set extended mode
1412 * 7: nop
1413 *
1414 * ch->ccw[0..5] are initialized in ch_action_start because
1415 * the channel's direction is not yet known here.
1416 *
1417 * ccws used for xid2 negotiations
1418 * ch->ccw[8..14] are used for the XID exchange, either
1419 * X side XID2 Processing
1420 * 8: write control
1421 * 9: write th
1422 * 10: write XID
1423 * 11: read th from secondary
1424 * 12: read XID from secondary
1425 * 13: read 4 byte ID
1426 * 14: nop
1427 * Y side XID Processing
1428 * 8: sense
1429 * 9: read th
1430 * 10: read XID
1431 * 11: write th
1432 * 12: write XID
1433 * 13: write 4 byte ID
1434 * 14: nop
1435 *
1436 * ccws used for double noop due to VM timing issues
1437 * which result in unrecoverable Busy on channel
1438 * 15: nop
1439 * 16: nop
1440 */
1441 ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
1442 ch->ccw[6].flags = CCW_FLAG_SLI;
1443
1444 ch->ccw[7].cmd_code = CCW_CMD_NOOP;
1445 ch->ccw[7].flags = CCW_FLAG_SLI;
1446
1447 if (IS_MPC(priv)) {
1448 ch->ccw[15].cmd_code = CCW_CMD_WRITE;
1449 ch->ccw[15].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1450 ch->ccw[15].count = TH_HEADER_LENGTH;
1451 ch->ccw[15].cda = virt_to_phys(ch->discontact_th);
1452
1453 ch->ccw[16].cmd_code = CCW_CMD_NOOP;
1454 ch->ccw[16].flags = CCW_FLAG_SLI;
1455
1456 ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
1457 ctc_ch_event_names, CTC_MPC_NR_STATES,
1458 CTC_MPC_NR_EVENTS, ctcmpc_ch_fsm,
1459 mpc_ch_fsm_len, GFP_KERNEL);
1460 } else {
1461 ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
1462 ctc_ch_event_names, CTC_NR_STATES,
1463 CTC_NR_EVENTS, ch_fsm,
1464 ch_fsm_len, GFP_KERNEL);
1465 }
1466 if (ch->fsm == NULL)
1467 goto free_return;
1468
1469 fsm_newstate(ch->fsm, CTC_STATE_IDLE);
1470
1471 ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
1472 if (ch->irb == NULL)
1473 goto nomem_return;
1474
1475 while (*c && ctcm_less_than((*c)->id, ch->id))
1476 c = &(*c)->next;
1477
1478 if (*c && (!strncmp((*c)->id, ch->id, CTCM_ID_SIZE))) {
1479 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
1480 "%s (%s) already in list, using old entry",
1481 __FUNCTION__, (*c)->id);
1482
1483 goto free_return;
1484 }
1485
1486 spin_lock_init(&ch->collect_lock);
1487
1488 fsm_settimer(ch->fsm, &ch->timer);
1489 skb_queue_head_init(&ch->io_queue);
1490 skb_queue_head_init(&ch->collect_queue);
1491
1492 if (IS_MPC(priv)) {
1493 fsm_settimer(ch->fsm, &ch->sweep_timer);
1494 skb_queue_head_init(&ch->sweep_queue);
1495 }
1496 ch->next = *c;
1497 *c = ch;
1498 return 0;
1499
1500nomem_return:
1501 ctcm_pr_warn("ctcm: Out of memory in %s\n", __FUNCTION__);
1502 rc = -ENOMEM;
1503
1504free_return: /* note that all channel pointers are 0 or valid */
1505 kfree(ch->ccw); /* TODO: check that again */
1506 kfree(ch->discontact_th);
1507 kfree_fsm(ch->fsm);
1508 kfree(ch->irb);
1509 kfree(ch);
1510 return rc;
1511}
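
For orientation, a hypothetical sketch of how the "direct write" program ccw[3..5] from the layout comment above could be initialized. In this driver the real setup happens in the channel FSM start action, which is outside this file, so treat this as illustration only; the command codes are the ones defined in ctcm_main.h:

/* hypothetical init of the direct-write channel program ccw[3..5] */
ch->ccw[3].cmd_code = CCW_CMD_PREPARE;		/* 0xe3 */
ch->ccw[3].flags    = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[4].cmd_code = CCW_CMD_WRITE;		/* 0x01 */
ch->ccw[4].flags    = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[5].cmd_code = CCW_CMD_NOOP;		/* 0x03 */
ch->ccw[5].flags    = CCW_FLAG_SLI;
/* ch->ccw[4].count / .cda are filled per packet via set_normalized_cda() */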
1512
1513/*
1514 * Return type of a detected device.
1515 */
1516static enum channel_types get_channel_type(struct ccw_device_id *id)
1517{
1518 enum channel_types type;
1519 type = (enum channel_types)id->driver_info;
1520
1521 if (type == channel_type_ficon)
1522 type = channel_type_escon;
1523
1524 return type;
1525}
1526
1527/**
1528 *
1529 * Setup an interface.
1530 *
1531 * cgdev Device to be setup.
1532 *
1533 * returns 0 on success, !0 on failure.
1534 */
1535static int ctcm_new_device(struct ccwgroup_device *cgdev)
1536{
1537 char read_id[CTCM_ID_SIZE];
1538 char write_id[CTCM_ID_SIZE];
1539 int direction;
1540 enum channel_types type;
1541 struct ctcm_priv *priv;
1542 struct net_device *dev;
1543 int ret;
1544
1545 CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__);
1546
1547 priv = dev_get_drvdata(&cgdev->dev);
1548 if (!priv)
1549 return -ENODEV;
1550
1551 type = get_channel_type(&cgdev->cdev[0]->id);
1552
1553 snprintf(read_id, CTCM_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
1554 snprintf(write_id, CTCM_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
1555
1556 ret = add_channel(cgdev->cdev[0], type, priv);
1557 if (ret)
1558 return ret;
1559 ret = add_channel(cgdev->cdev[1], type, priv);
1560 if (ret)
1561 return ret;
1562
1563 ret = ccw_device_set_online(cgdev->cdev[0]);
1564 if (ret != 0) {
1565 CTCM_DBF_TEXT(SETUP, CTC_DBF_WARN,
1566 "ccw_device_set_online (cdev[0]) failed ");
1567 ctcm_pr_warn("ccw_device_set_online (cdev[0]) failed "
1568 "with ret = %d\n", ret);
1569 }
1570
1571 ret = ccw_device_set_online(cgdev->cdev[1]);
1572 if (ret != 0) {
1573 CTCM_DBF_TEXT(SETUP, CTC_DBF_WARN,
1574 "ccw_device_set_online (cdev[1]) failed ");
1575 ctcm_pr_warn("ccw_device_set_online (cdev[1]) failed "
1576 "with ret = %d\n", ret);
1577 }
1578
1579 dev = ctcm_init_netdevice(priv);
1580
1581 if (dev == NULL) {
1582 ctcm_pr_warn("ctcm_init_netdevice failed\n");
1583 goto out;
1584 }
1585
1586 for (direction = READ; direction <= WRITE; direction++) {
1587 priv->channel[direction] =
1588 channel_get(type, direction == READ ? read_id : write_id,
1589 direction);
1590 if (priv->channel[direction] == NULL) {
1591 if (direction == WRITE)
1592 channel_free(priv->channel[READ]);
1593 ctcm_free_netdevice(dev);
1594 goto out;
1595 }
1596 priv->channel[direction]->netdev = dev;
1597 priv->channel[direction]->protocol = priv->protocol;
1598 priv->channel[direction]->max_bufsize = priv->buffer_size;
1599 }
1600 /* sysfs magic */
1601 SET_NETDEV_DEV(dev, &cgdev->dev);
1602
1603 if (ctcm_netdev_register(dev) != 0) {
1604 ctcm_free_netdevice(dev);
1605 goto out;
1606 }
1607
1608 if (ctcm_add_attributes(&cgdev->dev)) {
1609 ctcm_netdev_unregister(dev);
1610/* dev->priv = NULL; why that ???? */
1611 ctcm_free_netdevice(dev);
1612 goto out;
1613 }
1614
1615 strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
1616
1617 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
1618 "setup(%s) ok : r/w = %s / %s, proto : %d",
1619 dev->name, priv->channel[READ]->id,
1620 priv->channel[WRITE]->id, priv->protocol);
1621
1622 return 0;
1623out:
1624 ccw_device_set_offline(cgdev->cdev[1]);
1625 ccw_device_set_offline(cgdev->cdev[0]);
1626
1627 return -ENODEV;
1628}
1629
1630/**
1631 * Shutdown an interface.
1632 *
1633 * cgdev Device to be shut down.
1634 *
1635 * returns 0 on success, !0 on failure.
1636 */
1637static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
1638{
1639 struct ctcm_priv *priv;
1640 struct net_device *dev;
1641
1642 priv = dev_get_drvdata(&cgdev->dev);
1643 if (!priv)
1644 return -ENODEV;
1645
1646 if (priv->channel[READ]) {
1647 dev = priv->channel[READ]->netdev;
1648 CTCM_DBF_DEV(SETUP, dev, "");
1649 /* Close the device */
1650 ctcm_close(dev);
1651 dev->flags &= ~IFF_RUNNING;
1652 ctcm_remove_attributes(&cgdev->dev);
1653 channel_free(priv->channel[READ]);
1654 } else
1655 dev = NULL;
1656
1657 if (priv->channel[WRITE])
1658 channel_free(priv->channel[WRITE]);
1659
1660 if (dev) {
1661 ctcm_netdev_unregister(dev);
1662/* dev->priv = NULL; why that ??? */
1663 ctcm_free_netdevice(dev);
1664 }
1665
1666 if (priv->fsm)
1667 kfree_fsm(priv->fsm);
1668
1669 ccw_device_set_offline(cgdev->cdev[1]);
1670 ccw_device_set_offline(cgdev->cdev[0]);
1671
1672 if (priv->channel[READ])
1673 channel_remove(priv->channel[READ]);
1674 if (priv->channel[WRITE])
1675 channel_remove(priv->channel[WRITE]);
1676 priv->channel[READ] = priv->channel[WRITE] = NULL;
1677
1678 return 0;
1679
1680}
1681
1682
1683static void ctcm_remove_device(struct ccwgroup_device *cgdev)
1684{
1685 struct ctcm_priv *priv;
1686
1687 CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, __FUNCTION__);
1688
1689 priv = dev_get_drvdata(&cgdev->dev);
1690 if (!priv)
1691 return;
1692 if (cgdev->state == CCWGROUP_ONLINE)
1693 ctcm_shutdown_device(cgdev);
1694 ctcm_remove_files(&cgdev->dev);
1695 dev_set_drvdata(&cgdev->dev, NULL);
1696 kfree(priv);
1697 put_device(&cgdev->dev);
1698}
1699
1700static struct ccwgroup_driver ctcm_group_driver = {
1701 .owner = THIS_MODULE,
1702 .name = CTC_DRIVER_NAME,
1703 .max_slaves = 2,
1704 .driver_id = 0xC3E3C3D4, /* CTCM */
1705 .probe = ctcm_probe_device,
1706 .remove = ctcm_remove_device,
1707 .set_online = ctcm_new_device,
1708 .set_offline = ctcm_shutdown_device,
1709};
1710
1711
1712/*
1713 * Module related routines
1714 */
1715
1716/*
1717 * Prepare to be unloaded. Free IRQ's and release all resources.
1718 * This is called just before this module is unloaded. It is
1719 * not called, if the usage count is !0, so we don't need to check
1720 * for that.
1721 */
1722static void __exit ctcm_exit(void)
1723{
1724 unregister_cu3088_discipline(&ctcm_group_driver);
1725 ctcm_unregister_dbf_views();
1726 ctcm_pr_info("CTCM driver unloaded\n");
1727}
1728
1729/*
1730 * Print Banner.
1731 */
1732static void print_banner(void)
1733{
1734 printk(KERN_INFO "CTCM driver initialized\n");
1735}
1736
1737/**
1738 * Initialize module.
1739 * This is called just after the module is loaded.
1740 *
1741 * returns 0 on success, !0 on error.
1742 */
1743static int __init ctcm_init(void)
1744{
1745 int ret;
1746
1747 channels = NULL;
1748
1749 ret = ctcm_register_dbf_views();
1750 if (ret) {
1751 ctcm_pr_crit("ctcm_init failed with ctcm_register_dbf_views "
1752 "rc = %d\n", ret);
1753 return ret;
1754 }
1755 ret = register_cu3088_discipline(&ctcm_group_driver);
1756 if (ret) {
1757 ctcm_unregister_dbf_views();
1758 ctcm_pr_crit("ctcm_init failed with register_cu3088_discipline "
1759 "(rc = %d)\n", ret);
1760 return ret;
1761 }
1762 print_banner();
1763 return ret;
1764}
1765
1766module_init(ctcm_init);
1767module_exit(ctcm_exit);
1768
1769MODULE_AUTHOR("Peter Tiedemann <ptiedem@de.ibm.com>");
1770MODULE_DESCRIPTION("Network driver for S/390 CTC + CTCMPC (SNA)");
1771MODULE_LICENSE("GPL");
1772
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
new file mode 100644
index 000000000000..95b0c0b6ebc6
--- /dev/null
+++ b/drivers/s390/net/ctcm_main.h
@@ -0,0 +1,287 @@
1/*
2 * drivers/s390/net/ctcm_main.h
3 *
4 * Copyright IBM Corp. 2001, 2007
5 * Authors: Fritz Elfert (felfert@millenux.com)
6 * Peter Tiedemann (ptiedem@de.ibm.com)
7 */
8
9#ifndef _CTCM_MAIN_H_
10#define _CTCM_MAIN_H_
11
12#include <asm/ccwdev.h>
13#include <asm/ccwgroup.h>
14
15#include <linux/skbuff.h>
16#include <linux/netdevice.h>
17
18#include "fsm.h"
19#include "cu3088.h"
20#include "ctcm_dbug.h"
21#include "ctcm_mpc.h"
22
23#define CTC_DRIVER_NAME "ctcm"
24#define CTC_DEVICE_NAME "ctc"
25#define CTC_DEVICE_GENE "ctc%d"
26#define MPC_DEVICE_NAME "mpc"
27#define MPC_DEVICE_GENE "mpc%d"
28
29#define CHANNEL_FLAGS_READ 0
30#define CHANNEL_FLAGS_WRITE 1
31#define CHANNEL_FLAGS_INUSE 2
32#define CHANNEL_FLAGS_BUFSIZE_CHANGED 4
33#define CHANNEL_FLAGS_FAILED 8
34#define CHANNEL_FLAGS_WAITIRQ 16
35#define CHANNEL_FLAGS_RWMASK 1
36#define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK)
37
38#define LOG_FLAG_ILLEGALPKT 1
39#define LOG_FLAG_ILLEGALSIZE 2
40#define LOG_FLAG_OVERRUN 4
41#define LOG_FLAG_NOMEM 8
42
43#define ctcm_pr_debug(fmt, arg...) printk(KERN_DEBUG fmt, ##arg)
44#define ctcm_pr_info(fmt, arg...) printk(KERN_INFO fmt, ##arg)
45#define ctcm_pr_notice(fmt, arg...) printk(KERN_NOTICE fmt, ##arg)
46#define ctcm_pr_warn(fmt, arg...) printk(KERN_WARNING fmt, ##arg)
47#define ctcm_pr_emerg(fmt, arg...) printk(KERN_EMERG fmt, ##arg)
48#define ctcm_pr_err(fmt, arg...) printk(KERN_ERR fmt, ##arg)
49#define ctcm_pr_crit(fmt, arg...) printk(KERN_CRIT fmt, ##arg)
50
51/*
52 * CCW commands, used in this driver.
53 */
54#define CCW_CMD_WRITE 0x01
55#define CCW_CMD_READ 0x02
56#define CCW_CMD_NOOP 0x03
57#define CCW_CMD_TIC 0x08
58#define CCW_CMD_SENSE_CMD 0x14
59#define CCW_CMD_WRITE_CTL 0x17
60#define CCW_CMD_SET_EXTENDED 0xc3
61#define CCW_CMD_PREPARE 0xe3
62
63#define CTCM_PROTO_S390 0
64#define CTCM_PROTO_LINUX 1
65#define CTCM_PROTO_LINUX_TTY 2
66#define CTCM_PROTO_OS390 3
67#define CTCM_PROTO_MPC 4
68#define CTCM_PROTO_MAX 4
69
70#define CTCM_BUFSIZE_LIMIT 65535
71#define CTCM_BUFSIZE_DEFAULT 32768
72#define MPC_BUFSIZE_DEFAULT CTCM_BUFSIZE_LIMIT
73
74#define CTCM_TIME_1_SEC 1000
75#define CTCM_TIME_5_SEC 5000
76#define CTCM_TIME_10_SEC 10000
77
78#define CTCM_INITIAL_BLOCKLEN 2
79
80#define READ 0
81#define WRITE 1
82
 83#define CTCM_ID_SIZE (BUS_ID_SIZE + 3)
84
85struct ctcm_profile {
86 unsigned long maxmulti;
87 unsigned long maxcqueue;
88 unsigned long doios_single;
89 unsigned long doios_multi;
90 unsigned long txlen;
91 unsigned long tx_time;
92 struct timespec send_stamp;
93};
94
95/*
96 * Definition of one channel
97 */
98struct channel {
99 struct channel *next;
100 char id[CTCM_ID_SIZE];
101 struct ccw_device *cdev;
102 /*
103 * Type of this channel.
104 * CTC/A or Escon for valid channels.
105 */
106 enum channel_types type;
107 /*
108 * Misc. flags. See CHANNEL_FLAGS_... below
109 */
110 __u32 flags;
111 __u16 protocol; /* protocol of this channel (4 = MPC) */
112 /*
113 * I/O and irq related stuff
114 */
115 struct ccw1 *ccw;
116 struct irb *irb;
117 /*
118 * RX/TX buffer size
119 */
120 int max_bufsize;
121 struct sk_buff *trans_skb; /* transmit/receive buffer */
122 struct sk_buff_head io_queue; /* universal I/O queue */
123 struct tasklet_struct ch_tasklet; /* MPC ONLY */
124 /*
125 * TX queue for collecting skb's during busy.
126 */
127 struct sk_buff_head collect_queue;
128 /*
129 * Amount of data in collect_queue.
130 */
131 int collect_len;
132 /*
133 * spinlock for collect_queue and collect_len
134 */
135 spinlock_t collect_lock;
136 /*
 137 * Timer for detecting unresponsive
138 * I/O operations.
139 */
140 fsm_timer timer;
141 /* MPC ONLY section begin */
142 __u32 th_seq_num; /* SNA TH seq number */
143 __u8 th_seg;
144 __u32 pdu_seq;
145 struct sk_buff *xid_skb;
146 char *xid_skb_data;
147 struct th_header *xid_th;
148 struct xid2 *xid;
149 char *xid_id;
150 struct th_header *rcvd_xid_th;
151 struct xid2 *rcvd_xid;
152 char *rcvd_xid_id;
153 __u8 in_mpcgroup;
154 fsm_timer sweep_timer;
155 struct sk_buff_head sweep_queue;
156 struct th_header *discontact_th;
157 struct tasklet_struct ch_disc_tasklet;
158 /* MPC ONLY section end */
159
160 int retry; /* retry counter for misc. operations */
161 fsm_instance *fsm; /* finite state machine of this channel */
162 struct net_device *netdev; /* corresponding net_device */
163 struct ctcm_profile prof;
164 unsigned char *trans_skb_data;
165 __u16 logflags;
166};
167
168struct ctcm_priv {
169 struct net_device_stats stats;
170 unsigned long tbusy;
171
172 /* The MPC group struct of this interface */
173 struct mpc_group *mpcg; /* MPC only */
174 struct xid2 *xid; /* MPC only */
175
176 /* The finite state machine of this interface */
177 fsm_instance *fsm;
178
179 /* The protocol of this device */
180 __u16 protocol;
181
182 /* Timer for restarting after I/O Errors */
183 fsm_timer restart_timer;
184
185 int buffer_size; /* ctc only */
186
187 struct channel *channel[2];
188};
189
190int ctcm_open(struct net_device *dev);
191int ctcm_close(struct net_device *dev);
192
193/*
194 * prototypes for non-static sysfs functions
195 */
196int ctcm_add_attributes(struct device *dev);
197void ctcm_remove_attributes(struct device *dev);
198int ctcm_add_files(struct device *dev);
199void ctcm_remove_files(struct device *dev);
200
201/*
202 * Compatibility macros for busy handling
203 * of network devices.
204 */
205static inline void ctcm_clear_busy_do(struct net_device *dev)
206{
207 clear_bit(0, &(((struct ctcm_priv *)dev->priv)->tbusy));
208 netif_wake_queue(dev);
209}
210
211static inline void ctcm_clear_busy(struct net_device *dev)
212{
213 struct mpc_group *grp;
214 grp = ((struct ctcm_priv *)dev->priv)->mpcg;
215
216 if (!(grp && grp->in_sweep))
217 ctcm_clear_busy_do(dev);
218}
219
220
221static inline int ctcm_test_and_set_busy(struct net_device *dev)
222{
223 netif_stop_queue(dev);
224 return test_and_set_bit(0, &(((struct ctcm_priv *)dev->priv)->tbusy));
225}
226
227extern int loglevel;
228extern struct channel *channels;
229
230void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb);
231
232/*
233 * Functions related to setup and device detection.
234 */
235
236static inline int ctcm_less_than(char *id1, char *id2)
237{
238 unsigned long dev1, dev2;
239
240 id1 = id1 + 5;
241 id2 = id2 + 5;
242
243 dev1 = simple_strtoul(id1, &id1, 16);
244 dev2 = simple_strtoul(id2, &id2, 16);
245
246 return (dev1 < dev2);
247}
248
249int ctcm_ch_alloc_buffer(struct channel *ch);
250
251static inline int ctcm_checkalloc_buffer(struct channel *ch)
252{
253 if (ch->trans_skb == NULL)
254 return ctcm_ch_alloc_buffer(ch);
255 if (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED) {
256 dev_kfree_skb(ch->trans_skb);
257 return ctcm_ch_alloc_buffer(ch);
258 }
259 return 0;
260}
261
262struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);
263
264/* test if protocol attribute (of struct ctcm_priv or struct channel)
265 * has MPC protocol setting. Type is not checked
266 */
267#define IS_MPC(p) ((p)->protocol == CTCM_PROTO_MPC)
268
269/* test if struct ctcm_priv of struct net_device has MPC protocol setting */
270#define IS_MPCDEV(d) IS_MPC((struct ctcm_priv *)(d)->priv)
271
272static inline gfp_t gfp_type(void)
273{
274 return in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
275}
276
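A brief hedged usage sketch: allocation sites such as the mpcg_info allocation in ctcm_mpc.c pick their GFP flags through this helper; the wrapper function below is hypothetical.

	static struct mpcg_info *example_alloc_info(void)
	{
		/* GFP_ATOMIC when called in interrupt context, GFP_KERNEL otherwise */
		return kmalloc(sizeof(struct mpcg_info), gfp_type());
	}
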
277/*
278 * Definition of our link level header.
279 */
280struct ll_header {
281 __u16 length;
282 __u16 type;
283 __u16 unused;
284};
285#define LL_HEADER_LENGTH (sizeof(struct ll_header))
286
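A hedged sketch of prepending this header to an outgoing frame; the helper name is hypothetical, and the field semantics (length covering the whole frame, type carrying a protocol id) are assumptions carried over from the classic ctc driver rather than guarantees of this header alone:

	static void example_add_ll_header(struct sk_buff *skb, __u16 proto)
	{
		struct ll_header *llh;

		llh = (struct ll_header *)skb_push(skb, LL_HEADER_LENGTH);
		llh->length = skb->len;	/* assumed: includes the header itself */
		llh->type = proto;
		llh->unused = 0;
	}
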
287#endif
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
new file mode 100644
index 000000000000..044addee64a2
--- /dev/null
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -0,0 +1,2472 @@
1/*
2 * drivers/s390/net/ctcm_mpc.c
3 *
4 * Copyright IBM Corp. 2004, 2007
5 * Authors: Belinda Thompson (belindat@us.ibm.com)
6 * Andy Richter (richtera@us.ibm.com)
7 * Peter Tiedemann (ptiedem@de.ibm.com)
8 */
9
10/*
11 This module exports functions to be used by CCS:
12 EXPORT_SYMBOL(ctc_mpc_alloc_channel);
13 EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
14 EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
15 EXPORT_SYMBOL(ctc_mpc_flow_control);
16*/
17
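As a hedged sketch of the calling sequence an exploiter ("CCS", Communication Server for Linux) would use; the callback bodies and port number are hypothetical, while the callback signatures follow the prototypes defined below:

	static void my_conn_done(int port_num, int rc, int max_buflen)
	{
		/* rc == 0: XID exchange finished, the MPC group is READY */
	}

	static void my_alloc_done(int port_num, int max_buflen)
	{
		/* channels are active; now drive the XID exchange */
		ctc_mpc_establish_connectivity(port_num, my_conn_done);
	}

	static void example_bringup(void)
	{
		int port = 0;	/* hypothetical port number */

		if (ctc_mpc_alloc_channel(port, my_alloc_done) != 0)
			return;	/* mpc device for this port not available */
		/* teardown later via ctc_mpc_dealloc_ch(port) */
	}
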
18#undef DEBUG
19#undef DEBUGDATA
20#undef DEBUGCCW
21
22#include <linux/version.h>
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/kernel.h>
26#include <linux/slab.h>
27#include <linux/errno.h>
28#include <linux/types.h>
29#include <linux/interrupt.h>
30#include <linux/timer.h>
31#include <linux/sched.h>
32
33#include <linux/signal.h>
34#include <linux/string.h>
35#include <linux/proc_fs.h>
36
37#include <linux/ip.h>
38#include <linux/if_arp.h>
39#include <linux/tcp.h>
40#include <linux/skbuff.h>
41#include <linux/ctype.h>
42#include <linux/netdevice.h>
43#include <net/dst.h>
44
45#include <linux/io.h> /* instead of <asm/io.h> ok ? */
46#include <asm/ccwdev.h>
47#include <asm/ccwgroup.h>
48#include <linux/bitops.h> /* instead of <asm/bitops.h> ok ? */
49#include <linux/uaccess.h> /* instead of <asm/uaccess.h> ok ? */
50#include <linux/wait.h>
51#include <linux/moduleparam.h>
52#include <asm/idals.h>
53
54#include "cu3088.h"
55#include "ctcm_mpc.h"
56#include "ctcm_main.h"
57#include "ctcm_fsms.h"
58
59static const struct xid2 init_xid = {
60 .xid2_type_id = XID_FM2,
61 .xid2_len = 0x45,
62 .xid2_adj_id = 0,
63 .xid2_rlen = 0x31,
64 .xid2_resv1 = 0,
65 .xid2_flag1 = 0,
66 .xid2_fmtt = 0,
67 .xid2_flag4 = 0x80,
68 .xid2_resv2 = 0,
69 .xid2_tgnum = 0,
70 .xid2_sender_id = 0,
71 .xid2_flag2 = 0,
72 .xid2_option = XID2_0,
73 .xid2_resv3 = "\x00",
74 .xid2_resv4 = 0,
75 .xid2_dlc_type = XID2_READ_SIDE,
76 .xid2_resv5 = 0,
77 .xid2_mpc_flag = 0,
78 .xid2_resv6 = 0,
79 .xid2_buf_len = (MPC_BUFSIZE_DEFAULT - 35),
80};
81
82static const struct th_header thnorm = {
83 .th_seg = 0x00,
84 .th_ch_flag = TH_IS_XID,
85 .th_blk_flag = TH_DATA_IS_XID,
86 .th_is_xid = 0x01,
87 .th_seq_num = 0x00000000,
88};
89
90static const struct th_header thdummy = {
91 .th_seg = 0x00,
92 .th_ch_flag = 0x00,
93 .th_blk_flag = TH_DATA_IS_XID,
94 .th_is_xid = 0x01,
95 .th_seq_num = 0x00000000,
96};
97
98/*
99 * Definition of one MPC group
100 */
101
102/*
103 * Compatibility macros for busy handling
104 * of network devices.
105 */
106
107static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb);
108
109/*
110 * MPC Group state machine actions (static prototypes)
111 */
112static void mpc_action_nop(fsm_instance *fsm, int event, void *arg);
113static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg);
114static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg);
115static void mpc_action_timeout(fsm_instance *fi, int event, void *arg);
116static int mpc_validate_xid(struct mpcg_info *mpcginfo);
117static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg);
118static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg);
119static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg);
120static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg);
121static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg);
122static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg);
123
124#ifdef DEBUGDATA
125/*-------------------------------------------------------------------*
126* Dump buffer format *
127* *
128*--------------------------------------------------------------------*/
129void ctcmpc_dumpit(char *buf, int len)
130{
131 __u32 ct, sw, rm, dup;
132 char *ptr, *rptr;
133 char tbuf[82], tdup[82];
134	#ifdef CONFIG_64BIT
135 char addr[22];
136 #else
137 char addr[12];
138 #endif
139 char boff[12];
140 char bhex[82], duphex[82];
141 char basc[40];
142
143 sw = 0;
144 rptr = ptr = buf;
145 rm = 16;
146 duphex[0] = 0x00;
147 dup = 0;
148
149 for (ct = 0; ct < len; ct++, ptr++, rptr++) {
150 if (sw == 0) {
151	#ifdef CONFIG_64BIT
152 sprintf(addr, "%16.16lx", (unsigned long)rptr);
153 #else
154 sprintf(addr, "%8.8X", (__u32)rptr);
155 #endif
156
157 sprintf(boff, "%4.4X", (__u32)ct);
158 bhex[0] = '\0';
159 basc[0] = '\0';
160 }
161 if ((sw == 4) || (sw == 12))
162 strcat(bhex, " ");
163 if (sw == 8)
164 strcat(bhex, " ");
165
166	#ifdef CONFIG_64BIT
167 sprintf(tbuf, "%2.2lX", (unsigned long)*ptr);
168 #else
169 sprintf(tbuf, "%2.2X", (__u32)*ptr);
170 #endif
171
172 tbuf[2] = '\0';
173 strcat(bhex, tbuf);
174 if ((0 != isprint(*ptr)) && (*ptr >= 0x20))
175 basc[sw] = *ptr;
176 else
177 basc[sw] = '.';
178
179 basc[sw+1] = '\0';
180 sw++;
181 rm--;
182 if (sw == 16) {
183 if ((strcmp(duphex, bhex)) != 0) {
184 if (dup != 0) {
185 sprintf(tdup, "Duplicate as above "
186 "to %s", addr);
187 printk(KERN_INFO " "
188 " --- %s ---\n", tdup);
189 }
190 printk(KERN_INFO " %s (+%s) : %s [%s]\n",
191 addr, boff, bhex, basc);
192 dup = 0;
193 strcpy(duphex, bhex);
194 } else
195 dup++;
196
197 sw = 0;
198 rm = 16;
199 }
200 } /* endfor */
201
202 if (sw != 0) {
203 for ( ; rm > 0; rm--, sw++) {
204 if ((sw == 4) || (sw == 12))
205 strcat(bhex, " ");
206 if (sw == 8)
207 strcat(bhex, " ");
208 strcat(bhex, " ");
209 strcat(basc, " ");
210 }
211 if (dup != 0) {
212 sprintf(tdup, "Duplicate as above to %s", addr);
213 printk(KERN_INFO " "
214 " --- %s ---\n", tdup);
215 }
216 printk(KERN_INFO " %s (+%s) : %s [%s]\n",
217 addr, boff, bhex, basc);
218 } else {
219 if (dup >= 1) {
220 sprintf(tdup, "Duplicate as above to %s", addr);
221 printk(KERN_INFO " "
222 " --- %s ---\n", tdup);
223 }
224 if (dup != 0) {
225 printk(KERN_INFO " %s (+%s) : %s [%s]\n",
226 addr, boff, bhex, basc);
227 }
228 }
229
230 return;
231
232} /* end of ctcmpc_dumpit */
233#endif
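
For reference, each complete 16-byte group is printed as address (+offset) : hex [ascii]; a hypothetical example line (64-bit address format, invented address and content):

	   00000000009a1000 (+0000) : 48656c6c 6f2c2077  6f726c64 21212121 [Hello, world!!!!]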
234
235#ifdef DEBUGDATA
236/*
237 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
238 *
239 * skb The sk_buff to dump.
240 * offset Offset relative to skb-data, where to start the dump.
241 */
242void ctcmpc_dump_skb(struct sk_buff *skb, int offset)
243{
244 unsigned char *p = skb->data;
245 struct th_header *header;
246 struct pdu *pheader;
247 int bl = skb->len;
248 int i;
249
250 if (p == NULL)
251 return;
252
253 p += offset;
254 header = (struct th_header *)p;
255
256 printk(KERN_INFO "dump:\n");
257 printk(KERN_INFO "skb len=%d \n", skb->len);
258 if (skb->len > 2) {
259 switch (header->th_ch_flag) {
260 case TH_HAS_PDU:
261 break;
262 case 0x00:
263 case TH_IS_XID:
264 if ((header->th_blk_flag == TH_DATA_IS_XID) &&
265 (header->th_is_xid == 0x01))
266 goto dumpth;
267 case TH_SWEEP_REQ:
268 goto dumpth;
269 case TH_SWEEP_RESP:
270 goto dumpth;
271 default:
272 break;
273 }
274
275 pheader = (struct pdu *)p;
276 printk(KERN_INFO "pdu->offset: %d hex: %04x\n",
277 pheader->pdu_offset, pheader->pdu_offset);
278 printk(KERN_INFO "pdu->flag : %02x\n", pheader->pdu_flag);
279 printk(KERN_INFO "pdu->proto : %02x\n", pheader->pdu_proto);
280 printk(KERN_INFO "pdu->seq : %02x\n", pheader->pdu_seq);
281 goto dumpdata;
282
283dumpth:
284 printk(KERN_INFO "th->seg : %02x\n", header->th_seg);
285 printk(KERN_INFO "th->ch : %02x\n", header->th_ch_flag);
286 printk(KERN_INFO "th->blk_flag: %02x\n", header->th_blk_flag);
287 printk(KERN_INFO "th->type : %s\n",
288	 (header->th_is_xid) ? "XID" : "DATA");
289 printk(KERN_INFO "th->seqnum : %04x\n", header->th_seq_num);
290
291 }
292dumpdata:
293 if (bl > 32)
294 bl = 32;
295 printk(KERN_INFO "data: ");
296 for (i = 0; i < bl; i++)
297 printk(KERN_INFO "%02x%s", *p++, (i % 16) ? " " : "\n<7>");
298 printk(KERN_INFO "\n");
299}
300#endif
301
302/*
303 * ctc_mpc_alloc_channel
304 * (exported interface)
305 *
306 * Device Initialization :
307 * ACTPATH driven IO operations
308 */
309int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int))
310{
311 char device[20];
312 struct net_device *dev;
313 struct mpc_group *grp;
314 struct ctcm_priv *priv;
315
316 ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__);
317
318 sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num);
319 dev = __dev_get_by_name(&init_net, device);
320
321 if (dev == NULL) {
322 printk(KERN_INFO "ctc_mpc_alloc_channel %s dev=NULL\n", device);
323 return 1;
324 }
325
326 priv = dev->priv;
327 grp = priv->mpcg;
328 if (!grp)
329 return 1;
330
331 grp->allochanfunc = callback;
332 grp->port_num = port_num;
333 grp->port_persist = 1;
334
335 ctcm_pr_debug("ctcmpc: %s called for device %s state=%s\n",
336 __FUNCTION__,
337 dev->name,
338 fsm_getstate_str(grp->fsm));
339
340 switch (fsm_getstate(grp->fsm)) {
341 case MPCG_STATE_INOP:
342 /* Group is in the process of terminating */
343 grp->alloc_called = 1;
344 break;
345 case MPCG_STATE_RESET:
346 /* MPC Group will transition to state */
347		/* MPCG_STATE_XID2INITW iff the minimum of	  */
348		/* 1 read and 1 write channel has successfully	  */
349		/* activated					  */
350 /*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/
351 if (callback)
352 grp->send_qllc_disc = 1;
353 case MPCG_STATE_XID0IOWAIT:
354 fsm_deltimer(&grp->timer);
355 grp->outstanding_xid2 = 0;
356 grp->outstanding_xid7 = 0;
357 grp->outstanding_xid7_p2 = 0;
358 grp->saved_xid2 = NULL;
359 if (callback)
360 ctcm_open(dev);
361 fsm_event(priv->fsm, DEV_EVENT_START, dev);
362 break;
363 case MPCG_STATE_READY:
364 /* XID exchanges completed after PORT was activated */
365 /* Link station already active */
366 /* Maybe timing issue...retry callback */
367 grp->allocchan_callback_retries++;
368 if (grp->allocchan_callback_retries < 4) {
369 if (grp->allochanfunc)
370 grp->allochanfunc(grp->port_num,
371 grp->group_max_buflen);
372 } else {
373 /* there are problems...bail out */
374 /* there may be a state mismatch so restart */
375 grp->port_persist = 1;
376 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
377 grp->allocchan_callback_retries = 0;
378 }
379 break;
380 default:
381 return 0;
382
383 }
384
385 ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__);
386 return 0;
387}
388EXPORT_SYMBOL(ctc_mpc_alloc_channel);
389
390/*
391 * ctc_mpc_establish_connectivity
392 * (exported interface)
393 */
394void ctc_mpc_establish_connectivity(int port_num,
395 void (*callback)(int, int, int))
396{
397 char device[20];
398 struct net_device *dev;
399 struct mpc_group *grp;
400 struct ctcm_priv *priv;
401 struct channel *rch, *wch;
402
403 ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__);
404
405 sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num);
406 dev = __dev_get_by_name(&init_net, device);
407
408 if (dev == NULL) {
409 printk(KERN_INFO "ctc_mpc_establish_connectivity "
410 "%s dev=NULL\n", device);
411 return;
412 }
413 priv = dev->priv;
414 rch = priv->channel[READ];
415 wch = priv->channel[WRITE];
416
417 grp = priv->mpcg;
418
419 ctcm_pr_debug("ctcmpc: %s() called for device %s state=%s\n",
420 __FUNCTION__, dev->name,
421 fsm_getstate_str(grp->fsm));
422
423 grp->estconnfunc = callback;
424 grp->port_num = port_num;
425
426 switch (fsm_getstate(grp->fsm)) {
427 case MPCG_STATE_READY:
428 /* XID exchanges completed after PORT was activated */
429 /* Link station already active */
430 /* Maybe timing issue...retry callback */
431 fsm_deltimer(&grp->timer);
432 grp->estconn_callback_retries++;
433 if (grp->estconn_callback_retries < 4) {
434 if (grp->estconnfunc) {
435 grp->estconnfunc(grp->port_num, 0,
436 grp->group_max_buflen);
437 grp->estconnfunc = NULL;
438 }
439 } else {
440 /* there are problems...bail out */
441 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
442 grp->estconn_callback_retries = 0;
443 }
444 break;
445 case MPCG_STATE_INOP:
446 case MPCG_STATE_RESET:
447 /* MPC Group is not ready to start XID - min num of */
448 /* 1 read and 1 write channel have not been acquired*/
449 printk(KERN_WARNING "ctcmpc: %s() REJECTED ACTIVE XID Req"
450 "uest - Channel Pair is not Active\n", __FUNCTION__);
451 if (grp->estconnfunc) {
452 grp->estconnfunc(grp->port_num, -1, 0);
453 grp->estconnfunc = NULL;
454 }
455 break;
456 case MPCG_STATE_XID2INITW:
457 /* alloc channel was called but no XID exchange */
458 /* has occurred. initiate xside XID exchange */
459 /* make sure yside XID0 processing has not started */
460 if ((fsm_getstate(rch->fsm) > CH_XID0_PENDING) ||
461 (fsm_getstate(wch->fsm) > CH_XID0_PENDING)) {
462 printk(KERN_WARNING "mpc: %s() ABORT ACTIVE XID"
463			" Request - PASSIVE XID in process\n"
464 , __FUNCTION__);
465 break;
466 }
467 grp->send_qllc_disc = 1;
468 fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIT);
469 fsm_deltimer(&grp->timer);
470 fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
471 MPCG_EVENT_TIMER, dev);
472 grp->outstanding_xid7 = 0;
473 grp->outstanding_xid7_p2 = 0;
474 grp->saved_xid2 = NULL;
475 if ((rch->in_mpcgroup) &&
476 (fsm_getstate(rch->fsm) == CH_XID0_PENDING))
477 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, rch);
478 else {
479 printk(KERN_WARNING "mpc: %s() Unable to start"
480 " ACTIVE XID0 on read channel\n",
481 __FUNCTION__);
482 if (grp->estconnfunc) {
483 grp->estconnfunc(grp->port_num, -1, 0);
484 grp->estconnfunc = NULL;
485 }
486 fsm_deltimer(&grp->timer);
487 goto done;
488 }
489 if ((wch->in_mpcgroup) &&
490 (fsm_getstate(wch->fsm) == CH_XID0_PENDING))
491 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, wch);
492 else {
493 printk(KERN_WARNING "mpc: %s() Unable to start"
494 " ACTIVE XID0 on write channel\n",
495 __FUNCTION__);
496 if (grp->estconnfunc) {
497 grp->estconnfunc(grp->port_num, -1, 0);
498 grp->estconnfunc = NULL;
499 }
500 fsm_deltimer(&grp->timer);
501 goto done;
502 }
503 break;
504 case MPCG_STATE_XID0IOWAIT:
505 /* already in active XID negotiations */
506 default:
507 break;
508 }
509
510done:
511 ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__);
512 return;
513}
514EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
515
516/*
517 * ctc_mpc_dealloc_ch
518 * (exported interface)
519 */
520void ctc_mpc_dealloc_ch(int port_num)
521{
522 struct net_device *dev;
523 char device[20];
524 struct ctcm_priv *priv;
525 struct mpc_group *grp;
526
527 ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__);
528 sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num);
529 dev = __dev_get_by_name(&init_net, device);
530
531 if (dev == NULL) {
532 printk(KERN_INFO "%s() %s dev=NULL\n", __FUNCTION__, device);
533 goto done;
534 }
535
536 ctcm_pr_debug("ctcmpc:%s %s() called for device %s refcount=%d\n",
537 dev->name, __FUNCTION__,
538 dev->name, atomic_read(&dev->refcnt));
539
540 priv = dev->priv;
541 if (priv == NULL) {
542 printk(KERN_INFO "%s() %s priv=NULL\n",
543 __FUNCTION__, device);
544 goto done;
545 }
546 fsm_deltimer(&priv->restart_timer);
547
548 grp = priv->mpcg;
549 if (grp == NULL) {
550 printk(KERN_INFO "%s() %s dev=NULL\n", __FUNCTION__, device);
551 goto done;
552 }
553 grp->channels_terminating = 0;
554
555 fsm_deltimer(&grp->timer);
556
557 grp->allochanfunc = NULL;
558 grp->estconnfunc = NULL;
559 grp->port_persist = 0;
560 grp->send_qllc_disc = 0;
561 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
562
563 ctcm_close(dev);
564done:
565 ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__);
566 return;
567}
568EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
569
570/*
571 * ctc_mpc_flow_control
572 * (exported interface)
573 */
574void ctc_mpc_flow_control(int port_num, int flowc)
575{
576 char device[20];
577 struct ctcm_priv *priv;
578 struct mpc_group *grp;
579 struct net_device *dev;
580 struct channel *rch;
581 int mpcg_state;
582
583 ctcm_pr_debug("ctcmpc enter: %s() %i\n", __FUNCTION__, flowc);
584
585 sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num);
586 dev = __dev_get_by_name(&init_net, device);
587
588 if (dev == NULL) {
589 printk(KERN_INFO "ctc_mpc_flow_control %s dev=NULL\n", device);
590 return;
591 }
592
593 ctcm_pr_debug("ctcmpc: %s %s called \n", dev->name, __FUNCTION__);
594
595 priv = dev->priv;
596 if (priv == NULL) {
597 printk(KERN_INFO "ctcmpc:%s() %s priv=NULL\n",
598 __FUNCTION__, device);
599 return;
600 }
601 grp = priv->mpcg;
602 rch = priv->channel[READ];
603
604 mpcg_state = fsm_getstate(grp->fsm);
605 switch (flowc) {
606 case 1:
607 if (mpcg_state == MPCG_STATE_FLOWC)
608 break;
609 if (mpcg_state == MPCG_STATE_READY) {
610 if (grp->flow_off_called == 1)
611 grp->flow_off_called = 0;
612 else
613 fsm_newstate(grp->fsm, MPCG_STATE_FLOWC);
614 break;
615 }
616 break;
617 case 0:
618 if (mpcg_state == MPCG_STATE_FLOWC) {
619 fsm_newstate(grp->fsm, MPCG_STATE_READY);
620 /* ensure any data that has accumulated */
621			/* on the io_queue will now be sent */
622 tasklet_schedule(&rch->ch_tasklet);
623 }
624 /* possible race condition */
625 if (mpcg_state == MPCG_STATE_READY) {
626 grp->flow_off_called = 1;
627 break;
628 }
629 break;
630 }
631
632 ctcm_pr_debug("ctcmpc exit: %s() %i\n", __FUNCTION__, flowc);
633}
634EXPORT_SYMBOL(ctc_mpc_flow_control);
635
636static int mpc_send_qllc_discontact(struct net_device *);
637
638/*
639 * helper function of ctcmpc_unpack_skb
640 */
641static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
642{
643 struct channel *rch = mpcginfo->ch;
644 struct net_device *dev = rch->netdev;
645 struct ctcm_priv *priv = dev->priv;
646 struct mpc_group *grp = priv->mpcg;
647 struct channel *ch = priv->channel[WRITE];
648
649 if (do_debug)
650 ctcm_pr_debug("ctcmpc enter: %s(): ch=0x%p id=%s\n",
651 __FUNCTION__, ch, ch->id);
652
653 if (do_debug_data)
654 ctcmpc_dumpit((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
655
656 grp->sweep_rsp_pend_num--;
657
658 if ((grp->sweep_req_pend_num == 0) &&
659 (grp->sweep_rsp_pend_num == 0)) {
660 fsm_deltimer(&ch->sweep_timer);
661 grp->in_sweep = 0;
662 rch->th_seq_num = 0x00;
663 ch->th_seq_num = 0x00;
664 ctcm_clear_busy_do(dev);
665 }
666
667 kfree(mpcginfo);
668
669 return;
670
671}
672
673/*
674 * helper function of mpc_rcvd_sweep_req
675 * which is a helper of ctcmpc_unpack_skb
676 */
677static void ctcmpc_send_sweep_resp(struct channel *rch)
678{
679 struct net_device *dev = rch->netdev;
680 struct ctcm_priv *priv = dev->priv;
681 struct mpc_group *grp = priv->mpcg;
682 int rc = 0;
683 struct th_sweep *header;
684 struct sk_buff *sweep_skb;
685 struct channel *ch = priv->channel[WRITE];
686
687 if (do_debug)
688 ctcm_pr_debug("ctcmpc exit : %s(): ch=0x%p id=%s\n",
689 __FUNCTION__, rch, rch->id);
690
691 sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT,
692 GFP_ATOMIC|GFP_DMA);
693 if (sweep_skb == NULL) {
694 printk(KERN_INFO "Couldn't alloc sweep_skb\n");
695 rc = -ENOMEM;
696 goto done;
697 }
698
699 header = (struct th_sweep *)
700 kmalloc(sizeof(struct th_sweep), gfp_type());
701
702 if (!header) {
703 dev_kfree_skb_any(sweep_skb);
704 rc = -ENOMEM;
705 goto done;
706 }
707
708	header->th.th_seg = 0x00;
709 header->th.th_ch_flag = TH_SWEEP_RESP;
710 header->th.th_blk_flag = 0x00;
711 header->th.th_is_xid = 0x00;
712 header->th.th_seq_num = 0x00;
713 header->sw.th_last_seq = ch->th_seq_num;
714
715 memcpy(skb_put(sweep_skb, TH_SWEEP_LENGTH), header, TH_SWEEP_LENGTH);
716
717 kfree(header);
718
719 dev->trans_start = jiffies;
720 skb_queue_tail(&ch->sweep_queue, sweep_skb);
721
722 fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
723
724 return;
725
726done:
727 if (rc != 0) {
728 grp->in_sweep = 0;
729 ctcm_clear_busy_do(dev);
730 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
731 }
732
733 return;
734}
735
736/*
737 * helper function of ctcmpc_unpack_skb
738 */
739static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
740{
741 struct channel *rch = mpcginfo->ch;
742 struct net_device *dev = rch->netdev;
743 struct ctcm_priv *priv = dev->priv;
744 struct mpc_group *grp = priv->mpcg;
745 struct channel *ch = priv->channel[WRITE];
746
747 if (do_debug)
748 CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
749 " %s(): ch=0x%p id=%s\n", __FUNCTION__, ch, ch->id);
750
751 if (grp->in_sweep == 0) {
752 grp->in_sweep = 1;
753 ctcm_test_and_set_busy(dev);
754 grp->sweep_req_pend_num = grp->active_channels[READ];
755 grp->sweep_rsp_pend_num = grp->active_channels[READ];
756 }
757
758 if (do_debug_data)
759 ctcmpc_dumpit((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
760
761 grp->sweep_req_pend_num--;
762 ctcmpc_send_sweep_resp(ch);
763 kfree(mpcginfo);
764 return;
765}
766
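Summarizing the two sweep handlers above (a hedged restatement, derived only from the code shown):

/*
 * The first TH_SWEEP_REQ seen sets in_sweep and primes both pending
 * counters with the number of active read channels; each request is
 * answered by one TH_SWEEP_RESP carrying the write channel's last
 * th_seq_num. Once both counters reach zero in mpc_rcvd_sweep_resp(),
 * the TH sequence numbers reset to zero and the device is un-busied.
 */
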
767/*
768 * MPC Group Station FSM definitions
769 */
770static const char *mpcg_event_names[] = {
771 [MPCG_EVENT_INOP] = "INOP Condition",
772 [MPCG_EVENT_DISCONC] = "Discontact Received",
773 [MPCG_EVENT_XID0DO] = "Channel Active - Start XID",
774 [MPCG_EVENT_XID2] = "XID2 Received",
775 [MPCG_EVENT_XID2DONE] = "XID0 Complete",
776 [MPCG_EVENT_XID7DONE] = "XID7 Complete",
777 [MPCG_EVENT_TIMER] = "XID Setup Timer",
778 [MPCG_EVENT_DOIO] = "XID DoIO",
779};
780
781static const char *mpcg_state_names[] = {
782 [MPCG_STATE_RESET] = "Reset",
783 [MPCG_STATE_INOP] = "INOP",
784 [MPCG_STATE_XID2INITW] = "Passive XID- XID0 Pending Start",
785 [MPCG_STATE_XID2INITX] = "Passive XID- XID0 Pending Complete",
786 [MPCG_STATE_XID7INITW] = "Passive XID- XID7 Pending P1 Start",
787 [MPCG_STATE_XID7INITX] = "Passive XID- XID7 Pending P2 Complete",
788 [MPCG_STATE_XID0IOWAIT] = "Active XID- XID0 Pending Start",
789 [MPCG_STATE_XID0IOWAIX] = "Active XID- XID0 Pending Complete",
790 [MPCG_STATE_XID7INITI] = "Active XID- XID7 Pending Start",
791	[MPCG_STATE_XID7INITZ]	= "Active XID- XID7 Pending Complete",
792	[MPCG_STATE_XID7INITF]	= "XID        - XID7 Complete",
793 [MPCG_STATE_FLOWC] = "FLOW CONTROL ON",
794 [MPCG_STATE_READY] = "READY",
795};
796
797/*
798 * The MPC Group Station FSM
799 * 22 events
800 */
801static const fsm_node mpcg_fsm[] = {
802 { MPCG_STATE_RESET, MPCG_EVENT_INOP, mpc_action_go_inop },
803 { MPCG_STATE_INOP, MPCG_EVENT_INOP, mpc_action_nop },
804 { MPCG_STATE_FLOWC, MPCG_EVENT_INOP, mpc_action_go_inop },
805
806 { MPCG_STATE_READY, MPCG_EVENT_DISCONC, mpc_action_discontact },
807 { MPCG_STATE_READY, MPCG_EVENT_INOP, mpc_action_go_inop },
808
809 { MPCG_STATE_XID2INITW, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
810 { MPCG_STATE_XID2INITW, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
811 { MPCG_STATE_XID2INITW, MPCG_EVENT_INOP, mpc_action_go_inop },
812 { MPCG_STATE_XID2INITW, MPCG_EVENT_TIMER, mpc_action_timeout },
813 { MPCG_STATE_XID2INITW, MPCG_EVENT_DOIO, mpc_action_yside_xid },
814
815 { MPCG_STATE_XID2INITX, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
816 { MPCG_STATE_XID2INITX, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
817 { MPCG_STATE_XID2INITX, MPCG_EVENT_INOP, mpc_action_go_inop },
818 { MPCG_STATE_XID2INITX, MPCG_EVENT_TIMER, mpc_action_timeout },
819 { MPCG_STATE_XID2INITX, MPCG_EVENT_DOIO, mpc_action_yside_xid },
820
821 { MPCG_STATE_XID7INITW, MPCG_EVENT_XID2DONE, mpc_action_doxid7 },
822 { MPCG_STATE_XID7INITW, MPCG_EVENT_DISCONC, mpc_action_discontact },
823 { MPCG_STATE_XID7INITW, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
824 { MPCG_STATE_XID7INITW, MPCG_EVENT_INOP, mpc_action_go_inop },
825 { MPCG_STATE_XID7INITW, MPCG_EVENT_TIMER, mpc_action_timeout },
826 { MPCG_STATE_XID7INITW, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
827 { MPCG_STATE_XID7INITW, MPCG_EVENT_DOIO, mpc_action_yside_xid },
828
829 { MPCG_STATE_XID7INITX, MPCG_EVENT_DISCONC, mpc_action_discontact },
830 { MPCG_STATE_XID7INITX, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
831 { MPCG_STATE_XID7INITX, MPCG_EVENT_INOP, mpc_action_go_inop },
832 { MPCG_STATE_XID7INITX, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
833 { MPCG_STATE_XID7INITX, MPCG_EVENT_TIMER, mpc_action_timeout },
834 { MPCG_STATE_XID7INITX, MPCG_EVENT_DOIO, mpc_action_yside_xid },
835
836 { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
837 { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DISCONC, mpc_action_discontact },
838 { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
839 { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_INOP, mpc_action_go_inop },
840 { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_TIMER, mpc_action_timeout },
841 { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DOIO, mpc_action_xside_xid },
842
843 { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
844 { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DISCONC, mpc_action_discontact },
845 { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
846 { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_INOP, mpc_action_go_inop },
847 { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_TIMER, mpc_action_timeout },
848 { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DOIO, mpc_action_xside_xid },
849
850 { MPCG_STATE_XID7INITI, MPCG_EVENT_XID2DONE, mpc_action_doxid7 },
851 { MPCG_STATE_XID7INITI, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
852 { MPCG_STATE_XID7INITI, MPCG_EVENT_DISCONC, mpc_action_discontact },
853 { MPCG_STATE_XID7INITI, MPCG_EVENT_INOP, mpc_action_go_inop },
854 { MPCG_STATE_XID7INITI, MPCG_EVENT_TIMER, mpc_action_timeout },
855 { MPCG_STATE_XID7INITI, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
856 { MPCG_STATE_XID7INITI, MPCG_EVENT_DOIO, mpc_action_xside_xid },
857
858 { MPCG_STATE_XID7INITZ, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
859 { MPCG_STATE_XID7INITZ, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
860 { MPCG_STATE_XID7INITZ, MPCG_EVENT_DISCONC, mpc_action_discontact },
861 { MPCG_STATE_XID7INITZ, MPCG_EVENT_INOP, mpc_action_go_inop },
862 { MPCG_STATE_XID7INITZ, MPCG_EVENT_TIMER, mpc_action_timeout },
863 { MPCG_STATE_XID7INITZ, MPCG_EVENT_DOIO, mpc_action_xside_xid },
864
865 { MPCG_STATE_XID7INITF, MPCG_EVENT_INOP, mpc_action_go_inop },
866 { MPCG_STATE_XID7INITF, MPCG_EVENT_XID7DONE, mpc_action_go_ready },
867};
868
869static int mpcg_fsm_len = ARRAY_SIZE(mpcg_fsm);
870
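As a hedged model of how this table is consumed (the dispatcher below is illustrative only; the real lookup lives in the shared s390 fsm helpers, and the fsm_node field names are assumed from that helper, not defined in this patch):

	static void example_dispatch(fsm_instance *fi, int event, void *arg)
	{
		int state = fsm_getstate(fi);
		int i;

		for (i = 0; i < mpcg_fsm_len; i++) {
			if (mpcg_fsm[i].cond_state == state &&
			    mpcg_fsm[i].cond_event == event) {
				mpcg_fsm[i].function(fi, event, arg);
				return;
			}
		}
		/* no entry: the event is ignored in the current state */
	}
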
871/*
872 * MPC Group Station FSM action
873 * CTCM_PROTO_MPC only
874 */
875static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg)
876{
877 struct net_device *dev = arg;
878 struct ctcm_priv *priv = NULL;
879 struct mpc_group *grp = NULL;
880
881 if (dev == NULL) {
882 printk(KERN_INFO "%s() dev=NULL\n", __FUNCTION__);
883 return;
884 }
885
886 ctcm_pr_debug("ctcmpc enter: %s %s()\n", dev->name, __FUNCTION__);
887
888 priv = dev->priv;
889 if (priv == NULL) {
890 printk(KERN_INFO "%s() priv=NULL\n", __FUNCTION__);
891 return;
892 }
893
894 grp = priv->mpcg;
895 if (grp == NULL) {
896 printk(KERN_INFO "%s() grp=NULL\n", __FUNCTION__);
897 return;
898 }
899
900 fsm_deltimer(&grp->timer);
901
902 if (grp->saved_xid2->xid2_flag2 == 0x40) {
903 priv->xid->xid2_flag2 = 0x00;
904 if (grp->estconnfunc) {
905 grp->estconnfunc(grp->port_num, 1,
906 grp->group_max_buflen);
907 grp->estconnfunc = NULL;
908 } else if (grp->allochanfunc)
909 grp->send_qllc_disc = 1;
910 goto done;
911 }
912
913 grp->port_persist = 1;
914 grp->out_of_sequence = 0;
915 grp->estconn_called = 0;
916
917 tasklet_hi_schedule(&grp->mpc_tasklet2);
918
919 ctcm_pr_debug("ctcmpc exit: %s %s()\n", dev->name, __FUNCTION__);
920 return;
921
922done:
923 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
924
925
926	ctcm_pr_info("ctcmpc: %s() failure occurred\n", __FUNCTION__);
927}
928
929/*
930 * helper of ctcm_init_netdevice
931 * CTCM_PROTO_MPC only
932 */
933void mpc_group_ready(unsigned long adev)
934{
935 struct net_device *dev = (struct net_device *)adev;
936 struct ctcm_priv *priv = NULL;
937 struct mpc_group *grp = NULL;
938 struct channel *ch = NULL;
939
940
941 ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__);
942
943 if (dev == NULL) {
944 printk(KERN_INFO "%s() dev=NULL\n", __FUNCTION__);
945 return;
946 }
947
948 priv = dev->priv;
949 if (priv == NULL) {
950 printk(KERN_INFO "%s() priv=NULL\n", __FUNCTION__);
951 return;
952 }
953
954 grp = priv->mpcg;
955 if (grp == NULL) {
956 printk(KERN_INFO "ctcmpc:%s() grp=NULL\n", __FUNCTION__);
957 return;
958 }
959
960 printk(KERN_NOTICE "ctcmpc: %s GROUP TRANSITIONED TO READY"
961 " maxbuf:%d\n",
962 dev->name, grp->group_max_buflen);
963
964 fsm_newstate(grp->fsm, MPCG_STATE_READY);
965
966 /* Put up a read on the channel */
967 ch = priv->channel[READ];
968 ch->pdu_seq = 0;
969 if (do_debug_data)
970 ctcm_pr_debug("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" ,
971 __FUNCTION__, ch->pdu_seq);
972
973 ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch);
974 /* Put the write channel in idle state */
975 ch = priv->channel[WRITE];
976 if (ch->collect_len > 0) {
977 spin_lock(&ch->collect_lock);
978 ctcm_purge_skb_queue(&ch->collect_queue);
979 ch->collect_len = 0;
980 spin_unlock(&ch->collect_lock);
981 }
982 ctcm_chx_txidle(ch->fsm, CTC_EVENT_START, ch);
983
984 ctcm_clear_busy(dev);
985
986 if (grp->estconnfunc) {
987 grp->estconnfunc(grp->port_num, 0,
988 grp->group_max_buflen);
989 grp->estconnfunc = NULL;
990 } else
991 if (grp->allochanfunc)
992 grp->allochanfunc(grp->port_num,
993 grp->group_max_buflen);
994
995 grp->send_qllc_disc = 1;
996 grp->changed_side = 0;
997
998 ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__);
999 return;
1000
1001}
1002
1003/*
1004 * Adjust the MPC Group Active Channel Counts
1005 * helper of dev_action (called from channel fsm)
1006 */
1007int mpc_channel_action(struct channel *ch, int direction, int action)
1008{
1009 struct net_device *dev = ch->netdev;
1010 struct ctcm_priv *priv;
1011 struct mpc_group *grp = NULL;
1012 int rc = 0;
1013
1014 if (do_debug)
1015 ctcm_pr_debug("ctcmpc enter: %s(): ch=0x%p id=%s\n",
1016 __FUNCTION__, ch, ch->id);
1017
1018 if (dev == NULL) {
1019 printk(KERN_INFO "ctcmpc_channel_action %i dev=NULL\n",
1020 action);
1021 rc = 1;
1022 goto done;
1023 }
1024
1025 priv = dev->priv;
1026 if (priv == NULL) {
1027 printk(KERN_INFO
1028			"ctcmpc_channel_action %i priv=NULL, dev=%s\n",
1029 action, dev->name);
1030 rc = 2;
1031 goto done;
1032 }
1033
1034 grp = priv->mpcg;
1035
1036 if (grp == NULL) {
1037 printk(KERN_INFO "ctcmpc: %s()%i mpcgroup=NULL, dev=%s\n",
1038 __FUNCTION__, action, dev->name);
1039 rc = 3;
1040 goto done;
1041 }
1042
1043 ctcm_pr_info(
1044		"ctcmpc: %s() %i: Grp:%s total_channel_paths=%i "
1045 "active_channels read=%i, write=%i\n",
1046 __FUNCTION__,
1047 action,
1048 fsm_getstate_str(grp->fsm),
1049 grp->num_channel_paths,
1050 grp->active_channels[READ],
1051 grp->active_channels[WRITE]);
1052
1053 if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) {
1054 grp->num_channel_paths++;
1055 grp->active_channels[direction]++;
1056 grp->outstanding_xid2++;
1057 ch->in_mpcgroup = 1;
1058
1059 if (ch->xid_skb != NULL)
1060 dev_kfree_skb_any(ch->xid_skb);
1061
1062 ch->xid_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT,
1063 GFP_ATOMIC | GFP_DMA);
1064 if (ch->xid_skb == NULL) {
1065 printk(KERN_INFO "ctcmpc: %s()"
1066 "Couldn't alloc ch xid_skb\n", __FUNCTION__);
1067 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1068 return 1;
1069 }
1070 ch->xid_skb_data = ch->xid_skb->data;
1071 ch->xid_th = (struct th_header *)ch->xid_skb->data;
1072 skb_put(ch->xid_skb, TH_HEADER_LENGTH);
1073 ch->xid = (struct xid2 *)skb_tail_pointer(ch->xid_skb);
1074 skb_put(ch->xid_skb, XID2_LENGTH);
1075 ch->xid_id = skb_tail_pointer(ch->xid_skb);
1076 ch->xid_skb->data = ch->xid_skb_data;
1077 skb_reset_tail_pointer(ch->xid_skb);
1078 ch->xid_skb->len = 0;
1079
1080 memcpy(skb_put(ch->xid_skb, grp->xid_skb->len),
1081 grp->xid_skb->data,
1082 grp->xid_skb->len);
1083
1084 ch->xid->xid2_dlc_type = ((CHANNEL_DIRECTION(ch->flags) == READ)
1085 ? XID2_READ_SIDE : XID2_WRITE_SIDE);
1086
1087 if (CHANNEL_DIRECTION(ch->flags) == WRITE)
1088 ch->xid->xid2_buf_len = 0x00;
1089
1090 ch->xid_skb->data = ch->xid_skb_data;
1091 skb_reset_tail_pointer(ch->xid_skb);
1092 ch->xid_skb->len = 0;
1093
1094 fsm_newstate(ch->fsm, CH_XID0_PENDING);
1095
1096 if ((grp->active_channels[READ] > 0) &&
1097 (grp->active_channels[WRITE] > 0) &&
1098 (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
1099 fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
1100 printk(KERN_NOTICE "ctcmpc: %s MPC GROUP "
1101 "CHANNELS ACTIVE\n", dev->name);
1102 }
1103 } else if ((action == MPC_CHANNEL_REMOVE) &&
1104 (ch->in_mpcgroup == 1)) {
1105 ch->in_mpcgroup = 0;
1106 grp->num_channel_paths--;
1107 grp->active_channels[direction]--;
1108
1109 if (ch->xid_skb != NULL)
1110 dev_kfree_skb_any(ch->xid_skb);
1111 ch->xid_skb = NULL;
1112
1113 if (grp->channels_terminating)
1114 goto done;
1115
1116 if (((grp->active_channels[READ] == 0) &&
1117 (grp->active_channels[WRITE] > 0))
1118 || ((grp->active_channels[WRITE] == 0) &&
1119 (grp->active_channels[READ] > 0)))
1120 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1121 }
1122
1123done:
1124
1125 if (do_debug) {
1126 ctcm_pr_debug(
1127 "ctcmpc: %s() %i Grp:%s ttl_chan_paths=%i "
1128 "active_chans read=%i, write=%i\n",
1129 __FUNCTION__,
1130 action,
1131 fsm_getstate_str(grp->fsm),
1132 grp->num_channel_paths,
1133 grp->active_channels[READ],
1134 grp->active_channels[WRITE]);
1135
1136 ctcm_pr_debug("ctcmpc exit : %s(): ch=0x%p id=%s\n",
1137 __FUNCTION__, ch, ch->id);
1138 }
1139 return rc;
1140
1141}
1142
1143/**
1144 * Unpack a just-received skb and hand it over to
1145 * upper layers.
1146 * Special MPC version of unpack_skb.
1147 *
1148 * ch The channel where this skb has been received.
1149 * pskb The received skb.
1150 */
1151static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
1152{
1153 struct net_device *dev = ch->netdev;
1154 struct ctcm_priv *priv = dev->priv;
1155 struct mpc_group *grp = priv->mpcg;
1156 struct pdu *curr_pdu;
1157 struct mpcg_info *mpcginfo;
1158 struct th_header *header = NULL;
1159 struct th_sweep *sweep = NULL;
1160 int pdu_last_seen = 0;
1161 __u32 new_len;
1162 struct sk_buff *skb;
1163 int skblen;
1164 int sendrc = 0;
1165
1166 if (do_debug)
1167 ctcm_pr_debug("ctcmpc enter: %s() %s cp:%i ch:%s\n",
1168 __FUNCTION__, dev->name, smp_processor_id(), ch->id);
1169
1170 header = (struct th_header *)pskb->data;
1171 if ((header->th_seg == 0) &&
1172 (header->th_ch_flag == 0) &&
1173 (header->th_blk_flag == 0) &&
1174 (header->th_seq_num == 0))
1175 /* nothing for us */ goto done;
1176
1177 if (do_debug_data) {
1178 ctcm_pr_debug("ctcmpc: %s() th_header\n", __FUNCTION__);
1179 ctcmpc_dumpit((char *)header, TH_HEADER_LENGTH);
1180 ctcm_pr_debug("ctcmpc: %s() pskb len: %04x \n",
1181 __FUNCTION__, pskb->len);
1182 }
1183
1184 pskb->dev = dev;
1185 pskb->ip_summed = CHECKSUM_UNNECESSARY;
1186 skb_pull(pskb, TH_HEADER_LENGTH);
1187
1188 if (likely(header->th_ch_flag == TH_HAS_PDU)) {
1189 if (do_debug_data)
1190 ctcm_pr_debug("ctcmpc: %s() came into th_has_pdu\n",
1191 __FUNCTION__);
1192 if ((fsm_getstate(grp->fsm) == MPCG_STATE_FLOWC) ||
1193 ((fsm_getstate(grp->fsm) == MPCG_STATE_READY) &&
1194 (header->th_seq_num != ch->th_seq_num + 1) &&
1195 (ch->th_seq_num != 0))) {
1196			/* This is NOT the next segment:	*
1197			 * we are not the correct race winner,	*
1198			 * so go away and let someone else win.	*
1199			 * But this only applies once XID	*
1200			 * negotiation is done.			*
1201			 */
1202 grp->out_of_sequence += 1;
1203 __skb_push(pskb, TH_HEADER_LENGTH);
1204 skb_queue_tail(&ch->io_queue, pskb);
1205 if (do_debug_data)
1206 ctcm_pr_debug("ctcmpc: %s() th_seq_num "
1207 "expect:%08x got:%08x\n", __FUNCTION__,
1208 ch->th_seq_num + 1, header->th_seq_num);
1209
1210 return;
1211 }
1212 grp->out_of_sequence = 0;
1213 ch->th_seq_num = header->th_seq_num;
1214
1215 if (do_debug_data)
1216 ctcm_pr_debug("ctcmpc: %s() FromVTAM_th_seq=%08x\n",
1217 __FUNCTION__, ch->th_seq_num);
1218
1219 if (unlikely(fsm_getstate(grp->fsm) != MPCG_STATE_READY))
1220 goto done;
1221 pdu_last_seen = 0;
1222 while ((pskb->len > 0) && !pdu_last_seen) {
1223 curr_pdu = (struct pdu *)pskb->data;
1224 if (do_debug_data) {
1225 ctcm_pr_debug("ctcm: %s() pdu_header\n",
1226 __FUNCTION__);
1227 ctcmpc_dumpit((char *)pskb->data,
1228 PDU_HEADER_LENGTH);
1229 ctcm_pr_debug("ctcm: %s() pskb len: %04x \n",
1230 __FUNCTION__, pskb->len);
1231 }
1232 skb_pull(pskb, PDU_HEADER_LENGTH);
1233
1234 if (curr_pdu->pdu_flag & PDU_LAST)
1235 pdu_last_seen = 1;
1236 if (curr_pdu->pdu_flag & PDU_CNTL)
1237 pskb->protocol = htons(ETH_P_SNAP);
1238 else
1239 pskb->protocol = htons(ETH_P_SNA_DIX);
1240
1241 if ((pskb->len <= 0) || (pskb->len > ch->max_bufsize)) {
1242 printk(KERN_INFO
1243 "%s Illegal packet size %d "
1244				       "received, "
1245 "dropping\n", dev->name,
1246 pskb->len);
1247 priv->stats.rx_dropped++;
1248 priv->stats.rx_length_errors++;
1249 goto done;
1250 }
1251 skb_reset_mac_header(pskb);
1252 new_len = curr_pdu->pdu_offset;
1253 if (do_debug_data)
1254 ctcm_pr_debug("ctcmpc: %s() new_len: %04x \n",
1255 __FUNCTION__, new_len);
1256 if ((new_len == 0) || (new_len > pskb->len)) {
1257 /* should never happen */
1258 /* pskb len must be hosed...bail out */
1259 printk(KERN_INFO
1260 "ctcmpc: %s(): invalid pdu"
1261 " offset of %04x - data may be"
1262					" lost\n", __FUNCTION__, new_len);
1263 goto done;
1264 }
1265 skb = __dev_alloc_skb(new_len+4, GFP_ATOMIC);
1266
1267 if (!skb) {
1268 printk(KERN_INFO
1269 "ctcm: %s Out of memory in "
1270 "%s()- request-len:%04x \n",
1271 dev->name,
1272 __FUNCTION__,
1273 new_len+4);
1274 priv->stats.rx_dropped++;
1275 fsm_event(grp->fsm,
1276 MPCG_EVENT_INOP, dev);
1277 goto done;
1278 }
1279
1280 memcpy(skb_put(skb, new_len),
1281 pskb->data, new_len);
1282
1283 skb_reset_mac_header(skb);
1284 skb->dev = pskb->dev;
1285 skb->protocol = pskb->protocol;
1286 skb->ip_summed = CHECKSUM_UNNECESSARY;
1287 *((__u32 *) skb_push(skb, 4)) = ch->pdu_seq;
1288 ch->pdu_seq++;
1289
1290 if (do_debug_data)
1291 ctcm_pr_debug("%s: ToDCM_pdu_seq= %08x\n",
1292 __FUNCTION__, ch->pdu_seq);
1293
1294 ctcm_pr_debug("ctcm: %s() skb:%0lx "
1295 "skb len: %d \n", __FUNCTION__,
1296 (unsigned long)skb, skb->len);
1297 if (do_debug_data) {
1298 ctcm_pr_debug("ctcmpc: %s() up to 32 bytes"
1299 " of pdu_data sent\n",
1300 __FUNCTION__);
1301 ctcmpc_dump32((char *)skb->data, skb->len);
1302 }
1303
1304 skblen = skb->len;
1305 sendrc = netif_rx(skb);
1306 priv->stats.rx_packets++;
1307 priv->stats.rx_bytes += skblen;
1308 skb_pull(pskb, new_len); /* point to next PDU */
1309 }
1310 } else {
1311 mpcginfo = (struct mpcg_info *)
1312 kmalloc(sizeof(struct mpcg_info), gfp_type());
1313 if (mpcginfo == NULL)
1314 goto done;
1315
1316 mpcginfo->ch = ch;
1317 mpcginfo->th = header;
1318 mpcginfo->skb = pskb;
1319 ctcm_pr_debug("ctcmpc: %s() Not PDU - may be control pkt\n",
1320 __FUNCTION__);
1321 /* it's a sweep? */
1322 sweep = (struct th_sweep *)pskb->data;
1323 mpcginfo->sweep = sweep;
1324 if (header->th_ch_flag == TH_SWEEP_REQ)
1325 mpc_rcvd_sweep_req(mpcginfo);
1326 else if (header->th_ch_flag == TH_SWEEP_RESP)
1327 mpc_rcvd_sweep_resp(mpcginfo);
1328 else if (header->th_blk_flag == TH_DATA_IS_XID) {
1329 struct xid2 *thisxid = (struct xid2 *)pskb->data;
1330 skb_pull(pskb, XID2_LENGTH);
1331 mpcginfo->xid = thisxid;
1332 fsm_event(grp->fsm, MPCG_EVENT_XID2, mpcginfo);
1333 } else if (header->th_blk_flag == TH_DISCONTACT)
1334 fsm_event(grp->fsm, MPCG_EVENT_DISCONC, mpcginfo);
1335 else if (header->th_seq_num != 0) {
1336 printk(KERN_INFO "%s unexpected packet"
1337			       ", expected control pkt\n", dev->name);
1338 priv->stats.rx_dropped++;
1339 /* mpcginfo only used for non-data transfers */
1340 kfree(mpcginfo);
1341 if (do_debug_data)
1342 ctcmpc_dump_skb(pskb, -8);
1343 }
1344 }
1345done:
1346
1347 dev_kfree_skb_any(pskb);
1348 if (sendrc == NET_RX_DROP) {
1349 printk(KERN_WARNING "%s %s() NETWORK BACKLOG EXCEEDED"
1350 " - PACKET DROPPED\n", dev->name, __FUNCTION__);
1351 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1352 }
1353
1354 if (do_debug)
1355 ctcm_pr_debug("ctcmpc exit : %s %s(): ch=0x%p id=%s\n",
1356 dev->name, __FUNCTION__, ch, ch->id);
1357}
1358
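To summarize the receive path just shown, a hedged sketch of the inbound block layout it walks (struct and flag names are from this file; the diagram itself is an editor's reconstruction):

/*
 * One inbound MPC block as consumed by ctcmpc_unpack_skb():
 *
 *  +------------------+-------------------+---------+-- ... --+
 *  | struct th_header | struct pdu header | payload |  more   |
 *  | TH_HEADER_LENGTH | PDU_HEADER_LENGTH |         |  PDUs   |
 *  +------------------+-------------------+---------+-- ... --+
 *
 * th_ch_flag == TH_HAS_PDU selects the data path; pdu_offset gives
 * the length of the current PDU's payload, and PDU_LAST flags the
 * final PDU of the block. Everything else is treated as a control
 * frame (sweep, XID or discontact).
 */
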
1359/**
1360 * Tasklet helper for MPC's skb unpacking.
1361 *
1362 * ch		The channel to work on.
1363 * Allow flow control back pressure to occur here.
1364 * Throttling back the channel can result in excessive
1365 * channel inactivity and system deactivation of the channel.
1366 */
1367void ctcmpc_bh(unsigned long thischan)
1368{
1369 struct channel *ch = (struct channel *)thischan;
1370 struct sk_buff *skb;
1371 struct net_device *dev = ch->netdev;
1372 struct ctcm_priv *priv = dev->priv;
1373 struct mpc_group *grp = priv->mpcg;
1374
1375 if (do_debug)
1376 ctcm_pr_debug("%s cp:%i enter: %s() %s\n",
1377 dev->name, smp_processor_id(), __FUNCTION__, ch->id);
1378 /* caller has requested driver to throttle back */
1379 while ((fsm_getstate(grp->fsm) != MPCG_STATE_FLOWC) &&
1380 (skb = skb_dequeue(&ch->io_queue))) {
1381 ctcmpc_unpack_skb(ch, skb);
1382 if (grp->out_of_sequence > 20) {
1383 /* assume data loss has occurred if */
1384 /* missing seq_num for extended */
1385 /* period of time */
1386 grp->out_of_sequence = 0;
1387 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1388 break;
1389 }
1390 if (skb == skb_peek(&ch->io_queue))
1391 break;
1392 }
1393 if (do_debug)
1394 ctcm_pr_debug("ctcmpc exit : %s %s(): ch=0x%p id=%s\n",
1395 dev->name, __FUNCTION__, ch, ch->id);
1396 return;
1397}
1398
1399/*
1400 * MPC Group Initializations
1401 */
1402struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv)
1403{
1404 struct mpc_group *grp;
1405
1406 CTCM_DBF_TEXT(MPC_SETUP, 3, __FUNCTION__);
1407
1408 grp = kzalloc(sizeof(struct mpc_group), GFP_KERNEL);
1409 if (grp == NULL)
1410 return NULL;
1411
1412 grp->fsm =
1413 init_fsm("mpcg", mpcg_state_names, mpcg_event_names,
1414 MPCG_NR_STATES, MPCG_NR_EVENTS, mpcg_fsm,
1415 mpcg_fsm_len, GFP_KERNEL);
1416 if (grp->fsm == NULL) {
1417 kfree(grp);
1418 return NULL;
1419 }
1420
1421 fsm_newstate(grp->fsm, MPCG_STATE_RESET);
1422 fsm_settimer(grp->fsm, &grp->timer);
1423
1424 grp->xid_skb =
1425 __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA);
1426 if (grp->xid_skb == NULL) {
1427 printk(KERN_INFO "Couldn't alloc MPCgroup xid_skb\n");
1428 kfree_fsm(grp->fsm);
1429 kfree(grp);
1430 return NULL;
1431 }
1432 /* base xid for all channels in group */
1433 grp->xid_skb_data = grp->xid_skb->data;
1434 grp->xid_th = (struct th_header *)grp->xid_skb->data;
1435 memcpy(skb_put(grp->xid_skb, TH_HEADER_LENGTH),
1436 &thnorm, TH_HEADER_LENGTH);
1437
1438 grp->xid = (struct xid2 *) skb_tail_pointer(grp->xid_skb);
1439 memcpy(skb_put(grp->xid_skb, XID2_LENGTH), &init_xid, XID2_LENGTH);
1440 grp->xid->xid2_adj_id = jiffies | 0xfff00000;
1441 grp->xid->xid2_sender_id = jiffies;
1442
1443 grp->xid_id = skb_tail_pointer(grp->xid_skb);
1444 memcpy(skb_put(grp->xid_skb, 4), "VTAM", 4);
1445
1446 grp->rcvd_xid_skb =
1447 __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
1448 if (grp->rcvd_xid_skb == NULL) {
1449 printk(KERN_INFO "Couldn't alloc MPCgroup rcvd_xid_skb\n");
1450 kfree_fsm(grp->fsm);
1451 dev_kfree_skb(grp->xid_skb);
1452 kfree(grp);
1453 return NULL;
1454 }
1455 grp->rcvd_xid_data = grp->rcvd_xid_skb->data;
1456 grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
1457 memcpy(skb_put(grp->rcvd_xid_skb, TH_HEADER_LENGTH),
1458 &thnorm, TH_HEADER_LENGTH);
1459 grp->saved_xid2 = NULL;
1460 priv->xid = grp->xid;
1461 priv->mpcg = grp;
1462 return grp;
1463}
1464
1465/*
1466 * The MPC Group Station FSM
1467 */
1468
1469/*
1470 * MPC Group Station FSM actions
1471 * CTCM_PROTO_MPC only
1472 */
1473
1474/**
1475 * NOP action for state machines
1476 */
1477static void mpc_action_nop(fsm_instance *fi, int event, void *arg)
1478{
1479}
1480
1481/*
1482 * invoked when the device transitions to dev_stopped
1483 * MPC will stop each individual channel if a single XID failure
1484 * occurs, or will initiate stopping of all channels if a GROUP
1485 * level failure occurs.
1486 */
1487static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
1488{
1489 struct net_device *dev = arg;
1490 struct ctcm_priv *priv;
1491 struct mpc_group *grp;
1492 int rc = 0;
1493 struct channel *wch, *rch;
1494
1495 if (dev == NULL) {
1496 printk(KERN_INFO "%s() dev=NULL\n", __FUNCTION__);
1497 return;
1498 }
1499
1500 ctcm_pr_debug("ctcmpc enter: %s %s()\n", dev->name, __FUNCTION__);
1501
1502 priv = dev->priv;
1503 grp = priv->mpcg;
1504 grp->flow_off_called = 0;
1505
1506 fsm_deltimer(&grp->timer);
1507
1508 if (grp->channels_terminating)
1509 goto done;
1510
1511 grp->channels_terminating = 1;
1512
1513 grp->saved_state = fsm_getstate(grp->fsm);
1514 fsm_newstate(grp->fsm, MPCG_STATE_INOP);
1515 if (grp->saved_state > MPCG_STATE_XID7INITF)
1516 printk(KERN_NOTICE "%s:MPC GROUP INOPERATIVE\n", dev->name);
1517 if ((grp->saved_state != MPCG_STATE_RESET) ||
1518 /* dealloc_channel has been called */
1519 ((grp->saved_state == MPCG_STATE_RESET) &&
1520 (grp->port_persist == 0)))
1521 fsm_deltimer(&priv->restart_timer);
1522
1523 wch = priv->channel[WRITE];
1524 rch = priv->channel[READ];
1525
1526 switch (grp->saved_state) {
1527 case MPCG_STATE_RESET:
1528 case MPCG_STATE_INOP:
1529 case MPCG_STATE_XID2INITW:
1530 case MPCG_STATE_XID0IOWAIT:
1531 case MPCG_STATE_XID2INITX:
1532 case MPCG_STATE_XID7INITW:
1533 case MPCG_STATE_XID7INITX:
1534 case MPCG_STATE_XID0IOWAIX:
1535 case MPCG_STATE_XID7INITI:
1536 case MPCG_STATE_XID7INITZ:
1537 case MPCG_STATE_XID7INITF:
1538 break;
1539 case MPCG_STATE_FLOWC:
1540 case MPCG_STATE_READY:
1541 default:
1542 tasklet_hi_schedule(&wch->ch_disc_tasklet);
1543 }
1544
1545 grp->xid2_tgnum = 0;
1546 grp->group_max_buflen = 0; /*min of all received */
1547 grp->outstanding_xid2 = 0;
1548 grp->outstanding_xid7 = 0;
1549 grp->outstanding_xid7_p2 = 0;
1550 grp->saved_xid2 = NULL;
1551 grp->xidnogood = 0;
1552 grp->changed_side = 0;
1553
1554 grp->rcvd_xid_skb->data = grp->rcvd_xid_data;
1555 skb_reset_tail_pointer(grp->rcvd_xid_skb);
1556 grp->rcvd_xid_skb->len = 0;
1557 grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
1558 memcpy(skb_put(grp->rcvd_xid_skb, TH_HEADER_LENGTH), &thnorm,
1559 TH_HEADER_LENGTH);
1560
1561 if (grp->send_qllc_disc == 1) {
1562 grp->send_qllc_disc = 0;
1563 rc = mpc_send_qllc_discontact(dev);
1564 }
1565
1566 /* DO NOT issue DEV_EVENT_STOP directly out of this code */
1567 /* This can result in INOP of VTAM PU due to halting of */
1568 /* outstanding IO which causes a sense to be returned */
1569	/* Only about 3 senses are allowed and then IOS/VTAM will */
1570	/* become unreachable without manual intervention	  */
1571 if ((grp->port_persist == 1) || (grp->alloc_called)) {
1572 grp->alloc_called = 0;
1573 fsm_deltimer(&priv->restart_timer);
1574 fsm_addtimer(&priv->restart_timer,
1575 500,
1576 DEV_EVENT_RESTART,
1577 dev);
1578 fsm_newstate(grp->fsm, MPCG_STATE_RESET);
1579 if (grp->saved_state > MPCG_STATE_XID7INITF)
1580 printk(KERN_NOTICE "%s:MPC GROUP RECOVERY SCHEDULED\n",
1581 dev->name);
1582 } else {
1583 fsm_deltimer(&priv->restart_timer);
1584 fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_STOP, dev);
1585 fsm_newstate(grp->fsm, MPCG_STATE_RESET);
1586 printk(KERN_NOTICE "%s:MPC GROUP RECOVERY NOT ATTEMPTED\n",
1587 dev->name);
1588 }
1589
1590done:
1591 ctcm_pr_debug("ctcmpc exit:%s %s()\n", dev->name, __FUNCTION__);
1592 return;
1593}
1594
1595/**
1596 * Handle mpc group action timeout.
1597 * MPC Group Station FSM action
1598 * CTCM_PROTO_MPC only
1599 *
1600 * fi An instance of an mpc_group fsm.
1601 * event The event, just happened.
1602 * arg Generic pointer, casted from net_device * upon call.
1603 */
1604static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
1605{
1606 struct net_device *dev = arg;
1607 struct ctcm_priv *priv;
1608 struct mpc_group *grp;
1609 struct channel *wch;
1610 struct channel *rch;
1611
1612 CTCM_DBF_TEXT(MPC_TRACE, 6, __FUNCTION__);
1613
1614 if (dev == NULL) {
1615 CTCM_DBF_TEXT_(MPC_ERROR, 4, "%s: dev=NULL\n", __FUNCTION__);
1616 return;
1617 }
1618
1619 priv = dev->priv;
1620 grp = priv->mpcg;
1621 wch = priv->channel[WRITE];
1622 rch = priv->channel[READ];
1623
1624 switch (fsm_getstate(grp->fsm)) {
1625 case MPCG_STATE_XID2INITW:
1626 /* Unless there is outstanding IO on the */
1627 /* channel just return and wait for ATTN */
1628 /* interrupt to begin XID negotiations */
1629 if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) &&
1630 (fsm_getstate(wch->fsm) == CH_XID0_PENDING))
1631 break;
1632 default:
1633 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1634 }
1635
1636 CTCM_DBF_TEXT_(MPC_TRACE, 6, "%s: dev=%s exit",
1637 __FUNCTION__, dev->name);
1638 return;
1639}
1640
1641/*
1642 * MPC Group Station FSM action
1643 * CTCM_PROTO_MPC only
1644 */
1645void mpc_action_discontact(fsm_instance *fi, int event, void *arg)
1646{
1647	struct mpcg_info   *mpcginfo   = arg;
1648	struct channel	   *ch	       = mpcginfo->ch;
1649	struct net_device  *dev;
1650	struct ctcm_priv   *priv;
1651	struct mpc_group   *grp;
1652
1653	if ((ch == NULL) || (ch->netdev == NULL)) {
1654		printk(KERN_INFO "%s() ch or dev=NULL\n", __FUNCTION__);
1655		return;
1656	}
1657
1658	dev  = ch->netdev;
1659	priv = dev->priv;
1660	grp  = priv->mpcg;
1661
1662 ctcm_pr_debug("ctcmpc enter: %s %s()\n", dev->name, __FUNCTION__);
1663
1664 grp->send_qllc_disc = 1;
1665 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1666
1667 ctcm_pr_debug("ctcmpc exit: %s %s()\n", dev->name, __FUNCTION__);
1668 return;
1669}
1670
1671/*
1672 * MPC Group Station - not part of FSM
1673 * CTCM_PROTO_MPC only
1674 * called from add_channel in ctcm_main.c
1675 */
1676void mpc_action_send_discontact(unsigned long thischan)
1677{
1678 struct channel *ch;
1679 struct net_device *dev;
1680 struct ctcm_priv *priv;
1681 struct mpc_group *grp;
1682 int rc = 0;
1683 unsigned long saveflags;
1684
1685 ch = (struct channel *)thischan;
1686 dev = ch->netdev;
1687 priv = dev->priv;
1688 grp = priv->mpcg;
1689
1690 ctcm_pr_info("ctcmpc: %s cp:%i enter: %s() GrpState:%s ChState:%s\n",
1691 dev->name,
1692 smp_processor_id(),
1693 __FUNCTION__,
1694 fsm_getstate_str(grp->fsm),
1695 fsm_getstate_str(ch->fsm));
1696 saveflags = 0; /* avoids compiler warning with
1697 spin_unlock_irqrestore */
1698
1699 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1700 rc = ccw_device_start(ch->cdev, &ch->ccw[15],
1701 (unsigned long)ch, 0xff, 0);
1702 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1703
1704 if (rc != 0) {
1705 ctcm_pr_info("ctcmpc: %s() ch:%s IO failed \n",
1706 __FUNCTION__,
1707 ch->id);
1708 ctcm_ccw_check_rc(ch, rc, "send discontact");
1709 /* Not checking return code value here */
1710 /* Making best effort to notify partner*/
1711 /* that MPC Group is going down */
1712 }
1713
1714 ctcm_pr_debug("ctcmpc exit: %s %s()\n", dev->name, __FUNCTION__);
1715 return;
1716}
1717
1718
1719/*
1720 * helper function of mpc FSM
1721 * CTCM_PROTO_MPC only
1722 * mpc_action_rcvd_xid7
1723 */
1724static int mpc_validate_xid(struct mpcg_info *mpcginfo)
1725{
1726 struct channel *ch = mpcginfo->ch;
1727 struct net_device *dev = ch->netdev;
1728 struct ctcm_priv *priv = dev->priv;
1729 struct mpc_group *grp = priv->mpcg;
1730 struct xid2 *xid = mpcginfo->xid;
1731 int failed = 0;
1732 int rc = 0;
1733 __u64 our_id, their_id = 0;
1734 int len;
1735
1736 len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
1737
1738 ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__);
1739
1740 if (mpcginfo->xid == NULL) {
1741 printk(KERN_INFO "%s() xid=NULL\n", __FUNCTION__);
1742 rc = 1;
1743 goto done;
1744 }
1745
1746 ctcm_pr_debug("ctcmpc : %s xid received()\n", __FUNCTION__);
1747 ctcmpc_dumpit((char *)mpcginfo->xid, XID2_LENGTH);
1748
1749	/* the received direction should be the opposite of ours */
1750 if (((CHANNEL_DIRECTION(ch->flags) == READ) ? XID2_WRITE_SIDE :
1751 XID2_READ_SIDE) != xid->xid2_dlc_type) {
1752 failed = 1;
1753 printk(KERN_INFO "ctcmpc:%s() XID REJECTED - READ-WRITE CH "
1754 "Pairing Invalid \n", __FUNCTION__);
1755 }
1756
1757 if (xid->xid2_dlc_type == XID2_READ_SIDE) {
1758 ctcm_pr_debug("ctcmpc: %s(): grpmaxbuf:%d xid2buflen:%d\n",
1759 __FUNCTION__, grp->group_max_buflen,
1760 xid->xid2_buf_len);
1761
1762 if (grp->group_max_buflen == 0 ||
1763 grp->group_max_buflen > xid->xid2_buf_len - len)
1764 grp->group_max_buflen = xid->xid2_buf_len - len;
1765 }
1766
1767
1768 if (grp->saved_xid2 == NULL) {
1769 grp->saved_xid2 =
1770 (struct xid2 *)skb_tail_pointer(grp->rcvd_xid_skb);
1771
1772 memcpy(skb_put(grp->rcvd_xid_skb,
1773 XID2_LENGTH), xid, XID2_LENGTH);
1774 grp->rcvd_xid_skb->data = grp->rcvd_xid_data;
1775
1776 skb_reset_tail_pointer(grp->rcvd_xid_skb);
1777 grp->rcvd_xid_skb->len = 0;
1778
1779 /* convert two 32 bit numbers into 1 64 bit for id compare */
1780 our_id = (__u64)priv->xid->xid2_adj_id;
1781 our_id = our_id << 32;
1782 our_id = our_id + priv->xid->xid2_sender_id;
1783 their_id = (__u64)xid->xid2_adj_id;
1784 their_id = their_id << 32;
1785 their_id = their_id + xid->xid2_sender_id;
1786 /* lower id assume the xside role */
1787 if (our_id < their_id) {
1788 grp->roll = XSIDE;
1789 ctcm_pr_debug("ctcmpc :%s() WE HAVE LOW ID-"
1790 "TAKE XSIDE\n", __FUNCTION__);
1791 } else {
1792 grp->roll = YSIDE;
1793 ctcm_pr_debug("ctcmpc :%s() WE HAVE HIGH ID-"
1794 "TAKE YSIDE\n", __FUNCTION__);
1795 }
1796
1797 } else {
1798 if (xid->xid2_flag4 != grp->saved_xid2->xid2_flag4) {
1799 failed = 1;
1800 printk(KERN_INFO "%s XID REJECTED - XID Flag Byte4\n",
1801 __FUNCTION__);
1802 }
1803 if (xid->xid2_flag2 == 0x40) {
1804 failed = 1;
1805 printk(KERN_INFO "%s XID REJECTED - XID NOGOOD\n",
1806 __FUNCTION__);
1807 }
1808 if (xid->xid2_adj_id != grp->saved_xid2->xid2_adj_id) {
1809 failed = 1;
1810 printk(KERN_INFO "%s XID REJECTED - "
1811 "Adjacent Station ID Mismatch\n",
1812 __FUNCTION__);
1813 }
1814 if (xid->xid2_sender_id != grp->saved_xid2->xid2_sender_id) {
1815 failed = 1;
1816 printk(KERN_INFO "%s XID REJECTED - "
1817 "Sender Address Mismatch\n", __FUNCTION__);
1818
1819 }
1820 }
1821
1822 if (failed) {
1823 ctcm_pr_info("ctcmpc : %s() failed\n", __FUNCTION__);
1824 priv->xid->xid2_flag2 = 0x40;
1825 grp->saved_xid2->xid2_flag2 = 0x40;
1826 rc = 1;
1827 }
1828
1829done:
1830
1831 ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__);
1832 return rc;
1833}
1834
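A compact hedged restatement of the role-selection arithmetic above (the helper name is hypothetical):

	static __u64 example_combine_id(const struct xid2 *x)
	{
		/* adjacent-node id in the high word, sender id in the low word */
		return ((__u64)x->xid2_adj_id << 32) + x->xid2_sender_id;
	}

The side whose combined value is numerically lower assumes the XSIDE (active) role; the other side takes YSIDE.
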
1835/*
1836 * MPC Group Station FSM action
1837 * CTCM_PROTO_MPC only
1838 */
1839static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side)
1840{
1841 struct channel *ch = arg;
1842 struct ctcm_priv *priv;
1843 struct mpc_group *grp = NULL;
1844 struct net_device *dev = NULL;
1845 int rc = 0;
1846 int gotlock = 0;
1847 unsigned long saveflags = 0; /* avoids compiler warning with
1848 spin_unlock_irqrestore */
1849
1850 if (ch == NULL) {
1851 printk(KERN_INFO "%s ch=NULL\n", __FUNCTION__);
1852 goto done;
1853 }
1854
1855 if (do_debug)
1856 ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
1857 __FUNCTION__, smp_processor_id(), ch, ch->id);
1858
1859 dev = ch->netdev;
1860 if (dev == NULL) {
1861 printk(KERN_INFO "%s dev=NULL\n", __FUNCTION__);
1862 goto done;
1863 }
1864
1865 priv = dev->priv;
1866 if (priv == NULL) {
1867 printk(KERN_INFO "%s priv=NULL\n", __FUNCTION__);
1868 goto done;
1869 }
1870
1871 grp = priv->mpcg;
1872 if (grp == NULL) {
1873 printk(KERN_INFO "%s grp=NULL\n", __FUNCTION__);
1874 goto done;
1875 }
1876
1877 if (ctcm_checkalloc_buffer(ch))
1878 goto done;
1879
1880 /* skb data-buffer referencing: */
1881
1882 ch->trans_skb->data = ch->trans_skb_data;
1883 skb_reset_tail_pointer(ch->trans_skb);
1884 ch->trans_skb->len = 0;
1885	/* the state set up by the previous 3 statements is NOT always
1886	 * guaranteed after ctcm_checkalloc_buffer
1887	 * because the trans_skb may be reused
1888	 */
1889 memset(ch->trans_skb->data, 0, 16);
1890 ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data;
1891 /* check is main purpose here: */
1892 skb_put(ch->trans_skb, TH_HEADER_LENGTH);
1893 ch->rcvd_xid = (struct xid2 *)skb_tail_pointer(ch->trans_skb);
1894 /* check is main purpose here: */
1895 skb_put(ch->trans_skb, XID2_LENGTH);
1896 ch->rcvd_xid_id = skb_tail_pointer(ch->trans_skb);
1897 /* cleanup back to startpoint */
1898 ch->trans_skb->data = ch->trans_skb_data;
1899 skb_reset_tail_pointer(ch->trans_skb);
1900 ch->trans_skb->len = 0;
1901
1902 /* non-checking rewrite of above skb data-buffer referencing: */
1903 /*
1904 memset(ch->trans_skb->data, 0, 16);
1905 ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data;
1906 ch->rcvd_xid = (struct xid2 *)(ch->trans_skb_data + TH_HEADER_LENGTH);
1907 ch->rcvd_xid_id = ch->trans_skb_data + TH_HEADER_LENGTH + XID2_LENGTH;
1908 */
1909
1910 ch->ccw[8].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1911 ch->ccw[8].count = 0;
1912 ch->ccw[8].cda = 0x00;
1913
1914 if (side == XSIDE) {
1915 /* mpc_action_xside_xid */
1916 if (ch->xid_th == NULL) {
1917 printk(KERN_INFO "%s ch->xid_th=NULL\n", __FUNCTION__);
1918 goto done;
1919 }
1920 ch->ccw[9].cmd_code = CCW_CMD_WRITE;
1921 ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1922 ch->ccw[9].count = TH_HEADER_LENGTH;
1923 ch->ccw[9].cda = virt_to_phys(ch->xid_th);
1924
1925 if (ch->xid == NULL) {
1926 printk(KERN_INFO "%s ch->xid=NULL\n", __FUNCTION__);
1927 goto done;
1928 }
1929
1930 ch->ccw[10].cmd_code = CCW_CMD_WRITE;
1931 ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1932 ch->ccw[10].count = XID2_LENGTH;
1933 ch->ccw[10].cda = virt_to_phys(ch->xid);
1934
1935 ch->ccw[11].cmd_code = CCW_CMD_READ;
1936 ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1937 ch->ccw[11].count = TH_HEADER_LENGTH;
1938 ch->ccw[11].cda = virt_to_phys(ch->rcvd_xid_th);
1939
1940 ch->ccw[12].cmd_code = CCW_CMD_READ;
1941 ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1942 ch->ccw[12].count = XID2_LENGTH;
1943 ch->ccw[12].cda = virt_to_phys(ch->rcvd_xid);
1944
1945 ch->ccw[13].cmd_code = CCW_CMD_READ;
1946 ch->ccw[13].cda = virt_to_phys(ch->rcvd_xid_id);
1947
1948 } else { /* side == YSIDE : mpc_action_yside_xid */
1949 ch->ccw[9].cmd_code = CCW_CMD_READ;
1950 ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1951 ch->ccw[9].count = TH_HEADER_LENGTH;
1952 ch->ccw[9].cda = virt_to_phys(ch->rcvd_xid_th);
1953
1954 ch->ccw[10].cmd_code = CCW_CMD_READ;
1955 ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1956 ch->ccw[10].count = XID2_LENGTH;
1957 ch->ccw[10].cda = virt_to_phys(ch->rcvd_xid);
1958
1959 if (ch->xid_th == NULL) {
1960 printk(KERN_INFO "%s ch->xid_th=NULL\n", __FUNCTION__);
1961 goto done;
1962 }
1963 ch->ccw[11].cmd_code = CCW_CMD_WRITE;
1964 ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1965 ch->ccw[11].count = TH_HEADER_LENGTH;
1966 ch->ccw[11].cda = virt_to_phys(ch->xid_th);
1967
1968 if (ch->xid == NULL) {
1969 printk(KERN_INFO "%s ch->xid=NULL\n", __FUNCTION__);
1970 goto done;
1971 }
1972 ch->ccw[12].cmd_code = CCW_CMD_WRITE;
1973 ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1974 ch->ccw[12].count = XID2_LENGTH;
1975 ch->ccw[12].cda = virt_to_phys(ch->xid);
1976
1977 if (ch->xid_id == NULL) {
1978 printk(KERN_INFO "%s ch->xid_id=NULL\n", __FUNCTION__);
1979 goto done;
1980 }
1981 ch->ccw[13].cmd_code = CCW_CMD_WRITE;
1982 ch->ccw[13].cda = virt_to_phys(ch->xid_id);
1983
1984 }
1985 ch->ccw[13].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1986 ch->ccw[13].count = 4;
1987
1988 ch->ccw[14].cmd_code = CCW_CMD_NOOP;
1989 ch->ccw[14].flags = CCW_FLAG_SLI;
1990 ch->ccw[14].count = 0;
1991 ch->ccw[14].cda = 0;
1992
1993 if (do_debug_ccw)
1994 ctcmpc_dumpit((char *)&ch->ccw[8], sizeof(struct ccw1) * 7);
1995
1996 ctcmpc_dumpit((char *)ch->xid_th, TH_HEADER_LENGTH);
1997 ctcmpc_dumpit((char *)ch->xid, XID2_LENGTH);
1998 ctcmpc_dumpit((char *)ch->xid_id, 4);
1999 if (!in_irq()) {
2000 /* Conditional locking like this is a known problem for
2001 * sparse, because its analysis is static and cannot follow
2002 * the runtime condition. The warnings can be ignored here. */
2003 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
2004 gotlock = 1;
2005 }
2006
2007 fsm_addtimer(&ch->timer, 5000, CTC_EVENT_TIMER, ch);
2008 rc = ccw_device_start(ch->cdev, &ch->ccw[8],
2009 (unsigned long)ch, 0xff, 0);
2010
2011 if (gotlock) /* see remark above about conditional locking */
2012 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
2013
2014 if (rc != 0) {
2015 ctcm_pr_info("ctcmpc: %s() ch:%s IO failed\n",
2016 __FUNCTION__, ch->id);
2017 ctcm_ccw_check_rc(ch, rc,
2018 (side == XSIDE) ? "x-side XID" : "y-side XID");
2019 }
2020
2021done:
2022 if (do_debug)
2023 ctcm_pr_debug("ctcmpc exit : %s(): ch=0x%p id=%s\n",
2024 __FUNCTION__, ch, ch->id);
2025 return;
2026
2027}
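
mpc_action_side_xid programs the XID exchange as one chained CCW sequence in slots 8 through 14: the X side writes its TH header, XID2 and id before reading the partner's, the Y side builds the mirror image, and both chains terminate with a NOP. A simplified host-side mock of the chain construction follows; struct ccw1 and the flag values are stand-ins for the kernel's definitions, the command codes are the standard channel commands, and virt_to_phys32 fakes the address translation:

    #include <stdint.h>
    #include <string.h>

    /* simplified stand-in for the kernel's struct ccw1 */
    struct ccw1 {
            uint8_t  cmd_code;
            uint8_t  flags;
            uint16_t count;
            uint32_t cda;            /* data address */
    };

    enum { CCW_CMD_WRITE = 0x01, CCW_CMD_READ = 0x02, CCW_CMD_NOOP = 0x03 };
    enum { CCW_FLAG_CC = 0x40, CCW_FLAG_SLI = 0x20 };

    #define TH_HEADER_LENGTH 8

    /* fake address translation, just for the sketch */
    static uint32_t virt_to_phys32(const void *p)
    {
            return (uint32_t)(uintptr_t)p;
    }

    /* X side: write our TH header, then read the partner's; the Y side
     * would build the mirror image, and slot 14 always ends the chain */
    static void build_xside_chain(struct ccw1 *ccw,
                                  const void *xid_th, const void *rcvd_xid_th)
    {
            ccw[9].cmd_code  = CCW_CMD_WRITE;
            ccw[9].flags     = CCW_FLAG_SLI | CCW_FLAG_CC;   /* CC chains on */
            ccw[9].count     = TH_HEADER_LENGTH;
            ccw[9].cda       = virt_to_phys32(xid_th);

            ccw[11].cmd_code = CCW_CMD_READ;
            ccw[11].flags    = CCW_FLAG_SLI | CCW_FLAG_CC;
            ccw[11].count    = TH_HEADER_LENGTH;
            ccw[11].cda      = virt_to_phys32(rcvd_xid_th);

            ccw[14].cmd_code = CCW_CMD_NOOP;                 /* no CC: end */
            ccw[14].flags    = CCW_FLAG_SLI;
            ccw[14].count    = 0;
            ccw[14].cda      = 0;
    }

    int main(void)
    {
            struct ccw1 chain[15];
            char th[TH_HEADER_LENGTH], rcvd_th[TH_HEADER_LENGTH];

            memset(chain, 0, sizeof(chain));
            build_xside_chain(chain, th, rcvd_th);
            return 0;
    }
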
2028
2029/*
2030 * MPC Group Station FSM action
2031 * CTCM_PROTO_MPC only
2032 */
2033static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg)
2034{
2035 mpc_action_side_xid(fsm, arg, XSIDE);
2036}
2037
2038/*
2039 * MPC Group Station FSM action
2040 * CTCM_PROTO_MPC only
2041 */
2042static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg)
2043{
2044 mpc_action_side_xid(fsm, arg, YSIDE);
2045}
2046
2047/*
2048 * MPC Group Station FSM action
2049 * CTCM_PROTO_MPC only
2050 */
2051static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg)
2052{
2053 struct channel *ch = arg;
2054 struct ctcm_priv *priv;
2055 struct mpc_group *grp = NULL;
2056 struct net_device *dev = NULL;
2057
2058 if (do_debug)
2059 ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
2060 __FUNCTION__, smp_processor_id(), ch, ch->id);
2061
2062 if (ch == NULL) {
2063 printk(KERN_WARNING "%s ch=NULL\n", __FUNCTION__);
2064 goto done;
2065 }
2066
2067 dev = ch->netdev;
2068 if (dev == NULL) {
2069 printk(KERN_WARNING "%s dev=NULL\n", __FUNCTION__);
2070 goto done;
2071 }
2072
2073 priv = dev->priv;
2074 if (priv == NULL) {
2075 printk(KERN_WARNING "%s priv=NULL\n", __FUNCTION__);
2076 goto done;
2077 }
2078
2079 grp = priv->mpcg;
2080 if (grp == NULL) {
2081 printk(KERN_WARNING "%s grp=NULL\n", __FUNCTION__);
2082 goto done;
2083 }
2084
2085 if (ch->xid == NULL) {
2086 printk(KERN_WARNING "%s ch-xid=NULL\n", __FUNCTION__);
2087 goto done;
2088 }
2089
2090 fsm_newstate(ch->fsm, CH_XID0_INPROGRESS);
2091
2092 ch->xid->xid2_option = XID2_0;
2093
2094 switch (fsm_getstate(grp->fsm)) {
2095 case MPCG_STATE_XID2INITW:
2096 case MPCG_STATE_XID2INITX:
2097 ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
2098 break;
2099 case MPCG_STATE_XID0IOWAIT:
2100 case MPCG_STATE_XID0IOWAIX:
2101 ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
2102 break;
2103 }
2104
2105 fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch);
2106
2107done:
2108 if (do_debug)
2109 ctcm_pr_debug("ctcmpc exit : %s(): ch=0x%p id=%s\n",
2110 __FUNCTION__, ch, ch->id);
2111 return;
2112
2113}
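
The command chosen for XID0 encodes the group's role: a side still waiting passively (XID2INITW/X) polls the channel with a sense command, while the active opener (XID0IOWAIT/X) pushes its XID with write-control. A compact sketch of that mapping; the state names mirror the driver, but the enum values here are illustrative:

    /* illustrative stand-ins for the driver's states and CCW commands */
    enum mpcg_state { XID2INITW, XID2INITX, XID0IOWAIT, XID0IOWAIX };
    enum ccw_cmd    { CMD_SENSE, CMD_WRITE_CTL };

    static enum ccw_cmd xid0_command(enum mpcg_state s)
    {
            switch (s) {
            case XID2INITW:
            case XID2INITX:
                    return CMD_SENSE;      /* passive side: poll for partner */
            case XID0IOWAIT:
            case XID0IOWAIX:
            default:
                    return CMD_WRITE_CTL;  /* active side: push our XID */
            }
    }
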
2114
2115/*
2116 * MPC Group Station FSM action
2117 * CTCM_PROTO_MPC only
2118*/
2119static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
2120{
2121 struct net_device *dev = arg;
2122 struct ctcm_priv *priv = NULL;
2123 struct mpc_group *grp = NULL;
2124 int direction;
2125 int rc = 0;
2126 int send = 0;
2127
2128 ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__);
2129
2130 if (dev == NULL) {
2131 printk(KERN_INFO "%s dev=NULL\n", __FUNCTION__);
2132 rc = 1;
2133 goto done;
2134 }
2135
2136 priv = dev->priv;
2137 if (priv == NULL) {
2138 printk(KERN_INFO "%s priv=NULL\n", __FUNCTION__);
2139 rc = 1;
2140 goto done;
2141 }
2142
2143 grp = priv->mpcg;
2144 if (grp == NULL) {
2145 printk(KERN_INFO "%s grp=NULL\n", __FUNCTION__);
2146 rc = 1;
2147 goto done;
2148 }
2149
2150 for (direction = READ; direction <= WRITE; direction++) {
2151 struct channel *ch = priv->channel[direction];
2152 struct xid2 *thisxid = ch->xid;
2153 ch->xid_skb->data = ch->xid_skb_data;
2154 skb_reset_tail_pointer(ch->xid_skb);
2155 ch->xid_skb->len = 0;
2156 thisxid->xid2_option = XID2_7;
2157 send = 0;
2158
2159 /* xid7 phase 1 */
2160 if (grp->outstanding_xid7_p2 > 0) {
2161 if (grp->roll == YSIDE) {
2162 if (fsm_getstate(ch->fsm) == CH_XID7_PENDING1) {
2163 fsm_newstate(ch->fsm, CH_XID7_PENDING2);
2164 ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
2165 memcpy(skb_put(ch->xid_skb,
2166 TH_HEADER_LENGTH),
2167 &thdummy, TH_HEADER_LENGTH);
2168 send = 1;
2169 }
2170 } else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING2) {
2171 fsm_newstate(ch->fsm, CH_XID7_PENDING2);
2172 ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
2173 memcpy(skb_put(ch->xid_skb,
2174 TH_HEADER_LENGTH),
2175 &thnorm, TH_HEADER_LENGTH);
2176 send = 1;
2177 }
2178 } else {
2179 /* xid7 phase 2 */
2180 if (grp->roll == YSIDE) {
2181 if (fsm_getstate(ch->fsm) < CH_XID7_PENDING4) {
2182 fsm_newstate(ch->fsm, CH_XID7_PENDING4);
2183 memcpy(skb_put(ch->xid_skb,
2184 TH_HEADER_LENGTH),
2185 &thnorm, TH_HEADER_LENGTH);
2186 ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
2187 send = 1;
2188 }
2189 } else if (fsm_getstate(ch->fsm) == CH_XID7_PENDING3) {
2190 fsm_newstate(ch->fsm, CH_XID7_PENDING4);
2191 ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
2192 memcpy(skb_put(ch->xid_skb, TH_HEADER_LENGTH),
2193 &thdummy, TH_HEADER_LENGTH);
2194 send = 1;
2195 }
2196 }
2197
2198 if (send)
2199 fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch);
2200 }
2201
2202done:
2203
2204 if (rc != 0)
2205 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
2206
2207 return;
2208}
2209
2210/*
2211 * MPC Group Station FSM action
2212 * CTCM_PROTO_MPC only
2213 */
2214static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg)
2215{
2216
2217 struct mpcg_info *mpcginfo = arg;
2218 struct channel *ch = mpcginfo->ch;
2219 struct net_device *dev = ch->netdev;
2220 struct ctcm_priv *priv;
2221 struct mpc_group *grp;
2222
2223 if (do_debug)
2224 ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
2225 __FUNCTION__, smp_processor_id(), ch, ch->id);
2226
2227 priv = dev->priv;
2228 grp = priv->mpcg;
2229
2230 ctcm_pr_debug("ctcmpc in:%s() %s xid2:%i xid7:%i xid7_p2:%i\n",
2231 __FUNCTION__, ch->id,
2232 grp->outstanding_xid2,
2233 grp->outstanding_xid7,
2234 grp->outstanding_xid7_p2);
2235
2236 if (fsm_getstate(ch->fsm) < CH_XID7_PENDING)
2237 fsm_newstate(ch->fsm, CH_XID7_PENDING);
2238
2239 grp->outstanding_xid2--;
2240 grp->outstanding_xid7++;
2241 grp->outstanding_xid7_p2++;
2242
2243 /* must change state before validating xid to */
2244 /* properly handle interim interrupts received*/
2245 switch (fsm_getstate(grp->fsm)) {
2246 case MPCG_STATE_XID2INITW:
2247 fsm_newstate(grp->fsm, MPCG_STATE_XID2INITX);
2248 mpc_validate_xid(mpcginfo);
2249 break;
2250 case MPCG_STATE_XID0IOWAIT:
2251 fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIX);
2252 mpc_validate_xid(mpcginfo);
2253 break;
2254 case MPCG_STATE_XID2INITX:
2255 if (grp->outstanding_xid2 == 0) {
2256 fsm_newstate(grp->fsm, MPCG_STATE_XID7INITW);
2257 mpc_validate_xid(mpcginfo);
2258 fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev);
2259 }
2260 break;
2261 case MPCG_STATE_XID0IOWAIX:
2262 if (grp->outstanding_xid2 == 0) {
2263 fsm_newstate(grp->fsm, MPCG_STATE_XID7INITI);
2264 mpc_validate_xid(mpcginfo);
2265 fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev);
2266 }
2267 break;
2268 }
2269 kfree(mpcginfo);
2270
2271 if (do_debug) {
2272 ctcm_pr_debug("ctcmpc:%s() %s xid2:%i xid7:%i xid7_p2:%i\n",
2273 __FUNCTION__, ch->id,
2274 grp->outstanding_xid2,
2275 grp->outstanding_xid7,
2276 grp->outstanding_xid7_p2);
2277 ctcm_pr_debug("ctcmpc:%s() %s grpstate: %s chanstate: %s\n",
2278 __FUNCTION__, ch->id,
2279 fsm_getstate_str(grp->fsm),
2280 fsm_getstate_str(ch->fsm));
2281 }
2282 return;
2283
2284}
2285
2286
2287/*
2288 * MPC Group Station FSM action
2289 * CTCM_PROTO_MPC only
2290 */
2291static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg)
2292{
2293 struct mpcg_info *mpcginfo = arg;
2294 struct channel *ch = mpcginfo->ch;
2295 struct net_device *dev = ch->netdev;
2296 struct ctcm_priv *priv = dev->priv;
2297 struct mpc_group *grp = priv->mpcg;
2298
2299 if (do_debug) {
2300 ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
2301 __FUNCTION__, smp_processor_id(), ch, ch->id);
2302
2303 ctcm_pr_debug("ctcmpc: outstanding_xid7: %i, "
2304 " outstanding_xid7_p2: %i\n",
2305 grp->outstanding_xid7,
2306 grp->outstanding_xid7_p2);
2307 }
2308
2309 grp->outstanding_xid7--;
2310 ch->xid_skb->data = ch->xid_skb_data;
2311 skb_reset_tail_pointer(ch->xid_skb);
2312 ch->xid_skb->len = 0;
2313
2314 switch (fsm_getstate(grp->fsm)) {
2315 case MPCG_STATE_XID7INITI:
2316 fsm_newstate(grp->fsm, MPCG_STATE_XID7INITZ);
2317 mpc_validate_xid(mpcginfo);
2318 break;
2319 case MPCG_STATE_XID7INITW:
2320 fsm_newstate(grp->fsm, MPCG_STATE_XID7INITX);
2321 mpc_validate_xid(mpcginfo);
2322 break;
2323 case MPCG_STATE_XID7INITZ:
2324 case MPCG_STATE_XID7INITX:
2325 if (grp->outstanding_xid7 == 0) {
2326 if (grp->outstanding_xid7_p2 > 0) {
2327 grp->outstanding_xid7 =
2328 grp->outstanding_xid7_p2;
2329 grp->outstanding_xid7_p2 = 0;
2330 } else
2331 fsm_newstate(grp->fsm, MPCG_STATE_XID7INITF);
2332
2333 mpc_validate_xid(mpcginfo);
2334 fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
2335 break;
2336 }
2337 mpc_validate_xid(mpcginfo);
2338 break;
2339 }
2340
2341 kfree(mpcginfo);
2342
2343 if (do_debug)
2344 ctcm_pr_debug("ctcmpc exit: %s(): cp=%i ch=0x%p id=%s\n",
2345 __FUNCTION__, smp_processor_id(), ch, ch->id);
2346 return;
2347
2348}
2349
2350/*
2351 * mpc_action helper of an MPC Group Station FSM action
2352 * CTCM_PROTO_MPC only
2353 */
2354static int mpc_send_qllc_discontact(struct net_device *dev)
2355{
2356 int rc = 0;
2357 __u32 new_len = 0;
2358 struct sk_buff *skb;
2359 struct qllc *qllcptr;
2360 struct ctcm_priv *priv;
2361 struct mpc_group *grp;
2362
2363 ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__);
2364
2365 if (dev == NULL) {
2366 printk(KERN_INFO "%s() dev=NULL\n", __FUNCTION__);
2367 rc = 1;
2368 goto done;
2369 }
2370
2371 priv = dev->priv;
2372 if (priv == NULL) {
2373 printk(KERN_INFO "%s() priv=NULL\n", __FUNCTION__);
2374 rc = 1;
2375 goto done;
2376 }
2377
2378 grp = priv->mpcg;
2379 if (grp == NULL) {
2380 printk(KERN_INFO "%s() grp=NULL\n", __FUNCTION__);
2381 rc = 1;
2382 goto done;
2383 }
2384 ctcm_pr_info("ctcmpc: %s() GROUP STATE: %s\n", __FUNCTION__,
2385 mpcg_state_names[grp->saved_state]);
2386
2387 switch (grp->saved_state) {
2388 /*
2389 * establish conn callback function is
2390 * preferred method to report failure
2391 */
2392 case MPCG_STATE_XID0IOWAIT:
2393 case MPCG_STATE_XID0IOWAIX:
2394 case MPCG_STATE_XID7INITI:
2395 case MPCG_STATE_XID7INITZ:
2396 case MPCG_STATE_XID2INITW:
2397 case MPCG_STATE_XID2INITX:
2398 case MPCG_STATE_XID7INITW:
2399 case MPCG_STATE_XID7INITX:
2400 if (grp->estconnfunc) {
2401 grp->estconnfunc(grp->port_num, -1, 0);
2402 grp->estconnfunc = NULL;
2403 break;
2404 } /* else fall through to send the discontact */
2405 case MPCG_STATE_FLOWC:
2406 case MPCG_STATE_READY:
2407 grp->send_qllc_disc = 2;
2408 new_len = sizeof(struct qllc);
2409 qllcptr = kzalloc(new_len, gfp_type() | GFP_DMA);
2410 if (qllcptr == NULL) {
2411 printk(KERN_INFO
2412 "ctcmpc: Out of memory in %s()\n",
2413 dev->name);
2414 rc = 1;
2415 goto done;
2416 }
2417
2418 qllcptr->qllc_address = 0xcc;
2419 qllcptr->qllc_commands = 0x03;
2420
2421 skb = __dev_alloc_skb(new_len, GFP_ATOMIC);
2422
2423 if (skb == NULL) {
2424 printk(KERN_INFO "%s Out of memory in mpc_send_qllc\n",
2425 dev->name);
2426 priv->stats.rx_dropped++;
2427 rc = 1;
2428 kfree(qllcptr);
2429 goto done;
2430 }
2431
2432 memcpy(skb_put(skb, new_len), qllcptr, new_len);
2433 kfree(qllcptr);
2434
2435 if (skb_headroom(skb) < 4) {
2436 printk(KERN_INFO "ctcmpc: %s() Unable to"
2437 " build discontact for %s\n",
2438 __FUNCTION__, dev->name);
2439 rc = 1;
2440 dev_kfree_skb_any(skb);
2441 goto done;
2442 }
2443
2444 *((__u32 *)skb_push(skb, 4)) = priv->channel[READ]->pdu_seq;
2445 priv->channel[READ]->pdu_seq++;
2446 if (do_debug_data)
2447 ctcm_pr_debug("ctcmpc: %s ToDCM_pdu_seq= %08x\n",
2448 __FUNCTION__, priv->channel[READ]->pdu_seq);
2449
2450 /* receipt of CC03 resets the anticipated sequence
2451 * number on the receiving side */
2452 priv->channel[READ]->pdu_seq = 0x00;
2453 skb_reset_mac_header(skb);
2454 skb->dev = dev;
2455 skb->protocol = htons(ETH_P_SNAP);
2456 skb->ip_summed = CHECKSUM_UNNECESSARY;
2457
2458 ctcmpc_dumpit((char *)skb->data, (sizeof(struct qllc) + 4));
2459
2460 netif_rx(skb);
2461 break;
2462 default:
2463 break;
2464
2465 }
2466
2467done:
2468 ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__);
2469 return rc;
2470}
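
In the READY/FLOWC branch above, the driver hands a two-byte QLLC discontact frame (address 0xcc, command 0x03) up the stack, prefixed with the read channel's 4-byte PDU sequence number. The same frame can be assembled host-side like this (a sketch; the driver naturally works with sk_buffs rather than a flat buffer):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct qllc {
            uint8_t qllc_address;   /* 0xcc */
            uint8_t qllc_commands;  /* 0x03 = discontact */
    } __attribute__((packed));

    int main(void)
    {
            uint32_t pdu_seq = 7;            /* current read-channel sequence */
            uint8_t frame[4 + sizeof(struct qllc)];
            struct qllc q = { 0xcc, 0x03 };

            memcpy(frame, &pdu_seq, 4);      /* 4-byte sequence prefix */
            memcpy(frame + 4, &q, sizeof(q));

            for (size_t i = 0; i < sizeof(frame); i++)
                    printf("%02x ", frame[i]);
            printf("\n");
            return 0;
    }
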
2471/* --- This is the END my friend --- */
2472
diff --git a/drivers/s390/net/ctcm_mpc.h b/drivers/s390/net/ctcm_mpc.h
new file mode 100644
index 000000000000..f99686069a91
--- /dev/null
+++ b/drivers/s390/net/ctcm_mpc.h
@@ -0,0 +1,239 @@
1/*
2 * drivers/s390/net/ctcm_mpc.h
3 *
4 * Copyright IBM Corp. 2007
5 * Authors: Peter Tiedemann (ptiedem@de.ibm.com)
6 *
7 * MPC additions:
8 * Belinda Thompson (belindat@us.ibm.com)
9 * Andy Richter (richtera@us.ibm.com)
10 */
11
12#ifndef _CTC_MPC_H_
13#define _CTC_MPC_H_
14
15#include <linux/skbuff.h>
16#include "fsm.h"
17
18/*
19 * MPC external interface
20 * Note that ctc_mpc_xyz are called with a lock on ................
21 */
22
23/* port_number identifies the MPC device: 0, 1, 2, ...; e.g. mpc2 is port_number 2 */
24
25/* passive open: just wait for the XID2 exchange */
26extern int ctc_mpc_alloc_channel(int port,
27 void (*callback)(int port_num, int max_write_size));
28/* active open: allocate, then send XID2 */
29extern void ctc_mpc_establish_connectivity(int port,
30 void (*callback)(int port_num, int rc, int max_write_size));
31
32extern void ctc_mpc_dealloc_ch(int port);
33extern void ctc_mpc_flow_control(int port, int flowc);
34
35/*
36 * other MPC Group prototypes and structures
37 */
38
39#define ETH_P_SNA_DIX 0x80D5
40
41/*
42 * Declaration of an XID2
43 *
44 */
45#define ALLZEROS 0x0000000000000000
46
47#define XID_FM2 0x20
48#define XID2_0 0x00
49#define XID2_7 0x07
50#define XID2_WRITE_SIDE 0x04
51#define XID2_READ_SIDE 0x05
52
53struct xid2 {
54 __u8 xid2_type_id;
55 __u8 xid2_len;
56 __u32 xid2_adj_id;
57 __u8 xid2_rlen;
58 __u8 xid2_resv1;
59 __u8 xid2_flag1;
60 __u8 xid2_fmtt;
61 __u8 xid2_flag4;
62 __u16 xid2_resv2;
63 __u8 xid2_tgnum;
64 __u32 xid2_sender_id;
65 __u8 xid2_flag2;
66 __u8 xid2_option;
67 char xid2_resv3[8];
68 __u16 xid2_resv4;
69 __u8 xid2_dlc_type;
70 __u16 xid2_resv5;
71 __u8 xid2_mpc_flag;
72 __u8 xid2_resv6;
73 __u16 xid2_buf_len;
74 char xid2_buffer[255 - (13 * sizeof(__u8) +
75 2 * sizeof(__u32) +
76 4 * sizeof(__u16) +
77 8 * sizeof(char))];
78} __attribute__ ((packed));
79
80#define XID2_LENGTH (sizeof(struct xid2))
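
The xid2_buffer dimension is chosen so that the packed structure is exactly 255 bytes: the fixed fields add up to 37 bytes (13 single bytes, two fullwords, four halfwords and eight reserved chars), leaving 218 bytes of buffer. A quick host-side check of that arithmetic:

    #include <stdint.h>

    struct xid2_check {
            uint8_t  xid2_type_id;
            uint8_t  xid2_len;
            uint32_t xid2_adj_id;
            uint8_t  xid2_rlen;
            uint8_t  xid2_resv1;
            uint8_t  xid2_flag1;
            uint8_t  xid2_fmtt;
            uint8_t  xid2_flag4;
            uint16_t xid2_resv2;
            uint8_t  xid2_tgnum;
            uint32_t xid2_sender_id;
            uint8_t  xid2_flag2;
            uint8_t  xid2_option;
            char     xid2_resv3[8];
            uint16_t xid2_resv4;
            uint8_t  xid2_dlc_type;
            uint16_t xid2_resv5;
            uint8_t  xid2_mpc_flag;
            uint8_t  xid2_resv6;
            uint16_t xid2_buf_len;
            char     xid2_buffer[255 - (13 * sizeof(uint8_t) +
                                        2 * sizeof(uint32_t) +
                                        4 * sizeof(uint16_t) +
                                        8 * sizeof(char))];
    } __attribute__((packed));

    /* packed, the fixed part is 37 bytes and the whole record is 255 */
    _Static_assert(sizeof(struct xid2_check) == 255, "XID2 must be 255 bytes");

    int main(void) { return 0; }
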
81
82struct th_header {
83 __u8 th_seg;
84 __u8 th_ch_flag;
85#define TH_HAS_PDU 0xf0
86#define TH_IS_XID 0x01
87#define TH_SWEEP_REQ 0xfe
88#define TH_SWEEP_RESP 0xff
89 __u8 th_blk_flag;
90#define TH_DATA_IS_XID 0x80
91#define TH_RETRY 0x40
92#define TH_DISCONTACT 0xc0
93#define TH_SEG_BLK 0x20
94#define TH_LAST_SEG 0x10
95#define TH_PDU_PART 0x08
96 __u8 th_is_xid; /* is 0x01 if this is XID */
97 __u32 th_seq_num;
98} __attribute__ ((packed));
99
100struct th_addon {
101 __u32 th_last_seq;
102 __u32 th_resvd;
103} __attribute__ ((packed));
104
105struct th_sweep {
106 struct th_header th;
107 struct th_addon sw;
108} __attribute__ ((packed));
109
110#define TH_HEADER_LENGTH (sizeof(struct th_header))
111#define TH_SWEEP_LENGTH (sizeof(struct th_sweep))
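
th_ch_flag classifies an inbound transport header as XID, sweep request/response, or ordinary PDU traffic, using the values defined above. A small host-side classifier (the struct mirrors th_header; everything else is a sketch):

    #include <stdint.h>
    #include <stdio.h>

    struct th_header {
            uint8_t  th_seg;
            uint8_t  th_ch_flag;   /* 0xf0 PDU, 0x01 XID, 0xfe/0xff sweep */
            uint8_t  th_blk_flag;
            uint8_t  th_is_xid;    /* 0x01 if this is an XID */
            uint32_t th_seq_num;
    } __attribute__((packed));

    static const char *classify_th(const struct th_header *th)
    {
            switch (th->th_ch_flag) {
            case 0x01: return "XID";            /* TH_IS_XID */
            case 0xfe: return "sweep request";  /* TH_SWEEP_REQ */
            case 0xff: return "sweep response"; /* TH_SWEEP_RESP */
            case 0xf0: return "PDU data";       /* TH_HAS_PDU */
            default:   return "unknown";
            }
    }

    int main(void)
    {
            struct th_header th = { 0, 0xfe, 0, 0, 0 };
            printf("%s\n", classify_th(&th));   /* sweep request */
            return 0;
    }
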
112
113#define PDU_LAST 0x80
114#define PDU_CNTL 0x40
115#define PDU_FIRST 0x20
116
117struct pdu {
118 __u32 pdu_offset;
119 __u8 pdu_flag;
120 __u8 pdu_proto; /* 0x01 is APPN SNA */
121 __u16 pdu_seq;
122} __attribute__ ((packed));
123
124#define PDU_HEADER_LENGTH (sizeof(struct pdu))
125
126struct qllc {
127 __u8 qllc_address;
128#define QLLC_REQ 0xFF
129#define QLLC_RESP 0x00
130 __u8 qllc_commands;
131#define QLLC_DISCONNECT 0x53
132#define QLLC_UNSEQACK 0x73
133#define QLLC_SETMODE 0x93
134#define QLLC_EXCHID 0xBF
135} __attribute__ ((packed));
136
137
138/*
139 * Definition of one MPC group
140 */
141
142#define MAX_MPCGCHAN 10
143#define MPC_XID_TIMEOUT_VALUE 10000
144#define MPC_CHANNEL_ADD 0
145#define MPC_CHANNEL_REMOVE 1
146#define MPC_CHANNEL_ATTN 2
147#define XSIDE 1
148#define YSIDE 0
149
150struct mpcg_info {
151 struct sk_buff *skb;
152 struct channel *ch;
153 struct xid2 *xid;
154 struct th_sweep *sweep;
155 struct th_header *th;
156};
157
158struct mpc_group {
159 struct tasklet_struct mpc_tasklet;
160 struct tasklet_struct mpc_tasklet2;
161 int changed_side;
162 int saved_state;
163 int channels_terminating;
164 int out_of_sequence;
165 int flow_off_called;
166 int port_num;
167 int port_persist;
168 int alloc_called;
169 __u32 xid2_adj_id;
170 __u8 xid2_tgnum;
171 __u32 xid2_sender_id;
172 int num_channel_paths;
173 int active_channels[2];
174 __u16 group_max_buflen;
175 int outstanding_xid2;
176 int outstanding_xid7;
177 int outstanding_xid7_p2;
178 int sweep_req_pend_num;
179 int sweep_rsp_pend_num;
180 struct sk_buff *xid_skb;
181 char *xid_skb_data;
182 struct th_header *xid_th;
183 struct xid2 *xid;
184 char *xid_id;
185 struct th_header *rcvd_xid_th;
186 struct sk_buff *rcvd_xid_skb;
187 char *rcvd_xid_data;
188 __u8 in_sweep;
189 __u8 roll;
190 struct xid2 *saved_xid2;
191 void (*allochanfunc)(int, int);
192 int allocchan_callback_retries;
193 void (*estconnfunc)(int, int, int);
194 int estconn_callback_retries;
195 int estconn_called;
196 int xidnogood;
197 int send_qllc_disc;
198 fsm_timer timer;
199 fsm_instance *fsm; /* group xid fsm */
200};
201
202#ifdef DEBUGDATA
203void ctcmpc_dumpit(char *buf, int len);
204#else
205static inline void ctcmpc_dumpit(char *buf, int len)
206{
207}
208#endif
209
210#ifdef DEBUGDATA
211/*
212 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
213 *
214 * skb The struct sk_buff to dump.
215 * offset Offset relative to skb->data, where to start the dump.
216 */
217void ctcmpc_dump_skb(struct sk_buff *skb, int offset);
218#else
219static inline void ctcmpc_dump_skb(struct sk_buff *skb, int offset)
220{}
221#endif
222
223static inline void ctcmpc_dump32(char *buf, int len)
224{
225 if (len < 32)
226 ctcmpc_dumpit(buf, len);
227 else
228 ctcmpc_dumpit(buf, 32);
229}
230
231int ctcmpc_open(struct net_device *);
232void ctcm_ccw_check_rc(struct channel *, int, char *);
233void mpc_group_ready(unsigned long adev);
234int mpc_channel_action(struct channel *ch, int direction, int action);
235void mpc_action_send_discontact(unsigned long thischan);
236void mpc_action_discontact(fsm_instance *fi, int event, void *arg);
237void ctcmpc_bh(unsigned long thischan);
238#endif
239/* --- This is the END my friend --- */
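
For a consumer of this interface (Communication Server for Linux is the user named in Kconfig), the intended order is ctc_mpc_alloc_channel for a passive open, optionally ctc_mpc_establish_connectivity for an active open, and ctc_mpc_dealloc_ch on teardown. The sketch below only illustrates that callback protocol: the real symbols are exported by the ctcm module, and the stub bodies at the bottom exist purely so the example is self-contained:

    #include <stdio.h>

    /* prototypes mirrored from ctcm_mpc.h; bodies below are stand-ins */
    int  ctc_mpc_alloc_channel(int port,
                    void (*cb)(int port_num, int max_write_size));
    void ctc_mpc_establish_connectivity(int port,
                    void (*cb)(int port_num, int rc, int max_write_size));
    void ctc_mpc_dealloc_ch(int port);

    static void alloc_done(int port, int max_write)
    {
            printf("port %d allocated, max write %d\n", port, max_write);
    }

    static void conn_done(int port, int rc, int max_write)
    {
            printf("port %d connect rc=%d, max write %d\n", port, rc, max_write);
    }

    int main(void)
    {
            if (ctc_mpc_alloc_channel(0, alloc_done) == 0)        /* passive */
                    ctc_mpc_establish_connectivity(0, conn_done); /* active */
            ctc_mpc_dealloc_ch(0);
            return 0;
    }

    /* stand-in bodies so the sketch links; the driver's versions do real I/O */
    int  ctc_mpc_alloc_channel(int p, void (*cb)(int, int))
    {
            cb(p, 4096);
            return 0;
    }
    void ctc_mpc_establish_connectivity(int p, void (*cb)(int, int, int))
    {
            cb(p, 0, 4096);
    }
    void ctc_mpc_dealloc_ch(int p) { (void)p; }
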
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
new file mode 100644
index 000000000000..bb2d13721d34
--- /dev/null
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -0,0 +1,210 @@
1/*
2 * drivers/s390/net/ctcm_sysfs.c
3 *
4 * Copyright IBM Corp. 2007, 2007
5 * Authors: Peter Tiedemann (ptiedem@de.ibm.com)
6 *
7 */
8
9#undef DEBUG
10#undef DEBUGDATA
11#undef DEBUGCCW
12
13#include <linux/sysfs.h>
14#include "ctcm_main.h"
15
16/*
17 * sysfs attributes
18 */
19
20static ssize_t ctcm_buffer_show(struct device *dev,
21 struct device_attribute *attr, char *buf)
22{
23 struct ctcm_priv *priv = dev_get_drvdata(dev);
24
25 if (!priv)
26 return -ENODEV;
27 return sprintf(buf, "%d\n", priv->buffer_size);
28}
29
30static ssize_t ctcm_buffer_write(struct device *dev,
31 struct device_attribute *attr, const char *buf, size_t count)
32{
33 struct net_device *ndev;
34 unsigned int bs1 = 0;
35 struct ctcm_priv *priv = dev_get_drvdata(dev);
36
37 if (!(priv && priv->channel[READ] &&
38 (ndev = priv->channel[READ]->netdev))) {
39 CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
40 return -ENODEV;
41 }
42
43 sscanf(buf, "%u", &bs1);
44 if (bs1 > CTCM_BUFSIZE_LIMIT)
45 goto einval;
46 if (bs1 < (576 + LL_HEADER_LENGTH + 2))
47 goto einval;
48 if ((ndev->flags & IFF_RUNNING) &&
49 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
50 goto einval;
51
52 priv->buffer_size = bs1; /* overwrite the default only when valid */
53
54 priv->channel[READ]->max_bufsize = bs1;
55 priv->channel[WRITE]->max_bufsize = bs1;
56 if (!(ndev->flags & IFF_RUNNING))
57 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
58 priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
59 priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
60
61 CTCM_DBF_DEV(SETUP, ndev, buf);
62 return count;
63
64einval:
65 CTCM_DBF_DEV(SETUP, ndev, "buff_err");
66 return -EINVAL;
67}
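
The bounds in ctcm_buffer_write encode the relation mtu = buffer_size - LL_HEADER_LENGTH - 2: the 2 is the leading block-length halfword, and the lower limit of 576 + LL_HEADER_LENGTH + 2 keeps the classic minimum IP MTU of 576 reachable. A sketch of the same arithmetic; LL_HEADER_LENGTH is assumed to be 6 here (sizeof the driver's ll_header) and the upper limit is a placeholder for CTCM_BUFSIZE_LIMIT:

    #include <stdio.h>

    #define LL_HEADER_LENGTH 6        /* assumed: sizeof(struct ll_header) */
    #define BUFSIZE_LIMIT    65535    /* placeholder for CTCM_BUFSIZE_LIMIT */

    /* returns the resulting MTU, or -1 if the buffer size is out of range */
    static int bufsize_to_mtu(unsigned int bs)
    {
            if (bs > BUFSIZE_LIMIT)
                    return -1;
            if (bs < 576 + LL_HEADER_LENGTH + 2)  /* minimum IP MTU must fit */
                    return -1;
            return bs - LL_HEADER_LENGTH - 2;     /* 2 = block length field */
    }

    int main(void)
    {
            printf("32768 -> mtu %d\n", bufsize_to_mtu(32768));  /* 32760 */
            printf("100   -> mtu %d\n", bufsize_to_mtu(100));    /* -1 */
            return 0;
    }
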
68
69static void ctcm_print_statistics(struct ctcm_priv *priv)
70{
71 char *sbuf;
72 char *p;
73
74 if (!priv)
75 return;
76 sbuf = kmalloc(2048, GFP_KERNEL);
77 if (sbuf == NULL)
78 return;
79 p = sbuf;
80
81 p += sprintf(p, " Device FSM state: %s\n",
82 fsm_getstate_str(priv->fsm));
83 p += sprintf(p, " RX channel FSM state: %s\n",
84 fsm_getstate_str(priv->channel[READ]->fsm));
85 p += sprintf(p, " TX channel FSM state: %s\n",
86 fsm_getstate_str(priv->channel[WRITE]->fsm));
87 p += sprintf(p, " Max. TX buffer used: %ld\n",
88 priv->channel[WRITE]->prof.maxmulti);
89 p += sprintf(p, " Max. chained SKBs: %ld\n",
90 priv->channel[WRITE]->prof.maxcqueue);
91 p += sprintf(p, " TX single write ops: %ld\n",
92 priv->channel[WRITE]->prof.doios_single);
93 p += sprintf(p, " TX multi write ops: %ld\n",
94 priv->channel[WRITE]->prof.doios_multi);
95 p += sprintf(p, " Net bytes written: %ld\n",
96 priv->channel[WRITE]->prof.txlen);
97 p += sprintf(p, " Max. TX IO-time: %ld\n",
98 priv->channel[WRITE]->prof.tx_time);
99
100 printk(KERN_INFO "Statistics for %s:\n%s",
101 priv->channel[WRITE]->netdev->name, sbuf);
102 kfree(sbuf);
103 return;
104}
105
106static ssize_t stats_show(struct device *dev,
107 struct device_attribute *attr, char *buf)
108{
109 struct ctcm_priv *priv = dev_get_drvdata(dev);
110 if (!priv)
111 return -ENODEV;
112 ctcm_print_statistics(priv);
113 return sprintf(buf, "0\n");
114}
115
116static ssize_t stats_write(struct device *dev, struct device_attribute *attr,
117 const char *buf, size_t count)
118{
119 struct ctcm_priv *priv = dev_get_drvdata(dev);
120 if (!priv)
121 return -ENODEV;
122 /* Reset statistics */
123 memset(&priv->channel[WRITE]->prof, 0,
124 sizeof(priv->channel[WRITE]->prof));
125 return count;
126}
127
128static ssize_t ctcm_proto_show(struct device *dev,
129 struct device_attribute *attr, char *buf)
130{
131 struct ctcm_priv *priv = dev_get_drvdata(dev);
132 if (!priv)
133 return -ENODEV;
134
135 return sprintf(buf, "%d\n", priv->protocol);
136}
137
138static ssize_t ctcm_proto_store(struct device *dev,
139 struct device_attribute *attr, const char *buf, size_t count)
140{
141 int value;
142 struct ctcm_priv *priv = dev_get_drvdata(dev);
143
144 if (!priv)
145 return -ENODEV;
146 sscanf(buf, "%u", &value);
147 if (!((value == CTCM_PROTO_S390) ||
148 (value == CTCM_PROTO_LINUX) ||
149 (value == CTCM_PROTO_MPC) ||
150 (value == CTCM_PROTO_OS390)))
151 return -EINVAL;
152 priv->protocol = value;
153 CTCM_DBF_DEV(SETUP, dev, buf);
154
155 return count;
156}
157
158static ssize_t ctcm_type_show(struct device *dev,
159 struct device_attribute *attr, char *buf)
160{
161 struct ccwgroup_device *cgdev;
162
163 cgdev = to_ccwgroupdev(dev);
164 if (!cgdev)
165 return -ENODEV;
166
167 return sprintf(buf, "%s\n",
168 cu3088_type[cgdev->cdev[0]->id.driver_info]);
169}
170
171static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write);
172static DEVICE_ATTR(protocol, 0644, ctcm_proto_show, ctcm_proto_store);
173static DEVICE_ATTR(type, 0444, ctcm_type_show, NULL);
174static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
175
176static struct attribute *ctcm_attr[] = {
177 &dev_attr_protocol.attr,
178 &dev_attr_type.attr,
179 &dev_attr_buffer.attr,
180 NULL,
181};
182
183static struct attribute_group ctcm_attr_group = {
184 .attrs = ctcm_attr,
185};
186
187int ctcm_add_attributes(struct device *dev)
188{
189 int rc;
190
191 rc = device_create_file(dev, &dev_attr_stats);
192
193 return rc;
194}
195
196void ctcm_remove_attributes(struct device *dev)
197{
198 device_remove_file(dev, &dev_attr_stats);
199}
200
201int ctcm_add_files(struct device *dev)
202{
203 return sysfs_create_group(&dev->kobj, &ctcm_attr_group);
204}
205
206void ctcm_remove_files(struct device *dev)
207{
208 sysfs_remove_group(&dev->kobj, &ctcm_attr_group);
209}
210
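
Once registered, these attributes appear in the ccwgroup device's sysfs directory and can be driven from userspace. A small example that sets the buffer size and reads back the protocol; the device path here is hypothetical, substitute a real bus id:

    #include <stdio.h>

    /* hypothetical device path; substitute your ccwgroup bus id */
    #define DEV "/sys/bus/ccwgroup/devices/0.0.f000/"

    int main(void)
    {
            FILE *f = fopen(DEV "buffer", "w");
            if (f) {
                    fprintf(f, "32768\n");  /* same bounds as ctcm_buffer_write */
                    fclose(f);
            }
            f = fopen(DEV "protocol", "r");
            if (f) {
                    int proto;
                    if (fscanf(f, "%d", &proto) == 1)
                            printf("protocol: %d\n", proto);
                    fclose(f);
            }
            return 0;
    }
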
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
deleted file mode 100644
index 77a503139e32..000000000000
--- a/drivers/s390/net/ctcmain.c
+++ /dev/null
@@ -1,3062 +0,0 @@
1/*
2 * CTC / ESCON network driver
3 *
4 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6 * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
7 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
8 Peter Tiedemann (ptiedem@de.ibm.com)
9 * Driver Model stuff by : Cornelia Huck <cornelia.huck@de.ibm.com>
10 *
11 * Documentation used:
12 * - Principles of Operation (IBM doc#: SA22-7201-06)
13 * - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
14 * - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
15 * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
16 * - ESCON I/O Interface (IBM doc#: SA22-7202-02)
17 *
18 * and the source of the original CTC driver by:
19 * Dieter Wellerdiek (wel@de.ibm.com)
20 * Martin Schwidefsky (schwidefsky@de.ibm.com)
21 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
22 * Jochen Röhrig (roehrig@de.ibm.com)
23 *
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License as published by
26 * the Free Software Foundation; either version 2, or (at your option)
27 * any later version.
28 *
29 * This program is distributed in the hope that it will be useful,
30 * but WITHOUT ANY WARRANTY; without even the implied warranty of
31 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
32 * GNU General Public License for more details.
33 *
34 * You should have received a copy of the GNU General Public License
35 * along with this program; if not, write to the Free Software
36 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
37 *
38 */
39#undef DEBUG
40#include <linux/module.h>
41#include <linux/init.h>
42#include <linux/kernel.h>
43#include <linux/slab.h>
44#include <linux/errno.h>
45#include <linux/types.h>
46#include <linux/interrupt.h>
47#include <linux/timer.h>
48#include <linux/bitops.h>
49
50#include <linux/signal.h>
51#include <linux/string.h>
52
53#include <linux/ip.h>
54#include <linux/if_arp.h>
55#include <linux/tcp.h>
56#include <linux/skbuff.h>
57#include <linux/ctype.h>
58#include <net/dst.h>
59
60#include <asm/io.h>
61#include <asm/ccwdev.h>
62#include <asm/ccwgroup.h>
63#include <asm/uaccess.h>
64
65#include <asm/idals.h>
66
67#include "fsm.h"
68#include "cu3088.h"
69
70#include "ctcdbug.h"
71#include "ctcmain.h"
72
73MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
74MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
75MODULE_LICENSE("GPL");
76/**
77 * States of the interface statemachine.
78 */
79enum dev_states {
80 DEV_STATE_STOPPED,
81 DEV_STATE_STARTWAIT_RXTX,
82 DEV_STATE_STARTWAIT_RX,
83 DEV_STATE_STARTWAIT_TX,
84 DEV_STATE_STOPWAIT_RXTX,
85 DEV_STATE_STOPWAIT_RX,
86 DEV_STATE_STOPWAIT_TX,
87 DEV_STATE_RUNNING,
88 /**
89 * MUST always be the last element!
90 */
91 CTC_NR_DEV_STATES
92};
93
94static const char *dev_state_names[] = {
95 "Stopped",
96 "StartWait RXTX",
97 "StartWait RX",
98 "StartWait TX",
99 "StopWait RXTX",
100 "StopWait RX",
101 "StopWait TX",
102 "Running",
103};
104
105/**
106 * Events of the interface statemachine.
107 */
108enum dev_events {
109 DEV_EVENT_START,
110 DEV_EVENT_STOP,
111 DEV_EVENT_RXUP,
112 DEV_EVENT_TXUP,
113 DEV_EVENT_RXDOWN,
114 DEV_EVENT_TXDOWN,
115 DEV_EVENT_RESTART,
116 /**
117 * MUST always be the last element!
118 */
119 CTC_NR_DEV_EVENTS
120};
121
122static const char *dev_event_names[] = {
123 "Start",
124 "Stop",
125 "RX up",
126 "TX up",
127 "RX down",
128 "TX down",
129 "Restart",
130};
131
132/**
133 * Events of the channel statemachine
134 */
135enum ch_events {
136 /**
137 * Events, representing return code of
138 * I/O operations (ccw_device_start, ccw_device_halt et al.)
139 */
140 CH_EVENT_IO_SUCCESS,
141 CH_EVENT_IO_EBUSY,
142 CH_EVENT_IO_ENODEV,
143 CH_EVENT_IO_EIO,
144 CH_EVENT_IO_UNKNOWN,
145
146 CH_EVENT_ATTNBUSY,
147 CH_EVENT_ATTN,
148 CH_EVENT_BUSY,
149
150 /**
151 * Events, representing unit-check
152 */
153 CH_EVENT_UC_RCRESET,
154 CH_EVENT_UC_RSRESET,
155 CH_EVENT_UC_TXTIMEOUT,
156 CH_EVENT_UC_TXPARITY,
157 CH_EVENT_UC_HWFAIL,
158 CH_EVENT_UC_RXPARITY,
159 CH_EVENT_UC_ZERO,
160 CH_EVENT_UC_UNKNOWN,
161
162 /**
163 * Events, representing subchannel-check
164 */
165 CH_EVENT_SC_UNKNOWN,
166
167 /**
168 * Events, representing machine checks
169 */
170 CH_EVENT_MC_FAIL,
171 CH_EVENT_MC_GOOD,
172
173 /**
174 * Event, representing normal IRQ
175 */
176 CH_EVENT_IRQ,
177 CH_EVENT_FINSTAT,
178
179 /**
180 * Event, representing timer expiry.
181 */
182 CH_EVENT_TIMER,
183
184 /**
185 * Events, representing commands from upper levels.
186 */
187 CH_EVENT_START,
188 CH_EVENT_STOP,
189
190 /**
191 * MUST always be the last element!
192 */
193 NR_CH_EVENTS,
194};
195
196/**
197 * States of the channel statemachine.
198 */
199enum ch_states {
200 /**
201 * Channel not assigned to any device,
202 * initial state, direction invalid
203 */
204 CH_STATE_IDLE,
205
206 /**
207 * Channel assigned but not operating
208 */
209 CH_STATE_STOPPED,
210 CH_STATE_STARTWAIT,
211 CH_STATE_STARTRETRY,
212 CH_STATE_SETUPWAIT,
213 CH_STATE_RXINIT,
214 CH_STATE_TXINIT,
215 CH_STATE_RX,
216 CH_STATE_TX,
217 CH_STATE_RXIDLE,
218 CH_STATE_TXIDLE,
219 CH_STATE_RXERR,
220 CH_STATE_TXERR,
221 CH_STATE_TERM,
222 CH_STATE_DTERM,
223 CH_STATE_NOTOP,
224
225 /**
226 * MUST always be the last element!
227 */
228 NR_CH_STATES,
229};
230
231static int loglevel = CTC_LOGLEVEL_DEFAULT;
232
233/**
234 * Linked list of all detected channels.
235 */
236static struct channel *channels = NULL;
237
238/**
239 * Print Banner.
240 */
241static void
242print_banner(void)
243{
244 static int printed = 0;
245
246 if (printed)
247 return;
248
249 printk(KERN_INFO "CTC driver initialized\n");
250 printed = 1;
251}
252
253/**
254 * Return type of a detected device.
255 */
256static enum channel_types
257get_channel_type(struct ccw_device_id *id)
258{
259 enum channel_types type = (enum channel_types) id->driver_info;
260
261 if (type == channel_type_ficon)
262 type = channel_type_escon;
263
264 return type;
265}
266
267static const char *ch_event_names[] = {
268 "ccw_device success",
269 "ccw_device busy",
270 "ccw_device enodev",
271 "ccw_device ioerr",
272 "ccw_device unknown",
273
274 "Status ATTN & BUSY",
275 "Status ATTN",
276 "Status BUSY",
277
278 "Unit check remote reset",
279 "Unit check remote system reset",
280 "Unit check TX timeout",
281 "Unit check TX parity",
282 "Unit check Hardware failure",
283 "Unit check RX parity",
284 "Unit check ZERO",
285 "Unit check Unknown",
286
287 "SubChannel check Unknown",
288
289 "Machine check failure",
290 "Machine check operational",
291
292 "IRQ normal",
293 "IRQ final",
294
295 "Timer",
296
297 "Start",
298 "Stop",
299};
300
301static const char *ch_state_names[] = {
302 "Idle",
303 "Stopped",
304 "StartWait",
305 "StartRetry",
306 "SetupWait",
307 "RX init",
308 "TX init",
309 "RX",
310 "TX",
311 "RX idle",
312 "TX idle",
313 "RX error",
314 "TX error",
315 "Terminating",
316 "Restarting",
317 "Not operational",
318};
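
These state and event enums feed the table-driven FSM framework in fsm.h, where each (state, event) pair selects an action such as the ch_action_* handlers below. A minimal self-contained illustration of the same pattern (the real fsm.h registers a list of state/event/function nodes rather than a 2D table):

    #include <stdio.h>

    /* minimal table-driven FSM in the style of the driver's fsm.h */
    enum state { ST_IDLE, ST_RUNNING, NR_STATES };
    enum event { EV_START, EV_STOP, NR_EVENTS };

    typedef void (*fsm_func)(int *state, void *arg);

    static void act_start(int *state, void *arg)
    {
            (void)arg;
            *state = ST_RUNNING;
            printf("started\n");
    }

    static void act_stop(int *state, void *arg)
    {
            (void)arg;
            *state = ST_IDLE;
            printf("stopped\n");
    }

    /* one action per (state, event); NULL = event ignored in that state */
    static fsm_func table[NR_STATES][NR_EVENTS] = {
            [ST_IDLE]    = { [EV_START] = act_start },
            [ST_RUNNING] = { [EV_STOP]  = act_stop  },
    };

    static void fsm_event(int *state, enum event ev, void *arg)
    {
            fsm_func f = table[*state][ev];

            if (f)
                    f(state, arg);
    }

    int main(void)
    {
            int st = ST_IDLE;

            fsm_event(&st, EV_START, NULL);
            fsm_event(&st, EV_STOP, NULL);
            return 0;
    }
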
319
320#ifdef DEBUG
321/**
322 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
323 *
324 * @param skb The sk_buff to dump.
325 * @param offset Offset relative to skb-data, where to start the dump.
326 */
327static void
328ctc_dump_skb(struct sk_buff *skb, int offset)
329{
330 unsigned char *p = skb->data;
331 __u16 bl;
332 struct ll_header *header;
333 int i;
334
335 if (!(loglevel & CTC_LOGLEVEL_DEBUG))
336 return;
337 p += offset;
338 bl = *((__u16 *) p);
339 p += 2;
340 header = (struct ll_header *) p;
341 p -= 2;
342
343 printk(KERN_DEBUG "dump:\n");
344 printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);
345
346 printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
347 header->length);
348 printk(KERN_DEBUG "h->type=%04x\n", header->type);
349 printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
350 if (bl > 16)
351 bl = 16;
352 printk(KERN_DEBUG "data: ");
353 for (i = 0; i < bl; i++)
354 printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
355 printk("\n");
356}
357#else
358static inline void
359ctc_dump_skb(struct sk_buff *skb, int offset)
360{
361}
362#endif
363
364/**
365 * Unpack a just received skb and hand it over to
366 * upper layers.
367 *
368 * @param ch The channel where this skb has been received.
369 * @param pskb The received skb.
370 */
371static void
372ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
373{
374 struct net_device *dev = ch->netdev;
375 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
376 __u16 len = *((__u16 *) pskb->data);
377
378 DBF_TEXT(trace, 4, __FUNCTION__);
379 skb_put(pskb, 2 + LL_HEADER_LENGTH);
380 skb_pull(pskb, 2);
381 pskb->dev = dev;
382 pskb->ip_summed = CHECKSUM_UNNECESSARY;
383 while (len > 0) {
384 struct sk_buff *skb;
385 struct ll_header *header = (struct ll_header *) pskb->data;
386
387 skb_pull(pskb, LL_HEADER_LENGTH);
388 if ((ch->protocol == CTC_PROTO_S390) &&
389 (header->type != ETH_P_IP)) {
390
391#ifndef DEBUG
392 if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
393#endif
394 /**
395 * Check the packet type only when strictly using
396 * the S/390 (OS/390) protocol, which supports
397 * IP only. In all other modes, allow any
398 * packet type.
399 */
400 ctc_pr_warn(
401 "%s Illegal packet type 0x%04x received, dropping\n",
402 dev->name, header->type);
403 ch->logflags |= LOG_FLAG_ILLEGALPKT;
404#ifndef DEBUG
405 }
406#endif
407#ifdef DEBUG
408 ctc_dump_skb(pskb, -6);
409#endif
410 privptr->stats.rx_dropped++;
411 privptr->stats.rx_frame_errors++;
412 return;
413 }
414 pskb->protocol = ntohs(header->type);
415 if (header->length <= LL_HEADER_LENGTH) {
416#ifndef DEBUG
417 if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
418#endif
419 ctc_pr_warn(
420 "%s Illegal packet size %d "
421 "received (MTU=%d blocklen=%d), "
422 "dropping\n", dev->name, header->length,
423 dev->mtu, len);
424 ch->logflags |= LOG_FLAG_ILLEGALSIZE;
425#ifndef DEBUG
426 }
427#endif
428#ifdef DEBUG
429 ctc_dump_skb(pskb, -6);
430#endif
431 privptr->stats.rx_dropped++;
432 privptr->stats.rx_length_errors++;
433 return;
434 }
435 header->length -= LL_HEADER_LENGTH;
436 len -= LL_HEADER_LENGTH;
437 if ((header->length > skb_tailroom(pskb)) ||
438 (header->length > len)) {
439#ifndef DEBUG
440 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
441#endif
442 ctc_pr_warn(
443 "%s Illegal packet size %d "
444 "(beyond the end of received data), "
445 "dropping\n", dev->name, header->length);
446 ch->logflags |= LOG_FLAG_OVERRUN;
447#ifndef DEBUG
448 }
449#endif
450#ifdef DEBUG
451 ctc_dump_skb(pskb, -6);
452#endif
453 privptr->stats.rx_dropped++;
454 privptr->stats.rx_length_errors++;
455 return;
456 }
457 skb_put(pskb, header->length);
458 skb_reset_mac_header(pskb);
459 len -= header->length;
460 skb = dev_alloc_skb(pskb->len);
461 if (!skb) {
462#ifndef DEBUG
463 if (!(ch->logflags & LOG_FLAG_NOMEM)) {
464#endif
465 ctc_pr_warn(
466 "%s Out of memory in ctc_unpack_skb\n",
467 dev->name);
468 ch->logflags |= LOG_FLAG_NOMEM;
469#ifndef DEBUG
470 }
471#endif
472 privptr->stats.rx_dropped++;
473 return;
474 }
475 skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
476 pskb->len);
477 skb_reset_mac_header(skb);
478 skb->dev = pskb->dev;
479 skb->protocol = pskb->protocol;
480 pskb->ip_summed = CHECKSUM_UNNECESSARY;
481 /**
482 * reset logflags
483 */
484 ch->logflags = 0;
485 privptr->stats.rx_packets++;
486 privptr->stats.rx_bytes += skb->len;
487 netif_rx_ni(skb);
488 dev->last_rx = jiffies;
489 if (len > 0) {
490 skb_pull(pskb, header->length);
491 if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
492#ifndef DEBUG
493 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
494#endif
495 ctc_pr_warn(
496 "%s Overrun in ctc_unpack_skb\n",
497 dev->name);
498 ch->logflags |= LOG_FLAG_OVERRUN;
499#ifndef DEBUG
500 }
501#endif
502 return;
503 }
504 skb_put(pskb, LL_HEADER_LENGTH);
505 }
506 }
507}
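
ctc_unpack_skb walks the classic CTC block format: a leading block-length halfword followed by packets, each preceded by an ll_header whose own length field includes the header itself. The exact accounting of the outer length varies with the protocol mode (see the check_len handling in ch_action_rx below); the sketch here uses the simple convention that the block length covers the whole buffer:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct ll_header {
            uint16_t length;   /* includes this header */
            uint16_t type;     /* e.g. ETH_P_IP */
            uint16_t unused;
    } __attribute__((packed));

    #define LL_HEADER_LENGTH sizeof(struct ll_header)

    static void unpack_block(const uint8_t *buf, size_t buflen)
    {
            uint16_t block_len;
            size_t off = 2;

            if (buflen < 2)
                    return;
            memcpy(&block_len, buf, 2);      /* leading block length */
            while (off + LL_HEADER_LENGTH <= block_len && off < buflen) {
                    struct ll_header h;

                    memcpy(&h, buf + off, LL_HEADER_LENGTH);
                    if (h.length <= LL_HEADER_LENGTH ||
                        off + h.length > buflen)
                            break;           /* malformed: drop the block */
                    printf("packet: type=0x%04x payload=%u bytes\n",
                           h.type, (unsigned)(h.length - LL_HEADER_LENGTH));
                    off += h.length;
            }
    }

    int main(void)
    {
            uint8_t buf[12] = { 0 };
            uint16_t block_len = 12, pkt_len = 10, type = 0x0800;

            memcpy(buf, &block_len, 2);
            memcpy(buf + 2, &pkt_len, 2);    /* ll_header.length = 6 + 4 */
            memcpy(buf + 4, &type, 2);
            /* bytes 8..11 are the 4-byte payload */
            unpack_block(buf, sizeof(buf));
            return 0;
    }
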
508
509/**
510 * Check the return code of a preceding ccw_device call, halt_IO etc.
511 *
512 * @param ch The channel, the error belongs to.
513 * @param return_code The error code to inspect.
514 */
515static void
516ccw_check_return_code(struct channel *ch, int return_code, char *msg)
517{
518 DBF_TEXT(trace, 5, __FUNCTION__);
519 switch (return_code) {
520 case 0:
521 fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
522 break;
523 case -EBUSY:
524 ctc_pr_warn("%s (%s): Busy!\n", ch->id, msg);
525 fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
526 break;
527 case -ENODEV:
528 ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
529 ch->id, msg);
530 fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
531 break;
532 case -EIO:
533 ctc_pr_emerg("%s (%s): Status pending...\n",
534 ch->id, msg);
535 fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
536 break;
537 default:
538 ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
539 ch->id, msg, return_code);
540 fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
541 }
542}
543
544/**
545 * Check sense of a unit check.
546 *
547 * @param ch The channel, the sense code belongs to.
548 * @param sense The sense code to inspect.
549 */
550static void
551ccw_unit_check(struct channel *ch, unsigned char sense)
552{
553 DBF_TEXT(trace, 5, __FUNCTION__);
554 if (sense & SNS0_INTERVENTION_REQ) {
555 if (sense & 0x01) {
556 ctc_pr_debug("%s: Interface disc. or Sel. reset "
557 "(remote)\n", ch->id);
558 fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
559 } else {
560 ctc_pr_debug("%s: System reset (remote)\n", ch->id);
561 fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
562 }
563 } else if (sense & SNS0_EQUIPMENT_CHECK) {
564 if (sense & SNS0_BUS_OUT_CHECK) {
565 ctc_pr_warn("%s: Hardware malfunction (remote)\n",
566 ch->id);
567 fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
568 } else {
569 ctc_pr_warn("%s: Read-data parity error (remote)\n",
570 ch->id);
571 fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
572 }
573 } else if (sense & SNS0_BUS_OUT_CHECK) {
574 if (sense & 0x04) {
575 ctc_pr_warn("%s: Data-streaming timeout\n", ch->id);
576 fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
577 } else {
578 ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
579 fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
580 }
581 } else if (sense & SNS0_CMD_REJECT) {
582 ctc_pr_warn("%s: Command reject\n", ch->id);
583 } else if (sense == 0) {
584 ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
585 fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
586 } else {
587 ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
588 ch->id, sense);
589 fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
590 }
591}
592
593static void
594ctc_purge_skb_queue(struct sk_buff_head *q)
595{
596 struct sk_buff *skb;
597
598 DBF_TEXT(trace, 5, __FUNCTION__);
599
600 while ((skb = skb_dequeue(q))) {
601 atomic_dec(&skb->users);
602 dev_kfree_skb_irq(skb);
603 }
604}
605
606static int
607ctc_checkalloc_buffer(struct channel *ch, int warn)
608{
609 DBF_TEXT(trace, 5, __FUNCTION__);
610 if ((ch->trans_skb == NULL) ||
611 (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
612 if (ch->trans_skb != NULL)
613 dev_kfree_skb(ch->trans_skb);
614 clear_normalized_cda(&ch->ccw[1]);
615 ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
616 GFP_ATOMIC | GFP_DMA);
617 if (ch->trans_skb == NULL) {
618 if (warn)
619 ctc_pr_warn(
620 "%s: Couldn't alloc %s trans_skb\n",
621 ch->id,
622 (CHANNEL_DIRECTION(ch->flags) == READ) ?
623 "RX" : "TX");
624 return -ENOMEM;
625 }
626 ch->ccw[1].count = ch->max_bufsize;
627 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
628 dev_kfree_skb(ch->trans_skb);
629 ch->trans_skb = NULL;
630 if (warn)
631 ctc_pr_warn(
632 "%s: set_normalized_cda for %s "
633 "trans_skb failed, dropping packets\n",
634 ch->id,
635 (CHANNEL_DIRECTION(ch->flags) == READ) ?
636 "RX" : "TX");
637 return -ENOMEM;
638 }
639 ch->ccw[1].count = 0;
640 ch->trans_skb_data = ch->trans_skb->data;
641 ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
642 }
643 return 0;
644}
645
646/**
647 * Dummy NOP action for statemachines
648 */
649static void
650fsm_action_nop(fsm_instance * fi, int event, void *arg)
651{
652}
653
654/**
655 * Actions for channel - statemachines.
656 *****************************************************************************/
657
658/**
659 * Normal data has been sent. Free the corresponding
660 * skb (it's in io_queue), reset dev->tbusy and
661 * revert to idle state.
662 *
663 * @param fi An instance of a channel statemachine.
664 * @param event The event, just happened.
665 * @param arg Generic pointer, cast from channel * upon call.
666 */
667static void
668ch_action_txdone(fsm_instance * fi, int event, void *arg)
669{
670 struct channel *ch = (struct channel *) arg;
671 struct net_device *dev = ch->netdev;
672 struct ctc_priv *privptr = dev->priv;
673 struct sk_buff *skb;
674 int first = 1;
675 int i;
676 unsigned long duration;
677 struct timespec done_stamp = current_kernel_time();
678
679 DBF_TEXT(trace, 4, __FUNCTION__);
680
681 duration =
682 (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
683 (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
684 if (duration > ch->prof.tx_time)
685 ch->prof.tx_time = duration;
686
687 if (ch->irb->scsw.count != 0)
688 ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
689 dev->name, ch->irb->scsw.count);
690 fsm_deltimer(&ch->timer);
691 while ((skb = skb_dequeue(&ch->io_queue))) {
692 privptr->stats.tx_packets++;
693 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
694 if (first) {
695 privptr->stats.tx_bytes += 2;
696 first = 0;
697 }
698 atomic_dec(&skb->users);
699 dev_kfree_skb_irq(skb);
700 }
701 spin_lock(&ch->collect_lock);
702 clear_normalized_cda(&ch->ccw[4]);
703 if (ch->collect_len > 0) {
704 int rc;
705
706 if (ctc_checkalloc_buffer(ch, 1)) {
707 spin_unlock(&ch->collect_lock);
708 return;
709 }
710 ch->trans_skb->data = ch->trans_skb_data;
711 skb_reset_tail_pointer(ch->trans_skb);
712 ch->trans_skb->len = 0;
713 if (ch->prof.maxmulti < (ch->collect_len + 2))
714 ch->prof.maxmulti = ch->collect_len + 2;
715 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
716 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
717 *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
718 i = 0;
719 while ((skb = skb_dequeue(&ch->collect_queue))) {
720 skb_copy_from_linear_data(skb, skb_put(ch->trans_skb,
721 skb->len),
722 skb->len);
723 privptr->stats.tx_packets++;
724 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
725 atomic_dec(&skb->users);
726 dev_kfree_skb_irq(skb);
727 i++;
728 }
729 ch->collect_len = 0;
730 spin_unlock(&ch->collect_lock);
731 ch->ccw[1].count = ch->trans_skb->len;
732 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
733 ch->prof.send_stamp = current_kernel_time();
734 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
735 (unsigned long) ch, 0xff, 0);
736 ch->prof.doios_multi++;
737 if (rc != 0) {
738 privptr->stats.tx_dropped += i;
739 privptr->stats.tx_errors += i;
740 fsm_deltimer(&ch->timer);
741 ccw_check_return_code(ch, rc, "chained TX");
742 }
743 } else {
744 spin_unlock(&ch->collect_lock);
745 fsm_newstate(fi, CH_STATE_TXIDLE);
746 }
747 ctc_clear_busy(dev);
748}
749
750/**
751 * Initial data is sent.
752 * Notify device statemachine that we are up and
753 * running.
754 *
755 * @param fi An instance of a channel statemachine.
756 * @param event The event, just happened.
757 * @param arg Generic pointer, cast from channel * upon call.
758 */
759static void
760ch_action_txidle(fsm_instance * fi, int event, void *arg)
761{
762 struct channel *ch = (struct channel *) arg;
763
764 DBF_TEXT(trace, 4, __FUNCTION__);
765 fsm_deltimer(&ch->timer);
766 fsm_newstate(fi, CH_STATE_TXIDLE);
767 fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
768 ch->netdev);
769}
770
771/**
772 * Got normal data, check for sanity, queue it up, allocate new buffer
773 * trigger bottom half, and initiate next read.
774 *
775 * @param fi An instance of a channel statemachine.
776 * @param event The event, just happened.
777 * @param arg Generic pointer, cast from channel * upon call.
778 */
779static void
780ch_action_rx(fsm_instance * fi, int event, void *arg)
781{
782 struct channel *ch = (struct channel *) arg;
783 struct net_device *dev = ch->netdev;
784 struct ctc_priv *privptr = dev->priv;
785 int len = ch->max_bufsize - ch->irb->scsw.count;
786 struct sk_buff *skb = ch->trans_skb;
787 __u16 block_len = *((__u16 *) skb->data);
788 int check_len;
789 int rc;
790
791 DBF_TEXT(trace, 4, __FUNCTION__);
792 fsm_deltimer(&ch->timer);
793 if (len < 8) {
794 ctc_pr_debug("%s: got packet with length %d < 8\n",
795 dev->name, len);
796 privptr->stats.rx_dropped++;
797 privptr->stats.rx_length_errors++;
798 goto again;
799 }
800 if (len > ch->max_bufsize) {
801 ctc_pr_debug("%s: got packet with length %d > %d\n",
802 dev->name, len, ch->max_bufsize);
803 privptr->stats.rx_dropped++;
804 privptr->stats.rx_length_errors++;
805 goto again;
806 }
807
808 /**
809 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
810 */
811 switch (ch->protocol) {
812 case CTC_PROTO_S390:
813 case CTC_PROTO_OS390:
814 check_len = block_len + 2;
815 break;
816 default:
817 check_len = block_len;
818 break;
819 }
820 if ((len < block_len) || (len > check_len)) {
821 ctc_pr_debug("%s: got block length %d != rx length %d\n",
822 dev->name, block_len, len);
823#ifdef DEBUG
824 ctc_dump_skb(skb, 0);
825#endif
826 *((__u16 *) skb->data) = len;
827 privptr->stats.rx_dropped++;
828 privptr->stats.rx_length_errors++;
829 goto again;
830 }
831 block_len -= 2;
832 if (block_len > 0) {
833 *((__u16 *) skb->data) = block_len;
834 ctc_unpack_skb(ch, skb);
835 }
836 again:
837 skb->data = ch->trans_skb_data;
838 skb_reset_tail_pointer(skb);
839 skb->len = 0;
840 if (ctc_checkalloc_buffer(ch, 1))
841 return;
842 ch->ccw[1].count = ch->max_bufsize;
843 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
844 if (rc != 0)
845 ccw_check_return_code(ch, rc, "normal RX");
846}
847
848static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);
849
850/**
851 * Initialize connection by sending a __u16 of value 0.
852 *
853 * @param fi An instance of a channel statemachine.
854 * @param event The event, just happened.
855 * @param arg Generic pointer, cast from channel * upon call.
856 */
857static void
858ch_action_firstio(fsm_instance * fi, int event, void *arg)
859{
860 struct channel *ch = (struct channel *) arg;
861 int rc;
862
863 DBF_TEXT(trace, 4, __FUNCTION__);
864
865 if (fsm_getstate(fi) == CH_STATE_TXIDLE)
866 ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
867 fsm_deltimer(&ch->timer);
868 if (ctc_checkalloc_buffer(ch, 1))
869 return;
870 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
871 (ch->protocol == CTC_PROTO_OS390)) {
872 /* OS/390 resp. z/OS */
873 if (CHANNEL_DIRECTION(ch->flags) == READ) {
874 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
875 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
876 CH_EVENT_TIMER, ch);
877 ch_action_rxidle(fi, event, arg);
878 } else {
879 struct net_device *dev = ch->netdev;
880 fsm_newstate(fi, CH_STATE_TXIDLE);
881 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
882 DEV_EVENT_TXUP, dev);
883 }
884 return;
885 }
886
887 /**
888 * Don't setup a timer for receiving the initial RX frame
889 * if in compatibility mode, since VM TCP delays the initial
890 * frame until it has some data to send.
891 */
892 if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
893 (ch->protocol != CTC_PROTO_S390))
894 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
895
896 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
897 ch->ccw[1].count = 2; /* Transfer only length */
898
899 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
900 ? CH_STATE_RXINIT : CH_STATE_TXINIT);
901 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
902 if (rc != 0) {
903 fsm_deltimer(&ch->timer);
904 fsm_newstate(fi, CH_STATE_SETUPWAIT);
905 ccw_check_return_code(ch, rc, "init IO");
906 }
907 /**
908 * If in compatibility mode since we don't setup a timer, we
909 * also signal RX channel up immediately. This enables us
910 * to send packets early which in turn usually triggers some
911 * reply from VM TCP which brings up the RX channel to its
912 * final state.
913 */
914 if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
915 (ch->protocol == CTC_PROTO_S390)) {
916 struct net_device *dev = ch->netdev;
917 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
918 dev);
919 }
920}
921
922/**
923 * Got initial data, check it. If OK,
924 * notify device statemachine that we are up and
925 * running.
926 *
927 * @param fi An instance of a channel statemachine.
928 * @param event The event, just happened.
929 * @param arg Generic pointer, cast from channel * upon call.
930 */
931static void
932ch_action_rxidle(fsm_instance * fi, int event, void *arg)
933{
934 struct channel *ch = (struct channel *) arg;
935 struct net_device *dev = ch->netdev;
936 __u16 buflen;
937 int rc;
938
939 DBF_TEXT(trace, 4, __FUNCTION__);
940 fsm_deltimer(&ch->timer);
941 buflen = *((__u16 *) ch->trans_skb->data);
942#ifdef DEBUG
943 ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
944#endif
945 if (buflen >= CTC_INITIAL_BLOCKLEN) {
946 if (ctc_checkalloc_buffer(ch, 1))
947 return;
948 ch->ccw[1].count = ch->max_bufsize;
949 fsm_newstate(fi, CH_STATE_RXIDLE);
950 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
951 (unsigned long) ch, 0xff, 0);
952 if (rc != 0) {
953 fsm_newstate(fi, CH_STATE_RXINIT);
954 ccw_check_return_code(ch, rc, "initial RX");
955 } else
956 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
957 DEV_EVENT_RXUP, dev);
958 } else {
959 ctc_pr_debug("%s: Initial RX count %d not %d\n",
960 dev->name, buflen, CTC_INITIAL_BLOCKLEN);
961 ch_action_firstio(fi, event, arg);
962 }
963}
964
965/**
966 * Set channel into extended mode.
967 *
968 * @param fi An instance of a channel statemachine.
969 * @param event The event, just happened.
970 * @param arg Generic pointer, cast from channel * upon call.
971 */
972static void
973ch_action_setmode(fsm_instance * fi, int event, void *arg)
974{
975 struct channel *ch = (struct channel *) arg;
976 int rc;
977 unsigned long saveflags;
978
979 DBF_TEXT(trace, 4, __FUNCTION__);
980 fsm_deltimer(&ch->timer);
981 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
982 fsm_newstate(fi, CH_STATE_SETUPWAIT);
983 saveflags = 0; /* avoids compiler warning with
984 spin_unlock_irqrestore */
985 if (event == CH_EVENT_TIMER) // only for timer not yet locked
986 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
987 rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
988 if (event == CH_EVENT_TIMER)
989 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
990 if (rc != 0) {
991 fsm_deltimer(&ch->timer);
992 fsm_newstate(fi, CH_STATE_STARTWAIT);
993 ccw_check_return_code(ch, rc, "set Mode");
994 } else
995 ch->retry = 0;
996}
997
998/**
999 * Setup channel.
1000 *
1001 * @param fi An instance of a channel statemachine.
1002 * @param event The event, just happened.
1003 * @param arg Generic pointer, cast from channel * upon call.
1004 */
1005static void
1006ch_action_start(fsm_instance * fi, int event, void *arg)
1007{
1008 struct channel *ch = (struct channel *) arg;
1009 unsigned long saveflags;
1010 int rc;
1011 struct net_device *dev;
1012
1013 DBF_TEXT(trace, 4, __FUNCTION__);
1014 if (ch == NULL) {
1015 ctc_pr_warn("ch_action_start ch=NULL\n");
1016 return;
1017 }
1018 if (ch->netdev == NULL) {
1019 ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
1020 return;
1021 }
1022 dev = ch->netdev;
1023
1024#ifdef DEBUG
1025 ctc_pr_debug("%s: %s channel start\n", dev->name,
1026 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1027#endif
1028
1029 if (ch->trans_skb != NULL) {
1030 clear_normalized_cda(&ch->ccw[1]);
1031 dev_kfree_skb(ch->trans_skb);
1032 ch->trans_skb = NULL;
1033 }
1034 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1035 ch->ccw[1].cmd_code = CCW_CMD_READ;
1036 ch->ccw[1].flags = CCW_FLAG_SLI;
1037 ch->ccw[1].count = 0;
1038 } else {
1039 ch->ccw[1].cmd_code = CCW_CMD_WRITE;
1040 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1041 ch->ccw[1].count = 0;
1042 }
1043 if (ctc_checkalloc_buffer(ch, 0)) {
1044 ctc_pr_notice(
1045 "%s: Could not allocate %s trans_skb, delaying "
1046 "allocation until first transfer\n",
1047 dev->name,
1048 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1049 }
1050
1051 ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
1052 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1053 ch->ccw[0].count = 0;
1054 ch->ccw[0].cda = 0;
1055 ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */
1056 ch->ccw[2].flags = CCW_FLAG_SLI;
1057 ch->ccw[2].count = 0;
1058 ch->ccw[2].cda = 0;
1059 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
1060 ch->ccw[4].cda = 0;
1061 ch->ccw[4].flags &= ~CCW_FLAG_IDA;
1062
1063 fsm_newstate(fi, CH_STATE_STARTWAIT);
1064 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1065 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1066 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1067 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1068 if (rc != 0) {
1069 if (rc != -EBUSY)
1070 fsm_deltimer(&ch->timer);
1071 ccw_check_return_code(ch, rc, "initial HaltIO");
1072 }
1073#ifdef DEBUG
1074 ctc_pr_debug("ctc: %s(): leaving\n", __func__);
1075#endif
1076}
1077
1078/**
1079 * Shutdown a channel.
1080 *
1081 * @param fi An instance of a channel statemachine.
1082 * @param event The event, just happened.
1083 * @param arg Generic pointer, cast from channel * upon call.
1084 */
1085static void
1086ch_action_haltio(fsm_instance * fi, int event, void *arg)
1087{
1088 struct channel *ch = (struct channel *) arg;
1089 unsigned long saveflags;
1090 int rc;
1091 int oldstate;
1092
1093 DBF_TEXT(trace, 3, __FUNCTION__);
1094 fsm_deltimer(&ch->timer);
1095 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
 1096 saveflags = 0; /* avoids compiler warning with
 1097 spin_unlock_irqrestore */
1098 if (event == CH_EVENT_STOP) // only for STOP not yet locked
1099 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1100 oldstate = fsm_getstate(fi);
1101 fsm_newstate(fi, CH_STATE_TERM);
1102 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1103 if (event == CH_EVENT_STOP)
1104 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1105 if (rc != 0) {
1106 if (rc != -EBUSY) {
1107 fsm_deltimer(&ch->timer);
1108 fsm_newstate(fi, oldstate);
1109 }
1110 ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
1111 }
1112}
1113
1114/**
 1115 * A channel has successfully been halted.
 1116 * Clean up its queue and notify the interface statemachine.
 1117 *
 1118 * @param fi    An instance of a channel statemachine.
 1119 * @param event The event that just happened.
 1120 * @param arg   Generic pointer, cast from channel * upon call.
1121 */
1122static void
1123ch_action_stopped(fsm_instance * fi, int event, void *arg)
1124{
1125 struct channel *ch = (struct channel *) arg;
1126 struct net_device *dev = ch->netdev;
1127
1128 DBF_TEXT(trace, 3, __FUNCTION__);
1129 fsm_deltimer(&ch->timer);
1130 fsm_newstate(fi, CH_STATE_STOPPED);
1131 if (ch->trans_skb != NULL) {
1132 clear_normalized_cda(&ch->ccw[1]);
1133 dev_kfree_skb(ch->trans_skb);
1134 ch->trans_skb = NULL;
1135 }
1136 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1137 skb_queue_purge(&ch->io_queue);
1138 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1139 DEV_EVENT_RXDOWN, dev);
1140 } else {
1141 ctc_purge_skb_queue(&ch->io_queue);
1142 spin_lock(&ch->collect_lock);
1143 ctc_purge_skb_queue(&ch->collect_queue);
1144 ch->collect_len = 0;
1145 spin_unlock(&ch->collect_lock);
1146 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1147 DEV_EVENT_TXDOWN, dev);
1148 }
1149}
1150
1151/**
 1152 * A stop command from the device statemachine arrived and we are in
 1153 * not-operational mode. Set the state to stopped.
 1154 *
 1155 * @param fi    An instance of a channel statemachine.
 1156 * @param event The event that just happened.
 1157 * @param arg   Generic pointer, cast from channel * upon call.
1158 */
1159static void
1160ch_action_stop(fsm_instance * fi, int event, void *arg)
1161{
1162 fsm_newstate(fi, CH_STATE_STOPPED);
1163}
1164
1165/**
 1166 * A machine check for no path, a not-operational status or a gone
 1167 * device has happened.
 1168 * Clean up the queue and notify the interface statemachine.
 1169 *
 1170 * @param fi    An instance of a channel statemachine.
 1171 * @param event The event that just happened.
 1172 * @param arg   Generic pointer, cast from channel * upon call.
1173 */
1174static void
1175ch_action_fail(fsm_instance * fi, int event, void *arg)
1176{
1177 struct channel *ch = (struct channel *) arg;
1178 struct net_device *dev = ch->netdev;
1179
1180 DBF_TEXT(trace, 3, __FUNCTION__);
1181 fsm_deltimer(&ch->timer);
1182 fsm_newstate(fi, CH_STATE_NOTOP);
1183 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1184 skb_queue_purge(&ch->io_queue);
1185 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1186 DEV_EVENT_RXDOWN, dev);
1187 } else {
1188 ctc_purge_skb_queue(&ch->io_queue);
1189 spin_lock(&ch->collect_lock);
1190 ctc_purge_skb_queue(&ch->collect_queue);
1191 ch->collect_len = 0;
1192 spin_unlock(&ch->collect_lock);
1193 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1194 DEV_EVENT_TXDOWN, dev);
1195 }
1196}
1197
1198/**
1199 * Handle error during setup of channel.
1200 *
1201 * @param fi An instance of a channel statemachine.
 1202 * @param event The event that just happened.
 1203 * @param arg   Generic pointer, cast from channel * upon call.
1204 */
1205static void
1206ch_action_setuperr(fsm_instance * fi, int event, void *arg)
1207{
1208 struct channel *ch = (struct channel *) arg;
1209 struct net_device *dev = ch->netdev;
1210
1211 DBF_TEXT(setup, 3, __FUNCTION__);
1212 /**
1213 * Special case: Got UC_RCRESET on setmode.
 1214 * This means that the remote side isn't set up yet. In this case
 1215 * simply retry after 5 seconds (CTC_TIMEOUT_5SEC, armed below)...
1216 */
1217 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1218 ((event == CH_EVENT_UC_RCRESET) ||
1219 (event == CH_EVENT_UC_RSRESET))) {
1220 fsm_newstate(fi, CH_STATE_STARTRETRY);
1221 fsm_deltimer(&ch->timer);
1222 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1223 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1224 int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1225 if (rc != 0)
1226 ccw_check_return_code(
1227 ch, rc, "HaltIO in ch_action_setuperr");
1228 }
1229 return;
1230 }
1231
1232 ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
1233 dev->name, ch_event_names[event],
1234 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
1235 fsm_getstate_str(fi));
1236 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1237 fsm_newstate(fi, CH_STATE_RXERR);
1238 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1239 DEV_EVENT_RXDOWN, dev);
1240 } else {
1241 fsm_newstate(fi, CH_STATE_TXERR);
1242 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1243 DEV_EVENT_TXDOWN, dev);
1244 }
1245}
1246
1247/**
1248 * Restart a channel after an error.
1249 *
1250 * @param fi An instance of a channel statemachine.
 1251 * @param event The event that just happened.
 1252 * @param arg   Generic pointer, cast from channel * upon call.
1253 */
1254static void
1255ch_action_restart(fsm_instance * fi, int event, void *arg)
1256{
1257 unsigned long saveflags;
1258 int oldstate;
1259 int rc;
1260
1261 struct channel *ch = (struct channel *) arg;
1262 struct net_device *dev = ch->netdev;
1263
1264 DBF_TEXT(trace, 3, __FUNCTION__);
1265 fsm_deltimer(&ch->timer);
1266 ctc_pr_debug("%s: %s channel restart\n", dev->name,
1267 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1268 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1269 oldstate = fsm_getstate(fi);
1270 fsm_newstate(fi, CH_STATE_STARTWAIT);
1271 saveflags = 0; /* avoids compiler warning with
1272 spin_unlock_irqrestore */
1273 if (event == CH_EVENT_TIMER) // only for timer not yet locked
1274 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1275 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1276 if (event == CH_EVENT_TIMER)
1277 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1278 if (rc != 0) {
1279 if (rc != -EBUSY) {
1280 fsm_deltimer(&ch->timer);
1281 fsm_newstate(fi, oldstate);
1282 }
1283 ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
1284 }
1285}
1286
1287/**
1288 * Handle error during RX initial handshake (exchange of
1289 * 0-length block header)
1290 *
1291 * @param fi An instance of a channel statemachine.
 1292 * @param event The event that just happened.
 1293 * @param arg   Generic pointer, cast from channel * upon call.
1294 */
1295static void
1296ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
1297{
1298 struct channel *ch = (struct channel *) arg;
1299 struct net_device *dev = ch->netdev;
1300
1301 DBF_TEXT(setup, 3, __FUNCTION__);
1302 if (event == CH_EVENT_TIMER) {
1303 fsm_deltimer(&ch->timer);
1304 ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
1305 if (ch->retry++ < 3)
1306 ch_action_restart(fi, event, arg);
1307 else {
1308 fsm_newstate(fi, CH_STATE_RXERR);
1309 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1310 DEV_EVENT_RXDOWN, dev);
1311 }
1312 } else
1313 ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
1314}
1315
1316/**
 1317 * Notify the device statemachine if we gave up initialization
 1318 * of the RX channel.
 1319 *
 1320 * @param fi    An instance of a channel statemachine.
 1321 * @param event The event that just happened.
 1322 * @param arg   Generic pointer, cast from channel * upon call.
1323 */
1324static void
1325ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
1326{
1327 struct channel *ch = (struct channel *) arg;
1328 struct net_device *dev = ch->netdev;
1329
1330 DBF_TEXT(setup, 3, __FUNCTION__);
1331 fsm_newstate(fi, CH_STATE_RXERR);
1332 ctc_pr_warn("%s: RX initialization failed\n", dev->name);
1333 ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
1334 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1335}
1336
1337/**
1338 * Handle RX Unit check remote reset (remote disconnected)
1339 *
1340 * @param fi An instance of a channel statemachine.
 1341 * @param event The event that just happened.
 1342 * @param arg   Generic pointer, cast from channel * upon call.
1343 */
1344static void
1345ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
1346{
1347 struct channel *ch = (struct channel *) arg;
1348 struct channel *ch2;
1349 struct net_device *dev = ch->netdev;
1350
1351 DBF_TEXT(trace, 3, __FUNCTION__);
1352 fsm_deltimer(&ch->timer);
1353 ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
1354 dev->name);
1355
1356 /**
1357 * Notify device statemachine
1358 */
1359 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1360 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
1361
1362 fsm_newstate(fi, CH_STATE_DTERM);
1363 ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
1364 fsm_newstate(ch2->fsm, CH_STATE_DTERM);
1365
1366 ccw_device_halt(ch->cdev, (unsigned long) ch);
1367 ccw_device_halt(ch2->cdev, (unsigned long) ch2);
1368}
1369
1370/**
1371 * Handle error during TX channel initialization.
1372 *
1373 * @param fi An instance of a channel statemachine.
 1374 * @param event The event that just happened.
 1375 * @param arg   Generic pointer, cast from channel * upon call.
1376 */
1377static void
1378ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
1379{
1380 struct channel *ch = (struct channel *) arg;
1381 struct net_device *dev = ch->netdev;
1382
1383 DBF_TEXT(setup, 2, __FUNCTION__);
1384 if (event == CH_EVENT_TIMER) {
1385 fsm_deltimer(&ch->timer);
1386 ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
1387 if (ch->retry++ < 3)
1388 ch_action_restart(fi, event, arg);
1389 else {
1390 fsm_newstate(fi, CH_STATE_TXERR);
1391 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1392 DEV_EVENT_TXDOWN, dev);
1393 }
1394 } else
1395 ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
1396}
1397
1398/**
1399 * Handle TX timeout by retrying operation.
1400 *
1401 * @param fi An instance of a channel statemachine.
 1402 * @param event The event that just happened.
 1403 * @param arg   Generic pointer, cast from channel * upon call.
1404 */
1405static void
1406ch_action_txretry(fsm_instance * fi, int event, void *arg)
1407{
1408 struct channel *ch = (struct channel *) arg;
1409 struct net_device *dev = ch->netdev;
1410 unsigned long saveflags;
1411
1412 DBF_TEXT(trace, 4, __FUNCTION__);
1413 fsm_deltimer(&ch->timer);
1414 if (ch->retry++ > 3) {
1415 ctc_pr_debug("%s: TX retry failed, restarting channel\n",
1416 dev->name);
1417 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1418 DEV_EVENT_TXDOWN, dev);
1419 ch_action_restart(fi, event, arg);
1420 } else {
1421 struct sk_buff *skb;
1422
1423 ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
1424 if ((skb = skb_peek(&ch->io_queue))) {
1425 int rc = 0;
1426
1427 clear_normalized_cda(&ch->ccw[4]);
1428 ch->ccw[4].count = skb->len;
1429 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1430 ctc_pr_debug(
1431 "%s: IDAL alloc failed, chan restart\n",
1432 dev->name);
1433 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1434 DEV_EVENT_TXDOWN, dev);
1435 ch_action_restart(fi, event, arg);
1436 return;
1437 }
1438 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1439 saveflags = 0; /* avoids compiler warning with
1440 spin_unlock_irqrestore */
1441 if (event == CH_EVENT_TIMER) // only for TIMER not yet locked
1442 spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
1443 saveflags);
1444 rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1445 (unsigned long) ch, 0xff, 0);
1446 if (event == CH_EVENT_TIMER)
1447 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1448 saveflags);
1449 if (rc != 0) {
1450 fsm_deltimer(&ch->timer);
1451 ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
1452 ctc_purge_skb_queue(&ch->io_queue);
1453 }
1454 }
1455 }
1456
1457}
1458
1459/**
1460 * Handle fatal errors during an I/O command.
1461 *
1462 * @param fi An instance of a channel statemachine.
 1463 * @param event The event that just happened.
 1464 * @param arg   Generic pointer, cast from channel * upon call.
1465 */
1466static void
1467ch_action_iofatal(fsm_instance * fi, int event, void *arg)
1468{
1469 struct channel *ch = (struct channel *) arg;
1470 struct net_device *dev = ch->netdev;
1471
1472 DBF_TEXT(trace, 3, __FUNCTION__);
1473 fsm_deltimer(&ch->timer);
1474 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1475 ctc_pr_debug("%s: RX I/O error\n", dev->name);
1476 fsm_newstate(fi, CH_STATE_RXERR);
1477 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1478 DEV_EVENT_RXDOWN, dev);
1479 } else {
1480 ctc_pr_debug("%s: TX I/O error\n", dev->name);
1481 fsm_newstate(fi, CH_STATE_TXERR);
1482 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1483 DEV_EVENT_TXDOWN, dev);
1484 }
1485}
1486
1487static void
1488ch_action_reinit(fsm_instance *fi, int event, void *arg)
1489{
1490 struct channel *ch = (struct channel *)arg;
1491 struct net_device *dev = ch->netdev;
1492 struct ctc_priv *privptr = dev->priv;
1493
1494 DBF_TEXT(trace, 4, __FUNCTION__);
1495 ch_action_iofatal(fi, event, arg);
1496 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
1497}
1498
1499/**
1500 * The statemachine for a channel.
1501 */
1502static const fsm_node ch_fsm[] = {
1503 {CH_STATE_STOPPED, CH_EVENT_STOP, fsm_action_nop },
1504 {CH_STATE_STOPPED, CH_EVENT_START, ch_action_start },
1505 {CH_STATE_STOPPED, CH_EVENT_FINSTAT, fsm_action_nop },
1506 {CH_STATE_STOPPED, CH_EVENT_MC_FAIL, fsm_action_nop },
1507
1508 {CH_STATE_NOTOP, CH_EVENT_STOP, ch_action_stop },
1509 {CH_STATE_NOTOP, CH_EVENT_START, fsm_action_nop },
1510 {CH_STATE_NOTOP, CH_EVENT_FINSTAT, fsm_action_nop },
1511 {CH_STATE_NOTOP, CH_EVENT_MC_FAIL, fsm_action_nop },
1512 {CH_STATE_NOTOP, CH_EVENT_MC_GOOD, ch_action_start },
1513
1514 {CH_STATE_STARTWAIT, CH_EVENT_STOP, ch_action_haltio },
1515 {CH_STATE_STARTWAIT, CH_EVENT_START, fsm_action_nop },
1516 {CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ch_action_setmode },
1517 {CH_STATE_STARTWAIT, CH_EVENT_TIMER, ch_action_setuperr },
1518 {CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1519 {CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1520 {CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1521
1522 {CH_STATE_STARTRETRY, CH_EVENT_STOP, ch_action_haltio },
1523 {CH_STATE_STARTRETRY, CH_EVENT_TIMER, ch_action_setmode },
1524 {CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, fsm_action_nop },
1525 {CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL, ch_action_fail },
1526
1527 {CH_STATE_SETUPWAIT, CH_EVENT_STOP, ch_action_haltio },
1528 {CH_STATE_SETUPWAIT, CH_EVENT_START, fsm_action_nop },
1529 {CH_STATE_SETUPWAIT, CH_EVENT_FINSTAT, ch_action_firstio },
1530 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RCRESET, ch_action_setuperr },
1531 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ch_action_setuperr },
1532 {CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ch_action_setmode },
1533 {CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1534 {CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1535 {CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1536
1537 {CH_STATE_RXINIT, CH_EVENT_STOP, ch_action_haltio },
1538 {CH_STATE_RXINIT, CH_EVENT_START, fsm_action_nop },
1539 {CH_STATE_RXINIT, CH_EVENT_FINSTAT, ch_action_rxidle },
1540 {CH_STATE_RXINIT, CH_EVENT_UC_RCRESET, ch_action_rxiniterr },
1541 {CH_STATE_RXINIT, CH_EVENT_UC_RSRESET, ch_action_rxiniterr },
1542 {CH_STATE_RXINIT, CH_EVENT_TIMER, ch_action_rxiniterr },
1543 {CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ch_action_rxinitfail },
1544 {CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1545 {CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1546 {CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ch_action_firstio },
1547 {CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1548
1549 {CH_STATE_RXIDLE, CH_EVENT_STOP, ch_action_haltio },
1550 {CH_STATE_RXIDLE, CH_EVENT_START, fsm_action_nop },
1551 {CH_STATE_RXIDLE, CH_EVENT_FINSTAT, ch_action_rx },
1552 {CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ch_action_rxdisc },
1553// {CH_STATE_RXIDLE, CH_EVENT_UC_RSRESET, ch_action_rxretry },
1554 {CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1555 {CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1556 {CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1557 {CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ch_action_rx },
1558
1559 {CH_STATE_TXINIT, CH_EVENT_STOP, ch_action_haltio },
1560 {CH_STATE_TXINIT, CH_EVENT_START, fsm_action_nop },
1561 {CH_STATE_TXINIT, CH_EVENT_FINSTAT, ch_action_txidle },
1562 {CH_STATE_TXINIT, CH_EVENT_UC_RCRESET, ch_action_txiniterr },
1563 {CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ch_action_txiniterr },
1564 {CH_STATE_TXINIT, CH_EVENT_TIMER, ch_action_txiniterr },
1565 {CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1566 {CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1567 {CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1568
1569 {CH_STATE_TXIDLE, CH_EVENT_STOP, ch_action_haltio },
1570 {CH_STATE_TXIDLE, CH_EVENT_START, fsm_action_nop },
1571 {CH_STATE_TXIDLE, CH_EVENT_FINSTAT, ch_action_firstio },
1572 {CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, fsm_action_nop },
1573 {CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, fsm_action_nop },
1574 {CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1575 {CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1576 {CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1577
1578 {CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop },
1579 {CH_STATE_TERM, CH_EVENT_START, ch_action_restart },
1580 {CH_STATE_TERM, CH_EVENT_FINSTAT, ch_action_stopped },
1581 {CH_STATE_TERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1582 {CH_STATE_TERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1583 {CH_STATE_TERM, CH_EVENT_MC_FAIL, ch_action_fail },
1584
1585 {CH_STATE_DTERM, CH_EVENT_STOP, ch_action_haltio },
1586 {CH_STATE_DTERM, CH_EVENT_START, ch_action_restart },
1587 {CH_STATE_DTERM, CH_EVENT_FINSTAT, ch_action_setmode },
1588 {CH_STATE_DTERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1589 {CH_STATE_DTERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1590 {CH_STATE_DTERM, CH_EVENT_MC_FAIL, ch_action_fail },
1591
1592 {CH_STATE_TX, CH_EVENT_STOP, ch_action_haltio },
1593 {CH_STATE_TX, CH_EVENT_START, fsm_action_nop },
1594 {CH_STATE_TX, CH_EVENT_FINSTAT, ch_action_txdone },
1595 {CH_STATE_TX, CH_EVENT_UC_RCRESET, ch_action_txretry },
1596 {CH_STATE_TX, CH_EVENT_UC_RSRESET, ch_action_txretry },
1597 {CH_STATE_TX, CH_EVENT_TIMER, ch_action_txretry },
1598 {CH_STATE_TX, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1599 {CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_reinit },
1600 {CH_STATE_TX, CH_EVENT_MC_FAIL, ch_action_fail },
1601
1602 {CH_STATE_RXERR, CH_EVENT_STOP, ch_action_haltio },
1603 {CH_STATE_TXERR, CH_EVENT_STOP, ch_action_haltio },
1604 {CH_STATE_TXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1605 {CH_STATE_RXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1606};
1607
 1608static const int CH_FSM_LEN = ARRAY_SIZE(ch_fsm);
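/*
 * The table above is pure data: every fsm_node maps a (state, event)
 * pair to an action.  As an illustration only (the real dispatch lives
 * in the shared fsm code, field names as declared in the driver's
 * fsm.h, and this sketch is not built), a lookup over such a table
 * could look like this:
 */
#if 0	/* illustrative sketch, not part of the driver */
static void example_dispatch(fsm_instance *fi, int event, void *arg)
{
	int state = fsm_getstate(fi);
	int i;

	for (i = 0; i < CH_FSM_LEN; i++)
		if (ch_fsm[i].cond_state == state &&
		    ch_fsm[i].cond_event == event) {
			ch_fsm[i].function(fi, event, arg);
			return;
		}
	/* no entry: the fsm core reports an error in this case */
}
#endif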
1609
1610/**
1611 * Functions related to setup and device detection.
1612 *****************************************************************************/
1613
1614static inline int
1615less_than(char *id1, char *id2)
1616{
 1617 int dev1, dev2;
 1618 
 1619 /* Skip the first five characters of each id ("ch-" plus the
 1620 * start of the bus id), then compare the rest numerically. */
 1621 id1 += 5;
 1622 id2 += 5;
1623 dev1 = simple_strtoul(id1, &id1, 16);
1624 dev2 = simple_strtoul(id2, &id2, 16);
1625
1626 return (dev1 < dev2);
1627}
1628
1629/**
1630 * Add a new channel to the list of channels.
1631 * Keeps the channel list sorted.
1632 *
1633 * @param cdev The ccw_device to be added.
1634 * @param type The type class of the new channel.
1635 *
1636 * @return 0 on success, !0 on error.
1637 */
1638static int
1639add_channel(struct ccw_device *cdev, enum channel_types type)
1640{
1641 struct channel **c = &channels;
1642 struct channel *ch;
1643
1644 DBF_TEXT(trace, 2, __FUNCTION__);
1645 ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
1646 if (!ch) {
1647 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1648 return -1;
1649 }
1650 /* assure all flags and counters are reset */
1651 ch->ccw = kzalloc(8 * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
1652 if (!ch->ccw) {
1653 kfree(ch);
1654 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1655 return -1;
1656 }
1657
1658
1659 /**
1660 * "static" ccws are used in the following way:
1661 *
1662 * ccw[0..2] (Channel program for generic I/O):
1663 * 0: prepare
1664 * 1: read or write (depending on direction) with fixed
1665 * buffer (idal allocated once when buffer is allocated)
1666 * 2: nop
1667 * ccw[3..5] (Channel program for direct write of packets)
1668 * 3: prepare
1669 * 4: write (idal allocated on every write).
1670 * 5: nop
1671 * ccw[6..7] (Channel program for initial channel setup):
1672 * 6: set extended mode
1673 * 7: nop
1674 *
1675 * ch->ccw[0..5] are initialized in ch_action_start because
1676 * the channel's direction is yet unknown here.
1677 */
1678 ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
1679 ch->ccw[6].flags = CCW_FLAG_SLI;
1680
1681 ch->ccw[7].cmd_code = CCW_CMD_NOOP;
1682 ch->ccw[7].flags = CCW_FLAG_SLI;
1683
1684 ch->cdev = cdev;
1685 snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
1686 ch->type = type;
1687 ch->fsm = init_fsm(ch->id, ch_state_names,
1688 ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
1689 ch_fsm, CH_FSM_LEN, GFP_KERNEL);
1690 if (ch->fsm == NULL) {
1691 ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
1692 kfree(ch->ccw);
1693 kfree(ch);
1694 return -1;
1695 }
1696 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1697 ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
1698 if (!ch->irb) {
1699 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1700 kfree_fsm(ch->fsm);
1701 kfree(ch->ccw);
1702 kfree(ch);
1703 return -1;
1704 }
1705 while (*c && less_than((*c)->id, ch->id))
1706 c = &(*c)->next;
1707 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
1708 ctc_pr_debug(
1709 "ctc: add_channel: device %s already in list, "
1710 "using old entry\n", (*c)->id);
1711 kfree(ch->irb);
1712 kfree_fsm(ch->fsm);
1713 kfree(ch->ccw);
1714 kfree(ch);
1715 return 0;
1716 }
1717
1718 spin_lock_init(&ch->collect_lock);
1719
1720 fsm_settimer(ch->fsm, &ch->timer);
1721 skb_queue_head_init(&ch->io_queue);
1722 skb_queue_head_init(&ch->collect_queue);
1723 ch->next = *c;
1724 *c = ch;
1725 return 0;
1726}
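/*
 * add_channel() keeps the list sorted with the classic pointer-to-
 * pointer idiom: "struct channel **c" always points at the link to be
 * rewritten, so inserting at the head needs no special case.  A
 * minimal sketch of the idiom (illustrative only, not built):
 */
#if 0	/* illustrative sketch, not part of the driver */
static void sorted_insert(struct channel **head, struct channel *ch)
{
	struct channel **c = head;

	while (*c && less_than((*c)->id, ch->id))
		c = &(*c)->next;	/* advance to the next link */
	ch->next = *c;			/* splice in before *c ... */
	*c = ch;			/* ... by rewriting the link */
}
#endif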
1727
1728/**
1729 * Release a specific channel in the channel list.
1730 *
1731 * @param ch Pointer to channel struct to be released.
1732 */
1733static void
1734channel_free(struct channel *ch)
1735{
1736 ch->flags &= ~CHANNEL_FLAGS_INUSE;
1737 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1738}
1739
1740/**
 1741 * Remove a specific channel from the channel list.
 1742 *
 1743 * @param ch Pointer to channel struct to be removed.
1744 */
1745static void
1746channel_remove(struct channel *ch)
1747{
1748 struct channel **c = &channels;
1749
1750 DBF_TEXT(trace, 2, __FUNCTION__);
1751 if (ch == NULL)
1752 return;
1753
1754 channel_free(ch);
1755 while (*c) {
1756 if (*c == ch) {
1757 *c = ch->next;
1758 fsm_deltimer(&ch->timer);
1759 kfree_fsm(ch->fsm);
1760 clear_normalized_cda(&ch->ccw[4]);
1761 if (ch->trans_skb != NULL) {
1762 clear_normalized_cda(&ch->ccw[1]);
1763 dev_kfree_skb(ch->trans_skb);
1764 }
1765 kfree(ch->ccw);
1766 kfree(ch->irb);
1767 kfree(ch);
1768 return;
1769 }
1770 c = &((*c)->next);
1771 }
1772}
1773
1774/**
1775 * Get a specific channel from the channel list.
1776 *
1777 * @param type Type of channel we are interested in.
1778 * @param id Id of channel we are interested in.
1779 * @param direction Direction we want to use this channel for.
1780 *
1781 * @return Pointer to a channel or NULL if no matching channel available.
1782 */
 1783static struct channel *
 1784channel_get(enum channel_types type, char *id, int direction)
1786{
1787 struct channel *ch = channels;
1788
1789 DBF_TEXT(trace, 3, __FUNCTION__);
1790#ifdef DEBUG
1791 ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
1792 __func__, id, type);
1793#endif
1794
1795 while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
1796#ifdef DEBUG
 1797 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
1798 __func__, ch, ch->id, ch->type);
1799#endif
1800 ch = ch->next;
1801 }
1802#ifdef DEBUG
 1803 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
1804 __func__, ch, ch->id, ch->type);
1805#endif
1806 if (!ch) {
1807 ctc_pr_warn("ctc: %s(): channel with id %s "
1808 "and type %d not found in channel list\n",
1809 __func__, id, type);
1810 } else {
1811 if (ch->flags & CHANNEL_FLAGS_INUSE)
1812 ch = NULL;
1813 else {
1814 ch->flags |= CHANNEL_FLAGS_INUSE;
1815 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
1816 ch->flags |= (direction == WRITE)
1817 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
1818 fsm_newstate(ch->fsm, CH_STATE_STOPPED);
1819 }
1820 }
1821 return ch;
1822}
1823
1824/**
1825 * Return the channel type by name.
1826 *
1827 * @param name Name of network interface.
1828 *
1829 * @return Type class of channel to be used for that interface.
1830 */
 1831static inline enum channel_types
1832extract_channel_media(char *name)
1833{
1834 enum channel_types ret = channel_type_unknown;
1835
1836 if (name != NULL) {
1837 if (strncmp(name, "ctc", 3) == 0)
1838 ret = channel_type_parallel;
1839 if (strncmp(name, "escon", 5) == 0)
1840 ret = channel_type_escon;
1841 }
1842 return ret;
1843}
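/*
 * Examples for extract_channel_media(): an interface named "ctc0"
 * maps to channel_type_parallel, "escon1" maps to channel_type_escon,
 * and a NULL or unrecognized name yields channel_type_unknown.
 */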
1844
1845static long
1846__ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
1847{
1848 if (!IS_ERR(irb))
1849 return 0;
1850
1851 switch (PTR_ERR(irb)) {
1852 case -EIO:
1853 ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
1854// CTC_DBF_TEXT(trace, 2, "ckirberr");
1855// CTC_DBF_TEXT_(trace, 2, " rc%d", -EIO);
1856 break;
1857 case -ETIMEDOUT:
1858 ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
1859// CTC_DBF_TEXT(trace, 2, "ckirberr");
1860// CTC_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
1861 break;
1862 default:
1863 ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
1864 cdev->dev.bus_id);
1865// CTC_DBF_TEXT(trace, 2, "ckirberr");
1866// CTC_DBF_TEXT(trace, 2, " rc???");
1867 }
1868 return PTR_ERR(irb);
1869}
1870
1871/**
1872 * Main IRQ handler.
1873 *
1874 * @param cdev The ccw_device the interrupt is for.
1875 * @param intparm interruption parameter.
1876 * @param irb interruption response block.
1877 */
1878static void
1879ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1880{
1881 struct channel *ch;
1882 struct net_device *dev;
1883 struct ctc_priv *priv;
1884
1885 DBF_TEXT(trace, 5, __FUNCTION__);
1886 if (__ctc_check_irb_error(cdev, irb))
1887 return;
1888
1889 /* Check for unsolicited interrupts. */
1890 if (!cdev->dev.driver_data) {
1891 ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
1892 cdev->dev.bus_id, irb->scsw.cstat,
1893 irb->scsw.dstat);
1894 return;
1895 }
1896
1897 priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
1898 ->dev.driver_data;
1899
1900 /* Try to extract channel from driver data. */
1901 if (priv->channel[READ]->cdev == cdev)
1902 ch = priv->channel[READ];
1903 else if (priv->channel[WRITE]->cdev == cdev)
1904 ch = priv->channel[WRITE];
1905 else {
1906 ctc_pr_err("ctc: Can't determine channel for interrupt, "
1907 "device %s\n", cdev->dev.bus_id);
1908 return;
1909 }
1910
1911 dev = (struct net_device *) (ch->netdev);
1912 if (dev == NULL) {
1913 ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
1914 cdev->dev.bus_id, ch);
1915 return;
1916 }
1917
1918#ifdef DEBUG
1919 ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
1920 dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
1921#endif
1922
1923 /* Copy interruption response block. */
1924 memcpy(ch->irb, irb, sizeof(struct irb));
1925
1926 /* Check for good subchannel return code, otherwise error message */
1927 if (ch->irb->scsw.cstat) {
1928 fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
1929 ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
1930 dev->name, ch->id, ch->irb->scsw.cstat,
1931 ch->irb->scsw.dstat);
1932 return;
1933 }
1934
1935 /* Check the reason-code of a unit check */
1936 if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
1937 ccw_unit_check(ch, ch->irb->ecw[0]);
1938 return;
1939 }
1940 if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
1941 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
1942 fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
1943 else
1944 fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
1945 return;
1946 }
1947 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
1948 fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
1949 return;
1950 }
1951 if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
1952 (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
1953 (ch->irb->scsw.stctl ==
1954 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
1955 fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
1956 else
1957 fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
1958
1959}
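/*
 * Summary of the dispatch order above: check the IRB pointer for
 * errors, reject unsolicited interrupts, then inspect the copied IRB
 * in order of severity: subchannel check (cstat), unit check, busy /
 * attention combinations, and finally the status-pending cases that
 * mark a completed channel program (CH_EVENT_FINSTAT).  Anything else
 * reaches the state machine as a generic CH_EVENT_IRQ.
 */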
1960
1961/**
 1962 * Actions for the interface statemachine.
1963 *****************************************************************************/
1964
1965/**
 1966 * Start up channels by sending CH_EVENT_START to each channel.
 1967 *
 1968 * @param fi    An instance of an interface statemachine.
 1969 * @param event The event that just happened.
 1970 * @param arg   Generic pointer, cast from struct net_device * upon call.
1971 */
1972static void
1973dev_action_start(fsm_instance * fi, int event, void *arg)
1974{
1975 struct net_device *dev = (struct net_device *) arg;
1976 struct ctc_priv *privptr = dev->priv;
1977 int direction;
1978
1979 DBF_TEXT(setup, 3, __FUNCTION__);
1980 fsm_deltimer(&privptr->restart_timer);
1981 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
1982 for (direction = READ; direction <= WRITE; direction++) {
1983 struct channel *ch = privptr->channel[direction];
1984 fsm_event(ch->fsm, CH_EVENT_START, ch);
1985 }
1986}
1987
1988/**
 1989 * Shut down channels by sending CH_EVENT_STOP to each channel.
 1990 *
 1991 * @param fi    An instance of an interface statemachine.
 1992 * @param event The event that just happened.
 1993 * @param arg   Generic pointer, cast from struct net_device * upon call.
1994 */
1995static void
1996dev_action_stop(fsm_instance * fi, int event, void *arg)
1997{
1998 struct net_device *dev = (struct net_device *) arg;
1999 struct ctc_priv *privptr = dev->priv;
2000 int direction;
2001
2002 DBF_TEXT(trace, 3, __FUNCTION__);
2003 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2004 for (direction = READ; direction <= WRITE; direction++) {
2005 struct channel *ch = privptr->channel[direction];
2006 fsm_event(ch->fsm, CH_EVENT_STOP, ch);
2007 }
2008}
 
 2009static void
2010dev_action_restart(fsm_instance *fi, int event, void *arg)
2011{
2012 struct net_device *dev = (struct net_device *)arg;
2013 struct ctc_priv *privptr = dev->priv;
2014
2015 DBF_TEXT(trace, 3, __FUNCTION__);
2016 ctc_pr_debug("%s: Restarting\n", dev->name);
2017 dev_action_stop(fi, event, arg);
2018 fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
2019 fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
2020 DEV_EVENT_START, dev);
2021}
2022
2023/**
2024 * Called from channel statemachine
2025 * when a channel is up and running.
2026 *
2027 * @param fi An instance of an interface statemachine.
 2028 * @param event The event that just happened.
 2029 * @param arg   Generic pointer, cast from struct net_device * upon call.
2030 */
2031static void
2032dev_action_chup(fsm_instance * fi, int event, void *arg)
2033{
2034 struct net_device *dev = (struct net_device *) arg;
2035
2036 DBF_TEXT(trace, 3, __FUNCTION__);
2037 switch (fsm_getstate(fi)) {
2038 case DEV_STATE_STARTWAIT_RXTX:
2039 if (event == DEV_EVENT_RXUP)
2040 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2041 else
2042 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2043 break;
2044 case DEV_STATE_STARTWAIT_RX:
2045 if (event == DEV_EVENT_RXUP) {
2046 fsm_newstate(fi, DEV_STATE_RUNNING);
2047 ctc_pr_info("%s: connected with remote side\n",
2048 dev->name);
2049 ctc_clear_busy(dev);
2050 }
2051 break;
2052 case DEV_STATE_STARTWAIT_TX:
2053 if (event == DEV_EVENT_TXUP) {
2054 fsm_newstate(fi, DEV_STATE_RUNNING);
2055 ctc_pr_info("%s: connected with remote side\n",
2056 dev->name);
2057 ctc_clear_busy(dev);
2058 }
2059 break;
2060 case DEV_STATE_STOPWAIT_TX:
2061 if (event == DEV_EVENT_RXUP)
2062 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2063 break;
2064 case DEV_STATE_STOPWAIT_RX:
2065 if (event == DEV_EVENT_TXUP)
2066 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2067 break;
2068 }
2069}
2070
2071/**
2072 * Called from channel statemachine
 2073 * when a channel has been shut down.
 2074 *
 2075 * @param fi    An instance of an interface statemachine.
 2076 * @param event The event that just happened.
 2077 * @param arg   Generic pointer, cast from struct net_device * upon call.
2078 */
2079static void
2080dev_action_chdown(fsm_instance * fi, int event, void *arg)
2081{
2082
2083 DBF_TEXT(trace, 3, __FUNCTION__);
2084 switch (fsm_getstate(fi)) {
2085 case DEV_STATE_RUNNING:
2086 if (event == DEV_EVENT_TXDOWN)
2087 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2088 else
2089 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2090 break;
2091 case DEV_STATE_STARTWAIT_RX:
2092 if (event == DEV_EVENT_TXDOWN)
2093 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2094 break;
2095 case DEV_STATE_STARTWAIT_TX:
2096 if (event == DEV_EVENT_RXDOWN)
2097 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2098 break;
2099 case DEV_STATE_STOPWAIT_RXTX:
2100 if (event == DEV_EVENT_TXDOWN)
2101 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2102 else
2103 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2104 break;
2105 case DEV_STATE_STOPWAIT_RX:
2106 if (event == DEV_EVENT_RXDOWN)
2107 fsm_newstate(fi, DEV_STATE_STOPPED);
2108 break;
2109 case DEV_STATE_STOPWAIT_TX:
2110 if (event == DEV_EVENT_TXDOWN)
2111 fsm_newstate(fi, DEV_STATE_STOPPED);
2112 break;
2113 }
2114}
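/*
 * Startup path of the device state machine as implemented by
 * dev_action_chup() above (the STOPWAIT_* states mirror it for
 * shutdown):
 *
 *                    RXUP                 TXUP
 *  STARTWAIT_RXTX ---------> STARTWAIT_TX ------> RUNNING
 *        |                                           ^
 *        |  TXUP                  RXUP               |
 *        +-----------> STARTWAIT_RX -----------------+
 */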
2115
2116static const fsm_node dev_fsm[] = {
2117 {DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
2118
2119 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2120 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2121 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2122 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2123
2124 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2125 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2126 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2127 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2128 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2129
2130 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2131 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2132 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2133 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2134 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2135
2136 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2137 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2138 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2139 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2140 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2141 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2142
2143 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2144 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2145 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2146 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2147 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2148
2149 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2150 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2151 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2152 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2153 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2154
2155 {DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2156 {DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2157 {DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2158 {DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop },
2159 {DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop },
2160 {DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
2161};
2162
 2163static const int DEV_FSM_LEN = ARRAY_SIZE(dev_fsm);
2164
2165/**
2166 * Transmit a packet.
2167 * This is a helper function for ctc_tx().
2168 *
2169 * @param ch Channel to be used for sending.
2170 * @param skb Pointer to struct sk_buff of packet to send.
 2171 * The link-level header has already been set up
2172 * by ctc_tx().
2173 *
 2174 * @return 0 on success, -ERRNO on failure.
2175 */
2176static int
2177transmit_skb(struct channel *ch, struct sk_buff *skb)
2178{
2179 unsigned long saveflags;
2180 struct ll_header header;
2181 int rc = 0;
2182
2183 DBF_TEXT(trace, 5, __FUNCTION__);
2184 /* we need to acquire the lock for testing the state
2185 * otherwise we can have an IRQ changing the state to
2186 * TXIDLE after the test but before acquiring the lock.
2187 */
2188 spin_lock_irqsave(&ch->collect_lock, saveflags);
2189 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
2190 int l = skb->len + LL_HEADER_LENGTH;
2191
2192 if (ch->collect_len + l > ch->max_bufsize - 2) {
2193 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2194 return -EBUSY;
2195 } else {
2196 atomic_inc(&skb->users);
2197 header.length = l;
2198 header.type = skb->protocol;
2199 header.unused = 0;
2200 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2201 LL_HEADER_LENGTH);
2202 skb_queue_tail(&ch->collect_queue, skb);
2203 ch->collect_len += l;
2204 }
2205 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2206 } else {
2207 __u16 block_len;
2208 int ccw_idx;
2209 struct sk_buff *nskb;
2210 unsigned long hi;
2211 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2212 /**
 2213 * Protect skb against being freed by upper
2214 * layers.
2215 */
2216 atomic_inc(&skb->users);
2217 ch->prof.txlen += skb->len;
2218 header.length = skb->len + LL_HEADER_LENGTH;
2219 header.type = skb->protocol;
2220 header.unused = 0;
2221 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2222 LL_HEADER_LENGTH);
2223 block_len = skb->len + 2;
2224 *((__u16 *) skb_push(skb, 2)) = block_len;
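		/*
		 * Buffer layout produced by the two skb_push() calls
		 * above; block_len counts the 2-byte length field
		 * itself plus LL header and payload:
		 *
		 *  +-----------+---------------------+-------------+
		 *  | block_len | ll_header (length,  | payload     |
		 *  | (2 bytes) | type, unused)       |             |
		 *  +-----------+---------------------+-------------+
		 */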
2225
 2226 /**
 2227 * IDAL support in CTC is broken, so we must handle skbs above 2G
 2228 * ourselves: data ending at or above the 2G boundary (address
 2229 * bit 31 set) is copied into a GFP_DMA buffer below 2G. */
2230 hi = ((unsigned long)skb_tail_pointer(skb) +
2231 LL_HEADER_LENGTH) >> 31;
2232 if (hi) {
2233 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
2234 if (!nskb) {
2235 atomic_dec(&skb->users);
2236 skb_pull(skb, LL_HEADER_LENGTH + 2);
2237 ctc_clear_busy(ch->netdev);
2238 return -ENOMEM;
2239 } else {
2240 memcpy(skb_put(nskb, skb->len),
2241 skb->data, skb->len);
2242 atomic_inc(&nskb->users);
2243 atomic_dec(&skb->users);
2244 dev_kfree_skb_irq(skb);
2245 skb = nskb;
2246 }
2247 }
2248
2249 ch->ccw[4].count = block_len;
2250 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
2251 /**
2252 * idal allocation failed, try via copying to
2253 * trans_skb. trans_skb usually has a pre-allocated
2254 * idal.
2255 */
2256 if (ctc_checkalloc_buffer(ch, 1)) {
2257 /**
2258 * Remove our header. It gets added
2259 * again on retransmit.
2260 */
2261 atomic_dec(&skb->users);
2262 skb_pull(skb, LL_HEADER_LENGTH + 2);
2263 ctc_clear_busy(ch->netdev);
2264 return -EBUSY;
2265 }
2266
2267 skb_reset_tail_pointer(ch->trans_skb);
2268 ch->trans_skb->len = 0;
2269 ch->ccw[1].count = skb->len;
2270 skb_copy_from_linear_data(skb, skb_put(ch->trans_skb,
2271 skb->len),
2272 skb->len);
2273 atomic_dec(&skb->users);
2274 dev_kfree_skb_irq(skb);
2275 ccw_idx = 0;
2276 } else {
2277 skb_queue_tail(&ch->io_queue, skb);
2278 ccw_idx = 3;
2279 }
2280 ch->retry = 0;
2281 fsm_newstate(ch->fsm, CH_STATE_TX);
2282 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
2283 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
2284 ch->prof.send_stamp = current_kernel_time();
2285 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
2286 (unsigned long) ch, 0xff, 0);
2287 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
2288 if (ccw_idx == 3)
2289 ch->prof.doios_single++;
2290 if (rc != 0) {
2291 fsm_deltimer(&ch->timer);
2292 ccw_check_return_code(ch, rc, "single skb TX");
2293 if (ccw_idx == 3)
2294 skb_dequeue_tail(&ch->io_queue);
2295 /**
2296 * Remove our header. It gets added
2297 * again on retransmit.
2298 */
2299 skb_pull(skb, LL_HEADER_LENGTH + 2);
2300 } else {
2301 if (ccw_idx == 0) {
2302 struct net_device *dev = ch->netdev;
2303 struct ctc_priv *privptr = dev->priv;
2304 privptr->stats.tx_packets++;
2305 privptr->stats.tx_bytes +=
2306 skb->len - LL_HEADER_LENGTH;
2307 }
2308 }
2309 }
2310
2311 ctc_clear_busy(ch->netdev);
2312 return rc;
2313}
2314
2315/**
2316 * Interface API for upper network layers
2317 *****************************************************************************/
2318
2319/**
2320 * Open an interface.
2321 * Called from generic network layer when ifconfig up is run.
2322 *
2323 * @param dev Pointer to interface struct.
2324 *
2325 * @return 0 on success, -ERRNO on failure. (Never fails.)
2326 */
2327static int
2328ctc_open(struct net_device * dev)
2329{
2330 DBF_TEXT(trace, 5, __FUNCTION__);
2331 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
2332 return 0;
2333}
2334
2335/**
2336 * Close an interface.
2337 * Called from generic network layer when ifconfig down is run.
2338 *
2339 * @param dev Pointer to interface struct.
2340 *
2341 * @return 0 on success, -ERRNO on failure. (Never fails.)
2342 */
2343static int
2344ctc_close(struct net_device * dev)
2345{
2346 DBF_TEXT(trace, 5, __FUNCTION__);
2347 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
2348 return 0;
2349}
2350
2351/**
2352 * Start transmission of a packet.
2353 * Called from generic network device layer.
2354 *
2355 * @param skb Pointer to buffer containing the packet.
2356 * @param dev Pointer to interface struct.
2357 *
2358 * @return 0 if packet consumed, !0 if packet rejected.
 2359 * Note: If we return !0, then the packet is freed by
 2360 * the generic network layer.
2361 */
2362static int
2363ctc_tx(struct sk_buff *skb, struct net_device * dev)
2364{
2365 int rc = 0;
2366 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2367
2368 DBF_TEXT(trace, 5, __FUNCTION__);
2369 /**
2370 * Some sanity checks ...
2371 */
2372 if (skb == NULL) {
2373 ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
2374 privptr->stats.tx_dropped++;
2375 return 0;
2376 }
2377 if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
2378 ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
2379 dev->name, LL_HEADER_LENGTH + 2);
2380 dev_kfree_skb(skb);
2381 privptr->stats.tx_dropped++;
2382 return 0;
2383 }
2384
2385 /**
2386 * If channels are not running, try to restart them
2387 * and throw away packet.
2388 */
2389 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
2390 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
2391 dev_kfree_skb(skb);
2392 privptr->stats.tx_dropped++;
2393 privptr->stats.tx_errors++;
2394 privptr->stats.tx_carrier_errors++;
2395 return 0;
2396 }
2397
2398 if (ctc_test_and_set_busy(dev))
2399 return -EBUSY;
2400
2401 dev->trans_start = jiffies;
2402 if (transmit_skb(privptr->channel[WRITE], skb) != 0)
2403 rc = 1;
2404 return rc;
2405}
2406
2407/**
2408 * Sets MTU of an interface.
2409 *
2410 * @param dev Pointer to interface struct.
2411 * @param new_mtu The new MTU to use for this interface.
2412 *
2413 * @return 0 on success, -EINVAL if MTU is out of valid range.
2414 * (valid range is 576 .. 65527). If VM is on the
2415 * remote side, maximum MTU is 32760, however this is
2416 * <em>not</em> checked here.
2417 */
2418static int
2419ctc_change_mtu(struct net_device * dev, int new_mtu)
2420{
2421 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2422
2423 DBF_TEXT(trace, 3, __FUNCTION__);
2424 if ((new_mtu < 576) || (new_mtu > 65527) ||
2425 (new_mtu > (privptr->channel[READ]->max_bufsize -
2426 LL_HEADER_LENGTH - 2)))
2427 return -EINVAL;
2428 dev->mtu = new_mtu;
2429 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2430 return 0;
2431}
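/*
 * Worked example for the check above, assuming the usual defaults of
 * CTC_BUFSIZE_DEFAULT = 32768 and a 6-byte ll_header: the largest
 * accepted MTU is 32768 - 6 - 2 = 32760, which matches the VM-side
 * maximum mentioned in the comment before this function.
 */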
2432
2433/**
2434 * Returns interface statistics of a device.
2435 *
2436 * @param dev Pointer to interface struct.
2437 *
2438 * @return Pointer to stats struct of this interface.
2439 */
2440static struct net_device_stats *
2441ctc_stats(struct net_device * dev)
2442{
2443 return &((struct ctc_priv *) dev->priv)->stats;
2444}
2445
2446/*
2447 * sysfs attributes
2448 */
2449
2450static ssize_t
2451buffer_show(struct device *dev, struct device_attribute *attr, char *buf)
2452{
2453 struct ctc_priv *priv;
2454
2455 priv = dev->driver_data;
2456 if (!priv)
2457 return -ENODEV;
2458 return sprintf(buf, "%d\n",
2459 priv->buffer_size);
2460}
2461
2462static ssize_t
2463buffer_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2464{
2465 struct ctc_priv *priv;
2466 struct net_device *ndev;
 2467 unsigned int bs1;
2468 char buffer[16];
2469
2470 DBF_TEXT(trace, 3, __FUNCTION__);
2471 DBF_TEXT(trace, 3, buf);
2472 priv = dev->driver_data;
2473 if (!priv) {
2474 DBF_TEXT(trace, 3, "bfnopriv");
2475 return -ENODEV;
2476 }
2477
 2478 if (sscanf(buf, "%u", &bs1) != 1) goto einval;
2479 if (bs1 > CTC_BUFSIZE_LIMIT)
2480 goto einval;
2481 if (bs1 < (576 + LL_HEADER_LENGTH + 2))
2482 goto einval;
2483 priv->buffer_size = bs1; // just to overwrite the default
2484
2485 ndev = priv->channel[READ]->netdev;
2486 if (!ndev) {
2487 DBF_TEXT(trace, 3, "bfnondev");
2488 return -ENODEV;
2489 }
2490
2491 if ((ndev->flags & IFF_RUNNING) &&
2492 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
2493 goto einval;
2494
2495 priv->channel[READ]->max_bufsize = bs1;
2496 priv->channel[WRITE]->max_bufsize = bs1;
2497 if (!(ndev->flags & IFF_RUNNING))
2498 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
2499 priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2500 priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2501
 2502 sprintf(buffer, "%d", priv->buffer_size);
2503 DBF_TEXT(trace, 3, buffer);
2504 return count;
2505
2506einval:
2507 DBF_TEXT(trace, 3, "buff_err");
2508 return -EINVAL;
2509}
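/*
 * Usage sketch for the buffer attribute above (the device path is
 * illustrative only):
 *
 *	echo 32768 > /sys/devices/.../0.0.0600/buffer
 *	cat /sys/devices/.../0.0.0600/buffer
 *
 * A write is rejected with -EINVAL if the value is larger than
 * CTC_BUFSIZE_LIMIT, too small for a 576-byte MTU, or, on a running
 * interface, too small for the current MTU.
 */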
2510
2511static ssize_t
2512loglevel_show(struct device *dev, struct device_attribute *attr, char *buf)
2513{
2514 return sprintf(buf, "%d\n", loglevel);
2515}
2516
2517static ssize_t
2518loglevel_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2519{
2520 int ll1;
2521
2522 DBF_TEXT(trace, 5, __FUNCTION__);
 2523 if (sscanf(buf, "%i", &ll1) != 1) return -EINVAL;
2524
2525 if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
2526 return -EINVAL;
2527 loglevel = ll1;
2528 return count;
2529}
2530
2531static void
2532ctc_print_statistics(struct ctc_priv *priv)
2533{
2534 char *sbuf;
2535 char *p;
2536
2537 DBF_TEXT(trace, 4, __FUNCTION__);
2538 if (!priv)
2539 return;
2540 sbuf = kmalloc(2048, GFP_KERNEL);
2541 if (sbuf == NULL)
2542 return;
2543 p = sbuf;
2544
2545 p += sprintf(p, " Device FSM state: %s\n",
2546 fsm_getstate_str(priv->fsm));
2547 p += sprintf(p, " RX channel FSM state: %s\n",
2548 fsm_getstate_str(priv->channel[READ]->fsm));
2549 p += sprintf(p, " TX channel FSM state: %s\n",
2550 fsm_getstate_str(priv->channel[WRITE]->fsm));
2551 p += sprintf(p, " Max. TX buffer used: %ld\n",
2552 priv->channel[WRITE]->prof.maxmulti);
2553 p += sprintf(p, " Max. chained SKBs: %ld\n",
2554 priv->channel[WRITE]->prof.maxcqueue);
2555 p += sprintf(p, " TX single write ops: %ld\n",
2556 priv->channel[WRITE]->prof.doios_single);
2557 p += sprintf(p, " TX multi write ops: %ld\n",
2558 priv->channel[WRITE]->prof.doios_multi);
 2559 p += sprintf(p, " Net bytes written: %ld\n",
2560 priv->channel[WRITE]->prof.txlen);
2561 p += sprintf(p, " Max. TX IO-time: %ld\n",
2562 priv->channel[WRITE]->prof.tx_time);
2563
2564 ctc_pr_debug("Statistics for %s:\n%s",
2565 priv->channel[WRITE]->netdev->name, sbuf);
2566 kfree(sbuf);
2567 return;
2568}
2569
2570static ssize_t
2571stats_show(struct device *dev, struct device_attribute *attr, char *buf)
2572{
2573 struct ctc_priv *priv = dev->driver_data;
2574 if (!priv)
2575 return -ENODEV;
2576 ctc_print_statistics(priv);
2577 return sprintf(buf, "0\n");
2578}
2579
2580static ssize_t
2581stats_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2582{
2583 struct ctc_priv *priv = dev->driver_data;
2584 if (!priv)
2585 return -ENODEV;
2586 /* Reset statistics */
2587 memset(&priv->channel[WRITE]->prof, 0,
2588 sizeof(priv->channel[WRITE]->prof));
2589 return count;
2590}
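/*
 * Note that the stats attribute is side effecting: a read dumps the
 * TX profile to the kernel log via ctc_print_statistics() and returns
 * just "0", while any write resets the profiling counters.
 */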
2591
2592static void
2593ctc_netdev_unregister(struct net_device * dev)
2594{
2595 struct ctc_priv *privptr;
2596
2597 if (!dev)
2598 return;
2599 privptr = (struct ctc_priv *) dev->priv;
2600 unregister_netdev(dev);
2601}
2602
2603static int
2604ctc_netdev_register(struct net_device * dev)
2605{
2606 return register_netdev(dev);
2607}
2608
2609static void
2610ctc_free_netdevice(struct net_device * dev, int free_dev)
2611{
2612 struct ctc_priv *privptr;
2613 if (!dev)
2614 return;
2615 privptr = dev->priv;
2616 if (privptr) {
2617 if (privptr->fsm)
2618 kfree_fsm(privptr->fsm);
2619 kfree(privptr);
2620 }
2621#ifdef MODULE
2622 if (free_dev)
2623 free_netdev(dev);
2624#endif
2625}
2626
2627static ssize_t
2628ctc_proto_show(struct device *dev, struct device_attribute *attr, char *buf)
2629{
2630 struct ctc_priv *priv;
2631
2632 priv = dev->driver_data;
2633 if (!priv)
2634 return -ENODEV;
2635
2636 return sprintf(buf, "%d\n", priv->protocol);
2637}
2638
2639static ssize_t
2640ctc_proto_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2641{
2642 struct ctc_priv *priv;
2643 int value;
2644
2645 DBF_TEXT(trace, 3, __FUNCTION__);
2646 pr_debug("%s() called\n", __FUNCTION__);
2647
2648 priv = dev->driver_data;
2649 if (!priv)
2650 return -ENODEV;
 2651 if (sscanf(buf, "%u", &value) != 1) return -EINVAL;
2652 if (!((value == CTC_PROTO_S390) ||
2653 (value == CTC_PROTO_LINUX) ||
2654 (value == CTC_PROTO_OS390)))
2655 return -EINVAL;
2656 priv->protocol = value;
2657
2658 return count;
2659}
2660
2661static ssize_t
2662ctc_type_show(struct device *dev, struct device_attribute *attr, char *buf)
2663{
2664 struct ccwgroup_device *cgdev;
2665
2666 cgdev = to_ccwgroupdev(dev);
2667 if (!cgdev)
2668 return -ENODEV;
2669
2670 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
2671}
2672
2673static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
2674static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
2675static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
2676
2677static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
2678static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
2679
2680static struct attribute *ctc_attr[] = {
2681 &dev_attr_protocol.attr,
2682 &dev_attr_type.attr,
2683 &dev_attr_buffer.attr,
2684 NULL,
2685};
2686
2687static struct attribute_group ctc_attr_group = {
2688 .attrs = ctc_attr,
2689};
2690
2691static int
2692ctc_add_attributes(struct device *dev)
2693{
2694 int rc;
2695
2696 rc = device_create_file(dev, &dev_attr_loglevel);
2697 if (rc)
2698 goto out;
2699 rc = device_create_file(dev, &dev_attr_stats);
2700 if (!rc)
2701 goto out;
2702 device_remove_file(dev, &dev_attr_loglevel);
2703out:
2704 return rc;
2705}
2706
2707static void
2708ctc_remove_attributes(struct device *dev)
2709{
2710 device_remove_file(dev, &dev_attr_stats);
2711 device_remove_file(dev, &dev_attr_loglevel);
2712}
2713
2714static int
2715ctc_add_files(struct device *dev)
2716{
2717 pr_debug("%s() called\n", __FUNCTION__);
2718
2719 return sysfs_create_group(&dev->kobj, &ctc_attr_group);
2720}
2721
2722static void
2723ctc_remove_files(struct device *dev)
2724{
2725 pr_debug("%s() called\n", __FUNCTION__);
2726
2727 sysfs_remove_group(&dev->kobj, &ctc_attr_group);
2728}
2729
2730/**
 2731 * Add ctc-specific attributes.
 2732 * Allocate ctc private data.
2733 *
2734 * @param cgdev pointer to ccwgroup_device just added
2735 *
2736 * @returns 0 on success, !0 on failure.
2737 */
2738static int
2739ctc_probe_device(struct ccwgroup_device *cgdev)
2740{
2741 struct ctc_priv *priv;
2742 int rc;
2743 char buffer[16];
2744
2745 pr_debug("%s() called\n", __FUNCTION__);
2746 DBF_TEXT(setup, 3, __FUNCTION__);
2747
2748 if (!get_device(&cgdev->dev))
2749 return -ENODEV;
2750
2751 priv = kzalloc(sizeof(struct ctc_priv), GFP_KERNEL);
2752 if (!priv) {
2753 ctc_pr_err("%s: Out of memory\n", __func__);
2754 put_device(&cgdev->dev);
2755 return -ENOMEM;
2756 }
2757
2758 rc = ctc_add_files(&cgdev->dev);
2759 if (rc) {
2760 kfree(priv);
2761 put_device(&cgdev->dev);
2762 return rc;
2763 }
2764 priv->buffer_size = CTC_BUFSIZE_DEFAULT;
2765 cgdev->cdev[0]->handler = ctc_irq_handler;
2766 cgdev->cdev[1]->handler = ctc_irq_handler;
2767 cgdev->dev.driver_data = priv;
2768
2769 sprintf(buffer, "%p", priv);
2770 DBF_TEXT(data, 3, buffer);
2771
2772 sprintf(buffer, "%u", (unsigned int)sizeof(struct ctc_priv));
2773 DBF_TEXT(data, 3, buffer);
2774
2775 sprintf(buffer, "%p", &channels);
2776 DBF_TEXT(data, 3, buffer);
2777
2778 sprintf(buffer, "%u", (unsigned int)sizeof(struct channel));
2779 DBF_TEXT(data, 3, buffer);
2780
2781 return 0;
2782}
2783
2784/**
2785 * Device setup function called by alloc_netdev().
2786 *
2787 * @param dev Device to be setup.
2788 */
2789void ctc_init_netdevice(struct net_device * dev)
2790{
2791 DBF_TEXT(setup, 3, __FUNCTION__);
2792
2793 if (dev->mtu == 0)
2794 dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
2795 dev->hard_start_xmit = ctc_tx;
2796 dev->open = ctc_open;
2797 dev->stop = ctc_close;
2798 dev->get_stats = ctc_stats;
2799 dev->change_mtu = ctc_change_mtu;
2800 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2801 dev->addr_len = 0;
2802 dev->type = ARPHRD_SLIP;
2803 dev->tx_queue_len = 100;
2804 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2805}
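/*
 * These are the pre-net_device_ops style direct assignments of the
 * netdev callbacks.  The default MTU mirrors the buffer attribute:
 * buffer size minus LL_HEADER_LENGTH minus the 2-byte block length
 * field, so MTU and buffer size stay consistent.
 */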
2806
2807
2808/**
2809 *
 2810 * Set up an interface.
2811 *
2812 * @param cgdev Device to be setup.
2813 *
2814 * @returns 0 on success, !0 on failure.
2815 */
2816static int
2817ctc_new_device(struct ccwgroup_device *cgdev)
2818{
2819 char read_id[CTC_ID_SIZE];
2820 char write_id[CTC_ID_SIZE];
2821 int direction;
2822 enum channel_types type;
2823 struct ctc_priv *privptr;
2824 struct net_device *dev;
2825 int ret;
2826 char buffer[16];
2827
2828 pr_debug("%s() called\n", __FUNCTION__);
2829 DBF_TEXT(setup, 3, __FUNCTION__);
2830
2831 privptr = cgdev->dev.driver_data;
2832 if (!privptr)
2833 return -ENODEV;
2834
2835 sprintf(buffer, "%d", privptr->buffer_size);
2836 DBF_TEXT(setup, 3, buffer);
2837
2838 type = get_channel_type(&cgdev->cdev[0]->id);
2839
2840 snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
2841 snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
2842
2843 if (add_channel(cgdev->cdev[0], type))
2844 return -ENOMEM;
2845 if (add_channel(cgdev->cdev[1], type))
2846 return -ENOMEM;
2847
2848 ret = ccw_device_set_online(cgdev->cdev[0]);
2849 if (ret != 0) {
2850 printk(KERN_WARNING
2851 "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
2852 }
2853
2854 ret = ccw_device_set_online(cgdev->cdev[1]);
2855 if (ret != 0) {
2856 printk(KERN_WARNING
2857 "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
2858 }
2859
2860 dev = alloc_netdev(0, "ctc%d", ctc_init_netdevice);
2861 if (!dev) {
2862 ctc_pr_warn("ctc_init_netdevice failed\n");
2863 goto out;
2864 }
2865 dev->priv = privptr;
2866
2867 privptr->fsm = init_fsm("ctcdev", dev_state_names,
2868 dev_event_names, CTC_NR_DEV_STATES, CTC_NR_DEV_EVENTS,
2869 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2870 if (privptr->fsm == NULL) {
2871 free_netdev(dev);
2872 goto out;
2873 }
2874 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2875 fsm_settimer(privptr->fsm, &privptr->restart_timer);
2876
2877 for (direction = READ; direction <= WRITE; direction++) {
2878 privptr->channel[direction] =
2879 channel_get(type, direction == READ ? read_id : write_id,
2880 direction);
2881 if (privptr->channel[direction] == NULL) {
2882 if (direction == WRITE)
2883 channel_free(privptr->channel[READ]);
2884
2885 ctc_free_netdevice(dev, 1);
2886 goto out;
2887 }
2888 privptr->channel[direction]->netdev = dev;
2889 privptr->channel[direction]->protocol = privptr->protocol;
2890 privptr->channel[direction]->max_bufsize = privptr->buffer_size;
2891 }
2892 /* sysfs magic */
2893 SET_NETDEV_DEV(dev, &cgdev->dev);
2894
2895 if (ctc_netdev_register(dev) != 0) {
2896 ctc_free_netdevice(dev, 1);
2897 goto out;
2898 }
2899
2900 if (ctc_add_attributes(&cgdev->dev)) {
2901 ctc_netdev_unregister(dev);
2902 dev->priv = NULL;
2903 ctc_free_netdevice(dev, 1);
2904 goto out;
2905 }
2906
2907 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
2908
2909 print_banner();
2910
2911 ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
2912 dev->name, privptr->channel[READ]->id,
2913 privptr->channel[WRITE]->id, privptr->protocol);
2914
2915 return 0;
2916out:
2917 ccw_device_set_offline(cgdev->cdev[1]);
2918 ccw_device_set_offline(cgdev->cdev[0]);
2919
2920 return -ENODEV;
2921}
2922
2923/**
2924 * Shutdown an interface.
2925 *
2926 * @param cgdev Device to be shut down.
2927 *
2928 * @returns 0 on success, !0 on failure.
2929 */
2930static int
2931ctc_shutdown_device(struct ccwgroup_device *cgdev)
2932{
2933 struct ctc_priv *priv;
2934 struct net_device *ndev;
2935
2936 DBF_TEXT(setup, 3, __FUNCTION__);
2937 pr_debug("%s() called\n", __FUNCTION__);
2938
2939
2940 priv = cgdev->dev.driver_data;
2941 ndev = NULL;
2942 if (!priv)
2943 return -ENODEV;
2944
2945 if (priv->channel[READ]) {
2946 ndev = priv->channel[READ]->netdev;
2947
2948 /* Close the device */
2949 ctc_close(ndev);
 2950 		ndev->flags &= ~IFF_RUNNING;
2951
2952 ctc_remove_attributes(&cgdev->dev);
2953
2954 channel_free(priv->channel[READ]);
2955 }
2956 if (priv->channel[WRITE])
2957 channel_free(priv->channel[WRITE]);
2958
2959 if (ndev) {
2960 ctc_netdev_unregister(ndev);
2961 ndev->priv = NULL;
2962 ctc_free_netdevice(ndev, 1);
2963 }
2964
2965 if (priv->fsm)
2966 kfree_fsm(priv->fsm);
2967
2968 ccw_device_set_offline(cgdev->cdev[1]);
2969 ccw_device_set_offline(cgdev->cdev[0]);
2970
2971 if (priv->channel[READ])
2972 channel_remove(priv->channel[READ]);
2973 if (priv->channel[WRITE])
2974 channel_remove(priv->channel[WRITE]);
2975 priv->channel[READ] = priv->channel[WRITE] = NULL;
2976
2977 return 0;
2978
2979}
2980
2981static void
2982ctc_remove_device(struct ccwgroup_device *cgdev)
2983{
2984 struct ctc_priv *priv;
2985
2986 pr_debug("%s() called\n", __FUNCTION__);
2987 DBF_TEXT(setup, 3, __FUNCTION__);
2988
2989 priv = cgdev->dev.driver_data;
2990 if (!priv)
2991 return;
2992 if (cgdev->state == CCWGROUP_ONLINE)
2993 ctc_shutdown_device(cgdev);
2994 ctc_remove_files(&cgdev->dev);
2995 cgdev->dev.driver_data = NULL;
2996 kfree(priv);
2997 put_device(&cgdev->dev);
2998}
2999
3000static struct ccwgroup_driver ctc_group_driver = {
3001 .owner = THIS_MODULE,
3002 .name = "ctc",
3003 .max_slaves = 2,
 3004 	.driver_id = 0xC3E3C3,	/* "CTC" in EBCDIC */
3005 .probe = ctc_probe_device,
3006 .remove = ctc_remove_device,
3007 .set_online = ctc_new_device,
3008 .set_offline = ctc_shutdown_device,
3009};
3010
3011/**
3012 * Module related routines
3013 *****************************************************************************/
3014
3015/**
3016 * Prepare to be unloaded. Free IRQ's and release all resources.
3017 * This is called just before this module is unloaded. It is
 3018 * <em>not</em> called if the usage count is !0, so we don't need to check
3019 * for that.
3020 */
3021static void __exit
3022ctc_exit(void)
3023{
3024 DBF_TEXT(setup, 3, __FUNCTION__);
3025 unregister_cu3088_discipline(&ctc_group_driver);
3026 ctc_unregister_dbf_views();
3027 ctc_pr_info("CTC driver unloaded\n");
3028}
3029
3030/**
3031 * Initialize module.
3032 * This is called just after the module is loaded.
3033 *
3034 * @return 0 on success, !0 on error.
3035 */
3036static int __init
3037ctc_init(void)
3038{
3039 int ret = 0;
3040
3041 loglevel = CTC_LOGLEVEL_DEFAULT;
3042
3043 DBF_TEXT(setup, 3, __FUNCTION__);
3044
3045 print_banner();
3046
3047 ret = ctc_register_dbf_views();
 3048 	if (ret) {
3049 ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
3050 return ret;
3051 }
3052 ret = register_cu3088_discipline(&ctc_group_driver);
3053 if (ret) {
3054 ctc_unregister_dbf_views();
3055 }
3056 return ret;
3057}
3058
3059module_init(ctc_init);
3060module_exit(ctc_exit);
3061
3062/* --- This is the END my friend --- */
diff --git a/drivers/s390/net/ctcmain.h b/drivers/s390/net/ctcmain.h
deleted file mode 100644
index 7f305d119f3d..000000000000
--- a/drivers/s390/net/ctcmain.h
+++ /dev/null
@@ -1,270 +0,0 @@
1/*
2 * CTC / ESCON network driver
3 *
4 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 6 *		Peter Tiedemann (ptiedem@de.ibm.com)
7 *
8 *
9 * Documentation used:
10 * - Principles of Operation (IBM doc#: SA22-7201-06)
11 * - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
12 * - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
13 * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
 14 * - ESCON I/O Interface (IBM doc#: SA22-7202-02)
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2, or (at your option)
19 * any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29 *
30 */
31
32#ifndef _CTCMAIN_H_
33#define _CTCMAIN_H_
34
35#include <asm/ccwdev.h>
36#include <asm/ccwgroup.h>
37
38#include <linux/skbuff.h>
39#include <linux/netdevice.h>
40
41#include "fsm.h"
42#include "cu3088.h"
43
44
45/**
46 * CCW commands, used in this driver.
47 */
48#define CCW_CMD_WRITE 0x01
49#define CCW_CMD_READ 0x02
50#define CCW_CMD_SET_EXTENDED 0xc3
51#define CCW_CMD_PREPARE 0xe3
52
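These four command codes are what the driver places into its channel programs. A hedged sketch of how such a chain could be built with the s390 struct ccw1 from <asm/cio.h> (the helper name and two-slot layout are illustrative, not taken from this file):

/* Sketch: PREPARE chained to a single WRITE of one buffer. */
static void build_write_chain(struct ccw1 ccw[2], void *buf, __u16 len)
{
	ccw[0].cmd_code = CCW_CMD_PREPARE;
	ccw[0].flags = CCW_FLAG_CC | CCW_FLAG_SLI;	/* chain on */
	ccw[0].count = 0;
	ccw[0].cda = 0;

	ccw[1].cmd_code = CCW_CMD_WRITE;
	ccw[1].flags = CCW_FLAG_SLI;			/* end of chain */
	ccw[1].count = len;
	ccw[1].cda = (__u32)(unsigned long)buf;		/* 31-bit address */
}
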
53#define CTC_PROTO_S390 0
54#define CTC_PROTO_LINUX 1
55#define CTC_PROTO_OS390 3
56
57#define CTC_BUFSIZE_LIMIT 65535
58#define CTC_BUFSIZE_DEFAULT 32768
59
60#define CTC_TIMEOUT_5SEC 5000
61
62#define CTC_INITIAL_BLOCKLEN 2
63
64#define READ 0
65#define WRITE 1
66
 67#define CTC_ID_SIZE (BUS_ID_SIZE + 3)
68
69
70struct ctc_profile {
71 unsigned long maxmulti;
72 unsigned long maxcqueue;
73 unsigned long doios_single;
74 unsigned long doios_multi;
75 unsigned long txlen;
76 unsigned long tx_time;
77 struct timespec send_stamp;
78};
79
80/**
81 * Definition of one channel
82 */
83struct channel {
84
85 /**
86 * Pointer to next channel in list.
87 */
88 struct channel *next;
89 char id[CTC_ID_SIZE];
90 struct ccw_device *cdev;
91
92 /**
93 * Type of this channel.
 94 * CTC/A or ESCON for valid channels.
95 */
96 enum channel_types type;
97
98 /**
99 * Misc. flags. See CHANNEL_FLAGS_... below
100 */
101 __u32 flags;
102
103 /**
104 * The protocol of this channel
105 */
106 __u16 protocol;
107
108 /**
109 * I/O and irq related stuff
110 */
111 struct ccw1 *ccw;
112 struct irb *irb;
113
114 /**
115 * RX/TX buffer size
116 */
117 int max_bufsize;
118
119 /**
120 * Transmit/Receive buffer.
121 */
122 struct sk_buff *trans_skb;
123
124 /**
125 * Universal I/O queue.
126 */
127 struct sk_buff_head io_queue;
128
129 /**
130 * TX queue for collecting skb's during busy.
131 */
132 struct sk_buff_head collect_queue;
133
134 /**
135 * Amount of data in collect_queue.
136 */
137 int collect_len;
138
139 /**
140 * spinlock for collect_queue and collect_len
141 */
142 spinlock_t collect_lock;
143
144 /**
 145 * Timer for detecting unresponsive
146 * I/O operations.
147 */
148 fsm_timer timer;
149
150 /**
151 * Retry counter for misc. operations.
152 */
153 int retry;
154
155 /**
156 * The finite state machine of this channel
157 */
158 fsm_instance *fsm;
159
160 /**
161 * The corresponding net_device this channel
162 * belongs to.
163 */
164 struct net_device *netdev;
165
166 struct ctc_profile prof;
167
168 unsigned char *trans_skb_data;
169
170 __u16 logflags;
171};
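Note the ->next member: channels live on a driver-global singly linked list (the &channels pointer logged in the probe function earlier), which channel_get() scans by id and direction. A minimal, hypothetical lookup over such a list:

/* Hypothetical walk of the singly linked channel list. */
static struct channel *find_channel(struct channel *head, const char *id)
{
	struct channel *ch;

	for (ch = head; ch; ch = ch->next)
		if (!strcmp(ch->id, id))
			return ch;
	return NULL;
}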
172
173#define CHANNEL_FLAGS_READ 0
174#define CHANNEL_FLAGS_WRITE 1
175#define CHANNEL_FLAGS_INUSE 2
176#define CHANNEL_FLAGS_BUFSIZE_CHANGED 4
177#define CHANNEL_FLAGS_FAILED 8
178#define CHANNEL_FLAGS_WAITIRQ 16
179#define CHANNEL_FLAGS_RWMASK 1
180#define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK)
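CHANNEL_FLAGS_RWMASK is just bit 0, so a channel's direction is encoded in the lowest flag bit and CHANNEL_DIRECTION() extracts it. For illustration, the kind of claim-a-free-channel test a lookup routine would apply:

/* Illustration: match direction, then claim the slot
 * (callers serialize this externally). */
if (CHANNEL_DIRECTION(ch->flags) == direction &&
    !(ch->flags & CHANNEL_FLAGS_INUSE)) {
	ch->flags |= CHANNEL_FLAGS_INUSE;
	/* ... channel is ours until channel_free() clears the bit ... */
}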
181
182#define LOG_FLAG_ILLEGALPKT 1
183#define LOG_FLAG_ILLEGALSIZE 2
184#define LOG_FLAG_OVERRUN 4
185#define LOG_FLAG_NOMEM 8
186
187#define CTC_LOGLEVEL_INFO 1
188#define CTC_LOGLEVEL_NOTICE 2
189#define CTC_LOGLEVEL_WARN 4
190#define CTC_LOGLEVEL_EMERG 8
191#define CTC_LOGLEVEL_ERR 16
192#define CTC_LOGLEVEL_DEBUG 32
193#define CTC_LOGLEVEL_CRIT 64
194
195#define CTC_LOGLEVEL_DEFAULT \
196(CTC_LOGLEVEL_INFO | CTC_LOGLEVEL_NOTICE | CTC_LOGLEVEL_WARN | CTC_LOGLEVEL_CRIT)
197
198#define CTC_LOGLEVEL_MAX ((CTC_LOGLEVEL_CRIT<<1)-1)
199
200#define ctc_pr_debug(fmt, arg...) \
201do { if (loglevel & CTC_LOGLEVEL_DEBUG) printk(KERN_DEBUG fmt,##arg); } while (0)
202
203#define ctc_pr_info(fmt, arg...) \
204do { if (loglevel & CTC_LOGLEVEL_INFO) printk(KERN_INFO fmt,##arg); } while (0)
205
206#define ctc_pr_notice(fmt, arg...) \
207do { if (loglevel & CTC_LOGLEVEL_NOTICE) printk(KERN_NOTICE fmt,##arg); } while (0)
208
209#define ctc_pr_warn(fmt, arg...) \
210do { if (loglevel & CTC_LOGLEVEL_WARN) printk(KERN_WARNING fmt,##arg); } while (0)
211
212#define ctc_pr_emerg(fmt, arg...) \
213do { if (loglevel & CTC_LOGLEVEL_EMERG) printk(KERN_EMERG fmt,##arg); } while (0)
214
215#define ctc_pr_err(fmt, arg...) \
216do { if (loglevel & CTC_LOGLEVEL_ERR) printk(KERN_ERR fmt,##arg); } while (0)
217
218#define ctc_pr_crit(fmt, arg...) \
219do { if (loglevel & CTC_LOGLEVEL_CRIT) printk(KERN_CRIT fmt,##arg); } while (0)
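Each ctc_pr_* wrapper gates its printk on one bit of the global loglevel, so severities can be toggled at runtime. With the default mask (INFO | NOTICE | WARN | CRIT = 1 + 2 + 4 + 64 = 0x47) a debug message compiles in but stays silent; the device id below is made up:

/* Illustration with loglevel == CTC_LOGLEVEL_DEFAULT (0x47): */
ctc_pr_debug("ch-0.0.f000: irb received\n");	/* DEBUG bit off: dropped */
ctc_pr_warn("ch-0.0.f000: retry %d\n", 3);	/* WARN bit on: printed */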
220
221struct ctc_priv {
222 struct net_device_stats stats;
223 unsigned long tbusy;
224 /**
225 * The finite state machine of this interface.
226 */
227 fsm_instance *fsm;
228 /**
229 * The protocol of this device
230 */
231 __u16 protocol;
232 /**
233 * Timer for restarting after I/O Errors
234 */
235 fsm_timer restart_timer;
236
237 int buffer_size;
238
239 struct channel *channel[2];
240};
241
242/**
243 * Definition of our link level header.
244 */
245struct ll_header {
246 __u16 length;
247 __u16 type;
248 __u16 unused;
249};
250#define LL_HEADER_LENGTH (sizeof(struct ll_header))
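This 6-byte header also explains the default MTU computed in ctc_init_netdevice() earlier: CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2 = 32768 - 6 - 2 = 32760, the extra 2 bytes being the leading block-length word (CTC_INITIAL_BLOCKLEN). A hedged sketch of prepending the header to an outgoing skb (helper name hypothetical):

/* Sketch; assumes <linux/skbuff.h> and that the caller reserved
 * hard_header_len (LL_HEADER_LENGTH + 2 = 8 bytes) of headroom. */
static void push_ll_header(struct sk_buff *skb, __u16 type)
{
	struct ll_header *header;

	header = (struct ll_header *)skb_push(skb, LL_HEADER_LENGTH);
	header->length = skb->len;	/* payload plus this header */
	header->type = type;		/* e.g. ETH_P_IP */
	header->unused = 0;
}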
251
252/**
253 * Compatibility macros for busy handling
254 * of network devices.
255 */
256static __inline__ void
257ctc_clear_busy(struct net_device * dev)
258{
259 clear_bit(0, &(((struct ctc_priv *) dev->priv)->tbusy));
260 netif_wake_queue(dev);
261}
262
263static __inline__ int
264ctc_test_and_set_busy(struct net_device * dev)
265{
266 netif_stop_queue(dev);
267 return test_and_set_bit(0, &((struct ctc_priv *) dev->priv)->tbusy);
268}
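Together these two inlines implement the driver's private tbusy protocol: the xmit path stops the queue and sets the bit before touching the WRITE channel, then clears it (waking the queue) once the buffer is handed off. A hedged sketch of the calling pattern; my_tx stands in for a hard_start_xmit handler:

static int my_tx(struct sk_buff *skb, struct net_device *dev)
{
	if (ctc_test_and_set_busy(dev))
		return -EBUSY;		/* channel busy, queue stopped */
	/* ... append skb to the WRITE channel's queue, start I/O ... */
	ctc_clear_busy(dev);		/* wakes the netif queue again */
	return 0;
}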
269
270#endif
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth_core.h
index 8c6b72d05b1d..9485e363ca11 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth_core.h
@@ -1,40 +1,38 @@
1#ifndef __QETH_H__ 1/*
2#define __QETH_H__ 2 * drivers/s390/net/qeth_core.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#ifndef __QETH_CORE_H__
12#define __QETH_CORE_H__
3 13
4#include <linux/if.h> 14#include <linux/if.h>
5#include <linux/if_arp.h> 15#include <linux/if_arp.h>
6
7#include <linux/if_tr.h> 16#include <linux/if_tr.h>
8#include <linux/trdevice.h> 17#include <linux/trdevice.h>
9#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
10#include <linux/if_vlan.h> 19#include <linux/if_vlan.h>
11#include <linux/ctype.h> 20#include <linux/ctype.h>
21#include <linux/in6.h>
22#include <linux/bitops.h>
23#include <linux/seq_file.h>
24#include <linux/ethtool.h>
12 25
13#include <net/ipv6.h> 26#include <net/ipv6.h>
14#include <linux/in6.h>
15#include <net/if_inet6.h> 27#include <net/if_inet6.h>
16#include <net/addrconf.h> 28#include <net/addrconf.h>
17 29
18
19#include <linux/bitops.h>
20
21#include <asm/debug.h> 30#include <asm/debug.h>
22#include <asm/qdio.h> 31#include <asm/qdio.h>
23#include <asm/ccwdev.h> 32#include <asm/ccwdev.h>
24#include <asm/ccwgroup.h> 33#include <asm/ccwgroup.h>
25 34
26#include "qeth_mpc.h" 35#include "qeth_core_mpc.h"
27
28#ifdef CONFIG_QETH_IPV6
29#define QETH_VERSION_IPV6 ":IPv6"
30#else
31#define QETH_VERSION_IPV6 ""
32#endif
33#ifdef CONFIG_QETH_VLAN
34#define QETH_VERSION_VLAN ":VLAN"
35#else
36#define QETH_VERSION_VLAN ""
37#endif
38 36
39/** 37/**
40 * Debug Facility stuff 38 * Debug Facility stuff
@@ -60,15 +58,14 @@
60#define QETH_DBF_CONTROL_NAME "qeth_control" 58#define QETH_DBF_CONTROL_NAME "qeth_control"
61#define QETH_DBF_CONTROL_LEN 256 59#define QETH_DBF_CONTROL_LEN 256
62#define QETH_DBF_CONTROL_PAGES 8 60#define QETH_DBF_CONTROL_PAGES 8
63#define QETH_DBF_CONTROL_NR_AREAS 2 61#define QETH_DBF_CONTROL_NR_AREAS 1
64#define QETH_DBF_CONTROL_LEVEL 5 62#define QETH_DBF_CONTROL_LEVEL 5
65 63
66#define QETH_DBF_TRACE_NAME "qeth_trace" 64#define QETH_DBF_TRACE_NAME "qeth_trace"
67#define QETH_DBF_TRACE_LEN 8 65#define QETH_DBF_TRACE_LEN 8
68#define QETH_DBF_TRACE_PAGES 4 66#define QETH_DBF_TRACE_PAGES 4
69#define QETH_DBF_TRACE_NR_AREAS 2 67#define QETH_DBF_TRACE_NR_AREAS 1
70#define QETH_DBF_TRACE_LEVEL 3 68#define QETH_DBF_TRACE_LEVEL 3
71extern debug_info_t *qeth_dbf_trace;
72 69
73#define QETH_DBF_SENSE_NAME "qeth_sense" 70#define QETH_DBF_SENSE_NAME "qeth_sense"
74#define QETH_DBF_SENSE_LEN 64 71#define QETH_DBF_SENSE_LEN 64
@@ -79,72 +76,29 @@ extern debug_info_t *qeth_dbf_trace;
79#define QETH_DBF_QERR_NAME "qeth_qerr" 76#define QETH_DBF_QERR_NAME "qeth_qerr"
80#define QETH_DBF_QERR_LEN 8 77#define QETH_DBF_QERR_LEN 8
81#define QETH_DBF_QERR_PAGES 2 78#define QETH_DBF_QERR_PAGES 2
82#define QETH_DBF_QERR_NR_AREAS 2 79#define QETH_DBF_QERR_NR_AREAS 1
83#define QETH_DBF_QERR_LEVEL 2 80#define QETH_DBF_QERR_LEVEL 2
84 81
85#define QETH_DBF_TEXT(name,level,text) \ 82#define QETH_DBF_TEXT(name, level, text) \
86 do { \ 83 do { \
87 debug_text_event(qeth_dbf_##name,level,text); \ 84 debug_text_event(qeth_dbf_##name, level, text); \
88 } while (0) 85 } while (0)
89 86
90#define QETH_DBF_HEX(name,level,addr,len) \ 87#define QETH_DBF_HEX(name, level, addr, len) \
91 do { \ 88 do { \
92 debug_event(qeth_dbf_##name,level,(void*)(addr),len); \ 89 debug_event(qeth_dbf_##name, level, (void *)(addr), len); \
93 } while (0)
94
95DECLARE_PER_CPU(char[256], qeth_dbf_txt_buf);
96
97#define QETH_DBF_TEXT_(name,level,text...) \
98 do { \
99 char* dbf_txt_buf = get_cpu_var(qeth_dbf_txt_buf); \
100 sprintf(dbf_txt_buf, text); \
101 debug_text_event(qeth_dbf_##name,level,dbf_txt_buf); \
102 put_cpu_var(qeth_dbf_txt_buf); \
103 } while (0) 90 } while (0)
104 91
105#define QETH_DBF_SPRINTF(name,level,text...) \ 92/* Filter out low debug levels early to avoid wasted sprintf calls */
106 do { \ 93static inline int qeth_dbf_passes(debug_info_t *dbf_grp, int level)
107 debug_sprintf_event(qeth_dbf_trace, level, ##text ); \ 94{
108 debug_sprintf_event(qeth_dbf_trace, level, text ); \ 95 return (level <= dbf_grp->level);
109 } while (0) 96}
110 97
111/** 98/**
112 * some more debug stuff 99 * some more debug stuff
113 */ 100 */
114#define PRINTK_HEADER "qeth: " 101#define PRINTK_HEADER "qeth: "
115
116#define HEXDUMP16(importance,header,ptr) \
117PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
118 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
119 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
120 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
121 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
122 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
123 *(((char*)ptr)+12),*(((char*)ptr)+13), \
124 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
125PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
126 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
127 *(((char*)ptr)+16),*(((char*)ptr)+17), \
128 *(((char*)ptr)+18),*(((char*)ptr)+19), \
129 *(((char*)ptr)+20),*(((char*)ptr)+21), \
130 *(((char*)ptr)+22),*(((char*)ptr)+23), \
131 *(((char*)ptr)+24),*(((char*)ptr)+25), \
132 *(((char*)ptr)+26),*(((char*)ptr)+27), \
133 *(((char*)ptr)+28),*(((char*)ptr)+29), \
134 *(((char*)ptr)+30),*(((char*)ptr)+31));
135
136static inline void
137qeth_hex_dump(unsigned char *buf, size_t len)
138{
139 size_t i;
140
141 for (i = 0; i < len; i++) {
142 if (i && !(i % 16))
143 printk("\n");
144 printk("%02x ", *(buf + i));
145 }
146 printk("\n");
147}
148 102
149#define SENSE_COMMAND_REJECT_BYTE 0 103#define SENSE_COMMAND_REJECT_BYTE 0
150#define SENSE_COMMAND_REJECT_FLAG 0x80 104#define SENSE_COMMAND_REJECT_FLAG 0x80
@@ -154,10 +108,6 @@ qeth_hex_dump(unsigned char *buf, size_t len)
154/* 108/*
155 * Common IO related definitions 109 * Common IO related definitions
156 */ 110 */
157extern struct device *qeth_root_dev;
158extern struct ccw_driver qeth_ccw_driver;
159extern struct ccwgroup_driver qeth_ccwgroup_driver;
160
161#define CARD_RDEV(card) card->read.ccwdev 111#define CARD_RDEV(card) card->read.ccwdev
162#define CARD_WDEV(card) card->write.ccwdev 112#define CARD_WDEV(card) card->write.ccwdev
163#define CARD_DDEV(card) card->data.ccwdev 113#define CARD_DDEV(card) card->data.ccwdev
@@ -167,10 +117,6 @@ extern struct ccwgroup_driver qeth_ccwgroup_driver;
167#define CARD_DDEV_ID(card) card->data.ccwdev->dev.bus_id 117#define CARD_DDEV_ID(card) card->data.ccwdev->dev.bus_id
168#define CHANNEL_ID(channel) channel->ccwdev->dev.bus_id 118#define CHANNEL_ID(channel) channel->ccwdev->dev.bus_id
169 119
170#define CARD_FROM_CDEV(cdev) (struct qeth_card *) \
171 ((struct ccwgroup_device *)cdev->dev.driver_data)\
172 ->dev.driver_data;
173
174/** 120/**
175 * card stuff 121 * card stuff
176 */ 122 */
@@ -228,40 +174,36 @@ struct qeth_ipa_info {
228 __u32 enabled_funcs; 174 __u32 enabled_funcs;
229}; 175};
230 176
231static inline int 177static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
232qeth_is_ipa_supported(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func) 178 enum qeth_ipa_funcs func)
233{ 179{
234 return (ipa->supported_funcs & func); 180 return (ipa->supported_funcs & func);
235} 181}
236 182
237static inline int 183static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
238qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func) 184 enum qeth_ipa_funcs func)
239{ 185{
240 return (ipa->supported_funcs & ipa->enabled_funcs & func); 186 return (ipa->supported_funcs & ipa->enabled_funcs & func);
241} 187}
242 188
243#define qeth_adp_supported(c,f) \ 189#define qeth_adp_supported(c, f) \
244 qeth_is_ipa_supported(&c->options.adp, f) 190 qeth_is_ipa_supported(&c->options.adp, f)
245#define qeth_adp_enabled(c,f) \ 191#define qeth_adp_enabled(c, f) \
246 qeth_is_ipa_enabled(&c->options.adp, f) 192 qeth_is_ipa_enabled(&c->options.adp, f)
247#define qeth_is_supported(c,f) \ 193#define qeth_is_supported(c, f) \
248 qeth_is_ipa_supported(&c->options.ipa4, f) 194 qeth_is_ipa_supported(&c->options.ipa4, f)
249#define qeth_is_enabled(c,f) \ 195#define qeth_is_enabled(c, f) \
250 qeth_is_ipa_enabled(&c->options.ipa4, f) 196 qeth_is_ipa_enabled(&c->options.ipa4, f)
251#ifdef CONFIG_QETH_IPV6 197#define qeth_is_supported6(c, f) \
252#define qeth_is_supported6(c,f) \
253 qeth_is_ipa_supported(&c->options.ipa6, f) 198 qeth_is_ipa_supported(&c->options.ipa6, f)
254#define qeth_is_enabled6(c,f) \ 199#define qeth_is_enabled6(c, f) \
255 qeth_is_ipa_enabled(&c->options.ipa6, f) 200 qeth_is_ipa_enabled(&c->options.ipa6, f)
256#else /* CONFIG_QETH_IPV6 */ 201#define qeth_is_ipafunc_supported(c, prot, f) \
257#define qeth_is_supported6(c,f) 0 202 ((prot == QETH_PROT_IPV6) ? \
258#define qeth_is_enabled6(c,f) 0 203 qeth_is_supported6(c, f) : qeth_is_supported(c, f))
259#endif /* CONFIG_QETH_IPV6 */ 204#define qeth_is_ipafunc_enabled(c, prot, f) \
260#define qeth_is_ipafunc_supported(c,prot,f) \ 205 ((prot == QETH_PROT_IPV6) ? \
261 (prot==QETH_PROT_IPV6)? qeth_is_supported6(c,f):qeth_is_supported(c,f) 206 qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
262#define qeth_is_ipafunc_enabled(c,prot,f) \
263 (prot==QETH_PROT_IPV6)? qeth_is_enabled6(c,f):qeth_is_enabled(c,f)
264
265 207
266#define QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT 0x0101 208#define QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT 0x0101
267#define QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT 0x0101 209#define QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT 0x0101
@@ -269,39 +211,35 @@ qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func)
269#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108 211#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108
270 212
271#define QETH_MODELLIST_ARRAY \ 213#define QETH_MODELLIST_ARRAY \
272 {{0x1731,0x01,0x1732,0x01,QETH_CARD_TYPE_OSAE,1, \ 214 {{0x1731, 0x01, 0x1732, 0x01, QETH_CARD_TYPE_OSAE, 1, \
273 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \ 215 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
274 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \ 216 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
275 QETH_MAX_QUEUES,0}, \ 217 QETH_MAX_QUEUES, 0}, \
276 {0x1731,0x05,0x1732,0x05,QETH_CARD_TYPE_IQD,0, \ 218 {0x1731, 0x05, 0x1732, 0x05, QETH_CARD_TYPE_IQD, 0, \
277 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \ 219 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \
278 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \ 220 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \
279 QETH_MAX_QUEUES,0x103}, \ 221 QETH_MAX_QUEUES, 0x103}, \
280 {0x1731,0x06,0x1732,0x06,QETH_CARD_TYPE_OSN,0, \ 222 {0x1731, 0x06, 0x1732, 0x06, QETH_CARD_TYPE_OSN, 0, \
281 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \ 223 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
282 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \ 224 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
283 QETH_MAX_QUEUES,0}, \ 225 QETH_MAX_QUEUES, 0}, \
284 {0,0,0,0,0,0,0,0,0}} 226 {0, 0, 0, 0, 0, 0, 0, 0, 0} }
285 227
286#define QETH_REAL_CARD 1 228#define QETH_REAL_CARD 1
287#define QETH_VLAN_CARD 2 229#define QETH_VLAN_CARD 2
288#define QETH_BUFSIZE 4096 230#define QETH_BUFSIZE 4096
289 231
290/** 232/**
291 * some more defs 233 * some more defs
292 */ 234 */
293#define IF_NAME_LEN 16
294#define QETH_TX_TIMEOUT 100 * HZ 235#define QETH_TX_TIMEOUT 100 * HZ
295#define QETH_RCD_TIMEOUT 60 * HZ 236#define QETH_RCD_TIMEOUT 60 * HZ
296#define QETH_HEADER_SIZE 32 237#define QETH_HEADER_SIZE 32
297#define MAX_PORTNO 15 238#define QETH_MAX_PORTNO 15
298#define QETH_FAKE_LL_LEN_ETH ETH_HLEN
299#define QETH_FAKE_LL_LEN_TR (sizeof(struct trh_hdr)-TR_MAXRIFLEN+sizeof(struct trllc))
300#define QETH_FAKE_LL_V6_ADDR_POS 24
301 239
302/*IPv6 address autoconfiguration stuff*/ 240/*IPv6 address autoconfiguration stuff*/
303#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe 241#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
304#define UNIQUE_ID_NOT_BY_CARD 0x10000 242#define UNIQUE_ID_NOT_BY_CARD 0x10000
305 243
306/*****************************************************************************/ 244/*****************************************************************************/
307/* QDIO queue and buffer handling */ 245/* QDIO queue and buffer handling */
@@ -394,20 +332,20 @@ struct qeth_hdr {
394 332
395/*TCP Segmentation Offload header*/ 333/*TCP Segmentation Offload header*/
396struct qeth_hdr_ext_tso { 334struct qeth_hdr_ext_tso {
397 __u16 hdr_tot_len; 335 __u16 hdr_tot_len;
398 __u8 imb_hdr_no; 336 __u8 imb_hdr_no;
399 __u8 reserved; 337 __u8 reserved;
400 __u8 hdr_type; 338 __u8 hdr_type;
401 __u8 hdr_version; 339 __u8 hdr_version;
402 __u16 hdr_len; 340 __u16 hdr_len;
403 __u32 payload_len; 341 __u32 payload_len;
404 __u16 mss; 342 __u16 mss;
405 __u16 dg_hdr_len; 343 __u16 dg_hdr_len;
406 __u8 padding[16]; 344 __u8 padding[16];
407} __attribute__ ((packed)); 345} __attribute__ ((packed));
408 346
409struct qeth_hdr_tso { 347struct qeth_hdr_tso {
410 struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/ 348 struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/
411 struct qeth_hdr_ext_tso ext; 349 struct qeth_hdr_ext_tso ext;
412} __attribute__ ((packed)); 350} __attribute__ ((packed));
413 351
@@ -446,8 +384,7 @@ enum qeth_header_ids {
446#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20 384#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
447#define QETH_HDR_EXT_UDP_TSO 0x40 /*bit off for TCP*/ 385#define QETH_HDR_EXT_UDP_TSO 0x40 /*bit off for TCP*/
448 386
449static inline int 387static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
450qeth_is_last_sbale(struct qdio_buffer_element *sbale)
451{ 388{
452 return (sbale->flags & SBAL_FLAGS_LAST_ENTRY); 389 return (sbale->flags & SBAL_FLAGS_LAST_ENTRY);
453} 390}
@@ -485,7 +422,6 @@ struct qeth_qdio_buffer_pool {
485 422
486struct qeth_qdio_buffer { 423struct qeth_qdio_buffer {
487 struct qdio_buffer *buffer; 424 struct qdio_buffer *buffer;
488 volatile enum qeth_qdio_buffer_states state;
489 /* the buffer pool entry currently associated to this buffer */ 425 /* the buffer pool entry currently associated to this buffer */
490 struct qeth_buffer_pool_entry *pool_entry; 426 struct qeth_buffer_pool_entry *pool_entry;
491}; 427};
@@ -493,11 +429,7 @@ struct qeth_qdio_buffer {
493struct qeth_qdio_q { 429struct qeth_qdio_q {
494 struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; 430 struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
495 struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q]; 431 struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
496 /* 432 int next_buf_to_init;
497 * buf_to_init means "buffer must be initialized by driver and must
498 * be made available for hardware" -> state is set to EMPTY
499 */
500 volatile int next_buf_to_init;
501} __attribute__ ((aligned(256))); 433} __attribute__ ((aligned(256)));
502 434
503/* possible types of qeth large_send support */ 435/* possible types of qeth large_send support */
@@ -510,7 +442,7 @@ enum qeth_large_send_types {
510struct qeth_qdio_out_buffer { 442struct qeth_qdio_out_buffer {
511 struct qdio_buffer *buffer; 443 struct qdio_buffer *buffer;
512 atomic_t state; 444 atomic_t state;
513 volatile int next_element_to_fill; 445 int next_element_to_fill;
514 struct sk_buff_head skb_list; 446 struct sk_buff_head skb_list;
515 struct list_head ctx_list; 447 struct list_head ctx_list;
516}; 448};
@@ -529,11 +461,11 @@ struct qeth_qdio_out_q {
529 int queue_no; 461 int queue_no;
530 struct qeth_card *card; 462 struct qeth_card *card;
531 atomic_t state; 463 atomic_t state;
532 volatile int do_pack; 464 int do_pack;
533 /* 465 /*
534 * index of buffer to be filled by driver; state EMPTY or PACKING 466 * index of buffer to be filled by driver; state EMPTY or PACKING
535 */ 467 */
536 volatile int next_buf_to_fill; 468 int next_buf_to_fill;
537 /* 469 /*
538 * number of buffers that are currently filled (PRIMED) 470 * number of buffers that are currently filled (PRIMED)
539 * -> these buffers are hardware-owned 471 * -> these buffers are hardware-owned
@@ -624,36 +556,6 @@ enum qeth_cmd_buffer_state {
624 BUF_STATE_LOCKED, 556 BUF_STATE_LOCKED,
625 BUF_STATE_PROCESSED, 557 BUF_STATE_PROCESSED,
626}; 558};
627/**
628 * IP address and multicast list
629 */
630struct qeth_ipaddr {
631 struct list_head entry;
632 enum qeth_ip_types type;
633 enum qeth_ipa_setdelip_flags set_flags;
634 enum qeth_ipa_setdelip_flags del_flags;
635 int is_multicast;
636 volatile int users;
637 enum qeth_prot_versions proto;
638 unsigned char mac[OSA_ADDR_LEN];
639 union {
640 struct {
641 unsigned int addr;
642 unsigned int mask;
643 } a4;
644 struct {
645 struct in6_addr addr;
646 unsigned int pfxlen;
647 } a6;
648 } u;
649};
650
651struct qeth_ipato_entry {
652 struct list_head entry;
653 enum qeth_prot_versions proto;
654 char addr[16];
655 int mask_bits;
656};
657 559
658struct qeth_ipato { 560struct qeth_ipato {
659 int enabled; 561 int enabled;
@@ -672,7 +574,6 @@ struct qeth_cmd_buffer {
672 void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *); 574 void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
673}; 575};
674 576
675
676/** 577/**
677 * definition of a qeth channel, used for read and write 578 * definition of a qeth channel, used for read and write
678 */ 579 */
@@ -686,8 +587,8 @@ struct qeth_channel {
686/*command buffer for control data*/ 587/*command buffer for control data*/
687 struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO]; 588 struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
688 atomic_t irq_pending; 589 atomic_t irq_pending;
689 volatile int io_buf_no; 590 int io_buf_no;
690 volatile int buf_no; 591 int buf_no;
691}; 592};
692 593
693/** 594/**
@@ -717,8 +618,9 @@ struct qeth_seqno {
717struct qeth_reply { 618struct qeth_reply {
718 struct list_head list; 619 struct list_head list;
719 wait_queue_head_t wait_q; 620 wait_queue_head_t wait_q;
720 int (*callback)(struct qeth_card *,struct qeth_reply *,unsigned long); 621 int (*callback)(struct qeth_card *, struct qeth_reply *,
721 u32 seqno; 622 unsigned long);
623 u32 seqno;
722 unsigned long offset; 624 unsigned long offset;
723 atomic_t received; 625 atomic_t received;
724 int rc; 626 int rc;
@@ -765,10 +667,8 @@ struct qeth_card_options {
765 struct qeth_routing_info route4; 667 struct qeth_routing_info route4;
766 struct qeth_ipa_info ipa4; 668 struct qeth_ipa_info ipa4;
767 struct qeth_ipa_info adp; /*Adapter parameters*/ 669 struct qeth_ipa_info adp; /*Adapter parameters*/
768#ifdef CONFIG_QETH_IPV6
769 struct qeth_routing_info route6; 670 struct qeth_routing_info route6;
770 struct qeth_ipa_info ipa6; 671 struct qeth_ipa_info ipa6;
771#endif /* QETH_IPV6 */
772 enum qeth_checksum_types checksum_type; 672 enum qeth_checksum_types checksum_type;
773 int broadcast_mode; 673 int broadcast_mode;
774 int macaddr_mode; 674 int macaddr_mode;
@@ -785,9 +685,7 @@ struct qeth_card_options {
785 * thread bits for qeth_card thread masks 685 * thread bits for qeth_card thread masks
786 */ 686 */
787enum qeth_threads { 687enum qeth_threads {
788 QETH_SET_IP_THREAD = 1, 688 QETH_RECOVER_THREAD = 1,
789 QETH_RECOVER_THREAD = 2,
790 QETH_SET_PROMISC_MODE_THREAD = 4,
791}; 689};
792 690
793struct qeth_osn_info { 691struct qeth_osn_info {
@@ -795,12 +693,34 @@ struct qeth_osn_info {
795 int (*data_cb)(struct sk_buff *skb); 693 int (*data_cb)(struct sk_buff *skb);
796}; 694};
797 695
696enum qeth_discipline_id {
697 QETH_DISCIPLINE_LAYER3 = 0,
698 QETH_DISCIPLINE_LAYER2 = 1,
699};
700
701struct qeth_discipline {
702 qdio_handler_t *input_handler;
703 qdio_handler_t *output_handler;
704 int (*recover)(void *ptr);
705 struct ccwgroup_driver *ccwgdriver;
706};
707
708struct qeth_vlan_vid {
709 struct list_head list;
710 unsigned short vid;
711};
712
713struct qeth_mc_mac {
714 struct list_head list;
715 __u8 mc_addr[MAX_ADDR_LEN];
716 unsigned char mc_addrlen;
717};
718
798struct qeth_card { 719struct qeth_card {
799 struct list_head list; 720 struct list_head list;
800 enum qeth_card_states state; 721 enum qeth_card_states state;
801 int lan_online; 722 int lan_online;
802 spinlock_t lock; 723 spinlock_t lock;
803/*hardware and sysfs stuff*/
804 struct ccwgroup_device *gdev; 724 struct ccwgroup_device *gdev;
805 struct qeth_channel read; 725 struct qeth_channel read;
806 struct qeth_channel write; 726 struct qeth_channel write;
@@ -815,15 +735,16 @@ struct qeth_card {
815 struct qeth_card_options options; 735 struct qeth_card_options options;
816 736
817 wait_queue_head_t wait_q; 737 wait_queue_head_t wait_q;
818#ifdef CONFIG_QETH_VLAN
819 spinlock_t vlanlock; 738 spinlock_t vlanlock;
739 spinlock_t mclock;
820 struct vlan_group *vlangrp; 740 struct vlan_group *vlangrp;
821#endif 741 struct list_head vid_list;
742 struct list_head mc_list;
822 struct work_struct kernel_thread_starter; 743 struct work_struct kernel_thread_starter;
823 spinlock_t thread_mask_lock; 744 spinlock_t thread_mask_lock;
824 volatile unsigned long thread_start_mask; 745 unsigned long thread_start_mask;
825 volatile unsigned long thread_allowed_mask; 746 unsigned long thread_allowed_mask;
826 volatile unsigned long thread_running_mask; 747 unsigned long thread_running_mask;
827 spinlock_t ip_lock; 748 spinlock_t ip_lock;
828 struct list_head ip_list; 749 struct list_head ip_list;
829 struct list_head *ip_tbd_list; 750 struct list_head *ip_tbd_list;
@@ -833,8 +754,8 @@ struct qeth_card {
833 struct qeth_qdio_info qdio; 754 struct qeth_qdio_info qdio;
834 struct qeth_perf_stats perf_stats; 755 struct qeth_perf_stats perf_stats;
835 int use_hard_stop; 756 int use_hard_stop;
836 const struct header_ops *orig_header_ops;
837 struct qeth_osn_info osn_info; 757 struct qeth_osn_info osn_info;
758 struct qeth_discipline discipline;
838 atomic_t force_alloc_skb; 759 atomic_t force_alloc_skb;
839}; 760};
840 761
@@ -843,411 +764,153 @@ struct qeth_card_list_struct {
843 rwlock_t rwlock; 764 rwlock_t rwlock;
844}; 765};
845 766
846extern struct qeth_card_list_struct qeth_card_list;
847
848/*notifier list */
849struct qeth_notify_list_struct {
850 struct list_head list;
851 struct task_struct *task;
852 int signum;
853};
854extern spinlock_t qeth_notify_lock;
855extern struct list_head qeth_notify_list;
856
857/*some helper functions*/ 767/*some helper functions*/
858
859#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") 768#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
860 769
861static inline __u8 770static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
862qeth_get_ipa_adp_type(enum qeth_link_types link_type)
863{ 771{
864 switch (link_type) { 772 struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *)
865 case QETH_LINK_TYPE_HSTR: 773 dev_get_drvdata(&cdev->dev))->dev);
866 return 2; 774 return card;
867 default:
868 return 1;
869 }
870} 775}
871 776
872static inline struct sk_buff * 777static inline int qeth_get_micros(void)
873qeth_realloc_headroom(struct qeth_card *card, struct sk_buff *skb, int size)
874{ 778{
875 struct sk_buff *new_skb = skb; 779 return (int) (get_clock() >> 12);
876
877 if (skb_headroom(skb) >= size)
878 return skb;
879 new_skb = skb_realloc_headroom(skb, size);
880 if (!new_skb)
881 PRINT_ERR("Could not realloc headroom for qeth_hdr "
882 "on interface %s", QETH_CARD_IFNAME(card));
883 return new_skb;
884}
885
886static inline struct sk_buff *
887qeth_pskb_unshare(struct sk_buff *skb, gfp_t pri)
888{
889 struct sk_buff *nskb;
890 if (!skb_cloned(skb))
891 return skb;
892 nskb = skb_copy(skb, pri);
893 return nskb;
894} 780}
895 781
896static inline void * 782static inline void *qeth_push_skb(struct qeth_card *card, struct sk_buff *skb,
897qeth_push_skb(struct qeth_card *card, struct sk_buff *skb, int size) 783 int size)
898{ 784{
899 void *hdr; 785 void *hdr;
900 786
901 hdr = (void *) skb_push(skb, size); 787 hdr = (void *) skb_push(skb, size);
902 /* 788 /*
903 * sanity check, the Linux memory allocation scheme should 789 * sanity check, the Linux memory allocation scheme should
904 * never present us cases like this one (the qdio header size plus 790 * never present us cases like this one (the qdio header size plus
 905 * the first 40 bytes of the packet cross a 4k boundary) 791 * the first 40 bytes of the packet cross a 4k boundary)
906 */ 792 */
907 if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) != 793 if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) !=
908 (((unsigned long) hdr + size + 794 (((unsigned long) hdr + size +
909 QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) { 795 QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) {
910 PRINT_ERR("Misaligned packet on interface %s. Discarded.", 796 PRINT_ERR("Misaligned packet on interface %s. Discarded.",
911 QETH_CARD_IFNAME(card)); 797 QETH_CARD_IFNAME(card));
912 return NULL; 798 return NULL;
913 }
914 return hdr;
915}
916
917
918static inline int
919qeth_get_hlen(__u8 link_type)
920{
921#ifdef CONFIG_QETH_IPV6
922 switch (link_type) {
923 case QETH_LINK_TYPE_HSTR:
924 case QETH_LINK_TYPE_LANE_TR:
925 return sizeof(struct qeth_hdr_tso) + TR_HLEN;
926 default:
927#ifdef CONFIG_QETH_VLAN
928 return sizeof(struct qeth_hdr_tso) + VLAN_ETH_HLEN;
929#else
930 return sizeof(struct qeth_hdr_tso) + ETH_HLEN;
931#endif
932 }
933#else /* CONFIG_QETH_IPV6 */
934#ifdef CONFIG_QETH_VLAN
935 return sizeof(struct qeth_hdr_tso) + VLAN_HLEN;
936#else
937 return sizeof(struct qeth_hdr_tso);
938#endif
939#endif /* CONFIG_QETH_IPV6 */
940}
941
942static inline unsigned short
943qeth_get_netdev_flags(struct qeth_card *card)
944{
945 if (card->options.layer2 &&
946 (card->info.type == QETH_CARD_TYPE_OSAE))
947 return 0;
948 switch (card->info.type) {
949 case QETH_CARD_TYPE_IQD:
950 case QETH_CARD_TYPE_OSN:
951 return IFF_NOARP;
952#ifdef CONFIG_QETH_IPV6
953 default:
954 return 0;
955#else
956 default:
957 return IFF_NOARP;
958#endif
959 }
960}
961
962static inline int
963qeth_get_initial_mtu_for_card(struct qeth_card * card)
964{
965 switch (card->info.type) {
966 case QETH_CARD_TYPE_UNKNOWN:
967 return 1500;
968 case QETH_CARD_TYPE_IQD:
969 return card->info.max_mtu;
970 case QETH_CARD_TYPE_OSAE:
971 switch (card->info.link_type) {
972 case QETH_LINK_TYPE_HSTR:
973 case QETH_LINK_TYPE_LANE_TR:
974 return 2000;
975 default:
976 return 1492;
977 }
978 default:
979 return 1500;
980 }
981}
982
983static inline int
984qeth_get_max_mtu_for_card(int cardtype)
985{
986 switch (cardtype) {
987
988 case QETH_CARD_TYPE_UNKNOWN:
989 case QETH_CARD_TYPE_OSAE:
990 case QETH_CARD_TYPE_OSN:
991 return 61440;
992 case QETH_CARD_TYPE_IQD:
993 return 57344;
994 default:
995 return 1500;
996 }
997}
998
999static inline int
1000qeth_get_mtu_out_of_mpc(int cardtype)
1001{
1002 switch (cardtype) {
1003 case QETH_CARD_TYPE_IQD:
1004 return 1;
1005 default:
1006 return 0;
1007 }
1008}
1009
1010static inline int
1011qeth_get_mtu_outof_framesize(int framesize)
1012{
1013 switch (framesize) {
1014 case 0x4000:
1015 return 8192;
1016 case 0x6000:
1017 return 16384;
1018 case 0xa000:
1019 return 32768;
1020 case 0xffff:
1021 return 57344;
1022 default:
1023 return 0;
1024 }
1025}
1026
1027static inline int
1028qeth_mtu_is_valid(struct qeth_card * card, int mtu)
1029{
1030 switch (card->info.type) {
1031 case QETH_CARD_TYPE_OSAE:
1032 return ((mtu >= 576) && (mtu <= 61440));
1033 case QETH_CARD_TYPE_IQD:
1034 return ((mtu >= 576) &&
1035 (mtu <= card->info.max_mtu + 4096 - 32));
1036 case QETH_CARD_TYPE_OSN:
1037 case QETH_CARD_TYPE_UNKNOWN:
1038 default:
1039 return 1;
1040 }
1041}
1042
1043static inline int
1044qeth_get_arphdr_type(int cardtype, int linktype)
1045{
1046 switch (cardtype) {
1047 case QETH_CARD_TYPE_OSAE:
1048 case QETH_CARD_TYPE_OSN:
1049 switch (linktype) {
1050 case QETH_LINK_TYPE_LANE_TR:
1051 case QETH_LINK_TYPE_HSTR:
1052 return ARPHRD_IEEE802_TR;
1053 default:
1054 return ARPHRD_ETHER;
1055 }
1056 case QETH_CARD_TYPE_IQD:
1057 default:
1058 return ARPHRD_ETHER;
1059 } 799 }
800 return hdr;
1060} 801}
1061 802
1062static inline int 803static inline int qeth_get_ip_version(struct sk_buff *skb)
1063qeth_get_micros(void)
1064{
1065 return (int) (get_clock() >> 12);
1066}
1067
1068static inline int
1069qeth_get_qdio_q_format(struct qeth_card *card)
1070{ 804{
1071 switch (card->info.type) { 805 switch (skb->protocol) {
1072 case QETH_CARD_TYPE_IQD: 806 case ETH_P_IPV6:
1073 return 2; 807 return 6;
808 case ETH_P_IP:
809 return 4;
1074 default: 810 default:
1075 return 0; 811 return 0;
1076 } 812 }
1077} 813}
1078 814
1079static inline int 815struct qeth_eddp_context;
1080qeth_isxdigit(char * buf) 816extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
1081{ 817extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
1082 while (*buf) { 818const char *qeth_get_cardname_short(struct qeth_card *);
1083 if (!isxdigit(*buf++)) 819int qeth_realloc_buffer_pool(struct qeth_card *, int);
1084 return 0; 820int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
1085 } 821void qeth_core_free_discipline(struct qeth_card *);
1086 return 1; 822int qeth_core_create_device_attributes(struct device *);
1087} 823void qeth_core_remove_device_attributes(struct device *);
1088 824int qeth_core_create_osn_attributes(struct device *);
1089static inline void 825void qeth_core_remove_osn_attributes(struct device *);
1090qeth_ipaddr4_to_string(const __u8 *addr, char *buf) 826
1091{ 827/* exports for qeth discipline device drivers */
1092 sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]); 828extern struct qeth_card_list_struct qeth_core_card_list;
1093} 829extern debug_info_t *qeth_dbf_setup;
1094 830extern debug_info_t *qeth_dbf_data;
1095static inline int 831extern debug_info_t *qeth_dbf_misc;
1096qeth_string_to_ipaddr4(const char *buf, __u8 *addr) 832extern debug_info_t *qeth_dbf_control;
1097{ 833extern debug_info_t *qeth_dbf_trace;
1098 int count = 0, rc = 0; 834extern debug_info_t *qeth_dbf_sense;
1099 int in[4]; 835extern debug_info_t *qeth_dbf_qerr;
1100 char c; 836
1101 837void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
1102 rc = sscanf(buf, "%u.%u.%u.%u%c", 838int qeth_threads_running(struct qeth_card *, unsigned long);
1103 &in[0], &in[1], &in[2], &in[3], &c); 839int qeth_wait_for_threads(struct qeth_card *, unsigned long);
1104 if (rc != 4 && (rc != 5 || c != '\n')) 840int qeth_do_run_thread(struct qeth_card *, unsigned long);
1105 return -EINVAL; 841void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
1106 for (count = 0; count < 4; count++) { 842void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
1107 if (in[count] > 255) 843int qeth_core_hardsetup_card(struct qeth_card *);
1108 return -EINVAL; 844void qeth_print_status_message(struct qeth_card *);
1109 addr[count] = in[count]; 845int qeth_init_qdio_queues(struct qeth_card *);
1110 } 846int qeth_send_startlan(struct qeth_card *);
1111 return 0; 847int qeth_send_stoplan(struct qeth_card *);
1112} 848int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
1113 849 int (*reply_cb)
1114static inline void 850 (struct qeth_card *, struct qeth_reply *, unsigned long),
1115qeth_ipaddr6_to_string(const __u8 *addr, char *buf) 851 void *);
1116{ 852struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
1117 sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x" 853 enum qeth_ipa_cmds, enum qeth_prot_versions);
1118 ":%02x%02x:%02x%02x:%02x%02x:%02x%02x", 854int qeth_query_setadapterparms(struct qeth_card *);
1119 addr[0], addr[1], addr[2], addr[3], 855int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int,
1120 addr[4], addr[5], addr[6], addr[7], 856 unsigned int, const char *);
1121 addr[8], addr[9], addr[10], addr[11], 857void qeth_put_buffer_pool_entry(struct qeth_card *,
1122 addr[12], addr[13], addr[14], addr[15]); 858 struct qeth_buffer_pool_entry *);
1123} 859void qeth_queue_input_buffer(struct qeth_card *, int);
1124 860struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
1125static inline int 861 struct qdio_buffer *, struct qdio_buffer_element **, int *,
1126qeth_string_to_ipaddr6(const char *buf, __u8 *addr) 862 struct qeth_hdr **);
1127{ 863void qeth_schedule_recovery(struct qeth_card *);
1128 const char *end, *end_tmp, *start; 864void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
1129 __u16 *in; 865 unsigned int, unsigned int,
1130 char num[5]; 866 unsigned int, int, int,
1131 int num2, cnt, out, found, save_cnt; 867 unsigned long);
1132 unsigned short in_tmp[8] = {0, }; 868void qeth_clear_ipacmd_list(struct qeth_card *);
1133 869int qeth_qdio_clear_card(struct qeth_card *, int);
1134 cnt = out = found = save_cnt = num2 = 0; 870void qeth_clear_working_pool_list(struct qeth_card *);
1135 end = start = buf; 871void qeth_clear_cmd_buffers(struct qeth_channel *);
1136 in = (__u16 *) addr; 872void qeth_clear_qdio_buffers(struct qeth_card *);
1137 memset(in, 0, 16); 873void qeth_setadp_promisc_mode(struct qeth_card *);
1138 while (*end) { 874struct net_device_stats *qeth_get_stats(struct net_device *);
1139 end = strchr(start,':'); 875int qeth_change_mtu(struct net_device *, int);
1140 if (end == NULL) { 876int qeth_setadpparms_change_macaddr(struct qeth_card *);
1141 end = buf + strlen(buf); 877void qeth_tx_timeout(struct net_device *);
1142 if ((end_tmp = strchr(start, '\n')) != NULL) 878void qeth_prepare_control_data(struct qeth_card *, int,
1143 end = end_tmp; 879 struct qeth_cmd_buffer *);
1144 out = 1; 880void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
1145 } 881void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
1146 if ((end - start)) { 882struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
1147 memset(num, 0, 5); 883int qeth_mdio_read(struct net_device *, int, int);
1148 if ((end - start) > 4) 884int qeth_snmp_command(struct qeth_card *, char __user *);
1149 return -EINVAL; 885int qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types);
1150 memcpy(num, start, end - start); 886struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32);
1151 if (!qeth_isxdigit(num)) 887int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *,
1152 return -EINVAL; 888 unsigned long);
1153 sscanf(start, "%x", &num2); 889int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
1154 if (found) 890 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
1155 in_tmp[save_cnt++] = num2; 891 void *reply_param);
1156 else 892int qeth_get_cast_type(struct qeth_card *, struct sk_buff *);
1157 in[cnt++] = num2; 893int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
1158 if (out) 894struct sk_buff *qeth_prepare_skb(struct qeth_card *, struct sk_buff *,
1159 break; 895 struct qeth_hdr **);
1160 } else { 896int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int);
1161 if (found) 897int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
1162 return -EINVAL; 898 struct sk_buff *, struct qeth_hdr *, int,
1163 found = 1; 899 struct qeth_eddp_context *);
1164 } 900int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
1165 start = ++end; 901 struct sk_buff *, struct qeth_hdr *,
1166 } 902 int, struct qeth_eddp_context *);
1167 if (cnt + save_cnt > 8) 903int qeth_core_get_stats_count(struct net_device *);
1168 return -EINVAL; 904void qeth_core_get_ethtool_stats(struct net_device *,
1169 cnt = 7; 905 struct ethtool_stats *, u64 *);
1170 while (save_cnt) 906void qeth_core_get_strings(struct net_device *, u32, u8 *);
1171 in[cnt--] = in_tmp[--save_cnt]; 907void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
1172 return 0; 908
1173} 909/* exports for OSN */
1174 910int qeth_osn_assist(struct net_device *, void *, int);
1175static inline void 911int qeth_osn_register(unsigned char *read_dev_no, struct net_device **,
1176qeth_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr, 912 int (*assist_cb)(struct net_device *, void *),
1177 char *buf) 913 int (*data_cb)(struct sk_buff *));
1178{ 914void qeth_osn_deregister(struct net_device *);
1179 if (proto == QETH_PROT_IPV4) 915
1180 qeth_ipaddr4_to_string(addr, buf); 916#endif /* __QETH_CORE_H__ */
1181 else if (proto == QETH_PROT_IPV6)
1182 qeth_ipaddr6_to_string(addr, buf);
1183}
1184
1185static inline int
1186qeth_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
1187 __u8 *addr)
1188{
1189 if (proto == QETH_PROT_IPV4)
1190 return qeth_string_to_ipaddr4(buf, addr);
1191 else if (proto == QETH_PROT_IPV6)
1192 return qeth_string_to_ipaddr6(buf, addr);
1193 else
1194 return -EINVAL;
1195}
1196
1197extern int
1198qeth_setrouting_v4(struct qeth_card *);
1199extern int
1200qeth_setrouting_v6(struct qeth_card *);
1201
1202extern int
1203qeth_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
1204
1205extern void
1206qeth_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions, u8 *, int);
1207
1208extern int
1209qeth_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1210
1211extern void
1212qeth_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1213
1214extern int
1215qeth_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1216
1217extern void
1218qeth_del_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1219
1220extern int
1221qeth_notifier_register(struct task_struct *, int );
1222
1223extern int
1224qeth_notifier_unregister(struct task_struct * );
1225
1226extern void
1227qeth_schedule_recovery(struct qeth_card *);
1228
1229extern int
1230qeth_realloc_buffer_pool(struct qeth_card *, int);
1231
1232extern int
1233qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types);
1234
1235extern void
1236qeth_fill_header(struct qeth_card *, struct qeth_hdr *,
1237 struct sk_buff *, int, int);
1238extern void
1239qeth_flush_buffers(struct qeth_qdio_out_q *, int, int, int);
1240
1241extern int
1242qeth_osn_assist(struct net_device *, void *, int);
1243
1244extern int
1245qeth_osn_register(unsigned char *read_dev_no,
1246 struct net_device **,
1247 int (*assist_cb)(struct net_device *, void *),
1248 int (*data_cb)(struct sk_buff *));
1249
1250extern void
1251qeth_osn_deregister(struct net_device *);
1252
1253#endif /* __QETH_H__ */
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
new file mode 100644
index 000000000000..95c6fcf58953
--- /dev/null
+++ b/drivers/s390/net/qeth_core_main.c
@@ -0,0 +1,4540 @@
1/*
2 * drivers/s390/net/qeth_core_main.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/string.h>
14#include <linux/errno.h>
15#include <linux/kernel.h>
16#include <linux/ip.h>
17#include <linux/ipv6.h>
18#include <linux/tcp.h>
19#include <linux/mii.h>
20#include <linux/kthread.h>
21
22#include <asm-s390/ebcdic.h>
23#include <asm-s390/io.h>
24#include <asm/s390_rdev.h>
25
26#include "qeth_core.h"
27#include "qeth_core_offl.h"
28
29#define QETH_DBF_TEXT_(name, level, text...) \
30 do { \
31 if (qeth_dbf_passes(qeth_dbf_##name, level)) { \
32 char *dbf_txt_buf = \
33 get_cpu_var(qeth_core_dbf_txt_buf); \
34 sprintf(dbf_txt_buf, text); \
35 debug_text_event(qeth_dbf_##name, level, dbf_txt_buf); \
36 put_cpu_var(qeth_core_dbf_txt_buf); \
37 } \
38 } while (0)
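Unlike the plain QETH_DBF_TEXT in qeth_core.h, this variant formats into a per-CPU scratch buffer, and the qeth_dbf_passes() check skips the sprintf entirely when the debug area's level would discard the entry anyway. A made-up call site for illustration:

	/* only formats when the trace area runs at level >= 2 */
	QETH_DBF_TEXT_(trace, 2, "err%d", rc);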
39
40struct qeth_card_list_struct qeth_core_card_list;
41EXPORT_SYMBOL_GPL(qeth_core_card_list);
42debug_info_t *qeth_dbf_setup;
43EXPORT_SYMBOL_GPL(qeth_dbf_setup);
44debug_info_t *qeth_dbf_data;
45EXPORT_SYMBOL_GPL(qeth_dbf_data);
46debug_info_t *qeth_dbf_misc;
47EXPORT_SYMBOL_GPL(qeth_dbf_misc);
48debug_info_t *qeth_dbf_control;
49EXPORT_SYMBOL_GPL(qeth_dbf_control);
50debug_info_t *qeth_dbf_trace;
51EXPORT_SYMBOL_GPL(qeth_dbf_trace);
52debug_info_t *qeth_dbf_sense;
53EXPORT_SYMBOL_GPL(qeth_dbf_sense);
54debug_info_t *qeth_dbf_qerr;
55EXPORT_SYMBOL_GPL(qeth_dbf_qerr);
56
57static struct device *qeth_core_root_dev;
58static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;
59static struct lock_class_key qdio_out_skb_queue_key;
60static DEFINE_PER_CPU(char[256], qeth_core_dbf_txt_buf);
61
62static void qeth_send_control_data_cb(struct qeth_channel *,
63 struct qeth_cmd_buffer *);
64static int qeth_issue_next_read(struct qeth_card *);
65static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
66static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32);
67static void qeth_free_buffer_pool(struct qeth_card *);
68static int qeth_qdio_establish(struct qeth_card *);
69
70
71static inline void __qeth_fill_buffer_frag(struct sk_buff *skb,
72 struct qdio_buffer *buffer, int is_tso,
73 int *next_element_to_fill)
74{
75 struct skb_frag_struct *frag;
76 int fragno;
77 unsigned long addr;
78 int element, cnt, dlen;
79
80 fragno = skb_shinfo(skb)->nr_frags;
81 element = *next_element_to_fill;
82 dlen = 0;
83
84 if (is_tso)
85 buffer->element[element].flags =
86 SBAL_FLAGS_MIDDLE_FRAG;
87 else
88 buffer->element[element].flags =
89 SBAL_FLAGS_FIRST_FRAG;
90 dlen = skb->len - skb->data_len;
91 if (dlen) {
92 buffer->element[element].addr = skb->data;
93 buffer->element[element].length = dlen;
94 element++;
95 }
96 for (cnt = 0; cnt < fragno; cnt++) {
97 frag = &skb_shinfo(skb)->frags[cnt];
98 addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
99 frag->page_offset;
100 buffer->element[element].addr = (char *)addr;
101 buffer->element[element].length = frag->size;
102 if (cnt < (fragno - 1))
103 buffer->element[element].flags =
104 SBAL_FLAGS_MIDDLE_FRAG;
105 else
106 buffer->element[element].flags =
107 SBAL_FLAGS_LAST_FRAG;
108 element++;
109 }
110 *next_element_to_fill = element;
111}
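The flag sequence matters here: the skb's linear data becomes the FIRST_FRAG element (or a MIDDLE_FRAG when a TSO header element precedes it), each page fragment a MIDDLE_FRAG, and the final one LAST_FRAG. A hypothetical call, assuming an skb and a struct qdio_buffer named buffer are in scope:

	int next = 0;

	/* non-TSO: linear data -> FIRST_FRAG, frags -> MIDDLE/LAST_FRAG */
	__qeth_fill_buffer_frag(skb, buffer, 0, &next);
	/* 'next' now indexes one past the LAST_FRAG element */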
112
113static inline const char *qeth_get_cardname(struct qeth_card *card)
114{
115 if (card->info.guestlan) {
116 switch (card->info.type) {
117 case QETH_CARD_TYPE_OSAE:
118 return " Guest LAN QDIO";
119 case QETH_CARD_TYPE_IQD:
120 return " Guest LAN Hiper";
121 default:
122 return " unknown";
123 }
124 } else {
125 switch (card->info.type) {
126 case QETH_CARD_TYPE_OSAE:
127 return " OSD Express";
128 case QETH_CARD_TYPE_IQD:
129 return " HiperSockets";
130 case QETH_CARD_TYPE_OSN:
131 return " OSN QDIO";
132 default:
133 return " unknown";
134 }
135 }
136 return " n/a";
137}
138
139/* max length to be returned: 14 */
140const char *qeth_get_cardname_short(struct qeth_card *card)
141{
142 if (card->info.guestlan) {
143 switch (card->info.type) {
144 case QETH_CARD_TYPE_OSAE:
145 return "GuestLAN QDIO";
146 case QETH_CARD_TYPE_IQD:
147 return "GuestLAN Hiper";
148 default:
149 return "unknown";
150 }
151 } else {
152 switch (card->info.type) {
153 case QETH_CARD_TYPE_OSAE:
154 switch (card->info.link_type) {
155 case QETH_LINK_TYPE_FAST_ETH:
156 return "OSD_100";
157 case QETH_LINK_TYPE_HSTR:
158 return "HSTR";
159 case QETH_LINK_TYPE_GBIT_ETH:
160 return "OSD_1000";
161 case QETH_LINK_TYPE_10GBIT_ETH:
162 return "OSD_10GIG";
163 case QETH_LINK_TYPE_LANE_ETH100:
164 return "OSD_FE_LANE";
165 case QETH_LINK_TYPE_LANE_TR:
166 return "OSD_TR_LANE";
167 case QETH_LINK_TYPE_LANE_ETH1000:
168 return "OSD_GbE_LANE";
169 case QETH_LINK_TYPE_LANE:
170 return "OSD_ATM_LANE";
171 default:
172 return "OSD_Express";
173 }
174 case QETH_CARD_TYPE_IQD:
175 return "HiperSockets";
176 case QETH_CARD_TYPE_OSN:
177 return "OSN";
178 default:
179 return "unknown";
180 }
181 }
182 return "n/a";
183}
184
185void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
186 int clear_start_mask)
187{
188 unsigned long flags;
189
190 spin_lock_irqsave(&card->thread_mask_lock, flags);
191 card->thread_allowed_mask = threads;
192 if (clear_start_mask)
193 card->thread_start_mask &= threads;
194 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
195 wake_up(&card->wait_q);
196}
197EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
198
199int qeth_threads_running(struct qeth_card *card, unsigned long threads)
200{
201 unsigned long flags;
202 int rc = 0;
203
204 spin_lock_irqsave(&card->thread_mask_lock, flags);
205 rc = (card->thread_running_mask & threads);
206 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
207 return rc;
208}
209EXPORT_SYMBOL_GPL(qeth_threads_running);
210
211int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
212{
213 return wait_event_interruptible(card->wait_q,
214 qeth_threads_running(card, threads) == 0);
215}
216EXPORT_SYMBOL_GPL(qeth_wait_for_threads);
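These exports form the thread-mask gate that disciplines use around their helper threads: allow bits, test them, or sleep until they clear. A hedged sketch of a stop path built on them:

	/* forbid new helper threads, then wait for running ones to finish */
	qeth_set_allowed_threads(card, 0, 1);
	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD))
		return -ERESTARTSYS;	/* interrupted while waiting */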
217
218void qeth_clear_working_pool_list(struct qeth_card *card)
219{
220 struct qeth_buffer_pool_entry *pool_entry, *tmp;
221
222 QETH_DBF_TEXT(trace, 5, "clwrklst");
223 list_for_each_entry_safe(pool_entry, tmp,
224 &card->qdio.in_buf_pool.entry_list, list){
225 list_del(&pool_entry->list);
226 }
227}
228EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
229
230static int qeth_alloc_buffer_pool(struct qeth_card *card)
231{
232 struct qeth_buffer_pool_entry *pool_entry;
233 void *ptr;
234 int i, j;
235
236 QETH_DBF_TEXT(trace, 5, "alocpool");
237 for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
238 pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
239 if (!pool_entry) {
240 qeth_free_buffer_pool(card);
241 return -ENOMEM;
242 }
243 for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
244 ptr = (void *) __get_free_page(GFP_KERNEL|GFP_DMA);
245 if (!ptr) {
246 while (j > 0)
247 free_page((unsigned long)
248 pool_entry->elements[--j]);
249 kfree(pool_entry);
250 qeth_free_buffer_pool(card);
251 return -ENOMEM;
252 }
253 pool_entry->elements[j] = ptr;
254 }
255 list_add(&pool_entry->init_list,
256 &card->qdio.init_pool.entry_list);
257 }
258 return 0;
259}
260
261int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
262{
263 QETH_DBF_TEXT(trace, 2, "realcbp");
264
265 if ((card->state != CARD_STATE_DOWN) &&
266 (card->state != CARD_STATE_RECOVER))
267 return -EPERM;
268
269	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
270 qeth_clear_working_pool_list(card);
271 qeth_free_buffer_pool(card);
272 card->qdio.in_buf_pool.buf_count = bufcnt;
273 card->qdio.init_pool.buf_count = bufcnt;
274 return qeth_alloc_buffer_pool(card);
275}
276
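/*
 * Switch the card's large-send (segmentation offload) mode. The
 * transmit queue is quiesced while the netdev feature flags are
 * updated; if TSO is requested but the adapter does not support it,
 * the option falls back to QETH_LARGE_SEND_NO and -EOPNOTSUPP is
 * returned.
 */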
277int qeth_set_large_send(struct qeth_card *card,
278 enum qeth_large_send_types type)
279{
280 int rc = 0;
281
282 if (card->dev == NULL) {
283 card->options.large_send = type;
284 return 0;
285 }
286 if (card->state == CARD_STATE_UP)
287 netif_tx_disable(card->dev);
288 card->options.large_send = type;
289 switch (card->options.large_send) {
290 case QETH_LARGE_SEND_EDDP:
291 card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
292 NETIF_F_HW_CSUM;
293 break;
294 case QETH_LARGE_SEND_TSO:
295 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
296 card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
297 NETIF_F_HW_CSUM;
298 } else {
299 PRINT_WARN("TSO not supported on %s. "
300 "large_send set to 'no'.\n",
301 card->dev->name);
302 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
303 NETIF_F_HW_CSUM);
304 card->options.large_send = QETH_LARGE_SEND_NO;
305 rc = -EOPNOTSUPP;
306 }
307 break;
308 default: /* includes QETH_LARGE_SEND_NO */
309 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
310 NETIF_F_HW_CSUM);
311 break;
312 }
313 if (card->state == CARD_STATE_UP)
314 netif_wake_queue(card->dev);
315 return rc;
316}
317EXPORT_SYMBOL_GPL(qeth_set_large_send);
318
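/*
 * Start the next read CCW on the read channel so the adapter can
 * deliver its next control message. Schedules recovery if the
 * channel program cannot be started.
 */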
319static int qeth_issue_next_read(struct qeth_card *card)
320{
321 int rc;
322 struct qeth_cmd_buffer *iob;
323
324 QETH_DBF_TEXT(trace, 5, "issnxrd");
325 if (card->read.state != CH_STATE_UP)
326 return -EIO;
327 iob = qeth_get_buffer(&card->read);
328 if (!iob) {
329 PRINT_WARN("issue_next_read failed: no iob available!\n");
330 return -ENOMEM;
331 }
332 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
333 QETH_DBF_TEXT(trace, 6, "noirqpnd");
334 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
335 (addr_t) iob, 0, 0);
336 if (rc) {
337 PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc);
338 atomic_set(&card->read.irq_pending, 0);
339 qeth_schedule_recovery(card);
340 wake_up(&card->wait_q);
341 }
342 return rc;
343}
344
345static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
346{
347 struct qeth_reply *reply;
348
349 reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
350 if (reply) {
351 atomic_set(&reply->refcnt, 1);
352 atomic_set(&reply->received, 0);
353 reply->card = card;
354	}
355 return reply;
356}
357
358static void qeth_get_reply(struct qeth_reply *reply)
359{
360 WARN_ON(atomic_read(&reply->refcnt) <= 0);
361 atomic_inc(&reply->refcnt);
362}
363
364static void qeth_put_reply(struct qeth_reply *reply)
365{
366 WARN_ON(atomic_read(&reply->refcnt) <= 0);
367 if (atomic_dec_and_test(&reply->refcnt))
368 kfree(reply);
369}
370
371static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd,
372 struct qeth_card *card)
373{
374 int rc;
375 int com;
376 char *ipa_name;
377
378 com = cmd->hdr.command;
379 rc = cmd->hdr.return_code;
380 ipa_name = qeth_get_ipa_cmd_name(com);
381
382 PRINT_ERR("%s(x%X) for %s returned x%X \"%s\"\n", ipa_name, com,
383 QETH_CARD_IFNAME(card), rc, qeth_get_ipa_msg(rc));
384}
385
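/*
 * Inspect an incoming control buffer. IPA replies are handed back to
 * the caller for reply matching; unsolicited commands such as
 * STOPLAN and STARTLAN are handled here by toggling the carrier
 * state and, if needed, scheduling recovery.
 */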
386static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
387 struct qeth_cmd_buffer *iob)
388{
389 struct qeth_ipa_cmd *cmd = NULL;
390
391 QETH_DBF_TEXT(trace, 5, "chkipad");
392 if (IS_IPA(iob->data)) {
393 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
394 if (IS_IPA_REPLY(cmd)) {
395 if (cmd->hdr.return_code &&
396 (cmd->hdr.command < IPA_CMD_SETCCID ||
397 cmd->hdr.command > IPA_CMD_MODCCID))
398 qeth_issue_ipa_msg(cmd, card);
399 return cmd;
400 } else {
401 switch (cmd->hdr.command) {
402 case IPA_CMD_STOPLAN:
403 PRINT_WARN("Link failure on %s (CHPID 0x%X) - "
404 "there is a network problem or "
405 "someone pulled the cable or "
406 "disabled the port.\n",
407 QETH_CARD_IFNAME(card),
408 card->info.chpid);
409 card->lan_online = 0;
410 if (card->dev && netif_carrier_ok(card->dev))
411 netif_carrier_off(card->dev);
412 return NULL;
413 case IPA_CMD_STARTLAN:
414 PRINT_INFO("Link reestablished on %s "
415 "(CHPID 0x%X). Scheduling "
416 "IP address reset.\n",
417 QETH_CARD_IFNAME(card),
418 card->info.chpid);
419 netif_carrier_on(card->dev);
420 qeth_schedule_recovery(card);
421 return NULL;
422 case IPA_CMD_MODCCID:
423 return cmd;
424 case IPA_CMD_REGISTER_LOCAL_ADDR:
425 QETH_DBF_TEXT(trace, 3, "irla");
426 break;
427 case IPA_CMD_UNREGISTER_LOCAL_ADDR:
428 QETH_DBF_TEXT(trace, 3, "urla");
429 break;
430 default:
431 PRINT_WARN("Received data is IPA "
432 "but not a reply!\n");
433 break;
434 }
435 }
436 }
437 return cmd;
438}
439
440void qeth_clear_ipacmd_list(struct qeth_card *card)
441{
442 struct qeth_reply *reply, *r;
443 unsigned long flags;
444
445 QETH_DBF_TEXT(trace, 4, "clipalst");
446
447 spin_lock_irqsave(&card->lock, flags);
448 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
449 qeth_get_reply(reply);
450 reply->rc = -EIO;
451 atomic_inc(&reply->received);
452 list_del_init(&reply->list);
453 wake_up(&reply->wait_q);
454 qeth_put_reply(reply);
455 }
456 spin_unlock_irqrestore(&card->lock, flags);
457}
458EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
459
460static int qeth_check_idx_response(unsigned char *buffer)
461{
462 if (!buffer)
463 return 0;
464
465 QETH_DBF_HEX(control, 2, buffer, QETH_DBF_CONTROL_LEN);
466 if ((buffer[2] & 0xc0) == 0xc0) {
467 PRINT_WARN("received an IDX TERMINATE "
468 "with cause code 0x%02x%s\n",
469 buffer[4],
470 ((buffer[4] == 0x22) ?
471 " -- try another portname" : ""));
472 QETH_DBF_TEXT(trace, 2, "ckidxres");
473 QETH_DBF_TEXT(trace, 2, " idxterm");
474 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
475 return -EIO;
476 }
477 return 0;
478}
479
480static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob,
481 __u32 len)
482{
483 struct qeth_card *card;
484
485 QETH_DBF_TEXT(trace, 4, "setupccw");
486 card = CARD_FROM_CDEV(channel->ccwdev);
487 if (channel == &card->read)
488 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
489 else
490 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
491 channel->ccw.count = len;
492 channel->ccw.cda = (__u32) __pa(iob);
493}
494
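/*
 * Scan the channel's command buffers for a free one, starting at
 * io_buf_no, and mark it locked. Must be called with iob_lock held;
 * returns NULL if all buffers are in use.
 */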
495static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
496{
497 __u8 index;
498
499 QETH_DBF_TEXT(trace, 6, "getbuff");
500 index = channel->io_buf_no;
501 do {
502 if (channel->iob[index].state == BUF_STATE_FREE) {
503 channel->iob[index].state = BUF_STATE_LOCKED;
504 channel->io_buf_no = (channel->io_buf_no + 1) %
505 QETH_CMD_BUFFER_NO;
506 memset(channel->iob[index].data, 0, QETH_BUFSIZE);
507 return channel->iob + index;
508 }
509 index = (index + 1) % QETH_CMD_BUFFER_NO;
510 } while (index != channel->io_buf_no);
511
512 return NULL;
513}
514
515void qeth_release_buffer(struct qeth_channel *channel,
516 struct qeth_cmd_buffer *iob)
517{
518 unsigned long flags;
519
520 QETH_DBF_TEXT(trace, 6, "relbuff");
521 spin_lock_irqsave(&channel->iob_lock, flags);
522 memset(iob->data, 0, QETH_BUFSIZE);
523 iob->state = BUF_STATE_FREE;
524 iob->callback = qeth_send_control_data_cb;
525 iob->rc = 0;
526 spin_unlock_irqrestore(&channel->iob_lock, flags);
527}
528EXPORT_SYMBOL_GPL(qeth_release_buffer);
529
530static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
531{
532 struct qeth_cmd_buffer *buffer = NULL;
533 unsigned long flags;
534
535 spin_lock_irqsave(&channel->iob_lock, flags);
536 buffer = __qeth_get_buffer(channel);
537 spin_unlock_irqrestore(&channel->iob_lock, flags);
538 return buffer;
539}
540
541struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
542{
543 struct qeth_cmd_buffer *buffer;
544 wait_event(channel->wait_q,
545 ((buffer = qeth_get_buffer(channel)) != NULL));
546 return buffer;
547}
548EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);
549
550void qeth_clear_cmd_buffers(struct qeth_channel *channel)
551{
552 int cnt;
553
554 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
555 qeth_release_buffer(channel, &channel->iob[cnt]);
556 channel->buf_no = 0;
557 channel->io_buf_no = 0;
558}
559EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
560
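/*
 * Default callback for completed control I/O: validate the IDX
 * response, hand OSN commands to the registered assist callback, and
 * match IPA replies against cmd_waiter_list so that the waiters in
 * qeth_send_control_data() are woken up.
 */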
561static void qeth_send_control_data_cb(struct qeth_channel *channel,
562 struct qeth_cmd_buffer *iob)
563{
564 struct qeth_card *card;
565 struct qeth_reply *reply, *r;
566 struct qeth_ipa_cmd *cmd;
567 unsigned long flags;
568 int keep_reply;
569
570 QETH_DBF_TEXT(trace, 4, "sndctlcb");
571
572 card = CARD_FROM_CDEV(channel->ccwdev);
573 if (qeth_check_idx_response(iob->data)) {
574 qeth_clear_ipacmd_list(card);
575 qeth_schedule_recovery(card);
576 goto out;
577 }
578
579 cmd = qeth_check_ipa_data(card, iob);
580 if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
581 goto out;
582	/* in case of OSN: check if cmd is set */
583 if (card->info.type == QETH_CARD_TYPE_OSN &&
584 cmd &&
585 cmd->hdr.command != IPA_CMD_STARTLAN &&
586 card->osn_info.assist_cb != NULL) {
587 card->osn_info.assist_cb(card->dev, cmd);
588 goto out;
589 }
590
591 spin_lock_irqsave(&card->lock, flags);
592 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
593 if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
594 ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
595 qeth_get_reply(reply);
596 list_del_init(&reply->list);
597 spin_unlock_irqrestore(&card->lock, flags);
598 keep_reply = 0;
599 if (reply->callback != NULL) {
600 if (cmd) {
601 reply->offset = (__u16)((char *)cmd -
602 (char *)iob->data);
603 keep_reply = reply->callback(card,
604 reply,
605 (unsigned long)cmd);
606 } else
607 keep_reply = reply->callback(card,
608 reply,
609 (unsigned long)iob);
610 }
611 if (cmd)
612 reply->rc = (u16) cmd->hdr.return_code;
613 else if (iob->rc)
614 reply->rc = iob->rc;
615 if (keep_reply) {
616 spin_lock_irqsave(&card->lock, flags);
617 list_add_tail(&reply->list,
618 &card->cmd_waiter_list);
619 spin_unlock_irqrestore(&card->lock, flags);
620 } else {
621 atomic_inc(&reply->received);
622 wake_up(&reply->wait_q);
623 }
624 qeth_put_reply(reply);
625 goto out;
626 }
627 }
628 spin_unlock_irqrestore(&card->lock, flags);
629out:
630 memcpy(&card->seqno.pdu_hdr_ack,
631 QETH_PDU_HEADER_SEQ_NO(iob->data),
632 QETH_SEQ_NO_LENGTH);
633 qeth_release_buffer(channel, iob);
634}
635
636static int qeth_setup_channel(struct qeth_channel *channel)
637{
638 int cnt;
639
640 QETH_DBF_TEXT(setup, 2, "setupch");
641 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
642 channel->iob[cnt].data = (char *)
643 kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
644 if (channel->iob[cnt].data == NULL)
645 break;
646 channel->iob[cnt].state = BUF_STATE_FREE;
647 channel->iob[cnt].channel = channel;
648 channel->iob[cnt].callback = qeth_send_control_data_cb;
649 channel->iob[cnt].rc = 0;
650 }
651 if (cnt < QETH_CMD_BUFFER_NO) {
652 while (cnt-- > 0)
653 kfree(channel->iob[cnt].data);
654 return -ENOMEM;
655 }
656 channel->buf_no = 0;
657 channel->io_buf_no = 0;
658 atomic_set(&channel->irq_pending, 0);
659 spin_lock_init(&channel->iob_lock);
660
661 init_waitqueue_head(&channel->wait_q);
662 return 0;
663}
664
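/*
 * Request the start of a card thread; fails with -EPERM if the
 * thread type is not allowed or a start is already pending.
 */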
665static int qeth_set_thread_start_bit(struct qeth_card *card,
666 unsigned long thread)
667{
668 unsigned long flags;
669
670 spin_lock_irqsave(&card->thread_mask_lock, flags);
671 if (!(card->thread_allowed_mask & thread) ||
672 (card->thread_start_mask & thread)) {
673 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
674 return -EPERM;
675 }
676 card->thread_start_mask |= thread;
677 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
678 return 0;
679}
680
681void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
682{
683 unsigned long flags;
684
685 spin_lock_irqsave(&card->thread_mask_lock, flags);
686 card->thread_start_mask &= ~thread;
687 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
688 wake_up(&card->wait_q);
689}
690EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
691
692void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
693{
694 unsigned long flags;
695
696 spin_lock_irqsave(&card->thread_mask_lock, flags);
697 card->thread_running_mask &= ~thread;
698 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
699 wake_up(&card->wait_q);
700}
701EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
702
703static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
704{
705 unsigned long flags;
706 int rc = 0;
707
708 spin_lock_irqsave(&card->thread_mask_lock, flags);
709 if (card->thread_start_mask & thread) {
710 if ((card->thread_allowed_mask & thread) &&
711 !(card->thread_running_mask & thread)) {
712 rc = 1;
713 card->thread_start_mask &= ~thread;
714 card->thread_running_mask |= thread;
715 } else
716 rc = -EPERM;
717 }
718 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
719 return rc;
720}
721
722int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
723{
724 int rc = 0;
725
726 wait_event(card->wait_q,
727 (rc = __qeth_do_run_thread(card, thread)) >= 0);
728 return rc;
729}
730EXPORT_SYMBOL_GPL(qeth_do_run_thread);
731
732void qeth_schedule_recovery(struct qeth_card *card)
733{
734 QETH_DBF_TEXT(trace, 2, "startrec");
735 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
736 schedule_work(&card->kernel_thread_starter);
737}
738EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
739
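/*
 * Evaluate the channel and device status of an IRB. Returns non-zero
 * for conditions that require recovery (channel checks, resetting
 * event, 0xaffe sense); returns zero for conditions that can be
 * tolerated (command reject, all-zero sense data).
 */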
740static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
741{
742 int dstat, cstat;
743 char *sense;
744
745 sense = (char *) irb->ecw;
746 cstat = irb->scsw.cstat;
747 dstat = irb->scsw.dstat;
748
749 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
750 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
751 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
752 QETH_DBF_TEXT(trace, 2, "CGENCHK");
753 PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ",
754 cdev->dev.bus_id, dstat, cstat);
755 print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
756 16, 1, irb, 64, 1);
757 return 1;
758 }
759
760 if (dstat & DEV_STAT_UNIT_CHECK) {
761 if (sense[SENSE_RESETTING_EVENT_BYTE] &
762 SENSE_RESETTING_EVENT_FLAG) {
763 QETH_DBF_TEXT(trace, 2, "REVIND");
764 return 1;
765 }
766 if (sense[SENSE_COMMAND_REJECT_BYTE] &
767 SENSE_COMMAND_REJECT_FLAG) {
768 QETH_DBF_TEXT(trace, 2, "CMDREJi");
769 return 0;
770 }
771 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
772 QETH_DBF_TEXT(trace, 2, "AFFE");
773 return 1;
774 }
775 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
776 QETH_DBF_TEXT(trace, 2, "ZEROSEN");
777 return 0;
778 }
779 QETH_DBF_TEXT(trace, 2, "DGENCHK");
780 return 1;
781 }
782 return 0;
783}
784
785static long __qeth_check_irb_error(struct ccw_device *cdev,
786 unsigned long intparm, struct irb *irb)
787{
788 if (!IS_ERR(irb))
789 return 0;
790
791 switch (PTR_ERR(irb)) {
792 case -EIO:
793 PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
794 QETH_DBF_TEXT(trace, 2, "ckirberr");
795 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
796 break;
797 case -ETIMEDOUT:
798 PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
799 QETH_DBF_TEXT(trace, 2, "ckirberr");
800 QETH_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
801 if (intparm == QETH_RCD_PARM) {
802 struct qeth_card *card = CARD_FROM_CDEV(cdev);
803
804 if (card && (card->data.ccwdev == cdev)) {
805 card->data.state = CH_STATE_DOWN;
806 wake_up(&card->wait_q);
807 }
808 }
809 break;
810 default:
811 PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
812 cdev->dev.bus_id);
813 QETH_DBF_TEXT(trace, 2, "ckirberr");
814 QETH_DBF_TEXT(trace, 2, " rc???");
815 }
816 return PTR_ERR(irb);
817}
818
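/*
 * Common interrupt handler for the read, write and data channels:
 * tracks channel state transitions, turns serious unit checks into a
 * recovery, and runs the completion callbacks of all command buffers
 * that have been processed.
 */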
819static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
820 struct irb *irb)
821{
822 int rc;
823 int cstat, dstat;
824 struct qeth_cmd_buffer *buffer;
825 struct qeth_channel *channel;
826 struct qeth_card *card;
827 struct qeth_cmd_buffer *iob;
828 __u8 index;
829
830 QETH_DBF_TEXT(trace, 5, "irq");
831
832 if (__qeth_check_irb_error(cdev, intparm, irb))
833 return;
834 cstat = irb->scsw.cstat;
835 dstat = irb->scsw.dstat;
836
837 card = CARD_FROM_CDEV(cdev);
838 if (!card)
839 return;
840
841 if (card->read.ccwdev == cdev) {
842 channel = &card->read;
843 QETH_DBF_TEXT(trace, 5, "read");
844 } else if (card->write.ccwdev == cdev) {
845 channel = &card->write;
846 QETH_DBF_TEXT(trace, 5, "write");
847 } else {
848 channel = &card->data;
849 QETH_DBF_TEXT(trace, 5, "data");
850 }
851 atomic_set(&channel->irq_pending, 0);
852
853 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC))
854 channel->state = CH_STATE_STOPPED;
855
856 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC))
857 channel->state = CH_STATE_HALTED;
858
859	/* let's wake up immediately on the data channel */
860 if ((channel == &card->data) && (intparm != 0) &&
861 (intparm != QETH_RCD_PARM))
862 goto out;
863
864 if (intparm == QETH_CLEAR_CHANNEL_PARM) {
865 QETH_DBF_TEXT(trace, 6, "clrchpar");
866 /* we don't have to handle this further */
867 intparm = 0;
868 }
869 if (intparm == QETH_HALT_CHANNEL_PARM) {
870 QETH_DBF_TEXT(trace, 6, "hltchpar");
871 /* we don't have to handle this further */
872 intparm = 0;
873 }
874 if ((dstat & DEV_STAT_UNIT_EXCEP) ||
875 (dstat & DEV_STAT_UNIT_CHECK) ||
876 (cstat)) {
877 if (irb->esw.esw0.erw.cons) {
878 /* TODO: we should make this s390dbf */
879 PRINT_WARN("sense data available on channel %s.\n",
880 CHANNEL_ID(channel));
881 PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat);
882 print_hex_dump(KERN_WARNING, "qeth: irb ",
883 DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
884 print_hex_dump(KERN_WARNING, "qeth: sense data ",
885 DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
886 }
887 if (intparm == QETH_RCD_PARM) {
888 channel->state = CH_STATE_DOWN;
889 goto out;
890 }
891 rc = qeth_get_problem(cdev, irb);
892 if (rc) {
893 qeth_schedule_recovery(card);
894 goto out;
895 }
896 }
897
898 if (intparm == QETH_RCD_PARM) {
899 channel->state = CH_STATE_RCD_DONE;
900 goto out;
901 }
902 if (intparm) {
903 buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
904 buffer->state = BUF_STATE_PROCESSED;
905 }
906 if (channel == &card->data)
907 return;
908 if (channel == &card->read &&
909 channel->state == CH_STATE_UP)
910 qeth_issue_next_read(card);
911
912 iob = channel->iob;
913 index = channel->buf_no;
914 while (iob[index].state == BUF_STATE_PROCESSED) {
915 if (iob[index].callback != NULL)
916 iob[index].callback(channel, iob + index);
917
918 index = (index + 1) % QETH_CMD_BUFFER_NO;
919 }
920 channel->buf_no = index;
921out:
922 wake_up(&card->wait_q);
923 return;
924}
925
926static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
927 struct qeth_qdio_out_buffer *buf)
928{
929 int i;
930 struct sk_buff *skb;
931
932 /* is PCI flag set on buffer? */
933 if (buf->buffer->element[0].flags & 0x40)
934 atomic_dec(&queue->set_pci_flags_count);
935
936 skb = skb_dequeue(&buf->skb_list);
937 while (skb) {
938 atomic_dec(&skb->users);
939 dev_kfree_skb_any(skb);
940 skb = skb_dequeue(&buf->skb_list);
941 }
942 qeth_eddp_buf_release_contexts(buf);
943 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
944 buf->buffer->element[i].length = 0;
945 buf->buffer->element[i].addr = NULL;
946 buf->buffer->element[i].flags = 0;
947 }
948 buf->next_element_to_fill = 0;
949 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
950}
951
952void qeth_clear_qdio_buffers(struct qeth_card *card)
953{
954 int i, j;
955
956	QETH_DBF_TEXT(trace, 2, "clrqdbf");
957 /* clear outbound buffers to free skbs */
958 for (i = 0; i < card->qdio.no_out_queues; ++i)
959 if (card->qdio.out_qs[i]) {
960 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
961 qeth_clear_output_buffer(card->qdio.out_qs[i],
962 &card->qdio.out_qs[i]->bufs[j]);
963 }
964}
965EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
966
967static void qeth_free_buffer_pool(struct qeth_card *card)
968{
969 struct qeth_buffer_pool_entry *pool_entry, *tmp;
970 int i = 0;
971 QETH_DBF_TEXT(trace, 5, "freepool");
972 list_for_each_entry_safe(pool_entry, tmp,
973 &card->qdio.init_pool.entry_list, init_list){
974 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
975 free_page((unsigned long)pool_entry->elements[i]);
976 list_del(&pool_entry->init_list);
977 kfree(pool_entry);
978 }
979}
980
981static void qeth_free_qdio_buffers(struct qeth_card *card)
982{
983 int i, j;
984
985 QETH_DBF_TEXT(trace, 2, "freeqdbf");
986 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
987 QETH_QDIO_UNINITIALIZED)
988 return;
989 kfree(card->qdio.in_q);
990 card->qdio.in_q = NULL;
991 /* inbound buffer pool */
992 qeth_free_buffer_pool(card);
993 /* free outbound qdio_qs */
994 if (card->qdio.out_qs) {
995 for (i = 0; i < card->qdio.no_out_queues; ++i) {
996 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
997 qeth_clear_output_buffer(card->qdio.out_qs[i],
998 &card->qdio.out_qs[i]->bufs[j]);
999 kfree(card->qdio.out_qs[i]);
1000 }
1001 kfree(card->qdio.out_qs);
1002 card->qdio.out_qs = NULL;
1003 }
1004}
1005
1006static void qeth_clean_channel(struct qeth_channel *channel)
1007{
1008 int cnt;
1009
1010 QETH_DBF_TEXT(setup, 2, "freech");
1011 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1012 kfree(channel->iob[cnt].data);
1013}
1014
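/*
 * Check the channel-path descriptor of the data device; a set CHPP
 * bit indicates an adapter that supports only a single output queue.
 */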
1015static int qeth_is_1920_device(struct qeth_card *card)
1016{
1017 int single_queue = 0;
1018 struct ccw_device *ccwdev;
1019 struct channelPath_dsc {
1020 u8 flags;
1021 u8 lsn;
1022 u8 desc;
1023 u8 chpid;
1024 u8 swla;
1025 u8 zeroes;
1026 u8 chla;
1027 u8 chpp;
1028 } *chp_dsc;
1029
1030 QETH_DBF_TEXT(setup, 2, "chk_1920");
1031
1032 ccwdev = card->data.ccwdev;
1033 chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
1034 if (chp_dsc != NULL) {
1035 /* CHPP field bit 6 == 1 -> single queue */
1036 single_queue = ((chp_dsc->chpp & 0x02) == 0x02);
1037 kfree(chp_dsc);
1038 }
1039 QETH_DBF_TEXT_(setup, 2, "rc:%x", single_queue);
1040 return single_queue;
1041}
1042
1043static void qeth_init_qdio_info(struct qeth_card *card)
1044{
1045 QETH_DBF_TEXT(setup, 4, "intqdinf");
1046 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
1047 /* inbound */
1048 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1049 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
1050 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
1051 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
1052 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
1053}
1054
1055 static void qeth_set_initial_options(struct qeth_card *card)
1056{
1057 card->options.route4.type = NO_ROUTER;
1058 card->options.route6.type = NO_ROUTER;
1059 card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
1060 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
1061 card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
1062 card->options.fake_broadcast = 0;
1063 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1064 card->options.fake_ll = 0;
1065 card->options.performance_stats = 0;
1066 card->options.rx_sg_cb = QETH_RX_SG_CB;
1067}
1068
1069static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1070{
1071 unsigned long flags;
1072 int rc = 0;
1073
1074 spin_lock_irqsave(&card->thread_mask_lock, flags);
1075 QETH_DBF_TEXT_(trace, 4, " %02x%02x%02x",
1076 (u8) card->thread_start_mask,
1077 (u8) card->thread_allowed_mask,
1078 (u8) card->thread_running_mask);
1079 rc = (card->thread_start_mask & thread);
1080 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1081 return rc;
1082}
1083
1084static void qeth_start_kernel_thread(struct work_struct *work)
1085{
1086 struct qeth_card *card = container_of(work, struct qeth_card,
1087 kernel_thread_starter);
1088 QETH_DBF_TEXT(trace , 2, "strthrd");
1089
1090 if (card->read.state != CH_STATE_UP &&
1091 card->write.state != CH_STATE_UP)
1092 return;
1093 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
1094 kthread_run(card->discipline.recover, (void *) card,
1095 "qeth_recover");
1096}
1097
1098static int qeth_setup_card(struct qeth_card *card)
1099{
1100
1101 QETH_DBF_TEXT(setup, 2, "setupcrd");
1102 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
1103
1104 card->read.state = CH_STATE_DOWN;
1105 card->write.state = CH_STATE_DOWN;
1106 card->data.state = CH_STATE_DOWN;
1107 card->state = CARD_STATE_DOWN;
1108 card->lan_online = 0;
1109 card->use_hard_stop = 0;
1110 card->dev = NULL;
1111 spin_lock_init(&card->vlanlock);
1112 spin_lock_init(&card->mclock);
1113 card->vlangrp = NULL;
1114 spin_lock_init(&card->lock);
1115 spin_lock_init(&card->ip_lock);
1116 spin_lock_init(&card->thread_mask_lock);
1117 card->thread_start_mask = 0;
1118 card->thread_allowed_mask = 0;
1119 card->thread_running_mask = 0;
1120 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1121 INIT_LIST_HEAD(&card->ip_list);
1122 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1123 if (!card->ip_tbd_list) {
1124 QETH_DBF_TEXT(setup, 0, "iptbdnom");
1125 return -ENOMEM;
1126 }
1127 INIT_LIST_HEAD(card->ip_tbd_list);
1128 INIT_LIST_HEAD(&card->cmd_waiter_list);
1129 init_waitqueue_head(&card->wait_q);
1130	/* initial options */
1131	qeth_set_initial_options(card);
1132 /* IP address takeover */
1133 INIT_LIST_HEAD(&card->ipato.entries);
1134 card->ipato.enabled = 0;
1135 card->ipato.invert4 = 0;
1136 card->ipato.invert6 = 0;
1137 /* init QDIO stuff */
1138 qeth_init_qdio_info(card);
1139 return 0;
1140}
1141
1142static struct qeth_card *qeth_alloc_card(void)
1143{
1144 struct qeth_card *card;
1145
1146 QETH_DBF_TEXT(setup, 2, "alloccrd");
1147 card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
1148 if (!card)
1149 return NULL;
1150 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
1151 if (qeth_setup_channel(&card->read)) {
1152 kfree(card);
1153 return NULL;
1154 }
1155 if (qeth_setup_channel(&card->write)) {
1156 qeth_clean_channel(&card->read);
1157 kfree(card);
1158 return NULL;
1159 }
1160 card->options.layer2 = -1;
1161 return card;
1162}
1163
1164static int qeth_determine_card_type(struct qeth_card *card)
1165{
1166 int i = 0;
1167
1168 QETH_DBF_TEXT(setup, 2, "detcdtyp");
1169
1170 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1171 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1172 while (known_devices[i][4]) {
1173 if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
1174 (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
1175 card->info.type = known_devices[i][4];
1176 card->qdio.no_out_queues = known_devices[i][8];
1177 card->info.is_multicast_different = known_devices[i][9];
1178 if (qeth_is_1920_device(card)) {
1179 PRINT_INFO("Priority Queueing not able "
1180 "due to hardware limitations!\n");
1181 card->qdio.no_out_queues = 1;
1182 card->qdio.default_out_queue = 0;
1183 }
1184 return 0;
1185 }
1186 i++;
1187 }
1188 card->info.type = QETH_CARD_TYPE_UNKNOWN;
1189 PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card));
1190 return -ENOENT;
1191}
1192
1193static int qeth_clear_channel(struct qeth_channel *channel)
1194{
1195 unsigned long flags;
1196 struct qeth_card *card;
1197 int rc;
1198
1199 QETH_DBF_TEXT(trace, 3, "clearch");
1200 card = CARD_FROM_CDEV(channel->ccwdev);
1201 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1202 rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
1203 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1204
1205 if (rc)
1206 return rc;
1207 rc = wait_event_interruptible_timeout(card->wait_q,
1208 channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
1209 if (rc == -ERESTARTSYS)
1210 return rc;
1211 if (channel->state != CH_STATE_STOPPED)
1212 return -ETIME;
1213 channel->state = CH_STATE_DOWN;
1214 return 0;
1215}
1216
1217static int qeth_halt_channel(struct qeth_channel *channel)
1218{
1219 unsigned long flags;
1220 struct qeth_card *card;
1221 int rc;
1222
1223 QETH_DBF_TEXT(trace, 3, "haltch");
1224 card = CARD_FROM_CDEV(channel->ccwdev);
1225 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1226 rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
1227 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1228
1229 if (rc)
1230 return rc;
1231 rc = wait_event_interruptible_timeout(card->wait_q,
1232 channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
1233 if (rc == -ERESTARTSYS)
1234 return rc;
1235 if (channel->state != CH_STATE_HALTED)
1236 return -ETIME;
1237 return 0;
1238}
1239
1240static int qeth_halt_channels(struct qeth_card *card)
1241{
1242 int rc1 = 0, rc2 = 0, rc3 = 0;
1243
1244 QETH_DBF_TEXT(trace, 3, "haltchs");
1245 rc1 = qeth_halt_channel(&card->read);
1246 rc2 = qeth_halt_channel(&card->write);
1247 rc3 = qeth_halt_channel(&card->data);
1248 if (rc1)
1249 return rc1;
1250 if (rc2)
1251 return rc2;
1252 return rc3;
1253}
1254
1255static int qeth_clear_channels(struct qeth_card *card)
1256{
1257 int rc1 = 0, rc2 = 0, rc3 = 0;
1258
1259 QETH_DBF_TEXT(trace, 3, "clearchs");
1260 rc1 = qeth_clear_channel(&card->read);
1261 rc2 = qeth_clear_channel(&card->write);
1262 rc3 = qeth_clear_channel(&card->data);
1263 if (rc1)
1264 return rc1;
1265 if (rc2)
1266 return rc2;
1267 return rc3;
1268}
1269
1270static int qeth_clear_halt_card(struct qeth_card *card, int halt)
1271{
1272 int rc = 0;
1273
1274 QETH_DBF_TEXT(trace, 3, "clhacrd");
1275 QETH_DBF_HEX(trace, 3, &card, sizeof(void *));
1276
1277 if (halt)
1278 rc = qeth_halt_channels(card);
1279 if (rc)
1280 return rc;
1281 return qeth_clear_channels(card);
1282}
1283
1284int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
1285{
1286 int rc = 0;
1287
1288 QETH_DBF_TEXT(trace, 3, "qdioclr");
1289 switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
1290 QETH_QDIO_CLEANING)) {
1291 case QETH_QDIO_ESTABLISHED:
1292 if (card->info.type == QETH_CARD_TYPE_IQD)
1293 rc = qdio_cleanup(CARD_DDEV(card),
1294 QDIO_FLAG_CLEANUP_USING_HALT);
1295 else
1296 rc = qdio_cleanup(CARD_DDEV(card),
1297 QDIO_FLAG_CLEANUP_USING_CLEAR);
1298 if (rc)
1299 QETH_DBF_TEXT_(trace, 3, "1err%d", rc);
1300 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
1301 break;
1302 case QETH_QDIO_CLEANING:
1303 return rc;
1304 default:
1305 break;
1306 }
1307 rc = qeth_clear_halt_card(card, use_halt);
1308 if (rc)
1309 QETH_DBF_TEXT_(trace, 3, "2err%d", rc);
1310 card->state = CARD_STATE_DOWN;
1311 return rc;
1312}
1313EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
1314
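/*
 * Read the device's configuration data via a Read Configuration Data
 * (RCD) channel program on the data channel, if the extended SenseID
 * data advertises one. On success, *buffer points to a freshly
 * allocated copy that the caller must kfree().
 */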
1315static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
1316 int *length)
1317{
1318 struct ciw *ciw;
1319 char *rcd_buf;
1320 int ret;
1321 struct qeth_channel *channel = &card->data;
1322 unsigned long flags;
1323
1324 /*
1325 * scan for RCD command in extended SenseID data
1326 */
1327 ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
1328 if (!ciw || ciw->cmd == 0)
1329 return -EOPNOTSUPP;
1330 rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
1331 if (!rcd_buf)
1332 return -ENOMEM;
1333
1334 channel->ccw.cmd_code = ciw->cmd;
1335 channel->ccw.cda = (__u32) __pa(rcd_buf);
1336 channel->ccw.count = ciw->count;
1337 channel->ccw.flags = CCW_FLAG_SLI;
1338 channel->state = CH_STATE_RCD;
1339 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1340 ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
1341 QETH_RCD_PARM, LPM_ANYPATH, 0,
1342 QETH_RCD_TIMEOUT);
1343 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1344 if (!ret)
1345 wait_event(card->wait_q,
1346 (channel->state == CH_STATE_RCD_DONE ||
1347 channel->state == CH_STATE_DOWN));
1348 if (channel->state == CH_STATE_DOWN)
1349 ret = -EIO;
1350 else
1351 channel->state = CH_STATE_DOWN;
1352 if (ret) {
1353 kfree(rcd_buf);
1354 *buffer = NULL;
1355 *length = 0;
1356 } else {
1357 *length = ciw->count;
1358 *buffer = rcd_buf;
1359 }
1360 return ret;
1361}
1362
1363static int qeth_get_unitaddr(struct qeth_card *card)
1364{
1365 int length;
1366 char *prcd;
1367 int rc;
1368
1369 QETH_DBF_TEXT(setup, 2, "getunit");
1370 rc = qeth_read_conf_data(card, (void **) &prcd, &length);
1371 if (rc) {
1372 PRINT_ERR("qeth_read_conf_data for device %s returned %i\n",
1373 CARD_DDEV_ID(card), rc);
1374 return rc;
1375 }
1376 card->info.chpid = prcd[30];
1377 card->info.unit_addr2 = prcd[31];
1378 card->info.cula = prcd[63];
1379 card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
1380 (prcd[0x11] == _ascebc['M']));
1381 kfree(prcd);
1382 return 0;
1383}
1384
1385static void qeth_init_tokens(struct qeth_card *card)
1386{
1387 card->token.issuer_rm_w = 0x00010103UL;
1388 card->token.cm_filter_w = 0x00010108UL;
1389 card->token.cm_connection_w = 0x0001010aUL;
1390 card->token.ulp_filter_w = 0x0001010bUL;
1391 card->token.ulp_connection_w = 0x0001010dUL;
1392}
1393
1394static void qeth_init_func_level(struct qeth_card *card)
1395{
1396 if (card->ipato.enabled) {
1397 if (card->info.type == QETH_CARD_TYPE_IQD)
1398 card->info.func_level =
1399 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
1400 else
1401 card->info.func_level =
1402 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
1403 } else {
1404 if (card->info.type == QETH_CARD_TYPE_IQD)
1405			/* FIXME: why do we have the same values for
1406			 * disabled and enabled for OSAE? */
1407 card->info.func_level =
1408 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
1409 else
1410 card->info.func_level =
1411 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
1412 }
1413}
1414
1415static inline __u16 qeth_raw_devno_from_bus_id(char *id)
1416{
1417 id += (strlen(id) - 4);
1418 return (__u16) simple_strtoul(id, &id, 16);
1419}
1420
1421static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
1422 void (*idx_reply_cb)(struct qeth_channel *,
1423 struct qeth_cmd_buffer *))
1424{
1425 struct qeth_cmd_buffer *iob;
1426 unsigned long flags;
1427 int rc;
1428 struct qeth_card *card;
1429
1430 QETH_DBF_TEXT(setup, 2, "idxanswr");
1431 card = CARD_FROM_CDEV(channel->ccwdev);
1432 iob = qeth_get_buffer(channel);
1433 iob->callback = idx_reply_cb;
1434 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1435 channel->ccw.count = QETH_BUFSIZE;
1436 channel->ccw.cda = (__u32) __pa(iob->data);
1437
1438 wait_event(card->wait_q,
1439 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1440 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1441 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1442 rc = ccw_device_start(channel->ccwdev,
1443 &channel->ccw, (addr_t) iob, 0, 0);
1444 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1445
1446 if (rc) {
1447 PRINT_ERR("Error2 in activating channel rc=%d\n", rc);
1448 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1449 atomic_set(&channel->irq_pending, 0);
1450 wake_up(&card->wait_q);
1451 return rc;
1452 }
1453 rc = wait_event_interruptible_timeout(card->wait_q,
1454 channel->state == CH_STATE_UP, QETH_TIMEOUT);
1455 if (rc == -ERESTARTSYS)
1456 return rc;
1457 if (channel->state != CH_STATE_UP) {
1458 rc = -ETIME;
1459 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
1460 qeth_clear_cmd_buffers(channel);
1461 } else
1462 rc = 0;
1463 return rc;
1464}
1465
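/*
 * Send an IDX ACTIVATE request on the given channel. The request is
 * filled with the port number, issuer token, function level and the
 * device addresses before the channel program is started; the answer
 * is then collected by qeth_idx_activate_get_answer().
 */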
1466static int qeth_idx_activate_channel(struct qeth_channel *channel,
1467 void (*idx_reply_cb)(struct qeth_channel *,
1468 struct qeth_cmd_buffer *))
1469{
1470 struct qeth_card *card;
1471 struct qeth_cmd_buffer *iob;
1472 unsigned long flags;
1473 __u16 temp;
1474 __u8 tmp;
1475 int rc;
1476
1477 card = CARD_FROM_CDEV(channel->ccwdev);
1478
1479 QETH_DBF_TEXT(setup, 2, "idxactch");
1480
1481 iob = qeth_get_buffer(channel);
1482 iob->callback = idx_reply_cb;
1483 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1484 channel->ccw.count = IDX_ACTIVATE_SIZE;
1485 channel->ccw.cda = (__u32) __pa(iob->data);
1486 if (channel == &card->write) {
1487 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
1488 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1489 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1490 card->seqno.trans_hdr++;
1491 } else {
1492 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
1493 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1494 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1495 }
1496 tmp = ((__u8)card->info.portno) | 0x80;
1497 memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
1498 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1499 &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
1500 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
1501 &card->info.func_level, sizeof(__u16));
1502 temp = qeth_raw_devno_from_bus_id(CARD_DDEV_ID(card));
1503 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2);
1504 temp = (card->info.cula << 8) + card->info.unit_addr2;
1505 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
1506
1507 wait_event(card->wait_q,
1508 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1509 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1510 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1511 rc = ccw_device_start(channel->ccwdev,
1512 &channel->ccw, (addr_t) iob, 0, 0);
1513 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1514
1515 if (rc) {
1516 PRINT_ERR("Error1 in activating channel. rc=%d\n", rc);
1517 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
1518 atomic_set(&channel->irq_pending, 0);
1519 wake_up(&card->wait_q);
1520 return rc;
1521 }
1522 rc = wait_event_interruptible_timeout(card->wait_q,
1523 channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
1524 if (rc == -ERESTARTSYS)
1525 return rc;
1526 if (channel->state != CH_STATE_ACTIVATING) {
1527 PRINT_WARN("IDX activate timed out!\n");
1528 QETH_DBF_TEXT_(setup, 2, "2err%d", -ETIME);
1529 qeth_clear_cmd_buffers(channel);
1530 return -ETIME;
1531 }
1532 return qeth_idx_activate_get_answer(channel, idx_reply_cb);
1533}
1534
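/*
 * Map the local IDX function level to the value the peer is expected
 * to report back in its reply.
 */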
1535static int qeth_peer_func_level(int level)
1536{
1537 if ((level & 0xff) == 8)
1538 return (level & 0xff) + 0x400;
1539 if (((level >> 8) & 3) == 1)
1540 return (level & 0xff) + 0x200;
1541 return level;
1542}
1543
1544static void qeth_idx_write_cb(struct qeth_channel *channel,
1545 struct qeth_cmd_buffer *iob)
1546{
1547 struct qeth_card *card;
1548 __u16 temp;
1549
1550 QETH_DBF_TEXT(setup , 2, "idxwrcb");
1551
1552 if (channel->state == CH_STATE_DOWN) {
1553 channel->state = CH_STATE_ACTIVATING;
1554 goto out;
1555 }
1556 card = CARD_FROM_CDEV(channel->ccwdev);
1557
1558 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1559 if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
1560 PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
1561 "adapter exclusively used by another host\n",
1562 CARD_WDEV_ID(card));
1563 else
1564 PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
1565 "negative reply\n", CARD_WDEV_ID(card));
1566 goto out;
1567 }
1568 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1569 if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
1570 PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
1571 "function level mismatch "
1572 "(sent: 0x%x, received: 0x%x)\n",
1573 CARD_WDEV_ID(card), card->info.func_level, temp);
1574 goto out;
1575 }
1576 channel->state = CH_STATE_UP;
1577out:
1578 qeth_release_buffer(channel, iob);
1579}
1580
1581static void qeth_idx_read_cb(struct qeth_channel *channel,
1582 struct qeth_cmd_buffer *iob)
1583{
1584 struct qeth_card *card;
1585 __u16 temp;
1586
1587 QETH_DBF_TEXT(setup , 2, "idxrdcb");
1588 if (channel->state == CH_STATE_DOWN) {
1589 channel->state = CH_STATE_ACTIVATING;
1590 goto out;
1591 }
1592
1593 card = CARD_FROM_CDEV(channel->ccwdev);
1594 if (qeth_check_idx_response(iob->data))
1595 goto out;
1596
1597 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1598 if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
1599 PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
1600 "adapter exclusively used by another host\n",
1601 CARD_RDEV_ID(card));
1602 else
1603 PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
1604 "negative reply\n", CARD_RDEV_ID(card));
1605 goto out;
1606 }
1607
1608	/*
1609	 * Temporary fix for a microcode bug.
1610	 * To revert it, replace OR by AND.
1611	 */
1612 if ((!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
1613 (card->info.type == QETH_CARD_TYPE_OSAE))
1614 card->info.portname_required = 1;
1615
1616 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1617 if (temp != qeth_peer_func_level(card->info.func_level)) {
1618 PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
1619 "level mismatch (sent: 0x%x, received: 0x%x)\n",
1620 CARD_RDEV_ID(card), card->info.func_level, temp);
1621 goto out;
1622 }
1623 memcpy(&card->token.issuer_rm_r,
1624 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1625 QETH_MPC_TOKEN_LENGTH);
1626 memcpy(&card->info.mcl_level[0],
1627 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
1628 channel->state = CH_STATE_UP;
1629out:
1630 qeth_release_buffer(channel, iob);
1631}
1632
1633void qeth_prepare_control_data(struct qeth_card *card, int len,
1634 struct qeth_cmd_buffer *iob)
1635{
1636 qeth_setup_ccw(&card->write, iob->data, len);
1637 iob->callback = qeth_release_buffer;
1638
1639 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1640 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1641 card->seqno.trans_hdr++;
1642 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
1643 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
1644 card->seqno.pdu_hdr++;
1645 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
1646 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
1647 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1648}
1649EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
1650
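/*
 * Synchronously issue a control command on the write channel: queue
 * a reply object on cmd_waiter_list, start the channel program, and
 * poll until the reply callback has fired or the timeout expires.
 */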
1651int qeth_send_control_data(struct qeth_card *card, int len,
1652 struct qeth_cmd_buffer *iob,
1653 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
1654 unsigned long),
1655 void *reply_param)
1656{
1657 int rc;
1658 unsigned long flags;
1659 struct qeth_reply *reply = NULL;
1660 unsigned long timeout;
1661
1662 QETH_DBF_TEXT(trace, 2, "sendctl");
1663
1664 reply = qeth_alloc_reply(card);
1665 if (!reply) {
1666 PRINT_WARN("Could no alloc qeth_reply!\n");
1667 return -ENOMEM;
1668 }
1669 reply->callback = reply_cb;
1670 reply->param = reply_param;
1671 if (card->state == CARD_STATE_DOWN)
1672 reply->seqno = QETH_IDX_COMMAND_SEQNO;
1673 else
1674 reply->seqno = card->seqno.ipa++;
1675 init_waitqueue_head(&reply->wait_q);
1676 spin_lock_irqsave(&card->lock, flags);
1677 list_add_tail(&reply->list, &card->cmd_waiter_list);
1678 spin_unlock_irqrestore(&card->lock, flags);
1679 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1680
1681 while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
1682 qeth_prepare_control_data(card, len, iob);
1683
1684 if (IS_IPA(iob->data))
1685 timeout = jiffies + QETH_IPA_TIMEOUT;
1686 else
1687 timeout = jiffies + QETH_TIMEOUT;
1688
1689 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1690 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1691 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1692 (addr_t) iob, 0, 0);
1693 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1694 if (rc) {
1695 PRINT_WARN("qeth_send_control_data: "
1696 "ccw_device_start rc = %i\n", rc);
1697 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
1698 spin_lock_irqsave(&card->lock, flags);
1699 list_del_init(&reply->list);
1700 qeth_put_reply(reply);
1701 spin_unlock_irqrestore(&card->lock, flags);
1702 qeth_release_buffer(iob->channel, iob);
1703 atomic_set(&card->write.irq_pending, 0);
1704 wake_up(&card->wait_q);
1705 return rc;
1706 }
1707 while (!atomic_read(&reply->received)) {
1708 if (time_after(jiffies, timeout)) {
1709 spin_lock_irqsave(&reply->card->lock, flags);
1710 list_del_init(&reply->list);
1711 spin_unlock_irqrestore(&reply->card->lock, flags);
1712 reply->rc = -ETIME;
1713 atomic_inc(&reply->received);
1714 wake_up(&reply->wait_q);
1715 }
1716 cpu_relax();
1717	}
1718 rc = reply->rc;
1719 qeth_put_reply(reply);
1720 return rc;
1721}
1722EXPORT_SYMBOL_GPL(qeth_send_control_data);
1723
1724static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1725 unsigned long data)
1726{
1727 struct qeth_cmd_buffer *iob;
1728
1729 QETH_DBF_TEXT(setup, 2, "cmenblcb");
1730
1731 iob = (struct qeth_cmd_buffer *) data;
1732 memcpy(&card->token.cm_filter_r,
1733 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
1734 QETH_MPC_TOKEN_LENGTH);
1735 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
1736 return 0;
1737}
1738
1739static int qeth_cm_enable(struct qeth_card *card)
1740{
1741 int rc;
1742 struct qeth_cmd_buffer *iob;
1743
1744 QETH_DBF_TEXT(setup, 2, "cmenable");
1745
1746 iob = qeth_wait_for_buffer(&card->write);
1747 memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
1748 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
1749 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1750 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
1751 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
1752
1753 rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
1754 qeth_cm_enable_cb, NULL);
1755 return rc;
1756}
1757
1758static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
1759 unsigned long data)
1760{
1761
1762 struct qeth_cmd_buffer *iob;
1763
1764 QETH_DBF_TEXT(setup, 2, "cmsetpcb");
1765
1766 iob = (struct qeth_cmd_buffer *) data;
1767 memcpy(&card->token.cm_connection_r,
1768 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
1769 QETH_MPC_TOKEN_LENGTH);
1770 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
1771 return 0;
1772}
1773
1774static int qeth_cm_setup(struct qeth_card *card)
1775{
1776 int rc;
1777 struct qeth_cmd_buffer *iob;
1778
1779 QETH_DBF_TEXT(setup, 2, "cmsetup");
1780
1781 iob = qeth_wait_for_buffer(&card->write);
1782 memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
1783 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
1784 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
1785 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
1786 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
1787 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
1788 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
1789 rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
1790 qeth_cm_setup_cb, NULL);
1791 return rc;
1792
1793}
1794
1795static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
1796{
1797 switch (card->info.type) {
1798 case QETH_CARD_TYPE_UNKNOWN:
1799 return 1500;
1800 case QETH_CARD_TYPE_IQD:
1801 return card->info.max_mtu;
1802 case QETH_CARD_TYPE_OSAE:
1803 switch (card->info.link_type) {
1804 case QETH_LINK_TYPE_HSTR:
1805 case QETH_LINK_TYPE_LANE_TR:
1806 return 2000;
1807 default:
1808 return 1492;
1809 }
1810 default:
1811 return 1500;
1812 }
1813}
1814
1815static inline int qeth_get_max_mtu_for_card(int cardtype)
1816{
1817 switch (cardtype) {
1818
1819 case QETH_CARD_TYPE_UNKNOWN:
1820 case QETH_CARD_TYPE_OSAE:
1821 case QETH_CARD_TYPE_OSN:
1822 return 61440;
1823 case QETH_CARD_TYPE_IQD:
1824 return 57344;
1825 default:
1826 return 1500;
1827 }
1828}
1829
1830static inline int qeth_get_mtu_out_of_mpc(int cardtype)
1831{
1832 switch (cardtype) {
1833 case QETH_CARD_TYPE_IQD:
1834 return 1;
1835 default:
1836 return 0;
1837 }
1838}
1839
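/*
 * Translate the frame-size code reported in the ULP ENABLE response
 * into an MTU; returns 0 for unknown codes.
 */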
1840static inline int qeth_get_mtu_outof_framesize(int framesize)
1841{
1842 switch (framesize) {
1843 case 0x4000:
1844 return 8192;
1845 case 0x6000:
1846 return 16384;
1847 case 0xa000:
1848 return 32768;
1849 case 0xffff:
1850 return 57344;
1851 default:
1852 return 0;
1853 }
1854}
1855
1856static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
1857{
1858 switch (card->info.type) {
1859 case QETH_CARD_TYPE_OSAE:
1860 return ((mtu >= 576) && (mtu <= 61440));
1861 case QETH_CARD_TYPE_IQD:
1862 return ((mtu >= 576) &&
1863 (mtu <= card->info.max_mtu + 4096 - 32));
1864 case QETH_CARD_TYPE_OSN:
1865 case QETH_CARD_TYPE_UNKNOWN:
1866 default:
1867 return 1;
1868 }
1869}
1870
1871static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1872 unsigned long data)
1873{
1874
1875 __u16 mtu, framesize;
1876 __u16 len;
1877 __u8 link_type;
1878 struct qeth_cmd_buffer *iob;
1879
1880 QETH_DBF_TEXT(setup, 2, "ulpenacb");
1881
1882 iob = (struct qeth_cmd_buffer *) data;
1883 memcpy(&card->token.ulp_filter_r,
1884 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
1885 QETH_MPC_TOKEN_LENGTH);
1886 if (qeth_get_mtu_out_of_mpc(card->info.type)) {
1887 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
1888 mtu = qeth_get_mtu_outof_framesize(framesize);
1889 if (!mtu) {
1890 iob->rc = -EINVAL;
1891 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
1892 return 0;
1893 }
1894 card->info.max_mtu = mtu;
1895 card->info.initial_mtu = mtu;
1896 card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
1897 } else {
1898 card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
1899 card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
1900 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1901 }
1902
1903 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
1904 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
1905 memcpy(&link_type,
1906 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
1907 card->info.link_type = link_type;
1908 } else
1909 card->info.link_type = 0;
1910 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
1911 return 0;
1912}
1913
1914static int qeth_ulp_enable(struct qeth_card *card)
1915{
1916 int rc;
1917 char prot_type;
1918 struct qeth_cmd_buffer *iob;
1919
1920	/* FIXME: trace view callbacks */
1921 QETH_DBF_TEXT(setup, 2, "ulpenabl");
1922
1923 iob = qeth_wait_for_buffer(&card->write);
1924 memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
1925
1926 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
1927 (__u8) card->info.portno;
1928 if (card->options.layer2)
1929 if (card->info.type == QETH_CARD_TYPE_OSN)
1930 prot_type = QETH_PROT_OSN2;
1931 else
1932 prot_type = QETH_PROT_LAYER2;
1933 else
1934 prot_type = QETH_PROT_TCPIP;
1935
1936 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
1937 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
1938 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
1939 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
1940 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
1941 memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
1942 card->info.portname, 9);
1943 rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
1944 qeth_ulp_enable_cb, NULL);
1945 return rc;
1946
1947}
1948
1949static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
1950 unsigned long data)
1951{
1952 struct qeth_cmd_buffer *iob;
1953
1954 QETH_DBF_TEXT(setup, 2, "ulpstpcb");
1955
1956 iob = (struct qeth_cmd_buffer *) data;
1957 memcpy(&card->token.ulp_connection_r,
1958 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
1959 QETH_MPC_TOKEN_LENGTH);
1960 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
1961 return 0;
1962}
1963
1964static int qeth_ulp_setup(struct qeth_card *card)
1965{
1966 int rc;
1967 __u16 temp;
1968 struct qeth_cmd_buffer *iob;
1969 struct ccw_dev_id dev_id;
1970
1971 QETH_DBF_TEXT(setup, 2, "ulpsetup");
1972
1973 iob = qeth_wait_for_buffer(&card->write);
1974 memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
1975
1976 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
1977 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
1978 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
1979 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
1980 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
1981 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
1982
1983 ccw_device_get_id(CARD_DDEV(card), &dev_id);
1984 memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
1985 temp = (card->info.cula << 8) + card->info.unit_addr2;
1986 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
1987 rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
1988 qeth_ulp_setup_cb, NULL);
1989 return rc;
1990}
1991
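/*
 * Allocate the QDIO input queue, the inbound buffer pool and the
 * configured number of output queues, wiring each qeth buffer to its
 * qdio_buffer. Moves qdio.state from UNINITIALIZED to ALLOCATED.
 */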
1992static int qeth_alloc_qdio_buffers(struct qeth_card *card)
1993{
1994 int i, j;
1995
1996 QETH_DBF_TEXT(setup, 2, "allcqdbf");
1997
1998 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
1999 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2000 return 0;
2001
2002 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
2003 GFP_KERNEL|GFP_DMA);
2004 if (!card->qdio.in_q)
2005 goto out_nomem;
2006 QETH_DBF_TEXT(setup, 2, "inq");
2007 QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *));
2008 memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
2009 /* give inbound qeth_qdio_buffers their qdio_buffers */
2010 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
2011 card->qdio.in_q->bufs[i].buffer =
2012 &card->qdio.in_q->qdio_bufs[i];
2013 /* inbound buffer pool */
2014 if (qeth_alloc_buffer_pool(card))
2015 goto out_freeinq;
2016 /* outbound */
2017 card->qdio.out_qs =
2018 kmalloc(card->qdio.no_out_queues *
2019 sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
2020 if (!card->qdio.out_qs)
2021 goto out_freepool;
2022 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2023 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
2024 GFP_KERNEL|GFP_DMA);
2025 if (!card->qdio.out_qs[i])
2026 goto out_freeoutq;
2027 QETH_DBF_TEXT_(setup, 2, "outq %i", i);
2028 QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
2029 memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
2030 card->qdio.out_qs[i]->queue_no = i;
2031 /* give outbound qeth_qdio_buffers their qdio_buffers */
2032 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2033 card->qdio.out_qs[i]->bufs[j].buffer =
2034 &card->qdio.out_qs[i]->qdio_bufs[j];
2035 skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
2036 skb_list);
2037 lockdep_set_class(
2038 &card->qdio.out_qs[i]->bufs[j].skb_list.lock,
2039 &qdio_out_skb_queue_key);
2040 INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
2041 }
2042 }
2043 return 0;
2044
2045out_freeoutq:
2046 while (i > 0)
2047 kfree(card->qdio.out_qs[--i]);
2048 kfree(card->qdio.out_qs);
2049 card->qdio.out_qs = NULL;
2050out_freepool:
2051 qeth_free_buffer_pool(card);
2052out_freeinq:
2053 kfree(card->qdio.in_q);
2054 card->qdio.in_q = NULL;
2055out_nomem:
2056 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2057 return -ENOMEM;
2058}
2059
2060static void qeth_create_qib_param_field(struct qeth_card *card,
2061 char *param_field)
2062{
2063
2064 param_field[0] = _ascebc['P'];
2065 param_field[1] = _ascebc['C'];
2066 param_field[2] = _ascebc['I'];
2067 param_field[3] = _ascebc['T'];
2068 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2069 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2070 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
2071}
2072
2073static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2074 char *param_field)
2075{
2076 param_field[16] = _ascebc['B'];
2077 param_field[17] = _ascebc['L'];
2078 param_field[18] = _ascebc['K'];
2079 param_field[19] = _ascebc['T'];
2080 *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
2081 *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
2082 *((unsigned int *) (&param_field[28])) =
2083 card->info.blkt.inter_packet_jumbo;
2084}
2085
2086static int qeth_qdio_activate(struct qeth_card *card)
2087{
2088 QETH_DBF_TEXT(setup, 3, "qdioact");
2089 return qdio_activate(CARD_DDEV(card), 0);
2090}
2091
2092static int qeth_dm_act(struct qeth_card *card)
2093{
2094 int rc;
2095 struct qeth_cmd_buffer *iob;
2096
2097 QETH_DBF_TEXT(setup, 2, "dmact");
2098
2099 iob = qeth_wait_for_buffer(&card->write);
2100 memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
2101
2102 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2103 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2104 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2105 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2106 rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
2107 return rc;
2108}
2109
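/*
 * Run the MPC bring-up sequence: start the read channel, then CM
 * ENABLE/SETUP and ULP ENABLE/SETUP, allocate and establish the QDIO
 * queues, activate them, and finally issue DM ACT. Any failure tears
 * the QDIO setup down again.
 */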
2110static int qeth_mpc_initialize(struct qeth_card *card)
2111{
2112 int rc;
2113
2114 QETH_DBF_TEXT(setup, 2, "mpcinit");
2115
2116 rc = qeth_issue_next_read(card);
2117 if (rc) {
2118 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
2119 return rc;
2120 }
2121 rc = qeth_cm_enable(card);
2122 if (rc) {
2123 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
2124 goto out_qdio;
2125 }
2126 rc = qeth_cm_setup(card);
2127 if (rc) {
2128 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
2129 goto out_qdio;
2130 }
2131 rc = qeth_ulp_enable(card);
2132 if (rc) {
2133 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
2134 goto out_qdio;
2135 }
2136 rc = qeth_ulp_setup(card);
2137 if (rc) {
2138 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
2139 goto out_qdio;
2140 }
2141 rc = qeth_alloc_qdio_buffers(card);
2142 if (rc) {
2143 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
2144 goto out_qdio;
2145 }
2146 rc = qeth_qdio_establish(card);
2147 if (rc) {
2148 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
2149 qeth_free_qdio_buffers(card);
2150 goto out_qdio;
2151 }
2152 rc = qeth_qdio_activate(card);
2153 if (rc) {
2154 QETH_DBF_TEXT_(setup, 2, "7err%d", rc);
2155 goto out_qdio;
2156 }
2157 rc = qeth_dm_act(card);
2158 if (rc) {
2159 QETH_DBF_TEXT_(setup, 2, "8err%d", rc);
2160 goto out_qdio;
2161 }
2162
2163 return 0;
2164out_qdio:
2165 qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
2166 return rc;
2167}
2168
2169static void qeth_print_status_with_portname(struct qeth_card *card)
2170{
2171 char dbf_text[15];
2172 int i;
2173
2174 sprintf(dbf_text, "%s", card->info.portname + 1);
2175 for (i = 0; i < 8; i++)
2176 dbf_text[i] =
2177 (char) _ebcasc[(__u8) dbf_text[i]];
2178 dbf_text[8] = 0;
2179 PRINT_INFO("Device %s/%s/%s is a%s card%s%s%s\n"
2180 "with link type %s (portname: %s)\n",
2181 CARD_RDEV_ID(card),
2182 CARD_WDEV_ID(card),
2183 CARD_DDEV_ID(card),
2184 qeth_get_cardname(card),
2185 (card->info.mcl_level[0]) ? " (level: " : "",
2186 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2187 (card->info.mcl_level[0]) ? ")" : "",
2188 qeth_get_cardname_short(card),
2189 dbf_text);
2190
2191}
2192
2193static void qeth_print_status_no_portname(struct qeth_card *card)
2194{
2195 if (card->info.portname[0])
2196 PRINT_INFO("Device %s/%s/%s is a%s "
2197 "card%s%s%s\nwith link type %s "
2198 "(no portname needed by interface).\n",
2199 CARD_RDEV_ID(card),
2200 CARD_WDEV_ID(card),
2201 CARD_DDEV_ID(card),
2202 qeth_get_cardname(card),
2203 (card->info.mcl_level[0]) ? " (level: " : "",
2204 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2205 (card->info.mcl_level[0]) ? ")" : "",
2206 qeth_get_cardname_short(card));
2207 else
2208 PRINT_INFO("Device %s/%s/%s is a%s "
2209 "card%s%s%s\nwith link type %s.\n",
2210 CARD_RDEV_ID(card),
2211 CARD_WDEV_ID(card),
2212 CARD_DDEV_ID(card),
2213 qeth_get_cardname(card),
2214 (card->info.mcl_level[0]) ? " (level: " : "",
2215 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2216 (card->info.mcl_level[0]) ? ")" : "",
2217 qeth_get_cardname_short(card));
2218}
2219
2220void qeth_print_status_message(struct qeth_card *card)
2221{
2222 switch (card->info.type) {
2223 case QETH_CARD_TYPE_OSAE:
2224 /* VM will use a non-zero first character to indicate
2225 * a HiperSockets-like reporting of the level;
2226 * OSA sets the first character to zero.
2227 */
2228 if (!card->info.mcl_level[0]) {
2229 sprintf(card->info.mcl_level, "%02x%02x",
2230 card->info.mcl_level[2],
2231 card->info.mcl_level[3]);
2232
2233 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2234 break;
2235 }
2236 /* fallthrough */
2237 case QETH_CARD_TYPE_IQD:
2238 if (card->info.guestlan) {
2239 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2240 card->info.mcl_level[0]];
2241 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2242 card->info.mcl_level[1]];
2243 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2244 card->info.mcl_level[2]];
2245 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2246 card->info.mcl_level[3]];
2247 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2248 }
2249 break;
2250 default:
2251 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2252 }
2253 if (card->info.portname_required)
2254 qeth_print_status_with_portname(card);
2255 else
2256 qeth_print_status_no_portname(card);
2257}
2258EXPORT_SYMBOL_GPL(qeth_print_status_message);
2259
2260void qeth_put_buffer_pool_entry(struct qeth_card *card,
2261 struct qeth_buffer_pool_entry *entry)
2262{
2263 QETH_DBF_TEXT(trace, 6, "ptbfplen");
2264 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
2265}
2266EXPORT_SYMBOL_GPL(qeth_put_buffer_pool_entry);
2267
2268static void qeth_initialize_working_pool_list(struct qeth_card *card)
2269{
2270 struct qeth_buffer_pool_entry *entry;
2271
2272 QETH_DBF_TEXT(trace, 5, "inwrklst");
2273
2274 list_for_each_entry(entry,
2275 &card->qdio.init_pool.entry_list, init_list) {
2276 qeth_put_buffer_pool_entry(card, entry);
2277 }
2278}
2279
2280static inline struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2281 struct qeth_card *card)
2282{
2283 struct list_head *plh;
2284 struct qeth_buffer_pool_entry *entry;
2285 int i, free;
2286 struct page *page;
2287
2288 if (list_empty(&card->qdio.in_buf_pool.entry_list))
2289 return NULL;
2290
2291 list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
2292 entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
2293 free = 1;
2294 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2295 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2296 free = 0;
2297 break;
2298 }
2299 }
2300 if (free) {
2301 list_del_init(&entry->list);
2302 return entry;
2303 }
2304 }
2305
2306 /* no free buffer in pool so take first one and swap pages */
2307 entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2308 struct qeth_buffer_pool_entry, list);
2309 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2310 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2311 page = alloc_page(GFP_ATOMIC|GFP_DMA);
2312 if (!page) {
2313 return NULL;
2314 } else {
2315 free_page((unsigned long)entry->elements[i]);
2316 entry->elements[i] = page_address(page);
2317 if (card->options.performance_stats)
2318 card->perf_stats.sg_alloc_page_rx++;
2319 }
2320 }
2321 }
2322 list_del_init(&entry->list);
2323 return entry;
2324}
2325
2326static int qeth_init_input_buffer(struct qeth_card *card,
2327 struct qeth_qdio_buffer *buf)
2328{
2329 struct qeth_buffer_pool_entry *pool_entry;
2330 int i;
2331
2332 pool_entry = qeth_find_free_buffer_pool_entry(card);
2333 if (!pool_entry)
2334 return 1;
2335
2336 /*
2337 * Since the buffer is accessed only from the input tasklet,
2338 * there shouldn't be a need to synchronize; also, since we use
2339 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2340 * buffers.
2341 */
2342 BUG_ON(!pool_entry);
2343
2344 buf->pool_entry = pool_entry;
2345 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2346 buf->buffer->element[i].length = PAGE_SIZE;
2347 buf->buffer->element[i].addr = pool_entry->elements[i];
2348 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2349 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
2350 else
2351 buf->buffer->element[i].flags = 0;
2352 }
2353 return 0;
2354}
2355
2356int qeth_init_qdio_queues(struct qeth_card *card)
2357{
2358 int i, j;
2359 int rc;
2360
2361 QETH_DBF_TEXT(setup, 2, "initqdqs");
2362
2363 /* inbound queue */
2364 memset(card->qdio.in_q->qdio_bufs, 0,
2365 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
2366 qeth_initialize_working_pool_list(card);
2367 /* give only as many buffers to hardware as we have buffer pool entries */
2368 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
2369 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
2370 card->qdio.in_q->next_buf_to_init =
2371 card->qdio.in_buf_pool.buf_count - 1;
2372 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
2373 card->qdio.in_buf_pool.buf_count - 1, NULL);
2374 if (rc) {
2375 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
2376 return rc;
2377 }
2378 rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
2379 if (rc) {
2380 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
2381 return rc;
2382 }
2383 /* outbound queue */
2384 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2385 memset(card->qdio.out_qs[i]->qdio_bufs, 0,
2386 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
2387 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2388 qeth_clear_output_buffer(card->qdio.out_qs[i],
2389 &card->qdio.out_qs[i]->bufs[j]);
2390 }
2391 card->qdio.out_qs[i]->card = card;
2392 card->qdio.out_qs[i]->next_buf_to_fill = 0;
2393 card->qdio.out_qs[i]->do_pack = 0;
2394 atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
2395 atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
2396 atomic_set(&card->qdio.out_qs[i]->state,
2397 QETH_OUT_Q_UNLOCKED);
2398 }
2399 return 0;
2400}
2401EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
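
/*
 * Worked example (illustrative numbers): with buf_count == 16 pool
 * entries, the loop above primes input buffers 0..14, do_QDIO() hands
 * those 15 buffers to the hardware, and next_buf_to_init == 15 is
 * deliberately held back so that qeth_queue_input_buffer() always has
 * one un-requeued buffer to start the next requeue cycle from.
 */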
2402
2403static inline __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
2404{
2405 switch (link_type) {
2406 case QETH_LINK_TYPE_HSTR:
2407 return 2;
2408 default:
2409 return 1;
2410 }
2411}
2412
2413static void qeth_fill_ipacmd_header(struct qeth_card *card,
2414 struct qeth_ipa_cmd *cmd, __u8 command,
2415 enum qeth_prot_versions prot)
2416{
2417 memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
2418 cmd->hdr.command = command;
2419 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
2420 cmd->hdr.seqno = card->seqno.ipa;
2421 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
2422 cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
2423 if (card->options.layer2)
2424 cmd->hdr.prim_version_no = 2;
2425 else
2426 cmd->hdr.prim_version_no = 1;
2427 cmd->hdr.param_count = 1;
2428 cmd->hdr.prot_version = prot;
2429 cmd->hdr.ipa_supported = 0;
2430 cmd->hdr.ipa_enabled = 0;
2431}
2432
2433struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
2434 enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
2435{
2436 struct qeth_cmd_buffer *iob;
2437 struct qeth_ipa_cmd *cmd;
2438
2439 iob = qeth_wait_for_buffer(&card->write);
2440 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2441 qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
2442
2443 return iob;
2444}
2445EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
2446
2447void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2448 char prot_type)
2449{
2450 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
2451 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
2452 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
2453 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2454}
2455EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
2456
2457int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2458 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
2459 unsigned long),
2460 void *reply_param)
2461{
2462 int rc;
2463 char prot_type;
2464 int cmd;
2465 cmd = ((struct qeth_ipa_cmd *)
2466 (iob->data+IPA_PDU_HEADER_SIZE))->hdr.command;
2467
2468 QETH_DBF_TEXT(trace, 4, "sendipa");
2469
2470 if (card->options.layer2)
2471 if (card->info.type == QETH_CARD_TYPE_OSN)
2472 prot_type = QETH_PROT_OSN2;
2473 else
2474 prot_type = QETH_PROT_LAYER2;
2475 else
2476 prot_type = QETH_PROT_TCPIP;
2477 qeth_prepare_ipa_cmd(card, iob, prot_type);
2478 rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob,
2479 reply_cb, reply_param);
2480 if (rc != 0) {
2481 char *ipa_cmd_name;
2482 ipa_cmd_name = qeth_get_ipa_cmd_name(cmd);
2483 PRINT_ERR("%s %s(%x) returned %s(%x)\n", __FUNCTION__,
2484 ipa_cmd_name, cmd, qeth_get_ipa_msg(rc), rc);
2485 }
2486 return rc;
2487}
2488EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
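
/*
 * Usage sketch (hypothetical caller, using only symbols defined in
 * this driver): build an IPA command buffer, then send it with an
 * optional reply callback -- qeth_send_startstoplan() below follows
 * exactly this pattern with reply_cb == NULL:
 *
 *	struct qeth_cmd_buffer *iob;
 *	int rc;
 *
 *	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN,
 *				     QETH_PROT_IPV4);
 *	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
 *
 * The protocol discriminator (layer 2, layer 3 or OSN) is filled in
 * by qeth_prepare_ipa_cmd() from the card options, so callers never
 * set it themselves.
 */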
2489
2490static int qeth_send_startstoplan(struct qeth_card *card,
2491 enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
2492{
2493 int rc;
2494 struct qeth_cmd_buffer *iob;
2495
2496 iob = qeth_get_ipacmd_buffer(card, ipacmd, prot);
2497 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
2498
2499 return rc;
2500}
2501
2502int qeth_send_startlan(struct qeth_card *card)
2503{
2504 int rc;
2505
2506 QETH_DBF_TEXT(setup, 2, "strtlan");
2507
2508 rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, 0);
2509 return rc;
2510}
2511EXPORT_SYMBOL_GPL(qeth_send_startlan);
2512
2513int qeth_send_stoplan(struct qeth_card *card)
2514{
2515 int rc = 0;
2516
2517 /*
2518 * TODO: according to the IPA format document, page 14,
2519 * TCP/IP (i.e. we) never issues a STOPLAN.
2520 * Is this right?
2521 */
2522 QETH_DBF_TEXT(setup, 2, "stoplan");
2523
2524 rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, 0);
2525 return rc;
2526}
2527EXPORT_SYMBOL_GPL(qeth_send_stoplan);
2528
2529int qeth_default_setadapterparms_cb(struct qeth_card *card,
2530 struct qeth_reply *reply, unsigned long data)
2531{
2532 struct qeth_ipa_cmd *cmd;
2533
2534 QETH_DBF_TEXT(trace, 4, "defadpcb");
2535
2536 cmd = (struct qeth_ipa_cmd *) data;
2537 if (cmd->hdr.return_code == 0)
2538 cmd->hdr.return_code =
2539 cmd->data.setadapterparms.hdr.return_code;
2540 return 0;
2541}
2542EXPORT_SYMBOL_GPL(qeth_default_setadapterparms_cb);
2543
2544static int qeth_query_setadapterparms_cb(struct qeth_card *card,
2545 struct qeth_reply *reply, unsigned long data)
2546{
2547 struct qeth_ipa_cmd *cmd;
2548
2549 QETH_DBF_TEXT(trace, 3, "quyadpcb");
2550
2551 cmd = (struct qeth_ipa_cmd *) data;
2552 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f)
2553 card->info.link_type =
2554 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
2555 card->options.adp.supported_funcs =
2556 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
2557 return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
2558}
2559
2560struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
2561 __u32 command, __u32 cmdlen)
2562{
2563 struct qeth_cmd_buffer *iob;
2564 struct qeth_ipa_cmd *cmd;
2565
2566 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
2567 QETH_PROT_IPV4);
2568 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2569 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
2570 cmd->data.setadapterparms.hdr.command_code = command;
2571 cmd->data.setadapterparms.hdr.used_total = 1;
2572 cmd->data.setadapterparms.hdr.seq_no = 1;
2573
2574 return iob;
2575}
2576EXPORT_SYMBOL_GPL(qeth_get_adapter_cmd);
2577
2578int qeth_query_setadapterparms(struct qeth_card *card)
2579{
2580 int rc;
2581 struct qeth_cmd_buffer *iob;
2582
2583 QETH_DBF_TEXT(trace, 3, "queryadp");
2584 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
2585 sizeof(struct qeth_ipacmd_setadpparms));
2586 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
2587 return rc;
2588}
2589EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
2590
2591int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2592 unsigned int siga_error, const char *dbftext)
2593{
2594 if (qdio_error || siga_error) {
2595 QETH_DBF_TEXT(trace, 2, dbftext);
2596 QETH_DBF_TEXT(qerr, 2, dbftext);
2597 QETH_DBF_TEXT_(qerr, 2, " F15=%02X",
2598 buf->element[15].flags & 0xff);
2599 QETH_DBF_TEXT_(qerr, 2, " F14=%02X",
2600 buf->element[14].flags & 0xff);
2601 QETH_DBF_TEXT_(qerr, 2, " qerr=%X", qdio_error);
2602 QETH_DBF_TEXT_(qerr, 2, " serr=%X", siga_error);
2603 return 1;
2604 }
2605 return 0;
2606}
2607EXPORT_SYMBOL_GPL(qeth_check_qdio_errors);
2608
2609void qeth_queue_input_buffer(struct qeth_card *card, int index)
2610{
2611 struct qeth_qdio_q *queue = card->qdio.in_q;
2612 int count;
2613 int i;
2614 int rc;
2615 int newcount = 0;
2616
2617 QETH_DBF_TEXT(trace, 6, "queinbuf");
2618 count = (index < queue->next_buf_to_init)?
2619 card->qdio.in_buf_pool.buf_count -
2620 (queue->next_buf_to_init - index) :
2621 card->qdio.in_buf_pool.buf_count -
2622 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
2623 /* only requeue at a certain threshold to avoid SIGAs */
2624 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
2625 for (i = queue->next_buf_to_init;
2626 i < queue->next_buf_to_init + count; ++i) {
2627 if (qeth_init_input_buffer(card,
2628 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
2629 break;
2630 } else {
2631 newcount++;
2632 }
2633 }
2634
2635 if (newcount < count) {
2636 /* we are short on memory, so we switch back to
2637 traditional skb allocation and drop packets */
2638 if (!atomic_read(&card->force_alloc_skb) &&
2639 net_ratelimit())
2640 PRINT_WARN("Switch to alloc skb\n");
2641 atomic_set(&card->force_alloc_skb, 3);
2642 count = newcount;
2643 } else {
2644 if ((atomic_read(&card->force_alloc_skb) == 1) &&
2645 net_ratelimit())
2646 PRINT_WARN("Switch to sg\n");
2647 atomic_add_unless(&card->force_alloc_skb, -1, 0);
2648 }
2649
2650 /*
2651 * According to the old code, requeueing all 128 buffers at once
2652 * should be avoided in order to benefit from PCI avoidance.
2653 * This function keeps at least one buffer (the buffer at 'index')
2654 * un-requeued -> this buffer is the first buffer that will be
2655 * requeued the next time.
2656 */
2657 if (card->options.performance_stats) {
2658 card->perf_stats.inbound_do_qdio_cnt++;
2659 card->perf_stats.inbound_do_qdio_start_time =
2660 qeth_get_micros();
2661 }
2662 rc = do_QDIO(CARD_DDEV(card),
2663 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
2664 0, queue->next_buf_to_init, count, NULL);
2665 if (card->options.performance_stats)
2666 card->perf_stats.inbound_do_qdio_time +=
2667 qeth_get_micros() -
2668 card->perf_stats.inbound_do_qdio_start_time;
2669 if (rc) {
2670 PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
2671 "return %i (device %s).\n",
2672 rc, CARD_DDEV_ID(card));
2673 QETH_DBF_TEXT(trace, 2, "qinberr");
2674 QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card));
2675 }
2676 queue->next_buf_to_init = (queue->next_buf_to_init + count) %
2677 QDIO_MAX_BUFFERS_PER_Q;
2678 }
2679}
2680EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
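
/*
 * Worked example for the count computation above (illustrative
 * numbers): with buf_count == 16, next_buf_to_init == 15 and
 * index == 7 (the hardware has completed buffers 0..7),
 * count = 16 - (15 - 7) = 8. Once count reaches
 * QETH_IN_BUF_REQUEUE_THRESHOLD, buffers 15..22 are re-primed and the
 * hardware again owns buf_count - 1 == 15 buffers.
 */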
2681
2682static int qeth_handle_send_error(struct qeth_card *card,
2683 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err,
2684 unsigned int siga_err)
2685{
2686 int sbalf15 = buffer->buffer->element[15].flags & 0xff;
2687 int cc = siga_err & 3;
2688
2689 QETH_DBF_TEXT(trace, 6, "hdsnderr");
2690 qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr");
2691 switch (cc) {
2692 case 0:
2693 if (qdio_err) {
2694 QETH_DBF_TEXT(trace, 1, "lnkfail");
2695 QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card));
2696 QETH_DBF_TEXT_(trace, 1, "%04x %02x",
2697 (u16)qdio_err, (u8)sbalf15);
2698 return QETH_SEND_ERROR_LINK_FAILURE;
2699 }
2700 return QETH_SEND_ERROR_NONE;
2701 case 2:
2702 if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
2703 QETH_DBF_TEXT(trace, 1, "SIGAcc2B");
2704 QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card));
2705 return QETH_SEND_ERROR_KICK_IT;
2706 }
2707 if ((sbalf15 >= 15) && (sbalf15 <= 31))
2708 return QETH_SEND_ERROR_RETRY;
2709 return QETH_SEND_ERROR_LINK_FAILURE;
2710 /* look at qdio_error and sbalf 15 */
2711 case 1:
2712 QETH_DBF_TEXT(trace, 1, "SIGAcc1");
2713 QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card));
2714 return QETH_SEND_ERROR_LINK_FAILURE;
2715 case 3:
2716 default:
2717 QETH_DBF_TEXT(trace, 1, "SIGAcc3");
2718 QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card));
2719 return QETH_SEND_ERROR_KICK_IT;
2720 }
2721}
2722
2723/*
2724 * Switches to packing state if the number of used buffers on a queue
2725 * reaches a certain limit.
2726 */
2727static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2728{
2729 if (!queue->do_pack) {
2730 if (atomic_read(&queue->used_buffers)
2731 >= QETH_HIGH_WATERMARK_PACK) {
2732 /* switch non-PACKING -> PACKING */
2733 QETH_DBF_TEXT(trace, 6, "np->pack");
2734 if (queue->card->options.performance_stats)
2735 queue->card->perf_stats.sc_dp_p++;
2736 queue->do_pack = 1;
2737 }
2738 }
2739}
2740
2741/*
2742 * Switches from packing to non-packing mode. If there is a packing
2743 * buffer on the queue this buffer will be prepared to be flushed.
2744 * In that case 1 is returned to inform the caller. If no buffer
2745 * has to be flushed, zero is returned.
2746 */
2747static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2748{
2749 struct qeth_qdio_out_buffer *buffer;
2750 int flush_count = 0;
2751
2752 if (queue->do_pack) {
2753 if (atomic_read(&queue->used_buffers)
2754 <= QETH_LOW_WATERMARK_PACK) {
2755 /* switch PACKING -> non-PACKING */
2756 QETH_DBF_TEXT(trace, 6, "pack->np");
2757 if (queue->card->options.performance_stats)
2758 queue->card->perf_stats.sc_p_dp++;
2759 queue->do_pack = 0;
2760 /* flush packing buffers */
2761 buffer = &queue->bufs[queue->next_buf_to_fill];
2762 if ((atomic_read(&buffer->state) ==
2763 QETH_QDIO_BUF_EMPTY) &&
2764 (buffer->next_element_to_fill > 0)) {
2765 atomic_set(&buffer->state,
2766 QETH_QDIO_BUF_PRIMED);
2767 flush_count++;
2768 queue->next_buf_to_fill =
2769 (queue->next_buf_to_fill + 1) %
2770 QDIO_MAX_BUFFERS_PER_Q;
2771 }
2772 }
2773 }
2774 return flush_count;
2775}
2776
2777/*
2778 * Called to flush a packing buffer if no more pci flags are on the queue.
2779 * Checks if there is a packing buffer and prepares it to be flushed.
2780 * In that case returns 1, otherwise zero.
2781 */
2782static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2783{
2784 struct qeth_qdio_out_buffer *buffer;
2785
2786 buffer = &queue->bufs[queue->next_buf_to_fill];
2787 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
2788 (buffer->next_element_to_fill > 0)) {
2789 /* it's a packing buffer */
2790 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
2791 queue->next_buf_to_fill =
2792 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
2793 return 1;
2794 }
2795 return 0;
2796}
2797
2798static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2799 int index, int count)
2800{
2801 struct qeth_qdio_out_buffer *buf;
2802 int rc;
2803 int i;
2804 unsigned int qdio_flags;
2805
2806 QETH_DBF_TEXT(trace, 6, "flushbuf");
2807
2808 for (i = index; i < index + count; ++i) {
2809 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2810 buf->buffer->element[buf->next_element_to_fill - 1].flags |=
2811 SBAL_FLAGS_LAST_ENTRY;
2812
2813 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
2814 continue;
2815
2816 if (!queue->do_pack) {
2817 if ((atomic_read(&queue->used_buffers) >=
2818 (QETH_HIGH_WATERMARK_PACK -
2819 QETH_WATERMARK_PACK_FUZZ)) &&
2820 !atomic_read(&queue->set_pci_flags_count)) {
2821 /* it's likely that we'll go to packing
2822 * mode soon */
2823 atomic_inc(&queue->set_pci_flags_count);
2824 buf->buffer->element[0].flags |= 0x40;
2825 }
2826 } else {
2827 if (!atomic_read(&queue->set_pci_flags_count)) {
2828 /*
2829 * There's no outstanding PCI any more, so we
2830 * have to request a PCI to be sure that the PCI
2831 * will wake us up at some time in the future; then
2832 * we can flush packed buffers that might still be
2833 * hanging around, which can happen if no
2834 * further send was requested by the stack.
2835 */
2836 atomic_inc(&queue->set_pci_flags_count);
2837 buf->buffer->element[0].flags |= 0x40;
2838 }
2839 }
2840 }
2841
2842 queue->card->dev->trans_start = jiffies;
2843 if (queue->card->options.performance_stats) {
2844 queue->card->perf_stats.outbound_do_qdio_cnt++;
2845 queue->card->perf_stats.outbound_do_qdio_start_time =
2846 qeth_get_micros();
2847 }
2848 qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
2849 if (under_int)
2850 qdio_flags |= QDIO_FLAG_UNDER_INTERRUPT;
2851 if (atomic_read(&queue->set_pci_flags_count))
2852 qdio_flags |= QDIO_FLAG_PCI_OUT;
2853 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
2854 queue->queue_no, index, count, NULL);
2855 if (queue->card->options.performance_stats)
2856 queue->card->perf_stats.outbound_do_qdio_time +=
2857 qeth_get_micros() -
2858 queue->card->perf_stats.outbound_do_qdio_start_time;
2859 if (rc) {
2860 QETH_DBF_TEXT(trace, 2, "flushbuf");
2861 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
2862 QETH_DBF_TEXT_(trace, 2, "%s", CARD_DDEV_ID(queue->card));
2863 queue->card->stats.tx_errors += count;
2864 /* this must not happen under normal circumstances. if it
2865 * happens something is really wrong -> recover */
2866 qeth_schedule_recovery(queue->card);
2867 return;
2868 }
2869 atomic_add(count, &queue->used_buffers);
2870 if (queue->card->options.performance_stats)
2871 queue->card->perf_stats.bufs_sent += count;
2872}
2873
2874static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2875{
2876 int index;
2877 int flush_cnt = 0;
2878 int q_was_packing = 0;
2879
2880 /*
2881 * check if we have to switch to non-packing mode or if
2882 * we have to get a pci flag out on the queue
2883 */
2884 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
2885 !atomic_read(&queue->set_pci_flags_count)) {
2886 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
2887 QETH_OUT_Q_UNLOCKED) {
2888 /*
2889 * If we get in here, there was no action in
2890 * do_send_packet. So, we check if there is a
2891 * packing buffer to be flushed here.
2892 */
2893 netif_stop_queue(queue->card->dev);
2894 index = queue->next_buf_to_fill;
2895 q_was_packing = queue->do_pack;
2896 /* queue->do_pack may change */
2897 barrier();
2898 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
2899 if (!flush_cnt &&
2900 !atomic_read(&queue->set_pci_flags_count))
2901 flush_cnt +=
2902 qeth_flush_buffers_on_no_pci(queue);
2903 if (queue->card->options.performance_stats &&
2904 q_was_packing)
2905 queue->card->perf_stats.bufs_sent_pack +=
2906 flush_cnt;
2907 if (flush_cnt)
2908 qeth_flush_buffers(queue, 1, index, flush_cnt);
2909 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
2910 }
2911 }
2912}
2913
2914void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
2915 unsigned int qdio_error, unsigned int siga_error,
2916 unsigned int __queue, int first_element, int count,
2917 unsigned long card_ptr)
2918{
2919 struct qeth_card *card = (struct qeth_card *) card_ptr;
2920 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
2921 struct qeth_qdio_out_buffer *buffer;
2922 int i;
2923
2924 QETH_DBF_TEXT(trace, 6, "qdouhdl");
2925 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
2926 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
2927 QETH_DBF_TEXT(trace, 2, "achkcond");
2928 QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card));
2929 QETH_DBF_TEXT_(trace, 2, "%08x", status);
2930 netif_stop_queue(card->dev);
2931 qeth_schedule_recovery(card);
2932 return;
2933 }
2934 }
2935 if (card->options.performance_stats) {
2936 card->perf_stats.outbound_handler_cnt++;
2937 card->perf_stats.outbound_handler_start_time =
2938 qeth_get_micros();
2939 }
2940 for (i = first_element; i < (first_element + count); ++i) {
2941 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2942 /* we only handle the KICK_IT error by doing a recovery */
2943 if (qeth_handle_send_error(card, buffer,
2944 qdio_error, siga_error)
2945 == QETH_SEND_ERROR_KICK_IT){
2946 netif_stop_queue(card->dev);
2947 qeth_schedule_recovery(card);
2948 return;
2949 }
2950 qeth_clear_output_buffer(queue, buffer);
2951 }
2952 atomic_sub(count, &queue->used_buffers);
2953 /* check if we need to do something on this outbound queue */
2954 if (card->info.type != QETH_CARD_TYPE_IQD)
2955 qeth_check_outbound_queue(queue);
2956
2957 netif_wake_queue(queue->card->dev);
2958 if (card->options.performance_stats)
2959 card->perf_stats.outbound_handler_time += qeth_get_micros() -
2960 card->perf_stats.outbound_handler_start_time;
2961}
2962EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
2963
2964int qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
2965{
2966 int cast_type = RTN_UNSPEC;
2967
2968 if (card->info.type == QETH_CARD_TYPE_OSN)
2969 return cast_type;
2970
2971 if (skb->dst && skb->dst->neighbour) {
2972 cast_type = skb->dst->neighbour->type;
2973 if ((cast_type == RTN_BROADCAST) ||
2974 (cast_type == RTN_MULTICAST) ||
2975 (cast_type == RTN_ANYCAST))
2976 return cast_type;
2977 else
2978 return RTN_UNSPEC;
2979 }
2980 /* try something else */
2981 if (skb->protocol == htons(ETH_P_IPV6))
2982 return (skb_network_header(skb)[24] == 0xff) ?
2983 RTN_MULTICAST : 0;
2984 else if (skb->protocol == htons(ETH_P_IP))
2985 return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
2986 RTN_MULTICAST : 0;
2987 /* ... */
2988 if (!memcmp(skb->data, skb->dev->broadcast, 6))
2989 return RTN_BROADCAST;
2990 else {
2991 u16 hdr_mac;
2992
2993 hdr_mac = *((u16 *)skb->data);
2994 /* tr multicast? */
2995 switch (card->info.link_type) {
2996 case QETH_LINK_TYPE_HSTR:
2997 case QETH_LINK_TYPE_LANE_TR:
2998 if ((hdr_mac == QETH_TR_MAC_NC) ||
2999 (hdr_mac == QETH_TR_MAC_C))
3000 return RTN_MULTICAST;
3001 break;
3002 /* eth or so multicast? */
3003 default:
3004 if ((hdr_mac == QETH_ETH_MAC_V4) ||
3005 (hdr_mac == QETH_ETH_MAC_V6))
3006 return RTN_MULTICAST;
3007 }
3008 }
3009 return cast_type;
3010}
3011EXPORT_SYMBOL_GPL(qeth_get_cast_type);
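
/*
 * Worked example (illustrative): for IPv4, byte 16 of the network
 * header is the first byte of the destination address, so the
 * (x & 0xf0) == 0xe0 test above matches 224.0.0.0/4 -- a packet to
 * 239.1.2.3 (first byte 0xef) is classified RTN_MULTICAST. For IPv6,
 * byte 24 is the first byte of the destination address and 0xff
 * matches ff00::/8 multicast.
 */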
3012
3013int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3014 int ipv, int cast_type)
3015{
3016 if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
3017 return card->qdio.default_out_queue;
3018 switch (card->qdio.no_out_queues) {
3019 case 4:
3020 if (cast_type && card->info.is_multicast_different)
3021 return card->info.is_multicast_different &
3022 (card->qdio.no_out_queues - 1);
3023 if (card->qdio.do_prio_queueing && (ipv == 4)) {
3024 const u8 tos = ip_hdr(skb)->tos;
3025
3026 if (card->qdio.do_prio_queueing ==
3027 QETH_PRIO_Q_ING_TOS) {
3028 if (tos & IP_TOS_NOTIMPORTANT)
3029 return 3;
3030 if (tos & IP_TOS_HIGHRELIABILITY)
3031 return 2;
3032 if (tos & IP_TOS_HIGHTHROUGHPUT)
3033 return 1;
3034 if (tos & IP_TOS_LOWDELAY)
3035 return 0;
3036 }
3037 if (card->qdio.do_prio_queueing ==
3038 QETH_PRIO_Q_ING_PREC)
3039 return 3 - (tos >> 6);
3040 } else if (card->qdio.do_prio_queueing && (ipv == 6)) {
3041 /* TODO: IPv6!!! */
3042 }
3043 return card->qdio.default_out_queue;
3044 case 1: /* fallthrough for single-out-queue 1920-device */
3045 default:
3046 return card->qdio.default_out_queue;
3047 }
3048}
3049EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
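
/*
 * Worked example (illustrative): in precedence mode
 * (QETH_PRIO_Q_ING_PREC) the IP precedence bits pick the queue via
 * 3 - (tos >> 6), so tos == 0xe0 yields queue 0 (highest priority)
 * and tos == 0x00 yields queue 3. In TOS mode the bits are tested
 * from IP_TOS_NOTIMPORTANT (queue 3) down to IP_TOS_LOWDELAY
 * (queue 0), first match wins.
 */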
3050
3051static void __qeth_free_new_skb(struct sk_buff *orig_skb,
3052 struct sk_buff *new_skb)
3053{
3054 if (orig_skb != new_skb)
3055 dev_kfree_skb_any(new_skb);
3056}
3057
3058static inline struct sk_buff *qeth_realloc_headroom(struct qeth_card *card,
3059 struct sk_buff *skb, int size)
3060{
3061 struct sk_buff *new_skb = skb;
3062
3063 if (skb_headroom(skb) >= size)
3064 return skb;
3065 new_skb = skb_realloc_headroom(skb, size);
3066 if (!new_skb)
3067 PRINT_ERR("Could not realloc headroom for qeth_hdr "
3068 "on interface %s", QETH_CARD_IFNAME(card));
3069 return new_skb;
3070}
3071
3072struct sk_buff *qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
3073 struct qeth_hdr **hdr)
3074{
3075 struct sk_buff *new_skb;
3076
3077 QETH_DBF_TEXT(trace, 6, "prepskb");
3078
3079 new_skb = qeth_realloc_headroom(card, skb,
3080 sizeof(struct qeth_hdr));
3081 if (!new_skb)
3082 return NULL;
3083
3084 *hdr = ((struct qeth_hdr *)qeth_push_skb(card, new_skb,
3085 sizeof(struct qeth_hdr)));
3086 if (*hdr == NULL) {
3087 __qeth_free_new_skb(skb, new_skb);
3088 return NULL;
3089 }
3090 return new_skb;
3091}
3092EXPORT_SYMBOL_GPL(qeth_prepare_skb);
3093
3094int qeth_get_elements_no(struct qeth_card *card, void *hdr,
3095 struct sk_buff *skb, int elems)
3096{
3097 int elements_needed = 0;
3098
3099 if (skb_shinfo(skb)->nr_frags > 0)
3100 elements_needed = (skb_shinfo(skb)->nr_frags + 1);
3101 if (elements_needed == 0)
3102 elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
3103 + skb->len) >> PAGE_SHIFT);
3104 if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
3105 PRINT_ERR("Invalid size of IP packet "
3106 "(Number=%d / Length=%d). Discarded.\n",
3107 (elements_needed+elems), skb->len);
3108 return 0;
3109 }
3110 return elements_needed;
3111}
3112EXPORT_SYMBOL_GPL(qeth_get_elements_no);
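
/*
 * Worked example (illustrative, assuming 4 KiB pages): for a linear
 * skb the formula above is 1 + ((hdr_offset_within_page + skb->len)
 * >> PAGE_SHIFT). A 1500-byte frame whose header starts 3000 bytes
 * into a page spans two pages (3000 + 1500 = 4500 > 4096) and thus
 * needs 1 + 1 = 2 buffer elements; a fragmented skb simply needs
 * nr_frags + 1 elements.
 */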
3113
3114static void __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
3115 int is_tso, int *next_element_to_fill)
3116{
3117 int length = skb->len;
3118 int length_here;
3119 int element;
3120 char *data;
3121 int first_lap;
3122
3123 element = *next_element_to_fill;
3124 data = skb->data;
3125 first_lap = (is_tso == 0 ? 1 : 0);
3126
3127 while (length > 0) {
3128 /* length_here is the remaining amount of data in this page */
3129 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
3130 if (length < length_here)
3131 length_here = length;
3132
3133 buffer->element[element].addr = data;
3134 buffer->element[element].length = length_here;
3135 length -= length_here;
3136 if (!length) {
3137 if (first_lap)
3138 buffer->element[element].flags = 0;
3139 else
3140 buffer->element[element].flags =
3141 SBAL_FLAGS_LAST_FRAG;
3142 } else {
3143 if (first_lap)
3144 buffer->element[element].flags =
3145 SBAL_FLAGS_FIRST_FRAG;
3146 else
3147 buffer->element[element].flags =
3148 SBAL_FLAGS_MIDDLE_FRAG;
3149 }
3150 data += length_here;
3151 element++;
3152 first_lap = 0;
3153 }
3154 *next_element_to_fill = element;
3155}
3156
3157static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3158 struct qeth_qdio_out_buffer *buf, struct sk_buff *skb)
3159{
3160 struct qdio_buffer *buffer;
3161 struct qeth_hdr_tso *hdr;
3162 int flush_cnt = 0, hdr_len, large_send = 0;
3163
3164 QETH_DBF_TEXT(trace, 6, "qdfillbf");
3165
3166 buffer = buf->buffer;
3167 atomic_inc(&skb->users);
3168 skb_queue_tail(&buf->skb_list, skb);
3169
3170 hdr = (struct qeth_hdr_tso *) skb->data;
3171 /* check first for TSO ... */
3172 if (hdr->hdr.hdr.l3.id == QETH_HEADER_TYPE_TSO) {
3173 int element = buf->next_element_to_fill;
3174
3175 hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
3176 /* fill first buffer entry only with header information */
3177 buffer->element[element].addr = skb->data;
3178 buffer->element[element].length = hdr_len;
3179 buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
3180 buf->next_element_to_fill++;
3181 skb->data += hdr_len;
3182 skb->len -= hdr_len;
3183 large_send = 1;
3184 }
3185 if (skb_shinfo(skb)->nr_frags == 0)
3186 __qeth_fill_buffer(skb, buffer, large_send,
3187 (int *)&buf->next_element_to_fill);
3188 else
3189 __qeth_fill_buffer_frag(skb, buffer, large_send,
3190 (int *)&buf->next_element_to_fill);
3191
3192 if (!queue->do_pack) {
3193 QETH_DBF_TEXT(trace, 6, "fillbfnp");
3194 /* set state to PRIMED -> will be flushed */
3195 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
3196 flush_cnt = 1;
3197 } else {
3198 QETH_DBF_TEXT(trace, 6, "fillbfpa");
3199 if (queue->card->options.performance_stats)
3200 queue->card->perf_stats.skbs_sent_pack++;
3201 if (buf->next_element_to_fill >=
3202 QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
3203 /*
3204 * packed buffer is full -> set state PRIMED
3205 * -> will be flushed
3206 */
3207 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
3208 flush_cnt = 1;
3209 }
3210 }
3211 return flush_cnt;
3212}
3213
3214int qeth_do_send_packet_fast(struct qeth_card *card,
3215 struct qeth_qdio_out_q *queue, struct sk_buff *skb,
3216 struct qeth_hdr *hdr, int elements_needed,
3217 struct qeth_eddp_context *ctx)
3218{
3219 struct qeth_qdio_out_buffer *buffer;
3220 int buffers_needed = 0;
3221 int flush_cnt = 0;
3222 int index;
3223
3224 QETH_DBF_TEXT(trace, 6, "dosndpfa");
3225
3226 /* spin until we get the queue ... */
3227 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
3228 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
3229 /* ... now we've got the queue */
3230 index = queue->next_buf_to_fill;
3231 buffer = &queue->bufs[queue->next_buf_to_fill];
3232 /*
3233 * check if buffer is empty to make sure that we do not 'overtake'
3234 * ourselves and try to fill a buffer that is already primed
3235 */
3236 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
3237 goto out;
3238 if (ctx == NULL)
3239 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
3240 QDIO_MAX_BUFFERS_PER_Q;
3241 else {
3242 buffers_needed = qeth_eddp_check_buffers_for_context(queue,
3243 ctx);
3244 if (buffers_needed < 0)
3245 goto out;
3246 queue->next_buf_to_fill =
3247 (queue->next_buf_to_fill + buffers_needed) %
3248 QDIO_MAX_BUFFERS_PER_Q;
3249 }
3250 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3251 if (ctx == NULL) {
3252 qeth_fill_buffer(queue, buffer, skb);
3253 qeth_flush_buffers(queue, 0, index, 1);
3254 } else {
3255 flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
3256 WARN_ON(buffers_needed != flush_cnt);
3257 qeth_flush_buffers(queue, 0, index, flush_cnt);
3258 }
3259 return 0;
3260out:
3261 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3262 return -EBUSY;
3263}
3264EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
3265
3266int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3267 struct sk_buff *skb, struct qeth_hdr *hdr,
3268 int elements_needed, struct qeth_eddp_context *ctx)
3269{
3270 struct qeth_qdio_out_buffer *buffer;
3271 int start_index;
3272 int flush_count = 0;
3273 int do_pack = 0;
3274 int tmp;
3275 int rc = 0;
3276
3277 QETH_DBF_TEXT(trace, 6, "dosndpkt");
3278
3279 /* spin until we get the queue ... */
3280 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
3281 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
3282 start_index = queue->next_buf_to_fill;
3283 buffer = &queue->bufs[queue->next_buf_to_fill];
3284 /*
3285 * check if buffer is empty to make sure that we do not 'overtake'
3286 * ourselves and try to fill a buffer that is already primed
3287 */
3288 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
3289 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3290 return -EBUSY;
3291 }
3292 /* check if we need to switch packing state of this queue */
3293 qeth_switch_to_packing_if_needed(queue);
3294 if (queue->do_pack) {
3295 do_pack = 1;
3296 if (ctx == NULL) {
3297 /* does packet fit in current buffer? */
3298 if ((QETH_MAX_BUFFER_ELEMENTS(card) -
3299 buffer->next_element_to_fill) < elements_needed) {
3300 /* ... no -> set state PRIMED */
3301 atomic_set(&buffer->state,
3302 QETH_QDIO_BUF_PRIMED);
3303 flush_count++;
3304 queue->next_buf_to_fill =
3305 (queue->next_buf_to_fill + 1) %
3306 QDIO_MAX_BUFFERS_PER_Q;
3307 buffer = &queue->bufs[queue->next_buf_to_fill];
3308 /* we did a step forward, so check buffer state
3309 * again */
3310 if (atomic_read(&buffer->state) !=
3311 QETH_QDIO_BUF_EMPTY) {
3312 qeth_flush_buffers(queue, 0,
3313 start_index, flush_count);
3314 atomic_set(&queue->state,
3315 QETH_OUT_Q_UNLOCKED);
3316 return -EBUSY;
3317 }
3318 }
3319 } else {
3320 /* check if we have enough elements (including following
3321 * free buffers) to handle eddp context */
3322 if (qeth_eddp_check_buffers_for_context(queue, ctx)
3323 < 0) {
3324 if (net_ratelimit())
3325 PRINT_WARN("eddp tx_dropped 1\n");
3326 rc = -EBUSY;
3327 goto out;
3328 }
3329 }
3330 }
3331 if (ctx == NULL)
3332 tmp = qeth_fill_buffer(queue, buffer, skb);
3333 else {
3334 tmp = qeth_eddp_fill_buffer(queue, ctx,
3335 queue->next_buf_to_fill);
3336 if (tmp < 0) {
3337 PRINT_ERR("eddp tx_dropped 2\n");
3338 rc = -EBUSY;
3339 goto out;
3340 }
3341 }
3342 queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
3343 QDIO_MAX_BUFFERS_PER_Q;
3344 flush_count += tmp;
3345out:
3346 if (flush_count)
3347 qeth_flush_buffers(queue, 0, start_index, flush_count);
3348 else if (!atomic_read(&queue->set_pci_flags_count))
3349 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
3350 /*
3351 * queue->state will go from LOCKED -> UNLOCKED or from
3352 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
3353 * (switch packing state or flush buffer to get another pci flag out).
3354 * In that case we will enter this loop.
3355 */
3356 while (atomic_dec_return(&queue->state)) {
3357 flush_count = 0;
3358 start_index = queue->next_buf_to_fill;
3359 /* check if we can go back to non-packing state */
3360 flush_count += qeth_switch_to_nonpacking_if_needed(queue);
3361 /*
3362 * check if we need to flush a packing buffer to get a pci
3363 * flag out on the queue
3364 */
3365 if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
3366 flush_count += qeth_flush_buffers_on_no_pci(queue);
3367 if (flush_count)
3368 qeth_flush_buffers(queue, 0, start_index, flush_count);
3369 }
3370 /* at this point the queue is UNLOCKED again */
3371 if (queue->card->options.performance_stats && do_pack)
3372 queue->card->perf_stats.bufs_sent_pack += flush_count;
3373
3374 return rc;
3375}
3376EXPORT_SYMBOL_GPL(qeth_do_send_packet);
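
/*
 * Annotation (not original driver text): queue->state doubles as a
 * lock and a notification channel. A sender holds LOCKED (or
 * LOCKED_FLUSH) while filling buffers, and qeth_check_outbound_queue()
 * can raise an already locked queue to LOCKED_FLUSH to say "look
 * again". The atomic_dec_return() loop above therefore re-checks the
 * packing state and the pci flags until the state really drops back
 * to UNLOCKED.
 */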
3377
3378static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
3379 struct qeth_reply *reply, unsigned long data)
3380{
3381 struct qeth_ipa_cmd *cmd;
3382 struct qeth_ipacmd_setadpparms *setparms;
3383
3384 QETH_DBF_TEXT(trace, 4, "prmadpcb");
3385
3386 cmd = (struct qeth_ipa_cmd *) data;
3387 setparms = &(cmd->data.setadapterparms);
3388
3389 qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
3390 if (cmd->hdr.return_code) {
3391 QETH_DBF_TEXT_(trace, 4, "prmrc%2.2x", cmd->hdr.return_code);
3392 setparms->data.mode = SET_PROMISC_MODE_OFF;
3393 }
3394 card->info.promisc_mode = setparms->data.mode;
3395 return 0;
3396}
3397
3398void qeth_setadp_promisc_mode(struct qeth_card *card)
3399{
3400 enum qeth_ipa_promisc_modes mode;
3401 struct net_device *dev = card->dev;
3402 struct qeth_cmd_buffer *iob;
3403 struct qeth_ipa_cmd *cmd;
3404
3405 QETH_DBF_TEXT(trace, 4, "setprom");
3406
3407 if (((dev->flags & IFF_PROMISC) &&
3408 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
3409 (!(dev->flags & IFF_PROMISC) &&
3410 (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
3411 return;
3412 mode = SET_PROMISC_MODE_OFF;
3413 if (dev->flags & IFF_PROMISC)
3414 mode = SET_PROMISC_MODE_ON;
3415 QETH_DBF_TEXT_(trace, 4, "mode:%x", mode);
3416
3417 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
3418 sizeof(struct qeth_ipacmd_setadpparms));
3419 cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
3420 cmd->data.setadapterparms.data.mode = mode;
3421 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
3422}
3423EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
3424
3425int qeth_change_mtu(struct net_device *dev, int new_mtu)
3426{
3427 struct qeth_card *card;
3428 char dbf_text[15];
3429
3430 card = netdev_priv(dev);
3431
3432 QETH_DBF_TEXT(trace, 4, "chgmtu");
3433 sprintf(dbf_text, "%8x", new_mtu);
3434 QETH_DBF_TEXT(trace, 4, dbf_text);
3435
3436 if (new_mtu < 64)
3437 return -EINVAL;
3438 if (new_mtu > 65535)
3439 return -EINVAL;
3440 if ((!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) &&
3441 (!qeth_mtu_is_valid(card, new_mtu)))
3442 return -EINVAL;
3443 dev->mtu = new_mtu;
3444 return 0;
3445}
3446EXPORT_SYMBOL_GPL(qeth_change_mtu);
3447
3448struct net_device_stats *qeth_get_stats(struct net_device *dev)
3449{
3450 struct qeth_card *card;
3451
3452 card = netdev_priv(dev);
3453
3454 QETH_DBF_TEXT(trace, 5, "getstat");
3455
3456 return &card->stats;
3457}
3458EXPORT_SYMBOL_GPL(qeth_get_stats);
3459
3460static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
3461 struct qeth_reply *reply, unsigned long data)
3462{
3463 struct qeth_ipa_cmd *cmd;
3464
3465 QETH_DBF_TEXT(trace, 4, "chgmaccb");
3466
3467 cmd = (struct qeth_ipa_cmd *) data;
3468 if (!card->options.layer2 ||
3469 !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
3470 memcpy(card->dev->dev_addr,
3471 &cmd->data.setadapterparms.data.change_addr.addr,
3472 OSA_ADDR_LEN);
3473 card->info.mac_bits |= QETH_LAYER2_MAC_READ;
3474 }
3475 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
3476 return 0;
3477}
3478
3479int qeth_setadpparms_change_macaddr(struct qeth_card *card)
3480{
3481 int rc;
3482 struct qeth_cmd_buffer *iob;
3483 struct qeth_ipa_cmd *cmd;
3484
3485 QETH_DBF_TEXT(trace, 4, "chgmac");
3486
3487 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
3488 sizeof(struct qeth_ipacmd_setadpparms));
3489 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3490 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
3491 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
3492 memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
3493 card->dev->dev_addr, OSA_ADDR_LEN);
3494 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
3495 NULL);
3496 return rc;
3497}
3498EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
3499
3500void qeth_tx_timeout(struct net_device *dev)
3501{
3502 struct qeth_card *card;
3503
3504 card = netdev_priv(dev);
3505 card->stats.tx_errors++;
3506 qeth_schedule_recovery(card);
3507}
3508EXPORT_SYMBOL_GPL(qeth_tx_timeout);
3509
3510int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
3511{
3512 struct qeth_card *card = netdev_priv(dev);
3513 int rc = 0;
3514
3515 switch (regnum) {
3516 case MII_BMCR: /* Basic mode control register */
3517 rc = BMCR_FULLDPLX;
3518 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
3519 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
3520 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
3521 rc |= BMCR_SPEED100;
3522 break;
3523 case MII_BMSR: /* Basic mode status register */
3524 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
3525 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
3526 BMSR_100BASE4;
3527 break;
3528 case MII_PHYSID1: /* PHYS ID 1 */
3529 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
3530 dev->dev_addr[2];
3531 rc = (rc >> 5) & 0xFFFF;
3532 break;
3533 case MII_PHYSID2: /* PHYS ID 2 */
3534 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
3535 break;
3536 case MII_ADVERTISE: /* Advertisement control reg */
3537 rc = ADVERTISE_ALL;
3538 break;
3539 case MII_LPA: /* Link partner ability reg */
3540 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
3541 LPA_100BASE4 | LPA_LPACK;
3542 break;
3543 case MII_EXPANSION: /* Expansion register */
3544 break;
3545 case MII_DCOUNTER: /* disconnect counter */
3546 break;
3547 case MII_FCSCOUNTER: /* false carrier counter */
3548 break;
3549 case MII_NWAYTEST: /* N-way auto-neg test register */
3550 break;
3551 case MII_RERRCOUNTER: /* rx error counter */
3552 rc = card->stats.rx_errors;
3553 break;
3554 case MII_SREVISION: /* silicon revision */
3555 break;
3556 case MII_RESV1: /* reserved 1 */
3557 break;
3558 case MII_LBRERROR: /* loopback, rx, bypass error */
3559 break;
3560 case MII_PHYADDR: /* physical address */
3561 break;
3562 case MII_RESV2: /* reserved 2 */
3563 break;
3564 case MII_TPISTATUS: /* TPI status for 10mbps */
3565 break;
3566 case MII_NCONFIG: /* network interface config */
3567 break;
3568 default:
3569 break;
3570 }
3571 return rc;
3572}
3573EXPORT_SYMBOL_GPL(qeth_mdio_read);
3574
3575static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
3576 struct qeth_cmd_buffer *iob, int len,
3577 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
3578 unsigned long),
3579 void *reply_param)
3580{
3581 u16 s1, s2;
3582
3583 QETH_DBF_TEXT(trace, 4, "sendsnmp");
3584
3585 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
3586 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
3587 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3588 /* adjust PDU length fields in IPA_PDU_HEADER */
3589 s1 = (u32) IPA_PDU_HEADER_SIZE + len;
3590 s2 = (u32) len;
3591 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
3592 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
3593 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
3594 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
3595 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
3596 reply_cb, reply_param);
3597}
3598
3599static int qeth_snmp_command_cb(struct qeth_card *card,
3600 struct qeth_reply *reply, unsigned long sdata)
3601{
3602 struct qeth_ipa_cmd *cmd;
3603 struct qeth_arp_query_info *qinfo;
3604 struct qeth_snmp_cmd *snmp;
3605 unsigned char *data;
3606 __u16 data_len;
3607
3608 QETH_DBF_TEXT(trace, 3, "snpcmdcb");
3609
3610 cmd = (struct qeth_ipa_cmd *) sdata;
3611 data = (unsigned char *)((char *)cmd - reply->offset);
3612 qinfo = (struct qeth_arp_query_info *) reply->param;
3613 snmp = &cmd->data.setadapterparms.data.snmp;
3614
3615 if (cmd->hdr.return_code) {
3616 QETH_DBF_TEXT_(trace, 4, "scer1%i", cmd->hdr.return_code);
3617 return 0;
3618 }
3619 if (cmd->data.setadapterparms.hdr.return_code) {
3620 cmd->hdr.return_code =
3621 cmd->data.setadapterparms.hdr.return_code;
3622 QETH_DBF_TEXT_(trace, 4, "scer2%i", cmd->hdr.return_code);
3623 return 0;
3624 }
3625 data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
3626 if (cmd->data.setadapterparms.hdr.seq_no == 1)
3627 data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
3628 else
3629 data_len -= (__u16)((char *)&snmp->request - (char *)cmd);
3630
3631 /* check if there is enough room in userspace */
3632 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
3633 QETH_DBF_TEXT_(trace, 4, "scer3%i", -ENOMEM);
3634 cmd->hdr.return_code = -ENOMEM;
3635 return 0;
3636 }
3637 QETH_DBF_TEXT_(trace, 4, "snore%i",
3638 cmd->data.setadapterparms.hdr.used_total);
3639 QETH_DBF_TEXT_(trace, 4, "sseqn%i",
3640 cmd->data.setadapterparms.hdr.seq_no);
3641 /* copy entries to user buffer */
3642 if (cmd->data.setadapterparms.hdr.seq_no == 1) {
3643 memcpy(qinfo->udata + qinfo->udata_offset,
3644 (char *)snmp,
3645 data_len + offsetof(struct qeth_snmp_cmd, data));
3646 qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
3647 } else {
3648 memcpy(qinfo->udata + qinfo->udata_offset,
3649 (char *)&snmp->request, data_len);
3650 }
3651 qinfo->udata_offset += data_len;
3652 /* check if all replies received ... */
3653 QETH_DBF_TEXT_(trace, 4, "srtot%i",
3654 cmd->data.setadapterparms.hdr.used_total);
3655 QETH_DBF_TEXT_(trace, 4, "srseq%i",
3656 cmd->data.setadapterparms.hdr.seq_no);
3657 if (cmd->data.setadapterparms.hdr.seq_no <
3658 cmd->data.setadapterparms.hdr.used_total)
3659 return 1;
3660 return 0;
3661}
3662
3663int qeth_snmp_command(struct qeth_card *card, char __user *udata)
3664{
3665 struct qeth_cmd_buffer *iob;
3666 struct qeth_ipa_cmd *cmd;
3667 struct qeth_snmp_ureq *ureq;
3668 int req_len;
3669 struct qeth_arp_query_info qinfo = {0, };
3670 int rc = 0;
3671
3672 QETH_DBF_TEXT(trace, 3, "snmpcmd");
3673
3674 if (card->info.guestlan)
3675 return -EOPNOTSUPP;
3676
3677 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
3678 (!card->options.layer2)) {
3679 PRINT_WARN("SNMP Query MIBS not supported "
3680 "on %s!\n", QETH_CARD_IFNAME(card));
3681 return -EOPNOTSUPP;
3682 }
3683 /* skip 4 bytes (data_len struct member) to get req_len */
3684 if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
3685 return -EFAULT;
3686 ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
3687 if (!ureq) {
3688 QETH_DBF_TEXT(trace, 2, "snmpnome");
3689 return -ENOMEM;
3690 }
3691 if (copy_from_user(ureq, udata,
3692 req_len + sizeof(struct qeth_snmp_ureq_hdr))) {
3693 kfree(ureq);
3694 return -EFAULT;
3695 }
3696 qinfo.udata_len = ureq->hdr.data_len;
3697 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
3698 if (!qinfo.udata) {
3699 kfree(ureq);
3700 return -ENOMEM;
3701 }
3702 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
3703
3704 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
3705 QETH_SNMP_SETADP_CMDLENGTH + req_len);
3706 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3707 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
3708 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
3709 qeth_snmp_command_cb, (void *)&qinfo);
3710 if (rc)
3711 PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
3712 QETH_CARD_IFNAME(card), rc);
3713 else {
3714 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
3715 rc = -EFAULT;
3716 }
3717
3718 kfree(ureq);
3719 kfree(qinfo.udata);
3720 return rc;
3721}
3722EXPORT_SYMBOL_GPL(qeth_snmp_command);
3723
3724static inline int qeth_get_qdio_q_format(struct qeth_card *card)
3725{
3726 switch (card->info.type) {
3727 case QETH_CARD_TYPE_IQD:
3728 return 2;
3729 default:
3730 return 0;
3731 }
3732}
3733
3734static int qeth_qdio_establish(struct qeth_card *card)
3735{
3736 struct qdio_initialize init_data;
3737 char *qib_param_field;
3738 struct qdio_buffer **in_sbal_ptrs;
3739 struct qdio_buffer **out_sbal_ptrs;
3740 int i, j, k;
3741 int rc = 0;
3742
3743 QETH_DBF_TEXT(setup, 2, "qdioest");
3744
3745 qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
3746 GFP_KERNEL);
3747 if (!qib_param_field)
3748 return -ENOMEM;
3749
3750 qeth_create_qib_param_field(card, qib_param_field);
3751 qeth_create_qib_param_field_blkt(card, qib_param_field);
3752
3753 in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
3754 GFP_KERNEL);
3755 if (!in_sbal_ptrs) {
3756 kfree(qib_param_field);
3757 return -ENOMEM;
3758 }
3759 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3760 in_sbal_ptrs[i] = (struct qdio_buffer *)
3761 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
3762
3763 out_sbal_ptrs =
3764 kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
3765 sizeof(void *), GFP_KERNEL);
3766 if (!out_sbal_ptrs) {
3767 kfree(in_sbal_ptrs);
3768 kfree(qib_param_field);
3769 return -ENOMEM;
3770 }
3771 for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
3772 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
3773 out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
3774 card->qdio.out_qs[i]->bufs[j].buffer);
3775 }
3776
3777 memset(&init_data, 0, sizeof(struct qdio_initialize));
3778 init_data.cdev = CARD_DDEV(card);
3779 init_data.q_format = qeth_get_qdio_q_format(card);
3780 init_data.qib_param_field_format = 0;
3781 init_data.qib_param_field = qib_param_field;
3782 init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
3783 init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
3784 init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
3785 init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
3786 init_data.no_input_qs = 1;
3787 init_data.no_output_qs = card->qdio.no_out_queues;
3788 init_data.input_handler = card->discipline.input_handler;
3789 init_data.output_handler = card->discipline.output_handler;
3790 init_data.int_parm = (unsigned long) card;
3791 init_data.flags = QDIO_INBOUND_0COPY_SBALS |
3792 QDIO_OUTBOUND_0COPY_SBALS |
3793 QDIO_USE_OUTBOUND_PCIS;
3794 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3795 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3796
3797 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
3798 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
3799 rc = qdio_initialize(&init_data);
3800 if (rc)
3801 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
3802 }
3803 kfree(out_sbal_ptrs);
3804 kfree(in_sbal_ptrs);
3805 kfree(qib_param_field);
3806 return rc;
3807}
3808
3809static void qeth_core_free_card(struct qeth_card *card)
3810{
3811
3812 QETH_DBF_TEXT(setup, 2, "freecrd");
3813 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
3814 qeth_clean_channel(&card->read);
3815 qeth_clean_channel(&card->write);
3816 if (card->dev)
3817 free_netdev(card->dev);
3818 kfree(card->ip_tbd_list);
3819 qeth_free_qdio_buffers(card);
3820 kfree(card);
3821}
3822
3823static struct ccw_device_id qeth_ids[] = {
3824 {CCW_DEVICE(0x1731, 0x01), .driver_info = QETH_CARD_TYPE_OSAE},
3825 {CCW_DEVICE(0x1731, 0x05), .driver_info = QETH_CARD_TYPE_IQD},
3826 {CCW_DEVICE(0x1731, 0x06), .driver_info = QETH_CARD_TYPE_OSN},
3827 {},
3828};
3829MODULE_DEVICE_TABLE(ccw, qeth_ids);
3830
3831static struct ccw_driver qeth_ccw_driver = {
3832 .name = "qeth",
3833 .ids = qeth_ids,
3834 .probe = ccwgroup_probe_ccwdev,
3835 .remove = ccwgroup_remove_ccwdev,
3836};
3837
3838static int qeth_core_driver_group(const char *buf, struct device *root_dev,
3839 unsigned long driver_id)
3840{
3841 const char *start, *end;
3842 char bus_ids[3][BUS_ID_SIZE], *argv[3];
3843 int i;
3844
3845 start = buf;
3846 for (i = 0; i < 3; i++) {
3847 static const char delim[] = { ',', ',', '\n' };
3848 int len;
3849
3850 end = strchr(start, delim[i]);
3851 if (!end)
3852 return -EINVAL;
3853 len = min_t(ptrdiff_t, BUS_ID_SIZE - 1, end - start);
3854 strncpy(bus_ids[i], start, len);
3855 bus_ids[i][len] = '\0';
3856 start = end + 1;
3857 argv[i] = bus_ids[i];
3858 }
3859
3860 return (ccwgroup_create(root_dev, driver_id,
3861 &qeth_ccw_driver, 3, argv));
3862}
3863
3864int qeth_core_hardsetup_card(struct qeth_card *card)
3865{
3866 int retries = 3;
3867 int mpno;
3868 int rc;
3869
3870 QETH_DBF_TEXT(setup, 2, "hrdsetup");
3871 atomic_set(&card->force_alloc_skb, 0);
3872retry:
3873 if (retries < 3) {
3874 PRINT_WARN("Retrying to do IDX activates.\n");
3875 ccw_device_set_offline(CARD_DDEV(card));
3876 ccw_device_set_offline(CARD_WDEV(card));
3877 ccw_device_set_offline(CARD_RDEV(card));
3878 ccw_device_set_online(CARD_RDEV(card));
3879 ccw_device_set_online(CARD_WDEV(card));
3880 ccw_device_set_online(CARD_DDEV(card));
3881 }
3882 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
3883 if (rc == -ERESTARTSYS) {
3884 QETH_DBF_TEXT(setup, 2, "break1");
3885 return rc;
3886 } else if (rc) {
3887 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3888 if (--retries < 0)
3889 goto out;
3890 else
3891 goto retry;
3892 }
3893
3894 rc = qeth_get_unitaddr(card);
3895 if (rc) {
3896 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3897 return rc;
3898 }
3899
3900 mpno = QETH_MAX_PORTNO;
3901 if (card->info.portno > mpno) {
3902 PRINT_ERR("Device %s does not offer port number %d \n.",
3903 CARD_BUS_ID(card), card->info.portno);
3904 rc = -ENODEV;
3905 goto out;
3906 }
3907 qeth_init_tokens(card);
3908 qeth_init_func_level(card);
3909 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
3910 if (rc == -ERESTARTSYS) {
3911 QETH_DBF_TEXT(setup, 2, "break2");
3912 return rc;
3913 } else if (rc) {
3914 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
3915 if (--retries < 0)
3916 goto out;
3917 else
3918 goto retry;
3919 }
3920 rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
3921 if (rc == -ERESTARTSYS) {
3922 QETH_DBF_TEXT(setup, 2, "break3");
3923 return rc;
3924 } else if (rc) {
3925 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
3926 if (--retries < 0)
3927 goto out;
3928 else
3929 goto retry;
3930 }
3931 rc = qeth_mpc_initialize(card);
3932 if (rc) {
3933 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3934 goto out;
3935 }
3936 return 0;
3937out:
3938 PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc);
3939 return rc;
3940}
3941EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
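One detail of the function above worth spelling out: after a failed QDIO clear or IDX activation, the three subchannels are bounced offline and back online before the next attempt, and the function gives up after three tries in total. A condensed sketch of the control flow, using the helper names as they appear above:

/*
 *   retries = 3;
 *   for (;;) {
 *           rc = clear QDIO, then IDX-activate read and write channels;
 *           if (!rc || rc == -ERESTARTSYS || --retries < 0)
 *                   break;
 *           set data/write/read offline, then read/write/data online;
 *   }
 */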
3942
3943static inline int qeth_create_skb_frag(struct qdio_buffer_element *element,
3944 struct sk_buff **pskb, int offset, int *pfrag, int data_len)
3945{
3946 struct page *page = virt_to_page(element->addr);
3947 if (*pskb == NULL) {
3948 /* the upper protocol layers assume that there is data in the
3949 * skb itself. Copy a small amount (64 bytes) to make them
3950 * happy. */
3951 *pskb = dev_alloc_skb(64 + ETH_HLEN);
3952 if (!(*pskb))
3953 return -ENOMEM;
3954 skb_reserve(*pskb, ETH_HLEN);
3955 if (data_len <= 64) {
3956 memcpy(skb_put(*pskb, data_len), element->addr + offset,
3957 data_len);
3958 } else {
3959 get_page(page);
3960 memcpy(skb_put(*pskb, 64), element->addr + offset, 64);
3961 skb_fill_page_desc(*pskb, *pfrag, page, offset + 64,
3962 data_len - 64);
3963 (*pskb)->data_len += data_len - 64;
3964 (*pskb)->len += data_len - 64;
3965 (*pskb)->truesize += data_len - 64;
3966 (*pfrag)++;
3967 }
3968 } else {
3969 get_page(page);
3970 skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len);
3971 (*pskb)->data_len += data_len;
3972 (*pskb)->len += data_len;
3973 (*pskb)->truesize += data_len;
3974 (*pfrag)++;
3975 }
3976 return 0;
3977}
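The helper above implements the receive scatter-gather path: the first 64 bytes of a packet are copied into the linear part of the skb, so the network stack can parse protocol headers without touching fragments, while the remainder is attached as a page fragment. get_page() takes an extra reference so the qdio buffer page survives until the skb is freed. Schematically:

/*
 *   skb->data:   [ first 64 bytes, linear copy ]
 *   frags[0..n]: [ qdio buffer pages, refcounted via get_page() ]
 */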
3978
3979struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
3980 struct qdio_buffer *buffer,
3981 struct qdio_buffer_element **__element, int *__offset,
3982 struct qeth_hdr **hdr)
3983{
3984 struct qdio_buffer_element *element = *__element;
3985 int offset = *__offset;
3986 struct sk_buff *skb = NULL;
3987 int skb_len;
3988 void *data_ptr;
3989 int data_len;
3990 int headroom = 0;
3991 int use_rx_sg = 0;
3992 int frag = 0;
3993
3994 QETH_DBF_TEXT(trace, 6, "nextskb");
3995 /* qeth_hdr must not cross element boundaries */
3996 if (element->length < offset + sizeof(struct qeth_hdr)) {
3997 if (qeth_is_last_sbale(element))
3998 return NULL;
3999 element++;
4000 offset = 0;
4001 if (element->length < sizeof(struct qeth_hdr))
4002 return NULL;
4003 }
4004 *hdr = element->addr + offset;
4005
4006 offset += sizeof(struct qeth_hdr);
4007 if (card->options.layer2) {
4008 if (card->info.type == QETH_CARD_TYPE_OSN) {
4009 skb_len = (*hdr)->hdr.osn.pdu_length;
4010 headroom = sizeof(struct qeth_hdr);
4011 } else {
4012 skb_len = (*hdr)->hdr.l2.pkt_length;
4013 }
4014 } else {
4015 skb_len = (*hdr)->hdr.l3.length;
4016 headroom = max((int)ETH_HLEN, (int)TR_HLEN);
4017 }
4018
4019 if (!skb_len)
4020 return NULL;
4021
4022 if ((skb_len >= card->options.rx_sg_cb) &&
4023 	    (card->info.type != QETH_CARD_TYPE_OSN) &&
4024 (!atomic_read(&card->force_alloc_skb))) {
4025 use_rx_sg = 1;
4026 } else {
4027 skb = dev_alloc_skb(skb_len + headroom);
4028 if (!skb)
4029 goto no_mem;
4030 if (headroom)
4031 skb_reserve(skb, headroom);
4032 }
4033
4034 data_ptr = element->addr + offset;
4035 while (skb_len) {
4036 data_len = min(skb_len, (int)(element->length - offset));
4037 if (data_len) {
4038 if (use_rx_sg) {
4039 if (qeth_create_skb_frag(element, &skb, offset,
4040 &frag, data_len))
4041 goto no_mem;
4042 } else {
4043 memcpy(skb_put(skb, data_len), data_ptr,
4044 data_len);
4045 }
4046 }
4047 skb_len -= data_len;
4048 if (skb_len) {
4049 if (qeth_is_last_sbale(element)) {
4050 QETH_DBF_TEXT(trace, 4, "unexeob");
4051 QETH_DBF_TEXT_(trace, 4, "%s",
4052 CARD_BUS_ID(card));
4053 QETH_DBF_TEXT(qerr, 2, "unexeob");
4054 QETH_DBF_TEXT_(qerr, 2, "%s",
4055 CARD_BUS_ID(card));
4056 QETH_DBF_HEX(misc, 4, buffer, sizeof(*buffer));
4057 dev_kfree_skb_any(skb);
4058 card->stats.rx_errors++;
4059 return NULL;
4060 }
4061 element++;
4062 offset = 0;
4063 data_ptr = element->addr;
4064 } else {
4065 offset += data_len;
4066 }
4067 }
4068 *__element = element;
4069 *__offset = offset;
4070 if (use_rx_sg && card->options.performance_stats) {
4071 card->perf_stats.sg_skbs_rx++;
4072 card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
4073 }
4074 return skb;
4075no_mem:
4076 if (net_ratelimit()) {
4077 PRINT_WARN("No memory for packet received on %s.\n",
4078 QETH_CARD_IFNAME(card));
4079 QETH_DBF_TEXT(trace, 2, "noskbmem");
4080 QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card));
4081 }
4082 card->stats.rx_dropped++;
4083 return NULL;
4084}
4085EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
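A hedged sketch of how a discipline driver might consume this helper; the surrounding loop is discipline-specific and the names below are illustrative only:

/*
 *   struct qdio_buffer_element *element = &buffer->element[0];
 *   struct qeth_hdr *hdr;
 *   struct sk_buff *skb;
 *   int offset = 0;
 *
 *   while ((skb = qeth_core_get_next_skb(card, buffer, &element,
 *                                        &offset, &hdr)))
 *           netif_rx(skb);    hand each unpacked frame to the stack
 */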
4086
4087static void qeth_unregister_dbf_views(void)
4088{
4089 if (qeth_dbf_setup)
4090 debug_unregister(qeth_dbf_setup);
4091 if (qeth_dbf_qerr)
4092 debug_unregister(qeth_dbf_qerr);
4093 if (qeth_dbf_sense)
4094 debug_unregister(qeth_dbf_sense);
4095 if (qeth_dbf_misc)
4096 debug_unregister(qeth_dbf_misc);
4097 if (qeth_dbf_data)
4098 debug_unregister(qeth_dbf_data);
4099 if (qeth_dbf_control)
4100 debug_unregister(qeth_dbf_control);
4101 if (qeth_dbf_trace)
4102 debug_unregister(qeth_dbf_trace);
4103}
4104
4105static int qeth_register_dbf_views(void)
4106{
4107 qeth_dbf_setup = debug_register(QETH_DBF_SETUP_NAME,
4108 QETH_DBF_SETUP_PAGES,
4109 QETH_DBF_SETUP_NR_AREAS,
4110 QETH_DBF_SETUP_LEN);
4111 qeth_dbf_misc = debug_register(QETH_DBF_MISC_NAME,
4112 QETH_DBF_MISC_PAGES,
4113 QETH_DBF_MISC_NR_AREAS,
4114 QETH_DBF_MISC_LEN);
4115 qeth_dbf_data = debug_register(QETH_DBF_DATA_NAME,
4116 QETH_DBF_DATA_PAGES,
4117 QETH_DBF_DATA_NR_AREAS,
4118 QETH_DBF_DATA_LEN);
4119 qeth_dbf_control = debug_register(QETH_DBF_CONTROL_NAME,
4120 QETH_DBF_CONTROL_PAGES,
4121 QETH_DBF_CONTROL_NR_AREAS,
4122 QETH_DBF_CONTROL_LEN);
4123 qeth_dbf_sense = debug_register(QETH_DBF_SENSE_NAME,
4124 QETH_DBF_SENSE_PAGES,
4125 QETH_DBF_SENSE_NR_AREAS,
4126 QETH_DBF_SENSE_LEN);
4127 qeth_dbf_qerr = debug_register(QETH_DBF_QERR_NAME,
4128 QETH_DBF_QERR_PAGES,
4129 QETH_DBF_QERR_NR_AREAS,
4130 QETH_DBF_QERR_LEN);
4131 qeth_dbf_trace = debug_register(QETH_DBF_TRACE_NAME,
4132 QETH_DBF_TRACE_PAGES,
4133 QETH_DBF_TRACE_NR_AREAS,
4134 QETH_DBF_TRACE_LEN);
4135
4136 if ((qeth_dbf_setup == NULL) || (qeth_dbf_misc == NULL) ||
4137 (qeth_dbf_data == NULL) || (qeth_dbf_control == NULL) ||
4138 (qeth_dbf_sense == NULL) || (qeth_dbf_qerr == NULL) ||
4139 (qeth_dbf_trace == NULL)) {
4140 qeth_unregister_dbf_views();
4141 return -ENOMEM;
4142 }
4143 debug_register_view(qeth_dbf_setup, &debug_hex_ascii_view);
4144 debug_set_level(qeth_dbf_setup, QETH_DBF_SETUP_LEVEL);
4145
4146 debug_register_view(qeth_dbf_misc, &debug_hex_ascii_view);
4147 debug_set_level(qeth_dbf_misc, QETH_DBF_MISC_LEVEL);
4148
4149 debug_register_view(qeth_dbf_data, &debug_hex_ascii_view);
4150 debug_set_level(qeth_dbf_data, QETH_DBF_DATA_LEVEL);
4151
4152 debug_register_view(qeth_dbf_control, &debug_hex_ascii_view);
4153 debug_set_level(qeth_dbf_control, QETH_DBF_CONTROL_LEVEL);
4154
4155 debug_register_view(qeth_dbf_sense, &debug_hex_ascii_view);
4156 debug_set_level(qeth_dbf_sense, QETH_DBF_SENSE_LEVEL);
4157
4158 debug_register_view(qeth_dbf_qerr, &debug_hex_ascii_view);
4159 debug_set_level(qeth_dbf_qerr, QETH_DBF_QERR_LEVEL);
4160
4161 debug_register_view(qeth_dbf_trace, &debug_hex_ascii_view);
4162 debug_set_level(qeth_dbf_trace, QETH_DBF_TRACE_LEVEL);
4163
4164 return 0;
4165}
4166
4167int qeth_core_load_discipline(struct qeth_card *card,
4168 enum qeth_discipline_id discipline)
4169{
4170 int rc = 0;
4171 switch (discipline) {
4172 case QETH_DISCIPLINE_LAYER3:
4173 card->discipline.ccwgdriver = try_then_request_module(
4174 symbol_get(qeth_l3_ccwgroup_driver),
4175 "qeth_l3");
4176 break;
4177 case QETH_DISCIPLINE_LAYER2:
4178 card->discipline.ccwgdriver = try_then_request_module(
4179 symbol_get(qeth_l2_ccwgroup_driver),
4180 "qeth_l2");
4181 break;
4182 }
4183 if (!card->discipline.ccwgdriver) {
4184 PRINT_ERR("Support for discipline %d not present\n",
4185 discipline);
4186 rc = -EINVAL;
4187 }
4188 return rc;
4189}
4190
4191void qeth_core_free_discipline(struct qeth_card *card)
4192{
4193 if (card->options.layer2)
4194 symbol_put(qeth_l2_ccwgroup_driver);
4195 else
4196 symbol_put(qeth_l3_ccwgroup_driver);
4197 card->discipline.ccwgdriver = NULL;
4198}
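A pairing worth noting: qeth_core_load_discipline() pins the discipline module via symbol_get(), loading it on demand through try_then_request_module(), and the symbol_put() calls here drop that reference. Each successful load must therefore be matched by exactly one free:

/*
 *   load:  try_then_request_module(symbol_get(...))   takes a module ref
 *   free:  symbol_put(...)                            drops it again
 */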
4199
4200static int qeth_core_probe_device(struct ccwgroup_device *gdev)
4201{
4202 struct qeth_card *card;
4203 struct device *dev;
4204 int rc;
4205 unsigned long flags;
4206
4207 QETH_DBF_TEXT(setup, 2, "probedev");
4208
4209 dev = &gdev->dev;
4210 if (!get_device(dev))
4211 return -ENODEV;
4212
4213 QETH_DBF_TEXT_(setup, 2, "%s", gdev->dev.bus_id);
4214
4215 card = qeth_alloc_card();
4216 if (!card) {
4217 QETH_DBF_TEXT_(setup, 2, "1err%d", -ENOMEM);
4218 rc = -ENOMEM;
4219 goto err_dev;
4220 }
4221 card->read.ccwdev = gdev->cdev[0];
4222 card->write.ccwdev = gdev->cdev[1];
4223 card->data.ccwdev = gdev->cdev[2];
4224 dev_set_drvdata(&gdev->dev, card);
4225 card->gdev = gdev;
4226 gdev->cdev[0]->handler = qeth_irq;
4227 gdev->cdev[1]->handler = qeth_irq;
4228 gdev->cdev[2]->handler = qeth_irq;
4229
4230 rc = qeth_determine_card_type(card);
4231 if (rc) {
4232 PRINT_WARN("%s: not a valid card type\n", __func__);
4233 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
4234 goto err_card;
4235 }
4236 rc = qeth_setup_card(card);
4237 if (rc) {
4238 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
4239 goto err_card;
4240 }
4241
4242 if (card->info.type == QETH_CARD_TYPE_OSN) {
4243 rc = qeth_core_create_osn_attributes(dev);
4244 if (rc)
4245 goto err_card;
4246 rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
4247 if (rc) {
4248 qeth_core_remove_osn_attributes(dev);
4249 goto err_card;
4250 }
4251 rc = card->discipline.ccwgdriver->probe(card->gdev);
4252 if (rc) {
4253 qeth_core_free_discipline(card);
4254 qeth_core_remove_osn_attributes(dev);
4255 goto err_card;
4256 }
4257 } else {
4258 rc = qeth_core_create_device_attributes(dev);
4259 if (rc)
4260 goto err_card;
4261 }
4262
4263 write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
4264 list_add_tail(&card->list, &qeth_core_card_list.list);
4265 write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
4266 return 0;
4267
4268err_card:
4269 qeth_core_free_card(card);
4270err_dev:
4271 put_device(dev);
4272 return rc;
4273}
4274
4275static void qeth_core_remove_device(struct ccwgroup_device *gdev)
4276{
4277 unsigned long flags;
4278 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4279
4280 if (card->discipline.ccwgdriver) {
4281 card->discipline.ccwgdriver->remove(gdev);
4282 qeth_core_free_discipline(card);
4283 }
4284
4285 if (card->info.type == QETH_CARD_TYPE_OSN) {
4286 qeth_core_remove_osn_attributes(&gdev->dev);
4287 } else {
4288 qeth_core_remove_device_attributes(&gdev->dev);
4289 }
4290 write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
4291 list_del(&card->list);
4292 write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
4293 qeth_core_free_card(card);
4294 dev_set_drvdata(&gdev->dev, NULL);
4295 put_device(&gdev->dev);
4296 return;
4297}
4298
4299static int qeth_core_set_online(struct ccwgroup_device *gdev)
4300{
4301 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4302 int rc = 0;
4303 int def_discipline;
4304
4305 if (!card->discipline.ccwgdriver) {
4306 if (card->info.type == QETH_CARD_TYPE_IQD)
4307 def_discipline = QETH_DISCIPLINE_LAYER3;
4308 else
4309 def_discipline = QETH_DISCIPLINE_LAYER2;
4310 rc = qeth_core_load_discipline(card, def_discipline);
4311 if (rc)
4312 goto err;
4313 rc = card->discipline.ccwgdriver->probe(card->gdev);
4314 if (rc)
4315 goto err;
4316 }
4317 rc = card->discipline.ccwgdriver->set_online(gdev);
4318err:
4319 return rc;
4320}
4321
4322static int qeth_core_set_offline(struct ccwgroup_device *gdev)
4323{
4324 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4325 return card->discipline.ccwgdriver->set_offline(gdev);
4326}
4327
4328static void qeth_core_shutdown(struct ccwgroup_device *gdev)
4329{
4330 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4331 if (card->discipline.ccwgdriver &&
4332 card->discipline.ccwgdriver->shutdown)
4333 card->discipline.ccwgdriver->shutdown(gdev);
4334}
4335
4336static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
4337 .owner = THIS_MODULE,
4338 .name = "qeth",
4339 .driver_id = 0xD8C5E3C8,
4340 .probe = qeth_core_probe_device,
4341 .remove = qeth_core_remove_device,
4342 .set_online = qeth_core_set_online,
4343 .set_offline = qeth_core_set_offline,
4344 .shutdown = qeth_core_shutdown,
4345};
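The magic driver_id is less opaque than it looks: 0xD8C5E3C8 spells "QETH" in EBCDIC (0xD8 = 'Q', 0xC5 = 'E', 0xE3 = 'T', 0xC8 = 'H').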
4346
4347static ssize_t
4348qeth_core_driver_group_store(struct device_driver *ddrv, const char *buf,
4349 size_t count)
4350{
4351 int err;
4352 err = qeth_core_driver_group(buf, qeth_core_root_dev,
4353 qeth_core_ccwgroup_driver.driver_id);
4354 if (err)
4355 return err;
4356 else
4357 return count;
4358}
4359
4360static DRIVER_ATTR(group, 0200, NULL, qeth_core_driver_group_store);
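Mode 0200 makes the attribute write-only: userspace creates a qeth group device by writing the three bus IDs to it, along the lines of (device numbers illustrative):

   echo 0.0.f5f0,0.0.f5f1,0.0.f5f2 > /sys/bus/ccwgroup/drivers/qeth/group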
4361
4362static struct {
4363 const char str[ETH_GSTRING_LEN];
4364} qeth_ethtool_stats_keys[] = {
4365/* 0 */{"rx skbs"},
4366 {"rx buffers"},
4367 {"tx skbs"},
4368 {"tx buffers"},
4369 {"tx skbs no packing"},
4370 {"tx buffers no packing"},
4371 {"tx skbs packing"},
4372 {"tx buffers packing"},
4373 {"tx sg skbs"},
4374 {"tx sg frags"},
4375/* 10 */{"rx sg skbs"},
4376 {"rx sg frags"},
4377 {"rx sg page allocs"},
4378 {"tx large kbytes"},
4379 {"tx large count"},
4380 {"tx pk state ch n->p"},
4381 {"tx pk state ch p->n"},
4382 {"tx pk watermark low"},
4383 {"tx pk watermark high"},
4384 {"queue 0 buffer usage"},
4385/* 20 */{"queue 1 buffer usage"},
4386 {"queue 2 buffer usage"},
4387 {"queue 3 buffer usage"},
4388 {"rx handler time"},
4389 {"rx handler count"},
4390 {"rx do_QDIO time"},
4391 {"rx do_QDIO count"},
4392 {"tx handler time"},
4393 {"tx handler count"},
4394 {"tx time"},
4395/* 30 */{"tx count"},
4396 {"tx do_QDIO time"},
4397 {"tx do_QDIO count"},
4398};
4399
4400int qeth_core_get_stats_count(struct net_device *dev)
4401{
4402 return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
4403}
4404EXPORT_SYMBOL_GPL(qeth_core_get_stats_count);
4405
4406void qeth_core_get_ethtool_stats(struct net_device *dev,
4407 struct ethtool_stats *stats, u64 *data)
4408{
4409 struct qeth_card *card = netdev_priv(dev);
4410 data[0] = card->stats.rx_packets -
4411 card->perf_stats.initial_rx_packets;
4412 data[1] = card->perf_stats.bufs_rec;
4413 data[2] = card->stats.tx_packets -
4414 card->perf_stats.initial_tx_packets;
4415 data[3] = card->perf_stats.bufs_sent;
4416 data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets
4417 - card->perf_stats.skbs_sent_pack;
4418 data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack;
4419 data[6] = card->perf_stats.skbs_sent_pack;
4420 data[7] = card->perf_stats.bufs_sent_pack;
4421 data[8] = card->perf_stats.sg_skbs_sent;
4422 data[9] = card->perf_stats.sg_frags_sent;
4423 data[10] = card->perf_stats.sg_skbs_rx;
4424 data[11] = card->perf_stats.sg_frags_rx;
4425 data[12] = card->perf_stats.sg_alloc_page_rx;
4426 data[13] = (card->perf_stats.large_send_bytes >> 10);
4427 data[14] = card->perf_stats.large_send_cnt;
4428 data[15] = card->perf_stats.sc_dp_p;
4429 data[16] = card->perf_stats.sc_p_dp;
4430 data[17] = QETH_LOW_WATERMARK_PACK;
4431 data[18] = QETH_HIGH_WATERMARK_PACK;
4432 data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers);
4433 data[20] = (card->qdio.no_out_queues > 1) ?
4434 atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0;
4435 data[21] = (card->qdio.no_out_queues > 2) ?
4436 atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0;
4437 data[22] = (card->qdio.no_out_queues > 3) ?
4438 atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0;
4439 data[23] = card->perf_stats.inbound_time;
4440 data[24] = card->perf_stats.inbound_cnt;
4441 data[25] = card->perf_stats.inbound_do_qdio_time;
4442 data[26] = card->perf_stats.inbound_do_qdio_cnt;
4443 data[27] = card->perf_stats.outbound_handler_time;
4444 data[28] = card->perf_stats.outbound_handler_cnt;
4445 data[29] = card->perf_stats.outbound_time;
4446 data[30] = card->perf_stats.outbound_cnt;
4447 data[31] = card->perf_stats.outbound_do_qdio_time;
4448 data[32] = card->perf_stats.outbound_do_qdio_cnt;
4449}
4450EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
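The data[] slots filled above are positional and must stay in lockstep with qeth_ethtool_stats_keys[]; since qeth_core_get_stats_count() derives the count from the key table, extending one table without the other misaligns the "ethtool -S" output.

/*
 *   Invariant: data[i] corresponds to qeth_ethtool_stats_keys[i],
 *   and qeth_core_get_stats_count() returns the key-table size.
 */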
4451
4452void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4453{
4454 switch (stringset) {
4455 case ETH_SS_STATS:
4456 memcpy(data, &qeth_ethtool_stats_keys,
4457 sizeof(qeth_ethtool_stats_keys));
4458 break;
4459 default:
4460 WARN_ON(1);
4461 break;
4462 }
4463}
4464EXPORT_SYMBOL_GPL(qeth_core_get_strings);
4465
4466void qeth_core_get_drvinfo(struct net_device *dev,
4467 struct ethtool_drvinfo *info)
4468{
4469 struct qeth_card *card = netdev_priv(dev);
4470 if (card->options.layer2)
4471 strcpy(info->driver, "qeth_l2");
4472 else
4473 strcpy(info->driver, "qeth_l3");
4474
4475 strcpy(info->version, "1.0");
4476 strcpy(info->fw_version, card->info.mcl_level);
4477 sprintf(info->bus_info, "%s/%s/%s",
4478 CARD_RDEV_ID(card),
4479 CARD_WDEV_ID(card),
4480 CARD_DDEV_ID(card));
4481}
4482EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
4483
4484static int __init qeth_core_init(void)
4485{
4486 int rc;
4487
4488 PRINT_INFO("loading core functions\n");
4489 INIT_LIST_HEAD(&qeth_core_card_list.list);
4490 rwlock_init(&qeth_core_card_list.rwlock);
4491
4492 rc = qeth_register_dbf_views();
4493 if (rc)
4494 goto out_err;
4495 rc = ccw_driver_register(&qeth_ccw_driver);
4496 if (rc)
4497 goto ccw_err;
4498 rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
4499 if (rc)
4500 goto ccwgroup_err;
4501 rc = driver_create_file(&qeth_core_ccwgroup_driver.driver,
4502 &driver_attr_group);
4503 if (rc)
4504 goto driver_err;
4505 qeth_core_root_dev = s390_root_dev_register("qeth");
4506 rc = IS_ERR(qeth_core_root_dev) ? PTR_ERR(qeth_core_root_dev) : 0;
4507 if (rc)
4508 goto register_err;
4509 return 0;
4510
4511register_err:
4512 driver_remove_file(&qeth_core_ccwgroup_driver.driver,
4513 &driver_attr_group);
4514driver_err:
4515 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
4516ccwgroup_err:
4517 ccw_driver_unregister(&qeth_ccw_driver);
4518ccw_err:
4519 qeth_unregister_dbf_views();
4520out_err:
4521 PRINT_ERR("Initialization failed with code %d\n", rc);
4522 return rc;
4523}
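The error handling above is the usual unwind ladder: each goto label undoes only the steps that succeeded before the failure, and the labels fall through one another so cleanup always runs in exact reverse order of registration.

/*
 *   dbf views -> ccw driver -> ccwgroup driver -> group attr -> root dev
 *   a failure at step k jumps to the label that unwinds steps k-1 .. 1
 */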
4524
4525static void __exit qeth_core_exit(void)
4526{
4527 s390_root_dev_unregister(qeth_core_root_dev);
4528 driver_remove_file(&qeth_core_ccwgroup_driver.driver,
4529 &driver_attr_group);
4530 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
4531 ccw_driver_unregister(&qeth_ccw_driver);
4532 qeth_unregister_dbf_views();
4533 PRINT_INFO("core functions removed\n");
4534}
4535
4536module_init(qeth_core_init);
4537module_exit(qeth_core_exit);
4538MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
4539MODULE_DESCRIPTION("qeth core functions");
4540MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
new file mode 100644
index 000000000000..8653b73e5dcf
--- /dev/null
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -0,0 +1,266 @@
1/*
2 * drivers/s390/net/qeth_core_mpc.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
6 * Thomas Spatzier <tspat@de.ibm.com>,
7 * Frank Blaschka <frank.blaschka@de.ibm.com>
8 */
9
10#include <linux/module.h>
11#include <asm/cio.h>
12#include "qeth_core_mpc.h"
13
14unsigned char IDX_ACTIVATE_READ[] = {
15 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
16 0x19, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00,
17 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1,
18 0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00,
19 0x00, 0x00
20};
21
22unsigned char IDX_ACTIVATE_WRITE[] = {
23 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
24 0x15, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00,
25 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1,
26 0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00,
27 0x00, 0x00
28};
29
30unsigned char CM_ENABLE[] = {
31 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
32 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x63,
33 0x10, 0x00, 0x00, 0x01,
34 0x00, 0x00, 0x00, 0x00,
35 0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
36 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x23,
37 0x00, 0x00, 0x23, 0x05, 0x00, 0x00, 0x00, 0x00,
38 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
39 0x01, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x40,
40 0x00, 0x0c, 0x41, 0x02, 0x00, 0x17, 0x00, 0x00,
41 0x00, 0x00, 0x00, 0x00,
42 0x00, 0x0b, 0x04, 0x01,
43 0x7e, 0x04, 0x05, 0x00, 0x01, 0x01, 0x0f,
44 0x00,
45 0x0c, 0x04, 0x02, 0xff, 0xff, 0xff, 0xff, 0xff,
46 0xff, 0xff, 0xff
47};
48
49unsigned char CM_SETUP[] = {
50 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
51 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x64,
52 0x10, 0x00, 0x00, 0x01,
53 0x00, 0x00, 0x00, 0x00,
54 0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
55 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x24,
56 0x00, 0x00, 0x24, 0x05, 0x00, 0x00, 0x00, 0x00,
57 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
58 0x01, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x40,
59 0x00, 0x0c, 0x41, 0x04, 0x00, 0x18, 0x00, 0x00,
60 0x00, 0x00, 0x00, 0x00,
61 0x00, 0x09, 0x04, 0x04,
62 0x05, 0x00, 0x01, 0x01, 0x11,
63 0x00, 0x09, 0x04,
64 0x05, 0x05, 0x00, 0x00, 0x00, 0x00,
65 0x00, 0x06,
66 0x04, 0x06, 0xc8, 0x00
67};
68
69unsigned char ULP_ENABLE[] = {
70 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
71 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6b,
72 0x10, 0x00, 0x00, 0x01,
73 0x00, 0x00, 0x00, 0x00,
74 0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
75 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x2b,
76 0x00, 0x00, 0x2b, 0x05, 0x20, 0x01, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
78 0x01, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x40,
79 0x00, 0x0c, 0x41, 0x02, 0x00, 0x1f, 0x00, 0x00,
80 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x0b, 0x04, 0x01,
82 0x03, 0x04, 0x05, 0x00, 0x01, 0x01, 0x12,
83 0x00,
84 0x14, 0x04, 0x0a, 0x00, 0x20, 0x00, 0x00, 0xff,
85 0xff, 0x00, 0x08, 0xc8, 0xe8, 0xc4, 0xf1, 0xc7,
86 0xf1, 0x00, 0x00
87};
88
89unsigned char ULP_SETUP[] = {
90 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
91 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6c,
92 0x10, 0x00, 0x00, 0x01,
93 0x00, 0x00, 0x00, 0x00,
94 0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02,
95 0x00, 0x00, 0x00, 0x01, 0x00, 0x24, 0x00, 0x2c,
96 0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
98 0x01, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x40,
99 0x00, 0x0c, 0x41, 0x04, 0x00, 0x20, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x09, 0x04, 0x04,
102 0x05, 0x00, 0x01, 0x01, 0x14,
103 0x00, 0x09, 0x04,
104 0x05, 0x05, 0x30, 0x01, 0x00, 0x00,
105 0x00, 0x06,
106 0x04, 0x06, 0x40, 0x00,
107 0x00, 0x08, 0x04, 0x0b,
108 0x00, 0x00, 0x00, 0x00
109};
110
111unsigned char DM_ACT[] = {
112 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
113 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x55,
114 0x10, 0x00, 0x00, 0x01,
115 0x00, 0x00, 0x00, 0x00,
116 0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03,
117 0x00, 0x00, 0x00, 0x02, 0x00, 0x24, 0x00, 0x15,
118 0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00,
119 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
120 0x01, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x40,
121 0x00, 0x0c, 0x43, 0x60, 0x00, 0x09, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x09, 0x04, 0x04,
124 0x05, 0x40, 0x01, 0x01, 0x00
125};
126
127unsigned char IPA_PDU_HEADER[] = {
128 0x00, 0xe0, 0x00, 0x00, 0x77, 0x77, 0x77, 0x77,
129 0x00, 0x00, 0x00, 0x14, 0x00, 0x00,
130 (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) / 256,
131 (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) % 256,
132 0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
133 0xc1, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
134 0x00, 0x00, 0x00, 0x00, 0x00, 0x24,
135 sizeof(struct qeth_ipa_cmd) / 256,
136 sizeof(struct qeth_ipa_cmd) % 256,
137 0x00,
138 sizeof(struct qeth_ipa_cmd) / 256,
139 sizeof(struct qeth_ipa_cmd) % 256,
140 0x05,
141 0x77, 0x77, 0x77, 0x77,
142 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
143 0x01, 0x00,
144 sizeof(struct qeth_ipa_cmd) / 256,
145 sizeof(struct qeth_ipa_cmd) % 256,
146 0x00, 0x00, 0x00, 0x40,
147};
148EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
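Note how the length fields in this template are computed: "/ 256" and "% 256" split a compile-time sizeof sum into its big-endian high and low bytes, so the initializer is patched for the real size of struct qeth_ipa_cmd with no runtime fixup. Equivalently:

/*
 *   u16 len = IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd);
 *   stored as the byte pair { len / 256, len % 256 }  (big-endian)
 */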
149
150unsigned char WRITE_CCW[] = {
151 0x01, CCW_FLAG_SLI, 0, 0,
152 0, 0, 0, 0
153};
154
155unsigned char READ_CCW[] = {
156 0x02, CCW_FLAG_SLI, 0, 0,
157 0, 0, 0, 0
158};
159
160
161struct ipa_rc_msg {
162 enum qeth_ipa_return_codes rc;
163 char *msg;
164};
165
166static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
167 {IPA_RC_SUCCESS, "success"},
168 {IPA_RC_NOTSUPP, "Command not supported"},
169 {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
170 {IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"},
171 {IPA_RC_UNSUPPORTED_COMMAND, "Command not supported"},
172 {IPA_RC_DUP_IPV6_REMOTE, "ipv6 address already registered remote"},
173 {IPA_RC_DUP_IPV6_HOME, "ipv6 address already registered"},
174 {IPA_RC_UNREGISTERED_ADDR, "Address not registered"},
175 {IPA_RC_NO_ID_AVAILABLE, "No identifiers available"},
176 {IPA_RC_ID_NOT_FOUND, "Identifier not found"},
177 {IPA_RC_INVALID_IP_VERSION, "IP version incorrect"},
178 {IPA_RC_LAN_FRAME_MISMATCH, "LAN and frame mismatch"},
179 {IPA_RC_L2_UNSUPPORTED_CMD, "Unsupported layer 2 command"},
180 {IPA_RC_L2_DUP_MAC, "Duplicate MAC address"},
181 {IPA_RC_L2_ADDR_TABLE_FULL, "Layer2 address table full"},
182 {IPA_RC_L2_DUP_LAYER3_MAC, "Duplicate with layer 3 MAC"},
183 {IPA_RC_L2_GMAC_NOT_FOUND, "GMAC not found"},
184 {IPA_RC_L2_MAC_NOT_FOUND, "L2 mac address not found"},
185 {IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"},
186 {IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"},
187 {IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"},
188 {IPA_RC_DATA_MISMATCH, "Data field mismatch (v4/v6 mixed)"},
189 {IPA_RC_INVALID_MTU_SIZE, "Invalid MTU size"},
190 {IPA_RC_INVALID_LANTYPE, "Invalid LAN type"},
191 {IPA_RC_INVALID_LANNUM, "Invalid LAN num"},
192 {IPA_RC_DUPLICATE_IP_ADDRESS, "Address already registered"},
193 {IPA_RC_IP_ADDR_TABLE_FULL, "IP address table full"},
194 {IPA_RC_LAN_PORT_STATE_ERROR, "LAN port state error"},
195 {IPA_RC_SETIP_NO_STARTLAN, "Setip no startlan received"},
196 {IPA_RC_SETIP_ALREADY_RECEIVED, "Setip already received"},
197 {IPA_RC_IP_ADDR_ALREADY_USED, "IP address already in use on LAN"},
198 {IPA_RC_MULTICAST_FULL, "No task available, multicast full"},
199 {IPA_RC_SETIP_INVALID_VERSION, "SETIP invalid IP version"},
200 {IPA_RC_UNSUPPORTED_SUBCMD, "Unsupported assist subcommand"},
201 {IPA_RC_ARP_ASSIST_NO_ENABLE, "Only partial success, no enable"},
202 {IPA_RC_PRIMARY_ALREADY_DEFINED, "Primary already defined"},
203 {IPA_RC_SECOND_ALREADY_DEFINED, "Secondary already defined"},
204 {IPA_RC_INVALID_SETRTG_INDICATOR, "Invalid SETRTG indicator"},
205 {IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"},
206 {IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"},
207 {IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"},
208 {IPA_RC_FFFF, "Unknown Error"}
209};
210
211
212
213char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
214{
215 int x = 0;
216 qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) /
217 sizeof(struct ipa_rc_msg) - 1].rc = rc;
218 while (qeth_ipa_rc_msg[x].rc != rc)
219 x++;
220 return qeth_ipa_rc_msg[x].msg;
221}
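The lookup above relies on a sentinel rather than a bounds check: the final table entry (IPA_RC_FFFF, "Unknown Error") has its rc field overwritten with the code being searched for, so the scan is guaranteed to terminate and any unlisted code maps to "Unknown Error". Since the table is mutated, concurrent lookups of different codes could in principle race; the code accepts this. qeth_get_ipa_cmd_name() below uses the same idiom. A minimal sketch of the pattern:

/*
 *   tbl[N - 1].key = wanted;                guarantee a hit
 *   for (i = 0; tbl[i].key != wanted; i++)
 *           ;
 *   return tbl[i].val;                      real entry, or the sentinel
 */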
222
223
224struct ipa_cmd_names {
225 enum qeth_ipa_cmds cmd;
226 char *name;
227};
228
229static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
230 {IPA_CMD_STARTLAN, "startlan"},
231 {IPA_CMD_STOPLAN, "stoplan"},
232 {IPA_CMD_SETVMAC, "setvmac"},
233 	{IPA_CMD_DELVMAC,		"delvmac"},
234 {IPA_CMD_SETGMAC, "setgmac"},
235 {IPA_CMD_DELGMAC, "delgmac"},
236 {IPA_CMD_SETVLAN, "setvlan"},
237 {IPA_CMD_DELVLAN, "delvlan"},
238 {IPA_CMD_SETCCID, "setccid"},
239 {IPA_CMD_DELCCID, "delccid"},
240 {IPA_CMD_MODCCID, "modccid"},
241 {IPA_CMD_SETIP, "setip"},
242 {IPA_CMD_QIPASSIST, "qipassist"},
243 {IPA_CMD_SETASSPARMS, "setassparms"},
244 {IPA_CMD_SETIPM, "setipm"},
245 {IPA_CMD_DELIPM, "delipm"},
246 {IPA_CMD_SETRTG, "setrtg"},
247 {IPA_CMD_DELIP, "delip"},
248 {IPA_CMD_SETADAPTERPARMS, "setadapterparms"},
249 {IPA_CMD_SET_DIAG_ASS, "set_diag_ass"},
250 {IPA_CMD_CREATE_ADDR, "create_addr"},
251 {IPA_CMD_DESTROY_ADDR, "destroy_addr"},
252 {IPA_CMD_REGISTER_LOCAL_ADDR, "register_local_addr"},
253 {IPA_CMD_UNREGISTER_LOCAL_ADDR, "unregister_local_addr"},
254 {IPA_CMD_UNKNOWN, "unknown"},
255};
256
257char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
258{
259 int x = 0;
260 qeth_ipa_cmd_names[
261 sizeof(qeth_ipa_cmd_names) /
262 sizeof(struct ipa_cmd_names)-1].cmd = cmd;
263 while (qeth_ipa_cmd_names[x].cmd != cmd)
264 x++;
265 return qeth_ipa_cmd_names[x].name;
266}
diff --git a/drivers/s390/net/qeth_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 6de2da5ed5fd..de221932f30f 100644
--- a/drivers/s390/net/qeth_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -1,27 +1,25 @@
1/* 1/*
2 * linux/drivers/s390/net/qeth_mpc.h 2 * drivers/s390/net/qeth_core_mpc.h
3 *
4 * Linux on zSeries OSA Express and HiperSockets support
5 *
6 * Copyright 2000,2003 IBM Corporation
7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8 * Thomas Spatzier <tspat@de.ibm.com>
9 * Frank Pavlic <fpavlic@de.ibm.com>
10 * 3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
6 * Thomas Spatzier <tspat@de.ibm.com>,
7 * Frank Blaschka <frank.blaschka@de.ibm.com>
11 */ 8 */
12#ifndef __QETH_MPC_H__ 9
13#define __QETH_MPC_H__ 10#ifndef __QETH_CORE_MPC_H__
11#define __QETH_CORE_MPC_H__
14 12
15#include <asm/qeth.h> 13#include <asm/qeth.h>
16 14
17#define IPA_PDU_HEADER_SIZE 0x40 15#define IPA_PDU_HEADER_SIZE 0x40
18#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer+0x0e) 16#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
19#define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer+0x26) 17#define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer + 0x26)
20#define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer+0x29) 18#define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer + 0x29)
21#define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer+0x3a) 19#define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer + 0x3a)
22 20
23extern unsigned char IPA_PDU_HEADER[]; 21extern unsigned char IPA_PDU_HEADER[];
24#define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer+0x2c) 22#define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer + 0x2c)
25 23
26#define IPA_CMD_LENGTH (IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd)) 24#define IPA_CMD_LENGTH (IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd))
27 25
@@ -93,7 +91,8 @@ enum qeth_checksum_types {
93 */ 91 */
94#define RESET_ROUTING_FLAG 0x10 /* indicate that routing type shall be set */ 92#define RESET_ROUTING_FLAG 0x10 /* indicate that routing type shall be set */
95enum qeth_routing_types { 93enum qeth_routing_types {
96 NO_ROUTER = 0, /* TODO: set to bit flag used in IPA Command */ 94 /* TODO: set to bit flag used in IPA Command */
95 NO_ROUTER = 0,
97 PRIMARY_ROUTER = 1, 96 PRIMARY_ROUTER = 1,
98 SECONDARY_ROUTER = 2, 97 SECONDARY_ROUTER = 2,
99 MULTICAST_ROUTER = 3, 98 MULTICAST_ROUTER = 3,
@@ -233,14 +232,14 @@ enum qeth_ipa_setdelip_flags {
233 232
234/* SETADAPTER IPA Command: ****************************************************/ 233/* SETADAPTER IPA Command: ****************************************************/
235enum qeth_ipa_setadp_cmd { 234enum qeth_ipa_setadp_cmd {
236 IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x01, 235 IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x0001,
237 IPA_SETADP_ALTER_MAC_ADDRESS = 0x02, 236 IPA_SETADP_ALTER_MAC_ADDRESS = 0x0002,
238 IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x04, 237 IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x0004,
239 IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x08, 238 IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x0008,
240 IPA_SETADP_SET_ADDRESSING_MODE = 0x10, 239 IPA_SETADP_SET_ADDRESSING_MODE = 0x0010,
241 IPA_SETADP_SET_CONFIG_PARMS = 0x20, 240 IPA_SETADP_SET_CONFIG_PARMS = 0x0020,
242 IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x40, 241 IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x0040,
243 IPA_SETADP_SET_BROADCAST_MODE = 0x80, 242 IPA_SETADP_SET_BROADCAST_MODE = 0x0080,
244 IPA_SETADP_SEND_OSA_MESSAGE = 0x0100, 243 IPA_SETADP_SEND_OSA_MESSAGE = 0x0100,
245 IPA_SETADP_SET_SNMP_CONTROL = 0x0200, 244 IPA_SETADP_SET_SNMP_CONTROL = 0x0200,
246 IPA_SETADP_QUERY_CARD_INFO = 0x0400, 245 IPA_SETADP_QUERY_CARD_INFO = 0x0400,
@@ -397,26 +396,11 @@ struct qeth_ipacmd_setadpparms {
397 } data; 396 } data;
398} __attribute__ ((packed)); 397} __attribute__ ((packed));
399 398
400/* IPFRAME IPA Command: ***************************************************/
401/* TODO: define in analogy to commands define above */
402
403/* ADD_ADDR_ENTRY IPA Command: ********************************************/
404/* TODO: define in analogy to commands define above */
405
406/* DELETE_ADDR_ENTRY IPA Command: *****************************************/
407/* TODO: define in analogy to commands define above */
408
409/* CREATE_ADDR IPA Command: ***********************************************/ 399/* CREATE_ADDR IPA Command: ***********************************************/
410struct qeth_create_destroy_address { 400struct qeth_create_destroy_address {
411 __u8 unique_id[8]; 401 __u8 unique_id[8];
412} __attribute__ ((packed)); 402} __attribute__ ((packed));
413 403
414/* REGISTER_LOCAL_ADDR IPA Command: ***************************************/
415/* TODO: define in analogy to commands define above */
416
417/* UNREGISTER_LOCAL_ADDR IPA Command: *************************************/
418/* TODO: define in analogy to commands define above */
419
420/* Header for each IPA command */ 404/* Header for each IPA command */
421struct qeth_ipacmd_hdr { 405struct qeth_ipacmd_hdr {
422 __u8 command; 406 __u8 command;
@@ -463,10 +447,8 @@ enum qeth_ipa_arp_return_codes {
463}; 447};
464 448
465 449
466extern char * 450extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
467qeth_get_ipa_msg(enum qeth_ipa_return_codes rc); 451extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
468extern char *
469qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
470 452
471#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \ 453#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
472 sizeof(struct qeth_ipacmd_setassparms_hdr)) 454 sizeof(struct qeth_ipacmd_setassparms_hdr))
@@ -492,88 +474,89 @@ extern unsigned char READ_CCW[];
492 474
493extern unsigned char CM_ENABLE[]; 475extern unsigned char CM_ENABLE[];
494#define CM_ENABLE_SIZE 0x63 476#define CM_ENABLE_SIZE 0x63
495#define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer+0x2c) 477#define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer + 0x2c)
496#define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer+0x53) 478#define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53)
497#define QETH_CM_ENABLE_USER_DATA(buffer) (buffer+0x5b) 479#define QETH_CM_ENABLE_USER_DATA(buffer) (buffer + 0x5b)
498 480
499#define QETH_CM_ENABLE_RESP_FILTER_TOKEN(buffer) \ 481#define QETH_CM_ENABLE_RESP_FILTER_TOKEN(buffer) \
500 (PDU_ENCAPSULATION(buffer)+ 0x13) 482 (PDU_ENCAPSULATION(buffer) + 0x13)
501 483
502 484
503extern unsigned char CM_SETUP[]; 485extern unsigned char CM_SETUP[];
504#define CM_SETUP_SIZE 0x64 486#define CM_SETUP_SIZE 0x64
505#define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer+0x2c) 487#define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
506#define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer+0x51) 488#define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
507#define QETH_CM_SETUP_FILTER_TOKEN(buffer) (buffer+0x5a) 489#define QETH_CM_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a)
508 490
509#define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \ 491#define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \
510 (PDU_ENCAPSULATION(buffer) + 0x1a) 492 (PDU_ENCAPSULATION(buffer) + 0x1a)
511 493
512extern unsigned char ULP_ENABLE[]; 494extern unsigned char ULP_ENABLE[];
513#define ULP_ENABLE_SIZE 0x6b 495#define ULP_ENABLE_SIZE 0x6b
514#define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer+0x61) 496#define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer + 0x61)
515#define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer+0x2c) 497#define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer + 0x2c)
516#define QETH_ULP_ENABLE_FILTER_TOKEN(buffer) (buffer+0x53) 498#define QETH_ULP_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53)
517#define QETH_ULP_ENABLE_PORTNAME_AND_LL(buffer) (buffer+0x62) 499#define QETH_ULP_ENABLE_PORTNAME_AND_LL(buffer) (buffer + 0x62)
518#define QETH_ULP_ENABLE_RESP_FILTER_TOKEN(buffer) \ 500#define QETH_ULP_ENABLE_RESP_FILTER_TOKEN(buffer) \
519 (PDU_ENCAPSULATION(buffer) + 0x13) 501 (PDU_ENCAPSULATION(buffer) + 0x13)
520#define QETH_ULP_ENABLE_RESP_MAX_MTU(buffer) \ 502#define QETH_ULP_ENABLE_RESP_MAX_MTU(buffer) \
521 (PDU_ENCAPSULATION(buffer)+ 0x1f) 503 (PDU_ENCAPSULATION(buffer) + 0x1f)
522#define QETH_ULP_ENABLE_RESP_DIFINFO_LEN(buffer) \ 504#define QETH_ULP_ENABLE_RESP_DIFINFO_LEN(buffer) \
523 (PDU_ENCAPSULATION(buffer) + 0x17) 505 (PDU_ENCAPSULATION(buffer) + 0x17)
524#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \ 506#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \
525 (PDU_ENCAPSULATION(buffer)+ 0x2b) 507 (PDU_ENCAPSULATION(buffer) + 0x2b)
526/* Layer 2 definitions */ 508/* Layer 2 definitions */
527#define QETH_PROT_LAYER2 0x08 509#define QETH_PROT_LAYER2 0x08
528#define QETH_PROT_TCPIP 0x03 510#define QETH_PROT_TCPIP 0x03
529#define QETH_PROT_OSN2 0x0a 511#define QETH_PROT_OSN2 0x0a
530#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer+0x50) 512#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer + 0x50)
531#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer+0x19) 513#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer + 0x19)
532 514
533extern unsigned char ULP_SETUP[]; 515extern unsigned char ULP_SETUP[];
534#define ULP_SETUP_SIZE 0x6c 516#define ULP_SETUP_SIZE 0x6c
535#define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer+0x2c) 517#define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
536#define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer+0x51) 518#define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
537#define QETH_ULP_SETUP_FILTER_TOKEN(buffer) (buffer+0x5a) 519#define QETH_ULP_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a)
538#define QETH_ULP_SETUP_CUA(buffer) (buffer+0x68) 520#define QETH_ULP_SETUP_CUA(buffer) (buffer + 0x68)
539#define QETH_ULP_SETUP_REAL_DEVADDR(buffer) (buffer+0x6a) 521#define QETH_ULP_SETUP_REAL_DEVADDR(buffer) (buffer + 0x6a)
540 522
541#define QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(buffer) \ 523#define QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(buffer) \
542 (PDU_ENCAPSULATION(buffer)+0x1a) 524 (PDU_ENCAPSULATION(buffer) + 0x1a)
543 525
544 526
545extern unsigned char DM_ACT[]; 527extern unsigned char DM_ACT[];
546#define DM_ACT_SIZE 0x55 528#define DM_ACT_SIZE 0x55
547#define QETH_DM_ACT_DEST_ADDR(buffer) (buffer+0x2c) 529#define QETH_DM_ACT_DEST_ADDR(buffer) (buffer + 0x2c)
548#define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer+0x51) 530#define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer + 0x51)
549 531
550 532
551 533
552#define QETH_TRANSPORT_HEADER_SEQ_NO(buffer) (buffer+4) 534#define QETH_TRANSPORT_HEADER_SEQ_NO(buffer) (buffer + 4)
553#define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer+0x1c) 535#define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer + 0x1c)
554#define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer+0x20) 536#define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer + 0x20)
555 537
556extern unsigned char IDX_ACTIVATE_READ[]; 538extern unsigned char IDX_ACTIVATE_READ[];
557extern unsigned char IDX_ACTIVATE_WRITE[]; 539extern unsigned char IDX_ACTIVATE_WRITE[];
558 540
559#define IDX_ACTIVATE_SIZE 0x22 541#define IDX_ACTIVATE_SIZE 0x22
560#define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer+0x0c) 542#define QETH_IDX_ACT_PNO(buffer) (buffer+0x0b)
561#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b]&0x80) 543#define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer + 0x0c)
562#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer+0x10) 544#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b] & 0x80)
563#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer+0x16) 545#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer + 0x10)
564#define QETH_IDX_ACT_QDIO_DEV_CUA(buffer) (buffer+0x1e) 546#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer + 0x16)
565#define QETH_IDX_ACT_QDIO_DEV_REALADDR(buffer) (buffer+0x20) 547#define QETH_IDX_ACT_QDIO_DEV_CUA(buffer) (buffer + 0x1e)
566#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08]&3)==2) 548#define QETH_IDX_ACT_QDIO_DEV_REALADDR(buffer) (buffer + 0x20)
567#define QETH_IDX_REPLY_LEVEL(buffer) (buffer+0x12) 549#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2)
550#define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12)
568#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09] 551#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09]
569 552
570#define PDU_ENCAPSULATION(buffer) \ 553#define PDU_ENCAPSULATION(buffer) \
571 (buffer + *(buffer + (*(buffer+0x0b)) + \ 554 (buffer + *(buffer + (*(buffer + 0x0b)) + \
572 *(buffer + *(buffer+0x0b)+0x11) +0x07)) 555 *(buffer + *(buffer + 0x0b) + 0x11) + 0x07))
573 556
574#define IS_IPA(buffer) \ 557#define IS_IPA(buffer) \
575 ((buffer) && \ 558 ((buffer) && \
576 ( *(buffer + ((*(buffer+0x0b))+4) )==0xc1) ) 559 (*(buffer + ((*(buffer + 0x0b)) + 4)) == 0xc1))
577 560
578#define ADDR_FRAME_TYPE_DIX 1 561#define ADDR_FRAME_TYPE_DIX 1
579#define ADDR_FRAME_TYPE_802_3 2 562#define ADDR_FRAME_TYPE_802_3 2
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_core_offl.c
index e3c268cfbffe..8b407d6a83cf 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_core_offl.c
@@ -1,13 +1,11 @@
1/* 1/*
2 * linux/drivers/s390/net/qeth_eddp.c 2 * drivers/s390/net/qeth_core_offl.c
3 *
4 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
5 *
6 * Copyright 2004 IBM Corporation
7 *
8 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
9 * 3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Thomas Spatzier <tspat@de.ibm.com>,
6 * Frank Blaschka <frank.blaschka@de.ibm.com>
10 */ 7 */
8
11#include <linux/errno.h> 9#include <linux/errno.h>
12#include <linux/ip.h> 10#include <linux/ip.h>
13#include <linux/inetdevice.h> 11#include <linux/inetdevice.h>
@@ -18,14 +16,14 @@
18#include <linux/skbuff.h> 16#include <linux/skbuff.h>
19 17
20#include <net/ip.h> 18#include <net/ip.h>
19#include <net/ip6_checksum.h>
21 20
22#include "qeth.h" 21#include "qeth_core.h"
23#include "qeth_mpc.h" 22#include "qeth_core_mpc.h"
24#include "qeth_eddp.h" 23#include "qeth_core_offl.h"
25 24
26int 25int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
27qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue, 26 struct qeth_eddp_context *ctx)
28 struct qeth_eddp_context *ctx)
29{ 27{
30 int index = queue->next_buf_to_fill; 28 int index = queue->next_buf_to_fill;
31 int elements_needed = ctx->num_elements; 29 int elements_needed = ctx->num_elements;
@@ -34,7 +32,7 @@ qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
34 int buffers_needed = 0; 32 int buffers_needed = 0;
35 33
36 QETH_DBF_TEXT(trace, 5, "eddpcbfc"); 34 QETH_DBF_TEXT(trace, 5, "eddpcbfc");
37 while(elements_needed > 0) { 35 while (elements_needed > 0) {
38 buffers_needed++; 36 buffers_needed++;
39 if (atomic_read(&queue->bufs[index].state) != 37 if (atomic_read(&queue->bufs[index].state) !=
40 QETH_QDIO_BUF_EMPTY) 38 QETH_QDIO_BUF_EMPTY)
@@ -49,8 +47,7 @@ qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
49 return buffers_needed; 47 return buffers_needed;
50} 48}
51 49
52static void 50static void qeth_eddp_free_context(struct qeth_eddp_context *ctx)
53qeth_eddp_free_context(struct qeth_eddp_context *ctx)
54{ 51{
55 int i; 52 int i;
56 53
@@ -63,26 +60,24 @@ qeth_eddp_free_context(struct qeth_eddp_context *ctx)
63} 60}
64 61
65 62
66static inline void 63static void qeth_eddp_get_context(struct qeth_eddp_context *ctx)
67qeth_eddp_get_context(struct qeth_eddp_context *ctx)
68{ 64{
69 atomic_inc(&ctx->refcnt); 65 atomic_inc(&ctx->refcnt);
70} 66}
71 67
72void 68void qeth_eddp_put_context(struct qeth_eddp_context *ctx)
73qeth_eddp_put_context(struct qeth_eddp_context *ctx)
74{ 69{
75 if (atomic_dec_return(&ctx->refcnt) == 0) 70 if (atomic_dec_return(&ctx->refcnt) == 0)
76 qeth_eddp_free_context(ctx); 71 qeth_eddp_free_context(ctx);
77} 72}
73EXPORT_SYMBOL_GPL(qeth_eddp_put_context);
78 74
79void 75void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
80qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
81{ 76{
82 struct qeth_eddp_context_reference *ref; 77 struct qeth_eddp_context_reference *ref;
83 78
84 QETH_DBF_TEXT(trace, 6, "eddprctx"); 79 QETH_DBF_TEXT(trace, 6, "eddprctx");
85 while (!list_empty(&buf->ctx_list)){ 80 while (!list_empty(&buf->ctx_list)) {
86 ref = list_entry(buf->ctx_list.next, 81 ref = list_entry(buf->ctx_list.next,
87 struct qeth_eddp_context_reference, list); 82 struct qeth_eddp_context_reference, list);
88 qeth_eddp_put_context(ref->ctx); 83 qeth_eddp_put_context(ref->ctx);
@@ -91,9 +86,8 @@ qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
91 } 86 }
92} 87}
93 88
94static int 89static int qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
95qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf, 90 struct qeth_eddp_context *ctx)
96 struct qeth_eddp_context *ctx)
97{ 91{
98 struct qeth_eddp_context_reference *ref; 92 struct qeth_eddp_context_reference *ref;
99 93
@@ -107,10 +101,8 @@ qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
107 return 0; 101 return 0;
108} 102}
109 103
110int 104int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
111qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue, 105 struct qeth_eddp_context *ctx, int index)
112 struct qeth_eddp_context *ctx,
113 int index)
114{ 106{
115 struct qeth_qdio_out_buffer *buf = NULL; 107 struct qeth_qdio_out_buffer *buf = NULL;
116 struct qdio_buffer *buffer; 108 struct qdio_buffer *buffer;
@@ -123,7 +115,7 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
123 QETH_DBF_TEXT(trace, 5, "eddpfibu"); 115 QETH_DBF_TEXT(trace, 5, "eddpfibu");
124 while (elements > 0) { 116 while (elements > 0) {
125 buf = &queue->bufs[index]; 117 buf = &queue->bufs[index];
126 if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY){ 118 if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
127 /* normally this should not happen since we checked for 119 /* normally this should not happen since we checked for
128 * available elements in qeth_check_elements_for_context 120 * available elements in qeth_check_elements_for_context
129 */ 121 */
@@ -148,9 +140,9 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
148 must_refcnt = 1; 140 must_refcnt = 1;
149 continue; 141 continue;
150 } 142 }
151 if (must_refcnt){ 143 if (must_refcnt) {
152 must_refcnt = 0; 144 must_refcnt = 0;
153 if (qeth_eddp_buf_ref_context(buf, ctx)){ 145 if (qeth_eddp_buf_ref_context(buf, ctx)) {
154 PRINT_WARN("no memory to create eddp context " 146 PRINT_WARN("no memory to create eddp context "
155 "reference\n"); 147 "reference\n");
156 goto out_check; 148 goto out_check;
@@ -158,7 +150,7 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
158 } 150 }
159 buffer = buf->buffer; 151 buffer = buf->buffer;
160 /* fill one skb into buffer */ 152 /* fill one skb into buffer */
161 for (i = 0; i < ctx->elements_per_skb; ++i){ 153 for (i = 0; i < ctx->elements_per_skb; ++i) {
162 if (ctx->elements[element].length != 0) { 154 if (ctx->elements[element].length != 0) {
163 buffer->element[buf->next_element_to_fill]. 155 buffer->element[buf->next_element_to_fill].
164 addr = ctx->elements[element].addr; 156 addr = ctx->elements[element].addr;
@@ -176,7 +168,7 @@ out_check:
176 if (!queue->do_pack) { 168 if (!queue->do_pack) {
177 QETH_DBF_TEXT(trace, 6, "fillbfnp"); 169 QETH_DBF_TEXT(trace, 6, "fillbfnp");
178 /* set state to PRIMED -> will be flushed */ 170 /* set state to PRIMED -> will be flushed */
179 if (buf->next_element_to_fill > 0){ 171 if (buf->next_element_to_fill > 0) {
180 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); 172 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
181 flush_cnt++; 173 flush_cnt++;
182 } 174 }
@@ -198,9 +190,8 @@ out:
198 return flush_cnt; 190 return flush_cnt;
199} 191}
200 192
201static void 193static void qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
202qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, 194 struct qeth_eddp_data *eddp, int data_len)
203 struct qeth_eddp_data *eddp, int data_len)
204{ 195{
205 u8 *page; 196 u8 *page;
206 int page_remainder; 197 int page_remainder;
@@ -220,7 +211,7 @@ qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
220 pkt_len += VLAN_HLEN; 211 pkt_len += VLAN_HLEN;
221 /* does complete packet fit in current page ? */ 212 /* does complete packet fit in current page ? */
222 page_remainder = PAGE_SIZE - page_offset; 213 page_remainder = PAGE_SIZE - page_offset;
223 if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)){ 214 if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
224 /* no -> go to start of next page */ 215 /* no -> go to start of next page */
225 ctx->offset += page_remainder; 216 ctx->offset += page_remainder;
226 page = ctx->pages[ctx->offset >> PAGE_SHIFT]; 217 page = ctx->pages[ctx->offset >> PAGE_SHIFT];
@@ -232,14 +223,14 @@ qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
232 ctx->offset += sizeof(struct qeth_hdr); 223 ctx->offset += sizeof(struct qeth_hdr);
233 page_offset += sizeof(struct qeth_hdr); 224 page_offset += sizeof(struct qeth_hdr);
234 /* add mac header (?) */ 225 /* add mac header (?) */
235 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){ 226 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
236 memcpy(page + page_offset, &eddp->mac, ETH_HLEN); 227 memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
237 element->length += ETH_HLEN; 228 element->length += ETH_HLEN;
238 ctx->offset += ETH_HLEN; 229 ctx->offset += ETH_HLEN;
239 page_offset += ETH_HLEN; 230 page_offset += ETH_HLEN;
240 } 231 }
241 /* add VLAN tag */ 232 /* add VLAN tag */
242 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)){ 233 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
243 memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN); 234 memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
244 element->length += VLAN_HLEN; 235 element->length += VLAN_HLEN;
245 ctx->offset += VLAN_HLEN; 236 ctx->offset += VLAN_HLEN;
@@ -258,9 +249,8 @@ qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
258 ctx->offset += eddp->thl; 249 ctx->offset += eddp->thl;
259} 250}
260 251
261static void 252static void qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp,
262qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, 253 int len, __wsum *hcsum)
263 __wsum *hcsum)
264{ 254{
265 struct skb_frag_struct *frag; 255 struct skb_frag_struct *frag;
266 int left_in_frag; 256 int left_in_frag;
@@ -278,16 +268,17 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
278 while (len > 0) { 268 while (len > 0) {
279 if (eddp->frag < 0) { 269 if (eddp->frag < 0) {
280 /* we're in skb->data */ 270 /* we're in skb->data */
281 left_in_frag = (eddp->skb->len - eddp->skb->data_len) 271 left_in_frag = (eddp->skb->len -
272 eddp->skb->data_len)
282 - eddp->skb_offset; 273 - eddp->skb_offset;
283 src = eddp->skb->data + eddp->skb_offset; 274 src = eddp->skb->data + eddp->skb_offset;
284 } else { 275 } else {
285 frag = &skb_shinfo(eddp->skb)-> 276 frag = &skb_shinfo(eddp->skb)->frags[
286 frags[eddp->frag]; 277 eddp->frag];
287 left_in_frag = frag->size - eddp->frag_offset; 278 left_in_frag = frag->size - eddp->frag_offset;
288 src = (u8 *)( 279 src = (u8 *)((page_to_pfn(frag->page) <<
289 (page_to_pfn(frag->page) << PAGE_SHIFT)+ 280 PAGE_SHIFT) + frag->page_offset +
290 frag->page_offset + eddp->frag_offset); 281 eddp->frag_offset);
291 } 282 }
292 if (left_in_frag <= 0) { 283 if (left_in_frag <= 0) {
293 eddp->frag++; 284 eddp->frag++;
@@ -305,10 +296,8 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
305 } 296 }
306} 297}
307 298
308static void 299static void qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
309qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, 300 struct qeth_eddp_data *eddp, int data_len, __wsum hcsum)
310 struct qeth_eddp_data *eddp, int data_len,
311 __wsum hcsum)
312{ 301{
313 u8 *page; 302 u8 *page;
314 int page_remainder; 303 int page_remainder;
@@ -320,9 +309,9 @@ qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
320 page = ctx->pages[ctx->offset >> PAGE_SHIFT]; 309 page = ctx->pages[ctx->offset >> PAGE_SHIFT];
321 page_offset = ctx->offset % PAGE_SIZE; 310 page_offset = ctx->offset % PAGE_SIZE;
322 element = &ctx->elements[ctx->num_elements]; 311 element = &ctx->elements[ctx->num_elements];
323 while (data_len){ 312 while (data_len) {
324 page_remainder = PAGE_SIZE - page_offset; 313 page_remainder = PAGE_SIZE - page_offset;
325 if (page_remainder < data_len){ 314 if (page_remainder < data_len) {
326 qeth_eddp_copy_data_tcp(page + page_offset, eddp, 315 qeth_eddp_copy_data_tcp(page + page_offset, eddp,
327 page_remainder, &hcsum); 316 page_remainder, &hcsum);
328 element->length += page_remainder; 317 element->length += page_remainder;
@@ -352,8 +341,8 @@ qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
352 ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum); 341 ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
353} 342}
354 343
355static __wsum 344static __wsum qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp,
356qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) 345 int data_len)
357{ 346{
358 __wsum phcsum; /* pseudo header checksum */ 347 __wsum phcsum; /* pseudo header checksum */
359 348
@@ -366,8 +355,8 @@ qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
366 return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum); 355 return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
367} 356}
368 357
369static __wsum 358static __wsum qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp,
370qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) 359 int data_len)
371{ 360{
372 __be32 proto; 361 __be32 proto;
373 __wsum phcsum; /* pseudo header checksum */ 362 __wsum phcsum; /* pseudo header checksum */
@@ -384,14 +373,14 @@ qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
384 return phcsum; 373 return phcsum;
385} 374}
386 375
387static struct qeth_eddp_data * 376static struct qeth_eddp_data *qeth_eddp_create_eddp_data(struct qeth_hdr *qh,
388qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) 377 u8 *nh, u8 nhl, u8 *th, u8 thl)
389{ 378{
390 struct qeth_eddp_data *eddp; 379 struct qeth_eddp_data *eddp;
391 380
392 QETH_DBF_TEXT(trace, 5, "eddpcrda"); 381 QETH_DBF_TEXT(trace, 5, "eddpcrda");
393 eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC); 382 eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
394 if (eddp){ 383 if (eddp) {
395 eddp->nhl = nhl; 384 eddp->nhl = nhl;
396 eddp->thl = thl; 385 eddp->thl = thl;
397 memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr)); 386 memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
@@ -402,9 +391,8 @@ qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
402 return eddp; 391 return eddp;
403} 392}
404 393
405static void 394static void __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
406__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, 395 struct qeth_eddp_data *eddp)
407 struct qeth_eddp_data *eddp)
408{ 396{
409 struct tcphdr *tcph; 397 struct tcphdr *tcph;
410 int data_len; 398 int data_len;
@@ -412,30 +400,26 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
412 400
413 QETH_DBF_TEXT(trace, 5, "eddpftcp"); 401 QETH_DBF_TEXT(trace, 5, "eddpftcp");
414 eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl; 402 eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
415 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 403 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
416 eddp->skb_offset += sizeof(struct ethhdr); 404 eddp->skb_offset += sizeof(struct ethhdr);
417#ifdef CONFIG_QETH_VLAN 405 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
418 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) 406 eddp->skb_offset += VLAN_HLEN;
419 eddp->skb_offset += VLAN_HLEN; 407 }
420#endif /* CONFIG_QETH_VLAN */
421 }
422 tcph = tcp_hdr(eddp->skb); 408 tcph = tcp_hdr(eddp->skb);
423 while (eddp->skb_offset < eddp->skb->len) { 409 while (eddp->skb_offset < eddp->skb->len) {
424 data_len = min((int)skb_shinfo(eddp->skb)->gso_size, 410 data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
425 (int)(eddp->skb->len - eddp->skb_offset)); 411 (int)(eddp->skb->len - eddp->skb_offset));
426 /* prepare qdio hdr */ 412 /* prepare qdio hdr */
427 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){ 413 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
428 eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN + 414 eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
429 eddp->nhl + eddp->thl; 415 eddp->nhl + eddp->thl;
430#ifdef CONFIG_QETH_VLAN
431 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) 416 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
432 eddp->qh.hdr.l2.pkt_length += VLAN_HLEN; 417 eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
433#endif /* CONFIG_QETH_VLAN */
434 } else 418 } else
435 eddp->qh.hdr.l3.length = data_len + eddp->nhl + 419 eddp->qh.hdr.l3.length = data_len + eddp->nhl +
436 eddp->thl; 420 eddp->thl;
437 /* prepare ip hdr */ 421 /* prepare ip hdr */
438 if (eddp->skb->protocol == htons(ETH_P_IP)){ 422 if (eddp->skb->protocol == htons(ETH_P_IP)) {
439 eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl + 423 eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl +
440 eddp->thl); 424 eddp->thl);
441 eddp->nh.ip4.h.check = 0; 425 eddp->nh.ip4.h.check = 0;
@@ -443,9 +427,10 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
443 ip_fast_csum((u8 *)&eddp->nh.ip4.h, 427 ip_fast_csum((u8 *)&eddp->nh.ip4.h,
444 eddp->nh.ip4.h.ihl); 428 eddp->nh.ip4.h.ihl);
445 } else 429 } else
446 eddp->nh.ip6.h.payload_len = htons(data_len + eddp->thl); 430 eddp->nh.ip6.h.payload_len = htons(data_len +
431 eddp->thl);
447 /* prepare tcp hdr */ 432 /* prepare tcp hdr */
448 if (data_len == (eddp->skb->len - eddp->skb_offset)){ 433 if (data_len == (eddp->skb->len - eddp->skb_offset)) {
449 /* last segment -> set FIN and PSH flags */ 434 /* last segment -> set FIN and PSH flags */
450 eddp->th.tcp.h.fin = tcph->fin; 435 eddp->th.tcp.h.fin = tcph->fin;
451 eddp->th.tcp.h.psh = tcph->psh; 436 eddp->th.tcp.h.psh = tcph->psh;
@@ -462,13 +447,13 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
462 /* prepare headers for next round */ 447 /* prepare headers for next round */
463 if (eddp->skb->protocol == htons(ETH_P_IP)) 448 if (eddp->skb->protocol == htons(ETH_P_IP))
464 eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1); 449 eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1);
465 eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) + data_len); 450 eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) +
451 data_len);
466 } 452 }
467} 453}
468 454
469static int 455static int qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
470qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, 456 struct sk_buff *skb, struct qeth_hdr *qhdr)
471 struct sk_buff *skb, struct qeth_hdr *qhdr)
472{ 457{
473 struct qeth_eddp_data *eddp = NULL; 458 struct qeth_eddp_data *eddp = NULL;
474 459
@@ -494,12 +479,10 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
494 if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 479 if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
495 skb_set_mac_header(skb, sizeof(struct qeth_hdr)); 480 skb_set_mac_header(skb, sizeof(struct qeth_hdr));
496 memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN); 481 memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
497#ifdef CONFIG_QETH_VLAN
498 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) { 482 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
499 eddp->vlan[0] = skb->protocol; 483 eddp->vlan[0] = skb->protocol;
500 eddp->vlan[1] = htons(vlan_tx_tag_get(skb)); 484 eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
501 } 485 }
502#endif /* CONFIG_QETH_VLAN */
503 } 486 }
504 /* the next flags will only be set on the last segment */ 487 /* the next flags will only be set on the last segment */
505 eddp->th.tcp.h.fin = 0; 488 eddp->th.tcp.h.fin = 0;
@@ -511,16 +494,15 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
511 return 0; 494 return 0;
512} 495}
513 496
514static void 497static void qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx,
515qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, 498 struct sk_buff *skb, int hdr_len)
516 int hdr_len)
517{ 499{
518 int skbs_per_page; 500 int skbs_per_page;
519 501
520 QETH_DBF_TEXT(trace, 5, "eddpcanp"); 502 QETH_DBF_TEXT(trace, 5, "eddpcanp");
521 /* can we put multiple skbs in one page? */ 503 /* can we put multiple skbs in one page? */
522 skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len); 504 skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
523 if (skbs_per_page > 1){ 505 if (skbs_per_page > 1) {
524 ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) / 506 ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
525 skbs_per_page + 1; 507 skbs_per_page + 1;
526 ctx->elements_per_skb = 1; 508 ctx->elements_per_skb = 1;
@@ -535,9 +517,8 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
535 (skb_shinfo(skb)->gso_segs + 1); 517 (skb_shinfo(skb)->gso_segs + 1);
536} 518}
537 519
538static struct qeth_eddp_context * 520static struct qeth_eddp_context *qeth_eddp_create_context_generic(
539qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, 521 struct qeth_card *card, struct sk_buff *skb, int hdr_len)
540 int hdr_len)
541{ 522{
542 struct qeth_eddp_context *ctx = NULL; 523 struct qeth_eddp_context *ctx = NULL;
543 u8 *addr; 524 u8 *addr;
@@ -546,37 +527,36 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
546 QETH_DBF_TEXT(trace, 5, "creddpcg"); 527 QETH_DBF_TEXT(trace, 5, "creddpcg");
547 /* create the context and allocate pages */ 528 /* create the context and allocate pages */
548 ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC); 529 ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
549 if (ctx == NULL){ 530 if (ctx == NULL) {
550 QETH_DBF_TEXT(trace, 2, "ceddpcn1"); 531 QETH_DBF_TEXT(trace, 2, "ceddpcn1");
551 return NULL; 532 return NULL;
552 } 533 }
553 ctx->type = QETH_LARGE_SEND_EDDP; 534 ctx->type = QETH_LARGE_SEND_EDDP;
554 qeth_eddp_calc_num_pages(ctx, skb, hdr_len); 535 qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
555 if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)){ 536 if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
556 QETH_DBF_TEXT(trace, 2, "ceddpcis"); 537 QETH_DBF_TEXT(trace, 2, "ceddpcis");
557 kfree(ctx); 538 kfree(ctx);
558 return NULL; 539 return NULL;
559 } 540 }
560 ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC); 541 ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
561 if (ctx->pages == NULL){ 542 if (ctx->pages == NULL) {
562 QETH_DBF_TEXT(trace, 2, "ceddpcn2"); 543 QETH_DBF_TEXT(trace, 2, "ceddpcn2");
563 kfree(ctx); 544 kfree(ctx);
564 return NULL; 545 return NULL;
565 } 546 }
566 for (i = 0; i < ctx->num_pages; ++i){ 547 for (i = 0; i < ctx->num_pages; ++i) {
567 addr = (u8 *)__get_free_page(GFP_ATOMIC); 548 addr = (u8 *)get_zeroed_page(GFP_ATOMIC);
568 if (addr == NULL){ 549 if (addr == NULL) {
569 QETH_DBF_TEXT(trace, 2, "ceddpcn3"); 550 QETH_DBF_TEXT(trace, 2, "ceddpcn3");
570 ctx->num_pages = i; 551 ctx->num_pages = i;
571 qeth_eddp_free_context(ctx); 552 qeth_eddp_free_context(ctx);
572 return NULL; 553 return NULL;
573 } 554 }
574 memset(addr, 0, PAGE_SIZE);
575 ctx->pages[i] = addr; 555 ctx->pages[i] = addr;
576 } 556 }
577 ctx->elements = kcalloc(ctx->num_elements, 557 ctx->elements = kcalloc(ctx->num_elements,
578 sizeof(struct qeth_eddp_element), GFP_ATOMIC); 558 sizeof(struct qeth_eddp_element), GFP_ATOMIC);
579 if (ctx->elements == NULL){ 559 if (ctx->elements == NULL) {
580 QETH_DBF_TEXT(trace, 2, "ceddpcn4"); 560 QETH_DBF_TEXT(trace, 2, "ceddpcn4");
581 qeth_eddp_free_context(ctx); 561 qeth_eddp_free_context(ctx);
582 return NULL; 562 return NULL;
@@ -587,18 +567,18 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
587 return ctx; 567 return ctx;
588} 568}
589 569
590static struct qeth_eddp_context * 570static struct qeth_eddp_context *qeth_eddp_create_context_tcp(
591qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, 571 struct qeth_card *card, struct sk_buff *skb,
592 struct qeth_hdr *qhdr) 572 struct qeth_hdr *qhdr)
593{ 573{
594 struct qeth_eddp_context *ctx = NULL; 574 struct qeth_eddp_context *ctx = NULL;
595 575
596 QETH_DBF_TEXT(trace, 5, "creddpct"); 576 QETH_DBF_TEXT(trace, 5, "creddpct");
597 if (skb->protocol == htons(ETH_P_IP)) 577 if (skb->protocol == htons(ETH_P_IP))
598 ctx = qeth_eddp_create_context_generic(card, skb, 578 ctx = qeth_eddp_create_context_generic(card, skb,
599 (sizeof(struct qeth_hdr) + 579 (sizeof(struct qeth_hdr) +
600 ip_hdrlen(skb) + 580 ip_hdrlen(skb) +
601 tcp_hdrlen(skb))); 581 tcp_hdrlen(skb)));
602 else if (skb->protocol == htons(ETH_P_IPV6)) 582 else if (skb->protocol == htons(ETH_P_IPV6))
603 ctx = qeth_eddp_create_context_generic(card, skb, 583 ctx = qeth_eddp_create_context_generic(card, skb,
604 sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) + 584 sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
@@ -610,7 +590,7 @@ qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
610 QETH_DBF_TEXT(trace, 2, "creddpnl"); 590 QETH_DBF_TEXT(trace, 2, "creddpnl");
611 return NULL; 591 return NULL;
612 } 592 }
613 if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)){ 593 if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
614 QETH_DBF_TEXT(trace, 2, "ceddptfe"); 594 QETH_DBF_TEXT(trace, 2, "ceddptfe");
615 qeth_eddp_free_context(ctx); 595 qeth_eddp_free_context(ctx);
616 return NULL; 596 return NULL;
@@ -619,9 +599,9 @@ qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
619 return ctx; 599 return ctx;
620} 600}
621 601
622struct qeth_eddp_context * 602struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *card,
623qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb, 603 struct sk_buff *skb, struct qeth_hdr *qhdr,
624 struct qeth_hdr *qhdr, unsigned char sk_protocol) 604 unsigned char sk_protocol)
625{ 605{
626 QETH_DBF_TEXT(trace, 5, "creddpc"); 606 QETH_DBF_TEXT(trace, 5, "creddpc");
627 switch (sk_protocol) { 607 switch (sk_protocol) {
@@ -632,3 +612,90 @@ qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
632 } 612 }
633 return NULL; 613 return NULL;
634} 614}
615EXPORT_SYMBOL_GPL(qeth_eddp_create_context);
616
617void qeth_tso_fill_header(struct qeth_card *card, struct qeth_hdr *qhdr,
618 struct sk_buff *skb)
619{
620 struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
621 struct tcphdr *tcph = tcp_hdr(skb);
622 struct iphdr *iph = ip_hdr(skb);
623 struct ipv6hdr *ip6h = ipv6_hdr(skb);
624
625 QETH_DBF_TEXT(trace, 5, "tsofhdr");
626
627	/* fix header to TSO values ... */
628 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
629	/* set values which are fixed for the first approach ... */
630 hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
631 hdr->ext.imb_hdr_no = 1;
632 hdr->ext.hdr_type = 1;
633 hdr->ext.hdr_version = 1;
634 hdr->ext.hdr_len = 28;
635	/* insert non-fixed values */
636 hdr->ext.mss = skb_shinfo(skb)->gso_size;
637 hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
638 hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
639 sizeof(struct qeth_hdr_tso));
640 tcph->check = 0;
641	if (skb->protocol == htons(ETH_P_IPV6)) {
642 ip6h->payload_len = 0;
643 tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
644 0, IPPROTO_TCP, 0);
645 } else {
646		/* OSA wants us to set these values ... */
647 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
648 0, IPPROTO_TCP, 0);
649 iph->tot_len = 0;
650 iph->check = 0;
651 }
652}
653EXPORT_SYMBOL_GPL(qeth_tso_fill_header);
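/* Note on the seeding above (a sketch, not part of this patch): the
 * pseudo-header sum is taken with a length of 0 because the OSA card
 * recomputes the length- and payload-dependent part for every segment
 * it generates. Per segment the hardware conceptually completes
 *
 *   check = csum_fold(pseudo(saddr, daddr, IPPROTO_TCP, seg_len)
 *                     + csum(tcp_header_and_payload))
 *
 * so seeding tcph->check with ~csum_tcpudp_magic(saddr, daddr, 0,
 * IPPROTO_TCP, 0) leaves exactly that remainder to the adapter.
 */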
654
655void qeth_tx_csum(struct sk_buff *skb)
656{
657 int tlen;
658 if (skb->protocol == htons(ETH_P_IP)) {
659 tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);
660 switch (ip_hdr(skb)->protocol) {
661 case IPPROTO_TCP:
662 tcp_hdr(skb)->check = 0;
663 tcp_hdr(skb)->check = csum_tcpudp_magic(
664 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
665 tlen, ip_hdr(skb)->protocol,
666 skb_checksum(skb, skb_transport_offset(skb),
667 tlen, 0));
668 break;
669 case IPPROTO_UDP:
670 udp_hdr(skb)->check = 0;
671 udp_hdr(skb)->check = csum_tcpudp_magic(
672 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
673 tlen, ip_hdr(skb)->protocol,
674 skb_checksum(skb, skb_transport_offset(skb),
675 tlen, 0));
676 break;
677 }
678 } else if (skb->protocol == htons(ETH_P_IPV6)) {
679 switch (ipv6_hdr(skb)->nexthdr) {
680 case IPPROTO_TCP:
681 tcp_hdr(skb)->check = 0;
682 tcp_hdr(skb)->check = csum_ipv6_magic(
683 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
684 ipv6_hdr(skb)->payload_len,
685 ipv6_hdr(skb)->nexthdr,
686 skb_checksum(skb, skb_transport_offset(skb),
687 ipv6_hdr(skb)->payload_len, 0));
688 break;
689 case IPPROTO_UDP:
690 udp_hdr(skb)->check = 0;
691 udp_hdr(skb)->check = csum_ipv6_magic(
692 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
693 ipv6_hdr(skb)->payload_len,
694 ipv6_hdr(skb)->nexthdr,
695 skb_checksum(skb, skb_transport_offset(skb),
696 ipv6_hdr(skb)->payload_len, 0));
697 break;
698 }
699 }
700}
701EXPORT_SYMBOL_GPL(qeth_tx_csum);
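qeth_tx_csum() above leans on the kernel's one's-complement primitives:
skb_checksum() sums the transport header plus payload, and
csum_tcpudp_magic()/csum_ipv6_magic() fold in the pseudo header. For
reference, a minimal user-space sketch of the same RFC 1071 arithmetic
(illustrative only; inet_csum is a made-up name, not a kernel API):

#include <stdint.h>
#include <stddef.h>

/* One's-complement Internet checksum over a buffer (RFC 1071). */
static uint16_t inet_csum(const void *data, size_t len, uint32_t sum)
{
	const uint16_t *p = data;

	while (len > 1) {
		sum += *p++;
		len -= 2;
	}
	if (len)			/* trailing odd byte */
		sum += *(const uint8_t *)p;
	while (sum >> 16)		/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}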
diff --git a/drivers/s390/net/qeth_eddp.h b/drivers/s390/net/qeth_core_offl.h
index 52910c9252c0..86bf7df8cf16 100644
--- a/drivers/s390/net/qeth_eddp.h
+++ b/drivers/s390/net/qeth_core_offl.h
@@ -1,15 +1,13 @@
1/* 1/*
2 * linux/drivers/s390/net/qeth_eddp.h 2 * drivers/s390/net/qeth_core_offl.h
3 *
4 * Header file for qeth enhanced device driver packing.
5 *
6 * Copyright 2004 IBM Corporation
7 *
8 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
9 * 3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Thomas Spatzier <tspat@de.ibm.com>,
6 * Frank Blaschka <frank.blaschka@de.ibm.com>
10 */ 7 */
11#ifndef __QETH_EDDP_H__ 8
12#define __QETH_EDDP_H__ 9#ifndef __QETH_CORE_OFFL_H__
10#define __QETH_CORE_OFFL_H__
13 11
14struct qeth_eddp_element { 12struct qeth_eddp_element {
15 u32 flags; 13 u32 flags;
@@ -33,25 +31,6 @@ struct qeth_eddp_context_reference {
33 struct qeth_eddp_context *ctx; 31 struct qeth_eddp_context *ctx;
34}; 32};
35 33
36extern struct qeth_eddp_context *
37qeth_eddp_create_context(struct qeth_card *,struct sk_buff *,
38 struct qeth_hdr *, unsigned char);
39
40extern void
41qeth_eddp_put_context(struct qeth_eddp_context *);
42
43extern int
44qeth_eddp_fill_buffer(struct qeth_qdio_out_q *,struct qeth_eddp_context *,int);
45
46extern void
47qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *);
48
49extern int
50qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *,
51 struct qeth_eddp_context *);
52/*
53 * Data used for fragmenting a IP packet.
54 */
55struct qeth_eddp_data { 34struct qeth_eddp_data {
56 struct qeth_hdr qh; 35 struct qeth_hdr qh;
57 struct ethhdr mac; 36 struct ethhdr mac;
@@ -81,4 +60,17 @@ struct qeth_eddp_data {
81 int frag_offset; 60 int frag_offset;
82} __attribute__ ((packed)); 61} __attribute__ ((packed));
83 62
84#endif /* __QETH_EDDP_H__ */ 63extern struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *,
64 struct sk_buff *, struct qeth_hdr *, unsigned char);
65extern void qeth_eddp_put_context(struct qeth_eddp_context *);
66extern int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *,
67 struct qeth_eddp_context *, int);
68extern void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *);
69extern int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *,
70 struct qeth_eddp_context *);
71
72void qeth_tso_fill_header(struct qeth_card *, struct qeth_hdr *,
73 struct sk_buff *);
74void qeth_tx_csum(struct sk_buff *skb);
75
76#endif /* __QETH_CORE_OFFL_H__ */
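The externs above form a small life cycle: the transmit path creates an
EDDP context for a GSO skb, passes it down with the send call, and then
drops its reference. A trimmed sketch of that flow, mirroring how
qeth_l2_hard_start_xmit() uses it later in this patch (error paths
shortened):

	struct qeth_eddp_context *ctx;
	int rc;

	ctx = qeth_eddp_create_context(card, skb, hdr, skb->sk->sk_protocol);
	if (ctx == NULL)
		goto tx_drop;			/* could not segment */
	rc = qeth_do_send_packet(card, queue, skb, hdr, 0, ctx);
	qeth_eddp_put_context(ctx);		/* drop our reference */
	if (rc == 0)
		dev_kfree_skb_any(skb);		/* buffers hold the data */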
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
new file mode 100644
index 000000000000..08a50f057284
--- /dev/null
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -0,0 +1,651 @@
1/*
2 * drivers/s390/net/qeth_core_sys.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#include <linux/list.h>
12#include <linux/rwsem.h>
13#include <asm/ebcdic.h>
14
15#include "qeth_core.h"
16
17static ssize_t qeth_dev_state_show(struct device *dev,
18 struct device_attribute *attr, char *buf)
19{
20 struct qeth_card *card = dev_get_drvdata(dev);
21 if (!card)
22 return -EINVAL;
23
24 switch (card->state) {
25 case CARD_STATE_DOWN:
26 return sprintf(buf, "DOWN\n");
27 case CARD_STATE_HARDSETUP:
28 return sprintf(buf, "HARDSETUP\n");
29 case CARD_STATE_SOFTSETUP:
30 return sprintf(buf, "SOFTSETUP\n");
31 case CARD_STATE_UP:
32 if (card->lan_online)
33 return sprintf(buf, "UP (LAN ONLINE)\n");
34 else
35 return sprintf(buf, "UP (LAN OFFLINE)\n");
36 case CARD_STATE_RECOVER:
37 return sprintf(buf, "RECOVER\n");
38 default:
39 return sprintf(buf, "UNKNOWN\n");
40 }
41}
42
43static DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL);
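/* For reference (a sketch, not part of this patch): DEVICE_ATTR(state,
 * 0444, qeth_dev_state_show, NULL) expands to roughly
 *
 *   static struct device_attribute dev_attr_state = {
 *           .attr  = { .name = "state", .mode = 0444 },
 *           .show  = qeth_dev_state_show,
 *           .store = NULL,
 *   };
 *
 * which is why the attribute arrays at the end of this file take
 * &dev_attr_state.attr. The same applies to every DEVICE_ATTR below.
 */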
44
45static ssize_t qeth_dev_chpid_show(struct device *dev,
46 struct device_attribute *attr, char *buf)
47{
48 struct qeth_card *card = dev_get_drvdata(dev);
49 if (!card)
50 return -EINVAL;
51
52 return sprintf(buf, "%02X\n", card->info.chpid);
53}
54
55static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL);
56
57static ssize_t qeth_dev_if_name_show(struct device *dev,
58 struct device_attribute *attr, char *buf)
59{
60 struct qeth_card *card = dev_get_drvdata(dev);
61 if (!card)
62 return -EINVAL;
63 return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
64}
65
66static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
67
68static ssize_t qeth_dev_card_type_show(struct device *dev,
69 struct device_attribute *attr, char *buf)
70{
71 struct qeth_card *card = dev_get_drvdata(dev);
72 if (!card)
73 return -EINVAL;
74
75 return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
76}
77
78static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL);
79
80static inline const char *qeth_get_bufsize_str(struct qeth_card *card)
81{
82 if (card->qdio.in_buf_size == 16384)
83 return "16k";
84 else if (card->qdio.in_buf_size == 24576)
85 return "24k";
86 else if (card->qdio.in_buf_size == 32768)
87 return "32k";
88 else if (card->qdio.in_buf_size == 40960)
89 return "40k";
90 else
91 return "64k";
92}
93
94static ssize_t qeth_dev_inbuf_size_show(struct device *dev,
95 struct device_attribute *attr, char *buf)
96{
97 struct qeth_card *card = dev_get_drvdata(dev);
98 if (!card)
99 return -EINVAL;
100
101 return sprintf(buf, "%s\n", qeth_get_bufsize_str(card));
102}
103
104static DEVICE_ATTR(inbuf_size, 0444, qeth_dev_inbuf_size_show, NULL);
105
106static ssize_t qeth_dev_portno_show(struct device *dev,
107 struct device_attribute *attr, char *buf)
108{
109 struct qeth_card *card = dev_get_drvdata(dev);
110 if (!card)
111 return -EINVAL;
112
113 return sprintf(buf, "%i\n", card->info.portno);
114}
115
116static ssize_t qeth_dev_portno_store(struct device *dev,
117 struct device_attribute *attr, const char *buf, size_t count)
118{
119 struct qeth_card *card = dev_get_drvdata(dev);
120 char *tmp;
121 unsigned int portno;
122
123 if (!card)
124 return -EINVAL;
125
126 if ((card->state != CARD_STATE_DOWN) &&
127 (card->state != CARD_STATE_RECOVER))
128 return -EPERM;
129
130 portno = simple_strtoul(buf, &tmp, 16);
131 if (portno > QETH_MAX_PORTNO) {
132 PRINT_WARN("portno 0x%X is out of range\n", portno);
133 return -EINVAL;
134 }
135
136 card->info.portno = portno;
137 return count;
138}
139
140static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
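/* Usage note (not part of this patch): qeth_dev_portno_store() parses
 * the written value with base 16, so writing "10" sets port number 16
 * (0x10); values above QETH_MAX_PORTNO are rejected with -EINVAL.
 */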
141
142static ssize_t qeth_dev_portname_show(struct device *dev,
143 struct device_attribute *attr, char *buf)
144{
145 struct qeth_card *card = dev_get_drvdata(dev);
146 char portname[9] = {0, };
147
148 if (!card)
149 return -EINVAL;
150
151 if (card->info.portname_required) {
152 memcpy(portname, card->info.portname + 1, 8);
153 EBCASC(portname, 8);
154 return sprintf(buf, "%s\n", portname);
155 } else
156 return sprintf(buf, "no portname required\n");
157}
158
159static ssize_t qeth_dev_portname_store(struct device *dev,
160 struct device_attribute *attr, const char *buf, size_t count)
161{
162 struct qeth_card *card = dev_get_drvdata(dev);
163 char *tmp;
164 int i;
165
166 if (!card)
167 return -EINVAL;
168
169 if ((card->state != CARD_STATE_DOWN) &&
170 (card->state != CARD_STATE_RECOVER))
171 return -EPERM;
172
173 tmp = strsep((char **) &buf, "\n");
174 if ((strlen(tmp) > 8) || (strlen(tmp) == 0))
175 return -EINVAL;
176
177 card->info.portname[0] = strlen(tmp);
178	/* pad the remaining bytes with blanks */
179 for (i = 1; i < 9; i++)
180 card->info.portname[i] = ' ';
181 strcpy(card->info.portname + 1, tmp);
182 ASCEBC(card->info.portname + 1, 8);
183
184 return count;
185}
186
187static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show,
188 qeth_dev_portname_store);
189
190static ssize_t qeth_dev_prioqing_show(struct device *dev,
191 struct device_attribute *attr, char *buf)
192{
193 struct qeth_card *card = dev_get_drvdata(dev);
194
195 if (!card)
196 return -EINVAL;
197
198 switch (card->qdio.do_prio_queueing) {
199 case QETH_PRIO_Q_ING_PREC:
200 return sprintf(buf, "%s\n", "by precedence");
201 case QETH_PRIO_Q_ING_TOS:
202 return sprintf(buf, "%s\n", "by type of service");
203 default:
204 return sprintf(buf, "always queue %i\n",
205 card->qdio.default_out_queue);
206 }
207}
208
209static ssize_t qeth_dev_prioqing_store(struct device *dev,
210 struct device_attribute *attr, const char *buf, size_t count)
211{
212 struct qeth_card *card = dev_get_drvdata(dev);
213 char *tmp;
214
215 if (!card)
216 return -EINVAL;
217
218 if ((card->state != CARD_STATE_DOWN) &&
219 (card->state != CARD_STATE_RECOVER))
220 return -EPERM;
221
222	/* check whether this is a device with only one outbound queue
223	 * (a 1920 device); if so, priority queueing cannot be enabled
224	 */
225 if (card->qdio.no_out_queues == 1) {
226 PRINT_WARN("Priority queueing disabled due "
227 "to hardware limitations!\n");
228 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
229 return -EPERM;
230 }
231
232 tmp = strsep((char **) &buf, "\n");
233 if (!strcmp(tmp, "prio_queueing_prec"))
234 card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
235 else if (!strcmp(tmp, "prio_queueing_tos"))
236 card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
237 else if (!strcmp(tmp, "no_prio_queueing:0")) {
238 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
239 card->qdio.default_out_queue = 0;
240 } else if (!strcmp(tmp, "no_prio_queueing:1")) {
241 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
242 card->qdio.default_out_queue = 1;
243 } else if (!strcmp(tmp, "no_prio_queueing:2")) {
244 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
245 card->qdio.default_out_queue = 2;
246 } else if (!strcmp(tmp, "no_prio_queueing:3")) {
247 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
248 card->qdio.default_out_queue = 3;
249 } else if (!strcmp(tmp, "no_prio_queueing")) {
250 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
251 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
252 } else {
253 PRINT_WARN("Unknown queueing type '%s'\n", tmp);
254 return -EINVAL;
255 }
256 return count;
257}
258
259static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show,
260 qeth_dev_prioqing_store);
261
262static ssize_t qeth_dev_bufcnt_show(struct device *dev,
263 struct device_attribute *attr, char *buf)
264{
265 struct qeth_card *card = dev_get_drvdata(dev);
266
267 if (!card)
268 return -EINVAL;
269
270 return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
271}
272
273static ssize_t qeth_dev_bufcnt_store(struct device *dev,
274 struct device_attribute *attr, const char *buf, size_t count)
275{
276 struct qeth_card *card = dev_get_drvdata(dev);
277 char *tmp;
278 int cnt, old_cnt;
279 int rc;
280
281 if (!card)
282 return -EINVAL;
283
284 if ((card->state != CARD_STATE_DOWN) &&
285 (card->state != CARD_STATE_RECOVER))
286 return -EPERM;
287
288 old_cnt = card->qdio.in_buf_pool.buf_count;
289 cnt = simple_strtoul(buf, &tmp, 10);
290 cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
291 ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
292 if (old_cnt != cnt) {
293 rc = qeth_realloc_buffer_pool(card, cnt);
294 if (rc)
295 PRINT_WARN("Error (%d) while setting "
296 "buffer count.\n", rc);
297 }
298 return count;
299}
300
301static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
302 qeth_dev_bufcnt_store);
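/* Aside (a sketch, not from this patch): the ternary chain in
 * qeth_dev_bufcnt_store() is the usual clamp idiom; with the kernel's
 * min()/max() helpers it would read
 *
 *   cnt = max(QETH_IN_BUF_COUNT_MIN,
 *             min(QETH_IN_BUF_COUNT_MAX, cnt));
 */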
303
304static ssize_t qeth_dev_recover_store(struct device *dev,
305 struct device_attribute *attr, const char *buf, size_t count)
306{
307 struct qeth_card *card = dev_get_drvdata(dev);
308 char *tmp;
309 int i;
310
311 if (!card)
312 return -EINVAL;
313
314 if (card->state != CARD_STATE_UP)
315 return -EPERM;
316
317 i = simple_strtoul(buf, &tmp, 16);
318 if (i == 1)
319 qeth_schedule_recovery(card);
320
321 return count;
322}
323
324static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
325
326static ssize_t qeth_dev_performance_stats_show(struct device *dev,
327 struct device_attribute *attr, char *buf)
328{
329 struct qeth_card *card = dev_get_drvdata(dev);
330
331 if (!card)
332 return -EINVAL;
333
334 return sprintf(buf, "%i\n", card->options.performance_stats ? 1:0);
335}
336
337static ssize_t qeth_dev_performance_stats_store(struct device *dev,
338 struct device_attribute *attr, const char *buf, size_t count)
339{
340 struct qeth_card *card = dev_get_drvdata(dev);
341 char *tmp;
342 int i;
343
344 if (!card)
345 return -EINVAL;
346
347 i = simple_strtoul(buf, &tmp, 16);
348 if ((i == 0) || (i == 1)) {
349 if (i == card->options.performance_stats)
350 return count;
351 card->options.performance_stats = i;
352 if (i == 0)
353 memset(&card->perf_stats, 0,
354 sizeof(struct qeth_perf_stats));
355 card->perf_stats.initial_rx_packets = card->stats.rx_packets;
356 card->perf_stats.initial_tx_packets = card->stats.tx_packets;
357 } else {
358 PRINT_WARN("performance_stats: write 0 or 1 to this file!\n");
359 return -EINVAL;
360 }
361 return count;
362}
363
364static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show,
365 qeth_dev_performance_stats_store);
366
367static ssize_t qeth_dev_layer2_show(struct device *dev,
368 struct device_attribute *attr, char *buf)
369{
370 struct qeth_card *card = dev_get_drvdata(dev);
371
372 if (!card)
373 return -EINVAL;
374
375 return sprintf(buf, "%i\n", card->options.layer2 ? 1:0);
376}
377
378static ssize_t qeth_dev_layer2_store(struct device *dev,
379 struct device_attribute *attr, const char *buf, size_t count)
380{
381 struct qeth_card *card = dev_get_drvdata(dev);
382 char *tmp;
383 int i, rc;
384 enum qeth_discipline_id newdis;
385
386 if (!card)
387 return -EINVAL;
388
389 if (((card->state != CARD_STATE_DOWN) &&
390 (card->state != CARD_STATE_RECOVER)))
391 return -EPERM;
392
393 i = simple_strtoul(buf, &tmp, 16);
394 switch (i) {
395 case 0:
396 newdis = QETH_DISCIPLINE_LAYER3;
397 break;
398 case 1:
399 newdis = QETH_DISCIPLINE_LAYER2;
400 break;
401 default:
402 PRINT_WARN("layer2: write 0 or 1 to this file!\n");
403 return -EINVAL;
404 }
405
406 if (card->options.layer2 == newdis) {
407 return count;
408 } else {
409 if (card->discipline.ccwgdriver) {
410 card->discipline.ccwgdriver->remove(card->gdev);
411 qeth_core_free_discipline(card);
412 }
413 }
414
415 rc = qeth_core_load_discipline(card, newdis);
416 if (rc)
417 return rc;
418
419 rc = card->discipline.ccwgdriver->probe(card->gdev);
420 if (rc)
421 return rc;
422 return count;
423}
424
425static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
426 qeth_dev_layer2_store);
427
428static ssize_t qeth_dev_large_send_show(struct device *dev,
429 struct device_attribute *attr, char *buf)
430{
431 struct qeth_card *card = dev_get_drvdata(dev);
432
433 if (!card)
434 return -EINVAL;
435
436 switch (card->options.large_send) {
437 case QETH_LARGE_SEND_NO:
438 return sprintf(buf, "%s\n", "no");
439 case QETH_LARGE_SEND_EDDP:
440 return sprintf(buf, "%s\n", "EDDP");
441 case QETH_LARGE_SEND_TSO:
442 return sprintf(buf, "%s\n", "TSO");
443 default:
444 return sprintf(buf, "%s\n", "N/A");
445 }
446}
447
448static ssize_t qeth_dev_large_send_store(struct device *dev,
449 struct device_attribute *attr, const char *buf, size_t count)
450{
451 struct qeth_card *card = dev_get_drvdata(dev);
452 enum qeth_large_send_types type;
453 int rc = 0;
454 char *tmp;
455
456 if (!card)
457 return -EINVAL;
458 tmp = strsep((char **) &buf, "\n");
459 if (!strcmp(tmp, "no")) {
460 type = QETH_LARGE_SEND_NO;
461 } else if (!strcmp(tmp, "EDDP")) {
462 type = QETH_LARGE_SEND_EDDP;
463 } else if (!strcmp(tmp, "TSO")) {
464 type = QETH_LARGE_SEND_TSO;
465 } else {
466 PRINT_WARN("large_send: invalid mode %s!\n", tmp);
467 return -EINVAL;
468 }
469 if (card->options.large_send == type)
470 return count;
471 rc = qeth_set_large_send(card, type);
472 if (rc)
473 return rc;
474 return count;
475}
476
477static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show,
478 qeth_dev_large_send_store);
479
480static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
481{
482
483 if (!card)
484 return -EINVAL;
485
486 return sprintf(buf, "%i\n", value);
487}
488
489static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
490 const char *buf, size_t count, int *value, int max_value)
491{
492 char *tmp;
493 int i;
494
495 if (!card)
496 return -EINVAL;
497
498 if ((card->state != CARD_STATE_DOWN) &&
499 (card->state != CARD_STATE_RECOVER))
500 return -EPERM;
501
502 i = simple_strtoul(buf, &tmp, 10);
503 if (i <= max_value) {
504 *value = i;
505 } else {
506 PRINT_WARN("blkt total time: write values between"
507 " 0 and %d to this file!\n", max_value);
508 return -EINVAL;
509 }
510 return count;
511}
512
513static ssize_t qeth_dev_blkt_total_show(struct device *dev,
514 struct device_attribute *attr, char *buf)
515{
516 struct qeth_card *card = dev_get_drvdata(dev);
517
518 return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total);
519}
520
521static ssize_t qeth_dev_blkt_total_store(struct device *dev,
522 struct device_attribute *attr, const char *buf, size_t count)
523{
524 struct qeth_card *card = dev_get_drvdata(dev);
525
526 return qeth_dev_blkt_store(card, buf, count,
527 &card->info.blkt.time_total, 1000);
528}
529
530
531
532static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show,
533 qeth_dev_blkt_total_store);
534
535static ssize_t qeth_dev_blkt_inter_show(struct device *dev,
536 struct device_attribute *attr, char *buf)
537{
538 struct qeth_card *card = dev_get_drvdata(dev);
539
540 return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet);
541}
542
543static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
544 struct device_attribute *attr, const char *buf, size_t count)
545{
546 struct qeth_card *card = dev_get_drvdata(dev);
547
548 return qeth_dev_blkt_store(card, buf, count,
549 &card->info.blkt.inter_packet, 100);
550}
551
552static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
553 qeth_dev_blkt_inter_store);
554
555static ssize_t qeth_dev_blkt_inter_jumbo_show(struct device *dev,
556 struct device_attribute *attr, char *buf)
557{
558 struct qeth_card *card = dev_get_drvdata(dev);
559
560 return qeth_dev_blkt_show(buf, card,
561 card->info.blkt.inter_packet_jumbo);
562}
563
564static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
565 struct device_attribute *attr, const char *buf, size_t count)
566{
567 struct qeth_card *card = dev_get_drvdata(dev);
568
569 return qeth_dev_blkt_store(card, buf, count,
570 &card->info.blkt.inter_packet_jumbo, 100);
571}
572
573static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
574 qeth_dev_blkt_inter_jumbo_store);
575
576static struct attribute *qeth_blkt_device_attrs[] = {
577 &dev_attr_total.attr,
578 &dev_attr_inter.attr,
579 &dev_attr_inter_jumbo.attr,
580 NULL,
581};
582
583static struct attribute_group qeth_device_blkt_group = {
584 .name = "blkt",
585 .attrs = qeth_blkt_device_attrs,
586};
587
588static struct attribute *qeth_device_attrs[] = {
589 &dev_attr_state.attr,
590 &dev_attr_chpid.attr,
591 &dev_attr_if_name.attr,
592 &dev_attr_card_type.attr,
593 &dev_attr_inbuf_size.attr,
594 &dev_attr_portno.attr,
595 &dev_attr_portname.attr,
596 &dev_attr_priority_queueing.attr,
597 &dev_attr_buffer_count.attr,
598 &dev_attr_recover.attr,
599 &dev_attr_performance_stats.attr,
600 &dev_attr_layer2.attr,
601 &dev_attr_large_send.attr,
602 NULL,
603};
604
605static struct attribute_group qeth_device_attr_group = {
606 .attrs = qeth_device_attrs,
607};
608
609static struct attribute *qeth_osn_device_attrs[] = {
610 &dev_attr_state.attr,
611 &dev_attr_chpid.attr,
612 &dev_attr_if_name.attr,
613 &dev_attr_card_type.attr,
614 &dev_attr_buffer_count.attr,
615 &dev_attr_recover.attr,
616 NULL,
617};
618
619static struct attribute_group qeth_osn_device_attr_group = {
620 .attrs = qeth_osn_device_attrs,
621};
622
623int qeth_core_create_device_attributes(struct device *dev)
624{
625 int ret;
626 ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group);
627 if (ret)
628 return ret;
629 ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group);
630 if (ret)
631 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
632
633 return 0;
634}
635
636void qeth_core_remove_device_attributes(struct device *dev)
637{
638 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
639 sysfs_remove_group(&dev->kobj, &qeth_device_blkt_group);
640}
641
642int qeth_core_create_osn_attributes(struct device *dev)
643{
644 return sysfs_create_group(&dev->kobj, &qeth_osn_device_attr_group);
645}
646
647void qeth_core_remove_osn_attributes(struct device *dev)
648{
649 sysfs_remove_group(&dev->kobj, &qeth_osn_device_attr_group);
650 return;
651}
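The create/remove pairs above bracket the lifetime of the sysfs groups
on the ccwgroup device; the matching call sites live in
qeth_core_main.c, which is not part of this hunk. A hedged sketch of
the pairing, with hypothetical function names:

	/* hypothetical call sites; the real ones are in qeth_core_main.c */
	static int example_probe(struct ccwgroup_device *gdev)
	{
		int rc;

		rc = qeth_core_create_device_attributes(&gdev->dev);
		if (rc)
			return rc;
		/* ... allocate the card, set drvdata, load a discipline ... */
		return 0;
	}

	static void example_remove(struct ccwgroup_device *gdev)
	{
		qeth_core_remove_device_attributes(&gdev->dev);
		/* ... tear down the card ... */
	}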
diff --git a/drivers/s390/net/qeth_fs.h b/drivers/s390/net/qeth_fs.h
deleted file mode 100644
index 61faf05517d6..000000000000
--- a/drivers/s390/net/qeth_fs.h
+++ /dev/null
@@ -1,168 +0,0 @@
1/*
2 * linux/drivers/s390/net/qeth_fs.h
3 *
4 * Linux on zSeries OSA Express and HiperSockets support.
5 *
6 * This header file contains definitions related to sysfs and procfs.
7 *
8 * Copyright 2000,2003 IBM Corporation
9 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
10 *
11 */
12#ifndef __QETH_FS_H__
13#define __QETH_FS_H__
14
15#ifdef CONFIG_PROC_FS
16extern int
17qeth_create_procfs_entries(void);
18
19extern void
20qeth_remove_procfs_entries(void);
21#else
22static inline int
23qeth_create_procfs_entries(void)
24{
25 return 0;
26}
27
28static inline void
29qeth_remove_procfs_entries(void)
30{
31}
32#endif /* CONFIG_PROC_FS */
33
34extern int
35qeth_create_device_attributes(struct device *dev);
36
37extern void
38qeth_remove_device_attributes(struct device *dev);
39
40extern int
41qeth_create_device_attributes_osn(struct device *dev);
42
43extern void
44qeth_remove_device_attributes_osn(struct device *dev);
45
46extern int
47qeth_create_driver_attributes(void);
48
49extern void
50qeth_remove_driver_attributes(void);
51
52/*
53 * utility functions used in qeth_proc.c and qeth_sys.c
54 */
55
56static inline const char *
57qeth_get_checksum_str(struct qeth_card *card)
58{
59 if (card->options.checksum_type == SW_CHECKSUMMING)
60 return "sw";
61 else if (card->options.checksum_type == HW_CHECKSUMMING)
62 return "hw";
63 else
64 return "no";
65}
66
67static inline const char *
68qeth_get_prioq_str(struct qeth_card *card, char *buf)
69{
70 if (card->qdio.do_prio_queueing == QETH_NO_PRIO_QUEUEING)
71 sprintf(buf, "always_q_%i", card->qdio.default_out_queue);
72 else
73 strcpy(buf, (card->qdio.do_prio_queueing ==
74 QETH_PRIO_Q_ING_PREC)?
75 "by_prec." : "by_ToS");
76 return buf;
77}
78
79static inline const char *
80qeth_get_bufsize_str(struct qeth_card *card)
81{
82 if (card->qdio.in_buf_size == 16384)
83 return "16k";
84 else if (card->qdio.in_buf_size == 24576)
85 return "24k";
86 else if (card->qdio.in_buf_size == 32768)
87 return "32k";
88 else if (card->qdio.in_buf_size == 40960)
89 return "40k";
90 else
91 return "64k";
92}
93
94static inline const char *
95qeth_get_cardname(struct qeth_card *card)
96{
97 if (card->info.guestlan) {
98 switch (card->info.type) {
99 case QETH_CARD_TYPE_OSAE:
100 return " Guest LAN QDIO";
101 case QETH_CARD_TYPE_IQD:
102 return " Guest LAN Hiper";
103 default:
104 return " unknown";
105 }
106 } else {
107 switch (card->info.type) {
108 case QETH_CARD_TYPE_OSAE:
109 return " OSD Express";
110 case QETH_CARD_TYPE_IQD:
111 return " HiperSockets";
112 case QETH_CARD_TYPE_OSN:
113 return " OSN QDIO";
114 default:
115 return " unknown";
116 }
117 }
118 return " n/a";
119}
120
121/* max length to be returned: 14 */
122static inline const char *
123qeth_get_cardname_short(struct qeth_card *card)
124{
125 if (card->info.guestlan){
126 switch (card->info.type){
127 case QETH_CARD_TYPE_OSAE:
128 return "GuestLAN QDIO";
129 case QETH_CARD_TYPE_IQD:
130 return "GuestLAN Hiper";
131 default:
132 return "unknown";
133 }
134 } else {
135 switch (card->info.type) {
136 case QETH_CARD_TYPE_OSAE:
137 switch (card->info.link_type) {
138 case QETH_LINK_TYPE_FAST_ETH:
139 return "OSD_100";
140 case QETH_LINK_TYPE_HSTR:
141 return "HSTR";
142 case QETH_LINK_TYPE_GBIT_ETH:
143 return "OSD_1000";
144 case QETH_LINK_TYPE_10GBIT_ETH:
145 return "OSD_10GIG";
146 case QETH_LINK_TYPE_LANE_ETH100:
147 return "OSD_FE_LANE";
148 case QETH_LINK_TYPE_LANE_TR:
149 return "OSD_TR_LANE";
150 case QETH_LINK_TYPE_LANE_ETH1000:
151 return "OSD_GbE_LANE";
152 case QETH_LINK_TYPE_LANE:
153 return "OSD_ATM_LANE";
154 default:
155 return "OSD_Express";
156 }
157 case QETH_CARD_TYPE_IQD:
158 return "HiperSockets";
159 case QETH_CARD_TYPE_OSN:
160 return "OSN";
161 default:
162 return "unknown";
163 }
164 }
165 return "n/a";
166}
167
168#endif /* __QETH_FS_H__ */
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
new file mode 100644
index 000000000000..4417a3629ae0
--- /dev/null
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -0,0 +1,1242 @@
1/*
2 * drivers/s390/net/qeth_l2_main.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/string.h>
14#include <linux/errno.h>
15#include <linux/kernel.h>
16#include <linux/etherdevice.h>
17#include <linux/mii.h>
18#include <linux/ip.h>
19
20#include <asm/s390_rdev.h>
21
22#include "qeth_core.h"
23#include "qeth_core_offl.h"
24
25#define QETH_DBF_TEXT_(name, level, text...) \
26 do { \
27 if (qeth_dbf_passes(qeth_dbf_##name, level)) { \
28 char *dbf_txt_buf = get_cpu_var(qeth_l2_dbf_txt_buf); \
29 sprintf(dbf_txt_buf, text); \
30 debug_text_event(qeth_dbf_##name, level, dbf_txt_buf); \
31 put_cpu_var(qeth_l2_dbf_txt_buf); \
32 } \
33 } while (0)
34
35static DEFINE_PER_CPU(char[256], qeth_l2_dbf_txt_buf);
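/* Note (not part of this patch): get_cpu_var()/put_cpu_var() in the
 * macro above disable and re-enable preemption, so each CPU formats
 * into its own 256-byte scratch buffer without any locking. Typical
 * use, as seen throughout this file:
 *
 *   QETH_DBF_TEXT_(trace, 2, "ioce%d", rc);
 */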
36
37static int qeth_l2_set_offline(struct ccwgroup_device *);
38static int qeth_l2_stop(struct net_device *);
39static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
40static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
41 enum qeth_ipa_cmds,
42 int (*reply_cb) (struct qeth_card *,
43 struct qeth_reply*,
44 unsigned long));
45static void qeth_l2_set_multicast_list(struct net_device *);
46static int qeth_l2_recover(void *);
47
48static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
49{
50 struct qeth_card *card = netdev_priv(dev);
51 struct mii_ioctl_data *mii_data;
52 int rc = 0;
53
54 if (!card)
55 return -ENODEV;
56
57 if ((card->state != CARD_STATE_UP) &&
58 (card->state != CARD_STATE_SOFTSETUP))
59 return -ENODEV;
60
61 if (card->info.type == QETH_CARD_TYPE_OSN)
62 return -EPERM;
63
64 switch (cmd) {
65 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
66 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
67 break;
68 case SIOC_QETH_GET_CARD_TYPE:
69 if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
70 !card->info.guestlan)
71 return 1;
72 return 0;
73 break;
74 case SIOCGMIIPHY:
75 mii_data = if_mii(rq);
76 mii_data->phy_id = 0;
77 break;
78 case SIOCGMIIREG:
79 mii_data = if_mii(rq);
80 if (mii_data->phy_id != 0)
81 rc = -EINVAL;
82 else
83 mii_data->val_out = qeth_mdio_read(dev,
84 mii_data->phy_id, mii_data->reg_num);
85 break;
86 default:
87 rc = -EOPNOTSUPP;
88 }
89 if (rc)
90 QETH_DBF_TEXT_(trace, 2, "ioce%d", rc);
91 return rc;
92}
93
94static int qeth_l2_verify_dev(struct net_device *dev)
95{
96 struct qeth_card *card;
97 unsigned long flags;
98 int rc = 0;
99
100 read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
101 list_for_each_entry(card, &qeth_core_card_list.list, list) {
102 if (card->dev == dev) {
103 rc = QETH_REAL_CARD;
104 break;
105 }
106 }
107 read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
108
109 return rc;
110}
111
112static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
113{
114 struct qeth_card *card;
115 struct net_device *ndev;
116 unsigned char *readno;
117 __u16 temp_dev_no, card_dev_no;
118 char *endp;
119 unsigned long flags;
120
121 ndev = NULL;
122 memcpy(&temp_dev_no, read_dev_no, 2);
123 read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
124 list_for_each_entry(card, &qeth_core_card_list.list, list) {
125 readno = CARD_RDEV_ID(card);
126 readno += (strlen(readno) - 4);
127 card_dev_no = simple_strtoul(readno, &endp, 16);
128 if (card_dev_no == temp_dev_no) {
129 ndev = card->dev;
130 break;
131 }
132 }
133 read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
134 return ndev;
135}
136
137static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
138 struct qeth_reply *reply,
139 unsigned long data)
140{
141 struct qeth_ipa_cmd *cmd;
142 __u8 *mac;
143
144 QETH_DBF_TEXT(trace, 2, "L2Sgmacb");
145 cmd = (struct qeth_ipa_cmd *) data;
146 mac = &cmd->data.setdelmac.mac[0];
147 /* MAC already registered, needed in couple/uncouple case */
148 if (cmd->hdr.return_code == 0x2005) {
149 PRINT_WARN("Group MAC %02x:%02x:%02x:%02x:%02x:%02x " \
150 "already existing on %s \n",
151 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
152 QETH_CARD_IFNAME(card));
153 cmd->hdr.return_code = 0;
154 }
155 if (cmd->hdr.return_code)
156 PRINT_ERR("Could not set group MAC " \
157 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
158 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
159 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
160 return 0;
161}
162
163static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
164{
165 QETH_DBF_TEXT(trace, 2, "L2Sgmac");
166 return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
167 qeth_l2_send_setgroupmac_cb);
168}
169
170static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
171 struct qeth_reply *reply,
172 unsigned long data)
173{
174 struct qeth_ipa_cmd *cmd;
175 __u8 *mac;
176
177 QETH_DBF_TEXT(trace, 2, "L2Dgmacb");
178 cmd = (struct qeth_ipa_cmd *) data;
179 mac = &cmd->data.setdelmac.mac[0];
180 if (cmd->hdr.return_code)
181 PRINT_ERR("Could not delete group MAC " \
182 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
183 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
184 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
185 return 0;
186}
187
188static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
189{
190 QETH_DBF_TEXT(trace, 2, "L2Dgmac");
191 return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
192 qeth_l2_send_delgroupmac_cb);
193}
194
195static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac)
196{
197 struct qeth_mc_mac *mc;
198
199 mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC);
200
201 if (!mc) {
202 PRINT_ERR("no mem vor mc mac address\n");
203 return;
204 }
205
206 memcpy(mc->mc_addr, mac, OSA_ADDR_LEN);
207 mc->mc_addrlen = OSA_ADDR_LEN;
208
209 if (!qeth_l2_send_setgroupmac(card, mac))
210 list_add_tail(&mc->list, &card->mc_list);
211 else
212 kfree(mc);
213}
214
215static void qeth_l2_del_all_mc(struct qeth_card *card)
216{
217 struct qeth_mc_mac *mc, *tmp;
218
219 spin_lock_bh(&card->mclock);
220 list_for_each_entry_safe(mc, tmp, &card->mc_list, list) {
221 qeth_l2_send_delgroupmac(card, mc->mc_addr);
222 list_del(&mc->list);
223 kfree(mc);
224 }
225 spin_unlock_bh(&card->mclock);
226}
227
228static void qeth_l2_get_packet_type(struct qeth_card *card,
229 struct qeth_hdr *hdr, struct sk_buff *skb)
230{
231 __u16 hdr_mac;
232
233 if (!memcmp(skb->data + QETH_HEADER_SIZE,
234 skb->dev->broadcast, 6)) {
235 /* broadcast? */
236 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
237 return;
238 }
239 hdr_mac = *((__u16 *)skb->data);
240 /* tr multicast? */
241 switch (card->info.link_type) {
242 case QETH_LINK_TYPE_HSTR:
243 case QETH_LINK_TYPE_LANE_TR:
244 if ((hdr_mac == QETH_TR_MAC_NC) ||
245 (hdr_mac == QETH_TR_MAC_C))
246 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
247 else
248 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
249 break;
250 /* eth or so multicast? */
251 default:
252 if ((hdr_mac == QETH_ETH_MAC_V4) ||
253 (hdr_mac == QETH_ETH_MAC_V6))
254 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
255 else
256 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
257 }
258}
259
260static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
261 struct sk_buff *skb, int ipv, int cast_type)
262{
263 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)((skb->data) +
264 QETH_HEADER_SIZE);
265
266 memset(hdr, 0, sizeof(struct qeth_hdr));
267 hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
268
269	/* set byte 3 to casting flags */
270 if (cast_type == RTN_MULTICAST)
271 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
272 else if (cast_type == RTN_BROADCAST)
273 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
274 else
275 qeth_l2_get_packet_type(card, hdr, skb);
276
277	hdr->hdr.l2.pkt_length = skb->len - QETH_HEADER_SIZE;
278 /* VSWITCH relies on the VLAN
279 * information to be present in
280 * the QDIO header */
281 if (veth->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
282 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN;
283 hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI);
284 }
285}
286
287static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
288 struct qeth_reply *reply, unsigned long data)
289{
290 struct qeth_ipa_cmd *cmd;
291
292 QETH_DBF_TEXT(trace, 2, "L2sdvcb");
293 cmd = (struct qeth_ipa_cmd *) data;
294 if (cmd->hdr.return_code) {
295 PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. "
296 "Continuing\n", cmd->data.setdelvlan.vlan_id,
297 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
298 QETH_DBF_TEXT_(trace, 2, "L2VL%4x", cmd->hdr.command);
299 QETH_DBF_TEXT_(trace, 2, "L2%s", CARD_BUS_ID(card));
300 QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code);
301 }
302 return 0;
303}
304
305static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
306 enum qeth_ipa_cmds ipacmd)
307{
308 struct qeth_ipa_cmd *cmd;
309 struct qeth_cmd_buffer *iob;
310
311 QETH_DBF_TEXT_(trace, 4, "L2sdv%x", ipacmd);
312 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
313 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
314 cmd->data.setdelvlan.vlan_id = i;
315 return qeth_send_ipa_cmd(card, iob,
316 qeth_l2_send_setdelvlan_cb, NULL);
317}
318
319static void qeth_l2_process_vlans(struct qeth_card *card, int clear)
320{
321 struct qeth_vlan_vid *id;
322 QETH_DBF_TEXT(trace, 3, "L2prcvln");
323 spin_lock_bh(&card->vlanlock);
324 list_for_each_entry(id, &card->vid_list, list) {
325 if (clear)
326 qeth_l2_send_setdelvlan(card, id->vid,
327 IPA_CMD_DELVLAN);
328 else
329 qeth_l2_send_setdelvlan(card, id->vid,
330 IPA_CMD_SETVLAN);
331 }
332 spin_unlock_bh(&card->vlanlock);
333}
334
335static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
336{
337 struct qeth_card *card = netdev_priv(dev);
338 struct qeth_vlan_vid *id;
339
340 QETH_DBF_TEXT_(trace, 4, "aid:%d", vid);
341 id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
342 if (id) {
343 id->vid = vid;
344 qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
345 spin_lock_bh(&card->vlanlock);
346 list_add_tail(&id->list, &card->vid_list);
347 spin_unlock_bh(&card->vlanlock);
348 } else {
349 PRINT_ERR("no memory for vid\n");
350 }
351}
352
353static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
354{
355 struct qeth_vlan_vid *id, *tmpid = NULL;
356 struct qeth_card *card = netdev_priv(dev);
357
358 QETH_DBF_TEXT_(trace, 4, "kid:%d", vid);
359 spin_lock_bh(&card->vlanlock);
360 list_for_each_entry(id, &card->vid_list, list) {
361 if (id->vid == vid) {
362 list_del(&id->list);
363 tmpid = id;
364 break;
365 }
366 }
367 spin_unlock_bh(&card->vlanlock);
368 if (tmpid) {
369 qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
370 kfree(tmpid);
371 }
372 qeth_l2_set_multicast_list(card->dev);
373}
374
375static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
376{
377 int rc = 0;
378
379	QETH_DBF_TEXT(setup, 2, "stopcard");
380 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
381
382 qeth_set_allowed_threads(card, 0, 1);
383 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
384 return -ERESTARTSYS;
385 if (card->read.state == CH_STATE_UP &&
386 card->write.state == CH_STATE_UP &&
387 (card->state == CARD_STATE_UP)) {
388 if (recovery_mode &&
389 card->info.type != QETH_CARD_TYPE_OSN) {
390 qeth_l2_stop(card->dev);
391 } else {
392 rtnl_lock();
393 dev_close(card->dev);
394 rtnl_unlock();
395 }
396 if (!card->use_hard_stop) {
397 __u8 *mac = &card->dev->dev_addr[0];
398 rc = qeth_l2_send_delmac(card, mac);
399 QETH_DBF_TEXT_(setup, 2, "Lerr%d", rc);
400 }
401 card->state = CARD_STATE_SOFTSETUP;
402 }
403 if (card->state == CARD_STATE_SOFTSETUP) {
404 qeth_l2_process_vlans(card, 1);
405 qeth_l2_del_all_mc(card);
406 qeth_clear_ipacmd_list(card);
407 card->state = CARD_STATE_HARDSETUP;
408 }
409 if (card->state == CARD_STATE_HARDSETUP) {
410 qeth_qdio_clear_card(card, 0);
411 qeth_clear_qdio_buffers(card);
412 qeth_clear_working_pool_list(card);
413 card->state = CARD_STATE_DOWN;
414 }
415 if (card->state == CARD_STATE_DOWN) {
416 qeth_clear_cmd_buffers(&card->read);
417 qeth_clear_cmd_buffers(&card->write);
418 }
419 card->use_hard_stop = 0;
420 return rc;
421}
422
423static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
424 struct qeth_qdio_buffer *buf, int index)
425{
426 struct qdio_buffer_element *element;
427 struct sk_buff *skb;
428 struct qeth_hdr *hdr;
429 int offset;
430 unsigned int len;
431
432 /* get first element of current buffer */
433 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
434 offset = 0;
435 if (card->options.performance_stats)
436 card->perf_stats.bufs_rec++;
437 while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element,
438 &offset, &hdr))) {
439 skb->dev = card->dev;
440 /* is device UP ? */
441 if (!(card->dev->flags & IFF_UP)) {
442 dev_kfree_skb_any(skb);
443 continue;
444 }
445
446 switch (hdr->hdr.l2.id) {
447 case QETH_HEADER_TYPE_LAYER2:
448 skb->pkt_type = PACKET_HOST;
449 skb->protocol = eth_type_trans(skb, skb->dev);
450 if (card->options.checksum_type == NO_CHECKSUMMING)
451 skb->ip_summed = CHECKSUM_UNNECESSARY;
452 else
453 skb->ip_summed = CHECKSUM_NONE;
454 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
455 len = skb->len;
456 netif_rx(skb);
457 break;
458 case QETH_HEADER_TYPE_OSN:
459 skb_push(skb, sizeof(struct qeth_hdr));
460 skb_copy_to_linear_data(skb, hdr,
461 sizeof(struct qeth_hdr));
462 len = skb->len;
463 card->osn_info.data_cb(skb);
464 break;
465 default:
466 dev_kfree_skb_any(skb);
467 QETH_DBF_TEXT(trace, 3, "inbunkno");
468 QETH_DBF_HEX(control, 3, hdr, QETH_DBF_CONTROL_LEN);
469 continue;
470 }
471 card->dev->last_rx = jiffies;
472 card->stats.rx_packets++;
473 card->stats.rx_bytes += len;
474 }
475}
476
477static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
478 enum qeth_ipa_cmds ipacmd,
479 int (*reply_cb) (struct qeth_card *,
480 struct qeth_reply*,
481 unsigned long))
482{
483 struct qeth_ipa_cmd *cmd;
484 struct qeth_cmd_buffer *iob;
485
486 QETH_DBF_TEXT(trace, 2, "L2sdmac");
487 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
488 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
489 cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
490 memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
491 return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
492}
493
494static int qeth_l2_send_setmac_cb(struct qeth_card *card,
495 struct qeth_reply *reply,
496 unsigned long data)
497{
498 struct qeth_ipa_cmd *cmd;
499
500 QETH_DBF_TEXT(trace, 2, "L2Smaccb");
501 cmd = (struct qeth_ipa_cmd *) data;
502 if (cmd->hdr.return_code) {
503 QETH_DBF_TEXT_(trace, 2, "L2er%x", cmd->hdr.return_code);
504 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
505 cmd->hdr.return_code = -EIO;
506 } else {
507 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
508 memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
509 OSA_ADDR_LEN);
510 PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
511 "successfully registered on device %s\n",
512 card->dev->dev_addr[0], card->dev->dev_addr[1],
513 card->dev->dev_addr[2], card->dev->dev_addr[3],
514 card->dev->dev_addr[4], card->dev->dev_addr[5],
515 card->dev->name);
516 }
517 return 0;
518}
519
520static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
521{
522 QETH_DBF_TEXT(trace, 2, "L2Setmac");
523 return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
524 qeth_l2_send_setmac_cb);
525}
526
527static int qeth_l2_send_delmac_cb(struct qeth_card *card,
528 struct qeth_reply *reply,
529 unsigned long data)
530{
531 struct qeth_ipa_cmd *cmd;
532
533 QETH_DBF_TEXT(trace, 2, "L2Dmaccb");
534 cmd = (struct qeth_ipa_cmd *) data;
535 if (cmd->hdr.return_code) {
536 QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code);
537 cmd->hdr.return_code = -EIO;
538 return 0;
539 }
540 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
541
542 return 0;
543}
544
545static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
546{
547 QETH_DBF_TEXT(trace, 2, "L2Delmac");
548 if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
549 return 0;
550 return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
551 qeth_l2_send_delmac_cb);
552}
553
554static int qeth_l2_request_initial_mac(struct qeth_card *card)
555{
556 int rc = 0;
557 char vendor_pre[] = {0x02, 0x00, 0x00};
558
559 QETH_DBF_TEXT(setup, 2, "doL2init");
560 QETH_DBF_TEXT_(setup, 2, "doL2%s", CARD_BUS_ID(card));
561
562 rc = qeth_query_setadapterparms(card);
563 if (rc) {
564 PRINT_WARN("could not query adapter parameters on device %s: "
565 "x%x\n", CARD_BUS_ID(card), rc);
566 }
567
568 if (card->info.guestlan) {
569 rc = qeth_setadpparms_change_macaddr(card);
570 if (rc) {
571 PRINT_WARN("couldn't get MAC address on "
572 "device %s: x%x\n",
573 CARD_BUS_ID(card), rc);
574 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
575 return rc;
576 }
577 QETH_DBF_HEX(setup, 2, card->dev->dev_addr, OSA_ADDR_LEN);
578 } else {
579 random_ether_addr(card->dev->dev_addr);
580 memcpy(card->dev->dev_addr, vendor_pre, 3);
581 }
582 return 0;
583}
584
585static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
586{
587 struct sockaddr *addr = p;
588 struct qeth_card *card = netdev_priv(dev);
589 int rc = 0;
590
591 QETH_DBF_TEXT(trace, 3, "setmac");
592
593 if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) {
594 QETH_DBF_TEXT(trace, 3, "setmcINV");
595 return -EOPNOTSUPP;
596 }
597
598 if (card->info.type == QETH_CARD_TYPE_OSN) {
599 PRINT_WARN("Setting MAC address on %s is not supported.\n",
600 dev->name);
601 QETH_DBF_TEXT(trace, 3, "setmcOSN");
602 return -EOPNOTSUPP;
603 }
604 QETH_DBF_TEXT_(trace, 3, "%s", CARD_BUS_ID(card));
605 QETH_DBF_HEX(trace, 3, addr->sa_data, OSA_ADDR_LEN);
606 rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
607 if (!rc)
608 rc = qeth_l2_send_setmac(card, addr->sa_data);
609 return rc;
610}
611
612static void qeth_l2_set_multicast_list(struct net_device *dev)
613{
614 struct qeth_card *card = netdev_priv(dev);
615 struct dev_mc_list *dm;
616
617 if (card->info.type == QETH_CARD_TYPE_OSN)
618		return;
619
620 QETH_DBF_TEXT(trace, 3, "setmulti");
621 qeth_l2_del_all_mc(card);
622 spin_lock_bh(&card->mclock);
623 for (dm = dev->mc_list; dm; dm = dm->next)
624 qeth_l2_add_mc(card, dm->dmi_addr);
625 spin_unlock_bh(&card->mclock);
626 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
627 return;
628 qeth_setadp_promisc_mode(card);
629}
630
631static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
632{
633 int rc;
634 struct qeth_hdr *hdr = NULL;
635 int elements = 0;
636 struct qeth_card *card = netdev_priv(dev);
637 struct sk_buff *new_skb = skb;
638 int ipv = qeth_get_ip_version(skb);
639 int cast_type = qeth_get_cast_type(card, skb);
640 struct qeth_qdio_out_q *queue = card->qdio.out_qs
641 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
642 int tx_bytes = skb->len;
643 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
644 struct qeth_eddp_context *ctx = NULL;
645
646 QETH_DBF_TEXT(trace, 6, "l2xmit");
647
648 if ((card->state != CARD_STATE_UP) || !card->lan_online) {
649 card->stats.tx_carrier_errors++;
650 goto tx_drop;
651 }
652
653 if ((card->info.type == QETH_CARD_TYPE_OSN) &&
654 (skb->protocol == htons(ETH_P_IPV6)))
655 goto tx_drop;
656
657 if (card->options.performance_stats) {
658 card->perf_stats.outbound_cnt++;
659 card->perf_stats.outbound_start_time = qeth_get_micros();
660 }
661 netif_stop_queue(dev);
662
663 if (skb_is_gso(skb))
664 large_send = QETH_LARGE_SEND_EDDP;
665
666 if (card->info.type == QETH_CARD_TYPE_OSN)
667 hdr = (struct qeth_hdr *)skb->data;
668 else {
669 new_skb = qeth_prepare_skb(card, skb, &hdr);
670 if (!new_skb)
671 goto tx_drop;
672 qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
673 }
674
675 if (large_send == QETH_LARGE_SEND_EDDP) {
676 ctx = qeth_eddp_create_context(card, new_skb, hdr,
677 skb->sk->sk_protocol);
678 if (ctx == NULL) {
679 PRINT_WARN("could not create eddp context\n");
680 goto tx_drop;
681 }
682 } else {
683 elements = qeth_get_elements_no(card, (void *)hdr, new_skb, 0);
684 if (!elements)
685 goto tx_drop;
686 }
687
688 if ((large_send == QETH_LARGE_SEND_NO) &&
689 (skb->ip_summed == CHECKSUM_PARTIAL))
690 qeth_tx_csum(new_skb);
691
692 if (card->info.type != QETH_CARD_TYPE_IQD)
693 rc = qeth_do_send_packet(card, queue, new_skb, hdr,
694 elements, ctx);
695 else
696 rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
697 elements, ctx);
698 if (!rc) {
699 card->stats.tx_packets++;
700 card->stats.tx_bytes += tx_bytes;
701 if (new_skb != skb)
702 dev_kfree_skb_any(skb);
703 if (card->options.performance_stats) {
704 if (large_send != QETH_LARGE_SEND_NO) {
705 card->perf_stats.large_send_bytes += tx_bytes;
706 card->perf_stats.large_send_cnt++;
707 }
708 if (skb_shinfo(new_skb)->nr_frags > 0) {
709 card->perf_stats.sg_skbs_sent++;
710 /* nr_frags + skb->data */
711 card->perf_stats.sg_frags_sent +=
712 skb_shinfo(new_skb)->nr_frags + 1;
713 }
714 }
715
716 if (ctx != NULL) {
717 qeth_eddp_put_context(ctx);
718 dev_kfree_skb_any(new_skb);
719 }
720 } else {
721 if (ctx != NULL)
722 qeth_eddp_put_context(ctx);
723
724 if (rc == -EBUSY) {
725 if (new_skb != skb)
726 dev_kfree_skb_any(new_skb);
727 return NETDEV_TX_BUSY;
728 } else
729 goto tx_drop;
730 }
731
732 netif_wake_queue(dev);
733 if (card->options.performance_stats)
734 card->perf_stats.outbound_time += qeth_get_micros() -
735 card->perf_stats.outbound_start_time;
736 return rc;
737
738tx_drop:
739 card->stats.tx_dropped++;
740 card->stats.tx_errors++;
741 if ((new_skb != skb) && new_skb)
742 dev_kfree_skb_any(new_skb);
743 dev_kfree_skb_any(skb);
744 return NETDEV_TX_OK;
745}
746
747static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
748 unsigned int status, unsigned int qdio_err,
749 unsigned int siga_err, unsigned int queue,
750 int first_element, int count, unsigned long card_ptr)
751{
752 struct net_device *net_dev;
753 struct qeth_card *card;
754 struct qeth_qdio_buffer *buffer;
755 int index;
756 int i;
757
758 QETH_DBF_TEXT(trace, 6, "qdinput");
759 card = (struct qeth_card *) card_ptr;
760 net_dev = card->dev;
761 if (card->options.performance_stats) {
762 card->perf_stats.inbound_cnt++;
763 card->perf_stats.inbound_start_time = qeth_get_micros();
764 }
765 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
766 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
767 QETH_DBF_TEXT(trace, 1, "qdinchk");
768 QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card));
769 QETH_DBF_TEXT_(trace, 1, "%04X%04X", first_element,
770 count);
771 QETH_DBF_TEXT_(trace, 1, "%04X%04X", queue, status);
772 qeth_schedule_recovery(card);
773 return;
774 }
775 }
776 for (i = first_element; i < (first_element + count); ++i) {
777 index = i % QDIO_MAX_BUFFERS_PER_Q;
778 buffer = &card->qdio.in_q->bufs[index];
779 if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) &&
780 qeth_check_qdio_errors(buffer->buffer,
781 qdio_err, siga_err, "qinerr")))
782 qeth_l2_process_inbound_buffer(card, buffer, index);
783 /* clear buffer and give back to hardware */
784 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
785 qeth_queue_input_buffer(card, index);
786 }
787 if (card->options.performance_stats)
788 card->perf_stats.inbound_time += qeth_get_micros() -
789 card->perf_stats.inbound_start_time;
790}
791
792static int qeth_l2_open(struct net_device *dev)
793{
794 struct qeth_card *card = netdev_priv(dev);
795
796 QETH_DBF_TEXT(trace, 4, "qethopen");
797 if (card->state != CARD_STATE_SOFTSETUP)
798 return -ENODEV;
799
800 if ((card->info.type != QETH_CARD_TYPE_OSN) &&
801 (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
802 QETH_DBF_TEXT(trace, 4, "nomacadr");
803 return -EPERM;
804 }
805 card->data.state = CH_STATE_UP;
806 card->state = CARD_STATE_UP;
807 card->dev->flags |= IFF_UP;
808 netif_start_queue(dev);
809
810 if (!card->lan_online && netif_carrier_ok(dev))
811 netif_carrier_off(dev);
812 return 0;
813}
814
815
816static int qeth_l2_stop(struct net_device *dev)
817{
818 struct qeth_card *card = netdev_priv(dev);
819
820 QETH_DBF_TEXT(trace, 4, "qethstop");
821 netif_tx_disable(dev);
822 card->dev->flags &= ~IFF_UP;
823 if (card->state == CARD_STATE_UP)
824 card->state = CARD_STATE_SOFTSETUP;
825 return 0;
826}
827
828static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
829{
830 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
831
832 INIT_LIST_HEAD(&card->vid_list);
833 INIT_LIST_HEAD(&card->mc_list);
834 card->options.layer2 = 1;
835 card->discipline.input_handler = (qdio_handler_t *)
836 qeth_l2_qdio_input_handler;
837 card->discipline.output_handler = (qdio_handler_t *)
838 qeth_qdio_output_handler;
839 card->discipline.recover = qeth_l2_recover;
840 return 0;
841}
842
843static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
844{
845 struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
846
847 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
848
849 if (cgdev->state == CCWGROUP_ONLINE) {
850 card->use_hard_stop = 1;
851 qeth_l2_set_offline(cgdev);
852 }
853
854 if (card->dev) {
855 unregister_netdev(card->dev);
856 card->dev = NULL;
857 }
858
859 qeth_l2_del_all_mc(card);
860 return;
861}
862
863static struct ethtool_ops qeth_l2_ethtool_ops = {
864 .get_link = ethtool_op_get_link,
865 .get_tx_csum = ethtool_op_get_tx_csum,
866 .set_tx_csum = ethtool_op_set_tx_hw_csum,
867 .get_sg = ethtool_op_get_sg,
868 .set_sg = ethtool_op_set_sg,
869 .get_tso = ethtool_op_get_tso,
870 .set_tso = ethtool_op_set_tso,
871 .get_strings = qeth_core_get_strings,
872 .get_ethtool_stats = qeth_core_get_ethtool_stats,
873 .get_stats_count = qeth_core_get_stats_count,
874 .get_drvinfo = qeth_core_get_drvinfo,
875};
876
877static struct ethtool_ops qeth_l2_osn_ops = {
878 .get_strings = qeth_core_get_strings,
879 .get_ethtool_stats = qeth_core_get_ethtool_stats,
880 .get_stats_count = qeth_core_get_stats_count,
881 .get_drvinfo = qeth_core_get_drvinfo,
882};
883
884static int qeth_l2_setup_netdev(struct qeth_card *card)
885{
886 switch (card->info.type) {
887 case QETH_CARD_TYPE_OSAE:
888 card->dev = alloc_etherdev(0);
889 break;
890 case QETH_CARD_TYPE_IQD:
891 card->dev = alloc_netdev(0, "hsi%d", ether_setup);
892 break;
893 case QETH_CARD_TYPE_OSN:
894 card->dev = alloc_netdev(0, "osn%d", ether_setup);
895 card->dev->flags |= IFF_NOARP;
896 break;
897 default:
898 card->dev = alloc_etherdev(0);
899 }
900
901 if (!card->dev)
902 return -ENODEV;
903
904 card->dev->priv = card;
905 card->dev->tx_timeout = &qeth_tx_timeout;
906 card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
907 card->dev->open = qeth_l2_open;
908 card->dev->stop = qeth_l2_stop;
909 card->dev->hard_start_xmit = qeth_l2_hard_start_xmit;
910 card->dev->do_ioctl = qeth_l2_do_ioctl;
911 card->dev->get_stats = qeth_get_stats;
912 card->dev->change_mtu = qeth_change_mtu;
913 card->dev->set_multicast_list = qeth_l2_set_multicast_list;
914 card->dev->vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid;
915 card->dev->vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid;
916 card->dev->set_mac_address = qeth_l2_set_mac_address;
917 card->dev->mtu = card->info.initial_mtu;
918 if (card->info.type != QETH_CARD_TYPE_OSN)
919 SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
920 else
921 SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops);
922 card->dev->features |= NETIF_F_HW_VLAN_FILTER;
923 card->info.broadcast_capable = 1;
924 qeth_l2_request_initial_mac(card);
925 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
926 return register_netdev(card->dev);
927}
928
929static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
930{
931 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
932 int rc = 0;
933 enum qeth_card_states recover_flag;
934
935 BUG_ON(!card);
936 QETH_DBF_TEXT(setup, 2, "setonlin");
937 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
938
939 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
940 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) {
941 PRINT_WARN("set_online of card %s interrupted by user!\n",
942 CARD_BUS_ID(card));
943 return -ERESTARTSYS;
944 }
945
946 recover_flag = card->state;
947 rc = ccw_device_set_online(CARD_RDEV(card));
948 if (rc) {
949 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
950 return -EIO;
951 }
952 rc = ccw_device_set_online(CARD_WDEV(card));
953 if (rc) {
954 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
955 return -EIO;
956 }
957 rc = ccw_device_set_online(CARD_DDEV(card));
958 if (rc) {
959 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
960 return -EIO;
961 }
962
963 rc = qeth_core_hardsetup_card(card);
964 if (rc) {
965 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
966 goto out_remove;
967 }
968
969 if (!card->dev && qeth_l2_setup_netdev(card))
970 goto out_remove;
971
972 if (card->info.type != QETH_CARD_TYPE_OSN)
973 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
974
975 card->state = CARD_STATE_HARDSETUP;
976 qeth_print_status_message(card);
977
978 /* softsetup */
979 QETH_DBF_TEXT(setup, 2, "softsetp");
980
981 rc = qeth_send_startlan(card);
982 if (rc) {
983 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
984 if (rc == 0xe080) {
985 PRINT_WARN("LAN on card %s if offline! "
986 "Waiting for STARTLAN from card.\n",
987 CARD_BUS_ID(card));
988 card->lan_online = 0;
989 }
990 return rc;
991 } else
992 card->lan_online = 1;
993
994 if (card->info.type != QETH_CARD_TYPE_OSN) {
995 qeth_set_large_send(card, card->options.large_send);
996 qeth_l2_process_vlans(card, 0);
997 }
998
999 netif_tx_disable(card->dev);
1000
1001 rc = qeth_init_qdio_queues(card);
1002 if (rc) {
1003 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
1004 goto out_remove;
1005 }
1006 card->state = CARD_STATE_SOFTSETUP;
1007 netif_carrier_on(card->dev);
1008
1009 qeth_set_allowed_threads(card, 0xffffffff, 0);
1010 if (recover_flag == CARD_STATE_RECOVER) {
1011 if (recovery_mode &&
1012 card->info.type != QETH_CARD_TYPE_OSN) {
1013 qeth_l2_open(card->dev);
1014 } else {
1015 rtnl_lock();
1016 dev_open(card->dev);
1017 rtnl_unlock();
1018 }
1019 /* this also sets saved unicast addresses */
1020 qeth_l2_set_multicast_list(card->dev);
1021 }
1022 /* let user_space know that device is online */
1023 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
1024 return 0;
1025out_remove:
1026 card->use_hard_stop = 1;
1027 qeth_l2_stop_card(card, 0);
1028 ccw_device_set_offline(CARD_DDEV(card));
1029 ccw_device_set_offline(CARD_WDEV(card));
1030 ccw_device_set_offline(CARD_RDEV(card));
1031 if (recover_flag == CARD_STATE_RECOVER)
1032 card->state = CARD_STATE_RECOVER;
1033 else
1034 card->state = CARD_STATE_DOWN;
1035 return -ENODEV;
1036}
1037
1038static int qeth_l2_set_online(struct ccwgroup_device *gdev)
1039{
1040 return __qeth_l2_set_online(gdev, 0);
1041}
1042
1043static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
1044 int recovery_mode)
1045{
1046 struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
1047 int rc = 0, rc2 = 0, rc3 = 0;
1048 enum qeth_card_states recover_flag;
1049
1050 QETH_DBF_TEXT(setup, 3, "setoffl");
1051 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
1052
1053 if (card->dev && netif_carrier_ok(card->dev))
1054 netif_carrier_off(card->dev);
1055 recover_flag = card->state;
1056 if (qeth_l2_stop_card(card, recovery_mode) == -ERESTARTSYS) {
1057 PRINT_WARN("Stopping card %s interrupted by user!\n",
1058 CARD_BUS_ID(card));
1059 return -ERESTARTSYS;
1060 }
1061 rc = ccw_device_set_offline(CARD_DDEV(card));
1062 rc2 = ccw_device_set_offline(CARD_WDEV(card));
1063 rc3 = ccw_device_set_offline(CARD_RDEV(card));
1064 if (!rc)
1065 rc = (rc2) ? rc2 : rc3;
1066 if (rc)
1067 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
1068 if (recover_flag == CARD_STATE_UP)
1069 card->state = CARD_STATE_RECOVER;
1070 /* let user_space know that device is offline */
1071 kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
1072 return 0;
1073}
1074
1075static int qeth_l2_set_offline(struct ccwgroup_device *cgdev)
1076{
1077 return __qeth_l2_set_offline(cgdev, 0);
1078}
1079
1080static int qeth_l2_recover(void *ptr)
1081{
1082 struct qeth_card *card;
1083 int rc = 0;
1084
1085 card = (struct qeth_card *) ptr;
1086 QETH_DBF_TEXT(trace, 2, "recover1");
1087 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
1088 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
1089 return 0;
1090 QETH_DBF_TEXT(trace, 2, "recover2");
1091 PRINT_WARN("Recovery of device %s started ...\n",
1092 CARD_BUS_ID(card));
1093 card->use_hard_stop = 1;
1094 __qeth_l2_set_offline(card->gdev, 1);
1095 rc = __qeth_l2_set_online(card->gdev, 1);
1096 /* don't run another scheduled recovery */
1097 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1098 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
1099 if (!rc)
1100 PRINT_INFO("Device %s successfully recovered!\n",
1101 CARD_BUS_ID(card));
1102 else
1103 PRINT_INFO("Device %s could not be recovered!\n",
1104 CARD_BUS_ID(card));
1105 return 0;
1106}
1107
1108static int __init qeth_l2_init(void)
1109{
1110 PRINT_INFO("register layer 2 discipline\n");
1111 return 0;
1112}
1113
1114static void __exit qeth_l2_exit(void)
1115{
1116 PRINT_INFO("unregister layer 2 discipline\n");
1117}
1118
1119static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
1120{
1121 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
1122 qeth_qdio_clear_card(card, 0);
1123 qeth_clear_qdio_buffers(card);
1124}
1125
1126struct ccwgroup_driver qeth_l2_ccwgroup_driver = {
1127 .probe = qeth_l2_probe_device,
1128 .remove = qeth_l2_remove_device,
1129 .set_online = qeth_l2_set_online,
1130 .set_offline = qeth_l2_set_offline,
1131 .shutdown = qeth_l2_shutdown,
1132};
1133EXPORT_SYMBOL_GPL(qeth_l2_ccwgroup_driver);
1134
1135static int qeth_osn_send_control_data(struct qeth_card *card, int len,
1136 struct qeth_cmd_buffer *iob)
1137{
1138 unsigned long flags;
1139 int rc = 0;
1140
1141 QETH_DBF_TEXT(trace, 5, "osndctrd");
1142
1143 wait_event(card->wait_q,
1144 atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
1145 qeth_prepare_control_data(card, len, iob);
1146 QETH_DBF_TEXT(trace, 6, "osnoirqp");
1147 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1148 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1149 (addr_t) iob, 0, 0);
1150 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1151 if (rc) {
1152 PRINT_WARN("qeth_osn_send_control_data: "
1153 "ccw_device_start rc = %i\n", rc);
1154 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
1155 qeth_release_buffer(iob->channel, iob);
1156 atomic_set(&card->write.irq_pending, 0);
1157 wake_up(&card->wait_q);
1158 }
1159 return rc;
1160}
1161
1162static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
1163 struct qeth_cmd_buffer *iob, int data_len)
1164{
1165 u16 s1, s2;
1166
1167 QETH_DBF_TEXT(trace, 4, "osndipa");
1168
1169 qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
1170 s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
1171 s2 = (u16)data_len;
1172 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
1173 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
1174 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
1175 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
1176 return qeth_osn_send_control_data(card, s1, iob);
1177}
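/*
 * Worked example (editor's illustration): for an OSN command with
 * data_len = 100, the four 16-bit length fields of the IPA PDU header
 * are filled as
 *   LEN_TOTAL           = IPA_PDU_HEADER_SIZE + 100
 *   LEN_PDU1/PDU2/PDU3  = 100
 * i.e. the payload length is replicated into all three PDU slots before
 * the buffer is handed to qeth_osn_send_control_data().
 */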
1178
1179int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
1180{
1181 struct qeth_cmd_buffer *iob;
1182 struct qeth_card *card;
1183 int rc;
1184
1185 QETH_DBF_TEXT(trace, 2, "osnsdmc");
1186 if (!dev)
1187 return -ENODEV;
1188 card = netdev_priv(dev);
1189 if (!card)
1190 return -ENODEV;
1191 if ((card->state != CARD_STATE_UP) &&
1192 (card->state != CARD_STATE_SOFTSETUP))
1193 return -ENODEV;
1194 iob = qeth_wait_for_buffer(&card->write);
1195 memcpy(iob->data+IPA_PDU_HEADER_SIZE, data, data_len);
1196 rc = qeth_osn_send_ipa_cmd(card, iob, data_len);
1197 return rc;
1198}
1199EXPORT_SYMBOL(qeth_osn_assist);
1200
1201int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
1202 int (*assist_cb)(struct net_device *, void *),
1203 int (*data_cb)(struct sk_buff *))
1204{
1205 struct qeth_card *card;
1206
1207 QETH_DBF_TEXT(trace, 2, "osnreg");
1208 *dev = qeth_l2_netdev_by_devno(read_dev_no);
1209 if (*dev == NULL)
1210 return -ENODEV;
1211 card = netdev_priv(*dev);
1212 if (!card)
1213 return -ENODEV;
1214 if ((assist_cb == NULL) || (data_cb == NULL))
1215 return -EINVAL;
1216 card->osn_info.assist_cb = assist_cb;
1217 card->osn_info.data_cb = data_cb;
1218 return 0;
1219}
1220EXPORT_SYMBOL(qeth_osn_register);
1221
1222void qeth_osn_deregister(struct net_device *dev)
1223{
1224 struct qeth_card *card;
1225
1226 QETH_DBF_TEXT(trace, 2, "osndereg");
1227 if (!dev)
1228 return;
1229 card = netdev_priv(dev);
1230 if (!card)
1231 return;
1232 card->osn_info.assist_cb = NULL;
1233 card->osn_info.data_cb = NULL;
1234 return;
1235}
1236EXPORT_SYMBOL(qeth_osn_deregister);
1237
1238module_init(qeth_l2_init);
1239module_exit(qeth_l2_exit);
1240MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
1241MODULE_DESCRIPTION("qeth layer 2 discipline");
1242MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
new file mode 100644
index 000000000000..f639cc3af22b
--- /dev/null
+++ b/drivers/s390/net/qeth_l3.h
@@ -0,0 +1,76 @@
1/*
2 * drivers/s390/net/qeth_l3.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#ifndef __QETH_L3_H__
12#define __QETH_L3_H__
13
14#include "qeth_core.h"
15
16#define QETH_DBF_TEXT_(name, level, text...) \
17 do { \
18 if (qeth_dbf_passes(qeth_dbf_##name, level)) { \
19 char *dbf_txt_buf = get_cpu_var(qeth_l3_dbf_txt_buf); \
20 sprintf(dbf_txt_buf, text); \
21 debug_text_event(qeth_dbf_##name, level, dbf_txt_buf); \
22 put_cpu_var(qeth_l3_dbf_txt_buf); \
23 } \
24 } while (0)
25
26DECLARE_PER_CPU(char[256], qeth_l3_dbf_txt_buf);
27
28struct qeth_ipaddr {
29 struct list_head entry;
30 enum qeth_ip_types type;
31 enum qeth_ipa_setdelip_flags set_flags;
32 enum qeth_ipa_setdelip_flags del_flags;
33 int is_multicast;
34 int users;
35 enum qeth_prot_versions proto;
36 unsigned char mac[OSA_ADDR_LEN];
37 union {
38 struct {
39 unsigned int addr;
40 unsigned int mask;
41 } a4;
42 struct {
43 struct in6_addr addr;
44 unsigned int pfxlen;
45 } a6;
46 } u;
47};
48
49struct qeth_ipato_entry {
50 struct list_head entry;
51 enum qeth_prot_versions proto;
52 char addr[16];
53 int mask_bits;
54};
55
56
57void qeth_l3_ipaddr4_to_string(const __u8 *, char *);
58int qeth_l3_string_to_ipaddr4(const char *, __u8 *);
59void qeth_l3_ipaddr6_to_string(const __u8 *, char *);
60int qeth_l3_string_to_ipaddr6(const char *, __u8 *);
61void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
62int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *);
63int qeth_l3_create_device_attributes(struct device *);
64void qeth_l3_remove_device_attributes(struct device *);
65int qeth_l3_setrouting_v4(struct qeth_card *);
66int qeth_l3_setrouting_v6(struct qeth_card *);
67int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
68void qeth_l3_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions,
69 u8 *, int);
70int qeth_l3_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
71void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
72int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
73void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
74 const u8 *);
75
76#endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
new file mode 100644
index 000000000000..a856cb47fc78
--- /dev/null
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -0,0 +1,3391 @@
1/*
2 * drivers/s390/net/qeth_l3_main.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/string.h>
14#include <linux/errno.h>
15#include <linux/kernel.h>
16#include <linux/etherdevice.h>
17#include <linux/mii.h>
18#include <linux/ip.h>
19#include <linux/reboot.h>
20#include <linux/inetdevice.h>
21#include <linux/igmp.h>
22
23#include <net/ip.h>
24#include <net/arp.h>
25
26#include <asm/s390_rdev.h>
27
28#include "qeth_l3.h"
29#include "qeth_core_offl.h"
30
31DEFINE_PER_CPU(char[256], qeth_l3_dbf_txt_buf);
32
33static int qeth_l3_set_offline(struct ccwgroup_device *);
34static int qeth_l3_recover(void *);
35static int qeth_l3_stop(struct net_device *);
36static void qeth_l3_set_multicast_list(struct net_device *);
37static int qeth_l3_neigh_setup(struct net_device *, struct neigh_parms *);
38static int qeth_l3_register_addr_entry(struct qeth_card *,
39 struct qeth_ipaddr *);
40static int qeth_l3_deregister_addr_entry(struct qeth_card *,
41 struct qeth_ipaddr *);
42static int __qeth_l3_set_online(struct ccwgroup_device *, int);
43static int __qeth_l3_set_offline(struct ccwgroup_device *, int);
44
45
46static int qeth_l3_isxdigit(char *buf)
47{
48 while (*buf) {
49 if (!isxdigit(*buf++))
50 return 0;
51 }
52 return 1;
53}
54
55void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
56{
57 sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]);
58}
59
60int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
61{
62 int count = 0, rc = 0;
63 int in[4];
64 char c;
65
66 rc = sscanf(buf, "%u.%u.%u.%u%c",
67 &in[0], &in[1], &in[2], &in[3], &c);
68 if (rc != 4 && (rc != 5 || c != '\n'))
69 return -EINVAL;
70 for (count = 0; count < 4; count++) {
71 if (in[count] > 255)
72 return -EINVAL;
73 addr[count] = in[count];
74 }
75 return 0;
76}
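/*
 * Usage sketch (editor's illustration): the trailing %c conversion makes
 * the parser accept sysfs input with exactly one trailing newline:
 *
 *   __u8 a[4];
 *   qeth_l3_string_to_ipaddr4("10.1.2.3\n", a);  returns 0, a = {10,1,2,3}
 *   qeth_l3_string_to_ipaddr4("10.1.2.3x", a);   returns -EINVAL
 */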
77
78void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
79{
80 sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
81 ":%02x%02x:%02x%02x:%02x%02x:%02x%02x",
82 addr[0], addr[1], addr[2], addr[3],
83 addr[4], addr[5], addr[6], addr[7],
84 addr[8], addr[9], addr[10], addr[11],
85 addr[12], addr[13], addr[14], addr[15]);
86}
87
88int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
89{
90 const char *end, *end_tmp, *start;
91 __u16 *in;
92 char num[5];
93 int num2, cnt, out, found, save_cnt;
94 unsigned short in_tmp[8] = {0, };
95
96 cnt = out = found = save_cnt = num2 = 0;
97 end = start = buf;
98 in = (__u16 *) addr;
99 memset(in, 0, 16);
100 while (*end) {
101 end = strchr(start, ':');
102 if (end == NULL) {
103 end = buf + strlen(buf);
104 end_tmp = strchr(start, '\n');
105 if (end_tmp != NULL)
106 end = end_tmp;
107 out = 1;
108 }
109 if ((end - start)) {
110 memset(num, 0, 5);
111 if ((end - start) > 4)
112 return -EINVAL;
113 memcpy(num, start, end - start);
114 if (!qeth_l3_isxdigit(num))
115 return -EINVAL;
116 sscanf(start, "%x", &num2);
117 if (found)
118 in_tmp[save_cnt++] = num2;
119 else
120 in[cnt++] = num2;
121 if (out)
122 break;
123 } else {
124 if (found)
125 return -EINVAL;
126 found = 1;
127 }
128 start = ++end;
129 }
130 if (cnt + save_cnt > 8)
131 return -EINVAL;
132 cnt = 7;
133 while (save_cnt)
134 in[cnt--] = in_tmp[--save_cnt];
135 return 0;
136}
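/*
 * Worked example (editor's illustration) of the "::" handling above:
 * parsing "fe80::1" stores 0xfe80 in in[0]; the empty group between the
 * two colons sets 'found', so the trailing 1 is collected in in_tmp[0];
 * the final loop then copies in_tmp back-to-front, yielding
 *   in[0] = 0xfe80, in[1]..in[6] = 0, in[7] = 0x0001
 * i.e. fe80:0000:0000:0000:0000:0000:0000:0001.
 */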
137
138void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
139 char *buf)
140{
141 if (proto == QETH_PROT_IPV4)
142 qeth_l3_ipaddr4_to_string(addr, buf);
143 else if (proto == QETH_PROT_IPV6)
144 qeth_l3_ipaddr6_to_string(addr, buf);
145}
146
147int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
148 __u8 *addr)
149{
150 if (proto == QETH_PROT_IPV4)
151 return qeth_l3_string_to_ipaddr4(buf, addr);
152 else if (proto == QETH_PROT_IPV6)
153 return qeth_l3_string_to_ipaddr6(buf, addr);
154 else
155 return -EINVAL;
156}
157
158static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
159{
160 int i, j;
161 u8 octet;
162
163 for (i = 0; i < len; ++i) {
164 octet = addr[i];
165 for (j = 7; j >= 0; --j) {
166 bits[i*8 + j] = octet & 1;
167 octet >>= 1;
168 }
169 }
170}
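/*
 * Worked example (editor's illustration): each address byte is expanded
 * into eight bytes holding one bit each, most-significant bit first.
 * For addr[0] = 0xc0 (binary 11000000) the loop yields
 *   bits[0..7] = { 1, 1, 0, 0, 0, 0, 0, 0 }
 * so a prefix of mask_bits bits can be compared with a plain memcmp().
 */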
171
172static int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
173 struct qeth_ipaddr *addr)
174{
175 struct qeth_ipato_entry *ipatoe;
176 u8 addr_bits[128] = {0, };
177 u8 ipatoe_bits[128] = {0, };
178 int rc = 0;
179
180 if (!card->ipato.enabled)
181 return 0;
182
183 qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
184 (addr->proto == QETH_PROT_IPV4) ? 4 : 16);
185 list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
186 if (addr->proto != ipatoe->proto)
187 continue;
188 qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
189 (ipatoe->proto == QETH_PROT_IPV4) ?
190 4 : 16);
191 if (addr->proto == QETH_PROT_IPV4)
192 rc = !memcmp(addr_bits, ipatoe_bits,
193 min(32, ipatoe->mask_bits));
194 else
195 rc = !memcmp(addr_bits, ipatoe_bits,
196 min(128, ipatoe->mask_bits));
197 if (rc)
198 break;
199 }
200 /* invert? */
201 if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
202 rc = !rc;
203 else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
204 rc = !rc;
205
206 return rc;
207}
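/*
 * Example (editor's illustration): with a single IPv4 IPATO entry for
 * 10.0.0.0 with mask_bits = 8, the address 10.1.2.3 matches on the first
 * eight expanded bits and the function returns 1 (take the address over).
 * With card->ipato.invert4 set the result is inverted, so every address
 * outside 10.0.0.0/8 is taken over instead.
 */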
208
209/*
210 * Add an IP address to the todo list. If there is already an "add todo"
211 * for this address in the list, we just increment its reference count.
212 * Returns 0 if we just incremented the reference count.
213 */
214static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
215 struct qeth_ipaddr *addr, int add)
216{
217 struct qeth_ipaddr *tmp, *t;
218 int found = 0;
219
220 list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
221 if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
222 (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
223 return 0;
224 if ((tmp->proto == QETH_PROT_IPV4) &&
225 (addr->proto == QETH_PROT_IPV4) &&
226 (tmp->type == addr->type) &&
227 (tmp->is_multicast == addr->is_multicast) &&
228 (tmp->u.a4.addr == addr->u.a4.addr) &&
229 (tmp->u.a4.mask == addr->u.a4.mask)) {
230 found = 1;
231 break;
232 }
233 if ((tmp->proto == QETH_PROT_IPV6) &&
234 (addr->proto == QETH_PROT_IPV6) &&
235 (tmp->type == addr->type) &&
236 (tmp->is_multicast == addr->is_multicast) &&
237 (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
238 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
239 sizeof(struct in6_addr)) == 0)) {
240 found = 1;
241 break;
242 }
243 }
244 if (found) {
245 if (addr->users != 0)
246 tmp->users += addr->users;
247 else
248 tmp->users += add ? 1 : -1;
249 if (tmp->users == 0) {
250 list_del(&tmp->entry);
251 kfree(tmp);
252 }
253 return 0;
254 } else {
255 if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
256 list_add(&addr->entry, card->ip_tbd_list);
257 else {
258 if (addr->users == 0)
259 addr->users += add ? 1 : -1;
260 if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
261 qeth_l3_is_addr_covered_by_ipato(card, addr)) {
262 QETH_DBF_TEXT(trace, 2, "tkovaddr");
263 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
264 }
265 list_add_tail(&addr->entry, card->ip_tbd_list);
266 }
267 return 1;
268 }
269}
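/*
 * Example (editor's illustration) of the refcount folding above: two
 * consecutive "add 10.1.2.3" todos collapse into one list entry with
 * users == 2; a following "delete 10.1.2.3" todo drops users to 1; a
 * matching add/delete pair cancels out to users == 0, in which case the
 * entry is removed and freed before qeth_l3_set_ip_addr_list() runs.
 */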
270
271static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
272{
273 unsigned long flags;
274 int rc = 0;
275
276 QETH_DBF_TEXT(trace, 4, "delip");
277
278 if (addr->proto == QETH_PROT_IPV4)
279 QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4);
280 else {
281 QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8);
282 QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8);
283 }
284 spin_lock_irqsave(&card->ip_lock, flags);
285 rc = __qeth_l3_insert_ip_todo(card, addr, 0);
286 spin_unlock_irqrestore(&card->ip_lock, flags);
287 return rc;
288}
289
290static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
291{
292 unsigned long flags;
293 int rc = 0;
294
295 QETH_DBF_TEXT(trace, 4, "addip");
296 if (addr->proto == QETH_PROT_IPV4)
297 QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4);
298 else {
299 QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8);
300 QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8);
301 }
302 spin_lock_irqsave(&card->ip_lock, flags);
303 rc = __qeth_l3_insert_ip_todo(card, addr, 1);
304 spin_unlock_irqrestore(&card->ip_lock, flags);
305 return rc;
306}
307
308
309static struct qeth_ipaddr *qeth_l3_get_addr_buffer(
310 enum qeth_prot_versions prot)
311{
312 struct qeth_ipaddr *addr;
313
314 addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
315 if (addr == NULL) {
316 PRINT_WARN("Not enough memory to add address\n");
317 return NULL;
318 }
319 addr->type = QETH_IP_TYPE_NORMAL;
320 addr->proto = prot;
321 return addr;
322}
323
324static void qeth_l3_delete_mc_addresses(struct qeth_card *card)
325{
326 struct qeth_ipaddr *iptodo;
327 unsigned long flags;
328
329 QETH_DBF_TEXT(trace, 4, "delmc");
330 iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
331 if (!iptodo) {
332 QETH_DBF_TEXT(trace, 2, "dmcnomem");
333 return;
334 }
335 iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
336 spin_lock_irqsave(&card->ip_lock, flags);
337 if (!__qeth_l3_insert_ip_todo(card, iptodo, 0))
338 kfree(iptodo);
339 spin_unlock_irqrestore(&card->ip_lock, flags);
340}
341
342/*
343 * Add/remove address to/from card's ip list, i.e. try to add or remove
344 * reference to/from an IP address that is already registered on the card.
345 * Returns:
346 * 0 address was on card and its reference count has been adjusted,
347 * but is still > 0, so nothing has to be done
348 * also returns 0 if the address was not on the card and the todo was to delete
349 * the address -> there is also nothing to be done
350 * 1 address was not on card and the todo is to add it to the card's ip
351 * list
352 * -1 address was on card and its reference count has been decremented
353 * to <= 0 by the todo -> address must be removed from card
354 */
355static int __qeth_l3_ref_ip_on_card(struct qeth_card *card,
356 struct qeth_ipaddr *todo, struct qeth_ipaddr **__addr)
357{
358 struct qeth_ipaddr *addr;
359 int found = 0;
360
361 list_for_each_entry(addr, &card->ip_list, entry) {
362 if ((addr->proto == QETH_PROT_IPV4) &&
363 (todo->proto == QETH_PROT_IPV4) &&
364 (addr->type == todo->type) &&
365 (addr->u.a4.addr == todo->u.a4.addr) &&
366 (addr->u.a4.mask == todo->u.a4.mask)) {
367 found = 1;
368 break;
369 }
370 if ((addr->proto == QETH_PROT_IPV6) &&
371 (todo->proto == QETH_PROT_IPV6) &&
372 (addr->type == todo->type) &&
373 (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
374 (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
375 sizeof(struct in6_addr)) == 0)) {
376 found = 1;
377 break;
378 }
379 }
380 if (found) {
381 addr->users += todo->users;
382 if (addr->users <= 0) {
383 *__addr = addr;
384 return -1;
385 } else {
386 /* for VIPA and RXIP limit refcount to 1 */
387 if (addr->type != QETH_IP_TYPE_NORMAL)
388 addr->users = 1;
389 return 0;
390 }
391 }
392 if (todo->users > 0) {
393 /* for VIPA and RXIP limit refcount to 1 */
394 if (todo->type != QETH_IP_TYPE_NORMAL)
395 todo->users = 1;
396 return 1;
397 } else
398 return 0;
399}
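/*
 * Example (editor's illustration) of how the caller,
 * qeth_l3_set_ip_addr_list(), consumes the return value: 0 means only
 * the refcount changed and the todo is freed; 1 means the address is new
 * and is registered on the card via qeth_l3_register_addr_entry(); -1
 * means the refcount dropped to <= 0 and the on-card address is
 * deregistered and freed.
 */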
400
401static void __qeth_l3_delete_all_mc(struct qeth_card *card,
402 unsigned long *flags)
403{
404 struct qeth_ipaddr *addr, *tmp;
405 int rc;
406again:
407 list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
408 if (addr->is_multicast) {
409 list_del(&addr->entry);
410 spin_unlock_irqrestore(&card->ip_lock, *flags);
411 rc = qeth_l3_deregister_addr_entry(card, addr);
412 spin_lock_irqsave(&card->ip_lock, *flags);
413 if (!rc) {
414 kfree(addr);
415 goto again;
416 } else
417 list_add(&addr->entry, &card->ip_list);
418 }
419 }
420}
421
422static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
423{
424 struct list_head *tbd_list;
425 struct qeth_ipaddr *todo, *addr;
426 unsigned long flags;
427 int rc;
428
429 QETH_DBF_TEXT(trace, 2, "sdiplist");
430 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
431
432 spin_lock_irqsave(&card->ip_lock, flags);
433 tbd_list = card->ip_tbd_list;
434 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
435 if (!card->ip_tbd_list) {
436 QETH_DBF_TEXT(trace, 0, "silnomem");
437 card->ip_tbd_list = tbd_list;
438 spin_unlock_irqrestore(&card->ip_lock, flags);
439 return;
440 } else
441 INIT_LIST_HEAD(card->ip_tbd_list);
442
443 while (!list_empty(tbd_list)) {
444 todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
445 list_del(&todo->entry);
446 if (todo->type == QETH_IP_TYPE_DEL_ALL_MC) {
447 __qeth_l3_delete_all_mc(card, &flags);
448 kfree(todo);
449 continue;
450 }
451 rc = __qeth_l3_ref_ip_on_card(card, todo, &addr);
452 if (rc == 0) {
453 /* nothing to be done; only adjusted refcount */
454 kfree(todo);
455 } else if (rc == 1) {
456 /* new entry to be added to on-card list */
457 spin_unlock_irqrestore(&card->ip_lock, flags);
458 rc = qeth_l3_register_addr_entry(card, todo);
459 spin_lock_irqsave(&card->ip_lock, flags);
460 if (!rc)
461 list_add_tail(&todo->entry, &card->ip_list);
462 else
463 kfree(todo);
464 } else if (rc == -1) {
465 /* on-card entry to be removed */
466 list_del_init(&addr->entry);
467 spin_unlock_irqrestore(&card->ip_lock, flags);
468 rc = qeth_l3_deregister_addr_entry(card, addr);
469 spin_lock_irqsave(&card->ip_lock, flags);
470 if (!rc)
471 kfree(addr);
472 else
473 list_add_tail(&addr->entry, &card->ip_list);
474 kfree(todo);
475 }
476 }
477 spin_unlock_irqrestore(&card->ip_lock, flags);
478 kfree(tbd_list);
479}
480
481static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
482 int recover)
483{
484 struct qeth_ipaddr *addr, *tmp;
485 unsigned long flags;
486
487 QETH_DBF_TEXT(trace, 4, "clearip");
488 spin_lock_irqsave(&card->ip_lock, flags);
489 /* clear todo list */
490 list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) {
491 list_del(&addr->entry);
492 kfree(addr);
493 }
494
495 while (!list_empty(&card->ip_list)) {
496 addr = list_entry(card->ip_list.next,
497 struct qeth_ipaddr, entry);
498 list_del_init(&addr->entry);
499 if (clean) {
500 spin_unlock_irqrestore(&card->ip_lock, flags);
501 qeth_l3_deregister_addr_entry(card, addr);
502 spin_lock_irqsave(&card->ip_lock, flags);
503 }
504 if (!recover || addr->is_multicast) {
505 kfree(addr);
506 continue;
507 }
508 list_add_tail(&addr->entry, card->ip_tbd_list);
509 }
510 spin_unlock_irqrestore(&card->ip_lock, flags);
511}
512
513static int qeth_l3_address_exists_in_list(struct list_head *list,
514 struct qeth_ipaddr *addr, int same_type)
515{
516 struct qeth_ipaddr *tmp;
517
518 list_for_each_entry(tmp, list, entry) {
519 if ((tmp->proto == QETH_PROT_IPV4) &&
520 (addr->proto == QETH_PROT_IPV4) &&
521 ((same_type && (tmp->type == addr->type)) ||
522 (!same_type && (tmp->type != addr->type))) &&
523 (tmp->u.a4.addr == addr->u.a4.addr))
524 return 1;
525
526 if ((tmp->proto == QETH_PROT_IPV6) &&
527 (addr->proto == QETH_PROT_IPV6) &&
528 ((same_type && (tmp->type == addr->type)) ||
529 (!same_type && (tmp->type != addr->type))) &&
530 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
531 sizeof(struct in6_addr)) == 0))
532 return 1;
533
534 }
535 return 0;
536}
537
538static int qeth_l3_send_setdelmc(struct qeth_card *card,
539 struct qeth_ipaddr *addr, int ipacmd)
540{
541 int rc;
542 struct qeth_cmd_buffer *iob;
543 struct qeth_ipa_cmd *cmd;
544
545 QETH_DBF_TEXT(trace, 4, "setdelmc");
546
547 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
548 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
549 memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
550 if (addr->proto == QETH_PROT_IPV6)
551 memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
552 sizeof(struct in6_addr));
553 else
554 memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4);
555
556 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
557
558 return rc;
559}
560
561static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len)
562{
563 int i, j;
564 for (i = 0; i < 16; i++) {
565 j = len - (i * 8);
566 if (j >= 8)
567 netmask[i] = 0xff;
568 else if (j > 0)
569 netmask[i] = (u8)(0xFF00 >> j);
570 else
571 netmask[i] = 0;
572 }
573}
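/*
 * Worked example (editor's illustration): for a prefix length of
 * len = 20 the loop computes j = 20, 12, 4, -4, ... and yields
 *   netmask[0] = 0xff, netmask[1] = 0xff,
 *   netmask[2] = (u8)(0xff00 >> 4) = 0xf0, netmask[3..15] = 0x00
 * i.e. the mask ffff:f000:0000:0000:0000:0000:0000:0000.
 */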
574
575static int qeth_l3_send_setdelip(struct qeth_card *card,
576 struct qeth_ipaddr *addr, int ipacmd, unsigned int flags)
577{
578 int rc;
579 struct qeth_cmd_buffer *iob;
580 struct qeth_ipa_cmd *cmd;
581 __u8 netmask[16];
582
583 QETH_DBF_TEXT(trace, 4, "setdelip");
584 QETH_DBF_TEXT_(trace, 4, "flags%02X", flags);
585
586 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
587 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
588 if (addr->proto == QETH_PROT_IPV6) {
589 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
590 sizeof(struct in6_addr));
591 qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen);
592 memcpy(cmd->data.setdelip6.mask, netmask,
593 sizeof(struct in6_addr));
594 cmd->data.setdelip6.flags = flags;
595 } else {
596 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
597 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
598 cmd->data.setdelip4.flags = flags;
599 }
600
601 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
602
603 return rc;
604}
605
606static int qeth_l3_send_setrouting(struct qeth_card *card,
607 enum qeth_routing_types type, enum qeth_prot_versions prot)
608{
609 int rc;
610 struct qeth_ipa_cmd *cmd;
611 struct qeth_cmd_buffer *iob;
612
613 QETH_DBF_TEXT(trace, 4, "setroutg");
614 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
615 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
616 cmd->data.setrtg.type = (type);
617 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
618
619 return rc;
620}
621
622static void qeth_l3_correct_routing_type(struct qeth_card *card,
623 enum qeth_routing_types *type, enum qeth_prot_versions prot)
624{
625 if (card->info.type == QETH_CARD_TYPE_IQD) {
626 switch (*type) {
627 case NO_ROUTER:
628 case PRIMARY_CONNECTOR:
629 case SECONDARY_CONNECTOR:
630 case MULTICAST_ROUTER:
631 return;
632 default:
633 goto out_inval;
634 }
635 } else {
636 switch (*type) {
637 case NO_ROUTER:
638 case PRIMARY_ROUTER:
639 case SECONDARY_ROUTER:
640 return;
641 case MULTICAST_ROUTER:
642 if (qeth_is_ipafunc_supported(card, prot,
643 IPA_OSA_MC_ROUTER))
644 return;
645 default:
646 goto out_inval;
647 }
648 }
649out_inval:
650 PRINT_WARN("Routing type '%s' not supported for interface %s.\n"
651 "Router status set to 'no router'.\n",
652 ((*type == PRIMARY_ROUTER)? "primary router" :
653 (*type == SECONDARY_ROUTER)? "secondary router" :
654 (*type == PRIMARY_CONNECTOR)? "primary connector" :
655 (*type == SECONDARY_CONNECTOR)? "secondary connector" :
656 (*type == MULTICAST_ROUTER)? "multicast router" :
657 "unknown"),
658 card->dev->name);
659 *type = NO_ROUTER;
660}
661
662int qeth_l3_setrouting_v4(struct qeth_card *card)
663{
664 int rc;
665
666 QETH_DBF_TEXT(trace, 3, "setrtg4");
667
668 qeth_l3_correct_routing_type(card, &card->options.route4.type,
669 QETH_PROT_IPV4);
670
671 rc = qeth_l3_send_setrouting(card, card->options.route4.type,
672 QETH_PROT_IPV4);
673 if (rc) {
674 card->options.route4.type = NO_ROUTER;
675 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
676 "Type set to 'no router'.\n",
677 rc, QETH_CARD_IFNAME(card));
678 }
679 return rc;
680}
681
682int qeth_l3_setrouting_v6(struct qeth_card *card)
683{
684 int rc = 0;
685
686 QETH_DBF_TEXT(trace, 3, "setrtg6");
687#ifdef CONFIG_QETH_IPV6
688
689 if (!qeth_is_supported(card, IPA_IPV6))
690 return 0;
691 qeth_l3_correct_routing_type(card, &card->options.route6.type,
692 QETH_PROT_IPV6);
693
694 rc = qeth_l3_send_setrouting(card, card->options.route6.type,
695 QETH_PROT_IPV6);
696 if (rc) {
697 card->options.route6.type = NO_ROUTER;
698 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
699 "Type set to 'no router'.\n",
700 rc, QETH_CARD_IFNAME(card));
701 }
702#endif
703 return rc;
704}
705
706/*
707 * IP address takeover related functions
708 */
709static void qeth_l3_clear_ipato_list(struct qeth_card *card)
710{
712 struct qeth_ipato_entry *ipatoe, *tmp;
713 unsigned long flags;
714
715 spin_lock_irqsave(&card->ip_lock, flags);
716 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
717 list_del(&ipatoe->entry);
718 kfree(ipatoe);
719 }
720 spin_unlock_irqrestore(&card->ip_lock, flags);
721}
722
723int qeth_l3_add_ipato_entry(struct qeth_card *card,
724 struct qeth_ipato_entry *new)
725{
726 struct qeth_ipato_entry *ipatoe;
727 unsigned long flags;
728 int rc = 0;
729
730 QETH_DBF_TEXT(trace, 2, "addipato");
731 spin_lock_irqsave(&card->ip_lock, flags);
732 list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
733 if (ipatoe->proto != new->proto)
734 continue;
735 if (!memcmp(ipatoe->addr, new->addr,
736 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
737 (ipatoe->mask_bits == new->mask_bits)) {
738 PRINT_WARN("ipato entry already exists!\n");
739 rc = -EEXIST;
740 break;
741 }
742 }
743 if (!rc)
744 list_add_tail(&new->entry, &card->ipato.entries);
745
746 spin_unlock_irqrestore(&card->ip_lock, flags);
747 return rc;
748}
749
750void qeth_l3_del_ipato_entry(struct qeth_card *card,
751 enum qeth_prot_versions proto, u8 *addr, int mask_bits)
752{
753 struct qeth_ipato_entry *ipatoe, *tmp;
754 unsigned long flags;
755
756 QETH_DBF_TEXT(trace, 2, "delipato");
757 spin_lock_irqsave(&card->ip_lock, flags);
758 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
759 if (ipatoe->proto != proto)
760 continue;
761 if (!memcmp(ipatoe->addr, addr,
762 (proto == QETH_PROT_IPV4)? 4:16) &&
763 (ipatoe->mask_bits == mask_bits)) {
764 list_del(&ipatoe->entry);
765 kfree(ipatoe);
766 }
767 }
768 spin_unlock_irqrestore(&card->ip_lock, flags);
769}
770
771/*
772 * VIPA related functions
773 */
774int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
775 const u8 *addr)
776{
777 struct qeth_ipaddr *ipaddr;
778 unsigned long flags;
779 int rc = 0;
780
781 ipaddr = qeth_l3_get_addr_buffer(proto);
782 if (ipaddr) {
783 if (proto == QETH_PROT_IPV4) {
784 QETH_DBF_TEXT(trace, 2, "addvipa4");
785 memcpy(&ipaddr->u.a4.addr, addr, 4);
786 ipaddr->u.a4.mask = 0;
787 } else if (proto == QETH_PROT_IPV6) {
788 QETH_DBF_TEXT(trace, 2, "addvipa6");
789 memcpy(&ipaddr->u.a6.addr, addr, 16);
790 ipaddr->u.a6.pfxlen = 0;
791 }
792 ipaddr->type = QETH_IP_TYPE_VIPA;
793 ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
794 ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
795 } else
796 return -ENOMEM;
797 spin_lock_irqsave(&card->ip_lock, flags);
798 if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
799 qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
800 rc = -EEXIST;
801 spin_unlock_irqrestore(&card->ip_lock, flags);
802 if (rc) {
803 PRINT_WARN("Cannot add VIPA. Address already exists!\n");
804 return rc;
805 }
806 if (!qeth_l3_add_ip(card, ipaddr))
807 kfree(ipaddr);
808 qeth_l3_set_ip_addr_list(card);
809 return rc;
810}
811
812void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
813 const u8 *addr)
814{
815 struct qeth_ipaddr *ipaddr;
816
817 ipaddr = qeth_l3_get_addr_buffer(proto);
818 if (ipaddr) {
819 if (proto == QETH_PROT_IPV4) {
820 QETH_DBF_TEXT(trace, 2, "delvipa4");
821 memcpy(&ipaddr->u.a4.addr, addr, 4);
822 ipaddr->u.a4.mask = 0;
823 } else if (proto == QETH_PROT_IPV6) {
824 QETH_DBF_TEXT(trace, 2, "delvipa6");
825 memcpy(&ipaddr->u.a6.addr, addr, 16);
826 ipaddr->u.a6.pfxlen = 0;
827 }
828 ipaddr->type = QETH_IP_TYPE_VIPA;
829 } else
830 return;
831 if (!qeth_l3_delete_ip(card, ipaddr))
832 kfree(ipaddr);
833 qeth_l3_set_ip_addr_list(card);
834}
835
836/*
837 * proxy ARP related functions
838 */
839int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
840 const u8 *addr)
841{
842 struct qeth_ipaddr *ipaddr;
843 unsigned long flags;
844 int rc = 0;
845
846 ipaddr = qeth_l3_get_addr_buffer(proto);
847 if (ipaddr) {
848 if (proto == QETH_PROT_IPV4) {
849 QETH_DBF_TEXT(trace, 2, "addrxip4");
850 memcpy(&ipaddr->u.a4.addr, addr, 4);
851 ipaddr->u.a4.mask = 0;
852 } else if (proto == QETH_PROT_IPV6) {
853 QETH_DBF_TEXT(trace, 2, "addrxip6");
854 memcpy(&ipaddr->u.a6.addr, addr, 16);
855 ipaddr->u.a6.pfxlen = 0;
856 }
857 ipaddr->type = QETH_IP_TYPE_RXIP;
858 ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
859 ipaddr->del_flags = 0;
860 } else
861 return -ENOMEM;
862 spin_lock_irqsave(&card->ip_lock, flags);
863 if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
864 qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
865 rc = -EEXIST;
866 spin_unlock_irqrestore(&card->ip_lock, flags);
867 if (rc) {
868 PRINT_WARN("Cannot add RXIP. Address already exists!\n");
869 return rc;
870 }
871 if (!qeth_l3_add_ip(card, ipaddr))
872 kfree(ipaddr);
873 qeth_l3_set_ip_addr_list(card);
874 return 0;
875}
876
877void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
878 const u8 *addr)
879{
880 struct qeth_ipaddr *ipaddr;
881
882 ipaddr = qeth_l3_get_addr_buffer(proto);
883 if (ipaddr) {
884 if (proto == QETH_PROT_IPV4) {
885 QETH_DBF_TEXT(trace, 2, "addrxip4");
886 memcpy(&ipaddr->u.a4.addr, addr, 4);
887 ipaddr->u.a4.mask = 0;
888 } else if (proto == QETH_PROT_IPV6) {
889 QETH_DBF_TEXT(trace, 2, "addrxip6");
890 memcpy(&ipaddr->u.a6.addr, addr, 16);
891 ipaddr->u.a6.pfxlen = 0;
892 }
893 ipaddr->type = QETH_IP_TYPE_RXIP;
894 } else
895 return;
896 if (!qeth_l3_delete_ip(card, ipaddr))
897 kfree(ipaddr);
898 qeth_l3_set_ip_addr_list(card);
899}
900
901static int qeth_l3_register_addr_entry(struct qeth_card *card,
902 struct qeth_ipaddr *addr)
903{
904 char buf[50];
905 int rc = 0;
906 int cnt = 3;
907
908 if (addr->proto == QETH_PROT_IPV4) {
909 QETH_DBF_TEXT(trace, 2, "setaddr4");
910 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
911 } else if (addr->proto == QETH_PROT_IPV6) {
912 QETH_DBF_TEXT(trace, 2, "setaddr6");
913 QETH_DBF_HEX(trace, 3, &addr->u.a6.addr, 8);
914 QETH_DBF_HEX(trace, 3, ((char *)&addr->u.a6.addr) + 8, 8);
915 } else {
916 QETH_DBF_TEXT(trace, 2, "setaddr?");
917 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
918 }
919 do {
920 if (addr->is_multicast)
921 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM);
922 else
923 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP,
924 addr->set_flags);
925 if (rc)
926 QETH_DBF_TEXT(trace, 2, "failed");
927 } while ((--cnt > 0) && rc);
928 if (rc) {
929 QETH_DBF_TEXT(trace, 2, "FAILED");
930 qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
931 PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n",
932 buf, rc, rc);
933 }
934 return rc;
935}
936
937static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
938 struct qeth_ipaddr *addr)
939{
940 int rc = 0;
941
942 if (addr->proto == QETH_PROT_IPV4) {
943 QETH_DBF_TEXT(trace, 2, "deladdr4");
944 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
945 } else if (addr->proto == QETH_PROT_IPV6) {
946 QETH_DBF_TEXT(trace, 2, "deladdr6");
947 QETH_DBF_HEX(trace, 3, &addr->u.a6.addr, 8);
948 QETH_DBF_HEX(trace, 3, ((char *)&addr->u.a6.addr) + 8, 8);
949 } else {
950 QETH_DBF_TEXT(trace, 2, "deladdr?");
951 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
952 }
953 if (addr->is_multicast)
954 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM);
955 else
956 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP,
957 addr->del_flags);
958 if (rc) {
959 QETH_DBF_TEXT(trace, 2, "failed");
960 /* TODO: re-activate this warning as soon as we have a
961 * clean micro code
962 qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
963 PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
964 buf, rc);
965 */
966 }
967
968 return rc;
969}
970
971static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
972{
973 if (cast_type == RTN_MULTICAST)
974 return QETH_CAST_MULTICAST;
975 if (cast_type == RTN_BROADCAST)
976 return QETH_CAST_BROADCAST;
977 return QETH_CAST_UNICAST;
978}
979
980static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
981{
982 u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
983 if (cast_type == RTN_MULTICAST)
984 return ct | QETH_CAST_MULTICAST;
985 if (cast_type == RTN_ANYCAST)
986 return ct | QETH_CAST_ANYCAST;
987 if (cast_type == RTN_BROADCAST)
988 return ct | QETH_CAST_BROADCAST;
989 return ct | QETH_CAST_UNICAST;
990}
991
992static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command,
993 __u32 mode)
994{
995 int rc;
996 struct qeth_cmd_buffer *iob;
997 struct qeth_ipa_cmd *cmd;
998
999 QETH_DBF_TEXT(trace, 4, "adpmode");
1000
1001 iob = qeth_get_adapter_cmd(card, command,
1002 sizeof(struct qeth_ipacmd_setadpparms));
1003 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1004 cmd->data.setadapterparms.data.mode = mode;
1005 rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
1006 NULL);
1007 return rc;
1008}
1009
1010static int qeth_l3_setadapter_hstr(struct qeth_card *card)
1011{
1012 int rc;
1013
1014 QETH_DBF_TEXT(trace, 4, "adphstr");
1015
1016 if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) {
1017 rc = qeth_l3_send_setadp_mode(card,
1018 IPA_SETADP_SET_BROADCAST_MODE,
1019 card->options.broadcast_mode);
1020 if (rc)
1021 PRINT_WARN("couldn't set broadcast mode on "
1022 "device %s: x%x\n",
1023 CARD_BUS_ID(card), rc);
1024 rc = qeth_l3_send_setadp_mode(card,
1025 IPA_SETADP_ALTER_MAC_ADDRESS,
1026 card->options.macaddr_mode);
1027 if (rc)
1028 PRINT_WARN("couldn't set macaddr mode on "
1029 "device %s: x%x\n", CARD_BUS_ID(card), rc);
1030 return rc;
1031 }
1032 if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
1033 PRINT_WARN("set adapter parameters not available "
1034 "to set broadcast mode, using ALLRINGS "
1035 "on device %s:\n", CARD_BUS_ID(card));
1036 if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
1037 PRINT_WARN("set adapter parameters not available "
1038 "to set macaddr mode, using NONCANONICAL "
1039 "on device %s:\n", CARD_BUS_ID(card));
1040 return 0;
1041}
1042
1043static int qeth_l3_setadapter_parms(struct qeth_card *card)
1044{
1045 int rc;
1046
1047 QETH_DBF_TEXT(setup, 2, "setadprm");
1048
1049 if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
1050 PRINT_WARN("set adapter parameters not supported "
1051 "on device %s.\n",
1052 CARD_BUS_ID(card));
1053 QETH_DBF_TEXT(setup, 2, " notsupp");
1054 return 0;
1055 }
1056 rc = qeth_query_setadapterparms(card);
1057 if (rc) {
1058 PRINT_WARN("couldn't set adapter parameters on device %s: "
1059 "x%x\n", CARD_BUS_ID(card), rc);
1060 return rc;
1061 }
1062 if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
1063 rc = qeth_setadpparms_change_macaddr(card);
1064 if (rc)
1065 PRINT_WARN("couldn't get MAC address on "
1066 "device %s: x%x\n",
1067 CARD_BUS_ID(card), rc);
1068 }
1069
1070 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
1071 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
1072 rc = qeth_l3_setadapter_hstr(card);
1073
1074 return rc;
1075}
1076
1077static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
1078 struct qeth_reply *reply, unsigned long data)
1079{
1080 struct qeth_ipa_cmd *cmd;
1081
1082 QETH_DBF_TEXT(trace, 4, "defadpcb");
1083
1084 cmd = (struct qeth_ipa_cmd *) data;
1085 if (cmd->hdr.return_code == 0) {
1086 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
1087 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
1088 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
1089 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
1090 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
1091 }
1092 if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
1093 cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
1094 card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
1095 QETH_DBF_TEXT_(trace, 3, "csum:%d", card->info.csum_mask);
1096 }
1097 return 0;
1098}
1099
1100static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
1101 struct qeth_card *card, enum qeth_ipa_funcs ipa_func, __u16 cmd_code,
1102 __u16 len, enum qeth_prot_versions prot)
1103{
1104 struct qeth_cmd_buffer *iob;
1105 struct qeth_ipa_cmd *cmd;
1106
1107 QETH_DBF_TEXT(trace, 4, "getasscm");
1108 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
1109
1110 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1111 cmd->data.setassparms.hdr.assist_no = ipa_func;
1112 cmd->data.setassparms.hdr.length = 8 + len;
1113 cmd->data.setassparms.hdr.command_code = cmd_code;
1114 cmd->data.setassparms.hdr.return_code = 0;
1115 cmd->data.setassparms.hdr.seq_no = 0;
1116
1117 return iob;
1118}
1119
1120static int qeth_l3_send_setassparms(struct qeth_card *card,
1121 struct qeth_cmd_buffer *iob, __u16 len, long data,
1122 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
1123 unsigned long),
1124 void *reply_param)
1125{
1126 int rc;
1127 struct qeth_ipa_cmd *cmd;
1128
1129 QETH_DBF_TEXT(trace, 4, "sendassp");
1130
1131 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1132 if (len <= sizeof(__u32))
1133 cmd->data.setassparms.data.flags_32bit = (__u32) data;
1134 else /* (len > sizeof(__u32)) */
1135 memcpy(&cmd->data.setassparms.data, (void *) data, len);
1136
1137 rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
1138 return rc;
1139}
1140
1141#ifdef CONFIG_QETH_IPV6
1142static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
1143 enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
1144{
1145 int rc;
1146 struct qeth_cmd_buffer *iob;
1147
1148 QETH_DBF_TEXT(trace, 4, "simassp6");
1149 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
1150 0, QETH_PROT_IPV6);
1151 rc = qeth_l3_send_setassparms(card, iob, 0, 0,
1152 qeth_l3_default_setassparms_cb, NULL);
1153 return rc;
1154}
1155#endif
1156
1157static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
1158 enum qeth_ipa_funcs ipa_func, __u16 cmd_code, long data)
1159{
1160 int rc;
1161 int length = 0;
1162 struct qeth_cmd_buffer *iob;
1163
1164 QETH_DBF_TEXT(trace, 4, "simassp4");
1165 if (data)
1166 length = sizeof(__u32);
1167 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
1168 length, QETH_PROT_IPV4);
1169 rc = qeth_l3_send_setassparms(card, iob, length, data,
1170 qeth_l3_default_setassparms_cb, NULL);
1171 return rc;
1172}
1173
1174static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
1175{
1176 int rc;
1177
1178 QETH_DBF_TEXT(trace, 3, "ipaarp");
1179
1180 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
1181 PRINT_WARN("ARP processing not supported "
1182 "on %s!\n", QETH_CARD_IFNAME(card));
1183 return 0;
1184 }
1185 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
1186 IPA_CMD_ASS_START, 0);
1187 if (rc) {
1188 PRINT_WARN("Could not start ARP processing "
1189 "assist on %s: 0x%x\n",
1190 QETH_CARD_IFNAME(card), rc);
1191 }
1192 return rc;
1193}
1194
1195static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card)
1196{
1197 int rc;
1198
1199 QETH_DBF_TEXT(trace, 3, "ipaipfrg");
1200
1201 if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
1202 PRINT_INFO("Hardware IP fragmentation not supported on %s\n",
1203 QETH_CARD_IFNAME(card));
1204 return -EOPNOTSUPP;
1205 }
1206
1207 rc = qeth_l3_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
1208 IPA_CMD_ASS_START, 0);
1209 if (rc) {
1210 PRINT_WARN("Could not start Hardware IP fragmentation "
1211 "assist on %s: 0x%x\n",
1212 QETH_CARD_IFNAME(card), rc);
1213 } else
1214 PRINT_INFO("Hardware IP fragmentation enabled \n");
1215 return rc;
1216}
1217
1218static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
1219{
1220 int rc;
1221
1222 QETH_DBF_TEXT(trace, 3, "stsrcmac");
1223
1224 if (!card->options.fake_ll)
1225 return -EOPNOTSUPP;
1226
1227 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
1228 PRINT_INFO("Inbound source address not "
1229 "supported on %s\n", QETH_CARD_IFNAME(card));
1230 return -EOPNOTSUPP;
1231 }
1232
1233 rc = qeth_l3_send_simple_setassparms(card, IPA_SOURCE_MAC,
1234 IPA_CMD_ASS_START, 0);
1235 if (rc)
1236 PRINT_WARN("Could not start inbound source "
1237 "assist on %s: 0x%x\n",
1238 QETH_CARD_IFNAME(card), rc);
1239 return rc;
1240}
1241
1242static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
1243{
1244 int rc = 0;
1245
1246 QETH_DBF_TEXT(trace, 3, "strtvlan");
1247
1248 if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
1249 PRINT_WARN("VLAN not supported on %s\n",
1250 QETH_CARD_IFNAME(card));
1251 return -EOPNOTSUPP;
1252 }
1253
1254 rc = qeth_l3_send_simple_setassparms(card, IPA_VLAN_PRIO,
1255 IPA_CMD_ASS_START, 0);
1256 if (rc) {
1257 PRINT_WARN("Could not start vlan "
1258 "assist on %s: 0x%x\n",
1259 QETH_CARD_IFNAME(card), rc);
1260 } else {
1261		PRINT_INFO("VLAN enabled\n");
1262 }
1263 return rc;
1264}
1265
1266static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
1267{
1268 int rc;
1269
1270 QETH_DBF_TEXT(trace, 3, "stmcast");
1271
1272 if (!qeth_is_supported(card, IPA_MULTICASTING)) {
1273 PRINT_WARN("Multicast not supported on %s\n",
1274 QETH_CARD_IFNAME(card));
1275 return -EOPNOTSUPP;
1276 }
1277
1278 rc = qeth_l3_send_simple_setassparms(card, IPA_MULTICASTING,
1279 IPA_CMD_ASS_START, 0);
1280 if (rc) {
1281 PRINT_WARN("Could not start multicast "
1282 "assist on %s: rc=%i\n",
1283 QETH_CARD_IFNAME(card), rc);
1284 } else {
1285 PRINT_INFO("Multicast enabled\n");
1286 card->dev->flags |= IFF_MULTICAST;
1287 }
1288 return rc;
1289}
1290
1291static int qeth_l3_query_ipassists_cb(struct qeth_card *card,
1292 struct qeth_reply *reply, unsigned long data)
1293{
1294 struct qeth_ipa_cmd *cmd;
1295
1296 QETH_DBF_TEXT(setup, 2, "qipasscb");
1297
1298 cmd = (struct qeth_ipa_cmd *) data;
1299 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
1300 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
1301 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
1302 } else {
1303 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
1304 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
1305 }
1306 QETH_DBF_TEXT(setup, 2, "suppenbl");
1307 QETH_DBF_TEXT_(setup, 2, "%x", cmd->hdr.ipa_supported);
1308 QETH_DBF_TEXT_(setup, 2, "%x", cmd->hdr.ipa_enabled);
1309 return 0;
1310}
1311
1312static int qeth_l3_query_ipassists(struct qeth_card *card,
1313 enum qeth_prot_versions prot)
1314{
1315 int rc;
1316 struct qeth_cmd_buffer *iob;
1317
1318 QETH_DBF_TEXT_(setup, 2, "qipassi%i", prot);
1319 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
1320 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_query_ipassists_cb, NULL);
1321 return rc;
1322}
1323
1324#ifdef CONFIG_QETH_IPV6
1325static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
1326{
1327 int rc;
1328
1329 QETH_DBF_TEXT(trace, 3, "softipv6");
1330
1331 if (card->info.type == QETH_CARD_TYPE_IQD)
1332 goto out;
1333
1334 rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6);
1335 if (rc) {
1336 PRINT_ERR("IPv6 query ipassist failed on %s\n",
1337 QETH_CARD_IFNAME(card));
1338 return rc;
1339 }
1340 rc = qeth_l3_send_simple_setassparms(card, IPA_IPV6,
1341 IPA_CMD_ASS_START, 3);
1342 if (rc) {
1343 PRINT_WARN("IPv6 start assist (version 4) failed "
1344 "on %s: 0x%x\n",
1345 QETH_CARD_IFNAME(card), rc);
1346 return rc;
1347 }
1348 rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6,
1349 IPA_CMD_ASS_START);
1350 if (rc) {
1351		PRINT_WARN("IPv6 start assist (version 6) failed "
1352 "on %s: 0x%x\n",
1353 QETH_CARD_IFNAME(card), rc);
1354 return rc;
1355 }
1356 rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
1357 IPA_CMD_ASS_START);
1358 if (rc) {
1359 PRINT_WARN("Could not enable passthrough "
1360 "on %s: 0x%x\n",
1361 QETH_CARD_IFNAME(card), rc);
1362 return rc;
1363 }
1364out:
1365	PRINT_INFO("IPv6 enabled\n");
1366 return 0;
1367}
1368#endif
1369
1370static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
1371{
1372 int rc = 0;
1373
1374 QETH_DBF_TEXT(trace, 3, "strtipv6");
1375
1376 if (!qeth_is_supported(card, IPA_IPV6)) {
1377 PRINT_WARN("IPv6 not supported on %s\n",
1378 QETH_CARD_IFNAME(card));
1379 return 0;
1380 }
1381#ifdef CONFIG_QETH_IPV6
1382 rc = qeth_l3_softsetup_ipv6(card);
1383#endif
1384	return rc;
1385}
1386
1387static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
1388{
1389 int rc;
1390
1391 QETH_DBF_TEXT(trace, 3, "stbrdcst");
1392 card->info.broadcast_capable = 0;
1393 if (!qeth_is_supported(card, IPA_FILTERING)) {
1394 PRINT_WARN("Broadcast not supported on %s\n",
1395 QETH_CARD_IFNAME(card));
1396 rc = -EOPNOTSUPP;
1397 goto out;
1398 }
1399 rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
1400 IPA_CMD_ASS_START, 0);
1401 if (rc) {
1402		PRINT_WARN("Could not enable broadcast filtering "
1403 "on %s: 0x%x\n",
1404 QETH_CARD_IFNAME(card), rc);
1405 goto out;
1406 }
1407
1408 rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
1409 IPA_CMD_ASS_CONFIGURE, 1);
1410 if (rc) {
1411 PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n",
1412 QETH_CARD_IFNAME(card), rc);
1413 goto out;
1414 }
1415 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
1416	PRINT_INFO("Broadcast enabled\n");
1417 rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
1418 IPA_CMD_ASS_ENABLE, 1);
1419 if (rc) {
1420 PRINT_WARN("Could not set up broadcast echo filtering on "
1421 "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc);
1422 goto out;
1423 }
1424 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
1425out:
1426 if (card->info.broadcast_capable)
1427 card->dev->flags |= IFF_BROADCAST;
1428 else
1429 card->dev->flags &= ~IFF_BROADCAST;
1430 return rc;
1431}
1432
1433static int qeth_l3_send_checksum_command(struct qeth_card *card)
1434{
1435 int rc;
1436
1437 rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
1438 IPA_CMD_ASS_START, 0);
1439 if (rc) {
1440 PRINT_WARN("Starting Inbound HW Checksumming failed on %s: "
1441			"0x%x,\ncontinuing with Inbound SW Checksumming\n",
1442 QETH_CARD_IFNAME(card), rc);
1443 return rc;
1444 }
1445 rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
1446 IPA_CMD_ASS_ENABLE,
1447 card->info.csum_mask);
1448 if (rc) {
1449 PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: "
1450			"0x%x,\ncontinuing with Inbound SW Checksumming\n",
1451 QETH_CARD_IFNAME(card), rc);
1452 return rc;
1453 }
1454 return 0;
1455}
1456
1457static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
1458{
1459 int rc = 0;
1460
1461 QETH_DBF_TEXT(trace, 3, "strtcsum");
1462
1463 if (card->options.checksum_type == NO_CHECKSUMMING) {
1464 PRINT_WARN("Using no checksumming on %s.\n",
1465 QETH_CARD_IFNAME(card));
1466 return 0;
1467 }
1468 if (card->options.checksum_type == SW_CHECKSUMMING) {
1469 PRINT_WARN("Using SW checksumming on %s.\n",
1470 QETH_CARD_IFNAME(card));
1471 return 0;
1472 }
1473 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
1474 PRINT_WARN("Inbound HW Checksumming not "
1475 "supported on %s,\ncontinuing "
1476			   "with Inbound SW Checksumming\n",
1477 QETH_CARD_IFNAME(card));
1478 card->options.checksum_type = SW_CHECKSUMMING;
1479 return 0;
1480 }
1481 rc = qeth_l3_send_checksum_command(card);
1482 if (!rc)
1483		PRINT_INFO("HW Checksumming (inbound) enabled\n");
1484
1485 return rc;
1486}
1487
1488static int qeth_l3_start_ipa_tso(struct qeth_card *card)
1489{
1490 int rc;
1491
1492 QETH_DBF_TEXT(trace, 3, "sttso");
1493
1494 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
1495 PRINT_WARN("Outbound TSO not supported on %s\n",
1496 QETH_CARD_IFNAME(card));
1497 rc = -EOPNOTSUPP;
1498 } else {
1499 rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
1500 IPA_CMD_ASS_START, 0);
1501 if (rc)
1502 PRINT_WARN("Could not start outbound TSO "
1503 "assist on %s: rc=%i\n",
1504 QETH_CARD_IFNAME(card), rc);
1505 else
1506 PRINT_INFO("Outbound TSO enabled\n");
1507 }
1508 if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) {
1509 card->options.large_send = QETH_LARGE_SEND_NO;
1510 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
1511 }
1512 return rc;
1513}
1514
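/*
 * Start all IP assists the card offers. Each helper above only logs a
 * failure and returns, so a missing assist does not prevent the
 * remaining ones from being started ("go on").
 */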
1515static int qeth_l3_start_ipassists(struct qeth_card *card)
1516{
1517 QETH_DBF_TEXT(trace, 3, "strtipas");
1518	qeth_l3_start_ipa_arp_processing(card);	/* go on */
1519	qeth_l3_start_ipa_ip_fragmentation(card);	/* go on */
1520	qeth_l3_start_ipa_source_mac(card);	/* go on */
1521	qeth_l3_start_ipa_vlan(card);		/* go on */
1522	qeth_l3_start_ipa_multicast(card);	/* go on */
1523	qeth_l3_start_ipa_ipv6(card);		/* go on */
1524	qeth_l3_start_ipa_broadcast(card);	/* go on */
1525	qeth_l3_start_ipa_checksum(card);	/* go on */
1526	qeth_l3_start_ipa_tso(card);		/* go on */
1527 return 0;
1528}
1529
1530static int qeth_l3_put_unique_id(struct qeth_card *card)
1531{
1533 int rc = 0;
1534 struct qeth_cmd_buffer *iob;
1535 struct qeth_ipa_cmd *cmd;
1536
1537 QETH_DBF_TEXT(trace, 2, "puniqeid");
1538
1539 if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
1540 UNIQUE_ID_NOT_BY_CARD)
1541 return -1;
1542 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
1543 QETH_PROT_IPV6);
1544 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1545 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1546 card->info.unique_id;
1547 memcpy(&cmd->data.create_destroy_addr.unique_id[0],
1548 card->dev->dev_addr, OSA_ADDR_LEN);
1549 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
1550 return rc;
1551}
1552
1553static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
1554 struct qeth_reply *reply, unsigned long data)
1555{
1556 struct qeth_ipa_cmd *cmd;
1557
1558 cmd = (struct qeth_ipa_cmd *) data;
1559 if (cmd->hdr.return_code == 0)
1560 memcpy(card->dev->dev_addr,
1561 cmd->data.create_destroy_addr.unique_id, ETH_ALEN);
1562 else
1563 random_ether_addr(card->dev->dev_addr);
1564
1565 return 0;
1566}
1567
1568static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
1569{
1570 int rc = 0;
1571 struct qeth_cmd_buffer *iob;
1572 struct qeth_ipa_cmd *cmd;
1573
1574 QETH_DBF_TEXT(setup, 2, "hsrmac");
1575
1576 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
1577 QETH_PROT_IPV6);
1578 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1579 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1580 card->info.unique_id;
1581
1582 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb,
1583 NULL);
1584 return rc;
1585}
1586
1587static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
1588 struct qeth_reply *reply, unsigned long data)
1589{
1590 struct qeth_ipa_cmd *cmd;
1591
1592 cmd = (struct qeth_ipa_cmd *) data;
1593 if (cmd->hdr.return_code == 0)
1594 card->info.unique_id = *((__u16 *)
1595 &cmd->data.create_destroy_addr.unique_id[6]);
1596 else {
1597 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
1598 UNIQUE_ID_NOT_BY_CARD;
1599		PRINT_WARN("Could not get a unique ID from the card on device "
1600			"%s (result=x%x), using default ID. IPv6 "
1601			"autoconfig on other LPARs may lead to duplicate "
1602			"IP addresses. Please use manually "
1603			"configured ones.\n",
1604 CARD_BUS_ID(card), cmd->hdr.return_code);
1605 }
1606 return 0;
1607}
1608
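/*
 * Ask the card for a unique id (used as dev_id for IPv6 address
 * autoconfiguration). If the card cannot provide one, a default id is
 * used and flagged with UNIQUE_ID_NOT_BY_CARD so it is never handed
 * back via qeth_l3_put_unique_id().
 */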
1609static int qeth_l3_get_unique_id(struct qeth_card *card)
1610{
1611 int rc = 0;
1612 struct qeth_cmd_buffer *iob;
1613 struct qeth_ipa_cmd *cmd;
1614
1615 QETH_DBF_TEXT(setup, 2, "guniqeid");
1616
1617 if (!qeth_is_supported(card, IPA_IPV6)) {
1618 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
1619 UNIQUE_ID_NOT_BY_CARD;
1620 return 0;
1621 }
1622
1623 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
1624 QETH_PROT_IPV6);
1625 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1626 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1627 card->info.unique_id;
1628
1629 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL);
1630 return rc;
1631}
1632
1633static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
1634 struct net_device *dev)
1635{
1636 if (dev->type == ARPHRD_IEEE802_TR)
1637 ip_tr_mc_map(ipm, mac);
1638 else
1639 ip_eth_mc_map(ipm, mac);
1640}
1641
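/*
 * Register all IPv4 multicast addresses of @in4_dev with the card's
 * IP list. The caller must hold in4_dev->mc_list_lock while the
 * mc_list is walked.
 */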
1642static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
1643{
1644 struct qeth_ipaddr *ipm;
1645 struct ip_mc_list *im4;
1646 char buf[MAX_ADDR_LEN];
1647
1648 QETH_DBF_TEXT(trace, 4, "addmc");
1649 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
1650 qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
1651 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
1652 if (!ipm)
1653 continue;
1654 ipm->u.a4.addr = im4->multiaddr;
1655 memcpy(ipm->mac, buf, OSA_ADDR_LEN);
1656 ipm->is_multicast = 1;
1657 if (!qeth_l3_add_ip(card, ipm))
1658 kfree(ipm);
1659 }
1660}
1661
1662static void qeth_l3_add_vlan_mc(struct qeth_card *card)
1663{
1664 struct in_device *in_dev;
1665 struct vlan_group *vg;
1666 int i;
1667
1668 QETH_DBF_TEXT(trace, 4, "addmcvl");
1669 if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
1670 return;
1671
1672 vg = card->vlangrp;
1673 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
1674 struct net_device *netdev = vlan_group_get_device(vg, i);
1675 if (netdev == NULL ||
1676 !(netdev->flags & IFF_UP))
1677 continue;
1678 in_dev = in_dev_get(netdev);
1679 if (!in_dev)
1680 continue;
1681 read_lock(&in_dev->mc_list_lock);
1682 qeth_l3_add_mc(card, in_dev);
1683 read_unlock(&in_dev->mc_list_lock);
1684 in_dev_put(in_dev);
1685 }
1686}
1687
1688static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
1689{
1690 struct in_device *in4_dev;
1691
1692 QETH_DBF_TEXT(trace, 4, "chkmcv4");
1693 in4_dev = in_dev_get(card->dev);
1694 if (in4_dev == NULL)
1695 return;
1696 read_lock(&in4_dev->mc_list_lock);
1697 qeth_l3_add_mc(card, in4_dev);
1698 qeth_l3_add_vlan_mc(card);
1699 read_unlock(&in4_dev->mc_list_lock);
1700 in_dev_put(in4_dev);
1701}
1702
1703#ifdef CONFIG_QETH_IPV6
1704static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
1705{
1706 struct qeth_ipaddr *ipm;
1707 struct ifmcaddr6 *im6;
1708 char buf[MAX_ADDR_LEN];
1709
1710 QETH_DBF_TEXT(trace, 4, "addmc6");
1711 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
1712 ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
1713 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
1714 if (!ipm)
1715 continue;
1716 ipm->is_multicast = 1;
1717 memcpy(ipm->mac, buf, OSA_ADDR_LEN);
1718 memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
1719 sizeof(struct in6_addr));
1720 if (!qeth_l3_add_ip(card, ipm))
1721 kfree(ipm);
1722 }
1723}
1724
1725static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
1726{
1727 struct inet6_dev *in_dev;
1728 struct vlan_group *vg;
1729 int i;
1730
1731 QETH_DBF_TEXT(trace, 4, "admc6vl");
1732 if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
1733 return;
1734
1735 vg = card->vlangrp;
1736 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
1737 struct net_device *netdev = vlan_group_get_device(vg, i);
1738 if (netdev == NULL ||
1739 !(netdev->flags & IFF_UP))
1740 continue;
1741 in_dev = in6_dev_get(netdev);
1742 if (!in_dev)
1743 continue;
1744 read_lock_bh(&in_dev->lock);
1745 qeth_l3_add_mc6(card, in_dev);
1746 read_unlock_bh(&in_dev->lock);
1747 in6_dev_put(in_dev);
1748 }
1749}
1750
1751static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
1752{
1753 struct inet6_dev *in6_dev;
1754
1755 QETH_DBF_TEXT(trace, 4, "chkmcv6");
1756 if (!qeth_is_supported(card, IPA_IPV6))
1757		return;
1758 in6_dev = in6_dev_get(card->dev);
1759 if (in6_dev == NULL)
1760 return;
1761 read_lock_bh(&in6_dev->lock);
1762 qeth_l3_add_mc6(card, in6_dev);
1763 qeth_l3_add_vlan_mc6(card);
1764 read_unlock_bh(&in6_dev->lock);
1765 in6_dev_put(in6_dev);
1766}
1767#endif /* CONFIG_QETH_IPV6 */
1768
1769static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
1770 unsigned short vid)
1771{
1772 struct in_device *in_dev;
1773 struct in_ifaddr *ifa;
1774 struct qeth_ipaddr *addr;
1775
1776 QETH_DBF_TEXT(trace, 4, "frvaddr4");
1777
1778 in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid));
1779 if (!in_dev)
1780 return;
1781 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1782 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
1783 if (addr) {
1784 addr->u.a4.addr = ifa->ifa_address;
1785 addr->u.a4.mask = ifa->ifa_mask;
1786 addr->type = QETH_IP_TYPE_NORMAL;
1787 if (!qeth_l3_delete_ip(card, addr))
1788 kfree(addr);
1789 }
1790 }
1791 in_dev_put(in_dev);
1792}
1793
1794static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
1795 unsigned short vid)
1796{
1797#ifdef CONFIG_QETH_IPV6
1798 struct inet6_dev *in6_dev;
1799 struct inet6_ifaddr *ifa;
1800 struct qeth_ipaddr *addr;
1801
1802 QETH_DBF_TEXT(trace, 4, "frvaddr6");
1803
1804 in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
1805 if (!in6_dev)
1806 return;
1807 for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next) {
1808 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
1809 if (addr) {
1810 memcpy(&addr->u.a6.addr, &ifa->addr,
1811 sizeof(struct in6_addr));
1812 addr->u.a6.pfxlen = ifa->prefix_len;
1813 addr->type = QETH_IP_TYPE_NORMAL;
1814 if (!qeth_l3_delete_ip(card, addr))
1815 kfree(addr);
1816 }
1817 }
1818 in6_dev_put(in6_dev);
1819#endif /* CONFIG_QETH_IPV6 */
1820}
1821
1822static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
1823 unsigned short vid)
1824{
1825 if (!card->vlangrp)
1826 return;
1827 qeth_l3_free_vlan_addresses4(card, vid);
1828 qeth_l3_free_vlan_addresses6(card, vid);
1829}
1830
1831static void qeth_l3_vlan_rx_register(struct net_device *dev,
1832 struct vlan_group *grp)
1833{
1834 struct qeth_card *card = netdev_priv(dev);
1835 unsigned long flags;
1836
1837 QETH_DBF_TEXT(trace, 4, "vlanreg");
1838 spin_lock_irqsave(&card->vlanlock, flags);
1839 card->vlangrp = grp;
1840 spin_unlock_irqrestore(&card->vlanlock, flags);
1841}
1842
1843static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1844{
1845 struct net_device *vlandev;
1846	struct qeth_card *card = netdev_priv(dev);
1847 struct in_device *in_dev;
1848
1849 if (card->info.type == QETH_CARD_TYPE_IQD)
1850 return;
1851
1852	vlandev = vlan_group_get_device(card->vlangrp, vid);
	/* the vid slot or the inet device may be missing; guard before
	 * dereferencing */
	if (!vlandev)
		return;
1853	vlandev->neigh_setup = qeth_l3_neigh_setup;
1854
1855	in_dev = in_dev_get(vlandev);
	if (!in_dev)
		return;
1856#ifdef CONFIG_SYSCTL
1857 neigh_sysctl_unregister(in_dev->arp_parms);
1858#endif
1859 neigh_parms_release(&arp_tbl, in_dev->arp_parms);
1860
1861 in_dev->arp_parms = neigh_parms_alloc(vlandev, &arp_tbl);
1862#ifdef CONFIG_SYSCTL
1863 neigh_sysctl_register(vlandev, in_dev->arp_parms, NET_IPV4,
1864 NET_IPV4_NEIGH, "ipv4", NULL, NULL);
1865#endif
1866 in_dev_put(in_dev);
1868}
1869
1870static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1871{
1872 struct qeth_card *card = netdev_priv(dev);
1873 unsigned long flags;
1874
1875 QETH_DBF_TEXT_(trace, 4, "kid:%d", vid);
1876 spin_lock_irqsave(&card->vlanlock, flags);
1877 /* unregister IP addresses of vlan device */
1878 qeth_l3_free_vlan_addresses(card, vid);
1879 vlan_group_set_device(card->vlangrp, vid, NULL);
1880 spin_unlock_irqrestore(&card->vlanlock, flags);
1881 qeth_l3_set_multicast_list(card->dev);
1882}
1883
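/*
 * Inbound layer-3 frames arrive without a link-level header. Rebuild
 * a fake one ("FAKELL") so that packet sockets see a complete frame,
 * derive the target address from the cast flags, and extract the VLAN
 * id and checksum status from the qeth header. Returns the VLAN id,
 * or 0 for untagged frames.
 */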
1884static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
1885 struct sk_buff *skb, struct qeth_hdr *hdr)
1886{
1887 unsigned short vlan_id = 0;
1888 __be16 prot;
1889 struct iphdr *ip_hdr;
1890 unsigned char tg_addr[MAX_ADDR_LEN];
1891
1892 if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
1893		prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
1894 ETH_P_IP);
1895 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
1896 case QETH_CAST_MULTICAST:
1897 switch (prot) {
1898#ifdef CONFIG_QETH_IPV6
1899 case __constant_htons(ETH_P_IPV6):
1900				ndisc_mc_map((struct in6_addr *)
1901					(skb->data + 24),
1902 tg_addr, card->dev, 0);
1903 break;
1904#endif
1905 case __constant_htons(ETH_P_IP):
1906 ip_hdr = (struct iphdr *)skb->data;
1907			if (card->dev->type == ARPHRD_IEEE802_TR)
1908				ip_tr_mc_map(ip_hdr->daddr, tg_addr);
1909			else
				ip_eth_mc_map(ip_hdr->daddr, tg_addr);
1910 break;
1911 default:
1912 memcpy(tg_addr, card->dev->broadcast,
1913 card->dev->addr_len);
1914 }
1915 card->stats.multicast++;
1916 skb->pkt_type = PACKET_MULTICAST;
1917 break;
1918 case QETH_CAST_BROADCAST:
1919 memcpy(tg_addr, card->dev->broadcast,
1920 card->dev->addr_len);
1921 card->stats.multicast++;
1922 skb->pkt_type = PACKET_BROADCAST;
1923 break;
1924 case QETH_CAST_UNICAST:
1925 case QETH_CAST_ANYCAST:
1926 case QETH_CAST_NOCAST:
1927 default:
1928 skb->pkt_type = PACKET_HOST;
1929 memcpy(tg_addr, card->dev->dev_addr,
1930 card->dev->addr_len);
1931 }
1932 card->dev->header_ops->create(skb, card->dev, prot, tg_addr,
1933 "FAKELL", card->dev->addr_len);
1934 }
1935
1936#ifdef CONFIG_TR
1937 if (card->dev->type == ARPHRD_IEEE802_TR)
1938 skb->protocol = tr_type_trans(skb, card->dev);
1939 else
1940#endif
1941 skb->protocol = eth_type_trans(skb, card->dev);
1942
1943 if (hdr->hdr.l3.ext_flags &
1944 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
1945		vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
1946 hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
1947 }
1948
1949 skb->ip_summed = card->options.checksum_type;
1950 if (card->options.checksum_type == HW_CHECKSUMMING) {
1951 if ((hdr->hdr.l3.ext_flags &
1952 (QETH_HDR_EXT_CSUM_HDR_REQ |
1953 QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
1954 (QETH_HDR_EXT_CSUM_HDR_REQ |
1955 QETH_HDR_EXT_CSUM_TRANSP_REQ))
1956 skb->ip_summed = CHECKSUM_UNNECESSARY;
1957 else
1958 skb->ip_summed = SW_CHECKSUMMING;
1959 }
1960
1961 return vlan_id;
1962}
1963
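/*
 * Walk all skbs contained in one inbound QDIO buffer, rebuild their
 * headers and hand them to the stack; VLAN-tagged frames go through
 * vlan_hwaccel_rx(), everything else through netif_rx(). Frames of
 * unknown header type are dropped and traced.
 */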
1964static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
1965 struct qeth_qdio_buffer *buf, int index)
1966{
1967 struct qdio_buffer_element *element;
1968 struct sk_buff *skb;
1969 struct qeth_hdr *hdr;
1970 int offset;
1971 __u16 vlan_tag = 0;
1972 unsigned int len;
1973
1974 /* get first element of current buffer */
1975 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
1976 offset = 0;
1977 if (card->options.performance_stats)
1978 card->perf_stats.bufs_rec++;
1979 while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element,
1980 &offset, &hdr))) {
1981 skb->dev = card->dev;
1982 /* is device UP ? */
1983 if (!(card->dev->flags & IFF_UP)) {
1984 dev_kfree_skb_any(skb);
1985 continue;
1986 }
1987
1988 switch (hdr->hdr.l3.id) {
1989 case QETH_HEADER_TYPE_LAYER3:
1990 vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr);
1991 len = skb->len;
1992			if (vlan_tag) {
1993				if (card->vlangrp)
1994					vlan_hwaccel_rx(skb, card->vlangrp,
1995						vlan_tag);
1996				else {
1997					dev_kfree_skb_any(skb);
1998					continue;
1999				}
2000			} else
2001				netif_rx(skb);
2002 break;
2003 default:
2004 dev_kfree_skb_any(skb);
2005 QETH_DBF_TEXT(trace, 3, "inbunkno");
2006 QETH_DBF_HEX(control, 3, hdr, QETH_DBF_CONTROL_LEN);
2007 continue;
2008 }
2009
2010 card->dev->last_rx = jiffies;
2011 card->stats.rx_packets++;
2012 card->stats.rx_bytes += len;
2013 }
2014}
2015
2016static int qeth_l3_verify_vlan_dev(struct net_device *dev,
2017 struct qeth_card *card)
2018{
2019 int rc = 0;
2020 struct vlan_group *vg;
2021 int i;
2022
2023 vg = card->vlangrp;
2024 if (!vg)
2025 return rc;
2026
2027 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
2028 if (vlan_group_get_device(vg, i) == dev) {
2029 rc = QETH_VLAN_CARD;
2030 break;
2031 }
2032 }
2033
2034 if (rc && !(netdev_priv(vlan_dev_info(dev)->real_dev) == (void *)card))
2035 return 0;
2036
2037 return rc;
2038}
2039
2040static int qeth_l3_verify_dev(struct net_device *dev)
2041{
2042 struct qeth_card *card;
2043 unsigned long flags;
2044 int rc = 0;
2045
2046 read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
2047 list_for_each_entry(card, &qeth_core_card_list.list, list) {
2048 if (card->dev == dev) {
2049 rc = QETH_REAL_CARD;
2050 break;
2051 }
2052 rc = qeth_l3_verify_vlan_dev(dev, card);
2053 if (rc)
2054 break;
2055 }
2056 read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
2057
2058 return rc;
2059}
2060
2061static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
2062{
2063 struct qeth_card *card = NULL;
2064 int rc;
2065
2066 rc = qeth_l3_verify_dev(dev);
2067 if (rc == QETH_REAL_CARD)
2068 card = netdev_priv(dev);
2069 else if (rc == QETH_VLAN_CARD)
2070 card = netdev_priv(vlan_dev_info(dev)->real_dev);
2071	if (card && card->options.layer2)
2072		card = NULL;
2073	QETH_DBF_TEXT_(trace, 4, "%d", rc);
2074	return card;
2075}
2076
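/*
 * Tear the card down along its state machine: UP -> SOFTSETUP ->
 * HARDSETUP -> DOWN. Each if-block below falls through to the next
 * state, so the function can be entered from any of these states.
 */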
2077static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
2078{
2079 int rc = 0;
2080
2081 QETH_DBF_TEXT(setup, 2, "stopcard");
2082 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
2083
2084 qeth_set_allowed_threads(card, 0, 1);
2085 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
2086 return -ERESTARTSYS;
2087 if (card->read.state == CH_STATE_UP &&
2088 card->write.state == CH_STATE_UP &&
2089 (card->state == CARD_STATE_UP)) {
2090 if (recovery_mode)
2091 qeth_l3_stop(card->dev);
2092 if (!card->use_hard_stop) {
2093 rc = qeth_send_stoplan(card);
2094 if (rc)
2095 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
2096 }
2097 card->state = CARD_STATE_SOFTSETUP;
2098 }
2099 if (card->state == CARD_STATE_SOFTSETUP) {
2100 qeth_l3_clear_ip_list(card, !card->use_hard_stop, 1);
2101 qeth_clear_ipacmd_list(card);
2102 card->state = CARD_STATE_HARDSETUP;
2103 }
2104 if (card->state == CARD_STATE_HARDSETUP) {
2105 if (!card->use_hard_stop &&
2106 (card->info.type != QETH_CARD_TYPE_IQD)) {
2107 rc = qeth_l3_put_unique_id(card);
2108 if (rc)
2109 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
2110 }
2111 qeth_qdio_clear_card(card, 0);
2112 qeth_clear_qdio_buffers(card);
2113 qeth_clear_working_pool_list(card);
2114 card->state = CARD_STATE_DOWN;
2115 }
2116 if (card->state == CARD_STATE_DOWN) {
2117 qeth_clear_cmd_buffers(&card->read);
2118 qeth_clear_cmd_buffers(&card->write);
2119 }
2120 card->use_hard_stop = 0;
2121 return rc;
2122}
2123
2124static void qeth_l3_set_multicast_list(struct net_device *dev)
2125{
2126 struct qeth_card *card = netdev_priv(dev);
2127
2128 QETH_DBF_TEXT(trace, 3, "setmulti");
2129 qeth_l3_delete_mc_addresses(card);
2130 qeth_l3_add_multicast_ipv4(card);
2131#ifdef CONFIG_QETH_IPV6
2132 qeth_l3_add_multicast_ipv6(card);
2133#endif
2134 qeth_l3_set_ip_addr_list(card);
2135 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
2136 return;
2137 qeth_setadp_promisc_mode(card);
2138}
2139
2140static const char *qeth_l3_arp_get_error_cause(int *rc)
2141{
2142 switch (*rc) {
2143 case QETH_IPA_ARP_RC_FAILED:
2144 *rc = -EIO;
2145 return "operation failed";
2146 case QETH_IPA_ARP_RC_NOTSUPP:
2147 *rc = -EOPNOTSUPP;
2148 return "operation not supported";
2149 case QETH_IPA_ARP_RC_OUT_OF_RANGE:
2150 *rc = -EINVAL;
2151 return "argument out of range";
2152 case QETH_IPA_ARP_RC_Q_NOTSUPP:
2153 *rc = -EOPNOTSUPP;
2154 return "query operation not supported";
2155 case QETH_IPA_ARP_RC_Q_NO_DATA:
2156 *rc = -ENOENT;
2157 return "no query data available";
2158 default:
2159 return "unknown error";
2160 }
2161}
2162
2163static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
2164{
2165 int tmp;
2166 int rc;
2167
2168 QETH_DBF_TEXT(trace, 3, "arpstnoe");
2169
2170 /*
2171 * currently GuestLAN only supports the ARP assist function
2172 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
2173 * thus we say EOPNOTSUPP for this ARP function
2174 */
2175 if (card->info.guestlan)
2176 return -EOPNOTSUPP;
2177 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
2178 PRINT_WARN("ARP processing not supported "
2179 "on %s!\n", QETH_CARD_IFNAME(card));
2180 return -EOPNOTSUPP;
2181 }
2182 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
2183 IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
2184 no_entries);
2185 if (rc) {
2186 tmp = rc;
2187 PRINT_WARN("Could not set number of ARP entries on %s: "
2188 "%s (0x%x/%d)\n", QETH_CARD_IFNAME(card),
2189 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2190 }
2191 return rc;
2192}
2193
2194static void qeth_l3_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
2195 struct qeth_arp_query_data *qdata, int entry_size,
2196 int uentry_size)
2197{
2198 char *entry_ptr;
2199 char *uentry_ptr;
2200 int i;
2201
2202 entry_ptr = (char *)&qdata->data;
2203 uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
2204 for (i = 0; i < qdata->no_entries; ++i) {
2205 /* strip off 32 bytes "media specific information" */
2206 memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
2207 entry_ptr += entry_size;
2208 uentry_ptr += uentry_size;
2209 }
2210}
2211
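/*
 * Callback for IPA_CMD_ASS_ARP_QUERY_INFO. The card may answer with
 * several replies; returning 1 keeps the callback registered until
 * seq_no reaches number_of_replies. On completion the first 4 bytes
 * of the user buffer receive the entry count and 2 bytes at
 * QETH_QARP_MASK_OFFSET the reply_bits mask (presumably offsets 4 and
 * 6 for the entries, matching the 6-byte request header read in
 * qeth_l3_arp_query() below).
 */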
2212static int qeth_l3_arp_query_cb(struct qeth_card *card,
2213 struct qeth_reply *reply, unsigned long data)
2214{
2215 struct qeth_ipa_cmd *cmd;
2216 struct qeth_arp_query_data *qdata;
2217 struct qeth_arp_query_info *qinfo;
2218 int entry_size;
2219 int uentry_size;
2220 int i;
2221
2222 QETH_DBF_TEXT(trace, 4, "arpquecb");
2223
2224 qinfo = (struct qeth_arp_query_info *) reply->param;
2225 cmd = (struct qeth_ipa_cmd *) data;
2226 if (cmd->hdr.return_code) {
2227 QETH_DBF_TEXT_(trace, 4, "qaer1%i", cmd->hdr.return_code);
2228 return 0;
2229 }
2230 if (cmd->data.setassparms.hdr.return_code) {
2231 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
2232 QETH_DBF_TEXT_(trace, 4, "qaer2%i", cmd->hdr.return_code);
2233 return 0;
2234 }
2235 qdata = &cmd->data.setassparms.data.query_arp;
2236 switch (qdata->reply_bits) {
2237 case 5:
2238 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
2239 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
2240 uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
2241 break;
2242 case 7:
2243 /* fall through to default */
2244 default:
2245 /* tr is the same as eth -> entry7 */
2246 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
2247 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
2248 uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
2249 break;
2250 }
2251 /* check if there is enough room in userspace */
2252	if ((qinfo->udata_len - qinfo->udata_offset) <
2253			qdata->no_entries * uentry_size) {
2254 QETH_DBF_TEXT_(trace, 4, "qaer3%i", -ENOMEM);
2255 cmd->hdr.return_code = -ENOMEM;
2256		PRINT_WARN("Query ARP user space buffer is too small for "
2257			"the returned number of ARP entries. "
2258			"Aborting query!\n");
2259 goto out_error;
2260 }
2261 QETH_DBF_TEXT_(trace, 4, "anore%i",
2262 cmd->data.setassparms.hdr.number_of_replies);
2263 QETH_DBF_TEXT_(trace, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
2264 QETH_DBF_TEXT_(trace, 4, "anoen%i", qdata->no_entries);
2265
2266 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
2267 /* strip off "media specific information" */
2268 qeth_l3_copy_arp_entries_stripped(qinfo, qdata, entry_size,
2269 uentry_size);
2270 } else
2271 /*copy entries to user buffer*/
2272 memcpy(qinfo->udata + qinfo->udata_offset,
2273 (char *)&qdata->data, qdata->no_entries*uentry_size);
2274
2275 qinfo->no_entries += qdata->no_entries;
2276 qinfo->udata_offset += (qdata->no_entries*uentry_size);
2277 /* check if all replies received ... */
2278 if (cmd->data.setassparms.hdr.seq_no <
2279 cmd->data.setassparms.hdr.number_of_replies)
2280 return 1;
2281 memcpy(qinfo->udata, &qinfo->no_entries, 4);
2282 /* keep STRIP_ENTRIES flag so the user program can distinguish
2283 * stripped entries from normal ones */
2284 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
2285 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
2286 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
2287 return 0;
2288out_error:
2289 i = 0;
2290 memcpy(qinfo->udata, &i, 4);
2291 return 0;
2292}
2293
2294static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card,
2295 struct qeth_cmd_buffer *iob, int len,
2296 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
2297 unsigned long),
2298 void *reply_param)
2299{
2300 QETH_DBF_TEXT(trace, 4, "sendarp");
2301
2302 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
2303 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
2304 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2305 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
2306 reply_cb, reply_param);
2307}
2308
2309static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
2310{
2311 struct qeth_cmd_buffer *iob;
2312 struct qeth_arp_query_info qinfo = {0, };
2313 int tmp;
2314 int rc;
2315
2316 QETH_DBF_TEXT(trace, 3, "arpquery");
2317
2318	if (!qeth_is_supported(card, /*IPA_QUERY_ARP_ADDR_INFO*/
2319		IPA_ARP_PROCESSING)) {
2320 PRINT_WARN("ARP processing not supported "
2321 "on %s!\n", QETH_CARD_IFNAME(card));
2322 return -EOPNOTSUPP;
2323 }
2324 /* get size of userspace buffer and mask_bits -> 6 bytes */
2325 if (copy_from_user(&qinfo, udata, 6))
2326 return -EFAULT;
2327 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
2328 if (!qinfo.udata)
2329 return -ENOMEM;
2330 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
2331 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
2332 IPA_CMD_ASS_ARP_QUERY_INFO,
2333 sizeof(int), QETH_PROT_IPV4);
2334
2335 rc = qeth_l3_send_ipa_arp_cmd(card, iob,
2336 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
2337 qeth_l3_arp_query_cb, (void *)&qinfo);
2338 if (rc) {
2339 tmp = rc;
2340 PRINT_WARN("Error while querying ARP cache on %s: %s "
2341 "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
2342 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2343 if (copy_to_user(udata, qinfo.udata, 4))
2344 rc = -EFAULT;
2345 } else {
2346 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
2347 rc = -EFAULT;
2348 }
2349 kfree(qinfo.udata);
2350 return rc;
2351}
2352
2353static int qeth_l3_arp_add_entry(struct qeth_card *card,
2354 struct qeth_arp_cache_entry *entry)
2355{
2356 struct qeth_cmd_buffer *iob;
2357 char buf[16];
2358 int tmp;
2359 int rc;
2360
2361 QETH_DBF_TEXT(trace, 3, "arpadent");
2362
2363 /*
2364 * currently GuestLAN only supports the ARP assist function
2365 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
2366 * thus we say EOPNOTSUPP for this ARP function
2367 */
2368 if (card->info.guestlan)
2369 return -EOPNOTSUPP;
2370 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
2371 PRINT_WARN("ARP processing not supported "
2372 "on %s!\n", QETH_CARD_IFNAME(card));
2373 return -EOPNOTSUPP;
2374 }
2375
2376 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
2377 IPA_CMD_ASS_ARP_ADD_ENTRY,
2378 sizeof(struct qeth_arp_cache_entry),
2379 QETH_PROT_IPV4);
2380 rc = qeth_l3_send_setassparms(card, iob,
2381 sizeof(struct qeth_arp_cache_entry),
2382 (unsigned long) entry,
2383 qeth_l3_default_setassparms_cb, NULL);
2384 if (rc) {
2385 tmp = rc;
2386 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
2387 PRINT_WARN("Could not add ARP entry for address %s on %s: "
2388 "%s (0x%x/%d)\n",
2389 buf, QETH_CARD_IFNAME(card),
2390 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2391 }
2392 return rc;
2393}
2394
2395static int qeth_l3_arp_remove_entry(struct qeth_card *card,
2396 struct qeth_arp_cache_entry *entry)
2397{
2398 struct qeth_cmd_buffer *iob;
2399 char buf[16] = {0, };
2400 int tmp;
2401 int rc;
2402
2403 QETH_DBF_TEXT(trace, 3, "arprment");
2404
2405 /*
2406 * currently GuestLAN only supports the ARP assist function
2407 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
2408 * thus we say EOPNOTSUPP for this ARP function
2409 */
2410 if (card->info.guestlan)
2411 return -EOPNOTSUPP;
2412 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
2413 PRINT_WARN("ARP processing not supported "
2414 "on %s!\n", QETH_CARD_IFNAME(card));
2415 return -EOPNOTSUPP;
2416 }
2417 memcpy(buf, entry, 12);
2418 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
2419 IPA_CMD_ASS_ARP_REMOVE_ENTRY,
2420 12,
2421 QETH_PROT_IPV4);
2422 rc = qeth_l3_send_setassparms(card, iob,
2423 12, (unsigned long)buf,
2424 qeth_l3_default_setassparms_cb, NULL);
2425 if (rc) {
2426 tmp = rc;
2427 memset(buf, 0, 16);
2428 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
2429 PRINT_WARN("Could not delete ARP entry for address %s on %s: "
2430 "%s (0x%x/%d)\n",
2431 buf, QETH_CARD_IFNAME(card),
2432 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2433 }
2434 return rc;
2435}
2436
2437static int qeth_l3_arp_flush_cache(struct qeth_card *card)
2438{
2439 int rc;
2440 int tmp;
2441
2442 QETH_DBF_TEXT(trace, 3, "arpflush");
2443
2444 /*
2445 * currently GuestLAN only supports the ARP assist function
2446 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
2447 * thus we say EOPNOTSUPP for this ARP function
2448 */
2449 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
2450 return -EOPNOTSUPP;
2451 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
2452 PRINT_WARN("ARP processing not supported "
2453 "on %s!\n", QETH_CARD_IFNAME(card));
2454 return -EOPNOTSUPP;
2455 }
2456 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
2457 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
2458 if (rc) {
2459 tmp = rc;
2460 PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n",
2461 QETH_CARD_IFNAME(card),
2462 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2463 }
2464 return rc;
2465}
2466
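/*
 * Private ioctls of the l3 discipline. All ARP manipulation requests
 * require CAP_NET_ADMIN. A minimal user space sketch (hypothetical
 * variable names; the SIOC_QETH_* numbers come from the qeth header):
 *
 *	struct ifreq ifr;
 *	strcpy(ifr.ifr_name, "eth0");
 *	ifr.ifr_ifru.ifru_data = (void *) &query_buf;
 *	ioctl(sockfd, SIOC_QETH_ARP_QUERY_INFO, &ifr);
 */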
2467static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2468{
2469 struct qeth_card *card = netdev_priv(dev);
2470 struct qeth_arp_cache_entry arp_entry;
2471 struct mii_ioctl_data *mii_data;
2472 int rc = 0;
2473
2474 if (!card)
2475 return -ENODEV;
2476
2477 if ((card->state != CARD_STATE_UP) &&
2478 (card->state != CARD_STATE_SOFTSETUP))
2479 return -ENODEV;
2480
2481 switch (cmd) {
2482 case SIOC_QETH_ARP_SET_NO_ENTRIES:
2483 if (!capable(CAP_NET_ADMIN)) {
2484 rc = -EPERM;
2485 break;
2486 }
2487 rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
2488 break;
2489 case SIOC_QETH_ARP_QUERY_INFO:
2490 if (!capable(CAP_NET_ADMIN)) {
2491 rc = -EPERM;
2492 break;
2493 }
2494 rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
2495 break;
2496 case SIOC_QETH_ARP_ADD_ENTRY:
2497 if (!capable(CAP_NET_ADMIN)) {
2498 rc = -EPERM;
2499 break;
2500 }
2501 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
2502 sizeof(struct qeth_arp_cache_entry)))
2503 rc = -EFAULT;
2504 else
2505 rc = qeth_l3_arp_add_entry(card, &arp_entry);
2506 break;
2507 case SIOC_QETH_ARP_REMOVE_ENTRY:
2508 if (!capable(CAP_NET_ADMIN)) {
2509 rc = -EPERM;
2510 break;
2511 }
2512 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
2513 sizeof(struct qeth_arp_cache_entry)))
2514 rc = -EFAULT;
2515 else
2516 rc = qeth_l3_arp_remove_entry(card, &arp_entry);
2517 break;
2518 case SIOC_QETH_ARP_FLUSH_CACHE:
2519 if (!capable(CAP_NET_ADMIN)) {
2520 rc = -EPERM;
2521 break;
2522 }
2523 rc = qeth_l3_arp_flush_cache(card);
2524 break;
2525 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
2526 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
2527 break;
2528 case SIOC_QETH_GET_CARD_TYPE:
2529 if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
2530 !card->info.guestlan)
2531			return 1;
2532		return 0;
2534 case SIOCGMIIPHY:
2535 mii_data = if_mii(rq);
2536 mii_data->phy_id = 0;
2537 break;
2538 case SIOCGMIIREG:
2539 mii_data = if_mii(rq);
2540 if (mii_data->phy_id != 0)
2541 rc = -EINVAL;
2542 else
2543 mii_data->val_out = qeth_mdio_read(dev,
2544 mii_data->phy_id,
2545 mii_data->reg_num);
2546 break;
2547 default:
2548 rc = -EOPNOTSUPP;
2549 }
2550 if (rc)
2551 QETH_DBF_TEXT_(trace, 2, "ioce%d", rc);
2552 return rc;
2553}
2554
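/*
 * Build the qeth layer-3 header for an outbound skb: cast-type flags,
 * an optional VLAN tag, and the next-hop address taken from the
 * neighbour entry if one is attached, otherwise from the IP header
 * itself. Non-IP traffic is marked for passthrough.
 */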
2555static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2556 struct sk_buff *skb, int ipv, int cast_type)
2557{
2558 QETH_DBF_TEXT(trace, 6, "fillhdr");
2559
2560 memset(hdr, 0, sizeof(struct qeth_hdr));
2561 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
2562 hdr->hdr.l3.ext_flags = 0;
2563
2564	/*
2565	 * Record the VLAN tag before this location is overwritten with the
2566	 * next-hop IP: v6 uses passthrough, v4 sets the tag in the QDIO header.
2567	 */
2568 if (card->vlangrp && vlan_tx_tag_present(skb)) {
2569 hdr->hdr.l3.ext_flags = (ipv == 4) ?
2570 QETH_HDR_EXT_VLAN_FRAME :
2571 QETH_HDR_EXT_INCLUDE_VLAN_TAG;
2572 hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
2573 }
2574
2575 hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
2576 if (ipv == 4) {
2577 /* IPv4 */
2578 hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
2579 memset(hdr->hdr.l3.dest_addr, 0, 12);
2580 if ((skb->dst) && (skb->dst->neighbour)) {
2581 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
2582 *((u32 *) skb->dst->neighbour->primary_key);
2583 } else {
2584 /* fill in destination address used in ip header */
2585 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
2586 ip_hdr(skb)->daddr;
2587 }
2588 } else if (ipv == 6) {
2589 /* IPv6 */
2590 hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
2591 if (card->info.type == QETH_CARD_TYPE_IQD)
2592 hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
2593 if ((skb->dst) && (skb->dst->neighbour)) {
2594 memcpy(hdr->hdr.l3.dest_addr,
2595 skb->dst->neighbour->primary_key, 16);
2596 } else {
2597 /* fill in destination address used in ip header */
2598 memcpy(hdr->hdr.l3.dest_addr,
2599 &ipv6_hdr(skb)->daddr, 16);
2600 }
2601 } else {
2602 /* passthrough */
2603 if ((skb->dev->type == ARPHRD_IEEE802_TR) &&
2604 !memcmp(skb->data + sizeof(struct qeth_hdr) +
2605 sizeof(__u16), skb->dev->broadcast, 6)) {
2606 hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
2607 QETH_HDR_PASSTHRU;
2608 } else if (!memcmp(skb->data + sizeof(struct qeth_hdr),
2609 skb->dev->broadcast, 6)) {
2610 /* broadcast? */
2611 hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
2612 QETH_HDR_PASSTHRU;
2613 } else {
2614 hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
2615 QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
2616 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
2617 }
2618 }
2619}
2620
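/*
 * Transmit path: clone the skb with enough headroom for the qeth
 * (TSO) header and a VLAN tag, strip the ethernet header for IQD
 * (HiperSockets) devices, choose TSO or EDDP for large sends, and
 * hand the result to the priority queue selected for this packet.
 * NETDEV_TX_BUSY is returned only if the send reports -EBUSY.
 */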
2621static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2622{
2623 int rc;
2624 u16 *tag;
2625 struct qeth_hdr *hdr = NULL;
2626 int elements_needed = 0;
2627 struct qeth_card *card = netdev_priv(dev);
2628 struct sk_buff *new_skb = NULL;
2629 int ipv = qeth_get_ip_version(skb);
2630 int cast_type = qeth_get_cast_type(card, skb);
2631 struct qeth_qdio_out_q *queue = card->qdio.out_qs
2632 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
2633 int tx_bytes = skb->len;
2634 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
2635 struct qeth_eddp_context *ctx = NULL;
2636
2637 QETH_DBF_TEXT(trace, 6, "l3xmit");
2638
2639 if ((card->info.type == QETH_CARD_TYPE_IQD) &&
2640 (skb->protocol != htons(ETH_P_IPV6)) &&
2641 (skb->protocol != htons(ETH_P_IP)))
2642 goto tx_drop;
2643
2644 if ((card->state != CARD_STATE_UP) || !card->lan_online) {
2645 card->stats.tx_carrier_errors++;
2646 goto tx_drop;
2647 }
2648
2649 if ((cast_type == RTN_BROADCAST) &&
2650 (card->info.broadcast_capable == 0))
2651 goto tx_drop;
2652
2653 if (card->options.performance_stats) {
2654 card->perf_stats.outbound_cnt++;
2655 card->perf_stats.outbound_start_time = qeth_get_micros();
2656 }
2657
2658 /* create a clone with writeable headroom */
2659 new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) +
2660 VLAN_HLEN);
2661 if (!new_skb)
2662 goto tx_drop;
2663
2664 if (card->info.type == QETH_CARD_TYPE_IQD) {
2665 skb_pull(new_skb, ETH_HLEN);
2666 } else {
2667 if (new_skb->protocol == htons(ETH_P_IP)) {
2668 if (card->dev->type == ARPHRD_IEEE802_TR)
2669 skb_pull(new_skb, TR_HLEN);
2670 else
2671 skb_pull(new_skb, ETH_HLEN);
2672 }
2673
2674		if (new_skb->protocol == htons(ETH_P_IPV6) && card->vlangrp &&
2675 vlan_tx_tag_present(new_skb)) {
2676 skb_push(new_skb, VLAN_HLEN);
2677 skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
2678 skb_copy_to_linear_data_offset(new_skb, 4,
2679 new_skb->data + 8, 4);
2680 skb_copy_to_linear_data_offset(new_skb, 8,
2681 new_skb->data + 12, 4);
2682 tag = (u16 *)(new_skb->data + 12);
2683 *tag = __constant_htons(ETH_P_8021Q);
2684 *(tag + 1) = htons(vlan_tx_tag_get(new_skb));
2685 VLAN_TX_SKB_CB(new_skb)->magic = 0;
2686 }
2687 }
2688
2689 netif_stop_queue(dev);
2690
2691 if (skb_is_gso(new_skb))
2692 large_send = card->options.large_send;
2693
2694	/* fix hardware limitation: as long as we do not have sbal
2695	 * chaining we cannot send long frag lists, so we temporarily
2696	 * switch to EDDP
2697	 */
2698 if ((large_send == QETH_LARGE_SEND_TSO) &&
2699 ((skb_shinfo(new_skb)->nr_frags + 2) > 16))
2700 large_send = QETH_LARGE_SEND_EDDP;
2701
2702 if ((large_send == QETH_LARGE_SEND_TSO) &&
2703 (cast_type == RTN_UNSPEC)) {
2704 hdr = (struct qeth_hdr *)skb_push(new_skb,
2705 sizeof(struct qeth_hdr_tso));
2706 memset(hdr, 0, sizeof(struct qeth_hdr_tso));
2707 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
2708 qeth_tso_fill_header(card, hdr, new_skb);
2709 elements_needed++;
2710 } else {
2711 hdr = (struct qeth_hdr *)skb_push(new_skb,
2712 sizeof(struct qeth_hdr));
2713 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
2714 }
2715
2716 if (large_send == QETH_LARGE_SEND_EDDP) {
2717 /* new_skb is not owned by a socket so we use skb to get
2718 * the protocol
2719 */
2720 ctx = qeth_eddp_create_context(card, new_skb, hdr,
2721 skb->sk->sk_protocol);
2722 if (ctx == NULL) {
2723			PRINT_WARN("Could not create EDDP context\n");
2724 goto tx_drop;
2725 }
2726 } else {
2727 int elems = qeth_get_elements_no(card, (void *)hdr, new_skb,
2728 elements_needed);
2729 if (!elems)
2730 goto tx_drop;
2731 elements_needed += elems;
2732 }
2733
2734 if ((large_send == QETH_LARGE_SEND_NO) &&
2735 (new_skb->ip_summed == CHECKSUM_PARTIAL))
2736 qeth_tx_csum(new_skb);
2737
2738 if (card->info.type != QETH_CARD_TYPE_IQD)
2739 rc = qeth_do_send_packet(card, queue, new_skb, hdr,
2740 elements_needed, ctx);
2741 else
2742 rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
2743 elements_needed, ctx);
2744
2745 if (!rc) {
2746 card->stats.tx_packets++;
2747 card->stats.tx_bytes += tx_bytes;
2748 if (new_skb != skb)
2749 dev_kfree_skb_any(skb);
2750 if (card->options.performance_stats) {
2751 if (large_send != QETH_LARGE_SEND_NO) {
2752 card->perf_stats.large_send_bytes += tx_bytes;
2753 card->perf_stats.large_send_cnt++;
2754 }
2755 if (skb_shinfo(new_skb)->nr_frags > 0) {
2756 card->perf_stats.sg_skbs_sent++;
2757 /* nr_frags + skb->data */
2758 card->perf_stats.sg_frags_sent +=
2759 skb_shinfo(new_skb)->nr_frags + 1;
2760 }
2761 }
2762
2763 if (ctx != NULL) {
2764 qeth_eddp_put_context(ctx);
2765 dev_kfree_skb_any(new_skb);
2766 }
2767 } else {
2768 if (ctx != NULL)
2769 qeth_eddp_put_context(ctx);
2770
2771 if (rc == -EBUSY) {
2772 if (new_skb != skb)
2773 dev_kfree_skb_any(new_skb);
2774 return NETDEV_TX_BUSY;
2775 } else
2776 goto tx_drop;
2777 }
2778
2779 netif_wake_queue(dev);
2780 if (card->options.performance_stats)
2781 card->perf_stats.outbound_time += qeth_get_micros() -
2782 card->perf_stats.outbound_start_time;
2783 return rc;
2784
2785tx_drop:
2786 card->stats.tx_dropped++;
2787 card->stats.tx_errors++;
2788 if ((new_skb != skb) && new_skb)
2789 dev_kfree_skb_any(new_skb);
2790 dev_kfree_skb_any(skb);
2791 return NETDEV_TX_OK;
2792}
2793
2794static int qeth_l3_open(struct net_device *dev)
2795{
2796 struct qeth_card *card = netdev_priv(dev);
2797
2798 QETH_DBF_TEXT(trace, 4, "qethopen");
2799 if (card->state != CARD_STATE_SOFTSETUP)
2800 return -ENODEV;
2801 card->data.state = CH_STATE_UP;
2802 card->state = CARD_STATE_UP;
2803 card->dev->flags |= IFF_UP;
2804 netif_start_queue(dev);
2805
2806 if (!card->lan_online && netif_carrier_ok(dev))
2807 netif_carrier_off(dev);
2808 return 0;
2809}
2810
2811static int qeth_l3_stop(struct net_device *dev)
2812{
2813 struct qeth_card *card = netdev_priv(dev);
2814
2815 QETH_DBF_TEXT(trace, 4, "qethstop");
2816 netif_tx_disable(dev);
2817 card->dev->flags &= ~IFF_UP;
2818 if (card->state == CARD_STATE_UP)
2819 card->state = CARD_STATE_SOFTSETUP;
2820 return 0;
2821}
2822
2823static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev)
2824{
2825 struct qeth_card *card = netdev_priv(dev);
2826
2827 return (card->options.checksum_type == HW_CHECKSUMMING);
2828}
2829
2830static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data)
2831{
2832 struct qeth_card *card = netdev_priv(dev);
2833 enum qeth_card_states old_state;
2834 enum qeth_checksum_types csum_type;
2835
2836 if ((card->state != CARD_STATE_UP) &&
2837 (card->state != CARD_STATE_DOWN))
2838 return -EPERM;
2839
2840 if (data)
2841 csum_type = HW_CHECKSUMMING;
2842 else
2843 csum_type = SW_CHECKSUMMING;
2844
2845 if (card->options.checksum_type != csum_type) {
2846 old_state = card->state;
2847 if (card->state == CARD_STATE_UP)
2848 __qeth_l3_set_offline(card->gdev, 1);
2849 card->options.checksum_type = csum_type;
2850 if (old_state == CARD_STATE_UP)
2851 __qeth_l3_set_online(card->gdev, 1);
2852 }
2853 return 0;
2854}
2855
2856static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
2857{
2858 struct qeth_card *card = netdev_priv(dev);
2859
2860 if (data) {
2861 if (card->options.large_send == QETH_LARGE_SEND_NO) {
2862 if (card->info.type == QETH_CARD_TYPE_IQD)
2863 card->options.large_send = QETH_LARGE_SEND_EDDP;
2864 else
2865 card->options.large_send = QETH_LARGE_SEND_TSO;
2866 dev->features |= NETIF_F_TSO;
2867 }
2868 } else {
2869 dev->features &= ~NETIF_F_TSO;
2870 card->options.large_send = QETH_LARGE_SEND_NO;
2871 }
2872 return 0;
2873}
2874
2875static struct ethtool_ops qeth_l3_ethtool_ops = {
2876 .get_link = ethtool_op_get_link,
2877 .get_tx_csum = ethtool_op_get_tx_csum,
2878 .set_tx_csum = ethtool_op_set_tx_hw_csum,
2879 .get_rx_csum = qeth_l3_ethtool_get_rx_csum,
2880 .set_rx_csum = qeth_l3_ethtool_set_rx_csum,
2881 .get_sg = ethtool_op_get_sg,
2882 .set_sg = ethtool_op_set_sg,
2883 .get_tso = ethtool_op_get_tso,
2884 .set_tso = qeth_l3_ethtool_set_tso,
2885 .get_strings = qeth_core_get_strings,
2886 .get_ethtool_stats = qeth_core_get_ethtool_stats,
2887 .get_stats_count = qeth_core_get_stats_count,
2888 .get_drvinfo = qeth_core_get_drvinfo,
2889};
2890
2891/*
2892 * We need NOARP for IPv4, but we want neighbor solicitation for IPv6.
2893 * Setting NOARP on the netdevice is not an option because it also turns
2894 * off neighbor solicitation. For IPv4 we install a neighbor_setup
2895 * function instead: we don't want ARP resolution, but we do want the
2896 * hard header so that packet sockets (e.g. tcpdump) keep working.
2897 */
2898static int qeth_l3_neigh_setup_noarp(struct neighbour *n)
2899{
2900 n->nud_state = NUD_NOARP;
2901 memcpy(n->ha, "FAKELL", 6);
2902 n->output = n->ops->connected_output;
2903 return 0;
2904}
2905
2906static int
2907qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
2908{
2909 if (np->tbl->family == AF_INET)
2910 np->neigh_setup = qeth_l3_neigh_setup_noarp;
2911
2912 return 0;
2913}
2914
2915static int qeth_l3_setup_netdev(struct qeth_card *card)
2916{
2917 if (card->info.type == QETH_CARD_TYPE_OSAE) {
2918 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
2919 (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
2920#ifdef CONFIG_TR
2921 card->dev = alloc_trdev(0);
2922#endif
2923 if (!card->dev)
2924 return -ENODEV;
2925 } else {
2926 card->dev = alloc_etherdev(0);
2927 if (!card->dev)
2928 return -ENODEV;
2929 card->dev->neigh_setup = qeth_l3_neigh_setup;
2930
2931 /*IPv6 address autoconfiguration stuff*/
2932 qeth_l3_get_unique_id(card);
2933 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
2934 card->dev->dev_id = card->info.unique_id &
2935 0xffff;
2936 }
2937 } else if (card->info.type == QETH_CARD_TYPE_IQD) {
2938 card->dev = alloc_netdev(0, "hsi%d", ether_setup);
2939 if (!card->dev)
2940 return -ENODEV;
2941 card->dev->flags |= IFF_NOARP;
2942 qeth_l3_iqd_read_initial_mac(card);
2943 } else
2944 return -ENODEV;
2945
2946 card->dev->hard_start_xmit = qeth_l3_hard_start_xmit;
2947 card->dev->priv = card;
2948 card->dev->tx_timeout = &qeth_tx_timeout;
2949 card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
2950 card->dev->open = qeth_l3_open;
2951 card->dev->stop = qeth_l3_stop;
2952 card->dev->do_ioctl = qeth_l3_do_ioctl;
2953 card->dev->get_stats = qeth_get_stats;
2954 card->dev->change_mtu = qeth_change_mtu;
2955 card->dev->set_multicast_list = qeth_l3_set_multicast_list;
2956 card->dev->vlan_rx_register = qeth_l3_vlan_rx_register;
2957 card->dev->vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid;
2958 card->dev->vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid;
2959 card->dev->mtu = card->info.initial_mtu;
2960 SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
2961 card->dev->features |= NETIF_F_HW_VLAN_TX |
2962 NETIF_F_HW_VLAN_RX |
2963 NETIF_F_HW_VLAN_FILTER;
2964
2965 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
2966 return register_netdev(card->dev);
2967}
2968
2969static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
2970 unsigned int status, unsigned int qdio_err,
2971 unsigned int siga_err, unsigned int queue, int first_element,
2972 int count, unsigned long card_ptr)
2973{
2974 struct net_device *net_dev;
2975 struct qeth_card *card;
2976 struct qeth_qdio_buffer *buffer;
2977 int index;
2978 int i;
2979
2980 QETH_DBF_TEXT(trace, 6, "qdinput");
2981 card = (struct qeth_card *) card_ptr;
2982 net_dev = card->dev;
2983 if (card->options.performance_stats) {
2984 card->perf_stats.inbound_cnt++;
2985 card->perf_stats.inbound_start_time = qeth_get_micros();
2986 }
2987 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
2988 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
2989 QETH_DBF_TEXT(trace, 1, "qdinchk");
2990 QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card));
2991 QETH_DBF_TEXT_(trace, 1, "%04X%04X",
2992 first_element, count);
2993 QETH_DBF_TEXT_(trace, 1, "%04X%04X", queue, status);
2994 qeth_schedule_recovery(card);
2995 return;
2996 }
2997 }
2998 for (i = first_element; i < (first_element + count); ++i) {
2999 index = i % QDIO_MAX_BUFFERS_PER_Q;
3000 buffer = &card->qdio.in_q->bufs[index];
3001 if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) &&
3002 qeth_check_qdio_errors(buffer->buffer,
3003 qdio_err, siga_err, "qinerr")))
3004 qeth_l3_process_inbound_buffer(card, buffer, index);
3005 /* clear buffer and give back to hardware */
3006 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
3007 qeth_queue_input_buffer(card, index);
3008 }
3009 if (card->options.performance_stats)
3010 card->perf_stats.inbound_time += qeth_get_micros() -
3011 card->perf_stats.inbound_start_time;
3012}
3013
3014static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
3015{
3016 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3017
3018 qeth_l3_create_device_attributes(&gdev->dev);
3019 card->options.layer2 = 0;
3020 card->discipline.input_handler = (qdio_handler_t *)
3021 qeth_l3_qdio_input_handler;
3022 card->discipline.output_handler = (qdio_handler_t *)
3023 qeth_qdio_output_handler;
3024 card->discipline.recover = qeth_l3_recover;
3025 return 0;
3026}
3027
3028static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
3029{
3030 struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
3031
3032 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
3033
3034 if (cgdev->state == CCWGROUP_ONLINE) {
3035 card->use_hard_stop = 1;
3036 qeth_l3_set_offline(cgdev);
3037 }
3038
3039 if (card->dev) {
3040 unregister_netdev(card->dev);
3041 card->dev = NULL;
3042 }
3043
3044 qeth_l3_remove_device_attributes(&cgdev->dev);
3045 qeth_l3_clear_ip_list(card, 0, 0);
3046 qeth_l3_clear_ipato_list(card);
3047 return;
3048}
3049
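/*
 * Bring-up sequence: set the read/write/data subchannels online, run
 * the hard setup, then the soft setup (STARTLAN, adapter parameters,
 * IP assists, routing, QDIO queues). Hard-setup, netdev and QDIO
 * failures roll everything back via out_remove.
 */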
3050static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3051{
3052 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3053 int rc = 0;
3054 enum qeth_card_states recover_flag;
3055
3056 BUG_ON(!card);
3057 QETH_DBF_TEXT(setup, 2, "setonlin");
3058 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
3059
3060 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
3061 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) {
3062 PRINT_WARN("set_online of card %s interrupted by user!\n",
3063 CARD_BUS_ID(card));
3064 return -ERESTARTSYS;
3065 }
3066
3067 recover_flag = card->state;
3068 rc = ccw_device_set_online(CARD_RDEV(card));
3069 if (rc) {
3070 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3071 return -EIO;
3072 }
3073 rc = ccw_device_set_online(CARD_WDEV(card));
3074 if (rc) {
3075 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3076 return -EIO;
3077 }
3078 rc = ccw_device_set_online(CARD_DDEV(card));
3079 if (rc) {
3080 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3081 return -EIO;
3082 }
3083
3084 rc = qeth_core_hardsetup_card(card);
3085 if (rc) {
3086 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3087 goto out_remove;
3088 }
3089
3090 qeth_l3_query_ipassists(card, QETH_PROT_IPV4);
3091
3092 if (!card->dev && qeth_l3_setup_netdev(card))
3093 goto out_remove;
3094
3095 card->state = CARD_STATE_HARDSETUP;
3096 qeth_print_status_message(card);
3097
3098 /* softsetup */
3099 QETH_DBF_TEXT(setup, 2, "softsetp");
3100
3101 rc = qeth_send_startlan(card);
3102 if (rc) {
3103 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3104 if (rc == 0xe080) {
3105			PRINT_WARN("LAN on card %s is offline! "
3106 "Waiting for STARTLAN from card.\n",
3107 CARD_BUS_ID(card));
3108 card->lan_online = 0;
3109 }
3110 return rc;
3111 } else
3112 card->lan_online = 1;
3113 qeth_set_large_send(card, card->options.large_send);
3114
3115 rc = qeth_l3_setadapter_parms(card);
3116 if (rc)
3117 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3118 rc = qeth_l3_start_ipassists(card);
3119 if (rc)
3120 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
3121 rc = qeth_l3_setrouting_v4(card);
3122 if (rc)
3123 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
3124 rc = qeth_l3_setrouting_v6(card);
3125 if (rc)
3126 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3127 netif_tx_disable(card->dev);
3128
3129 rc = qeth_init_qdio_queues(card);
3130 if (rc) {
3131 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
3132 goto out_remove;
3133 }
3134 card->state = CARD_STATE_SOFTSETUP;
3135 netif_carrier_on(card->dev);
3136
3137 qeth_set_allowed_threads(card, 0xffffffff, 0);
3138 if ((recover_flag == CARD_STATE_RECOVER) && recovery_mode) {
3139 qeth_l3_open(card->dev);
3140 qeth_l3_set_multicast_list(card->dev);
3141 }
3142 /* let user_space know that device is online */
3143 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
3144 return 0;
3145out_remove:
3146 card->use_hard_stop = 1;
3147 qeth_l3_stop_card(card, 0);
3148 ccw_device_set_offline(CARD_DDEV(card));
3149 ccw_device_set_offline(CARD_WDEV(card));
3150 ccw_device_set_offline(CARD_RDEV(card));
3151 if (recover_flag == CARD_STATE_RECOVER)
3152 card->state = CARD_STATE_RECOVER;
3153 else
3154 card->state = CARD_STATE_DOWN;
3155 return -ENODEV;
3156}
3157
3158static int qeth_l3_set_online(struct ccwgroup_device *gdev)
3159{
3160 return __qeth_l3_set_online(gdev, 0);
3161}
3162
3163static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
3164 int recovery_mode)
3165{
3166 struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
3167 int rc = 0, rc2 = 0, rc3 = 0;
3168 enum qeth_card_states recover_flag;
3169
3170 QETH_DBF_TEXT(setup, 3, "setoffl");
3171 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
3172
3173 if (card->dev && netif_carrier_ok(card->dev))
3174 netif_carrier_off(card->dev);
3175 recover_flag = card->state;
3176 if (qeth_l3_stop_card(card, recovery_mode) == -ERESTARTSYS) {
3177 PRINT_WARN("Stopping card %s interrupted by user!\n",
3178 CARD_BUS_ID(card));
3179 return -ERESTARTSYS;
3180 }
3181 rc = ccw_device_set_offline(CARD_DDEV(card));
3182 rc2 = ccw_device_set_offline(CARD_WDEV(card));
3183 rc3 = ccw_device_set_offline(CARD_RDEV(card));
3184 if (!rc)
3185 rc = (rc2) ? rc2 : rc3;
3186 if (rc)
3187 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3188 if (recover_flag == CARD_STATE_UP)
3189 card->state = CARD_STATE_RECOVER;
3190 /* let user_space know that device is offline */
3191 kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
3192 return 0;
3193}
3194
3195static int qeth_l3_set_offline(struct ccwgroup_device *cgdev)
3196{
3197 return __qeth_l3_set_offline(cgdev, 0);
3198}
3199
3200static int qeth_l3_recover(void *ptr)
3201{
3202 struct qeth_card *card;
3203 int rc = 0;
3204
3205 card = (struct qeth_card *) ptr;
3206 QETH_DBF_TEXT(trace, 2, "recover1");
3207 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
3208 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
3209 return 0;
3210 QETH_DBF_TEXT(trace, 2, "recover2");
3211 PRINT_WARN("Recovery of device %s started ...\n",
3212 CARD_BUS_ID(card));
3213 card->use_hard_stop = 1;
3214 __qeth_l3_set_offline(card->gdev, 1);
3215 rc = __qeth_l3_set_online(card->gdev, 1);
3216 /* don't run another scheduled recovery */
3217 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
3218 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
3219 if (!rc)
3220 PRINT_INFO("Device %s successfully recovered!\n",
3221 CARD_BUS_ID(card));
3222 else
3223 PRINT_INFO("Device %s could not be recovered!\n",
3224 CARD_BUS_ID(card));
3225 return 0;
3226}
3227
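A side note on the recovery gating above: qeth_do_run_thread() and the qeth_clear_thread_*_bit() calls belong to the qeth core and are not shown in this hunk. The sketch below, with purely hypothetical names, illustrates the general start-bit/running-bit handoff that such gating implements, so that a recovery request raised while a pass is already in flight collapses into a no-op:

#include <linux/bitops.h>

#define REC_START	0	/* a recovery was requested */
#define REC_RUNNING	1	/* a recovery pass is in flight */

static unsigned long rec_flags;

/* returns 1 if the caller owns this recovery pass, 0 to bail out */
static int try_begin_recovery(void)
{
	if (!test_and_clear_bit(REC_START, &rec_flags))
		return 0;	/* nobody asked for recovery */
	if (test_and_set_bit(REC_RUNNING, &rec_flags))
		return 0;	/* another pass is already running */
	return 1;
}

static void end_recovery(void)
{
	clear_bit(REC_RUNNING, &rec_flags);
}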
3228static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
3229{
3230 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3231 qeth_l3_clear_ip_list(card, 0, 0);
3232 qeth_qdio_clear_card(card, 0);
3233 qeth_clear_qdio_buffers(card);
3234}
3235
3236struct ccwgroup_driver qeth_l3_ccwgroup_driver = {
3237 .probe = qeth_l3_probe_device,
3238 .remove = qeth_l3_remove_device,
3239 .set_online = qeth_l3_set_online,
3240 .set_offline = qeth_l3_set_offline,
3241 .shutdown = qeth_l3_shutdown,
3242};
3243EXPORT_SYMBOL_GPL(qeth_l3_ccwgroup_driver);
3244
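For orientation, and not part of this patch: the set_online and set_offline entries in the ccwgroup driver above are reached when user space writes the grouped device's online sysfs attribute. A minimal user-space sketch, assuming the usual /sys/bus/ccwgroup layout and a hypothetical bus id 0.0.f500:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_online(const char *busid, int online)
{
	char path[96];
	char val = online ? '1' : '0';
	int fd;

	snprintf(path, sizeof(path),
		 "/sys/bus/ccwgroup/devices/%s/online", busid);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return -1;
	}
	if (write(fd, &val, 1) != 1) {	/* drives qeth_l3_set_online() */
		perror("write");
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	return set_online("0.0.f500", 1) ? 1 : 0;	/* bus id is a placeholder */
}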
3245static int qeth_l3_ip_event(struct notifier_block *this,
3246 unsigned long event, void *ptr)
3247{
3248 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
3249 struct net_device *dev = (struct net_device *)ifa->ifa_dev->dev;
3250 struct qeth_ipaddr *addr;
3251 struct qeth_card *card;
3252
3253 if (dev->nd_net != &init_net)
3254 return NOTIFY_DONE;
3255
3256 QETH_DBF_TEXT(trace, 3, "ipevent");
3257 card = qeth_l3_get_card_from_dev(dev);
3258 if (!card)
3259 return NOTIFY_DONE;
3260
3261 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
3262 if (addr != NULL) {
3263 addr->u.a4.addr = ifa->ifa_address;
3264 addr->u.a4.mask = ifa->ifa_mask;
3265 addr->type = QETH_IP_TYPE_NORMAL;
3266 } else
3267 goto out;
3268
3269 switch (event) {
3270 case NETDEV_UP:
3271 if (!qeth_l3_add_ip(card, addr))
3272 kfree(addr);
3273 break;
3274 case NETDEV_DOWN:
3275 if (!qeth_l3_delete_ip(card, addr))
3276 kfree(addr);
3277 break;
3278 default:
3279 break;
3280 }
3281 qeth_l3_set_ip_addr_list(card);
3282out:
3283 return NOTIFY_DONE;
3284}
3285
 3286static struct notifier_block qeth_l3_ip_notifier = {
 3287	.notifier_call = qeth_l3_ip_event,
 3288	.next = NULL,
 3289};
3290
3291#ifdef CONFIG_QETH_IPV6
3292/**
3293 * IPv6 event handler
3294 */
3295static int qeth_l3_ip6_event(struct notifier_block *this,
3296 unsigned long event, void *ptr)
3297{
3298 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
3299 struct net_device *dev = (struct net_device *)ifa->idev->dev;
3300 struct qeth_ipaddr *addr;
3301 struct qeth_card *card;
3302
3303 QETH_DBF_TEXT(trace, 3, "ip6event");
3304
3305 card = qeth_l3_get_card_from_dev(dev);
3306 if (!card)
3307 return NOTIFY_DONE;
3308 if (!qeth_is_supported(card, IPA_IPV6))
3309 return NOTIFY_DONE;
3310
3311 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
3312 if (addr != NULL) {
3313 memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
3314 addr->u.a6.pfxlen = ifa->prefix_len;
3315 addr->type = QETH_IP_TYPE_NORMAL;
3316 } else
3317 goto out;
3318
3319 switch (event) {
3320 case NETDEV_UP:
3321 if (!qeth_l3_add_ip(card, addr))
3322 kfree(addr);
3323 break;
3324 case NETDEV_DOWN:
3325 if (!qeth_l3_delete_ip(card, addr))
3326 kfree(addr);
3327 break;
3328 default:
3329 break;
3330 }
3331 qeth_l3_set_ip_addr_list(card);
3332out:
3333 return NOTIFY_DONE;
3334}
3335
 3336static struct notifier_block qeth_l3_ip6_notifier = {
 3337	.notifier_call = qeth_l3_ip6_event,
 3338	.next = NULL,
 3339};
3340#endif
3341
3342static int qeth_l3_register_notifiers(void)
3343{
3344 int rc;
3345
3346 QETH_DBF_TEXT(trace, 5, "regnotif");
3347 rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
3348 if (rc)
3349 return rc;
3350#ifdef CONFIG_QETH_IPV6
3351 rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
3352 if (rc) {
3353 unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
3354 return rc;
3355 }
3356#else
 3357 PRINT_WARN("layer 3 discipline built without IPv6 support\n");
3358#endif
3359 return 0;
3360}
3361
3362static void qeth_l3_unregister_notifiers(void)
3363{
3364
3365 QETH_DBF_TEXT(trace, 5, "unregnot");
3366 BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
3367#ifdef CONFIG_QETH_IPV6
3368 BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
3369#endif /* QETH_IPV6 */
3370}
3371
3372static int __init qeth_l3_init(void)
3373{
3374 int rc = 0;
3375
3376 PRINT_INFO("register layer 3 discipline\n");
3377 rc = qeth_l3_register_notifiers();
3378 return rc;
3379}
3380
3381static void __exit qeth_l3_exit(void)
3382{
3383 qeth_l3_unregister_notifiers();
3384 PRINT_INFO("unregister layer 3 discipline\n");
3385}
3386
3387module_init(qeth_l3_init);
3388module_exit(qeth_l3_exit);
3389MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
3390MODULE_DESCRIPTION("qeth layer 3 discipline");
3391MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
new file mode 100644
index 000000000000..08f51fd902c4
--- /dev/null
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -0,0 +1,1051 @@
1/*
2 * drivers/s390/net/qeth_l3_sys.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#include "qeth_l3.h"
12
13#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
14struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
15
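A note on the helper macro above: it decouples the C identifier from the name the attribute gets in sysfs, which matters once several groups (ipa_takeover, vipa, rxip) each want short file names like add4 or del4. For illustration only (show_fn and store_fn are placeholders, not symbols from this patch), a use later in this file such as

	QETH_DEVICE_ATTR(ipato_enable, enable, 0644, show_fn, store_fn);

expands to

	struct device_attribute dev_attr_ipato_enable =
		__ATTR(enable, 0644, show_fn, store_fn);

so the file appears as ipa_takeover/enable in sysfs while the C symbol dev_attr_ipato_enable stays unique within the translation unit.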
16static const char *qeth_l3_get_checksum_str(struct qeth_card *card)
17{
18 if (card->options.checksum_type == SW_CHECKSUMMING)
19 return "sw";
20 else if (card->options.checksum_type == HW_CHECKSUMMING)
21 return "hw";
22 else
23 return "no";
24}
25
26static ssize_t qeth_l3_dev_route_show(struct qeth_card *card,
27 struct qeth_routing_info *route, char *buf)
28{
29 switch (route->type) {
30 case PRIMARY_ROUTER:
31 return sprintf(buf, "%s\n", "primary router");
32 case SECONDARY_ROUTER:
33 return sprintf(buf, "%s\n", "secondary router");
34 case MULTICAST_ROUTER:
35 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
36 return sprintf(buf, "%s\n", "multicast router+");
37 else
38 return sprintf(buf, "%s\n", "multicast router");
39 case PRIMARY_CONNECTOR:
40 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
41 return sprintf(buf, "%s\n", "primary connector+");
42 else
43 return sprintf(buf, "%s\n", "primary connector");
44 case SECONDARY_CONNECTOR:
45 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
46 return sprintf(buf, "%s\n", "secondary connector+");
47 else
48 return sprintf(buf, "%s\n", "secondary connector");
49 default:
50 return sprintf(buf, "%s\n", "no");
51 }
52}
53
54static ssize_t qeth_l3_dev_route4_show(struct device *dev,
55 struct device_attribute *attr, char *buf)
56{
57 struct qeth_card *card = dev_get_drvdata(dev);
58
59 if (!card)
60 return -EINVAL;
61
62 return qeth_l3_dev_route_show(card, &card->options.route4, buf);
63}
64
65static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
66 struct qeth_routing_info *route, enum qeth_prot_versions prot,
67 const char *buf, size_t count)
68{
69 enum qeth_routing_types old_route_type = route->type;
70 char *tmp;
 71 int rc = 0;
72
73 tmp = strsep((char **) &buf, "\n");
74
75 if (!strcmp(tmp, "no_router")) {
76 route->type = NO_ROUTER;
77 } else if (!strcmp(tmp, "primary_connector")) {
78 route->type = PRIMARY_CONNECTOR;
79 } else if (!strcmp(tmp, "secondary_connector")) {
80 route->type = SECONDARY_CONNECTOR;
81 } else if (!strcmp(tmp, "primary_router")) {
82 route->type = PRIMARY_ROUTER;
83 } else if (!strcmp(tmp, "secondary_router")) {
84 route->type = SECONDARY_ROUTER;
85 } else if (!strcmp(tmp, "multicast_router")) {
86 route->type = MULTICAST_ROUTER;
87 } else {
88 PRINT_WARN("Invalid routing type '%s'.\n", tmp);
89 return -EINVAL;
90 }
91 if (((card->state == CARD_STATE_SOFTSETUP) ||
92 (card->state == CARD_STATE_UP)) &&
93 (old_route_type != route->type)) {
94 if (prot == QETH_PROT_IPV4)
95 rc = qeth_l3_setrouting_v4(card);
96 else if (prot == QETH_PROT_IPV6)
97 rc = qeth_l3_setrouting_v6(card);
98 }
 99 return rc ? rc : count;
100}
101
102static ssize_t qeth_l3_dev_route4_store(struct device *dev,
103 struct device_attribute *attr, const char *buf, size_t count)
104{
105 struct qeth_card *card = dev_get_drvdata(dev);
106
107 if (!card)
108 return -EINVAL;
109
110 return qeth_l3_dev_route_store(card, &card->options.route4,
111 QETH_PROT_IPV4, buf, count);
112}
113
114static DEVICE_ATTR(route4, 0644, qeth_l3_dev_route4_show,
115 qeth_l3_dev_route4_store);
116
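As an illustration, again not part of the patch: the route4 attribute defined above accepts exactly the keywords parsed in qeth_l3_dev_route_store() and reports back the human-readable form built by qeth_l3_dev_route_show(). A small user-space sketch, with a hypothetical device path:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical bus id; substitute the real grouped device */
	const char *path = "/sys/bus/ccwgroup/devices/0.0.f500/route4";
	const char msg[] = "primary_router\n";
	char buf[64];
	ssize_t n;
	int fd;

	fd = open(path, O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, msg, sizeof(msg) - 1) < 0)
		perror("write");
	close(fd);

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("route4: %s", buf);	/* expect "primary router" */
	}
	close(fd);
	return 0;
}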
117static ssize_t qeth_l3_dev_route6_show(struct device *dev,
118 struct device_attribute *attr, char *buf)
119{
120 struct qeth_card *card = dev_get_drvdata(dev);
121
122 if (!card)
123 return -EINVAL;
124
125 if (!qeth_is_supported(card, IPA_IPV6))
126 return sprintf(buf, "%s\n", "n/a");
127
128 return qeth_l3_dev_route_show(card, &card->options.route6, buf);
129}
130
131static ssize_t qeth_l3_dev_route6_store(struct device *dev,
132 struct device_attribute *attr, const char *buf, size_t count)
133{
134 struct qeth_card *card = dev_get_drvdata(dev);
135
136 if (!card)
137 return -EINVAL;
138
139 if (!qeth_is_supported(card, IPA_IPV6)) {
140 PRINT_WARN("IPv6 not supported for interface %s.\n"
141 "Routing status no changed.\n",
142 QETH_CARD_IFNAME(card));
143 return -ENOTSUPP;
144 }
145
146 return qeth_l3_dev_route_store(card, &card->options.route6,
147 QETH_PROT_IPV6, buf, count);
148}
149
150static DEVICE_ATTR(route6, 0644, qeth_l3_dev_route6_show,
151 qeth_l3_dev_route6_store);
152
153static ssize_t qeth_l3_dev_fake_broadcast_show(struct device *dev,
154 struct device_attribute *attr, char *buf)
155{
156 struct qeth_card *card = dev_get_drvdata(dev);
157
158 if (!card)
159 return -EINVAL;
160
161 return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0);
162}
163
164static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
165 struct device_attribute *attr, const char *buf, size_t count)
166{
167 struct qeth_card *card = dev_get_drvdata(dev);
168 char *tmp;
169 int i;
170
171 if (!card)
172 return -EINVAL;
173
174 if ((card->state != CARD_STATE_DOWN) &&
175 (card->state != CARD_STATE_RECOVER))
176 return -EPERM;
177
178 i = simple_strtoul(buf, &tmp, 16);
179 if ((i == 0) || (i == 1))
180 card->options.fake_broadcast = i;
181 else {
182 PRINT_WARN("fake_broadcast: write 0 or 1 to this file!\n");
183 return -EINVAL;
184 }
185 return count;
186}
187
188static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show,
189 qeth_l3_dev_fake_broadcast_store);
190
191static ssize_t qeth_l3_dev_broadcast_mode_show(struct device *dev,
192 struct device_attribute *attr, char *buf)
193{
194 struct qeth_card *card = dev_get_drvdata(dev);
195
196 if (!card)
197 return -EINVAL;
198
199 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
200 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
201 return sprintf(buf, "n/a\n");
202
203 return sprintf(buf, "%s\n", (card->options.broadcast_mode ==
204 QETH_TR_BROADCAST_ALLRINGS)?
205 "all rings":"local");
206}
207
208static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev,
209 struct device_attribute *attr, const char *buf, size_t count)
210{
211 struct qeth_card *card = dev_get_drvdata(dev);
212 char *tmp;
213
214 if (!card)
215 return -EINVAL;
216
217 if ((card->state != CARD_STATE_DOWN) &&
218 (card->state != CARD_STATE_RECOVER))
219 return -EPERM;
220
221 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
222 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
223 PRINT_WARN("Device is not a tokenring device!\n");
224 return -EINVAL;
225 }
226
227 tmp = strsep((char **) &buf, "\n");
228
229 if (!strcmp(tmp, "local")) {
230 card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL;
231 return count;
232 } else if (!strcmp(tmp, "all_rings")) {
233 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
234 return count;
235 } else {
236 PRINT_WARN("broadcast_mode: invalid mode %s!\n",
237 tmp);
238 return -EINVAL;
239 }
241}
242
243static DEVICE_ATTR(broadcast_mode, 0644, qeth_l3_dev_broadcast_mode_show,
244 qeth_l3_dev_broadcast_mode_store);
245
246static ssize_t qeth_l3_dev_canonical_macaddr_show(struct device *dev,
247 struct device_attribute *attr, char *buf)
248{
249 struct qeth_card *card = dev_get_drvdata(dev);
250
251 if (!card)
252 return -EINVAL;
253
254 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
255 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
256 return sprintf(buf, "n/a\n");
257
258 return sprintf(buf, "%i\n", (card->options.macaddr_mode ==
259 QETH_TR_MACADDR_CANONICAL)? 1:0);
260}
261
262static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev,
263 struct device_attribute *attr, const char *buf, size_t count)
264{
265 struct qeth_card *card = dev_get_drvdata(dev);
266 char *tmp;
267 int i;
268
269 if (!card)
270 return -EINVAL;
271
272 if ((card->state != CARD_STATE_DOWN) &&
273 (card->state != CARD_STATE_RECOVER))
274 return -EPERM;
275
276 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
277 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
278 PRINT_WARN("Device is not a tokenring device!\n");
279 return -EINVAL;
280 }
281
282 i = simple_strtoul(buf, &tmp, 16);
283 if ((i == 0) || (i == 1))
284 card->options.macaddr_mode = i?
285 QETH_TR_MACADDR_CANONICAL :
286 QETH_TR_MACADDR_NONCANONICAL;
287 else {
288 PRINT_WARN("canonical_macaddr: write 0 or 1 to this file!\n");
289 return -EINVAL;
290 }
291 return count;
292}
293
294static DEVICE_ATTR(canonical_macaddr, 0644, qeth_l3_dev_canonical_macaddr_show,
295 qeth_l3_dev_canonical_macaddr_store);
296
297static ssize_t qeth_l3_dev_checksum_show(struct device *dev,
298 struct device_attribute *attr, char *buf)
299{
300 struct qeth_card *card = dev_get_drvdata(dev);
301
302 if (!card)
303 return -EINVAL;
304
305 return sprintf(buf, "%s checksumming\n",
306 qeth_l3_get_checksum_str(card));
307}
308
309static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
310 struct device_attribute *attr, const char *buf, size_t count)
311{
312 struct qeth_card *card = dev_get_drvdata(dev);
313 char *tmp;
314
315 if (!card)
316 return -EINVAL;
317
318 if ((card->state != CARD_STATE_DOWN) &&
319 (card->state != CARD_STATE_RECOVER))
320 return -EPERM;
321
322 tmp = strsep((char **) &buf, "\n");
323 if (!strcmp(tmp, "sw_checksumming"))
324 card->options.checksum_type = SW_CHECKSUMMING;
325 else if (!strcmp(tmp, "hw_checksumming"))
326 card->options.checksum_type = HW_CHECKSUMMING;
327 else if (!strcmp(tmp, "no_checksumming"))
328 card->options.checksum_type = NO_CHECKSUMMING;
329 else {
330 PRINT_WARN("Unknown checksumming type '%s'\n", tmp);
331 return -EINVAL;
332 }
333 return count;
334}
335
336static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
337 qeth_l3_dev_checksum_store);
338
339static struct attribute *qeth_l3_device_attrs[] = {
340 &dev_attr_route4.attr,
341 &dev_attr_route6.attr,
342 &dev_attr_fake_broadcast.attr,
343 &dev_attr_broadcast_mode.attr,
344 &dev_attr_canonical_macaddr.attr,
345 &dev_attr_checksumming.attr,
346 NULL,
347};
348
349static struct attribute_group qeth_l3_device_attr_group = {
350 .attrs = qeth_l3_device_attrs,
351};
352
353static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev,
354 struct device_attribute *attr, char *buf)
355{
356 struct qeth_card *card = dev_get_drvdata(dev);
357
358 if (!card)
359 return -EINVAL;
360
361 return sprintf(buf, "%i\n", card->ipato.enabled? 1:0);
362}
363
364static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
365 struct device_attribute *attr, const char *buf, size_t count)
366{
367 struct qeth_card *card = dev_get_drvdata(dev);
368 char *tmp;
369
370 if (!card)
371 return -EINVAL;
372
373 if ((card->state != CARD_STATE_DOWN) &&
374 (card->state != CARD_STATE_RECOVER))
375 return -EPERM;
376
377 tmp = strsep((char **) &buf, "\n");
378 if (!strcmp(tmp, "toggle")) {
379 card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
380 } else if (!strcmp(tmp, "1")) {
381 card->ipato.enabled = 1;
382 } else if (!strcmp(tmp, "0")) {
383 card->ipato.enabled = 0;
384 } else {
385 PRINT_WARN("ipato_enable: write 0, 1 or 'toggle' to "
386 "this file\n");
387 return -EINVAL;
388 }
389 return count;
390}
391
392static QETH_DEVICE_ATTR(ipato_enable, enable, 0644,
393 qeth_l3_dev_ipato_enable_show,
394 qeth_l3_dev_ipato_enable_store);
395
396static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev,
397 struct device_attribute *attr, char *buf)
398{
399 struct qeth_card *card = dev_get_drvdata(dev);
400
401 if (!card)
402 return -EINVAL;
403
404 return sprintf(buf, "%i\n", card->ipato.invert4? 1:0);
405}
406
407static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
408 struct device_attribute *attr,
409 const char *buf, size_t count)
410{
411 struct qeth_card *card = dev_get_drvdata(dev);
412 char *tmp;
413
414 if (!card)
415 return -EINVAL;
416
417 tmp = strsep((char **) &buf, "\n");
418 if (!strcmp(tmp, "toggle")) {
419 card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
420 } else if (!strcmp(tmp, "1")) {
421 card->ipato.invert4 = 1;
422 } else if (!strcmp(tmp, "0")) {
423 card->ipato.invert4 = 0;
424 } else {
425 PRINT_WARN("ipato_invert4: write 0, 1 or 'toggle' to "
426 "this file\n");
427 return -EINVAL;
428 }
429 return count;
430}
431
432static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
433 qeth_l3_dev_ipato_invert4_show,
434 qeth_l3_dev_ipato_invert4_store);
435
436static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
437 enum qeth_prot_versions proto)
438{
439 struct qeth_ipato_entry *ipatoe;
440 unsigned long flags;
441 char addr_str[40];
442 int entry_len; /* length of 1 entry string, differs between v4 and v6 */
443 int i = 0;
444
445 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
446 /* add strlen for "/<mask>\n" */
447 entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
448 spin_lock_irqsave(&card->ip_lock, flags);
449 list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
450 if (ipatoe->proto != proto)
451 continue;
452 /* String must not be longer than PAGE_SIZE. So we check if
 453 * string length gets near PAGE_SIZE. Then we can safely display
454 * the next IPv6 address (worst case, compared to IPv4) */
455 if ((PAGE_SIZE - i) <= entry_len)
456 break;
457 qeth_l3_ipaddr_to_string(proto, ipatoe->addr, addr_str);
458 i += snprintf(buf + i, PAGE_SIZE - i,
459 "%s/%i\n", addr_str, ipatoe->mask_bits);
460 }
461 spin_unlock_irqrestore(&card->ip_lock, flags);
462 i += snprintf(buf + i, PAGE_SIZE - i, "\n");
463
464 return i;
465}
466
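The show helper above uses a common sysfs pattern: accumulate entries with snprintf() against the space remaining in the PAGE_SIZE output buffer, and bail out early once a worst-case entry might no longer fit, rather than risking truncation mid-entry. A stripped-down, self-contained sketch of that idiom (all names hypothetical):

#include <stdio.h>

#define BUF_SIZE 4096			/* stands in for PAGE_SIZE */

/* Append up to n entries, stopping while a worst-case entry still fits. */
static int format_entries(char *buf, const char **entries, int n,
			  int worst_case_len)
{
	int i = 0, k;

	for (k = 0; k < n; k++) {
		if (BUF_SIZE - i <= worst_case_len)
			break;		/* not enough room left: stop cleanly */
		i += snprintf(buf + i, BUF_SIZE - i, "%s\n", entries[k]);
	}
	i += snprintf(buf + i, BUF_SIZE - i, "\n");
	return i;			/* bytes written, as a show() would return */
}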
467static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
468 struct device_attribute *attr, char *buf)
469{
470 struct qeth_card *card = dev_get_drvdata(dev);
471
472 if (!card)
473 return -EINVAL;
474
475 return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
476}
477
478static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto,
479 u8 *addr, int *mask_bits)
480{
481 const char *start, *end;
482 char *tmp;
483 char buffer[40] = {0, };
484
485 start = buf;
486 /* get address string */
487 end = strchr(start, '/');
488 if (!end || (end - start >= 40)) {
489 PRINT_WARN("Invalid format for ipato_addx/delx. "
490 "Use <ip addr>/<mask bits>\n");
491 return -EINVAL;
492 }
493 strncpy(buffer, start, end - start);
494 if (qeth_l3_string_to_ipaddr(buffer, proto, addr)) {
495 PRINT_WARN("Invalid IP address format!\n");
496 return -EINVAL;
497 }
498 start = end + 1;
499 *mask_bits = simple_strtoul(start, &tmp, 10);
500 if (!strlen(start) ||
501 (tmp == start) ||
502 (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) {
503 PRINT_WARN("Invalid mask bits for ipato_addx/delx !\n");
504 return -EINVAL;
505 }
506 return 0;
507}
508
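The parser above splits the input on '/' into an address part and a mask-bits part and validates both halves before anything is allocated. A user-space approximation of the same checks, using inet_pton() in place of the driver's qeth_l3_string_to_ipaddr() (illustrative only; real entries would be written to the ipa_takeover/add4 attribute):

#include <arpa/inet.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_ipatoe(const char *buf, int v4, unsigned char *addr,
			int *mask_bits)
{
	char buffer[40] = { 0 };
	const char *end;
	char *tmp;

	end = strchr(buf, '/');
	if (!end || end - buf >= 40)
		return -1;			/* no '/' or address too long */
	memcpy(buffer, buf, end - buf);
	if (inet_pton(v4 ? AF_INET : AF_INET6, buffer, addr) != 1)
		return -1;			/* malformed address */
	*mask_bits = strtoul(end + 1, &tmp, 10);
	if (tmp == end + 1 || *mask_bits > (v4 ? 32 : 128))
		return -1;			/* missing or oversized mask */
	return 0;
}

int main(void)
{
	unsigned char addr[16];
	int bits;

	if (!parse_ipatoe("192.168.0.0/24", 1, addr, &bits))
		printf("ok, mask bits %d\n", bits);
	return 0;
}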
509static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
510 struct qeth_card *card, enum qeth_prot_versions proto)
511{
512 struct qeth_ipato_entry *ipatoe;
513 u8 addr[16];
514 int mask_bits;
515 int rc;
516
517 rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
518 if (rc)
519 return rc;
520
521 ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL);
522 if (!ipatoe) {
523 PRINT_WARN("No memory to allocate ipato entry\n");
524 return -ENOMEM;
525 }
526 ipatoe->proto = proto;
527 memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
528 ipatoe->mask_bits = mask_bits;
529
530 rc = qeth_l3_add_ipato_entry(card, ipatoe);
531 if (rc) {
532 kfree(ipatoe);
533 return rc;
534 }
535
536 return count;
537}
538
539static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev,
540 struct device_attribute *attr, const char *buf, size_t count)
541{
542 struct qeth_card *card = dev_get_drvdata(dev);
543
544 if (!card)
545 return -EINVAL;
546
547 return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4);
548}
549
550static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
551 qeth_l3_dev_ipato_add4_show,
552 qeth_l3_dev_ipato_add4_store);
553
554static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
555 struct qeth_card *card, enum qeth_prot_versions proto)
556{
557 u8 addr[16];
558 int mask_bits;
559 int rc;
560
561 rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
562 if (rc)
563 return rc;
564
565 qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
566
567 return count;
568}
569
570static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev,
571 struct device_attribute *attr, const char *buf, size_t count)
572{
573 struct qeth_card *card = dev_get_drvdata(dev);
574
575 if (!card)
576 return -EINVAL;
577
578 return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4);
579}
580
581static QETH_DEVICE_ATTR(ipato_del4, del4, 0200, NULL,
582 qeth_l3_dev_ipato_del4_store);
583
584static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev,
585 struct device_attribute *attr, char *buf)
586{
587 struct qeth_card *card = dev_get_drvdata(dev);
588
589 if (!card)
590 return -EINVAL;
591
592 return sprintf(buf, "%i\n", card->ipato.invert6? 1:0);
593}
594
595static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
596 struct device_attribute *attr, const char *buf, size_t count)
597{
598 struct qeth_card *card = dev_get_drvdata(dev);
599 char *tmp;
600
601 if (!card)
602 return -EINVAL;
603
604 tmp = strsep((char **) &buf, "\n");
605 if (!strcmp(tmp, "toggle")) {
606 card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
607 } else if (!strcmp(tmp, "1")) {
608 card->ipato.invert6 = 1;
609 } else if (!strcmp(tmp, "0")) {
610 card->ipato.invert6 = 0;
611 } else {
612 PRINT_WARN("ipato_invert6: write 0, 1 or 'toggle' to "
613 "this file\n");
614 return -EINVAL;
615 }
616 return count;
617}
618
619static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644,
620 qeth_l3_dev_ipato_invert6_show,
621 qeth_l3_dev_ipato_invert6_store);
622
623
624static ssize_t qeth_l3_dev_ipato_add6_show(struct device *dev,
625 struct device_attribute *attr, char *buf)
626{
627 struct qeth_card *card = dev_get_drvdata(dev);
628
629 if (!card)
630 return -EINVAL;
631
632 return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV6);
633}
634
635static ssize_t qeth_l3_dev_ipato_add6_store(struct device *dev,
636 struct device_attribute *attr, const char *buf, size_t count)
637{
638 struct qeth_card *card = dev_get_drvdata(dev);
639
640 if (!card)
641 return -EINVAL;
642
643 return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6);
644}
645
646static QETH_DEVICE_ATTR(ipato_add6, add6, 0644,
647 qeth_l3_dev_ipato_add6_show,
648 qeth_l3_dev_ipato_add6_store);
649
650static ssize_t qeth_l3_dev_ipato_del6_store(struct device *dev,
651 struct device_attribute *attr, const char *buf, size_t count)
652{
653 struct qeth_card *card = dev_get_drvdata(dev);
654
655 if (!card)
656 return -EINVAL;
657
658 return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6);
659}
660
661static QETH_DEVICE_ATTR(ipato_del6, del6, 0200, NULL,
662 qeth_l3_dev_ipato_del6_store);
663
664static struct attribute *qeth_ipato_device_attrs[] = {
665 &dev_attr_ipato_enable.attr,
666 &dev_attr_ipato_invert4.attr,
667 &dev_attr_ipato_add4.attr,
668 &dev_attr_ipato_del4.attr,
669 &dev_attr_ipato_invert6.attr,
670 &dev_attr_ipato_add6.attr,
671 &dev_attr_ipato_del6.attr,
672 NULL,
673};
674
675static struct attribute_group qeth_device_ipato_group = {
676 .name = "ipa_takeover",
677 .attrs = qeth_ipato_device_attrs,
678};
679
680static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
681 enum qeth_prot_versions proto)
682{
683 struct qeth_ipaddr *ipaddr;
684 char addr_str[40];
685 int entry_len; /* length of 1 entry string, differs between v4 and v6 */
686 unsigned long flags;
687 int i = 0;
688
689 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
690 entry_len += 2; /* \n + terminator */
691 spin_lock_irqsave(&card->ip_lock, flags);
692 list_for_each_entry(ipaddr, &card->ip_list, entry) {
693 if (ipaddr->proto != proto)
694 continue;
695 if (ipaddr->type != QETH_IP_TYPE_VIPA)
696 continue;
697 /* String must not be longer than PAGE_SIZE. So we check if
 698 * string length gets near PAGE_SIZE. Then we can safely display
699 * the next IPv6 address (worst case, compared to IPv4) */
700 if ((PAGE_SIZE - i) <= entry_len)
701 break;
702 qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
703 addr_str);
704 i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
705 }
706 spin_unlock_irqrestore(&card->ip_lock, flags);
707 i += snprintf(buf + i, PAGE_SIZE - i, "\n");
708
709 return i;
710}
711
712static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
713 struct device_attribute *attr, char *buf)
714{
715 struct qeth_card *card = dev_get_drvdata(dev);
716
717 if (!card)
718 return -EINVAL;
719
720 return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
721}
722
723static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto,
724 u8 *addr)
725{
726 if (qeth_l3_string_to_ipaddr(buf, proto, addr)) {
727 PRINT_WARN("Invalid IP address format!\n");
728 return -EINVAL;
729 }
730 return 0;
731}
732
733static ssize_t qeth_l3_dev_vipa_add_store(const char *buf, size_t count,
734 struct qeth_card *card, enum qeth_prot_versions proto)
735{
736 u8 addr[16] = {0, };
737 int rc;
738
739 rc = qeth_l3_parse_vipae(buf, proto, addr);
740 if (rc)
741 return rc;
742
743 rc = qeth_l3_add_vipa(card, proto, addr);
744 if (rc)
745 return rc;
746
747 return count;
748}
749
750static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev,
751 struct device_attribute *attr, const char *buf, size_t count)
752{
753 struct qeth_card *card = dev_get_drvdata(dev);
754
755 if (!card)
756 return -EINVAL;
757
758 return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4);
759}
760
761static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
762 qeth_l3_dev_vipa_add4_show,
763 qeth_l3_dev_vipa_add4_store);
764
765static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
766 struct qeth_card *card, enum qeth_prot_versions proto)
767{
768 u8 addr[16];
769 int rc;
770
771 rc = qeth_l3_parse_vipae(buf, proto, addr);
772 if (rc)
773 return rc;
774
775 qeth_l3_del_vipa(card, proto, addr);
776
777 return count;
778}
779
780static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
781 struct device_attribute *attr, const char *buf, size_t count)
782{
783 struct qeth_card *card = dev_get_drvdata(dev);
784
785 if (!card)
786 return -EINVAL;
787
788 return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
789}
790
791static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
792 qeth_l3_dev_vipa_del4_store);
793
794static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev,
795 struct device_attribute *attr, char *buf)
796{
797 struct qeth_card *card = dev_get_drvdata(dev);
798
799 if (!card)
800 return -EINVAL;
801
802 return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV6);
803}
804
805static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
806 struct device_attribute *attr, const char *buf, size_t count)
807{
808 struct qeth_card *card = dev_get_drvdata(dev);
809
810 if (!card)
811 return -EINVAL;
812
813 return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
814}
815
816static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
817 qeth_l3_dev_vipa_add6_show,
818 qeth_l3_dev_vipa_add6_store);
819
820static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev,
821 struct device_attribute *attr, const char *buf, size_t count)
822{
823 struct qeth_card *card = dev_get_drvdata(dev);
824
825 if (!card)
826 return -EINVAL;
827
828 return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
829}
830
831static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL,
832 qeth_l3_dev_vipa_del6_store);
833
834static struct attribute *qeth_vipa_device_attrs[] = {
835 &dev_attr_vipa_add4.attr,
836 &dev_attr_vipa_del4.attr,
837 &dev_attr_vipa_add6.attr,
838 &dev_attr_vipa_del6.attr,
839 NULL,
840};
841
842static struct attribute_group qeth_device_vipa_group = {
843 .name = "vipa",
844 .attrs = qeth_vipa_device_attrs,
845};
846
847static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
848 enum qeth_prot_versions proto)
849{
850 struct qeth_ipaddr *ipaddr;
851 char addr_str[40];
852 int entry_len; /* length of 1 entry string, differs between v4 and v6 */
853 unsigned long flags;
854 int i = 0;
855
856 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
857 entry_len += 2; /* \n + terminator */
858 spin_lock_irqsave(&card->ip_lock, flags);
859 list_for_each_entry(ipaddr, &card->ip_list, entry) {
860 if (ipaddr->proto != proto)
861 continue;
862 if (ipaddr->type != QETH_IP_TYPE_RXIP)
863 continue;
864 /* String must not be longer than PAGE_SIZE. So we check if
 866 * string length gets near PAGE_SIZE. Then we can safely display
866 * the next IPv6 address (worst case, compared to IPv4) */
867 if ((PAGE_SIZE - i) <= entry_len)
868 break;
869 qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
870 addr_str);
871 i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
872 }
873 spin_unlock_irqrestore(&card->ip_lock, flags);
874 i += snprintf(buf + i, PAGE_SIZE - i, "\n");
875
876 return i;
877}
878
879static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
880 struct device_attribute *attr, char *buf)
881{
882 struct qeth_card *card = dev_get_drvdata(dev);
883
884 if (!card)
885 return -EINVAL;
886
887 return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
888}
889
890static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
891 u8 *addr)
892{
893 if (qeth_l3_string_to_ipaddr(buf, proto, addr)) {
894 PRINT_WARN("Invalid IP address format!\n");
895 return -EINVAL;
896 }
897 return 0;
898}
899
900static ssize_t qeth_l3_dev_rxip_add_store(const char *buf, size_t count,
901 struct qeth_card *card, enum qeth_prot_versions proto)
902{
903 u8 addr[16] = {0, };
904 int rc;
905
906 rc = qeth_l3_parse_rxipe(buf, proto, addr);
907 if (rc)
908 return rc;
909
910 rc = qeth_l3_add_rxip(card, proto, addr);
911 if (rc)
912 return rc;
913
914 return count;
915}
916
917static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
918 struct device_attribute *attr, const char *buf, size_t count)
919{
920 struct qeth_card *card = dev_get_drvdata(dev);
921
922 if (!card)
923 return -EINVAL;
924
925 return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
926}
927
928static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
929 qeth_l3_dev_rxip_add4_show,
930 qeth_l3_dev_rxip_add4_store);
931
932static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
933 struct qeth_card *card, enum qeth_prot_versions proto)
934{
935 u8 addr[16];
936 int rc;
937
938 rc = qeth_l3_parse_rxipe(buf, proto, addr);
939 if (rc)
940 return rc;
941
942 qeth_l3_del_rxip(card, proto, addr);
943
944 return count;
945}
946
947static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev,
948 struct device_attribute *attr, const char *buf, size_t count)
949{
950 struct qeth_card *card = dev_get_drvdata(dev);
951
952 if (!card)
953 return -EINVAL;
954
955 return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4);
956}
957
958static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
959 qeth_l3_dev_rxip_del4_store);
960
961static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev,
962 struct device_attribute *attr, char *buf)
963{
964 struct qeth_card *card = dev_get_drvdata(dev);
965
966 if (!card)
967 return -EINVAL;
968
969 return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV6);
970}
971
972static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
973 struct device_attribute *attr, const char *buf, size_t count)
974{
975 struct qeth_card *card = dev_get_drvdata(dev);
976
977 if (!card)
978 return -EINVAL;
979
980 return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6);
981}
982
983static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
984 qeth_l3_dev_rxip_add6_show,
985 qeth_l3_dev_rxip_add6_store);
986
987static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev,
988 struct device_attribute *attr, const char *buf, size_t count)
989{
990 struct qeth_card *card = dev_get_drvdata(dev);
991
992 if (!card)
993 return -EINVAL;
994
995 return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6);
996}
997
998static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL,
999 qeth_l3_dev_rxip_del6_store);
1000
1001static struct attribute *qeth_rxip_device_attrs[] = {
1002 &dev_attr_rxip_add4.attr,
1003 &dev_attr_rxip_del4.attr,
1004 &dev_attr_rxip_add6.attr,
1005 &dev_attr_rxip_del6.attr,
1006 NULL,
1007};
1008
1009static struct attribute_group qeth_device_rxip_group = {
1010 .name = "rxip",
1011 .attrs = qeth_rxip_device_attrs,
1012};
1013
1014int qeth_l3_create_device_attributes(struct device *dev)
1015{
1016 int ret;
1017
1018 ret = sysfs_create_group(&dev->kobj, &qeth_l3_device_attr_group);
1019 if (ret)
1020 return ret;
1021
1022 ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group);
1023 if (ret) {
1024 sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
1025 return ret;
1026 }
1027
1028 ret = sysfs_create_group(&dev->kobj, &qeth_device_vipa_group);
1029 if (ret) {
1030 sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
1031 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1032 return ret;
1033 }
1034
1035 ret = sysfs_create_group(&dev->kobj, &qeth_device_rxip_group);
1036 if (ret) {
1037 sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
1038 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1039 sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
1040 return ret;
1041 }
1042 return 0;
1043}
1044
1045void qeth_l3_remove_device_attributes(struct device *dev)
1046{
1047 sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
1048 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1049 sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
1050 sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
1051}
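A closing design note on qeth_l3_create_device_attributes(): each error branch re-lists every previously created group, so every new group lengthens three cleanup blocks. A hypothetical equivalent in the kernel's customary goto-unwind style keeps each group's removal in one place (shown only as an alternative formulation, not as the patch's code):

int qeth_l3_create_device_attributes(struct device *dev)
{
	int ret;

	ret = sysfs_create_group(&dev->kobj, &qeth_l3_device_attr_group);
	if (ret)
		return ret;
	ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group);
	if (ret)
		goto out_attr;
	ret = sysfs_create_group(&dev->kobj, &qeth_device_vipa_group);
	if (ret)
		goto out_ipato;
	ret = sysfs_create_group(&dev->kobj, &qeth_device_rxip_group);
	if (ret)
		goto out_vipa;
	return 0;
out_vipa:
	sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
out_ipato:
	sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
out_attr:
	sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
	return ret;		/* each label undoes exactly what preceded it */
}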
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
deleted file mode 100644
index d063e9ecf804..000000000000
--- a/drivers/s390/net/qeth_main.c
+++ /dev/null
@@ -1,8959 +0,0 @@
1/*
2 * linux/drivers/s390/net/qeth_main.c
3 *
4 * Linux on zSeries OSA Express and HiperSockets support
5 *
6 * Copyright 2000,2003 IBM Corporation
7 *
8 * Author(s): Original Code written by
9 * Utz Bacher (utz.bacher@de.ibm.com)
10 * Rewritten by
11 * Frank Pavlic (fpavlic@de.ibm.com) and
12 * Thomas Spatzier <tspat@de.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29
30#include <linux/module.h>
31#include <linux/moduleparam.h>
32#include <linux/string.h>
33#include <linux/errno.h>
34#include <linux/mm.h>
35#include <linux/ip.h>
36#include <linux/inetdevice.h>
37#include <linux/netdevice.h>
38#include <linux/sched.h>
39#include <linux/workqueue.h>
40#include <linux/kernel.h>
41#include <linux/slab.h>
42#include <linux/interrupt.h>
43#include <linux/tcp.h>
44#include <linux/icmp.h>
45#include <linux/skbuff.h>
46#include <linux/in.h>
47#include <linux/igmp.h>
48#include <linux/init.h>
49#include <linux/reboot.h>
50#include <linux/mii.h>
51#include <linux/rcupdate.h>
52#include <linux/ethtool.h>
53
54#include <net/arp.h>
55#include <net/ip.h>
56#include <net/route.h>
57
58#include <asm/ebcdic.h>
59#include <asm/io.h>
60#include <asm/qeth.h>
61#include <asm/timex.h>
62#include <asm/semaphore.h>
63#include <asm/uaccess.h>
64#include <asm/s390_rdev.h>
65
66#include "qeth.h"
67#include "qeth_mpc.h"
68#include "qeth_fs.h"
69#include "qeth_eddp.h"
70#include "qeth_tso.h"
71
72static const char *version = "qeth S/390 OSA-Express driver";
73
74/**
75 * Debug Facility Stuff
76 */
77static debug_info_t *qeth_dbf_setup = NULL;
78static debug_info_t *qeth_dbf_data = NULL;
79static debug_info_t *qeth_dbf_misc = NULL;
80static debug_info_t *qeth_dbf_control = NULL;
81debug_info_t *qeth_dbf_trace = NULL;
82static debug_info_t *qeth_dbf_sense = NULL;
83static debug_info_t *qeth_dbf_qerr = NULL;
84
85DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf);
86
87static struct lock_class_key qdio_out_skb_queue_key;
88
89/**
90 * some more definitions and declarations
91 */
92static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;
93
94/* list of our cards */
95struct qeth_card_list_struct qeth_card_list;
 96/* processes that want to be notified */
97spinlock_t qeth_notify_lock;
98struct list_head qeth_notify_list;
99
100static void qeth_send_control_data_cb(struct qeth_channel *,
101 struct qeth_cmd_buffer *);
102
103/**
104 * here we go with function implementation
105 */
106static void
107qeth_init_qdio_info(struct qeth_card *card);
108
109static int
110qeth_init_qdio_queues(struct qeth_card *card);
111
112static int
113qeth_alloc_qdio_buffers(struct qeth_card *card);
114
115static void
116qeth_free_qdio_buffers(struct qeth_card *);
117
118static void
119qeth_clear_qdio_buffers(struct qeth_card *);
120
121static void
122qeth_clear_ip_list(struct qeth_card *, int, int);
123
124static void
125qeth_clear_ipacmd_list(struct qeth_card *);
126
127static int
128qeth_qdio_clear_card(struct qeth_card *, int);
129
130static void
131qeth_clear_working_pool_list(struct qeth_card *);
132
133static void
134qeth_clear_cmd_buffers(struct qeth_channel *);
135
136static int
137qeth_stop(struct net_device *);
138
139static void
140qeth_clear_ipato_list(struct qeth_card *);
141
142static int
143qeth_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
144
145static void
146qeth_irq_tasklet(unsigned long);
147
148static int
149qeth_set_online(struct ccwgroup_device *);
150
151static int
152__qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode);
153
154static struct qeth_ipaddr *
155qeth_get_addr_buffer(enum qeth_prot_versions);
156
157static void
158qeth_set_multicast_list(struct net_device *);
159
160static void
161qeth_setadp_promisc_mode(struct qeth_card *);
162
163static int
164qeth_hard_header_parse(const struct sk_buff *skb, unsigned char *haddr);
165
166static void
167qeth_notify_processes(void)
168{
169 /*notify all registered processes */
170 struct qeth_notify_list_struct *n_entry;
171
172 QETH_DBF_TEXT(trace,3,"procnoti");
173 spin_lock(&qeth_notify_lock);
174 list_for_each_entry(n_entry, &qeth_notify_list, list) {
175 send_sig(n_entry->signum, n_entry->task, 1);
176 }
177 spin_unlock(&qeth_notify_lock);
178
179}
180int
181qeth_notifier_unregister(struct task_struct *p)
182{
183 struct qeth_notify_list_struct *n_entry, *tmp;
184
185 QETH_DBF_TEXT(trace, 2, "notunreg");
186 spin_lock(&qeth_notify_lock);
187 list_for_each_entry_safe(n_entry, tmp, &qeth_notify_list, list) {
188 if (n_entry->task == p) {
189 list_del(&n_entry->list);
190 kfree(n_entry);
191 goto out;
192 }
193 }
194out:
195 spin_unlock(&qeth_notify_lock);
196 return 0;
197}
198int
199qeth_notifier_register(struct task_struct *p, int signum)
200{
201 struct qeth_notify_list_struct *n_entry;
202
203 /*check first if entry already exists*/
204 spin_lock(&qeth_notify_lock);
205 list_for_each_entry(n_entry, &qeth_notify_list, list) {
206 if (n_entry->task == p) {
207 n_entry->signum = signum;
208 spin_unlock(&qeth_notify_lock);
209 return 0;
210 }
211 }
212 spin_unlock(&qeth_notify_lock);
213
214 n_entry = (struct qeth_notify_list_struct *)
215 kmalloc(sizeof(struct qeth_notify_list_struct),GFP_KERNEL);
216 if (!n_entry)
217 return -ENOMEM;
218 n_entry->task = p;
219 n_entry->signum = signum;
220 spin_lock(&qeth_notify_lock);
221 list_add(&n_entry->list,&qeth_notify_list);
222 spin_unlock(&qeth_notify_lock);
223 return 0;
224}
225
226
227/**
228 * free channel command buffers
229 */
230static void
231qeth_clean_channel(struct qeth_channel *channel)
232{
233 int cnt;
234
235 QETH_DBF_TEXT(setup, 2, "freech");
236 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
237 kfree(channel->iob[cnt].data);
238}
239
240/**
241 * free card
242 */
243static void
244qeth_free_card(struct qeth_card *card)
245{
246
247 QETH_DBF_TEXT(setup, 2, "freecrd");
248 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
249 qeth_clean_channel(&card->read);
250 qeth_clean_channel(&card->write);
251 if (card->dev)
252 free_netdev(card->dev);
253 qeth_clear_ip_list(card, 0, 0);
254 qeth_clear_ipato_list(card);
255 kfree(card->ip_tbd_list);
256 qeth_free_qdio_buffers(card);
257 kfree(card);
258}
259
260/**
261 * alloc memory for command buffer per channel
262 */
263static int
264qeth_setup_channel(struct qeth_channel *channel)
265{
266 int cnt;
267
268 QETH_DBF_TEXT(setup, 2, "setupch");
269 for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
270 channel->iob[cnt].data = (char *)
271 kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
272 if (channel->iob[cnt].data == NULL)
273 break;
274 channel->iob[cnt].state = BUF_STATE_FREE;
275 channel->iob[cnt].channel = channel;
276 channel->iob[cnt].callback = qeth_send_control_data_cb;
277 channel->iob[cnt].rc = 0;
278 }
279 if (cnt < QETH_CMD_BUFFER_NO) {
280 while (cnt-- > 0)
281 kfree(channel->iob[cnt].data);
282 return -ENOMEM;
283 }
284 channel->buf_no = 0;
285 channel->io_buf_no = 0;
286 atomic_set(&channel->irq_pending, 0);
287 spin_lock_init(&channel->iob_lock);
288
289 init_waitqueue_head(&channel->wait_q);
290 channel->irq_tasklet.data = (unsigned long) channel;
291 channel->irq_tasklet.func = qeth_irq_tasklet;
292 return 0;
293}
294
295/**
296 * alloc memory for card structure
297 */
298static struct qeth_card *
299qeth_alloc_card(void)
300{
301 struct qeth_card *card;
302
303 QETH_DBF_TEXT(setup, 2, "alloccrd");
304 card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
305 if (!card)
306 return NULL;
307 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
308 if (qeth_setup_channel(&card->read)) {
309 kfree(card);
310 return NULL;
311 }
312 if (qeth_setup_channel(&card->write)) {
313 qeth_clean_channel(&card->read);
314 kfree(card);
315 return NULL;
316 }
317 return card;
318}
319
320static long
321__qeth_check_irb_error(struct ccw_device *cdev, unsigned long intparm,
322 struct irb *irb)
323{
324 if (!IS_ERR(irb))
325 return 0;
326
327 switch (PTR_ERR(irb)) {
328 case -EIO:
329 PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
330 QETH_DBF_TEXT(trace, 2, "ckirberr");
331 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
332 break;
333 case -ETIMEDOUT:
334 PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
335 QETH_DBF_TEXT(trace, 2, "ckirberr");
336 QETH_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
337 if (intparm == QETH_RCD_PARM) {
338 struct qeth_card *card = CARD_FROM_CDEV(cdev);
339
340 if (card && (card->data.ccwdev == cdev)) {
341 card->data.state = CH_STATE_DOWN;
342 wake_up(&card->wait_q);
343 }
344 }
345 break;
346 default:
347 PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
348 cdev->dev.bus_id);
349 QETH_DBF_TEXT(trace, 2, "ckirberr");
350 QETH_DBF_TEXT(trace, 2, " rc???");
351 }
352 return PTR_ERR(irb);
353}
354
355static int
356qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
357{
358 int dstat,cstat;
359 char *sense;
360
361 sense = (char *) irb->ecw;
362 cstat = irb->scsw.cstat;
363 dstat = irb->scsw.dstat;
364
365 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
366 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
367 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
368 QETH_DBF_TEXT(trace,2, "CGENCHK");
369 PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ",
370 cdev->dev.bus_id, dstat, cstat);
371 HEXDUMP16(WARN, "irb: ", irb);
372 HEXDUMP16(WARN, "irb: ", ((char *) irb) + 32);
373 return 1;
374 }
375
376 if (dstat & DEV_STAT_UNIT_CHECK) {
377 if (sense[SENSE_RESETTING_EVENT_BYTE] &
378 SENSE_RESETTING_EVENT_FLAG) {
379 QETH_DBF_TEXT(trace,2,"REVIND");
380 return 1;
381 }
382 if (sense[SENSE_COMMAND_REJECT_BYTE] &
383 SENSE_COMMAND_REJECT_FLAG) {
384 QETH_DBF_TEXT(trace,2,"CMDREJi");
385 return 0;
386 }
387 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
388 QETH_DBF_TEXT(trace,2,"AFFE");
389 return 1;
390 }
391 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
392 QETH_DBF_TEXT(trace,2,"ZEROSEN");
393 return 0;
394 }
395 QETH_DBF_TEXT(trace,2,"DGENCHK");
396 return 1;
397 }
398 return 0;
399}
400static int qeth_issue_next_read(struct qeth_card *);
401
402/**
403 * interrupt handler
404 */
405static void
406qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
407{
408 int rc;
409 int cstat,dstat;
410 struct qeth_cmd_buffer *buffer;
411 struct qeth_channel *channel;
412 struct qeth_card *card;
413
414 QETH_DBF_TEXT(trace,5,"irq");
415
416 if (__qeth_check_irb_error(cdev, intparm, irb))
417 return;
418 cstat = irb->scsw.cstat;
419 dstat = irb->scsw.dstat;
420
421 card = CARD_FROM_CDEV(cdev);
422 if (!card)
423 return;
424
425 if (card->read.ccwdev == cdev){
426 channel = &card->read;
427 QETH_DBF_TEXT(trace,5,"read");
428 } else if (card->write.ccwdev == cdev) {
429 channel = &card->write;
430 QETH_DBF_TEXT(trace,5,"write");
431 } else {
432 channel = &card->data;
433 QETH_DBF_TEXT(trace,5,"data");
434 }
435 atomic_set(&channel->irq_pending, 0);
436
437 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC))
438 channel->state = CH_STATE_STOPPED;
439
440 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC))
441 channel->state = CH_STATE_HALTED;
442
443 /*let's wake up immediately on data channel*/
444 if ((channel == &card->data) && (intparm != 0) &&
445 (intparm != QETH_RCD_PARM))
446 goto out;
447
448 if (intparm == QETH_CLEAR_CHANNEL_PARM) {
449 QETH_DBF_TEXT(trace, 6, "clrchpar");
450 /* we don't have to handle this further */
451 intparm = 0;
452 }
453 if (intparm == QETH_HALT_CHANNEL_PARM) {
454 QETH_DBF_TEXT(trace, 6, "hltchpar");
455 /* we don't have to handle this further */
456 intparm = 0;
457 }
458 if ((dstat & DEV_STAT_UNIT_EXCEP) ||
459 (dstat & DEV_STAT_UNIT_CHECK) ||
460 (cstat)) {
461 if (irb->esw.esw0.erw.cons) {
462 /* TODO: we should make this s390dbf */
463 PRINT_WARN("sense data available on channel %s.\n",
464 CHANNEL_ID(channel));
465 PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat);
466 HEXDUMP16(WARN,"irb: ",irb);
467 HEXDUMP16(WARN,"sense data: ",irb->ecw);
468 }
469 if (intparm == QETH_RCD_PARM) {
470 channel->state = CH_STATE_DOWN;
471 goto out;
472 }
473 rc = qeth_get_problem(cdev,irb);
474 if (rc) {
475 qeth_schedule_recovery(card);
476 goto out;
477 }
478 }
479
480 if (intparm == QETH_RCD_PARM) {
481 channel->state = CH_STATE_RCD_DONE;
482 goto out;
483 }
484 if (intparm) {
485 buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
486 buffer->state = BUF_STATE_PROCESSED;
487 }
488 if (channel == &card->data)
489 return;
490
491 if (channel == &card->read &&
492 channel->state == CH_STATE_UP)
493 qeth_issue_next_read(card);
494
495 qeth_irq_tasklet((unsigned long)channel);
496 return;
497out:
498 wake_up(&card->wait_q);
499}
500
501/**
502 * tasklet function scheduled from irq handler
503 */
504static void
505qeth_irq_tasklet(unsigned long data)
506{
507 struct qeth_card *card;
508 struct qeth_channel *channel;
509 struct qeth_cmd_buffer *iob;
510 __u8 index;
511
512 QETH_DBF_TEXT(trace,5,"irqtlet");
513 channel = (struct qeth_channel *) data;
514 iob = channel->iob;
515 index = channel->buf_no;
516 card = CARD_FROM_CDEV(channel->ccwdev);
517 while (iob[index].state == BUF_STATE_PROCESSED) {
518 if (iob[index].callback !=NULL) {
519 iob[index].callback(channel,iob + index);
520 }
521 index = (index + 1) % QETH_CMD_BUFFER_NO;
522 }
523 channel->buf_no = index;
524 wake_up(&card->wait_q);
525}
526
527static int qeth_stop_card(struct qeth_card *, int);
528
529static int
530__qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
531{
532 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
533 int rc = 0, rc2 = 0, rc3 = 0;
534 enum qeth_card_states recover_flag;
535
536 QETH_DBF_TEXT(setup, 3, "setoffl");
537 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
538
539 if (card->dev && netif_carrier_ok(card->dev))
540 netif_carrier_off(card->dev);
541 recover_flag = card->state;
542 if (qeth_stop_card(card, recovery_mode) == -ERESTARTSYS){
543 PRINT_WARN("Stopping card %s interrupted by user!\n",
544 CARD_BUS_ID(card));
545 return -ERESTARTSYS;
546 }
547 rc = ccw_device_set_offline(CARD_DDEV(card));
548 rc2 = ccw_device_set_offline(CARD_WDEV(card));
549 rc3 = ccw_device_set_offline(CARD_RDEV(card));
550 if (!rc)
551 rc = (rc2) ? rc2 : rc3;
552 if (rc)
553 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
554 if (recover_flag == CARD_STATE_UP)
555 card->state = CARD_STATE_RECOVER;
556 qeth_notify_processes();
557 return 0;
558}
559
560static int
561qeth_set_offline(struct ccwgroup_device *cgdev)
562{
563 return __qeth_set_offline(cgdev, 0);
564}
565
566static int
567qeth_threads_running(struct qeth_card *card, unsigned long threads);
568
569
570static void
571qeth_remove_device(struct ccwgroup_device *cgdev)
572{
573 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
574 unsigned long flags;
575
576 QETH_DBF_TEXT(setup, 3, "rmdev");
577 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
578
579 if (!card)
580 return;
581
582 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
583
584 if (cgdev->state == CCWGROUP_ONLINE){
585 card->use_hard_stop = 1;
586 qeth_set_offline(cgdev);
587 }
 588 /* remove from our internal list */
589 write_lock_irqsave(&qeth_card_list.rwlock, flags);
590 list_del(&card->list);
591 write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
592 if (card->dev)
593 unregister_netdev(card->dev);
594 qeth_remove_device_attributes(&cgdev->dev);
595 qeth_free_card(card);
596 cgdev->dev.driver_data = NULL;
597 put_device(&cgdev->dev);
598}
599
600static int
601qeth_register_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
602static int
603qeth_deregister_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
604
605/**
606 * Add/remove address to/from card's ip list, i.e. try to add or remove
607 * reference to/from an IP address that is already registered on the card.
608 * Returns:
609 * 0 address was on card and its reference count has been adjusted,
610 * but is still > 0, so nothing has to be done
 611 * also returns 0 if address was not on card and the todo was to delete
612 * the address -> there is also nothing to be done
613 * 1 address was not on card and the todo is to add it to the card's ip
614 * list
615 * -1 address was on card and its reference count has been decremented
616 * to <= 0 by the todo -> address must be removed from card
617 */
618static int
619__qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
620 struct qeth_ipaddr **__addr)
621{
622 struct qeth_ipaddr *addr;
623 int found = 0;
624
625 list_for_each_entry(addr, &card->ip_list, entry) {
626 if (card->options.layer2) {
627 if ((addr->type == todo->type) &&
628 (memcmp(&addr->mac, &todo->mac,
629 OSA_ADDR_LEN) == 0)) {
630 found = 1;
631 break;
632 }
633 continue;
634 }
635 if ((addr->proto == QETH_PROT_IPV4) &&
636 (todo->proto == QETH_PROT_IPV4) &&
637 (addr->type == todo->type) &&
638 (addr->u.a4.addr == todo->u.a4.addr) &&
639 (addr->u.a4.mask == todo->u.a4.mask)) {
640 found = 1;
641 break;
642 }
643 if ((addr->proto == QETH_PROT_IPV6) &&
644 (todo->proto == QETH_PROT_IPV6) &&
645 (addr->type == todo->type) &&
646 (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
647 (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
648 sizeof(struct in6_addr)) == 0)) {
649 found = 1;
650 break;
651 }
652 }
653 if (found) {
654 addr->users += todo->users;
655 if (addr->users <= 0){
656 *__addr = addr;
657 return -1;
658 } else {
659 /* for VIPA and RXIP limit refcount to 1 */
660 if (addr->type != QETH_IP_TYPE_NORMAL)
661 addr->users = 1;
662 return 0;
663 }
664 }
665 if (todo->users > 0) {
666 /* for VIPA and RXIP limit refcount to 1 */
667 if (todo->type != QETH_IP_TYPE_NORMAL)
668 todo->users = 1;
669 return 1;
670 } else
671 return 0;
672}
673
674static int
675__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
676 int same_type)
677{
678 struct qeth_ipaddr *tmp;
679
680 list_for_each_entry(tmp, list, entry) {
681 if ((tmp->proto == QETH_PROT_IPV4) &&
682 (addr->proto == QETH_PROT_IPV4) &&
683 ((same_type && (tmp->type == addr->type)) ||
684 (!same_type && (tmp->type != addr->type)) ) &&
685 (tmp->u.a4.addr == addr->u.a4.addr) ){
686 return 1;
687 }
688 if ((tmp->proto == QETH_PROT_IPV6) &&
689 (addr->proto == QETH_PROT_IPV6) &&
690 ((same_type && (tmp->type == addr->type)) ||
691 (!same_type && (tmp->type != addr->type)) ) &&
692 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
693 sizeof(struct in6_addr)) == 0) ) {
694 return 1;
695 }
696 }
697 return 0;
698}
699
700/*
701 * Add IP to be added to todo list. If there is already an "add todo"
 702 * in this list we just increment the reference count.
703 * Returns 0 if we just incremented reference count.
704 */
705static int
706__qeth_insert_ip_todo(struct qeth_card *card, struct qeth_ipaddr *addr, int add)
707{
708 struct qeth_ipaddr *tmp, *t;
709 int found = 0;
710
711 list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
712 if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
713 (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
714 return 0;
715 if (card->options.layer2) {
716 if ((tmp->type == addr->type) &&
717 (tmp->is_multicast == addr->is_multicast) &&
718 (memcmp(&tmp->mac, &addr->mac,
719 OSA_ADDR_LEN) == 0)) {
720 found = 1;
721 break;
722 }
723 continue;
724 }
725 if ((tmp->proto == QETH_PROT_IPV4) &&
726 (addr->proto == QETH_PROT_IPV4) &&
727 (tmp->type == addr->type) &&
728 (tmp->is_multicast == addr->is_multicast) &&
729 (tmp->u.a4.addr == addr->u.a4.addr) &&
730 (tmp->u.a4.mask == addr->u.a4.mask)) {
731 found = 1;
732 break;
733 }
734 if ((tmp->proto == QETH_PROT_IPV6) &&
735 (addr->proto == QETH_PROT_IPV6) &&
736 (tmp->type == addr->type) &&
737 (tmp->is_multicast == addr->is_multicast) &&
738 (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
739 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
740 sizeof(struct in6_addr)) == 0)) {
741 found = 1;
742 break;
743 }
744 }
745 if (found){
746 if (addr->users != 0)
747 tmp->users += addr->users;
748 else
 749 			tmp->users += add ? 1 : -1;
750 if (tmp->users == 0) {
751 list_del(&tmp->entry);
752 kfree(tmp);
753 }
754 return 0;
755 } else {
756 if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
757 list_add(&addr->entry, card->ip_tbd_list);
758 else {
759 if (addr->users == 0)
 760 				addr->users += add ? 1 : -1;
761 if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
762 qeth_is_addr_covered_by_ipato(card, addr)){
763 QETH_DBF_TEXT(trace, 2, "tkovaddr");
764 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
765 }
766 list_add_tail(&addr->entry, card->ip_tbd_list);
767 }
768 return 1;
769 }
770}
771
772/**
773 * Remove IP address from list
774 */
775static int
776qeth_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
777{
778 unsigned long flags;
779 int rc = 0;
780
781 QETH_DBF_TEXT(trace, 4, "delip");
782
783 if (card->options.layer2)
784 QETH_DBF_HEX(trace, 4, &addr->mac, 6);
785 else if (addr->proto == QETH_PROT_IPV4)
786 QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4);
787 else {
788 QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8);
789 QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8);
790 }
791 spin_lock_irqsave(&card->ip_lock, flags);
792 rc = __qeth_insert_ip_todo(card, addr, 0);
793 spin_unlock_irqrestore(&card->ip_lock, flags);
794 return rc;
795}
796
797static int
798qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
799{
800 unsigned long flags;
801 int rc = 0;
802
803 QETH_DBF_TEXT(trace, 4, "addip");
804 if (card->options.layer2)
805 QETH_DBF_HEX(trace, 4, &addr->mac, 6);
806 else if (addr->proto == QETH_PROT_IPV4)
807 QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4);
808 else {
809 QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8);
810 QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8);
811 }
812 spin_lock_irqsave(&card->ip_lock, flags);
813 rc = __qeth_insert_ip_todo(card, addr, 1);
814 spin_unlock_irqrestore(&card->ip_lock, flags);
815 return rc;
816}
817
818static void
819__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags)
820{
821 struct qeth_ipaddr *addr, *tmp;
822 int rc;
823again:
824 list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
825 if (addr->is_multicast) {
826 list_del(&addr->entry);
827 spin_unlock_irqrestore(&card->ip_lock, *flags);
828 rc = qeth_deregister_addr_entry(card, addr);
829 spin_lock_irqsave(&card->ip_lock, *flags);
830 if (!rc) {
831 kfree(addr);
832 goto again;
833 } else
834 list_add(&addr->entry, &card->ip_list);
835 }
836 }
837}
838
839static void
840qeth_set_ip_addr_list(struct qeth_card *card)
841{
842 struct list_head *tbd_list;
843 struct qeth_ipaddr *todo, *addr;
844 unsigned long flags;
845 int rc;
846
847 QETH_DBF_TEXT(trace, 2, "sdiplist");
848 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
849
850 spin_lock_irqsave(&card->ip_lock, flags);
851 tbd_list = card->ip_tbd_list;
852 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
853 if (!card->ip_tbd_list) {
854 QETH_DBF_TEXT(trace, 0, "silnomem");
855 card->ip_tbd_list = tbd_list;
856 spin_unlock_irqrestore(&card->ip_lock, flags);
857 return;
858 } else
859 INIT_LIST_HEAD(card->ip_tbd_list);
860
861 while (!list_empty(tbd_list)){
862 todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
863 list_del(&todo->entry);
864 if (todo->type == QETH_IP_TYPE_DEL_ALL_MC){
865 __qeth_delete_all_mc(card, &flags);
866 kfree(todo);
867 continue;
868 }
869 rc = __qeth_ref_ip_on_card(card, todo, &addr);
870 if (rc == 0) {
871 /* nothing to be done; only adjusted refcount */
872 kfree(todo);
873 } else if (rc == 1) {
874 /* new entry to be added to on-card list */
875 spin_unlock_irqrestore(&card->ip_lock, flags);
876 rc = qeth_register_addr_entry(card, todo);
877 spin_lock_irqsave(&card->ip_lock, flags);
878 if (!rc)
879 list_add_tail(&todo->entry, &card->ip_list);
880 else
881 kfree(todo);
882 } else if (rc == -1) {
883 /* on-card entry to be removed */
884 list_del_init(&addr->entry);
885 spin_unlock_irqrestore(&card->ip_lock, flags);
886 rc = qeth_deregister_addr_entry(card, addr);
887 spin_lock_irqsave(&card->ip_lock, flags);
888 if (!rc)
889 kfree(addr);
890 else
891 list_add_tail(&addr->entry, &card->ip_list);
892 kfree(todo);
893 }
894 }
895 spin_unlock_irqrestore(&card->ip_lock, flags);
896 kfree(tbd_list);
897}
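
/*
 * The functions above form a queue-then-commit pair: callers stage
 * changes with qeth_add_ip()/qeth_delete_ip() and then flush them
 * with qeth_set_ip_addr_list(), which drops ip_lock around the actual
 * card I/O.  A hypothetical caller sketch (kernel context assumed,
 * error handling elided; the caller is assumed to have allocated
 * addr itself, so it must free it when the todo was merged):
 */
static void example_add_address(struct qeth_card *card,
				struct qeth_ipaddr *addr)
{
	if (!qeth_add_ip(card, addr))
		kfree(addr);			/* merged into an existing todo */
	qeth_set_ip_addr_list(card);		/* commit todo list to the card */
}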
898
899static void qeth_delete_mc_addresses(struct qeth_card *);
900static void qeth_add_multicast_ipv4(struct qeth_card *);
901static void qeth_layer2_add_multicast(struct qeth_card *);
902#ifdef CONFIG_QETH_IPV6
903static void qeth_add_multicast_ipv6(struct qeth_card *);
904#endif
905
906static int
907qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread)
908{
909 unsigned long flags;
910
911 spin_lock_irqsave(&card->thread_mask_lock, flags);
912 if ( !(card->thread_allowed_mask & thread) ||
913 (card->thread_start_mask & thread) ) {
914 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
915 return -EPERM;
916 }
917 card->thread_start_mask |= thread;
918 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
919 return 0;
920}
921
922static void
923qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
924{
925 unsigned long flags;
926
927 spin_lock_irqsave(&card->thread_mask_lock, flags);
928 card->thread_start_mask &= ~thread;
929 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
930 wake_up(&card->wait_q);
931}
932
933static void
934qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
935{
936 unsigned long flags;
937
938 spin_lock_irqsave(&card->thread_mask_lock, flags);
939 card->thread_running_mask &= ~thread;
940 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
941 wake_up(&card->wait_q);
942}
943
944static int
945__qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
946{
947 unsigned long flags;
948 int rc = 0;
949
950 spin_lock_irqsave(&card->thread_mask_lock, flags);
951 if (card->thread_start_mask & thread){
952 if ((card->thread_allowed_mask & thread) &&
953 !(card->thread_running_mask & thread)){
954 rc = 1;
955 card->thread_start_mask &= ~thread;
956 card->thread_running_mask |= thread;
957 } else
958 rc = -EPERM;
959 }
960 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
961 return rc;
962}
963
964static int
965qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
966{
967 int rc = 0;
968
969 wait_event(card->wait_q,
970 (rc = __qeth_do_run_thread(card, thread)) >= 0);
971 return rc;
972}
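
/*
 * The thread bookkeeping above uses three bitmasks: thread_start_mask
 * (a start was requested), thread_allowed_mask (starts are permitted)
 * and thread_running_mask (the thread is active).  A standalone model
 * of the transitions, with thread_mask_lock omitted for brevity:
 */
#include <stdio.h>

#define RECOVER 0x01UL

int main(void)
{
	unsigned long start = 0, allowed = RECOVER, running = 0;

	start |= RECOVER;			/* qeth_set_thread_start_bit() */
	if ((start & RECOVER) && (allowed & RECOVER) &&
	    !(running & RECOVER)) {		/* __qeth_do_run_thread() */
		start &= ~RECOVER;		/* consume the start request */
		running |= RECOVER;		/* mark thread as running */
	}
	printf("start=%lx running=%lx\n", start, running);	/* 0 1 */
	running &= ~RECOVER;		/* qeth_clear_thread_running_bit() */
	return 0;
}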
973
974static int
975qeth_recover(void *ptr)
976{
977 struct qeth_card *card;
978 int rc = 0;
979
980 card = (struct qeth_card *) ptr;
981 daemonize("qeth_recover");
982 QETH_DBF_TEXT(trace,2,"recover1");
983 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
984 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
985 return 0;
986 QETH_DBF_TEXT(trace,2,"recover2");
987 PRINT_WARN("Recovery of device %s started ...\n",
988 CARD_BUS_ID(card));
989 card->use_hard_stop = 1;
990 __qeth_set_offline(card->gdev,1);
991 rc = __qeth_set_online(card->gdev,1);
992 /* don't run another scheduled recovery */
993 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
994 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
995 if (!rc)
996 PRINT_INFO("Device %s successfully recovered!\n",
997 CARD_BUS_ID(card));
998 else
999 PRINT_INFO("Device %s could not be recovered!\n",
1000 CARD_BUS_ID(card));
1001 return 0;
1002}
1003
1004void
1005qeth_schedule_recovery(struct qeth_card *card)
1006{
1007 QETH_DBF_TEXT(trace,2,"startrec");
1008 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
1009 schedule_work(&card->kernel_thread_starter);
1010}
1011
1012static int
1013qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1014{
1015 unsigned long flags;
1016 int rc = 0;
1017
1018 spin_lock_irqsave(&card->thread_mask_lock, flags);
1019 QETH_DBF_TEXT_(trace, 4, " %02x%02x%02x",
1020 (u8) card->thread_start_mask,
1021 (u8) card->thread_allowed_mask,
1022 (u8) card->thread_running_mask);
1023 rc = (card->thread_start_mask & thread);
1024 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1025 return rc;
1026}
1027
1028static void
1029qeth_start_kernel_thread(struct work_struct *work)
1030{
1031 struct qeth_card *card = container_of(work, struct qeth_card, kernel_thread_starter);
1032 QETH_DBF_TEXT(trace , 2, "strthrd");
1033
1034 if (card->read.state != CH_STATE_UP &&
1035 card->write.state != CH_STATE_UP)
1036 return;
1037 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
1038 kernel_thread(qeth_recover, (void *) card, SIGCHLD);
1039}
1040
1041
1042static void
1043qeth_set_initial_options(struct qeth_card *card)
1044{
1045 card->options.route4.type = NO_ROUTER;
1046#ifdef CONFIG_QETH_IPV6
1047 card->options.route6.type = NO_ROUTER;
1048#endif /* QETH_IPV6 */
1049 card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
1050 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
1051 card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
1052 card->options.fake_broadcast = 0;
1053 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1054 card->options.fake_ll = 0;
1055 if (card->info.type == QETH_CARD_TYPE_OSN)
1056 card->options.layer2 = 1;
1057 else
1058 card->options.layer2 = 0;
1059 card->options.performance_stats = 0;
1060 card->options.rx_sg_cb = QETH_RX_SG_CB;
1061}
1062
1063/**
1064 * initialize channels, card and all state machines
1065 */
1066static int
1067qeth_setup_card(struct qeth_card *card)
1068{
1069
1070 QETH_DBF_TEXT(setup, 2, "setupcrd");
1071 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
1072
1073 card->read.state = CH_STATE_DOWN;
1074 card->write.state = CH_STATE_DOWN;
1075 card->data.state = CH_STATE_DOWN;
1076 card->state = CARD_STATE_DOWN;
1077 card->lan_online = 0;
1078 card->use_hard_stop = 0;
1079 card->dev = NULL;
1080#ifdef CONFIG_QETH_VLAN
1081 spin_lock_init(&card->vlanlock);
1082 card->vlangrp = NULL;
1083#endif
1084 spin_lock_init(&card->lock);
1085 spin_lock_init(&card->ip_lock);
1086 spin_lock_init(&card->thread_mask_lock);
1087 card->thread_start_mask = 0;
1088 card->thread_allowed_mask = 0;
1089 card->thread_running_mask = 0;
1090 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1091 INIT_LIST_HEAD(&card->ip_list);
1092 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1093 if (!card->ip_tbd_list) {
1094 QETH_DBF_TEXT(setup, 0, "iptbdnom");
1095 return -ENOMEM;
1096 }
1097 INIT_LIST_HEAD(card->ip_tbd_list);
1098 INIT_LIST_HEAD(&card->cmd_waiter_list);
1099 init_waitqueue_head(&card->wait_q);
1100	/* initial options */
1101	qeth_set_initial_options(card);
1102 /* IP address takeover */
1103 INIT_LIST_HEAD(&card->ipato.entries);
1104 card->ipato.enabled = 0;
1105 card->ipato.invert4 = 0;
1106 card->ipato.invert6 = 0;
1107 /* init QDIO stuff */
1108 qeth_init_qdio_info(card);
1109 return 0;
1110}
1111
1112static int
1113is_1920_device (struct qeth_card *card)
1114{
1115 int single_queue = 0;
1116 struct ccw_device *ccwdev;
1117 struct channelPath_dsc {
1118 u8 flags;
1119 u8 lsn;
1120 u8 desc;
1121 u8 chpid;
1122 u8 swla;
1123 u8 zeroes;
1124 u8 chla;
1125 u8 chpp;
1126 } *chp_dsc;
1127
1128 QETH_DBF_TEXT(setup, 2, "chk_1920");
1129
1130 ccwdev = card->data.ccwdev;
1131 chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
1132 if (chp_dsc != NULL) {
1133 /* CHPP field bit 6 == 1 -> single queue */
1134 single_queue = ((chp_dsc->chpp & 0x02) == 0x02);
1135 kfree(chp_dsc);
1136 }
1137 QETH_DBF_TEXT_(setup, 2, "rc:%x", single_queue);
1138 return single_queue;
1139}
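
/*
 * Note on the CHPP test above: channel-path descriptor bytes use
 * S/390 bit numbering, where bit 0 is the most significant bit of the
 * byte, so "bit 6" corresponds to mask 0x02.  A minimal standalone
 * check with an illustrative descriptor value:
 */
#include <stdio.h>

int main(void)
{
	unsigned char chpp = 0x02;	/* hypothetical CHPP byte */

	printf("single queue: %d\n", (chpp & 0x02) == 0x02);	/* 1 */
	return 0;
}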
1140
1141static int
1142qeth_determine_card_type(struct qeth_card *card)
1143{
1144 int i = 0;
1145
1146 QETH_DBF_TEXT(setup, 2, "detcdtyp");
1147
1148 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1149 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1150 while (known_devices[i][4]) {
1151 if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
1152 (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
1153 card->info.type = known_devices[i][4];
1154 card->qdio.no_out_queues = known_devices[i][8];
1155 card->info.is_multicast_different = known_devices[i][9];
1156 if (is_1920_device(card)) {
1157			PRINT_INFO("Priority Queueing not possible "
1158				   "due to hardware limitations!\n");
1159 card->qdio.no_out_queues = 1;
1160 card->qdio.default_out_queue = 0;
1161 }
1162 return 0;
1163 }
1164 i++;
1165 }
1166 card->info.type = QETH_CARD_TYPE_UNKNOWN;
1167 PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card));
1168 return -ENOENT;
1169}
1170
1171static int
1172qeth_probe_device(struct ccwgroup_device *gdev)
1173{
1174 struct qeth_card *card;
1175 struct device *dev;
1176 unsigned long flags;
1177 int rc;
1178
1179 QETH_DBF_TEXT(setup, 2, "probedev");
1180
1181 dev = &gdev->dev;
1182 if (!get_device(dev))
1183 return -ENODEV;
1184
1185 QETH_DBF_TEXT_(setup, 2, "%s", gdev->dev.bus_id);
1186
1187 card = qeth_alloc_card();
1188 if (!card) {
1189 put_device(dev);
1190 QETH_DBF_TEXT_(setup, 2, "1err%d", -ENOMEM);
1191 return -ENOMEM;
1192 }
1193 card->read.ccwdev = gdev->cdev[0];
1194 card->write.ccwdev = gdev->cdev[1];
1195 card->data.ccwdev = gdev->cdev[2];
1196 gdev->dev.driver_data = card;
1197 card->gdev = gdev;
1198 gdev->cdev[0]->handler = qeth_irq;
1199 gdev->cdev[1]->handler = qeth_irq;
1200 gdev->cdev[2]->handler = qeth_irq;
1201
1202 if ((rc = qeth_determine_card_type(card))){
1203 PRINT_WARN("%s: not a valid card type\n", __func__);
1204 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
1205 put_device(dev);
1206 qeth_free_card(card);
1207 return rc;
1208 }
1209 if ((rc = qeth_setup_card(card))){
1210 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1211 put_device(dev);
1212 qeth_free_card(card);
1213 return rc;
1214 }
1215 rc = qeth_create_device_attributes(dev);
1216 if (rc) {
1217 put_device(dev);
1218 qeth_free_card(card);
1219 return rc;
1220 }
1221 /* insert into our internal list */
1222 write_lock_irqsave(&qeth_card_list.rwlock, flags);
1223 list_add_tail(&card->list, &qeth_card_list.list);
1224 write_unlock_irqrestore(&qeth_card_list.rwlock, flags);
1225 return rc;
1226}
1227
1228
1229static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
1230 int *length)
1231{
1232 struct ciw *ciw;
1233 char *rcd_buf;
1234 int ret;
1235 struct qeth_channel *channel = &card->data;
1236 unsigned long flags;
1237
1238 /*
1239 * scan for RCD command in extended SenseID data
1240 */
1241 ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
1242 if (!ciw || ciw->cmd == 0)
1243 return -EOPNOTSUPP;
1244 rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
1245 if (!rcd_buf)
1246 return -ENOMEM;
1247
1248 channel->ccw.cmd_code = ciw->cmd;
1249 channel->ccw.cda = (__u32) __pa (rcd_buf);
1250 channel->ccw.count = ciw->count;
1251 channel->ccw.flags = CCW_FLAG_SLI;
1252 channel->state = CH_STATE_RCD;
1253 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1254 ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
1255 QETH_RCD_PARM, LPM_ANYPATH, 0,
1256 QETH_RCD_TIMEOUT);
1257 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1258 if (!ret)
1259 wait_event(card->wait_q,
1260 (channel->state == CH_STATE_RCD_DONE ||
1261 channel->state == CH_STATE_DOWN));
1262 if (channel->state == CH_STATE_DOWN)
1263 ret = -EIO;
1264 else
1265 channel->state = CH_STATE_DOWN;
1266 if (ret) {
1267 kfree(rcd_buf);
1268 *buffer = NULL;
1269 *length = 0;
1270 } else {
1271 *length = ciw->count;
1272 *buffer = rcd_buf;
1273 }
1274 return ret;
1275}
1276
1277static int
1278qeth_get_unitaddr(struct qeth_card *card)
1279{
1280 int length;
1281 char *prcd;
1282 int rc;
1283
1284 QETH_DBF_TEXT(setup, 2, "getunit");
1285 rc = qeth_read_conf_data(card, (void **) &prcd, &length);
1286 if (rc) {
1287 PRINT_ERR("qeth_read_conf_data for device %s returned %i\n",
1288 CARD_DDEV_ID(card), rc);
1289 return rc;
1290 }
1291 card->info.chpid = prcd[30];
1292 card->info.unit_addr2 = prcd[31];
1293 card->info.cula = prcd[63];
1294 card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
1295 (prcd[0x11] == _ascebc['M']));
1296 kfree(prcd);
1297 return 0;
1298}
1299
1300static void
1301qeth_init_tokens(struct qeth_card *card)
1302{
1303 card->token.issuer_rm_w = 0x00010103UL;
1304 card->token.cm_filter_w = 0x00010108UL;
1305 card->token.cm_connection_w = 0x0001010aUL;
1306 card->token.ulp_filter_w = 0x0001010bUL;
1307 card->token.ulp_connection_w = 0x0001010dUL;
1308}
1309
1310static inline __u16
1311raw_devno_from_bus_id(char *id)
1312{
1313 id += (strlen(id) - 4);
1314 return (__u16) simple_strtoul(id, &id, 16);
1315}
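
/*
 * CCW bus IDs have the form "0.0.4711"; the helper above takes the
 * last four characters and parses them as hex.  The same parse as a
 * standalone userspace sketch (simple_strtoul is kernel-only, so
 * strtoul stands in for it):
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned short devno_from_bus_id(const char *id)
{
	return (unsigned short)strtoul(id + strlen(id) - 4, NULL, 16);
}

int main(void)
{
	printf("0x%04x\n", devno_from_bus_id("0.0.4711"));	/* 0x4711 */
	return 0;
}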
1316/**
1317 * setup channel
1318 */
1319static void
1320qeth_setup_ccw(struct qeth_channel *channel,unsigned char *iob, __u32 len)
1321{
1322 struct qeth_card *card;
1323
1324 QETH_DBF_TEXT(trace, 4, "setupccw");
1325 card = CARD_FROM_CDEV(channel->ccwdev);
1326 if (channel == &card->read)
1327 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1328 else
1329 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1330 channel->ccw.count = len;
1331 channel->ccw.cda = (__u32) __pa(iob);
1332}
1333
1334/**
1335 * get a free buffer for CCWs (IDX activation, lancmds, ipassists...)
1336 */
1337static struct qeth_cmd_buffer *
1338__qeth_get_buffer(struct qeth_channel *channel)
1339{
1340 __u8 index;
1341
1342 QETH_DBF_TEXT(trace, 6, "getbuff");
1343 index = channel->io_buf_no;
1344 do {
1345 if (channel->iob[index].state == BUF_STATE_FREE) {
1346 channel->iob[index].state = BUF_STATE_LOCKED;
1347 channel->io_buf_no = (channel->io_buf_no + 1) %
1348 QETH_CMD_BUFFER_NO;
1349 memset(channel->iob[index].data, 0, QETH_BUFSIZE);
1350 return channel->iob + index;
1351 }
1352 index = (index + 1) % QETH_CMD_BUFFER_NO;
1353 } while(index != channel->io_buf_no);
1354
1355 return NULL;
1356}
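
/*
 * The allocator above scans the ring starting at io_buf_no and wraps
 * modulo QETH_CMD_BUFFER_NO until it either finds a free slot or
 * arrives back where it started.  A standalone model, with the buffer
 * states reduced to plain ints and the iob_lock omitted:
 */
#include <stdio.h>

#define NBUF 8
enum { FREE, LOCKED };

static int get_buffer(int state[], int *next)
{
	int index = *next;

	do {
		if (state[index] == FREE) {
			state[index] = LOCKED;
			*next = (*next + 1) % NBUF;	/* advance start point */
			return index;
		}
		index = (index + 1) % NBUF;
	} while (index != *next);
	return -1;				/* every buffer is in use */
}

int main(void)
{
	int state[NBUF] = { LOCKED, LOCKED };	/* rest default to FREE */
	int next = 0;

	printf("%d\n", get_buffer(state, &next));	/* 2 */
	printf("%d\n", get_buffer(state, &next));	/* 3 */
	return 0;
}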
1357
1358/**
1359 * release command buffer
1360 */
1361static void
1362qeth_release_buffer(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1363{
1364 unsigned long flags;
1365
1366 QETH_DBF_TEXT(trace, 6, "relbuff");
1367 spin_lock_irqsave(&channel->iob_lock, flags);
1368 memset(iob->data, 0, QETH_BUFSIZE);
1369 iob->state = BUF_STATE_FREE;
1370 iob->callback = qeth_send_control_data_cb;
1371 iob->rc = 0;
1372 spin_unlock_irqrestore(&channel->iob_lock, flags);
1373}
1374
1375static struct qeth_cmd_buffer *
1376qeth_get_buffer(struct qeth_channel *channel)
1377{
1378 struct qeth_cmd_buffer *buffer = NULL;
1379 unsigned long flags;
1380
1381 spin_lock_irqsave(&channel->iob_lock, flags);
1382 buffer = __qeth_get_buffer(channel);
1383 spin_unlock_irqrestore(&channel->iob_lock, flags);
1384 return buffer;
1385}
1386
1387static struct qeth_cmd_buffer *
1388qeth_wait_for_buffer(struct qeth_channel *channel)
1389{
1390 struct qeth_cmd_buffer *buffer;
1391 wait_event(channel->wait_q,
1392 ((buffer = qeth_get_buffer(channel)) != NULL));
1393 return buffer;
1394}
1395
1396static void
1397qeth_clear_cmd_buffers(struct qeth_channel *channel)
1398{
1399 int cnt;
1400
1401 for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1402 qeth_release_buffer(channel,&channel->iob[cnt]);
1403 channel->buf_no = 0;
1404 channel->io_buf_no = 0;
1405}
1406
1407/**
1408 * start IDX for read and write channel
1409 */
1410static int
1411qeth_idx_activate_get_answer(struct qeth_channel *channel,
1412 void (*idx_reply_cb)(struct qeth_channel *,
1413 struct qeth_cmd_buffer *))
1414{
1415 struct qeth_cmd_buffer *iob;
1416 unsigned long flags;
1417 int rc;
1418 struct qeth_card *card;
1419
1420 QETH_DBF_TEXT(setup, 2, "idxanswr");
1421 card = CARD_FROM_CDEV(channel->ccwdev);
1422 iob = qeth_get_buffer(channel);
1423 iob->callback = idx_reply_cb;
1424 memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
1425 channel->ccw.count = QETH_BUFSIZE;
1426 channel->ccw.cda = (__u32) __pa(iob->data);
1427
1428 wait_event(card->wait_q,
1429 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1430 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1431 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1432 rc = ccw_device_start(channel->ccwdev,
1433 &channel->ccw,(addr_t) iob, 0, 0);
1434 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1435
1436 if (rc) {
1437		PRINT_ERR("qeth: Error2 in activating channel. rc=%d\n", rc);
1438 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
1439 atomic_set(&channel->irq_pending, 0);
1440 wake_up(&card->wait_q);
1441 return rc;
1442 }
1443 rc = wait_event_interruptible_timeout(card->wait_q,
1444 channel->state == CH_STATE_UP, QETH_TIMEOUT);
1445 if (rc == -ERESTARTSYS)
1446 return rc;
1447 if (channel->state != CH_STATE_UP){
1448 rc = -ETIME;
1449 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
1450 qeth_clear_cmd_buffers(channel);
1451 } else
1452 rc = 0;
1453 return rc;
1454}
1455
1456static int
1457qeth_idx_activate_channel(struct qeth_channel *channel,
1458 void (*idx_reply_cb)(struct qeth_channel *,
1459 struct qeth_cmd_buffer *))
1460{
1461 struct qeth_card *card;
1462 struct qeth_cmd_buffer *iob;
1463 unsigned long flags;
1464 __u16 temp;
1465 int rc;
1466
1467 card = CARD_FROM_CDEV(channel->ccwdev);
1468
1469 QETH_DBF_TEXT(setup, 2, "idxactch");
1470
1471 iob = qeth_get_buffer(channel);
1472 iob->callback = idx_reply_cb;
1473 memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
1474 channel->ccw.count = IDX_ACTIVATE_SIZE;
1475 channel->ccw.cda = (__u32) __pa(iob->data);
1476 if (channel == &card->write) {
1477 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
1478 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1479 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1480 card->seqno.trans_hdr++;
1481 } else {
1482 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
1483 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1484 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1485 }
1486 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1487 &card->token.issuer_rm_w,QETH_MPC_TOKEN_LENGTH);
1488 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
1489 &card->info.func_level,sizeof(__u16));
1490 temp = raw_devno_from_bus_id(CARD_DDEV_ID(card));
1491 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2);
1492 temp = (card->info.cula << 8) + card->info.unit_addr2;
1493 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
1494
1495 wait_event(card->wait_q,
1496 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1497 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1498 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1499 rc = ccw_device_start(channel->ccwdev,
1500 &channel->ccw,(addr_t) iob, 0, 0);
1501 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1502
1503 if (rc) {
1504 PRINT_ERR("qeth: Error1 in activating channel. rc=%d\n",rc);
1505 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
1506 atomic_set(&channel->irq_pending, 0);
1507 wake_up(&card->wait_q);
1508 return rc;
1509 }
1510 rc = wait_event_interruptible_timeout(card->wait_q,
1511 channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
1512 if (rc == -ERESTARTSYS)
1513 return rc;
1514 if (channel->state != CH_STATE_ACTIVATING) {
1515 PRINT_WARN("qeth: IDX activate timed out!\n");
1516 QETH_DBF_TEXT_(setup, 2, "2err%d", -ETIME);
1517 qeth_clear_cmd_buffers(channel);
1518 return -ETIME;
1519 }
1520 return qeth_idx_activate_get_answer(channel,idx_reply_cb);
1521}
1522
1523static int
1524qeth_peer_func_level(int level)
1525{
1526 if ((level & 0xff) == 8)
1527 return (level & 0xff) + 0x400;
1528 if (((level >> 8) & 3) == 1)
1529 return (level & 0xff) + 0x200;
1530 return level;
1531}
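
/*
 * Worked examples for the peer function-level mapping above (the
 * input values are illustrative, not taken from real hardware):
 */
#include <assert.h>

static int peer_func_level(int level)	/* same logic as above */
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

int main(void)
{
	assert(peer_func_level(0x0008) == 0x0408);	/* low byte is 8 */
	assert(peer_func_level(0x0101) == 0x0201);	/* bits 8-9 equal 1 */
	assert(peer_func_level(0x4001) == 0x4001);	/* unchanged */
	return 0;
}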
1532
1533static void
1534qeth_idx_write_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1535{
1536 struct qeth_card *card;
1537 __u16 temp;
1538
1539 QETH_DBF_TEXT(setup ,2, "idxwrcb");
1540
1541 if (channel->state == CH_STATE_DOWN) {
1542 channel->state = CH_STATE_ACTIVATING;
1543 goto out;
1544 }
1545 card = CARD_FROM_CDEV(channel->ccwdev);
1546
1547 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1548 if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
1549 PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
1550 "adapter exclusively used by another host\n",
1551 CARD_WDEV_ID(card));
1552 else
1553 PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
1554 "negative reply\n", CARD_WDEV_ID(card));
1555 goto out;
1556 }
1557 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1558 if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
1559 PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
1560 "function level mismatch "
1561 "(sent: 0x%x, received: 0x%x)\n",
1562 CARD_WDEV_ID(card), card->info.func_level, temp);
1563 goto out;
1564 }
1565 channel->state = CH_STATE_UP;
1566out:
1567 qeth_release_buffer(channel, iob);
1568}
1569
1570static int
1571qeth_check_idx_response(unsigned char *buffer)
1572{
1573 if (!buffer)
1574 return 0;
1575
1576 QETH_DBF_HEX(control, 2, buffer, QETH_DBF_CONTROL_LEN);
1577 if ((buffer[2] & 0xc0) == 0xc0) {
1578 PRINT_WARN("received an IDX TERMINATE "
1579 "with cause code 0x%02x%s\n",
1580 buffer[4],
1581 ((buffer[4] == 0x22) ?
1582 " -- try another portname" : ""));
1583 QETH_DBF_TEXT(trace, 2, "ckidxres");
1584 QETH_DBF_TEXT(trace, 2, " idxterm");
1585 QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO);
1586 return -EIO;
1587 }
1588 return 0;
1589}
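
/*
 * The check above looks at only two bytes of the reply: byte 2 holds
 * flags (both top bits set means IDX TERMINATE) and byte 4 the cause
 * code (0x22 hints at a portname problem).  A minimal decode with an
 * illustrative buffer:
 */
#include <stdio.h>

int main(void)
{
	unsigned char buf[8] = { 0, 0, 0xc0, 0, 0x22 };	/* hypothetical reply */

	if ((buf[2] & 0xc0) == 0xc0)
		printf("IDX TERMINATE, cause 0x%02x%s\n", buf[4],
		       (buf[4] == 0x22) ? " (try another portname)" : "");
	return 0;
}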
1590
1591static void
1592qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
1593{
1594 struct qeth_card *card;
1595 __u16 temp;
1596
1597 QETH_DBF_TEXT(setup , 2, "idxrdcb");
1598 if (channel->state == CH_STATE_DOWN) {
1599 channel->state = CH_STATE_ACTIVATING;
1600 goto out;
1601 }
1602
1603 card = CARD_FROM_CDEV(channel->ccwdev);
1604 if (qeth_check_idx_response(iob->data)) {
1605 goto out;
1606 }
1607 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1608 if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
1609 PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
1610 "adapter exclusively used by another host\n",
1611 CARD_RDEV_ID(card));
1612 else
1613 PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
1614 "negative reply\n", CARD_RDEV_ID(card));
1615 goto out;
1616 }
1617
1618/**
1619 * temporary fix for microcode bug
1620 * to revert it, replace OR with AND
1621 */
1622 if ( (!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
1623 (card->info.type == QETH_CARD_TYPE_OSAE) )
1624 card->info.portname_required = 1;
1625
1626 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1627 if (temp != qeth_peer_func_level(card->info.func_level)) {
1628 PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
1629 "level mismatch (sent: 0x%x, received: 0x%x)\n",
1630 CARD_RDEV_ID(card), card->info.func_level, temp);
1631 goto out;
1632 }
1633 memcpy(&card->token.issuer_rm_r,
1634 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1635 QETH_MPC_TOKEN_LENGTH);
1636 memcpy(&card->info.mcl_level[0],
1637 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
1638 channel->state = CH_STATE_UP;
1639out:
1640 qeth_release_buffer(channel,iob);
1641}
1642
1643static int
1644qeth_issue_next_read(struct qeth_card *card)
1645{
1646 int rc;
1647 struct qeth_cmd_buffer *iob;
1648
1649 QETH_DBF_TEXT(trace,5,"issnxrd");
1650 if (card->read.state != CH_STATE_UP)
1651 return -EIO;
1652 iob = qeth_get_buffer(&card->read);
1653 if (!iob) {
1654 PRINT_WARN("issue_next_read failed: no iob available!\n");
1655 return -ENOMEM;
1656 }
1657 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
1658 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1659 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
1660 (addr_t) iob, 0, 0);
1661 if (rc) {
1662 PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc);
1663 atomic_set(&card->read.irq_pending, 0);
1664 qeth_schedule_recovery(card);
1665 wake_up(&card->wait_q);
1666 }
1667 return rc;
1668}
1669
1670static struct qeth_reply *
1671qeth_alloc_reply(struct qeth_card *card)
1672{
1673 struct qeth_reply *reply;
1674
1675 reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
1676 if (reply){
1677 atomic_set(&reply->refcnt, 1);
1678 atomic_set(&reply->received, 0);
1679 reply->card = card;
1680	}
1681 return reply;
1682}
1683
1684static void
1685qeth_get_reply(struct qeth_reply *reply)
1686{
1687 WARN_ON(atomic_read(&reply->refcnt) <= 0);
1688 atomic_inc(&reply->refcnt);
1689}
1690
1691static void
1692qeth_put_reply(struct qeth_reply *reply)
1693{
1694 WARN_ON(atomic_read(&reply->refcnt) <= 0);
1695 if (atomic_dec_and_test(&reply->refcnt))
1696 kfree(reply);
1697}
1698
1699static void
1700qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, struct qeth_card *card)
1701{
1702 int rc;
1703 int com;
1704 char * ipa_name;
1705
1706 com = cmd->hdr.command;
1707 rc = cmd->hdr.return_code;
1708 ipa_name = qeth_get_ipa_cmd_name(com);
1709
1710 PRINT_ERR("%s(x%X) for %s returned x%X \"%s\"\n", ipa_name, com,
1711 QETH_CARD_IFNAME(card), rc, qeth_get_ipa_msg(rc));
1712}
1713
1714static struct qeth_ipa_cmd *
1715qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1716{
1717 struct qeth_ipa_cmd *cmd = NULL;
1718
1719 QETH_DBF_TEXT(trace,5,"chkipad");
1720 if (IS_IPA(iob->data)){
1721 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
1722 if (IS_IPA_REPLY(cmd)) {
1723 if (cmd->hdr.return_code)
1724 qeth_issue_ipa_msg(cmd, card);
1725 return cmd;
1726 }
1727 else {
1728 switch (cmd->hdr.command) {
1729 case IPA_CMD_STOPLAN:
1730 PRINT_WARN("Link failure on %s (CHPID 0x%X) - "
1731 "there is a network problem or "
1732 "someone pulled the cable or "
1733 "disabled the port.\n",
1734 QETH_CARD_IFNAME(card),
1735 card->info.chpid);
1736 card->lan_online = 0;
1737 if (card->dev && netif_carrier_ok(card->dev))
1738 netif_carrier_off(card->dev);
1739 return NULL;
1740 case IPA_CMD_STARTLAN:
1741 PRINT_INFO("Link reestablished on %s "
1742 "(CHPID 0x%X). Scheduling "
1743 "IP address reset.\n",
1744 QETH_CARD_IFNAME(card),
1745 card->info.chpid);
1746 netif_carrier_on(card->dev);
1747 qeth_schedule_recovery(card);
1748 return NULL;
1749 case IPA_CMD_MODCCID:
1750 return cmd;
1751 case IPA_CMD_REGISTER_LOCAL_ADDR:
1752 QETH_DBF_TEXT(trace,3, "irla");
1753 break;
1754 case IPA_CMD_UNREGISTER_LOCAL_ADDR:
1755 QETH_DBF_TEXT(trace,3, "urla");
1756 break;
1757 default:
1758 PRINT_WARN("Received data is IPA "
1759 "but not a reply!\n");
1760 break;
1761 }
1762 }
1763 }
1764 return cmd;
1765}
1766
1767/**
1768 * wake all waiting ipa commands
1769 */
1770static void
1771qeth_clear_ipacmd_list(struct qeth_card *card)
1772{
1773 struct qeth_reply *reply, *r;
1774 unsigned long flags;
1775
1776 QETH_DBF_TEXT(trace, 4, "clipalst");
1777
1778 spin_lock_irqsave(&card->lock, flags);
1779 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1780 qeth_get_reply(reply);
1781 reply->rc = -EIO;
1782 atomic_inc(&reply->received);
1783 list_del_init(&reply->list);
1784 wake_up(&reply->wait_q);
1785 qeth_put_reply(reply);
1786 }
1787 spin_unlock_irqrestore(&card->lock, flags);
1788}
1789
1790static void
1791qeth_send_control_data_cb(struct qeth_channel *channel,
1792 struct qeth_cmd_buffer *iob)
1793{
1794 struct qeth_card *card;
1795 struct qeth_reply *reply, *r;
1796 struct qeth_ipa_cmd *cmd;
1797 unsigned long flags;
1798 int keep_reply;
1799
1800 QETH_DBF_TEXT(trace,4,"sndctlcb");
1801
1802 card = CARD_FROM_CDEV(channel->ccwdev);
1803 if (qeth_check_idx_response(iob->data)) {
1804 qeth_clear_ipacmd_list(card);
1805 qeth_schedule_recovery(card);
1806 goto out;
1807 }
1808
1809 cmd = qeth_check_ipa_data(card, iob);
1810 if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
1811 goto out;
1812	/* in case of OSN: check if cmd is set */
1813 if (card->info.type == QETH_CARD_TYPE_OSN &&
1814 cmd &&
1815 cmd->hdr.command != IPA_CMD_STARTLAN &&
1816 card->osn_info.assist_cb != NULL) {
1817 card->osn_info.assist_cb(card->dev, cmd);
1818 goto out;
1819 }
1820
1821 spin_lock_irqsave(&card->lock, flags);
1822 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1823 if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
1824 ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
1825 qeth_get_reply(reply);
1826 list_del_init(&reply->list);
1827 spin_unlock_irqrestore(&card->lock, flags);
1828 keep_reply = 0;
1829 if (reply->callback != NULL) {
1830 if (cmd) {
1831 reply->offset = (__u16)((char*)cmd -
1832 (char *)iob->data);
1833 keep_reply = reply->callback(card,
1834 reply,
1835 (unsigned long)cmd);
1836 } else
1837 keep_reply = reply->callback(card,
1838 reply,
1839 (unsigned long)iob);
1840 }
1841 if (cmd)
1842 reply->rc = (u16) cmd->hdr.return_code;
1843 else if (iob->rc)
1844 reply->rc = iob->rc;
1845 if (keep_reply) {
1846 spin_lock_irqsave(&card->lock, flags);
1847 list_add_tail(&reply->list,
1848 &card->cmd_waiter_list);
1849 spin_unlock_irqrestore(&card->lock, flags);
1850 } else {
1851 atomic_inc(&reply->received);
1852 wake_up(&reply->wait_q);
1853 }
1854 qeth_put_reply(reply);
1855 goto out;
1856 }
1857 }
1858 spin_unlock_irqrestore(&card->lock, flags);
1859out:
1860 memcpy(&card->seqno.pdu_hdr_ack,
1861 QETH_PDU_HEADER_SEQ_NO(iob->data),
1862 QETH_SEQ_NO_LENGTH);
1863 qeth_release_buffer(channel,iob);
1864}
1865
1866static void
1867qeth_prepare_control_data(struct qeth_card *card, int len,
1868 struct qeth_cmd_buffer *iob)
1869{
1870 qeth_setup_ccw(&card->write,iob->data,len);
1871 iob->callback = qeth_release_buffer;
1872
1873 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1874 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1875 card->seqno.trans_hdr++;
1876 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
1877 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
1878 card->seqno.pdu_hdr++;
1879 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
1880 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
1881 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1882}
1883
1884static int
1885qeth_send_control_data(struct qeth_card *card, int len,
1886 struct qeth_cmd_buffer *iob,
1887 int (*reply_cb)
1888 (struct qeth_card *, struct qeth_reply*, unsigned long),
1889 void *reply_param)
1890
1891{
1892 int rc;
1893 unsigned long flags;
1894 struct qeth_reply *reply = NULL;
1895 unsigned long timeout;
1896
1897 QETH_DBF_TEXT(trace, 2, "sendctl");
1898
1899 reply = qeth_alloc_reply(card);
1900 if (!reply) {
1901		PRINT_WARN("Could not alloc qeth_reply!\n");
1902 return -ENOMEM;
1903 }
1904 reply->callback = reply_cb;
1905 reply->param = reply_param;
1906 if (card->state == CARD_STATE_DOWN)
1907 reply->seqno = QETH_IDX_COMMAND_SEQNO;
1908 else
1909 reply->seqno = card->seqno.ipa++;
1910 init_waitqueue_head(&reply->wait_q);
1911 spin_lock_irqsave(&card->lock, flags);
1912 list_add_tail(&reply->list, &card->cmd_waiter_list);
1913 spin_unlock_irqrestore(&card->lock, flags);
1914 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1915
1916 while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
1917 qeth_prepare_control_data(card, len, iob);
1918
1919 if (IS_IPA(iob->data))
1920 timeout = jiffies + QETH_IPA_TIMEOUT;
1921 else
1922 timeout = jiffies + QETH_TIMEOUT;
1923
1924 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1925 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1926 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1927 (addr_t) iob, 0, 0);
1928 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1929 if (rc){
1930 PRINT_WARN("qeth_send_control_data: "
1931 "ccw_device_start rc = %i\n", rc);
1932 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
1933 spin_lock_irqsave(&card->lock, flags);
1934 list_del_init(&reply->list);
1935 qeth_put_reply(reply);
1936 spin_unlock_irqrestore(&card->lock, flags);
1937 qeth_release_buffer(iob->channel, iob);
1938 atomic_set(&card->write.irq_pending, 0);
1939 wake_up(&card->wait_q);
1940 return rc;
1941 }
1942 while (!atomic_read(&reply->received)) {
1943 if (time_after(jiffies, timeout)) {
1944 spin_lock_irqsave(&reply->card->lock, flags);
1945 list_del_init(&reply->list);
1946 spin_unlock_irqrestore(&reply->card->lock, flags);
1947 reply->rc = -ETIME;
1948 atomic_inc(&reply->received);
1949 wake_up(&reply->wait_q);
1950 }
1951 cpu_relax();
1952	}
1953 rc = reply->rc;
1954 qeth_put_reply(reply);
1955 return rc;
1956}
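
/*
 * The wait loop above polls reply->received against a jiffies
 * deadline via time_after().  That macro boils down to a signed
 * subtraction, which stays correct when the counter wraps; a
 * standalone sketch of the trick:
 */
#include <stdio.h>

static int deadline_passed(unsigned long now, unsigned long deadline)
{
	return (long)(deadline - now) < 0;	/* wraparound-safe compare */
}

int main(void)
{
	unsigned long now = (unsigned long)-5;	/* counter about to wrap */
	unsigned long deadline = now + 10;	/* wraps past zero */

	printf("%d\n", deadline_passed(now, deadline));		/* 0 */
	printf("%d\n", deadline_passed(now + 20, deadline));	/* 1 */
	return 0;
}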
1957
1958static int
1959qeth_osn_send_control_data(struct qeth_card *card, int len,
1960 struct qeth_cmd_buffer *iob)
1961{
1962 unsigned long flags;
1963 int rc = 0;
1964
1965 QETH_DBF_TEXT(trace, 5, "osndctrd");
1966
1967 wait_event(card->wait_q,
1968 atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
1969 qeth_prepare_control_data(card, len, iob);
1970 QETH_DBF_TEXT(trace, 6, "osnoirqp");
1971 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1972 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1973 (addr_t) iob, 0, 0);
1974 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1975 if (rc){
1976 PRINT_WARN("qeth_osn_send_control_data: "
1977 "ccw_device_start rc = %i\n", rc);
1978 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
1979 qeth_release_buffer(iob->channel, iob);
1980 atomic_set(&card->write.irq_pending, 0);
1981 wake_up(&card->wait_q);
1982 }
1983 return rc;
1984}
1985
1986static inline void
1987qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
1988 char prot_type)
1989{
1990 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
1991 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data),&prot_type,1);
1992 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
1993 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
1994}
1995
1996static int
1997qeth_osn_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
1998 int data_len)
1999{
2000 u16 s1, s2;
2001
2002 QETH_DBF_TEXT(trace,4,"osndipa");
2003
2004 qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
2005 s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
2006 s2 = (u16)data_len;
2007 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
2008 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
2009 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
2010 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
2011 return qeth_osn_send_control_data(card, s1, iob);
2012}
2013
2014static int
2015qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2016 int (*reply_cb)
2017 (struct qeth_card *,struct qeth_reply*, unsigned long),
2018 void *reply_param)
2019{
2020 int rc;
2021 char prot_type;
2022
2023 QETH_DBF_TEXT(trace,4,"sendipa");
2024
2025 if (card->options.layer2)
2026 if (card->info.type == QETH_CARD_TYPE_OSN)
2027 prot_type = QETH_PROT_OSN2;
2028 else
2029 prot_type = QETH_PROT_LAYER2;
2030 else
2031 prot_type = QETH_PROT_TCPIP;
2032 qeth_prepare_ipa_cmd(card,iob,prot_type);
2033 rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob,
2034 reply_cb, reply_param);
2035 return rc;
2036}
2037
2038
2039static int
2040qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2041 unsigned long data)
2042{
2043 struct qeth_cmd_buffer *iob;
2044
2045 QETH_DBF_TEXT(setup, 2, "cmenblcb");
2046
2047 iob = (struct qeth_cmd_buffer *) data;
2048 memcpy(&card->token.cm_filter_r,
2049 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2050 QETH_MPC_TOKEN_LENGTH);
2051 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2052 return 0;
2053}
2054
2055static int
2056qeth_cm_enable(struct qeth_card *card)
2057{
2058 int rc;
2059 struct qeth_cmd_buffer *iob;
2060
2061 QETH_DBF_TEXT(setup,2,"cmenable");
2062
2063 iob = qeth_wait_for_buffer(&card->write);
2064 memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
2065 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2066 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2067 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2068 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2069
2070 rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
2071 qeth_cm_enable_cb, NULL);
2072 return rc;
2073}
2074
2075static int
2076qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2077 unsigned long data)
2078{
2079
2080 struct qeth_cmd_buffer *iob;
2081
2082 QETH_DBF_TEXT(setup, 2, "cmsetpcb");
2083
2084 iob = (struct qeth_cmd_buffer *) data;
2085 memcpy(&card->token.cm_connection_r,
2086 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2087 QETH_MPC_TOKEN_LENGTH);
2088 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2089 return 0;
2090}
2091
2092static int
2093qeth_cm_setup(struct qeth_card *card)
2094{
2095 int rc;
2096 struct qeth_cmd_buffer *iob;
2097
2098 QETH_DBF_TEXT(setup,2,"cmsetup");
2099
2100 iob = qeth_wait_for_buffer(&card->write);
2101 memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
2102 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2103 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2104 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2105 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2106 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2107 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2108 rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
2109 qeth_cm_setup_cb, NULL);
2110 return rc;
2111
2112}
2113
2114static int
2115qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2116 unsigned long data)
2117{
2118
2119 __u16 mtu, framesize;
2120 __u16 len;
2121 __u8 link_type;
2122 struct qeth_cmd_buffer *iob;
2123
2124 QETH_DBF_TEXT(setup, 2, "ulpenacb");
2125
2126 iob = (struct qeth_cmd_buffer *) data;
2127 memcpy(&card->token.ulp_filter_r,
2128 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2129 QETH_MPC_TOKEN_LENGTH);
2130 if (qeth_get_mtu_out_of_mpc(card->info.type)) {
2131 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2132 mtu = qeth_get_mtu_outof_framesize(framesize);
2133 if (!mtu) {
2134 iob->rc = -EINVAL;
2135 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2136 return 0;
2137 }
2138 card->info.max_mtu = mtu;
2139 card->info.initial_mtu = mtu;
2140 card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
2141 } else {
2142 card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
2143 card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
2144 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
2145 }
2146
2147 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2148 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2149 memcpy(&link_type,
2150 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2151 card->info.link_type = link_type;
2152 } else
2153 card->info.link_type = 0;
2154 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2155 return 0;
2156}
2157
2158static int
2159qeth_ulp_enable(struct qeth_card *card)
2160{
2161 int rc;
2162 char prot_type;
2163 struct qeth_cmd_buffer *iob;
2164
2165	/* FIXME: trace view callbacks */
2166 QETH_DBF_TEXT(setup,2,"ulpenabl");
2167
2168 iob = qeth_wait_for_buffer(&card->write);
2169 memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
2170
2171 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
2172 (__u8) card->info.portno;
2173 if (card->options.layer2)
2174 if (card->info.type == QETH_CARD_TYPE_OSN)
2175 prot_type = QETH_PROT_OSN2;
2176 else
2177 prot_type = QETH_PROT_LAYER2;
2178 else
2179 prot_type = QETH_PROT_TCPIP;
2180
2181 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data),&prot_type,1);
2182 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2183 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2184 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2185 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2186 memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
2187 card->info.portname, 9);
2188 rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
2189 qeth_ulp_enable_cb, NULL);
2190 return rc;
2191
2192}
2193
2194static int
2195qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2196 unsigned long data)
2197{
2198 struct qeth_cmd_buffer *iob;
2199
2200 QETH_DBF_TEXT(setup, 2, "ulpstpcb");
2201
2202 iob = (struct qeth_cmd_buffer *) data;
2203 memcpy(&card->token.ulp_connection_r,
2204 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2205 QETH_MPC_TOKEN_LENGTH);
2206 QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
2207 return 0;
2208}
2209
2210static int
2211qeth_ulp_setup(struct qeth_card *card)
2212{
2213 int rc;
2214 __u16 temp;
2215 struct qeth_cmd_buffer *iob;
2216 struct ccw_dev_id dev_id;
2217
2218 QETH_DBF_TEXT(setup,2,"ulpsetup");
2219
2220 iob = qeth_wait_for_buffer(&card->write);
2221 memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
2222
2223 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2224 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2225 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2226 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2227 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2228 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2229
2230 ccw_device_get_id(CARD_DDEV(card), &dev_id);
2231 memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
2232 temp = (card->info.cula << 8) + card->info.unit_addr2;
2233 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2234 rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
2235 qeth_ulp_setup_cb, NULL);
2236 return rc;
2237}
2238
2239static inline int
2240qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2241 unsigned int siga_error, const char *dbftext)
2242{
2243 if (qdio_error || siga_error) {
2244 QETH_DBF_TEXT(trace, 2, dbftext);
2245 QETH_DBF_TEXT(qerr, 2, dbftext);
2246 QETH_DBF_TEXT_(qerr, 2, " F15=%02X",
2247 buf->element[15].flags & 0xff);
2248 QETH_DBF_TEXT_(qerr, 2, " F14=%02X",
2249 buf->element[14].flags & 0xff);
2250 QETH_DBF_TEXT_(qerr, 2, " qerr=%X", qdio_error);
2251 QETH_DBF_TEXT_(qerr, 2, " serr=%X", siga_error);
2252 return 1;
2253 }
2254 return 0;
2255}
2256
2257static struct sk_buff *
2258qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
2259{
2260 struct sk_buff* skb;
2261 int add_len;
2262
2263 add_len = 0;
2264 if (hdr->hdr.osn.id == QETH_HEADER_TYPE_OSN)
2265 add_len = sizeof(struct qeth_hdr);
2266#ifdef CONFIG_QETH_VLAN
2267 else
2268 add_len = VLAN_HLEN;
2269#endif
2270 skb = dev_alloc_skb(length + add_len);
2271 if (skb && add_len)
2272 skb_reserve(skb, add_len);
2273 return skb;
2274}
2275
2276static inline int
2277qeth_create_skb_frag(struct qdio_buffer_element *element,
2278 struct sk_buff **pskb,
2279 int offset, int *pfrag, int data_len)
2280{
2281 struct page *page = virt_to_page(element->addr);
2282 if (*pfrag == 0) {
2283 /* the upper protocol layers assume that there is data in the
2284 * skb itself. Copy a small amount (64 bytes) to make them
2285 * happy. */
2286 *pskb = dev_alloc_skb(64 + QETH_FAKE_LL_LEN_ETH);
2287 if (!(*pskb))
2288 return -ENOMEM;
2289 skb_reserve(*pskb, QETH_FAKE_LL_LEN_ETH);
2290 if (data_len <= 64) {
2291 memcpy(skb_put(*pskb, data_len), element->addr + offset,
2292 data_len);
2293 } else {
2294 get_page(page);
2295 memcpy(skb_put(*pskb, 64), element->addr + offset, 64);
2296 skb_fill_page_desc(*pskb, *pfrag, page, offset + 64,
2297 data_len - 64);
2298 (*pskb)->data_len += data_len - 64;
2299 (*pskb)->len += data_len - 64;
2300 (*pskb)->truesize += data_len - 64;
2301 }
2302 } else {
2303 get_page(page);
2304 skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len);
2305 (*pskb)->data_len += data_len;
2306 (*pskb)->len += data_len;
2307 (*pskb)->truesize += data_len;
2308 }
2309 (*pfrag)++;
2310 return 0;
2311}
2312
2313static inline struct qeth_buffer_pool_entry *
2314qeth_find_free_buffer_pool_entry(struct qeth_card *card)
2315{
2316 struct list_head *plh;
2317 struct qeth_buffer_pool_entry *entry;
2318 int i, free;
2319 struct page *page;
2320
2321 if (list_empty(&card->qdio.in_buf_pool.entry_list))
2322 return NULL;
2323
2324 list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
2325 entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
2326 free = 1;
2327 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2328 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2329 free = 0;
2330 break;
2331 }
2332 }
2333 if (free) {
2334 list_del_init(&entry->list);
2335 return entry;
2336 }
2337 }
2338
2339 /* no free buffer in pool so take first one and swap pages */
2340 entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2341 struct qeth_buffer_pool_entry, list);
2342 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2343 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2344 page = alloc_page(GFP_ATOMIC|GFP_DMA);
2345 if (!page) {
2346 return NULL;
2347 } else {
2348 free_page((unsigned long)entry->elements[i]);
2349 entry->elements[i] = page_address(page);
2350 if (card->options.performance_stats)
2351 card->perf_stats.sg_alloc_page_rx++;
2352 }
2353 }
2354 }
2355 list_del_init(&entry->list);
2356 return entry;
2357}
2358
2359static struct sk_buff *
2360qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
2361 struct qdio_buffer_element **__element, int *__offset,
2362 struct qeth_hdr **hdr)
2363{
2364 struct qdio_buffer_element *element = *__element;
2365 int offset = *__offset;
2366 struct sk_buff *skb = NULL;
2367 int skb_len;
2368 void *data_ptr;
2369 int data_len;
2370 int use_rx_sg = 0;
2371 int frag = 0;
2372
2373 QETH_DBF_TEXT(trace,6,"nextskb");
2374 /* qeth_hdr must not cross element boundaries */
2375 if (element->length < offset + sizeof(struct qeth_hdr)){
2376 if (qeth_is_last_sbale(element))
2377 return NULL;
2378 element++;
2379 offset = 0;
2380 if (element->length < sizeof(struct qeth_hdr))
2381 return NULL;
2382 }
2383 *hdr = element->addr + offset;
2384
2385 offset += sizeof(struct qeth_hdr);
2386 if (card->options.layer2)
2387 if (card->info.type == QETH_CARD_TYPE_OSN)
2388 skb_len = (*hdr)->hdr.osn.pdu_length;
2389 else
2390 skb_len = (*hdr)->hdr.l2.pkt_length;
2391 else
2392 skb_len = (*hdr)->hdr.l3.length;
2393
2394 if (!skb_len)
2395 return NULL;
2396 if ((skb_len >= card->options.rx_sg_cb) &&
2397 (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
2398 (!atomic_read(&card->force_alloc_skb))) {
2399 use_rx_sg = 1;
2400 } else {
2401 if (card->options.fake_ll) {
2402 if (card->dev->type == ARPHRD_IEEE802_TR) {
2403 if (!(skb = qeth_get_skb(skb_len +
2404 QETH_FAKE_LL_LEN_TR, *hdr)))
2405 goto no_mem;
2406 skb_reserve(skb, QETH_FAKE_LL_LEN_TR);
2407 } else {
2408 if (!(skb = qeth_get_skb(skb_len +
2409 QETH_FAKE_LL_LEN_ETH, *hdr)))
2410 goto no_mem;
2411 skb_reserve(skb, QETH_FAKE_LL_LEN_ETH);
2412 }
2413 } else {
2414 skb = qeth_get_skb(skb_len, *hdr);
2415 if (!skb)
2416 goto no_mem;
2417 }
2418 }
2419
2420 data_ptr = element->addr + offset;
2421 while (skb_len) {
2422 data_len = min(skb_len, (int)(element->length - offset));
2423 if (data_len) {
2424 if (use_rx_sg) {
2425 if (qeth_create_skb_frag(element, &skb, offset,
2426 &frag, data_len))
2427 goto no_mem;
2428 } else {
2429 memcpy(skb_put(skb, data_len), data_ptr,
2430 data_len);
2431 }
2432 }
2433 skb_len -= data_len;
2434 if (skb_len){
2435 if (qeth_is_last_sbale(element)){
2436 QETH_DBF_TEXT(trace,4,"unexeob");
2437 QETH_DBF_TEXT_(trace,4,"%s",CARD_BUS_ID(card));
2438 QETH_DBF_TEXT(qerr,2,"unexeob");
2439 QETH_DBF_TEXT_(qerr,2,"%s",CARD_BUS_ID(card));
2440 QETH_DBF_HEX(misc,4,buffer,sizeof(*buffer));
2441 dev_kfree_skb_any(skb);
2442 card->stats.rx_errors++;
2443 return NULL;
2444 }
2445 element++;
2446 offset = 0;
2447 data_ptr = element->addr;
2448 } else {
2449 offset += data_len;
2450 }
2451 }
2452 *__element = element;
2453 *__offset = offset;
2454 if (use_rx_sg && card->options.performance_stats) {
2455 card->perf_stats.sg_skbs_rx++;
2456 card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
2457 }
2458 return skb;
2459no_mem:
2460 if (net_ratelimit()){
2461 PRINT_WARN("No memory for packet received on %s.\n",
2462 QETH_CARD_IFNAME(card));
2463 QETH_DBF_TEXT(trace,2,"noskbmem");
2464 QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2465 }
2466 card->stats.rx_dropped++;
2467 return NULL;
2468}
2469
2470static __be16
2471qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2472{
2473 struct qeth_card *card;
2474 struct ethhdr *eth;
2475
2476 QETH_DBF_TEXT(trace,6,"typtrans");
2477
2478 card = (struct qeth_card *)dev->priv;
2479#ifdef CONFIG_TR
2480 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
2481 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
2482 return tr_type_trans(skb,dev);
2483#endif /* CONFIG_TR */
2484 skb_reset_mac_header(skb);
2485 skb_pull(skb, ETH_HLEN );
2486 eth = eth_hdr(skb);
2487
2488 if (*eth->h_dest & 1) {
2489 if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
2490 skb->pkt_type = PACKET_BROADCAST;
2491 else
2492 skb->pkt_type = PACKET_MULTICAST;
2493 } else if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
2494 skb->pkt_type = PACKET_OTHERHOST;
2495
2496 if (ntohs(eth->h_proto) >= 1536)
2497 return eth->h_proto;
2498 if (*(unsigned short *) (skb->data) == 0xFFFF)
2499 return htons(ETH_P_802_3);
2500 return htons(ETH_P_802_2);
2501}
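
/*
 * The classification above follows the usual Ethernet convention: a
 * type/length field of 1536 or more is an EtherType, a length field
 * whose payload starts with 0xFFFF marks a raw 802.3 (Novell) frame,
 * and anything else is treated as 802.2 LLC.  A standalone
 * illustration with made-up field values:
 */
#include <stdio.h>

static const char *classify(unsigned short h_proto,
			    unsigned short first_payload_word)
{
	if (h_proto >= 1536)
		return "Ethernet II (EtherType)";
	if (first_payload_word == 0xFFFF)
		return "raw 802.3 (Novell)";
	return "802.2 LLC";
}

int main(void)
{
	printf("%s\n", classify(0x0800, 0x0000));	/* IPv4 EtherType */
	printf("%s\n", classify(100, 0xFFFF));		/* raw 802.3 */
	printf("%s\n", classify(100, 0xAAAA));		/* 802.2 LLC */
	return 0;
}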
2502
2503static void
2504qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
2505 struct qeth_hdr *hdr)
2506{
2507 struct trh_hdr *fake_hdr;
2508 struct trllc *fake_llc;
2509 struct iphdr *ip_hdr;
2510
2511 QETH_DBF_TEXT(trace,5,"skbfktr");
2512 skb_set_mac_header(skb, (int)-QETH_FAKE_LL_LEN_TR);
2513	/* this is a fake token ring header */
2514 fake_hdr = tr_hdr(skb);
2515
2516 /* the destination MAC address */
2517 switch (skb->pkt_type){
2518 case PACKET_MULTICAST:
2519 switch (skb->protocol){
2520#ifdef CONFIG_QETH_IPV6
2521 case __constant_htons(ETH_P_IPV6):
2522 ndisc_mc_map((struct in6_addr *)
2523 skb->data + QETH_FAKE_LL_V6_ADDR_POS,
2524 fake_hdr->daddr, card->dev, 0);
2525 break;
2526#endif /* CONFIG_QETH_IPV6 */
2527 case __constant_htons(ETH_P_IP):
2528 ip_hdr = (struct iphdr *)skb->data;
2529 ip_tr_mc_map(ip_hdr->daddr, fake_hdr->daddr);
2530 break;
2531 default:
2532 memcpy(fake_hdr->daddr, card->dev->dev_addr, TR_ALEN);
2533 }
2534 break;
2535 case PACKET_BROADCAST:
2536 memset(fake_hdr->daddr, 0xff, TR_ALEN);
2537 break;
2538 default:
2539 memcpy(fake_hdr->daddr, card->dev->dev_addr, TR_ALEN);
2540 }
2541 /* the source MAC address */
2542 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
2543 memcpy(fake_hdr->saddr, &hdr->hdr.l3.dest_addr[2], TR_ALEN);
2544 else
2545 memset(fake_hdr->saddr, 0, TR_ALEN);
2546 fake_hdr->rcf=0;
2547 fake_llc = (struct trllc*)&(fake_hdr->rcf);
2548 fake_llc->dsap = EXTENDED_SAP;
2549 fake_llc->ssap = EXTENDED_SAP;
2550 fake_llc->llc = UI_CMD;
2551 fake_llc->protid[0] = 0;
2552 fake_llc->protid[1] = 0;
2553 fake_llc->protid[2] = 0;
2554 fake_llc->ethertype = ETH_P_IP;
2555}
2556
2557static void
2558qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb,
2559 struct qeth_hdr *hdr)
2560{
2561 struct ethhdr *fake_hdr;
2562 struct iphdr *ip_hdr;
2563
2564 QETH_DBF_TEXT(trace,5,"skbfketh");
2565 skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_ETH);
2566 /* this is a fake ethernet header */
2567 fake_hdr = eth_hdr(skb);
2568
2569 /* the destination MAC address */
2570 switch (skb->pkt_type){
2571 case PACKET_MULTICAST:
2572 switch (skb->protocol){
2573#ifdef CONFIG_QETH_IPV6
2574 case __constant_htons(ETH_P_IPV6):
2575 ndisc_mc_map((struct in6_addr *)
2576 skb->data + QETH_FAKE_LL_V6_ADDR_POS,
2577 fake_hdr->h_dest, card->dev, 0);
2578 break;
2579#endif /* CONFIG_QETH_IPV6 */
2580 case __constant_htons(ETH_P_IP):
2581 ip_hdr = (struct iphdr *)skb->data;
2582 ip_eth_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
2583 break;
2584 default:
2585 memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
2586 }
2587 break;
2588 case PACKET_BROADCAST:
2589 memset(fake_hdr->h_dest, 0xff, ETH_ALEN);
2590 break;
2591 default:
2592 memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
2593 }
2594 /* the source MAC address */
2595 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
2596 memcpy(fake_hdr->h_source, &hdr->hdr.l3.dest_addr[2], ETH_ALEN);
2597 else
2598 memset(fake_hdr->h_source, 0, ETH_ALEN);
2599 /* the protocol */
2600 fake_hdr->h_proto = skb->protocol;
2601}
2602
2603static inline void
2604qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
2605 struct qeth_hdr *hdr)
2606{
2607 if (card->dev->type == ARPHRD_IEEE802_TR)
2608 qeth_rebuild_skb_fake_ll_tr(card, skb, hdr);
2609 else
2610 qeth_rebuild_skb_fake_ll_eth(card, skb, hdr);
2611}
2612
2613static inline void
2614qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2615 struct qeth_hdr *hdr)
2616{
2617 skb->pkt_type = PACKET_HOST;
2618 skb->protocol = qeth_type_trans(skb, skb->dev);
2619 if (card->options.checksum_type == NO_CHECKSUMMING)
2620 skb->ip_summed = CHECKSUM_UNNECESSARY;
2621 else
2622 skb->ip_summed = CHECKSUM_NONE;
2623 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
2624}
2625
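/*
 * Rebuild a layer-3 skb: derive skb->protocol and skb->pkt_type from the
 * qeth_hdr flags, extract a VLAN tag if one is present, and decide how much
 * checksumming the stack still has to do.  Roughly (illustrative sketch;
 * the authoritative bits are the constants used below):
 *
 *   l3.flags & QETH_HDR_IPV6      -> ETH_P_IPV6, else ETH_P_IP
 *   l3.flags & QETH_HDR_CAST_MASK -> PACKET_HOST/MULTICAST/BROADCAST
 *   HW_CHECKSUMMING and both CSUM_*_REQ flags set -> CHECKSUM_UNNECESSARY
 *
 * Returns the VLAN id (0 if none) so the caller can hand the skb to
 * vlan_hwaccel_rx().
 */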
2626static __u16
2627qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2628 struct qeth_hdr *hdr)
2629{
2630 unsigned short vlan_id = 0;
2631#ifdef CONFIG_QETH_IPV6
2632 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
2633 skb->pkt_type = PACKET_HOST;
2634 skb->protocol = qeth_type_trans(skb, card->dev);
2635 return 0;
2636 }
2637#endif /* CONFIG_QETH_IPV6 */
2638 skb->protocol = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
2639 ETH_P_IP);
2640 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK){
2641 case QETH_CAST_UNICAST:
2642 skb->pkt_type = PACKET_HOST;
2643 break;
2644 case QETH_CAST_MULTICAST:
2645 skb->pkt_type = PACKET_MULTICAST;
2646 card->stats.multicast++;
2647 break;
2648 case QETH_CAST_BROADCAST:
2649 skb->pkt_type = PACKET_BROADCAST;
2650 card->stats.multicast++;
2651 break;
2652 case QETH_CAST_ANYCAST:
2653 case QETH_CAST_NOCAST:
2654 default:
2655 skb->pkt_type = PACKET_HOST;
2656 }
2657
2658 if (hdr->hdr.l3.ext_flags &
2659 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
2660 vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
2661 hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
2662 }
2663
2664 if (card->options.fake_ll)
2665 qeth_rebuild_skb_fake_ll(card, skb, hdr);
2666 else
2667 skb_reset_mac_header(skb);
2668 skb->ip_summed = card->options.checksum_type;
2669 if (card->options.checksum_type == HW_CHECKSUMMING){
2670 if ( (hdr->hdr.l3.ext_flags &
2671 (QETH_HDR_EXT_CSUM_HDR_REQ |
2672 QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
2673 (QETH_HDR_EXT_CSUM_HDR_REQ |
2674 QETH_HDR_EXT_CSUM_TRANSP_REQ) )
2675 skb->ip_summed = CHECKSUM_UNNECESSARY;
2676 else
2677 skb->ip_summed = SW_CHECKSUMMING;
2678 }
2679 return vlan_id;
2680}
2681
2682static void
2683qeth_process_inbound_buffer(struct qeth_card *card,
2684 struct qeth_qdio_buffer *buf, int index)
2685{
2686 struct qdio_buffer_element *element;
2687 struct sk_buff *skb;
2688 struct qeth_hdr *hdr;
2689 int offset;
2690 int rxrc;
2691 __u16 vlan_tag = 0;
2692
2693 /* get first element of current buffer */
2694 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
2695 offset = 0;
2696 if (card->options.performance_stats)
2697 card->perf_stats.bufs_rec++;
2698 while((skb = qeth_get_next_skb(card, buf->buffer, &element,
2699 &offset, &hdr))) {
2700 skb->dev = card->dev;
2701 if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
2702 qeth_layer2_rebuild_skb(card, skb, hdr);
2703 else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
2704 vlan_tag = qeth_rebuild_skb(card, skb, hdr);
2705 else if (hdr->hdr.osn.id == QETH_HEADER_TYPE_OSN) {
2706 skb_push(skb, sizeof(struct qeth_hdr));
2707 skb_copy_to_linear_data(skb, hdr,
2708 sizeof(struct qeth_hdr));
2709 } else { /* unknown header type */
2710 dev_kfree_skb_any(skb);
2711 QETH_DBF_TEXT(trace, 3, "inbunkno");
2712 QETH_DBF_HEX(control, 3, hdr, QETH_DBF_CONTROL_LEN);
2713 continue;
2714 }
2715 /* is device UP ? */
2716 if (!(card->dev->flags & IFF_UP)){
2717 dev_kfree_skb_any(skb);
2718 continue;
2719 }
2720 if (card->info.type == QETH_CARD_TYPE_OSN)
2721 rxrc = card->osn_info.data_cb(skb);
2722 else
2723#ifdef CONFIG_QETH_VLAN
2724 if (vlan_tag)
2725 if (card->vlangrp)
2726 vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag);
2727 else {
2728 dev_kfree_skb_any(skb);
2729 continue;
2730 }
2731 else
2732#endif
2733 rxrc = netif_rx(skb);
2734 card->dev->last_rx = jiffies;
2735 card->stats.rx_packets++;
2736 card->stats.rx_bytes += skb->len;
2737 }
2738}
2739
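/*
 * Arm one inbound buffer: take a free pool entry and point each of the
 * QETH_MAX_BUFFER_ELEMENTS SBAL elements at one page of that entry; only
 * the last element carries SBAL_FLAGS_LAST_ENTRY.  A non-zero return
 * ("no free pool entry") lets the caller stop requeueing early.
 */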
2740static int
2741qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2742{
2743 struct qeth_buffer_pool_entry *pool_entry;
2744 int i;
2745
2746 pool_entry = qeth_find_free_buffer_pool_entry(card);
2747 if (!pool_entry)
2748 return 1;
2749	/*
2750	 * since the buffer is accessed only from the input_tasklet,
2751	 * there shouldn't be a need to synchronize; also, since we use
2752	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2753	 * buffers
2754	 */
2755 BUG_ON(!pool_entry);
2756
2757 buf->pool_entry = pool_entry;
2758 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
2759 buf->buffer->element[i].length = PAGE_SIZE;
2760 buf->buffer->element[i].addr = pool_entry->elements[i];
2761 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2762 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
2763 else
2764 buf->buffer->element[i].flags = 0;
2765 }
2766 buf->state = QETH_QDIO_BUF_EMPTY;
2767 return 0;
2768}
2769
2770static void
2771qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2772 struct qeth_qdio_out_buffer *buf)
2773{
2774 int i;
2775 struct sk_buff *skb;
2776
2777 /* is PCI flag set on buffer? */
2778 if (buf->buffer->element[0].flags & 0x40)
2779 atomic_dec(&queue->set_pci_flags_count);
2780
2781 while ((skb = skb_dequeue(&buf->skb_list))){
2782 atomic_dec(&skb->users);
2783 dev_kfree_skb_any(skb);
2784 }
2785 qeth_eddp_buf_release_contexts(buf);
2786 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i){
2787 buf->buffer->element[i].length = 0;
2788 buf->buffer->element[i].addr = NULL;
2789 buf->buffer->element[i].flags = 0;
2790 }
2791 buf->next_element_to_fill = 0;
2792 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
2793}
2794
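/*
 * Requeue processed inbound buffers to the hardware.  'count' is the number
 * of drained buffers in the 128-slot ring between next_buf_to_init and
 * 'index'.  Illustrative example (numbers made up): with buf_count = 16,
 * next_buf_to_init = 120 and index = 118, count = 16 - (120 - 118) = 14;
 * do_QDIO is only issued once count reaches QETH_IN_BUF_REQUEUE_THRESHOLD,
 * which batches SIGA instructions.
 */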
2795static void
2796qeth_queue_input_buffer(struct qeth_card *card, int index)
2797{
2798 struct qeth_qdio_q *queue = card->qdio.in_q;
2799 int count;
2800 int i;
2801 int rc;
2802 int newcount = 0;
2803
2804 QETH_DBF_TEXT(trace,6,"queinbuf");
2805 count = (index < queue->next_buf_to_init)?
2806 card->qdio.in_buf_pool.buf_count -
2807 (queue->next_buf_to_init - index) :
2808 card->qdio.in_buf_pool.buf_count -
2809 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
2810 /* only requeue at a certain threshold to avoid SIGAs */
2811 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)){
2812 for (i = queue->next_buf_to_init;
2813 i < queue->next_buf_to_init + count; ++i) {
2814 if (qeth_init_input_buffer(card,
2815 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
2816 break;
2817 } else {
2818 newcount++;
2819 }
2820 }
2821
2822 if (newcount < count) {
2823			/* we are in a memory shortage, so we switch back to
2824			   traditional skb allocation and drop packets */
2825 if (!atomic_read(&card->force_alloc_skb) &&
2826 net_ratelimit())
2827 PRINT_WARN("Switch to alloc skb\n");
2828 atomic_set(&card->force_alloc_skb, 3);
2829 count = newcount;
2830 } else {
2831 if ((atomic_read(&card->force_alloc_skb) == 1) &&
2832 net_ratelimit())
2833 PRINT_WARN("Switch to sg\n");
2834 atomic_add_unless(&card->force_alloc_skb, -1, 0);
2835 }
2836
2837		/*
2838		 * According to the old code, requeueing all 128 buffers
2839		 * should be avoided in order to benefit from PCI avoidance.
2840		 * This function keeps at least one buffer (the buffer at
2841		 * 'index') un-requeued -> this buffer is the first buffer
2842		 * that will be requeued the next time.
2843		 */
2844 if (card->options.performance_stats) {
2845 card->perf_stats.inbound_do_qdio_cnt++;
2846 card->perf_stats.inbound_do_qdio_start_time =
2847 qeth_get_micros();
2848 }
2849 rc = do_QDIO(CARD_DDEV(card),
2850 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
2851 0, queue->next_buf_to_init, count, NULL);
2852 if (card->options.performance_stats)
2853 card->perf_stats.inbound_do_qdio_time +=
2854 qeth_get_micros() -
2855 card->perf_stats.inbound_do_qdio_start_time;
2856 if (rc){
2857 PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
2858				   "returned %i (device %s).\n",
2859 rc, CARD_DDEV_ID(card));
2860 QETH_DBF_TEXT(trace,2,"qinberr");
2861 QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card));
2862 }
2863 queue->next_buf_to_init = (queue->next_buf_to_init + count) %
2864 QDIO_MAX_BUFFERS_PER_Q;
2865 }
2866}
2867
2868static inline void
2869qeth_put_buffer_pool_entry(struct qeth_card *card,
2870 struct qeth_buffer_pool_entry *entry)
2871{
2872 QETH_DBF_TEXT(trace, 6, "ptbfplen");
2873 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
2874}
2875
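/*
 * QDIO inbound interrupt handler.  Roughly: for each of the 'count' buffers
 * starting at 'first_element', check for QDIO/SIGA errors, hand the
 * contained skbs to the stack via qeth_process_inbound_buffer(), return the
 * pool entry and requeue the slot.  An ACTIVATE check condition is fatal
 * here and triggers a full recovery instead.
 */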
2876static void
2877qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
2878 unsigned int qdio_err, unsigned int siga_err,
2879 unsigned int queue, int first_element, int count,
2880 unsigned long card_ptr)
2881{
2882 struct net_device *net_dev;
2883 struct qeth_card *card;
2884 struct qeth_qdio_buffer *buffer;
2885 int index;
2886 int i;
2887
2888 QETH_DBF_TEXT(trace, 6, "qdinput");
2889 card = (struct qeth_card *) card_ptr;
2890 net_dev = card->dev;
2891 if (card->options.performance_stats) {
2892 card->perf_stats.inbound_cnt++;
2893 card->perf_stats.inbound_start_time = qeth_get_micros();
2894 }
2895 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
2896 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
2897 QETH_DBF_TEXT(trace, 1,"qdinchk");
2898 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2899 QETH_DBF_TEXT_(trace,1,"%04X%04X",first_element,count);
2900 QETH_DBF_TEXT_(trace,1,"%04X%04X", queue, status);
2901 qeth_schedule_recovery(card);
2902 return;
2903 }
2904 }
2905 for (i = first_element; i < (first_element + count); ++i) {
2906 index = i % QDIO_MAX_BUFFERS_PER_Q;
2907 buffer = &card->qdio.in_q->bufs[index];
2908 if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) &&
2909 qeth_check_qdio_errors(buffer->buffer,
2910 qdio_err, siga_err,"qinerr")))
2911 qeth_process_inbound_buffer(card, buffer, index);
2912 /* clear buffer and give back to hardware */
2913 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
2914 qeth_queue_input_buffer(card, index);
2915 }
2916 if (card->options.performance_stats)
2917 card->perf_stats.inbound_time += qeth_get_micros() -
2918 card->perf_stats.inbound_start_time;
2919}
2920
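/*
 * Map an outbound completion to a send-error class.  'cc' is the SIGA
 * condition code (low two bits of siga_err), sbalf15 the flag byte of
 * SBAL element 15.  Rough decision table (see the cases below):
 *
 *   cc 0, no qdio_err -> NONE           cc 0, qdio_err          -> LINK_FAILURE
 *   cc 2, B bit set   -> KICK_IT        cc 2, sbalf15 in 15..31 -> RETRY,
 *                                              else             -> LINK_FAILURE
 *   cc 1              -> LINK_FAILURE   cc 3                    -> KICK_IT
 */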
2921static int
2922qeth_handle_send_error(struct qeth_card *card,
2923 struct qeth_qdio_out_buffer *buffer,
2924 unsigned int qdio_err, unsigned int siga_err)
2925{
2926 int sbalf15 = buffer->buffer->element[15].flags & 0xff;
2927 int cc = siga_err & 3;
2928
2929 QETH_DBF_TEXT(trace, 6, "hdsnderr");
2930 qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr");
2931 switch (cc) {
2932 case 0:
2933 if (qdio_err){
2934 QETH_DBF_TEXT(trace, 1,"lnkfail");
2935 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2936 QETH_DBF_TEXT_(trace,1,"%04x %02x",
2937 (u16)qdio_err, (u8)sbalf15);
2938 return QETH_SEND_ERROR_LINK_FAILURE;
2939 }
2940 return QETH_SEND_ERROR_NONE;
2941 case 2:
2942 if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
2943 QETH_DBF_TEXT(trace, 1, "SIGAcc2B");
2944 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2945 return QETH_SEND_ERROR_KICK_IT;
2946 }
2947		/* look at qdio_error and sbalf 15 */
2948		if ((sbalf15 >= 15) && (sbalf15 <= 31))
2949			return QETH_SEND_ERROR_RETRY;
2950		return QETH_SEND_ERROR_LINK_FAILURE;
2951 case 1:
2952 QETH_DBF_TEXT(trace, 1, "SIGAcc1");
2953 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2954 return QETH_SEND_ERROR_LINK_FAILURE;
2955 case 3:
2956 default:
2957 QETH_DBF_TEXT(trace, 1, "SIGAcc3");
2958 QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card));
2959 return QETH_SEND_ERROR_KICK_IT;
2960 }
2961}
2962
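/*
 * Hand 'count' primed buffers starting at 'index' to the hardware via
 * do_QDIO.  On non-IQD devices this is also where the PCI request bit
 * (0x40 in element[0].flags) is planted: roughly, when we are close to
 * the packing high watermark, or whenever no PCI request is outstanding
 * in packing mode, so that a later interrupt is guaranteed to flush
 * leftover packed buffers.
 */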
2963void
2964qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2965 int index, int count)
2966{
2967 struct qeth_qdio_out_buffer *buf;
2968 int rc;
2969 int i;
2970 unsigned int qdio_flags;
2971
2972 QETH_DBF_TEXT(trace, 6, "flushbuf");
2973
2974 for (i = index; i < index + count; ++i) {
2975 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2976 buf->buffer->element[buf->next_element_to_fill - 1].flags |=
2977 SBAL_FLAGS_LAST_ENTRY;
2978
2979 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
2980 continue;
2981
2982 if (!queue->do_pack){
2983 if ((atomic_read(&queue->used_buffers) >=
2984 (QETH_HIGH_WATERMARK_PACK -
2985 QETH_WATERMARK_PACK_FUZZ)) &&
2986 !atomic_read(&queue->set_pci_flags_count)){
2987 /* it's likely that we'll go to packing
2988 * mode soon */
2989 atomic_inc(&queue->set_pci_flags_count);
2990 buf->buffer->element[0].flags |= 0x40;
2991 }
2992 } else {
2993 if (!atomic_read(&queue->set_pci_flags_count)){
2994				/*
2995				 * there's no outstanding PCI any more, so we
2996				 * have to request a PCI to be sure that the PCI
2997				 * will wake up at some time in the future; then
2998				 * we can flush packed buffers that might still
2999				 * be hanging around, which can happen if no
3000				 * further send was requested by the stack
3001				 */
3002 atomic_inc(&queue->set_pci_flags_count);
3003 buf->buffer->element[0].flags |= 0x40;
3004 }
3005 }
3006 }
3007
3008 queue->card->dev->trans_start = jiffies;
3009 if (queue->card->options.performance_stats) {
3010 queue->card->perf_stats.outbound_do_qdio_cnt++;
3011 queue->card->perf_stats.outbound_do_qdio_start_time =
3012 qeth_get_micros();
3013 }
3014 qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3015 if (under_int)
3016 qdio_flags |= QDIO_FLAG_UNDER_INTERRUPT;
3017 if (atomic_read(&queue->set_pci_flags_count))
3018 qdio_flags |= QDIO_FLAG_PCI_OUT;
3019 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
3020 queue->queue_no, index, count, NULL);
3021 if (queue->card->options.performance_stats)
3022 queue->card->perf_stats.outbound_do_qdio_time +=
3023 qeth_get_micros() -
3024 queue->card->perf_stats.outbound_do_qdio_start_time;
3025 if (rc){
3026 QETH_DBF_TEXT(trace, 2, "flushbuf");
3027 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
3028 QETH_DBF_TEXT_(trace, 2, "%s", CARD_DDEV_ID(queue->card));
3029 queue->card->stats.tx_errors += count;
3030		/* this must not happen under normal circumstances. If it
3031		 * happens, something is really wrong -> recover */
3032 qeth_schedule_recovery(queue->card);
3033 return;
3034 }
3035 atomic_add(count, &queue->used_buffers);
3036 if (queue->card->options.performance_stats)
3037 queue->card->perf_stats.bufs_sent += count;
3038}
3039
3040/*
3041 * Switch to packing state if the number of used buffers on a queue
3042 * reaches a certain limit.
3043 */
3044static void
3045qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3046{
3047 if (!queue->do_pack) {
3048 if (atomic_read(&queue->used_buffers)
3049 >= QETH_HIGH_WATERMARK_PACK){
3050 /* switch non-PACKING -> PACKING */
3051 QETH_DBF_TEXT(trace, 6, "np->pack");
3052 if (queue->card->options.performance_stats)
3053 queue->card->perf_stats.sc_dp_p++;
3054 queue->do_pack = 1;
3055 }
3056 }
3057}
3058
3059/*
3060 * Switches from packing to non-packing mode. If there is a packing
3061 * buffer on the queue, this buffer will be prepared to be flushed.
3062 * In that case 1 is returned to inform the caller. If no buffer
3063 * has to be flushed, zero is returned.
3064 */
3065static int
3066qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3067{
3068 struct qeth_qdio_out_buffer *buffer;
3069 int flush_count = 0;
3070
3071 if (queue->do_pack) {
3072 if (atomic_read(&queue->used_buffers)
3073 <= QETH_LOW_WATERMARK_PACK) {
3074 /* switch PACKING -> non-PACKING */
3075 QETH_DBF_TEXT(trace, 6, "pack->np");
3076 if (queue->card->options.performance_stats)
3077 queue->card->perf_stats.sc_p_dp++;
3078 queue->do_pack = 0;
3079 /* flush packing buffers */
3080 buffer = &queue->bufs[queue->next_buf_to_fill];
3081 if ((atomic_read(&buffer->state) ==
3082 QETH_QDIO_BUF_EMPTY) &&
3083 (buffer->next_element_to_fill > 0)) {
3084 atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
3085 flush_count++;
3086 queue->next_buf_to_fill =
3087 (queue->next_buf_to_fill + 1) %
3088 QDIO_MAX_BUFFERS_PER_Q;
3089 }
3090 }
3091 }
3092 return flush_count;
3093}
3094
3095/*
3096 * Called to flush a packing buffer if no more pci flags are on the queue.
3097 * Checks if there is a packing buffer and prepares it to be flushed.
3098 * In that case returns 1, otherwise zero.
3099 */
3100static int
3101qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
3102{
3103 struct qeth_qdio_out_buffer *buffer;
3104
3105 buffer = &queue->bufs[queue->next_buf_to_fill];
3106 if((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3107 (buffer->next_element_to_fill > 0)){
3108 /* it's a packing buffer */
3109 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3110 queue->next_buf_to_fill =
3111 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
3112 return 1;
3113 }
3114 return 0;
3115}
3116
3117static void
3118qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3119{
3120 int index;
3121 int flush_cnt = 0;
3122 int q_was_packing = 0;
3123
3124 /*
3125	 * check if we have to switch to non-packing mode or if
3126 * we have to get a pci flag out on the queue
3127 */
3128 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3129 !atomic_read(&queue->set_pci_flags_count)){
3130 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
3131 QETH_OUT_Q_UNLOCKED) {
3132 /*
3133 * If we get in here, there was no action in
3134 * do_send_packet. So, we check if there is a
3135 * packing buffer to be flushed here.
3136 */
3137 netif_stop_queue(queue->card->dev);
3138 index = queue->next_buf_to_fill;
3139 q_was_packing = queue->do_pack;
3140 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
3141 if (!flush_cnt &&
3142 !atomic_read(&queue->set_pci_flags_count))
3143 flush_cnt +=
3144 qeth_flush_buffers_on_no_pci(queue);
3145 if (queue->card->options.performance_stats &&
3146 q_was_packing)
3147 queue->card->perf_stats.bufs_sent_pack +=
3148 flush_cnt;
3149 if (flush_cnt)
3150 qeth_flush_buffers(queue, 1, index, flush_cnt);
3151 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3152 }
3153 }
3154}
3155
3156static void
3157qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
3158 unsigned int qdio_error, unsigned int siga_error,
3159 unsigned int __queue, int first_element, int count,
3160 unsigned long card_ptr)
3161{
3162 struct qeth_card *card = (struct qeth_card *) card_ptr;
3163 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3164 struct qeth_qdio_out_buffer *buffer;
3165 int i;
3166
3167 QETH_DBF_TEXT(trace, 6, "qdouhdl");
3168 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
3169 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
3170 QETH_DBF_TEXT(trace, 2, "achkcond");
3171 QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card));
3172 QETH_DBF_TEXT_(trace, 2, "%08x", status);
3173 netif_stop_queue(card->dev);
3174 qeth_schedule_recovery(card);
3175 return;
3176 }
3177 }
3178 if (card->options.performance_stats) {
3179 card->perf_stats.outbound_handler_cnt++;
3180 card->perf_stats.outbound_handler_start_time =
3181 qeth_get_micros();
3182 }
3183 for(i = first_element; i < (first_element + count); ++i){
3184 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
3185		/* we only handle the KICK_IT error by doing a recovery */
3186 if (qeth_handle_send_error(card, buffer,
3187 qdio_error, siga_error)
3188 == QETH_SEND_ERROR_KICK_IT){
3189 netif_stop_queue(card->dev);
3190 qeth_schedule_recovery(card);
3191 return;
3192 }
3193 qeth_clear_output_buffer(queue, buffer);
3194 }
3195 atomic_sub(count, &queue->used_buffers);
3196 /* check if we need to do something on this outbound queue */
3197 if (card->info.type != QETH_CARD_TYPE_IQD)
3198 qeth_check_outbound_queue(queue);
3199
3200 netif_wake_queue(queue->card->dev);
3201 if (card->options.performance_stats)
3202 card->perf_stats.outbound_handler_time += qeth_get_micros() -
3203 card->perf_stats.outbound_handler_start_time;
3204}
3205
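/*
 * QIB parameter field layout as filled in by the two helpers below
 * (EBCDIC eyecatchers followed by 32-bit values):
 *
 *   bytes  0.. 3  "PCIT"    bytes  4..15  PCI thresholds A/B + timer
 *   bytes 16..19  "BLKT"    bytes 20..31  blkt total/inter/inter-jumbo
 */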
3206static void
3207qeth_create_qib_param_field(struct qeth_card *card, char *param_field)
3208{
3210 param_field[0] = _ascebc['P'];
3211 param_field[1] = _ascebc['C'];
3212 param_field[2] = _ascebc['I'];
3213 param_field[3] = _ascebc['T'];
3214 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
3215 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
3216 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
3217}
3218
3219static void
3220qeth_create_qib_param_field_blkt(struct qeth_card *card, char *param_field)
3221{
3222 param_field[16] = _ascebc['B'];
3223 param_field[17] = _ascebc['L'];
3224 param_field[18] = _ascebc['K'];
3225 param_field[19] = _ascebc['T'];
3226 *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
3227 *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
3228 *((unsigned int *) (&param_field[28])) = card->info.blkt.inter_packet_jumbo;
3229}
3230
3231static void
3232qeth_initialize_working_pool_list(struct qeth_card *card)
3233{
3234 struct qeth_buffer_pool_entry *entry;
3235
3236 QETH_DBF_TEXT(trace,5,"inwrklst");
3237
3238 list_for_each_entry(entry,
3239 &card->qdio.init_pool.entry_list, init_list) {
3240 qeth_put_buffer_pool_entry(card,entry);
3241 }
3242}
3243
3244static void
3245qeth_clear_working_pool_list(struct qeth_card *card)
3246{
3247 struct qeth_buffer_pool_entry *pool_entry, *tmp;
3248
3249 QETH_DBF_TEXT(trace,5,"clwrklst");
3250 list_for_each_entry_safe(pool_entry, tmp,
3251 &card->qdio.in_buf_pool.entry_list, list){
3252 list_del(&pool_entry->list);
3253 }
3254}
3255
3256static void
3257qeth_free_buffer_pool(struct qeth_card *card)
3258{
3259 struct qeth_buffer_pool_entry *pool_entry, *tmp;
3260	int i;
3261 QETH_DBF_TEXT(trace,5,"freepool");
3262 list_for_each_entry_safe(pool_entry, tmp,
3263 &card->qdio.init_pool.entry_list, init_list){
3264 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
3265 free_page((unsigned long)pool_entry->elements[i]);
3266 list_del(&pool_entry->init_list);
3267 kfree(pool_entry);
3268 }
3269}
3270
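/*
 * Build the initial buffer pool: init_pool.buf_count entries, each owning
 * QETH_MAX_BUFFER_ELEMENTS DMA-capable pages.  On any allocation failure
 * everything allocated so far is rolled back and -ENOMEM is returned, so
 * the pool is all-or-nothing.
 */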
3271static int
3272qeth_alloc_buffer_pool(struct qeth_card *card)
3273{
3274 struct qeth_buffer_pool_entry *pool_entry;
3275 void *ptr;
3276 int i, j;
3277
3278 QETH_DBF_TEXT(trace,5,"alocpool");
3279 for (i = 0; i < card->qdio.init_pool.buf_count; ++i){
3280 pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
3281 if (!pool_entry){
3282 qeth_free_buffer_pool(card);
3283 return -ENOMEM;
3284 }
3285 for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
3286 ptr = (void *) __get_free_page(GFP_KERNEL|GFP_DMA);
3287 if (!ptr) {
3288 while (j > 0)
3289 free_page((unsigned long)
3290 pool_entry->elements[--j]);
3291 kfree(pool_entry);
3292 qeth_free_buffer_pool(card);
3293 return -ENOMEM;
3294 }
3295 pool_entry->elements[j] = ptr;
3296 }
3297 list_add(&pool_entry->init_list,
3298 &card->qdio.init_pool.entry_list);
3299 }
3300 return 0;
3301}
3302
3303int
3304qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
3305{
3306 QETH_DBF_TEXT(trace, 2, "realcbp");
3307
3308 if ((card->state != CARD_STATE_DOWN) &&
3309 (card->state != CARD_STATE_RECOVER))
3310 return -EPERM;
3311
3312	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
3313 qeth_clear_working_pool_list(card);
3314 qeth_free_buffer_pool(card);
3315 card->qdio.in_buf_pool.buf_count = bufcnt;
3316 card->qdio.init_pool.buf_count = bufcnt;
3317 return qeth_alloc_buffer_pool(card);
3318}
3319
3320static int
3321qeth_alloc_qdio_buffers(struct qeth_card *card)
3322{
3323 int i, j;
3324
3325 QETH_DBF_TEXT(setup, 2, "allcqdbf");
3326
3327 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
3328 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
3329 return 0;
3330
3331 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
3332 GFP_KERNEL|GFP_DMA);
3333 if (!card->qdio.in_q)
3334 goto out_nomem;
3335 QETH_DBF_TEXT(setup, 2, "inq");
3336 QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *));
3337 memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
3338 /* give inbound qeth_qdio_buffers their qdio_buffers */
3339 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3340 card->qdio.in_q->bufs[i].buffer =
3341 &card->qdio.in_q->qdio_bufs[i];
3342 /* inbound buffer pool */
3343 if (qeth_alloc_buffer_pool(card))
3344 goto out_freeinq;
3345 /* outbound */
3346 card->qdio.out_qs =
3347 kmalloc(card->qdio.no_out_queues *
3348 sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
3349 if (!card->qdio.out_qs)
3350 goto out_freepool;
3351 for (i = 0; i < card->qdio.no_out_queues; ++i) {
3352 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
3353 GFP_KERNEL|GFP_DMA);
3354 if (!card->qdio.out_qs[i])
3355 goto out_freeoutq;
3356 QETH_DBF_TEXT_(setup, 2, "outq %i", i);
3357 QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
3358 memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
3359 card->qdio.out_qs[i]->queue_no = i;
3360 /* give outbound qeth_qdio_buffers their qdio_buffers */
3361 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
3362 card->qdio.out_qs[i]->bufs[j].buffer =
3363 &card->qdio.out_qs[i]->qdio_bufs[j];
3364 skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
3365 skb_list);
3366 lockdep_set_class(
3367 &card->qdio.out_qs[i]->bufs[j].skb_list.lock,
3368 &qdio_out_skb_queue_key);
3369 INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
3370 }
3371 }
3372 return 0;
3373
3374out_freeoutq:
3375 while (i > 0)
3376 kfree(card->qdio.out_qs[--i]);
3377 kfree(card->qdio.out_qs);
3378 card->qdio.out_qs = NULL;
3379out_freepool:
3380 qeth_free_buffer_pool(card);
3381out_freeinq:
3382 kfree(card->qdio.in_q);
3383 card->qdio.in_q = NULL;
3384out_nomem:
3385 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
3386 return -ENOMEM;
3387}
3388
3389static void
3390qeth_free_qdio_buffers(struct qeth_card *card)
3391{
3392 int i, j;
3393
3394 QETH_DBF_TEXT(trace, 2, "freeqdbf");
3395 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
3396 QETH_QDIO_UNINITIALIZED)
3397 return;
3398 kfree(card->qdio.in_q);
3399 card->qdio.in_q = NULL;
3400 /* inbound buffer pool */
3401 qeth_free_buffer_pool(card);
3402 /* free outbound qdio_qs */
3403 if (card->qdio.out_qs) {
3404 for (i = 0; i < card->qdio.no_out_queues; ++i) {
3405 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
3406 qeth_clear_output_buffer(card->qdio.out_qs[i],
3407 &card->qdio.out_qs[i]->bufs[j]);
3408 kfree(card->qdio.out_qs[i]);
3409 }
3410 kfree(card->qdio.out_qs);
3411 card->qdio.out_qs = NULL;
3412 }
3413}
3414
3415static void
3416qeth_clear_qdio_buffers(struct qeth_card *card)
3417{
3418 int i, j;
3419
3420 QETH_DBF_TEXT(trace, 2, "clearqdbf");
3421 /* clear outbound buffers to free skbs */
3422 for (i = 0; i < card->qdio.no_out_queues; ++i)
3423 if (card->qdio.out_qs && card->qdio.out_qs[i]) {
3424 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
3425 qeth_clear_output_buffer(card->qdio.out_qs[i],
3426 &card->qdio.out_qs[i]->bufs[j]);
3427 }
3428}
3429
3430static void
3431qeth_init_qdio_info(struct qeth_card *card)
3432{
3433 QETH_DBF_TEXT(setup, 4, "intqdinf");
3434 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
3435 /* inbound */
3436 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
3437 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
3438 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
3439 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
3440 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
3441}
3442
3443static int
3444qeth_init_qdio_queues(struct qeth_card *card)
3445{
3446 int i, j;
3447 int rc;
3448
3449 QETH_DBF_TEXT(setup, 2, "initqdqs");
3450
3451 /* inbound queue */
3452 memset(card->qdio.in_q->qdio_bufs, 0,
3453 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
3454 qeth_initialize_working_pool_list(card);
3455	/* give only as many buffers to hardware as we have buffer pool entries */
3456 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
3457 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3458 card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1;
3459 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
3460 card->qdio.in_buf_pool.buf_count - 1, NULL);
3461 if (rc) {
3462 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3463 return rc;
3464 }
3465 rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
3466 if (rc) {
3467 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3468 return rc;
3469 }
3470 /* outbound queue */
3471 for (i = 0; i < card->qdio.no_out_queues; ++i){
3472 memset(card->qdio.out_qs[i]->qdio_bufs, 0,
3473 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
3474 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
3475 qeth_clear_output_buffer(card->qdio.out_qs[i],
3476 &card->qdio.out_qs[i]->bufs[j]);
3477 }
3478 card->qdio.out_qs[i]->card = card;
3479 card->qdio.out_qs[i]->next_buf_to_fill = 0;
3480 card->qdio.out_qs[i]->do_pack = 0;
3481 atomic_set(&card->qdio.out_qs[i]->used_buffers,0);
3482 atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
3483 atomic_set(&card->qdio.out_qs[i]->state,
3484 QETH_OUT_Q_UNLOCKED);
3485 }
3486 return 0;
3487}
3488
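/*
 * Establish the QDIO queues: build the qib parameter field, collect the
 * physical addresses of all inbound/outbound SBALs into flat arrays and
 * feed the lot to qdio_initialize().  The ALLOCATED -> ESTABLISHED state
 * change is done with cmpxchg, apparently to guard against a concurrent
 * establish running twice.
 */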
3489static int
3490qeth_qdio_establish(struct qeth_card *card)
3491{
3492 struct qdio_initialize init_data;
3493 char *qib_param_field;
3494 struct qdio_buffer **in_sbal_ptrs;
3495 struct qdio_buffer **out_sbal_ptrs;
3496 int i, j, k;
3497 int rc = 0;
3498
3499 QETH_DBF_TEXT(setup, 2, "qdioest");
3500
3501 qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
3502 GFP_KERNEL);
3503 if (!qib_param_field)
3504 return -ENOMEM;
3505
3506 qeth_create_qib_param_field(card, qib_param_field);
3507 qeth_create_qib_param_field_blkt(card, qib_param_field);
3508
3509 in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
3510 GFP_KERNEL);
3511 if (!in_sbal_ptrs) {
3512 kfree(qib_param_field);
3513 return -ENOMEM;
3514 }
3515 for(i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3516 in_sbal_ptrs[i] = (struct qdio_buffer *)
3517 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
3518
3519 out_sbal_ptrs =
3520 kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
3521 sizeof(void *), GFP_KERNEL);
3522 if (!out_sbal_ptrs) {
3523 kfree(in_sbal_ptrs);
3524 kfree(qib_param_field);
3525 return -ENOMEM;
3526 }
3527 for(i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
3528 for(j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k){
3529 out_sbal_ptrs[k] = (struct qdio_buffer *)
3530 virt_to_phys(card->qdio.out_qs[i]->
3531 bufs[j].buffer);
3532 }
3533
3534 memset(&init_data, 0, sizeof(struct qdio_initialize));
3535 init_data.cdev = CARD_DDEV(card);
3536 init_data.q_format = qeth_get_qdio_q_format(card);
3537 init_data.qib_param_field_format = 0;
3538 init_data.qib_param_field = qib_param_field;
3539 init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
3540 init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
3541 init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
3542 init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
3543 init_data.no_input_qs = 1;
3544 init_data.no_output_qs = card->qdio.no_out_queues;
3545 init_data.input_handler = (qdio_handler_t *)
3546 qeth_qdio_input_handler;
3547 init_data.output_handler = (qdio_handler_t *)
3548 qeth_qdio_output_handler;
3549 init_data.int_parm = (unsigned long) card;
3550 init_data.flags = QDIO_INBOUND_0COPY_SBALS |
3551 QDIO_OUTBOUND_0COPY_SBALS |
3552 QDIO_USE_OUTBOUND_PCIS;
3553 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3554 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3555
3556 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
3557 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED)
3558 if ((rc = qdio_initialize(&init_data)))
3559 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
3560
3561 kfree(out_sbal_ptrs);
3562 kfree(in_sbal_ptrs);
3563 kfree(qib_param_field);
3564 return rc;
3565}
3566
3567static int
3568qeth_qdio_activate(struct qeth_card *card)
3569{
3570 QETH_DBF_TEXT(setup,3,"qdioact");
3571 return qdio_activate(CARD_DDEV(card), 0);
3572}
3573
3574static int
3575qeth_clear_channel(struct qeth_channel *channel)
3576{
3577 unsigned long flags;
3578 struct qeth_card *card;
3579 int rc;
3580
3581 QETH_DBF_TEXT(trace,3,"clearch");
3582 card = CARD_FROM_CDEV(channel->ccwdev);
3583 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3584 rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
3585 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3586
3587 if (rc)
3588 return rc;
3589 rc = wait_event_interruptible_timeout(card->wait_q,
3590 channel->state==CH_STATE_STOPPED, QETH_TIMEOUT);
3591 if (rc == -ERESTARTSYS)
3592 return rc;
3593 if (channel->state != CH_STATE_STOPPED)
3594 return -ETIME;
3595 channel->state = CH_STATE_DOWN;
3596 return 0;
3597}
3598
3599static int
3600qeth_halt_channel(struct qeth_channel *channel)
3601{
3602 unsigned long flags;
3603 struct qeth_card *card;
3604 int rc;
3605
3606 QETH_DBF_TEXT(trace,3,"haltch");
3607 card = CARD_FROM_CDEV(channel->ccwdev);
3608 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
3609 rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
3610 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
3611
3612 if (rc)
3613 return rc;
3614 rc = wait_event_interruptible_timeout(card->wait_q,
3615 channel->state==CH_STATE_HALTED, QETH_TIMEOUT);
3616 if (rc == -ERESTARTSYS)
3617 return rc;
3618 if (channel->state != CH_STATE_HALTED)
3619 return -ETIME;
3620 return 0;
3621}
3622
3623static int
3624qeth_halt_channels(struct qeth_card *card)
3625{
3626	int rc1 = 0, rc2 = 0, rc3 = 0;
3627
3628 QETH_DBF_TEXT(trace,3,"haltchs");
3629 rc1 = qeth_halt_channel(&card->read);
3630 rc2 = qeth_halt_channel(&card->write);
3631 rc3 = qeth_halt_channel(&card->data);
3632 if (rc1)
3633 return rc1;
3634 if (rc2)
3635 return rc2;
3636 return rc3;
3637}

3638static int
3639qeth_clear_channels(struct qeth_card *card)
3640{
3641	int rc1 = 0, rc2 = 0, rc3 = 0;
3642
3643 QETH_DBF_TEXT(trace,3,"clearchs");
3644 rc1 = qeth_clear_channel(&card->read);
3645 rc2 = qeth_clear_channel(&card->write);
3646 rc3 = qeth_clear_channel(&card->data);
3647 if (rc1)
3648 return rc1;
3649 if (rc2)
3650 return rc2;
3651 return rc3;
3652}
3653
3654static int
3655qeth_clear_halt_card(struct qeth_card *card, int halt)
3656{
3657 int rc = 0;
3658
3659 QETH_DBF_TEXT(trace,3,"clhacrd");
3660 QETH_DBF_HEX(trace, 3, &card, sizeof(void *));
3661
3662 if (halt)
3663 rc = qeth_halt_channels(card);
3664 if (rc)
3665 return rc;
3666 return qeth_clear_channels(card);
3667}
3668
3669static int
3670qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
3671{
3672 int rc = 0;
3673
3674 QETH_DBF_TEXT(trace,3,"qdioclr");
3675 switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
3676 QETH_QDIO_CLEANING)) {
3677 case QETH_QDIO_ESTABLISHED:
3678 if ((rc = qdio_cleanup(CARD_DDEV(card),
3679 (card->info.type == QETH_CARD_TYPE_IQD) ?
3680 QDIO_FLAG_CLEANUP_USING_HALT :
3681 QDIO_FLAG_CLEANUP_USING_CLEAR)))
3682 QETH_DBF_TEXT_(trace, 3, "1err%d", rc);
3683 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
3684 break;
3685 case QETH_QDIO_CLEANING:
3686 return rc;
3687 default:
3688 break;
3689 }
3690 if ((rc = qeth_clear_halt_card(card, use_halt)))
3691 QETH_DBF_TEXT_(trace, 3, "2err%d", rc);
3692 card->state = CARD_STATE_DOWN;
3693 return rc;
3694}
3695
3696static int
3697qeth_dm_act(struct qeth_card *card)
3698{
3699 int rc;
3700 struct qeth_cmd_buffer *iob;
3701
3702 QETH_DBF_TEXT(setup,2,"dmact");
3703
3704 iob = qeth_wait_for_buffer(&card->write);
3705 memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
3706
3707 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
3708 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
3709 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
3710 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3711 rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
3712 return rc;
3713}
3714
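/*
 * MPC bring-up, roughly one control-data exchange per step; any failure
 * unwinds through qeth_qdio_clear_card():
 *   issue_next_read -> cm_enable -> cm_setup -> ulp_enable -> ulp_setup
 *   -> alloc qdio buffers -> qdio establish -> qdio activate -> dm_act
 */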
3715static int
3716qeth_mpc_initialize(struct qeth_card *card)
3717{
3718 int rc;
3719
3720 QETH_DBF_TEXT(setup,2,"mpcinit");
3721
3722 if ((rc = qeth_issue_next_read(card))){
3723 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3724 return rc;
3725 }
3726 if ((rc = qeth_cm_enable(card))){
3727 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3728 goto out_qdio;
3729 }
3730 if ((rc = qeth_cm_setup(card))){
3731 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
3732 goto out_qdio;
3733 }
3734 if ((rc = qeth_ulp_enable(card))){
3735 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
3736 goto out_qdio;
3737 }
3738 if ((rc = qeth_ulp_setup(card))){
3739 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3740 goto out_qdio;
3741 }
3742 if ((rc = qeth_alloc_qdio_buffers(card))){
3743 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3744 goto out_qdio;
3745 }
3746 if ((rc = qeth_qdio_establish(card))){
3747 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
3748 qeth_free_qdio_buffers(card);
3749 goto out_qdio;
3750 }
3751 if ((rc = qeth_qdio_activate(card))){
3752 QETH_DBF_TEXT_(setup, 2, "7err%d", rc);
3753 goto out_qdio;
3754 }
3755 if ((rc = qeth_dm_act(card))){
3756 QETH_DBF_TEXT_(setup, 2, "8err%d", rc);
3757 goto out_qdio;
3758 }
3759
3760 return 0;
3761out_qdio:
3762 qeth_qdio_clear_card(card, card->info.type!=QETH_CARD_TYPE_IQD);
3763 return rc;
3764}
3765
3766static struct net_device *
3767qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype)
3768{
3769 struct net_device *dev = NULL;
3770
3771 switch (type) {
3772 case QETH_CARD_TYPE_OSAE:
3773 switch (linktype) {
3774 case QETH_LINK_TYPE_LANE_TR:
3775 case QETH_LINK_TYPE_HSTR:
3776#ifdef CONFIG_TR
3777 dev = alloc_trdev(0);
3778#endif /* CONFIG_TR */
3779 break;
3780 default:
3781 dev = alloc_etherdev(0);
3782 }
3783 break;
3784 case QETH_CARD_TYPE_IQD:
3785 dev = alloc_netdev(0, "hsi%d", ether_setup);
3786 break;
3787 case QETH_CARD_TYPE_OSN:
3788 dev = alloc_netdev(0, "osn%d", ether_setup);
3789 break;
3790 default:
3791 dev = alloc_etherdev(0);
3792 }
3793 return dev;
3794}
3795
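/*
 * With fake_ll the layer-3 card delivers frames without a link-level
 * header; the code around here synthesizes one (Ethernet or token ring,
 * "FAKELL" as peer address), apparently so that packet sniffers see a
 * plausible header.  Sketch: for TR push QETH_FAKE_LL_LEN_TR bytes,
 * otherwise QETH_FAKE_LL_LEN_ETH bytes, and fill in dev_addr + type/len.
 */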
3796/* hard_header fake function; used in case fake_ll is set */
3797static int
3798qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
3799 unsigned short type, const void *daddr, const void *saddr,
3800 unsigned len)
3801{
3802 if(dev->type == ARPHRD_IEEE802_TR){
3803 struct trh_hdr *hdr;
3804 hdr = (struct trh_hdr *)skb_push(skb, QETH_FAKE_LL_LEN_TR);
3805 memcpy(hdr->saddr, dev->dev_addr, TR_ALEN);
3806 memcpy(hdr->daddr, "FAKELL", TR_ALEN);
3807 return QETH_FAKE_LL_LEN_TR;
3808
3809 } else {
3810 struct ethhdr *hdr;
3811 hdr = (struct ethhdr *)skb_push(skb, QETH_FAKE_LL_LEN_ETH);
3812 memcpy(hdr->h_source, dev->dev_addr, ETH_ALEN);
3813 memcpy(hdr->h_dest, "FAKELL", ETH_ALEN);
3814 if (type != ETH_P_802_3)
3815 hdr->h_proto = htons(type);
3816 else
3817 hdr->h_proto = htons(len);
3818 return QETH_FAKE_LL_LEN_ETH;
3819
3820 }
3821}
3822
3823static const struct header_ops qeth_fake_ops = {
3824 .create = qeth_fake_header,
3825 .parse = qeth_hard_header_parse,
3826};
3827
3828static int
3829qeth_send_packet(struct qeth_card *, struct sk_buff *);
3830
3831static int
3832qeth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3833{
3834 int rc;
3835 struct qeth_card *card;
3836
3837 QETH_DBF_TEXT(trace, 6, "hrdstxmi");
3838 card = (struct qeth_card *)dev->priv;
3839 if (skb==NULL) {
3840 card->stats.tx_dropped++;
3841 card->stats.tx_errors++;
3842 /* return OK; otherwise ksoftirqd goes to 100% */
3843 return NETDEV_TX_OK;
3844 }
3845 if ((card->state != CARD_STATE_UP) || !card->lan_online) {
3846 card->stats.tx_dropped++;
3847 card->stats.tx_errors++;
3848 card->stats.tx_carrier_errors++;
3849 dev_kfree_skb_any(skb);
3850 /* return OK; otherwise ksoftirqd goes to 100% */
3851 return NETDEV_TX_OK;
3852 }
3853 if (card->options.performance_stats) {
3854 card->perf_stats.outbound_cnt++;
3855 card->perf_stats.outbound_start_time = qeth_get_micros();
3856 }
3857 netif_stop_queue(dev);
3858 if ((rc = qeth_send_packet(card, skb))) {
3859 if (rc == -EBUSY) {
3860 return NETDEV_TX_BUSY;
3861 } else {
3862 card->stats.tx_errors++;
3863 card->stats.tx_dropped++;
3864 dev_kfree_skb_any(skb);
3865 /*set to OK; otherwise ksoftirqd goes to 100% */
3866 rc = NETDEV_TX_OK;
3867 }
3868 }
3869 netif_wake_queue(dev);
3870 if (card->options.performance_stats)
3871 card->perf_stats.outbound_time += qeth_get_micros() -
3872 card->perf_stats.outbound_start_time;
3873 return rc;
3874}
3875
3876static int
3877qeth_verify_vlan_dev(struct net_device *dev, struct qeth_card *card)
3878{
3879 int rc = 0;
3880#ifdef CONFIG_QETH_VLAN
3881 struct vlan_group *vg;
3882 int i;
3883
3884 if (!(vg = card->vlangrp))
3885 return rc;
3886
3887 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){
3888 if (vlan_group_get_device(vg, i) == dev){
3889 rc = QETH_VLAN_CARD;
3890 break;
3891 }
3892 }
3893 if (rc && !(vlan_dev_info(dev)->real_dev->priv == (void *)card))
3894 return 0;
3895
3896#endif
3897 return rc;
3898}
3899
3900static int
3901qeth_verify_dev(struct net_device *dev)
3902{
3903 struct qeth_card *card;
3904 unsigned long flags;
3905 int rc = 0;
3906
3907 read_lock_irqsave(&qeth_card_list.rwlock, flags);
3908 list_for_each_entry(card, &qeth_card_list.list, list){
3909 if (card->dev == dev){
3910 rc = QETH_REAL_CARD;
3911 break;
3912 }
3913 rc = qeth_verify_vlan_dev(dev, card);
3914 if (rc)
3915 break;
3916 }
3917 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
3918
3919 return rc;
3920}
3921
3922static struct qeth_card *
3923qeth_get_card_from_dev(struct net_device *dev)
3924{
3925 struct qeth_card *card = NULL;
3926 int rc;
3927
3928 rc = qeth_verify_dev(dev);
3929 if (rc == QETH_REAL_CARD)
3930 card = (struct qeth_card *)dev->priv;
3931 else if (rc == QETH_VLAN_CARD)
3932 card = (struct qeth_card *)
3933 vlan_dev_info(dev)->real_dev->priv;
3934
3935 QETH_DBF_TEXT_(trace, 4, "%d", rc);
3936 return card ;
3937}
3938
3939static void
3940qeth_tx_timeout(struct net_device *dev)
3941{
3942 struct qeth_card *card;
3943
3944 card = (struct qeth_card *) dev->priv;
3945 card->stats.tx_errors++;
3946 qeth_schedule_recovery(card);
3947}
3948
3949static int
3950qeth_open(struct net_device *dev)
3951{
3952 struct qeth_card *card;
3953
3954 QETH_DBF_TEXT(trace, 4, "qethopen");
3955
3956 card = (struct qeth_card *) dev->priv;
3957
3958 if (card->state != CARD_STATE_SOFTSETUP)
3959 return -ENODEV;
3960
3961 if ( (card->info.type != QETH_CARD_TYPE_OSN) &&
3962 (card->options.layer2) &&
3963 (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
3964 QETH_DBF_TEXT(trace,4,"nomacadr");
3965 return -EPERM;
3966 }
3967 card->data.state = CH_STATE_UP;
3968 card->state = CARD_STATE_UP;
3969 card->dev->flags |= IFF_UP;
3970 netif_start_queue(dev);
3971
3972 if (!card->lan_online && netif_carrier_ok(dev))
3973 netif_carrier_off(dev);
3974 return 0;
3975}
3976
3977static int
3978qeth_stop(struct net_device *dev)
3979{
3980 struct qeth_card *card;
3981
3982 QETH_DBF_TEXT(trace, 4, "qethstop");
3983
3984 card = (struct qeth_card *) dev->priv;
3985
3986 netif_tx_disable(dev);
3987 card->dev->flags &= ~IFF_UP;
3988 if (card->state == CARD_STATE_UP)
3989 card->state = CARD_STATE_SOFTSETUP;
3990 return 0;
3991}
3992
3993static int
3994qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3995{
3996 int cast_type = RTN_UNSPEC;
3997
3998 if (card->info.type == QETH_CARD_TYPE_OSN)
3999 return cast_type;
4000
4001 if (skb->dst && skb->dst->neighbour){
4002 cast_type = skb->dst->neighbour->type;
4003 if ((cast_type == RTN_BROADCAST) ||
4004 (cast_type == RTN_MULTICAST) ||
4005 (cast_type == RTN_ANYCAST))
4006 return cast_type;
4007 else
4008 return RTN_UNSPEC;
4009 }
4010 /* try something else */
4011	if (skb->protocol == htons(ETH_P_IPV6))
4012 return (skb_network_header(skb)[24] == 0xff) ?
4013 RTN_MULTICAST : 0;
4014	else if (skb->protocol == htons(ETH_P_IP))
4015 return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
4016 RTN_MULTICAST : 0;
4017 /* ... */
4018 if (!memcmp(skb->data, skb->dev->broadcast, 6))
4019 return RTN_BROADCAST;
4020 else {
4021 u16 hdr_mac;
4022
4023 hdr_mac = *((u16 *)skb->data);
4024 /* tr multicast? */
4025 switch (card->info.link_type) {
4026 case QETH_LINK_TYPE_HSTR:
4027 case QETH_LINK_TYPE_LANE_TR:
4028 if ((hdr_mac == QETH_TR_MAC_NC) ||
4029 (hdr_mac == QETH_TR_MAC_C))
4030 return RTN_MULTICAST;
4031 break;
4032 /* eth or so multicast? */
4033 default:
4034 if ((hdr_mac == QETH_ETH_MAC_V4) ||
4035 (hdr_mac == QETH_ETH_MAC_V6))
4036 return RTN_MULTICAST;
4037 }
4038 }
4039 return cast_type;
4040}
4041
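/*
 * Pick the outbound queue.  On 4-queue setups with TOS priority queueing
 * the IPv4 TOS bits select the queue: LOWDELAY maps to queue 0,
 * NOTIMPORTANT to queue 3; in precedence mode the queue is 3 - (tos >> 6).
 * Illustrative example: tos 0xB8 -> precedence 2 -> queue 1.  Everything
 * else falls back to default_out_queue.
 */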
4042static int
4043qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
4044 int ipv, int cast_type)
4045{
4046 if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
4047 return card->qdio.default_out_queue;
4048 switch (card->qdio.no_out_queues) {
4049 case 4:
4050 if (cast_type && card->info.is_multicast_different)
4051 return card->info.is_multicast_different &
4052 (card->qdio.no_out_queues - 1);
4053 if (card->qdio.do_prio_queueing && (ipv == 4)) {
4054 const u8 tos = ip_hdr(skb)->tos;
4055
4056 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){
4057 if (tos & IP_TOS_NOTIMPORTANT)
4058 return 3;
4059 if (tos & IP_TOS_HIGHRELIABILITY)
4060 return 2;
4061 if (tos & IP_TOS_HIGHTHROUGHPUT)
4062 return 1;
4063 if (tos & IP_TOS_LOWDELAY)
4064 return 0;
4065 }
4066 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC)
4067 return 3 - (tos >> 6);
4068 } else if (card->qdio.do_prio_queueing && (ipv == 6)) {
4069 /* TODO: IPv6!!! */
4070 }
4071 return card->qdio.default_out_queue;
4072 case 1: /* fallthrough for single-out-queue 1920-device */
4073 default:
4074 return card->qdio.default_out_queue;
4075 }
4076}
4077
4078static inline int
4079qeth_get_ip_version(struct sk_buff *skb)
4080{
4081 switch (skb->protocol) {
4082	case __constant_htons(ETH_P_IPV6):
4083		return 6;
4084	case __constant_htons(ETH_P_IP):
4085 return 4;
4086 default:
4087 return 0;
4088 }
4089}
4090
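/*
 * For VLAN-tagged frames (layer 2, or IPv6 in layer 3) the 802.1Q tag is
 * spliced into the frame in place: push 4 bytes, move the 12 MAC-address
 * bytes up and write 0x8100 + VLAN id behind them.  Before/after sketch:
 *
 *   [dst(6) src(6) type ...]  ->  [dst(6) src(6) 8100 tag type ...]
 */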
4091static struct qeth_hdr *
4092__qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv)
4093{
4094#ifdef CONFIG_QETH_VLAN
4095 u16 *tag;
4096 if (card->vlangrp && vlan_tx_tag_present(skb) &&
4097 ((ipv == 6) || card->options.layer2) ) {
4098 /*
4099		 * Move the mac addresses (6 bytes dest, 6 bytes src)
4100 * to the beginning of the new header. We are using three
4101 * memcpys instead of one memmove to save cycles.
4102 */
4103 skb_push(skb, VLAN_HLEN);
4104 skb_copy_to_linear_data(skb, skb->data + 4, 4);
4105 skb_copy_to_linear_data_offset(skb, 4, skb->data + 8, 4);
4106 skb_copy_to_linear_data_offset(skb, 8, skb->data + 12, 4);
4107 tag = (u16 *)(skb->data + 12);
4108 /*
4109 * first two bytes = ETH_P_8021Q (0x8100)
4110		 * second two bytes = VLAN ID
4111 */
4112 *tag = __constant_htons(ETH_P_8021Q);
4113 *(tag + 1) = htons(vlan_tx_tag_get(skb));
4114 }
4115#endif
4116 return ((struct qeth_hdr *)
4117 qeth_push_skb(card, skb, sizeof(struct qeth_hdr)));
4118}
4119
4120static void
4121__qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb)
4122{
4123 if (orig_skb != new_skb)
4124 dev_kfree_skb_any(new_skb);
4125}
4126
4127static struct sk_buff *
4128qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
4129 struct qeth_hdr **hdr, int ipv)
4130{
4131 struct sk_buff *new_skb, *new_skb2;
4132
4133 QETH_DBF_TEXT(trace, 6, "prepskb");
4134	new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC);
4136 if (!new_skb)
4137 return NULL;
4138 new_skb2 = qeth_realloc_headroom(card, new_skb,
4139 sizeof(struct qeth_hdr));
4140 if (!new_skb2) {
4141 __qeth_free_new_skb(skb, new_skb);
4142 return NULL;
4143 }
4144 if (new_skb != skb)
4145 __qeth_free_new_skb(new_skb2, new_skb);
4146 new_skb = new_skb2;
4147 *hdr = __qeth_prepare_skb(card, new_skb, ipv);
4148 if (*hdr == NULL) {
4149 __qeth_free_new_skb(skb, new_skb);
4150 return NULL;
4151 }
4152 return new_skb;
4153}
4154
4155static inline u8
4156qeth_get_qeth_hdr_flags4(int cast_type)
4157{
4158 if (cast_type == RTN_MULTICAST)
4159 return QETH_CAST_MULTICAST;
4160 if (cast_type == RTN_BROADCAST)
4161 return QETH_CAST_BROADCAST;
4162 return QETH_CAST_UNICAST;
4163}
4164
4165static inline u8
4166qeth_get_qeth_hdr_flags6(int cast_type)
4167{
4168 u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
4169 if (cast_type == RTN_MULTICAST)
4170 return ct | QETH_CAST_MULTICAST;
4171 if (cast_type == RTN_ANYCAST)
4172 return ct | QETH_CAST_ANYCAST;
4173 if (cast_type == RTN_BROADCAST)
4174 return ct | QETH_CAST_BROADCAST;
4175 return ct | QETH_CAST_UNICAST;
4176}
4177
4178static void
4179qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
4180 struct sk_buff *skb)
4181{
4182 __u16 hdr_mac;
4183
4184 if (!memcmp(skb->data+QETH_HEADER_SIZE,
4185 skb->dev->broadcast,6)) { /* broadcast? */
4186 *(__u32 *)hdr->hdr.l2.flags |=
4187 QETH_LAYER2_FLAG_BROADCAST << 8;
4188 return;
4189 }
4190 hdr_mac=*((__u16*)skb->data);
4191 /* tr multicast? */
4192 switch (card->info.link_type) {
4193 case QETH_LINK_TYPE_HSTR:
4194 case QETH_LINK_TYPE_LANE_TR:
4195 if ((hdr_mac == QETH_TR_MAC_NC) ||
4196 (hdr_mac == QETH_TR_MAC_C) )
4197 *(__u32 *)hdr->hdr.l2.flags |=
4198 QETH_LAYER2_FLAG_MULTICAST << 8;
4199 else
4200 *(__u32 *)hdr->hdr.l2.flags |=
4201 QETH_LAYER2_FLAG_UNICAST << 8;
4202 break;
4203 /* eth or so multicast? */
4204 default:
4205 if ( (hdr_mac==QETH_ETH_MAC_V4) ||
4206 (hdr_mac==QETH_ETH_MAC_V6) )
4207 *(__u32 *)hdr->hdr.l2.flags |=
4208 QETH_LAYER2_FLAG_MULTICAST << 8;
4209 else
4210 *(__u32 *)hdr->hdr.l2.flags |=
4211 QETH_LAYER2_FLAG_UNICAST << 8;
4212 }
4213}
4214
4215static void
4216qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
4217 struct sk_buff *skb, int cast_type)
4218{
4219 memset(hdr, 0, sizeof(struct qeth_hdr));
4220 hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
4221
4222 /* set byte 0 to "0x02" and byte 3 to casting flags */
4223 if (cast_type==RTN_MULTICAST)
4224 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_MULTICAST << 8;
4225 else if (cast_type==RTN_BROADCAST)
4226 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_BROADCAST << 8;
4227 else
4228 qeth_layer2_get_packet_type(card, hdr, skb);
4229
4230 hdr->hdr.l2.pkt_length = skb->len-QETH_HEADER_SIZE;
4231#ifdef CONFIG_QETH_VLAN
4232 /* VSWITCH relies on the VLAN
4233 * information to be present in
4234 * the QDIO header */
4235 if ((card->vlangrp != NULL) &&
4236 vlan_tx_tag_present(skb)) {
4237 *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_VLAN << 8;
4238 hdr->hdr.l2.vlan_id = vlan_tx_tag_get(skb);
4239 }
4240#endif
4241}
4242
4243void
4244qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
4245 struct sk_buff *skb, int ipv, int cast_type)
4246{
4247 QETH_DBF_TEXT(trace, 6, "fillhdr");
4248
4249 memset(hdr, 0, sizeof(struct qeth_hdr));
4250 if (card->options.layer2) {
4251 qeth_layer2_fill_header(card, hdr, skb, cast_type);
4252 return;
4253 }
4254 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
4255 hdr->hdr.l3.ext_flags = 0;
4256#ifdef CONFIG_QETH_VLAN
4257 /*
4258	 * Set the VLAN info before this location is overwritten with the
4259	 * next hop ip. v6 uses passthrough, v4 sets the tag in the QDIO header.
4260 */
4261 if (card->vlangrp && vlan_tx_tag_present(skb)) {
4262 hdr->hdr.l3.ext_flags = (ipv == 4) ?
4263 QETH_HDR_EXT_VLAN_FRAME :
4264 QETH_HDR_EXT_INCLUDE_VLAN_TAG;
4265 hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
4266 }
4267#endif /* CONFIG_QETH_VLAN */
4268 hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
4269 if (ipv == 4) { /* IPv4 */
4270 hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags4(cast_type);
4271 memset(hdr->hdr.l3.dest_addr, 0, 12);
4272 if ((skb->dst) && (skb->dst->neighbour)) {
4273 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
4274 *((u32 *) skb->dst->neighbour->primary_key);
4275 } else {
4276 /* fill in destination address used in ip header */
4277 *((u32 *)(&hdr->hdr.l3.dest_addr[12])) =
4278 ip_hdr(skb)->daddr;
4279 }
4280 } else if (ipv == 6) { /* IPv6 or passthru */
4281 hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type);
4282 if ((skb->dst) && (skb->dst->neighbour)) {
4283 memcpy(hdr->hdr.l3.dest_addr,
4284 skb->dst->neighbour->primary_key, 16);
4285 } else {
4286 /* fill in destination address used in ip header */
4287 memcpy(hdr->hdr.l3.dest_addr,
4288 &ipv6_hdr(skb)->daddr, 16);
4289 }
4290 } else { /* passthrough */
4291 if((skb->dev->type == ARPHRD_IEEE802_TR) &&
4292 !memcmp(skb->data + sizeof(struct qeth_hdr) +
4293 sizeof(__u16), skb->dev->broadcast, 6)) {
4294 hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
4295 QETH_HDR_PASSTHRU;
4296 } else if (!memcmp(skb->data + sizeof(struct qeth_hdr),
4297 skb->dev->broadcast, 6)) { /* broadcast? */
4298 hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
4299 QETH_HDR_PASSTHRU;
4300 } else {
4301 hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
4302 QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
4303 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
4304 }
4305 }
4306}
4307
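/*
 * Chop the linear skb data into SBAL elements at page boundaries.  Each
 * element gets FIRST/MIDDLE/LAST_FRAG flags so the adapter can reassemble
 * the frame; for TSO (is_tso) the header already occupies the first
 * element, so the first data chunk is not flagged FIRST_FRAG again.
 */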
4308static void
4309__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
4310 int is_tso, int *next_element_to_fill)
4311{
4312 int length = skb->len;
4313 int length_here;
4314 int element;
4315 char *data;
4316	int first_lap;
4317
4318 element = *next_element_to_fill;
4319 data = skb->data;
4320 first_lap = (is_tso == 0 ? 1 : 0);
4321
4322 while (length > 0) {
4323 /* length_here is the remaining amount of data in this page */
4324 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
4325 if (length < length_here)
4326 length_here = length;
4327
4328 buffer->element[element].addr = data;
4329 buffer->element[element].length = length_here;
4330 length -= length_here;
4331 if (!length) {
4332 if (first_lap)
4333 buffer->element[element].flags = 0;
4334 else
4335 buffer->element[element].flags =
4336 SBAL_FLAGS_LAST_FRAG;
4337 } else {
4338 if (first_lap)
4339 buffer->element[element].flags =
4340 SBAL_FLAGS_FIRST_FRAG;
4341 else
4342 buffer->element[element].flags =
4343 SBAL_FLAGS_MIDDLE_FRAG;
4344 }
4345 data += length_here;
4346 element++;
4347 first_lap = 0;
4348 }
4349 *next_element_to_fill = element;
4350}
4351
4352static int
4353qeth_fill_buffer(struct qeth_qdio_out_q *queue,
4354 struct qeth_qdio_out_buffer *buf,
4355 struct sk_buff *skb)
4356{
4357 struct qdio_buffer *buffer;
4358 struct qeth_hdr_tso *hdr;
4359 int flush_cnt = 0, hdr_len, large_send = 0;
4360
4361 QETH_DBF_TEXT(trace, 6, "qdfillbf");
4362
4363 buffer = buf->buffer;
4364 atomic_inc(&skb->users);
4365 skb_queue_tail(&buf->skb_list, skb);
4366
4367 hdr = (struct qeth_hdr_tso *) skb->data;
4368	/* check first for TSO ... */
4369 if (hdr->hdr.hdr.l3.id == QETH_HEADER_TYPE_TSO) {
4370 int element = buf->next_element_to_fill;
4371
4372 hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
4373		/* fill first buffer entry only with header information */
4374 buffer->element[element].addr = skb->data;
4375 buffer->element[element].length = hdr_len;
4376 buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
4377 buf->next_element_to_fill++;
4378 skb->data += hdr_len;
4379 skb->len -= hdr_len;
4380 large_send = 1;
4381 }
4382 if (skb_shinfo(skb)->nr_frags == 0)
4383 __qeth_fill_buffer(skb, buffer, large_send,
4384 (int *)&buf->next_element_to_fill);
4385 else
4386 __qeth_fill_buffer_frag(skb, buffer, large_send,
4387 (int *)&buf->next_element_to_fill);
4388
4389 if (!queue->do_pack) {
4390 QETH_DBF_TEXT(trace, 6, "fillbfnp");
4391 /* set state to PRIMED -> will be flushed */
4392 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
4393 flush_cnt = 1;
4394 } else {
4395 QETH_DBF_TEXT(trace, 6, "fillbfpa");
4396 if (queue->card->options.performance_stats)
4397 queue->card->perf_stats.skbs_sent_pack++;
4398 if (buf->next_element_to_fill >=
4399 QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
4400 /*
4401			 * packed buffer is full -> set state PRIMED
4402 * -> will be flushed
4403 */
4404 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
4405 flush_cnt = 1;
4406 }
4407 }
4408 return flush_cnt;
4409}
4410
4411static int
4412qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4413 struct sk_buff *skb, struct qeth_hdr *hdr,
4414 int elements_needed,
4415 struct qeth_eddp_context *ctx)
4416{
4417 struct qeth_qdio_out_buffer *buffer;
4418 int buffers_needed = 0;
4419 int flush_cnt = 0;
4420 int index;
4421
4422 QETH_DBF_TEXT(trace, 6, "dosndpfa");
4423
4424 /* spin until we get the queue ... */
4425 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
4426 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
4427 /* ... now we've got the queue */
4428 index = queue->next_buf_to_fill;
4429 buffer = &queue->bufs[queue->next_buf_to_fill];
4430 /*
4431 * check if buffer is empty to make sure that we do not 'overtake'
4432 * ourselves and try to fill a buffer that is already primed
4433 */
4434 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4435 goto out;
4436 if (ctx == NULL)
4437 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
4438 QDIO_MAX_BUFFERS_PER_Q;
4439 else {
4440 buffers_needed = qeth_eddp_check_buffers_for_context(queue,ctx);
4441 if (buffers_needed < 0)
4442 goto out;
4443 queue->next_buf_to_fill =
4444 (queue->next_buf_to_fill + buffers_needed) %
4445 QDIO_MAX_BUFFERS_PER_Q;
4446 }
4447 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4448 if (ctx == NULL) {
4449 qeth_fill_buffer(queue, buffer, skb);
4450 qeth_flush_buffers(queue, 0, index, 1);
4451 } else {
4452 flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
4453 WARN_ON(buffers_needed != flush_cnt);
4454 qeth_flush_buffers(queue, 0, index, flush_cnt);
4455 }
4456 return 0;
4457out:
4458 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4459 return -EBUSY;
4460}
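/*
 * The "fast" variant above never packs: each skb (or EDDP context)
 * claims fresh empty buffers and is flushed immediately.  This is the
 * path qeth_send_packet() uses for IQD (HiperSockets) devices, while
 * qeth_do_send_packet() below adds the packing state machine on top of
 * the same claim/fill/flush sequence.
 */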
4461
4462static int
4463qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4464 struct sk_buff *skb, struct qeth_hdr *hdr,
4465 int elements_needed, struct qeth_eddp_context *ctx)
4466{
4467 struct qeth_qdio_out_buffer *buffer;
4468 int start_index;
4469 int flush_count = 0;
4470 int do_pack = 0;
4471 int tmp;
4472 int rc = 0;
4473
4474 QETH_DBF_TEXT(trace, 6, "dosndpkt");
4475
4476 /* spin until we get the queue ... */
4477 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
4478 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
4479 start_index = queue->next_buf_to_fill;
4480 buffer = &queue->bufs[queue->next_buf_to_fill];
4481 /*
4482 * check if buffer is empty to make sure that we do not 'overtake'
4483 * ourselves and try to fill a buffer that is already primed
4484 */
4485 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
4486 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4487 return -EBUSY;
4488 }
4489 /* check if we need to switch packing state of this queue */
4490 qeth_switch_to_packing_if_needed(queue);
4491 if (queue->do_pack){
4492 do_pack = 1;
4493 if (ctx == NULL) {
4494 /* does packet fit in current buffer? */
4495 if((QETH_MAX_BUFFER_ELEMENTS(card) -
4496 buffer->next_element_to_fill) < elements_needed){
4497 /* ... no -> set state PRIMED */
4498 atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED);
4499 flush_count++;
4500 queue->next_buf_to_fill =
4501 (queue->next_buf_to_fill + 1) %
4502 QDIO_MAX_BUFFERS_PER_Q;
4503 buffer = &queue->bufs[queue->next_buf_to_fill];
4504 /* we did a step forward, so check buffer state
4505 * again */
4506 if (atomic_read(&buffer->state) !=
4507 QETH_QDIO_BUF_EMPTY){
4508 qeth_flush_buffers(queue, 0, start_index, flush_count);
4509 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4510 return -EBUSY;
4511 }
4512 }
4513 } else {
4514 /* check if we have enough elements (including following
4515 * free buffers) to handle eddp context */
4516 if (qeth_eddp_check_buffers_for_context(queue,ctx) < 0){
4517 if (net_ratelimit())
4518					PRINT_WARN("eddp: not enough free buffer elements, packet dropped\n");
4519 rc = -EBUSY;
4520 goto out;
4521 }
4522 }
4523 }
4524 if (ctx == NULL)
4525 tmp = qeth_fill_buffer(queue, buffer, skb);
4526 else {
4527 tmp = qeth_eddp_fill_buffer(queue,ctx,queue->next_buf_to_fill);
4528 if (tmp < 0) {
4529			PRINT_WARN("eddp: could not fill buffer, packet dropped\n");
4530			rc = -EBUSY;
4531 goto out;
4532 }
4533 }
4534 queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
4535 QDIO_MAX_BUFFERS_PER_Q;
4536 flush_count += tmp;
4537out:
4538 if (flush_count)
4539 qeth_flush_buffers(queue, 0, start_index, flush_count);
4540 else if (!atomic_read(&queue->set_pci_flags_count))
4541 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
4542 /*
4543 * queue->state will go from LOCKED -> UNLOCKED or from
4544 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
4545 * (switch packing state or flush buffer to get another pci flag out).
4546 * In that case we will enter this loop
4547 */
4548 while (atomic_dec_return(&queue->state)){
4549 flush_count = 0;
4550 start_index = queue->next_buf_to_fill;
4551 /* check if we can go back to non-packing state */
4552 flush_count += qeth_switch_to_nonpacking_if_needed(queue);
4553 /*
4554 * check if we need to flush a packing buffer to get a pci
4555 * flag out on the queue
4556 */
4557 if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
4558 flush_count += qeth_flush_buffers_on_no_pci(queue);
4559 if (flush_count)
4560 qeth_flush_buffers(queue, 0, start_index, flush_count);
4561 }
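	/*
	 * A sketch of the unlock protocol implemented by the loop
	 * above, assuming the enum values QETH_OUT_Q_UNLOCKED == 0,
	 * QETH_OUT_Q_LOCKED == 1 and QETH_OUT_Q_LOCKED_FLUSH == 2:
	 *
	 *	atomic_dec_return(&queue->state)
	 *	  2 -> 1: the output handler requested another pass
	 *	          (returns nonzero, so loop and flush again)
	 *	  1 -> 0: nothing pending, the queue is now UNLOCKED
	 */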
4562 /* at this point the queue is UNLOCKED again */
4563 if (queue->card->options.performance_stats && do_pack)
4564 queue->card->perf_stats.bufs_sent_pack += flush_count;
4565
4566 return rc;
4567}
4568
4569static int
4570qeth_get_elements_no(struct qeth_card *card, void *hdr,
4571 struct sk_buff *skb, int elems)
4572{
4573 int elements_needed = 0;
4574
4575 if (skb_shinfo(skb)->nr_frags > 0)
4576 elements_needed = (skb_shinfo(skb)->nr_frags + 1);
4577 if (elements_needed == 0)
4578 elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
4579 + skb->len) >> PAGE_SHIFT);
4580 if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)){
4581		PRINT_ERR("Packet needs too many buffer elements "
4582			  "(elements=%d / length=%d). Discarded.\n",
4583			  (elements_needed + elems), skb->len);
4584 return 0;
4585 }
4586 return elements_needed;
4587}
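/*
 * A worked example of the estimate above (illustrative numbers): with
 * PAGE_SIZE 4096, a linear skb of len 6000 whose header starts at page
 * offset 3000 needs
 *
 *	1 + ((3000 + 6000) >> PAGE_SHIFT) = 1 + 2 = 3
 *
 * SBAL elements, because the data spans three 4KB pages.  Fragmented
 * skbs are charged one element per fragment plus one for skb->data.
 */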
4588
4589static void qeth_tx_csum(struct sk_buff *skb)
4590{
4591 int tlen;
4592
4593 if (skb->protocol == htons(ETH_P_IP)) {
4594 tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);
4595 switch (ip_hdr(skb)->protocol) {
4596 case IPPROTO_TCP:
4597 tcp_hdr(skb)->check = 0;
4598 tcp_hdr(skb)->check = csum_tcpudp_magic(
4599 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
4600 tlen, ip_hdr(skb)->protocol,
4601 skb_checksum(skb, skb_transport_offset(skb),
4602 tlen, 0));
4603 break;
4604 case IPPROTO_UDP:
4605 udp_hdr(skb)->check = 0;
4606 udp_hdr(skb)->check = csum_tcpudp_magic(
4607 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
4608 tlen, ip_hdr(skb)->protocol,
4609 skb_checksum(skb, skb_transport_offset(skb),
4610 tlen, 0));
4611 break;
4612 }
4613 } else if (skb->protocol == htons(ETH_P_IPV6)) {
4614 switch (ipv6_hdr(skb)->nexthdr) {
4615 case IPPROTO_TCP:
4616 tcp_hdr(skb)->check = 0;
4617 tcp_hdr(skb)->check = csum_ipv6_magic(
4618 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
4619 ipv6_hdr(skb)->payload_len,
4620 ipv6_hdr(skb)->nexthdr,
4621 skb_checksum(skb, skb_transport_offset(skb),
4622 ipv6_hdr(skb)->payload_len, 0));
4623 break;
4624 case IPPROTO_UDP:
4625 udp_hdr(skb)->check = 0;
4626 udp_hdr(skb)->check = csum_ipv6_magic(
4627 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
4628 ipv6_hdr(skb)->payload_len,
4629 ipv6_hdr(skb)->nexthdr,
4630 skb_checksum(skb, skb_transport_offset(skb),
4631 ipv6_hdr(skb)->payload_len, 0));
4632 break;
4633 }
4634 }
4635}
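/*
 * qeth_tx_csum() is the software fallback used when large send is off
 * but the stack handed us a CHECKSUM_PARTIAL skb: it folds the TCP/UDP
 * payload with skb_checksum() and writes the pseudo-header sum via
 * csum_tcpudp_magic()/csum_ipv6_magic(), i.e. it completes in software
 * what a NIC with TX checksum offload would do in hardware.
 */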
4636
4637static int
4638qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
4639{
4640 int ipv = 0;
4641 int cast_type;
4642 struct qeth_qdio_out_q *queue;
4643 struct qeth_hdr *hdr = NULL;
4644 int elements_needed = 0;
4645 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
4646 struct qeth_eddp_context *ctx = NULL;
4647 int tx_bytes = skb->len;
4648 unsigned short nr_frags = skb_shinfo(skb)->nr_frags;
4649 unsigned short tso_size = skb_shinfo(skb)->gso_size;
4650 struct sk_buff *new_skb, *new_skb2;
4651 int rc;
4652
4653 QETH_DBF_TEXT(trace, 6, "sendpkt");
4654
4655 new_skb = skb;
4656 if ((card->info.type == QETH_CARD_TYPE_OSN) &&
4657 (skb->protocol == htons(ETH_P_IPV6)))
4658 return -EPERM;
4659 cast_type = qeth_get_cast_type(card, skb);
4660 if ((cast_type == RTN_BROADCAST) &&
4661 (card->info.broadcast_capable == 0))
4662 return -EPERM;
4663 queue = card->qdio.out_qs
4664 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
4665 if (!card->options.layer2) {
4666 ipv = qeth_get_ip_version(skb);
4667 if ((card->dev->header_ops == &qeth_fake_ops) && ipv) {
4668 new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC);
4669 if (!new_skb)
4670 return -ENOMEM;
4671 if(card->dev->type == ARPHRD_IEEE802_TR){
4672 skb_pull(new_skb, QETH_FAKE_LL_LEN_TR);
4673 } else {
4674 skb_pull(new_skb, QETH_FAKE_LL_LEN_ETH);
4675 }
4676 }
4677 }
4678 if (skb_is_gso(skb))
4679 large_send = card->options.large_send;
4680 /* check on OSN device*/
4681 if (card->info.type == QETH_CARD_TYPE_OSN)
4682 hdr = (struct qeth_hdr *)new_skb->data;
4683 /*are we able to do TSO ? */
4684 if ((large_send == QETH_LARGE_SEND_TSO) &&
4685 (cast_type == RTN_UNSPEC)) {
4686 rc = qeth_tso_prepare_packet(card, new_skb, ipv, cast_type);
4687 if (rc) {
4688 __qeth_free_new_skb(skb, new_skb);
4689 return rc;
4690 }
4691 elements_needed++;
4692 } else if (card->info.type != QETH_CARD_TYPE_OSN) {
4693 new_skb2 = qeth_prepare_skb(card, new_skb, &hdr, ipv);
4694 if (!new_skb2) {
4695 __qeth_free_new_skb(skb, new_skb);
4696 return -EINVAL;
4697 }
4698 if (new_skb != skb)
4699 __qeth_free_new_skb(new_skb2, new_skb);
4700 new_skb = new_skb2;
4701 qeth_fill_header(card, hdr, new_skb, ipv, cast_type);
4702 }
4703 if (large_send == QETH_LARGE_SEND_EDDP) {
4704 ctx = qeth_eddp_create_context(card, new_skb, hdr,
4705 skb->sk->sk_protocol);
4706 if (ctx == NULL) {
4707 __qeth_free_new_skb(skb, new_skb);
4708 PRINT_WARN("could not create eddp context\n");
4709 return -EINVAL;
4710 }
4711 } else {
4712 int elems = qeth_get_elements_no(card,(void*) hdr, new_skb,
4713 elements_needed);
4714 if (!elems) {
4715 __qeth_free_new_skb(skb, new_skb);
4716 return -EINVAL;
4717 }
4718 elements_needed += elems;
4719 }
4720
4721 if ((large_send == QETH_LARGE_SEND_NO) &&
4722 (skb->ip_summed == CHECKSUM_PARTIAL))
4723 qeth_tx_csum(new_skb);
4724
4725 if (card->info.type != QETH_CARD_TYPE_IQD)
4726 rc = qeth_do_send_packet(card, queue, new_skb, hdr,
4727 elements_needed, ctx);
4728 else {
4729 if ((!card->options.layer2) &&
4730 (ipv == 0)) {
4731 __qeth_free_new_skb(skb, new_skb);
4732 return -EPERM;
4733 }
4734 rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
4735 elements_needed, ctx);
4736 }
4737 if (!rc) {
4738 card->stats.tx_packets++;
4739 card->stats.tx_bytes += tx_bytes;
4740 if (new_skb != skb)
4741 dev_kfree_skb_any(skb);
4742 if (card->options.performance_stats) {
4743 if (tso_size &&
4744 !(large_send == QETH_LARGE_SEND_NO)) {
4745 card->perf_stats.large_send_bytes += tx_bytes;
4746 card->perf_stats.large_send_cnt++;
4747 }
4748 if (nr_frags > 0) {
4749 card->perf_stats.sg_skbs_sent++;
4750 /* nr_frags + skb->data */
4751 card->perf_stats.sg_frags_sent +=
4752 nr_frags + 1;
4753 }
4754 }
4755 } else {
4756 card->stats.tx_dropped++;
4757 __qeth_free_new_skb(skb, new_skb);
4758 }
4759 if (ctx != NULL) {
4760 /* drop creator's reference */
4761 qeth_eddp_put_context(ctx);
4762 /* free skb; it's not referenced by a buffer */
4763 if (!rc)
4764 dev_kfree_skb_any(new_skb);
4765 }
4766 return rc;
4767}
4768
4769static int
4770qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4771{
4772 struct qeth_card *card = (struct qeth_card *) dev->priv;
4773 int rc = 0;
4774
4775 switch(regnum){
4776 case MII_BMCR: /* Basic mode control register */
4777 rc = BMCR_FULLDPLX;
4778 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&&
4779 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4780 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
4781 rc |= BMCR_SPEED100;
4782 break;
4783 case MII_BMSR: /* Basic mode status register */
4784 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4785 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4786 BMSR_100BASE4;
4787 break;
4788 case MII_PHYSID1: /* PHYS ID 1 */
4789 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4790 dev->dev_addr[2];
4791 rc = (rc >> 5) & 0xFFFF;
4792 break;
4793 case MII_PHYSID2: /* PHYS ID 2 */
4794 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4795 break;
4796 case MII_ADVERTISE: /* Advertisement control reg */
4797 rc = ADVERTISE_ALL;
4798 break;
4799 case MII_LPA: /* Link partner ability reg */
4800 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4801 LPA_100BASE4 | LPA_LPACK;
4802 break;
4803 case MII_EXPANSION: /* Expansion register */
4804 break;
4805 case MII_DCOUNTER: /* disconnect counter */
4806 break;
4807 case MII_FCSCOUNTER: /* false carrier counter */
4808 break;
4809 case MII_NWAYTEST: /* N-way auto-neg test register */
4810 break;
4811 case MII_RERRCOUNTER: /* rx error counter */
4812 rc = card->stats.rx_errors;
4813 break;
4814 case MII_SREVISION: /* silicon revision */
4815 break;
4816 case MII_RESV1: /* reserved 1 */
4817 break;
4818 case MII_LBRERROR: /* loopback, rx, bypass error */
4819 break;
4820 case MII_PHYADDR: /* physical address */
4821 break;
4822 case MII_RESV2: /* reserved 2 */
4823 break;
4824 case MII_TPISTATUS: /* TPI status for 10mbps */
4825 break;
4826 case MII_NCONFIG: /* network interface config */
4827 break;
4828 default:
4829 break;
4830 }
4831 return rc;
4832}
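/*
 * Note that qeth_mdio_read() never touches a real PHY: OSA hides the
 * physical layer, so SIOCGMIIREG is answered with synthetic MII
 * register values (link up, autoneg complete, 100/full unless the link
 * type is gigabit or better).  This keeps mii-tool and similar
 * utilities happy on an interface that has no MDIO bus at all.
 */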
4833
4834
4835static const char *
4836qeth_arp_get_error_cause(int *rc)
4837{
4838 switch (*rc) {
4839 case QETH_IPA_ARP_RC_FAILED:
4840 *rc = -EIO;
4841 return "operation failed";
4842 case QETH_IPA_ARP_RC_NOTSUPP:
4843 *rc = -EOPNOTSUPP;
4844 return "operation not supported";
4845 case QETH_IPA_ARP_RC_OUT_OF_RANGE:
4846 *rc = -EINVAL;
4847 return "argument out of range";
4848 case QETH_IPA_ARP_RC_Q_NOTSUPP:
4849 *rc = -EOPNOTSUPP;
4850 return "query operation not supported";
4851 case QETH_IPA_ARP_RC_Q_NO_DATA:
4852 *rc = -ENOENT;
4853 return "no query data available";
4854 default:
4855 return "unknown error";
4856 }
4857}
4858
4859static int
4860qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs,
4861 __u16, long);
4862
4863static int
4864qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
4865{
4866 int tmp;
4867 int rc;
4868
4869 QETH_DBF_TEXT(trace,3,"arpstnoe");
4870
4871 /*
4872 * currently GuestLAN only supports the ARP assist function
4873 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
4874 * thus we say EOPNOTSUPP for this ARP function
4875 */
4876 if (card->info.guestlan)
4877 return -EOPNOTSUPP;
4878 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
4879 PRINT_WARN("ARP processing not supported "
4880 "on %s!\n", QETH_CARD_IFNAME(card));
4881 return -EOPNOTSUPP;
4882 }
4883 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
4884 IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
4885 no_entries);
4886 if (rc) {
4887 tmp = rc;
4888 PRINT_WARN("Could not set number of ARP entries on %s: "
4889 "%s (0x%x/%d)\n",
4890 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
4891 tmp, tmp);
4892 }
4893 return rc;
4894}
4895
4896static void
4897qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
4898 struct qeth_arp_query_data *qdata,
4899 int entry_size, int uentry_size)
4900{
4901 char *entry_ptr;
4902 char *uentry_ptr;
4903 int i;
4904
4905 entry_ptr = (char *)&qdata->data;
4906 uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
4907 for (i = 0; i < qdata->no_entries; ++i){
4908 /* strip off 32 bytes "media specific information" */
4909 memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
4910 entry_ptr += entry_size;
4911 uentry_ptr += uentry_size;
4912 }
4913}
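/*
 * A small illustration of the copy above, with made-up sizes: if the
 * adapter returned entries of entry_size == 40 bytes, the first 32
 * bytes of each ("media specific information") are dropped and only
 * the remaining 8 bytes land in userspace, packed back to back at
 * uentry_size intervals.  The real sizes come from the
 * qeth_arp_qi_entry* structures chosen in qeth_arp_query_cb().
 */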
4914
4915static int
4916qeth_arp_query_cb(struct qeth_card *card, struct qeth_reply *reply,
4917 unsigned long data)
4918{
4919 struct qeth_ipa_cmd *cmd;
4920 struct qeth_arp_query_data *qdata;
4921 struct qeth_arp_query_info *qinfo;
4922 int entry_size;
4923 int uentry_size;
4924 int i;
4925
4926 QETH_DBF_TEXT(trace,4,"arpquecb");
4927
4928 qinfo = (struct qeth_arp_query_info *) reply->param;
4929 cmd = (struct qeth_ipa_cmd *) data;
4930 if (cmd->hdr.return_code) {
4931 QETH_DBF_TEXT_(trace,4,"qaer1%i", cmd->hdr.return_code);
4932 return 0;
4933 }
4934 if (cmd->data.setassparms.hdr.return_code) {
4935 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
4936 QETH_DBF_TEXT_(trace,4,"qaer2%i", cmd->hdr.return_code);
4937 return 0;
4938 }
4939 qdata = &cmd->data.setassparms.data.query_arp;
4940 switch(qdata->reply_bits){
4941 case 5:
4942 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
4943 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4944 uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
4945 break;
4946 case 7:
4947 /* fall through to default */
4948 default:
4949 /* tr is the same as eth -> entry7 */
4950 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
4951 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4952 uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
4953 break;
4954 }
4955 /* check if there is enough room in userspace */
4956 if ((qinfo->udata_len - qinfo->udata_offset) <
4957 qdata->no_entries * uentry_size){
4958 QETH_DBF_TEXT_(trace, 4, "qaer3%i", -ENOMEM);
4959 cmd->hdr.return_code = -ENOMEM;
4960 PRINT_WARN("query ARP user space buffer is too small for "
4961 "the returned number of ARP entries. "
4962 "Aborting query!\n");
4963 goto out_error;
4964 }
4965 QETH_DBF_TEXT_(trace, 4, "anore%i",
4966 cmd->data.setassparms.hdr.number_of_replies);
4967 QETH_DBF_TEXT_(trace, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
4968 QETH_DBF_TEXT_(trace, 4, "anoen%i", qdata->no_entries);
4969
4970 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
4971 /* strip off "media specific information" */
4972 qeth_copy_arp_entries_stripped(qinfo, qdata, entry_size,
4973 uentry_size);
4974 } else
4975 /*copy entries to user buffer*/
4976 memcpy(qinfo->udata + qinfo->udata_offset,
4977 (char *)&qdata->data, qdata->no_entries*uentry_size);
4978
4979 qinfo->no_entries += qdata->no_entries;
4980 qinfo->udata_offset += (qdata->no_entries*uentry_size);
4981 /* check if all replies received ... */
4982 if (cmd->data.setassparms.hdr.seq_no <
4983 cmd->data.setassparms.hdr.number_of_replies)
4984 return 1;
4985 memcpy(qinfo->udata, &qinfo->no_entries, 4);
4986 /* keep STRIP_ENTRIES flag so the user program can distinguish
4987 * stripped entries from normal ones */
4988 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
4989 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
4990 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET,&qdata->reply_bits,2);
4991 return 0;
4992out_error:
4993 i = 0;
4994 memcpy(qinfo->udata, &i, 4);
4995 return 0;
4996}
4997
4998static int
4999qeth_send_ipa_arp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
5000 int len, int (*reply_cb)(struct qeth_card *,
5001 struct qeth_reply *,
5002 unsigned long),
5003 void *reply_param)
5004{
5005 QETH_DBF_TEXT(trace,4,"sendarp");
5006
5007 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
5008 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
5009 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
5010 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
5011 reply_cb, reply_param);
5012}
5013
5014static int
5015qeth_send_ipa_snmp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
5016 int len, int (*reply_cb)(struct qeth_card *,
5017 struct qeth_reply *,
5018 unsigned long),
5019 void *reply_param)
5020{
5021 u16 s1, s2;
5022
5023 QETH_DBF_TEXT(trace,4,"sendsnmp");
5024
5025 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
5026 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
5027 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
5028 /* adjust PDU length fields in IPA_PDU_HEADER */
5029 s1 = (u32) IPA_PDU_HEADER_SIZE + len;
5030 s2 = (u32) len;
5031 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
5032 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
5033 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
5034 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
5035 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
5036 reply_cb, reply_param);
5037}
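/*
 * The four length fields patched above sit inside the copied
 * IPA_PDU_HEADER template; in terms of the QETH_IPA_PDU_LEN_* offsets
 * the resulting PDU carries
 *
 *	LEN_TOTAL    = IPA_PDU_HEADER_SIZE + len   (s1)
 *	LEN_PDU1/2/3 = len                         (s2)
 *
 * so the adapter can locate the SNMP payload behind the fixed header.
 */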
5038
5039static struct qeth_cmd_buffer *
5040qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs,
5041 __u16, __u16, enum qeth_prot_versions);
5042static int
5043qeth_arp_query(struct qeth_card *card, char __user *udata)
5044{
5045 struct qeth_cmd_buffer *iob;
5046 struct qeth_arp_query_info qinfo = {0, };
5047 int tmp;
5048 int rc;
5049
5050 QETH_DBF_TEXT(trace,3,"arpquery");
5051
5052 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
5053 IPA_ARP_PROCESSING)) {
5054 PRINT_WARN("ARP processing not supported "
5055 "on %s!\n", QETH_CARD_IFNAME(card));
5056 return -EOPNOTSUPP;
5057 }
5058 /* get size of userspace buffer and mask_bits -> 6 bytes */
5059 if (copy_from_user(&qinfo, udata, 6))
5060 return -EFAULT;
5061 if (!(qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL)))
5062 return -ENOMEM;
5063 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
5064 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
5065 IPA_CMD_ASS_ARP_QUERY_INFO,
5066 sizeof(int),QETH_PROT_IPV4);
5067
5068 rc = qeth_send_ipa_arp_cmd(card, iob,
5069 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
5070 qeth_arp_query_cb, (void *)&qinfo);
5071 if (rc) {
5072 tmp = rc;
5073 PRINT_WARN("Error while querying ARP cache on %s: %s "
5074 "(0x%x/%d)\n",
5075 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
5076 tmp, tmp);
5077 if (copy_to_user(udata, qinfo.udata, 4))
5078 rc = -EFAULT;
5079 } else {
5080 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
5081 rc = -EFAULT;
5082 }
5083 kfree(qinfo.udata);
5084 return rc;
5085}
5086
5087/**
5088 * SNMP command callback
5089 */
5090static int
5091qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply,
5092 unsigned long sdata)
5093{
5094 struct qeth_ipa_cmd *cmd;
5095 struct qeth_arp_query_info *qinfo;
5096 struct qeth_snmp_cmd *snmp;
5097 unsigned char *data;
5098 __u16 data_len;
5099
5100 QETH_DBF_TEXT(trace,3,"snpcmdcb");
5101
5102 cmd = (struct qeth_ipa_cmd *) sdata;
5103 data = (unsigned char *)((char *)cmd - reply->offset);
5104 qinfo = (struct qeth_arp_query_info *) reply->param;
5105 snmp = &cmd->data.setadapterparms.data.snmp;
5106
5107 if (cmd->hdr.return_code) {
5108 QETH_DBF_TEXT_(trace,4,"scer1%i", cmd->hdr.return_code);
5109 return 0;
5110 }
5111 if (cmd->data.setadapterparms.hdr.return_code) {
5112 cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
5113 QETH_DBF_TEXT_(trace,4,"scer2%i", cmd->hdr.return_code);
5114 return 0;
5115 }
5116 data_len = *((__u16*)QETH_IPA_PDU_LEN_PDU1(data));
5117 if (cmd->data.setadapterparms.hdr.seq_no == 1)
5118 data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
5119 else
5120 data_len -= (__u16)((char*)&snmp->request - (char *)cmd);
5121
5122 /* check if there is enough room in userspace */
5123 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
5124 QETH_DBF_TEXT_(trace, 4, "scer3%i", -ENOMEM);
5125 cmd->hdr.return_code = -ENOMEM;
5126 return 0;
5127 }
5128 QETH_DBF_TEXT_(trace, 4, "snore%i",
5129 cmd->data.setadapterparms.hdr.used_total);
5130 QETH_DBF_TEXT_(trace, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no);
5131 /*copy entries to user buffer*/
5132 if (cmd->data.setadapterparms.hdr.seq_no == 1) {
5133 memcpy(qinfo->udata + qinfo->udata_offset,
5134 (char *)snmp,
5135 data_len + offsetof(struct qeth_snmp_cmd,data));
5136 qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
5137 } else {
5138 memcpy(qinfo->udata + qinfo->udata_offset,
5139 (char *)&snmp->request, data_len);
5140 }
5141 qinfo->udata_offset += data_len;
5142 /* check if all replies received ... */
5143 QETH_DBF_TEXT_(trace, 4, "srtot%i",
5144 cmd->data.setadapterparms.hdr.used_total);
5145 QETH_DBF_TEXT_(trace, 4, "srseq%i",
5146 cmd->data.setadapterparms.hdr.seq_no);
5147 if (cmd->data.setadapterparms.hdr.seq_no <
5148 cmd->data.setadapterparms.hdr.used_total)
5149 return 1;
5150 return 0;
5151}
5152
5153static struct qeth_cmd_buffer *
5154qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds,
5155 enum qeth_prot_versions );
5156
5157static struct qeth_cmd_buffer *
5158qeth_get_adapter_cmd(struct qeth_card *card, __u32 command, __u32 cmdlen)
5159{
5160 struct qeth_cmd_buffer *iob;
5161 struct qeth_ipa_cmd *cmd;
5162
5163 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETADAPTERPARMS,
5164 QETH_PROT_IPV4);
5165 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5166 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
5167 cmd->data.setadapterparms.hdr.command_code = command;
5168 cmd->data.setadapterparms.hdr.used_total = 1;
5169 cmd->data.setadapterparms.hdr.seq_no = 1;
5170
5171 return iob;
5172}
5173
5174/**
5175 * send SNMP commands to an OSA-Express card
5176 */
5177static int
5178qeth_snmp_command(struct qeth_card *card, char __user *udata)
5179{
5180 struct qeth_cmd_buffer *iob;
5181 struct qeth_ipa_cmd *cmd;
5182 struct qeth_snmp_ureq *ureq;
5183 int req_len;
5184 struct qeth_arp_query_info qinfo = {0, };
5185 int rc = 0;
5186
5187 QETH_DBF_TEXT(trace,3,"snmpcmd");
5188
5189 if (card->info.guestlan)
5190 return -EOPNOTSUPP;
5191
5192 if ((!qeth_adp_supported(card,IPA_SETADP_SET_SNMP_CONTROL)) &&
5193 (!card->options.layer2) ) {
5194 PRINT_WARN("SNMP Query MIBS not supported "
5195 "on %s!\n", QETH_CARD_IFNAME(card));
5196 return -EOPNOTSUPP;
5197 }
5198 /* skip 4 bytes (data_len struct member) to get req_len */
5199 if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
5200 return -EFAULT;
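	/*
	 * Layout of the userspace request assumed by the two
	 * copy_from_user() calls in this function (field names from
	 * struct qeth_snmp_ureq_hdr):
	 *
	 *	offset 0: __u32 data_len   size of the reply buffer
	 *	offset 4: __u32 req_len    length of the SNMP command
	 *	offset 8: the SNMP command itself (req_len bytes)
	 *
	 * which is why req_len is fetched sizeof(int) bytes into udata.
	 */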
5201 ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
5202 if (!ureq) {
5203 QETH_DBF_TEXT(trace, 2, "snmpnome");
5204 return -ENOMEM;
5205 }
5206 if (copy_from_user(ureq, udata,
5207 req_len+sizeof(struct qeth_snmp_ureq_hdr))){
5208 kfree(ureq);
5209 return -EFAULT;
5210 }
5211 qinfo.udata_len = ureq->hdr.data_len;
5212 if (!(qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL))){
5213 kfree(ureq);
5214 return -ENOMEM;
5215 }
5216 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
5217
5218 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
5219 QETH_SNMP_SETADP_CMDLENGTH + req_len);
5220 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5221 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
5222 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
5223 qeth_snmp_command_cb, (void *)&qinfo);
5224 if (rc)
5225 PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
5226 QETH_CARD_IFNAME(card), rc);
5227 else {
5228 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
5229 rc = -EFAULT;
5230 }
5231
5232 kfree(ureq);
5233 kfree(qinfo.udata);
5234 return rc;
5235}
5236
5237static int
5238qeth_default_setassparms_cb(struct qeth_card *, struct qeth_reply *,
5239 unsigned long);
5240
5241static int
5242qeth_default_setadapterparms_cb(struct qeth_card *card,
5243 struct qeth_reply *reply,
5244 unsigned long data);
5245static int
5246qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *,
5247 __u16, long,
5248 int (*reply_cb)
5249 (struct qeth_card *, struct qeth_reply *, unsigned long),
5250 void *reply_param);
5251
5252static int
5253qeth_arp_add_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
5254{
5255 struct qeth_cmd_buffer *iob;
5256 char buf[16];
5257 int tmp;
5258 int rc;
5259
5260 QETH_DBF_TEXT(trace,3,"arpadent");
5261
5262 /*
5263 * currently GuestLAN only supports the ARP assist function
5264 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
5265 * thus we say EOPNOTSUPP for this ARP function
5266 */
5267 if (card->info.guestlan)
5268 return -EOPNOTSUPP;
5269 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
5270 PRINT_WARN("ARP processing not supported "
5271 "on %s!\n", QETH_CARD_IFNAME(card));
5272 return -EOPNOTSUPP;
5273 }
5274
5275 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
5276 IPA_CMD_ASS_ARP_ADD_ENTRY,
5277 sizeof(struct qeth_arp_cache_entry),
5278 QETH_PROT_IPV4);
5279 rc = qeth_send_setassparms(card, iob,
5280 sizeof(struct qeth_arp_cache_entry),
5281 (unsigned long) entry,
5282 qeth_default_setassparms_cb, NULL);
5283 if (rc) {
5284 tmp = rc;
5285 qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
5286 PRINT_WARN("Could not add ARP entry for address %s on %s: "
5287 "%s (0x%x/%d)\n",
5288 buf, QETH_CARD_IFNAME(card),
5289 qeth_arp_get_error_cause(&rc), tmp, tmp);
5290 }
5291 return rc;
5292}
5293
5294static int
5295qeth_arp_remove_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
5296{
5297 struct qeth_cmd_buffer *iob;
5298 char buf[16] = {0, };
5299 int tmp;
5300 int rc;
5301
5302 QETH_DBF_TEXT(trace,3,"arprment");
5303
5304 /*
5305 * currently GuestLAN only supports the ARP assist function
5306 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
5307 * thus we say EOPNOTSUPP for this ARP function
5308 */
5309 if (card->info.guestlan)
5310 return -EOPNOTSUPP;
5311 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
5312 PRINT_WARN("ARP processing not supported "
5313 "on %s!\n", QETH_CARD_IFNAME(card));
5314 return -EOPNOTSUPP;
5315 }
5316 memcpy(buf, entry, 12);
5317 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
5318 IPA_CMD_ASS_ARP_REMOVE_ENTRY,
5319 12,
5320 QETH_PROT_IPV4);
5321 rc = qeth_send_setassparms(card, iob,
5322 12, (unsigned long)buf,
5323 qeth_default_setassparms_cb, NULL);
5324 if (rc) {
5325 tmp = rc;
5326 memset(buf, 0, 16);
5327 qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
5328 PRINT_WARN("Could not delete ARP entry for address %s on %s: "
5329 "%s (0x%x/%d)\n",
5330 buf, QETH_CARD_IFNAME(card),
5331 qeth_arp_get_error_cause(&rc), tmp, tmp);
5332 }
5333 return rc;
5334}
5335
5336static int
5337qeth_arp_flush_cache(struct qeth_card *card)
5338{
5339 int rc;
5340 int tmp;
5341
5342 QETH_DBF_TEXT(trace,3,"arpflush");
5343
5344 /*
5345 * currently GuestLAN only supports the ARP assist function
5346 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
5347 * thus we say EOPNOTSUPP for this ARP function
5348 */
5349 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
5350 return -EOPNOTSUPP;
5351 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
5352 PRINT_WARN("ARP processing not supported "
5353 "on %s!\n", QETH_CARD_IFNAME(card));
5354 return -EOPNOTSUPP;
5355 }
5356 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
5357 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
5358 if (rc){
5359 tmp = rc;
5360 PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n",
5361 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
5362 tmp, tmp);
5363 }
5364 return rc;
5365}
5366
5367static int
5368qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5369{
5370 struct qeth_card *card = (struct qeth_card *)dev->priv;
5371 struct qeth_arp_cache_entry arp_entry;
5372 struct mii_ioctl_data *mii_data;
5373 int rc = 0;
5374
5375 if (!card)
5376 return -ENODEV;
5377
5378 if ((card->state != CARD_STATE_UP) &&
5379 (card->state != CARD_STATE_SOFTSETUP))
5380 return -ENODEV;
5381
5382 if (card->info.type == QETH_CARD_TYPE_OSN)
5383 return -EPERM;
5384
5385 switch (cmd){
5386 case SIOC_QETH_ARP_SET_NO_ENTRIES:
5387 if ( !capable(CAP_NET_ADMIN) ||
5388 (card->options.layer2) ) {
5389 rc = -EPERM;
5390 break;
5391 }
5392 rc = qeth_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
5393 break;
5394 case SIOC_QETH_ARP_QUERY_INFO:
5395 if ( !capable(CAP_NET_ADMIN) ||
5396 (card->options.layer2) ) {
5397 rc = -EPERM;
5398 break;
5399 }
5400 rc = qeth_arp_query(card, rq->ifr_ifru.ifru_data);
5401 break;
5402 case SIOC_QETH_ARP_ADD_ENTRY:
5403 if ( !capable(CAP_NET_ADMIN) ||
5404 (card->options.layer2) ) {
5405 rc = -EPERM;
5406 break;
5407 }
5408 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
5409 sizeof(struct qeth_arp_cache_entry)))
5410 rc = -EFAULT;
5411 else
5412 rc = qeth_arp_add_entry(card, &arp_entry);
5413 break;
5414 case SIOC_QETH_ARP_REMOVE_ENTRY:
5415 if ( !capable(CAP_NET_ADMIN) ||
5416 (card->options.layer2) ) {
5417 rc = -EPERM;
5418 break;
5419 }
5420 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
5421 sizeof(struct qeth_arp_cache_entry)))
5422 rc = -EFAULT;
5423 else
5424 rc = qeth_arp_remove_entry(card, &arp_entry);
5425 break;
5426 case SIOC_QETH_ARP_FLUSH_CACHE:
5427 if ( !capable(CAP_NET_ADMIN) ||
5428 (card->options.layer2) ) {
5429 rc = -EPERM;
5430 break;
5431 }
5432 rc = qeth_arp_flush_cache(card);
5433 break;
5434 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
5435 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
5436 break;
5437 case SIOC_QETH_GET_CARD_TYPE:
5438 if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
5439 !card->info.guestlan)
5440 return 1;
5441		return 0;
5443 case SIOCGMIIPHY:
5444 mii_data = if_mii(rq);
5445 mii_data->phy_id = 0;
5446 break;
5447 case SIOCGMIIREG:
5448 mii_data = if_mii(rq);
5449 if (mii_data->phy_id != 0)
5450 rc = -EINVAL;
5451 else
5452 mii_data->val_out = qeth_mdio_read(dev,mii_data->phy_id,
5453 mii_data->reg_num);
5454 break;
5455 default:
5456 rc = -EOPNOTSUPP;
5457 }
5458 if (rc)
5459 QETH_DBF_TEXT_(trace, 2, "ioce%d", rc);
5460 return rc;
5461}
5462
5463static struct net_device_stats *
5464qeth_get_stats(struct net_device *dev)
5465{
5466 struct qeth_card *card;
5467
5468 card = (struct qeth_card *) (dev->priv);
5469
5470 QETH_DBF_TEXT(trace,5,"getstat");
5471
5472 return &card->stats;
5473}
5474
5475static int
5476qeth_change_mtu(struct net_device *dev, int new_mtu)
5477{
5478 struct qeth_card *card;
5479 char dbf_text[15];
5480
5481 card = (struct qeth_card *) (dev->priv);
5482
5483 QETH_DBF_TEXT(trace,4,"chgmtu");
5484 sprintf(dbf_text, "%8x", new_mtu);
5485 QETH_DBF_TEXT(trace,4,dbf_text);
5486
5487 if (new_mtu < 64)
5488 return -EINVAL;
5489 if (new_mtu > 65535)
5490 return -EINVAL;
5491 if ((!qeth_is_supported(card,IPA_IP_FRAGMENTATION)) &&
5492 (!qeth_mtu_is_valid(card, new_mtu)))
5493 return -EINVAL;
5494 dev->mtu = new_mtu;
5495 return 0;
5496}
5497
5498#ifdef CONFIG_QETH_VLAN
5499static void
5500qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5501{
5502 struct qeth_card *card;
5503 unsigned long flags;
5504
5505 QETH_DBF_TEXT(trace,4,"vlanreg");
5506
5507 card = (struct qeth_card *) dev->priv;
5508 spin_lock_irqsave(&card->vlanlock, flags);
5509 card->vlangrp = grp;
5510 spin_unlock_irqrestore(&card->vlanlock, flags);
5511}
5512
5513static void
5514qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf,
5515 unsigned short vid)
5516{
5517 int i;
5518 struct sk_buff *skb;
5519 struct sk_buff_head tmp_list;
5520
5521 skb_queue_head_init(&tmp_list);
5522 lockdep_set_class(&tmp_list.lock, &qdio_out_skb_queue_key);
5523 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
5524 while ((skb = skb_dequeue(&buf->skb_list))){
5525 if (vlan_tx_tag_present(skb) &&
5526 (vlan_tx_tag_get(skb) == vid)) {
5527 atomic_dec(&skb->users);
5528 dev_kfree_skb(skb);
5529 } else
5530 skb_queue_tail(&tmp_list, skb);
5531 }
5532 }
5533 while ((skb = skb_dequeue(&tmp_list)))
5534 skb_queue_tail(&buf->skb_list, skb);
5535}
5536
5537static void
5538qeth_free_vlan_skbs(struct qeth_card *card, unsigned short vid)
5539{
5540 int i, j;
5541
5542 QETH_DBF_TEXT(trace, 4, "frvlskbs");
5543 for (i = 0; i < card->qdio.no_out_queues; ++i){
5544 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
5545 qeth_free_vlan_buffer(card, &card->qdio.
5546 out_qs[i]->bufs[j], vid);
5547 }
5548}
5549
5550static void
5551qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
5552{
5553 struct in_device *in_dev;
5554 struct in_ifaddr *ifa;
5555 struct qeth_ipaddr *addr;
5556
5557 QETH_DBF_TEXT(trace, 4, "frvaddr4");
5558
5559 rcu_read_lock();
5560 in_dev = __in_dev_get_rcu(vlan_group_get_device(card->vlangrp, vid));
5561 if (!in_dev)
5562 goto out;
5563 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
5564 addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
5565 if (addr){
5566 addr->u.a4.addr = ifa->ifa_address;
5567 addr->u.a4.mask = ifa->ifa_mask;
5568 addr->type = QETH_IP_TYPE_NORMAL;
5569 if (!qeth_delete_ip(card, addr))
5570 kfree(addr);
5571 }
5572 }
5573out:
5574 rcu_read_unlock();
5575}
5576
5577static void
5578qeth_free_vlan_addresses6(struct qeth_card *card, unsigned short vid)
5579{
5580#ifdef CONFIG_QETH_IPV6
5581 struct inet6_dev *in6_dev;
5582 struct inet6_ifaddr *ifa;
5583 struct qeth_ipaddr *addr;
5584
5585 QETH_DBF_TEXT(trace, 4, "frvaddr6");
5586
5587 in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
5588 if (!in6_dev)
5589 return;
5590 for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){
5591 addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
5592 if (addr){
5593 memcpy(&addr->u.a6.addr, &ifa->addr,
5594 sizeof(struct in6_addr));
5595 addr->u.a6.pfxlen = ifa->prefix_len;
5596 addr->type = QETH_IP_TYPE_NORMAL;
5597 if (!qeth_delete_ip(card, addr))
5598 kfree(addr);
5599 }
5600 }
5601 in6_dev_put(in6_dev);
5602#endif /* CONFIG_QETH_IPV6 */
5603}
5604
5605static void
5606qeth_free_vlan_addresses(struct qeth_card *card, unsigned short vid)
5607{
5608 if (card->options.layer2 || !card->vlangrp)
5609 return;
5610 qeth_free_vlan_addresses4(card, vid);
5611 qeth_free_vlan_addresses6(card, vid);
5612}
5613
5614static int
5615qeth_layer2_send_setdelvlan_cb(struct qeth_card *card,
5616 struct qeth_reply *reply,
5617 unsigned long data)
5618{
5619 struct qeth_ipa_cmd *cmd;
5620
5621 QETH_DBF_TEXT(trace, 2, "L2sdvcb");
5622 cmd = (struct qeth_ipa_cmd *) data;
5623 if (cmd->hdr.return_code) {
5624 PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. "
5625 "Continuing\n",cmd->data.setdelvlan.vlan_id,
5626 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
5627 QETH_DBF_TEXT_(trace, 2, "L2VL%4x", cmd->hdr.command);
5628 QETH_DBF_TEXT_(trace, 2, "L2%s", CARD_BUS_ID(card));
5629 QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code);
5630 }
5631 return 0;
5632}
5633
5634static int
5635qeth_layer2_send_setdelvlan(struct qeth_card *card, __u16 i,
5636 enum qeth_ipa_cmds ipacmd)
5637{
5638 struct qeth_ipa_cmd *cmd;
5639 struct qeth_cmd_buffer *iob;
5640
5641 QETH_DBF_TEXT_(trace, 4, "L2sdv%x",ipacmd);
5642 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
5643 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
5644 cmd->data.setdelvlan.vlan_id = i;
5645 return qeth_send_ipa_cmd(card, iob,
5646 qeth_layer2_send_setdelvlan_cb, NULL);
5647}
5648
5649static void
5650qeth_layer2_process_vlans(struct qeth_card *card, int clear)
5651{
5652 unsigned short i;
5653
5654 QETH_DBF_TEXT(trace, 3, "L2prcvln");
5655
5656 if (!card->vlangrp)
5657 return;
5658 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5659 if (vlan_group_get_device(card->vlangrp, i) == NULL)
5660 continue;
5661 if (clear)
5662 qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN);
5663 else
5664 qeth_layer2_send_setdelvlan(card, i, IPA_CMD_SETVLAN);
5665 }
5666}
5667
5668/* add_vid is used in layer 2 mode only ... */
5669static void
5670qeth_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
5671{
5672 struct qeth_card *card;
5673
5674 QETH_DBF_TEXT_(trace, 4, "aid:%d", vid);
5675
5676 card = (struct qeth_card *) dev->priv;
5677 if (!card->options.layer2)
5678 return;
5679 qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
5680}
5681
5682/* ... kill_vid is used in both modes */
5683static void
5684qeth_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
5685{
5686 struct qeth_card *card;
5687 unsigned long flags;
5688
5689 QETH_DBF_TEXT_(trace, 4, "kid:%d", vid);
5690
5691 card = (struct qeth_card *) dev->priv;
5692 /* free all skbs for the vlan device */
5693 qeth_free_vlan_skbs(card, vid);
5694 spin_lock_irqsave(&card->vlanlock, flags);
5695 /* unregister IP addresses of vlan device */
5696 qeth_free_vlan_addresses(card, vid);
5697 vlan_group_set_device(card->vlangrp, vid, NULL);
5698 spin_unlock_irqrestore(&card->vlanlock, flags);
5699 if (card->options.layer2)
5700 qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
5701 qeth_set_multicast_list(card->dev);
5702}
5703#endif
5704/**
5705 * Examine hardware response to SET_PROMISC_MODE
5706 */
5707static int
5708qeth_setadp_promisc_mode_cb(struct qeth_card *card,
5709 struct qeth_reply *reply,
5710 unsigned long data)
5711{
5712 struct qeth_ipa_cmd *cmd;
5713 struct qeth_ipacmd_setadpparms *setparms;
5714
5715 QETH_DBF_TEXT(trace,4,"prmadpcb");
5716
5717 cmd = (struct qeth_ipa_cmd *) data;
5718 setparms = &(cmd->data.setadapterparms);
5719
5720 qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
5721 if (cmd->hdr.return_code) {
5722 QETH_DBF_TEXT_(trace,4,"prmrc%2.2x",cmd->hdr.return_code);
5723 setparms->data.mode = SET_PROMISC_MODE_OFF;
5724 }
5725 card->info.promisc_mode = setparms->data.mode;
5726 return 0;
5727}
5728/*
5729 * Set promiscuous mode (on or off) (SET_PROMISC_MODE command)
5730 */
5731static void
5732qeth_setadp_promisc_mode(struct qeth_card *card)
5733{
5734 enum qeth_ipa_promisc_modes mode;
5735 struct net_device *dev = card->dev;
5736 struct qeth_cmd_buffer *iob;
5737 struct qeth_ipa_cmd *cmd;
5738
5739 QETH_DBF_TEXT(trace, 4, "setprom");
5740
5741 if (((dev->flags & IFF_PROMISC) &&
5742 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
5743 (!(dev->flags & IFF_PROMISC) &&
5744 (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
5745 return;
5746 mode = SET_PROMISC_MODE_OFF;
5747 if (dev->flags & IFF_PROMISC)
5748 mode = SET_PROMISC_MODE_ON;
5749 QETH_DBF_TEXT_(trace, 4, "mode:%x", mode);
5750
5751 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
5752 sizeof(struct qeth_ipacmd_setadpparms));
5753 cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
5754 cmd->data.setadapterparms.data.mode = mode;
5755 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
5756}
5757
5758/**
5759 * set multicast address on card
5760 */
5761static void
5762qeth_set_multicast_list(struct net_device *dev)
5763{
5764 struct qeth_card *card = (struct qeth_card *) dev->priv;
5765
5766 if (card->info.type == QETH_CARD_TYPE_OSN)
5767		return;
5768
5769 QETH_DBF_TEXT(trace, 3, "setmulti");
5770 qeth_delete_mc_addresses(card);
5771 if (card->options.layer2) {
5772 qeth_layer2_add_multicast(card);
5773 goto out;
5774 }
5775 qeth_add_multicast_ipv4(card);
5776#ifdef CONFIG_QETH_IPV6
5777 qeth_add_multicast_ipv6(card);
5778#endif
5779out:
5780 qeth_set_ip_addr_list(card);
5781 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
5782 return;
5783 qeth_setadp_promisc_mode(card);
5784}
5785
5786static int
5787qeth_neigh_setup(struct net_device *dev, struct neigh_parms *np)
5788{
5789 return 0;
5790}
5791
5792static void
5793qeth_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev)
5794{
5795 if (dev->type == ARPHRD_IEEE802_TR)
5796 ip_tr_mc_map(ipm, mac);
5797 else
5798 ip_eth_mc_map(ipm, mac);
5799}
5800
5801static struct qeth_ipaddr *
5802qeth_get_addr_buffer(enum qeth_prot_versions prot)
5803{
5804 struct qeth_ipaddr *addr;
5805
5806 addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
5807 if (addr == NULL) {
5808 PRINT_WARN("Not enough memory to add address\n");
5809 return NULL;
5810 }
5811 addr->type = QETH_IP_TYPE_NORMAL;
5812 addr->proto = prot;
5813 return addr;
5814}
5815
5816int
5817qeth_osn_assist(struct net_device *dev,
5818 void *data,
5819 int data_len)
5820{
5821 struct qeth_cmd_buffer *iob;
5822 struct qeth_card *card;
5823 int rc;
5824
5825 QETH_DBF_TEXT(trace, 2, "osnsdmc");
5826 if (!dev)
5827 return -ENODEV;
5828 card = (struct qeth_card *)dev->priv;
5829 if (!card)
5830 return -ENODEV;
5831 if ((card->state != CARD_STATE_UP) &&
5832 (card->state != CARD_STATE_SOFTSETUP))
5833 return -ENODEV;
5834 iob = qeth_wait_for_buffer(&card->write);
5835 memcpy(iob->data+IPA_PDU_HEADER_SIZE, data, data_len);
5836 rc = qeth_osn_send_ipa_cmd(card, iob, data_len);
5837 return rc;
5838}
5839
5840static struct net_device *
5841qeth_netdev_by_devno(unsigned char *read_dev_no)
5842{
5843 struct qeth_card *card;
5844 struct net_device *ndev;
5845 unsigned char *readno;
5846 __u16 temp_dev_no, card_dev_no;
5847 char *endp;
5848 unsigned long flags;
5849
5850 ndev = NULL;
5851 memcpy(&temp_dev_no, read_dev_no, 2);
5852 read_lock_irqsave(&qeth_card_list.rwlock, flags);
5853 list_for_each_entry(card, &qeth_card_list.list, list) {
5854 readno = CARD_RDEV_ID(card);
5855 readno += (strlen(readno) - 4);
5856 card_dev_no = simple_strtoul(readno, &endp, 16);
5857 if (card_dev_no == temp_dev_no) {
5858 ndev = card->dev;
5859 break;
5860 }
5861 }
5862 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
5863 return ndev;
5864}
5865
5866int
5867qeth_osn_register(unsigned char *read_dev_no,
5868 struct net_device **dev,
5869 int (*assist_cb)(struct net_device *, void *),
5870 int (*data_cb)(struct sk_buff *))
5871{
5872 struct qeth_card * card;
5873
5874 QETH_DBF_TEXT(trace, 2, "osnreg");
5875 *dev = qeth_netdev_by_devno(read_dev_no);
5876 if (*dev == NULL)
5877 return -ENODEV;
5878 card = (struct qeth_card *)(*dev)->priv;
5879 if (!card)
5880 return -ENODEV;
5881 if ((assist_cb == NULL) || (data_cb == NULL))
5882 return -EINVAL;
5883 card->osn_info.assist_cb = assist_cb;
5884 card->osn_info.data_cb = data_cb;
5885 return 0;
5886}
5887
5888void
5889qeth_osn_deregister(struct net_device * dev)
5890{
5891 struct qeth_card *card;
5892
5893 QETH_DBF_TEXT(trace, 2, "osndereg");
5894 if (!dev)
5895 return;
5896 card = (struct qeth_card *)dev->priv;
5897 if (!card)
5898 return;
5899 card->osn_info.assist_cb = NULL;
5900 card->osn_info.data_cb = NULL;
5901 return;
5902}
5903
5904static void
5905qeth_delete_mc_addresses(struct qeth_card *card)
5906{
5907 struct qeth_ipaddr *iptodo;
5908 unsigned long flags;
5909
5910 QETH_DBF_TEXT(trace,4,"delmc");
5911 iptodo = qeth_get_addr_buffer(QETH_PROT_IPV4);
5912 if (!iptodo) {
5913 QETH_DBF_TEXT(trace, 2, "dmcnomem");
5914 return;
5915 }
5916 iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
5917 spin_lock_irqsave(&card->ip_lock, flags);
5918 if (!__qeth_insert_ip_todo(card, iptodo, 0))
5919 kfree(iptodo);
5920 spin_unlock_irqrestore(&card->ip_lock, flags);
5921}
5922
5923static void
5924qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev)
5925{
5926 struct qeth_ipaddr *ipm;
5927 struct ip_mc_list *im4;
5928 char buf[MAX_ADDR_LEN];
5929
5930 QETH_DBF_TEXT(trace,4,"addmc");
5931 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
5932 qeth_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
5933 ipm = qeth_get_addr_buffer(QETH_PROT_IPV4);
5934 if (!ipm)
5935 continue;
5936 ipm->u.a4.addr = im4->multiaddr;
5937 memcpy(ipm->mac,buf,OSA_ADDR_LEN);
5938 ipm->is_multicast = 1;
5939 if (!qeth_add_ip(card,ipm))
5940 kfree(ipm);
5941 }
5942}
5943
5944static inline void
5945qeth_add_vlan_mc(struct qeth_card *card)
5946{
5947#ifdef CONFIG_QETH_VLAN
5948 struct in_device *in_dev;
5949 struct vlan_group *vg;
5950 int i;
5951
5952 QETH_DBF_TEXT(trace,4,"addmcvl");
5953 if ( ((card->options.layer2 == 0) &&
5954 (!qeth_is_supported(card,IPA_FULL_VLAN))) ||
5955 (card->vlangrp == NULL) )
5956		return;
5957
5958 vg = card->vlangrp;
5959 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
5960 struct net_device *netdev = vlan_group_get_device(vg, i);
5961 if (netdev == NULL ||
5962 !(netdev->flags & IFF_UP))
5963 continue;
5964 in_dev = in_dev_get(netdev);
5965 if (!in_dev)
5966 continue;
5967 read_lock(&in_dev->mc_list_lock);
5968 qeth_add_mc(card,in_dev);
5969 read_unlock(&in_dev->mc_list_lock);
5970 in_dev_put(in_dev);
5971 }
5972#endif
5973}
5974
5975static void
5976qeth_add_multicast_ipv4(struct qeth_card *card)
5977{
5978 struct in_device *in4_dev;
5979
5980 QETH_DBF_TEXT(trace,4,"chkmcv4");
5981 in4_dev = in_dev_get(card->dev);
5982 if (in4_dev == NULL)
5983 return;
5984 read_lock(&in4_dev->mc_list_lock);
5985 qeth_add_mc(card, in4_dev);
5986 qeth_add_vlan_mc(card);
5987 read_unlock(&in4_dev->mc_list_lock);
5988 in_dev_put(in4_dev);
5989}
5990
5991static void
5992qeth_layer2_add_multicast(struct qeth_card *card)
5993{
5994 struct qeth_ipaddr *ipm;
5995 struct dev_mc_list *dm;
5996
5997 QETH_DBF_TEXT(trace,4,"L2addmc");
5998 for (dm = card->dev->mc_list; dm; dm = dm->next) {
5999 ipm = qeth_get_addr_buffer(QETH_PROT_IPV4);
6000 if (!ipm)
6001 continue;
6002 memcpy(ipm->mac,dm->dmi_addr,MAX_ADDR_LEN);
6003 ipm->is_multicast = 1;
6004 if (!qeth_add_ip(card, ipm))
6005 kfree(ipm);
6006 }
6007}
6008
6009#ifdef CONFIG_QETH_IPV6
6010static void
6011qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
6012{
6013 struct qeth_ipaddr *ipm;
6014 struct ifmcaddr6 *im6;
6015 char buf[MAX_ADDR_LEN];
6016
6017 QETH_DBF_TEXT(trace,4,"addmc6");
6018 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
6019 ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
6020 ipm = qeth_get_addr_buffer(QETH_PROT_IPV6);
6021 if (!ipm)
6022 continue;
6023 ipm->is_multicast = 1;
6024 memcpy(ipm->mac,buf,OSA_ADDR_LEN);
6025 memcpy(&ipm->u.a6.addr,&im6->mca_addr.s6_addr,
6026 sizeof(struct in6_addr));
6027 if (!qeth_add_ip(card,ipm))
6028 kfree(ipm);
6029 }
6030}
6031
6032static inline void
6033qeth_add_vlan_mc6(struct qeth_card *card)
6034{
6035#ifdef CONFIG_QETH_VLAN
6036 struct inet6_dev *in_dev;
6037 struct vlan_group *vg;
6038 int i;
6039
6040 QETH_DBF_TEXT(trace,4,"admc6vl");
6041 if ( ((card->options.layer2 == 0) &&
6042 (!qeth_is_supported(card,IPA_FULL_VLAN))) ||
6043 (card->vlangrp == NULL))
6044		return;
6045
6046 vg = card->vlangrp;
6047 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
6048 struct net_device *netdev = vlan_group_get_device(vg, i);
6049 if (netdev == NULL ||
6050 !(netdev->flags & IFF_UP))
6051 continue;
6052 in_dev = in6_dev_get(netdev);
6053 if (!in_dev)
6054 continue;
6055 read_lock_bh(&in_dev->lock);
6056 qeth_add_mc6(card,in_dev);
6057 read_unlock_bh(&in_dev->lock);
6058 in6_dev_put(in_dev);
6059 }
6060#endif /* CONFIG_QETH_VLAN */
6061}
6062
6063static void
6064qeth_add_multicast_ipv6(struct qeth_card *card)
6065{
6066 struct inet6_dev *in6_dev;
6067
6068 QETH_DBF_TEXT(trace,4,"chkmcv6");
6069 if (!qeth_is_supported(card, IPA_IPV6))
6070		return;
6071 in6_dev = in6_dev_get(card->dev);
6072 if (in6_dev == NULL)
6073 return;
6074 read_lock_bh(&in6_dev->lock);
6075 qeth_add_mc6(card, in6_dev);
6076 qeth_add_vlan_mc6(card);
6077 read_unlock_bh(&in6_dev->lock);
6078 in6_dev_put(in6_dev);
6079}
6080#endif /* CONFIG_QETH_IPV6 */
6081
6082static int
6083qeth_layer2_send_setdelmac(struct qeth_card *card, __u8 *mac,
6084 enum qeth_ipa_cmds ipacmd,
6085 int (*reply_cb) (struct qeth_card *,
6086 struct qeth_reply*,
6087 unsigned long))
6088{
6089 struct qeth_ipa_cmd *cmd;
6090 struct qeth_cmd_buffer *iob;
6091
6092 QETH_DBF_TEXT(trace, 2, "L2sdmac");
6093 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
6094 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6095 cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
6096 memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
6097 return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
6098}
6099
6100static int
6101qeth_layer2_send_setgroupmac_cb(struct qeth_card *card,
6102 struct qeth_reply *reply,
6103 unsigned long data)
6104{
6105 struct qeth_ipa_cmd *cmd;
6106 __u8 *mac;
6107
6108 QETH_DBF_TEXT(trace, 2, "L2Sgmacb");
6109 cmd = (struct qeth_ipa_cmd *) data;
6110 mac = &cmd->data.setdelmac.mac[0];
6111 /* MAC already registered, needed in couple/uncouple case */
6112 if (cmd->hdr.return_code == 0x2005) {
6113 PRINT_WARN("Group MAC %02x:%02x:%02x:%02x:%02x:%02x " \
6114			   "already exists on %s\n",
6115 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
6116 QETH_CARD_IFNAME(card));
6117 cmd->hdr.return_code = 0;
6118 }
6119 if (cmd->hdr.return_code)
6120 PRINT_ERR("Could not set group MAC " \
6121 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
6122 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
6123 QETH_CARD_IFNAME(card),cmd->hdr.return_code);
6124 return 0;
6125}
6126
6127static int
6128qeth_layer2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
6129{
6130 QETH_DBF_TEXT(trace, 2, "L2Sgmac");
6131 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
6132 qeth_layer2_send_setgroupmac_cb);
6133}
6134
6135static int
6136qeth_layer2_send_delgroupmac_cb(struct qeth_card *card,
6137 struct qeth_reply *reply,
6138 unsigned long data)
6139{
6140 struct qeth_ipa_cmd *cmd;
6141 __u8 *mac;
6142
6143 QETH_DBF_TEXT(trace, 2, "L2Dgmacb");
6144 cmd = (struct qeth_ipa_cmd *) data;
6145 mac = &cmd->data.setdelmac.mac[0];
6146 if (cmd->hdr.return_code)
6147 PRINT_ERR("Could not delete group MAC " \
6148 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
6149 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
6150 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
6151 return 0;
6152}
6153
6154static int
6155qeth_layer2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
6156{
6157 QETH_DBF_TEXT(trace, 2, "L2Dgmac");
6158 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
6159 qeth_layer2_send_delgroupmac_cb);
6160}
6161
6162static int
6163qeth_layer2_send_setmac_cb(struct qeth_card *card,
6164 struct qeth_reply *reply,
6165 unsigned long data)
6166{
6167 struct qeth_ipa_cmd *cmd;
6168
6169 QETH_DBF_TEXT(trace, 2, "L2Smaccb");
6170 cmd = (struct qeth_ipa_cmd *) data;
6171 if (cmd->hdr.return_code) {
6172 QETH_DBF_TEXT_(trace, 2, "L2er%x", cmd->hdr.return_code);
6173 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
6174 cmd->hdr.return_code = -EIO;
6175 } else {
6176 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
6177 memcpy(card->dev->dev_addr,cmd->data.setdelmac.mac,
6178 OSA_ADDR_LEN);
6179 PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
6180 "successfully registered on device %s\n",
6181 card->dev->dev_addr[0], card->dev->dev_addr[1],
6182 card->dev->dev_addr[2], card->dev->dev_addr[3],
6183 card->dev->dev_addr[4], card->dev->dev_addr[5],
6184 card->dev->name);
6185 }
6186 return 0;
6187}
6188
6189static int
6190qeth_layer2_send_setmac(struct qeth_card *card, __u8 *mac)
6191{
6192 QETH_DBF_TEXT(trace, 2, "L2Setmac");
6193 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
6194 qeth_layer2_send_setmac_cb);
6195}
6196
6197static int
6198qeth_layer2_send_delmac_cb(struct qeth_card *card,
6199 struct qeth_reply *reply,
6200 unsigned long data)
6201{
6202 struct qeth_ipa_cmd *cmd;
6203
6204 QETH_DBF_TEXT(trace, 2, "L2Dmaccb");
6205 cmd = (struct qeth_ipa_cmd *) data;
6206 if (cmd->hdr.return_code) {
6207 QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code);
6208 cmd->hdr.return_code = -EIO;
6209 return 0;
6210 }
6211 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
6212
6213 return 0;
6214}
6215static int
6216qeth_layer2_send_delmac(struct qeth_card *card, __u8 *mac)
6217{
6218 QETH_DBF_TEXT(trace, 2, "L2Delmac");
6219 if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
6220 return 0;
6221 return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
6222 qeth_layer2_send_delmac_cb);
6223}
6224
6225static int
6226qeth_layer2_set_mac_address(struct net_device *dev, void *p)
6227{
6228 struct sockaddr *addr = p;
6229 struct qeth_card *card;
6230 int rc = 0;
6231
6232 QETH_DBF_TEXT(trace, 3, "setmac");
6233
6234 if (qeth_verify_dev(dev) != QETH_REAL_CARD) {
6235 QETH_DBF_TEXT(trace, 3, "setmcINV");
6236 return -EOPNOTSUPP;
6237 }
6238 card = (struct qeth_card *) dev->priv;
6239
6240 if (!card->options.layer2) {
6241 PRINT_WARN("Setting MAC address on %s is not supported "
6242 "in Layer 3 mode.\n", dev->name);
6243 QETH_DBF_TEXT(trace, 3, "setmcLY3");
6244 return -EOPNOTSUPP;
6245 }
6246 if (card->info.type == QETH_CARD_TYPE_OSN) {
6247 PRINT_WARN("Setting MAC address on %s is not supported.\n",
6248 dev->name);
6249 QETH_DBF_TEXT(trace, 3, "setmcOSN");
6250 return -EOPNOTSUPP;
6251 }
6252 QETH_DBF_TEXT_(trace, 3, "%s", CARD_BUS_ID(card));
6253 QETH_DBF_HEX(trace, 3, addr->sa_data, OSA_ADDR_LEN);
6254 rc = qeth_layer2_send_delmac(card, &card->dev->dev_addr[0]);
6255 if (!rc)
6256 rc = qeth_layer2_send_setmac(card, addr->sa_data);
6257 return rc;
6258}
6259
6260static void
6261qeth_fill_ipacmd_header(struct qeth_card *card, struct qeth_ipa_cmd *cmd,
6262 __u8 command, enum qeth_prot_versions prot)
6263{
6264 memset(cmd, 0, sizeof (struct qeth_ipa_cmd));
6265 cmd->hdr.command = command;
6266 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
6267 cmd->hdr.seqno = card->seqno.ipa;
6268 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
6269 cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
6270 if (card->options.layer2)
6271 cmd->hdr.prim_version_no = 2;
6272 else
6273 cmd->hdr.prim_version_no = 1;
6274 cmd->hdr.param_count = 1;
6275 cmd->hdr.prot_version = prot;
6276 cmd->hdr.ipa_supported = 0;
6277 cmd->hdr.ipa_enabled = 0;
6278}
6279
6280static struct qeth_cmd_buffer *
6281qeth_get_ipacmd_buffer(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
6282 enum qeth_prot_versions prot)
6283{
6284 struct qeth_cmd_buffer *iob;
6285 struct qeth_ipa_cmd *cmd;
6286
6287 iob = qeth_wait_for_buffer(&card->write);
6288 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6289 qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
6290
6291 return iob;
6292}
6293
6294static int
6295qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd)
6296{
6297 int rc;
6298 struct qeth_cmd_buffer *iob;
6299 struct qeth_ipa_cmd *cmd;
6300
6301 QETH_DBF_TEXT(trace,4,"setdelmc");
6302
6303 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
6304 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6305 memcpy(&cmd->data.setdelipm.mac,addr->mac, OSA_ADDR_LEN);
6306 if (addr->proto == QETH_PROT_IPV6)
6307 memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
6308 sizeof(struct in6_addr));
6309 else
6310 memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr,4);
6311
6312 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6313
6314 return rc;
6315}
6316static void
6317qeth_fill_netmask(u8 *netmask, unsigned int len)
6318{
6319 int i,j;
6320 for (i=0;i<16;i++) {
6321 j=(len)-(i*8);
6322 if (j >= 8)
6323 netmask[i] = 0xff;
6324 else if (j > 0)
6325 netmask[i] = (u8)(0xFF00>>j);
6326 else
6327 netmask[i] = 0;
6328 }
6329}
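/*
 * A worked example for qeth_fill_netmask(): a prefix length of 20
 * gives j = 20, 12, 4, -4, ... across the 16 mask bytes, producing
 *
 *	ff ff f0 00 00 00 00 00 00 00 00 00 00 00 00 00
 *
 * since (u8)(0xFF00 >> 4) == 0xf0; i.e. the byte mask for a /20.
 */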
6330
6331static int
6332qeth_send_setdelip(struct qeth_card *card, struct qeth_ipaddr *addr,
6333 int ipacmd, unsigned int flags)
6334{
6335 int rc;
6336 struct qeth_cmd_buffer *iob;
6337 struct qeth_ipa_cmd *cmd;
6338 __u8 netmask[16];
6339
6340 QETH_DBF_TEXT(trace,4,"setdelip");
6341 QETH_DBF_TEXT_(trace,4,"flags%02X", flags);
6342
6343 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
6344 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6345 if (addr->proto == QETH_PROT_IPV6) {
6346 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
6347 sizeof(struct in6_addr));
6348 qeth_fill_netmask(netmask,addr->u.a6.pfxlen);
6349 memcpy(cmd->data.setdelip6.mask, netmask,
6350 sizeof(struct in6_addr));
6351 cmd->data.setdelip6.flags = flags;
6352 } else {
6353 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
6354 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
6355 cmd->data.setdelip4.flags = flags;
6356 }
6357
6358 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
6359
6360 return rc;
6361}
6362
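/*
 * In layer 2 mode only multicast group MACs are registered and
 * deregistered here; the unicast MAC is handled by setmac/delmac.
 */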
6363static int
6364qeth_layer2_register_addr_entry(struct qeth_card *card,
6365 struct qeth_ipaddr *addr)
6366{
6367 if (!addr->is_multicast)
6368 return 0;
6369 QETH_DBF_TEXT(trace, 2, "setgmac");
6370 QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
6371 return qeth_layer2_send_setgroupmac(card, &addr->mac[0]);
6372}
6373
6374static int
6375qeth_layer2_deregister_addr_entry(struct qeth_card *card,
6376 struct qeth_ipaddr *addr)
6377{
6378 if (!addr->is_multicast)
6379 return 0;
6380 QETH_DBF_TEXT(trace, 2, "delgmac");
6381 QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
6382 return qeth_layer2_send_delgroupmac(card, &addr->mac[0]);
6383}
6384
6385static int
6386qeth_layer3_register_addr_entry(struct qeth_card *card,
6387 struct qeth_ipaddr *addr)
6388{
6389 char buf[50];
6390 int rc;
6391 int cnt = 3;
6392
6393 if (addr->proto == QETH_PROT_IPV4) {
6394 QETH_DBF_TEXT(trace, 2,"setaddr4");
6395 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
6396 } else if (addr->proto == QETH_PROT_IPV6) {
6397 QETH_DBF_TEXT(trace, 2, "setaddr6");
6398 QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
6399 QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
6400 } else {
6401 QETH_DBF_TEXT(trace, 2, "setaddr?");
6402 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
6403 }
6404 do {
6405 if (addr->is_multicast)
6406 rc = qeth_send_setdelmc(card, addr, IPA_CMD_SETIPM);
6407 else
6408 rc = qeth_send_setdelip(card, addr, IPA_CMD_SETIP,
6409 addr->set_flags);
6410 if (rc)
6411 QETH_DBF_TEXT(trace, 2, "failed");
6412 } while ((--cnt > 0) && rc);
6413 if (rc){
6414 QETH_DBF_TEXT(trace, 2, "FAILED");
6415 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
6416 PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n",
6417 buf, rc, rc);
6418 }
6419 return rc;
6420}
6421
6422static int
6423qeth_layer3_deregister_addr_entry(struct qeth_card *card,
6424 struct qeth_ipaddr *addr)
6425{
6426	/* char buf[50]; */
6427 int rc;
6428
6429 if (addr->proto == QETH_PROT_IPV4) {
6430 QETH_DBF_TEXT(trace, 2,"deladdr4");
6431 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
6432 } else if (addr->proto == QETH_PROT_IPV6) {
6433 QETH_DBF_TEXT(trace, 2, "deladdr6");
6434 QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
6435 QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
6436 } else {
6437 QETH_DBF_TEXT(trace, 2, "deladdr?");
6438 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
6439 }
6440 if (addr->is_multicast)
6441 rc = qeth_send_setdelmc(card, addr, IPA_CMD_DELIPM);
6442 else
6443 rc = qeth_send_setdelip(card, addr, IPA_CMD_DELIP,
6444 addr->del_flags);
6445 if (rc) {
6446 QETH_DBF_TEXT(trace, 2, "failed");
6447 /* TODO: re-activate this warning as soon as we have a
6448	 * clean microcode
6449 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
6450 PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
6451 buf, rc);
6452 */
6453 }
6454 return rc;
6455}
6456
6457static int
6458qeth_register_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
6459{
6460 if (card->options.layer2)
6461 return qeth_layer2_register_addr_entry(card, addr);
6462
6463 return qeth_layer3_register_addr_entry(card, addr);
6464}
6465
6466static int
6467qeth_deregister_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
6468{
6469 if (card->options.layer2)
6470 return qeth_layer2_deregister_addr_entry(card, addr);
6471
6472 return qeth_layer3_deregister_addr_entry(card, addr);
6473}
6474
6475static u32
6476qeth_ethtool_get_tx_csum(struct net_device *dev)
6477{
6478 return (dev->features & NETIF_F_HW_CSUM) != 0;
6479}
6480
6481static int
6482qeth_ethtool_set_tx_csum(struct net_device *dev, u32 data)
6483{
6484 if (data)
6485 dev->features |= NETIF_F_HW_CSUM;
6486 else
6487 dev->features &= ~NETIF_F_HW_CSUM;
6488
6489 return 0;
6490}
6491
6492static u32
6493qeth_ethtool_get_rx_csum(struct net_device *dev)
6494{
6495 struct qeth_card *card = (struct qeth_card *)dev->priv;
6496
6497 return (card->options.checksum_type == HW_CHECKSUMMING);
6498}
6499
6500static int
6501qeth_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6502{
6503 struct qeth_card *card = (struct qeth_card *)dev->priv;
6504
6505 if ((card->state != CARD_STATE_DOWN) &&
6506 (card->state != CARD_STATE_RECOVER))
6507 return -EPERM;
6508 if (data)
6509 card->options.checksum_type = HW_CHECKSUMMING;
6510 else
6511 card->options.checksum_type = SW_CHECKSUMMING;
6512 return 0;
6513}
6514
6515static u32
6516qeth_ethtool_get_sg(struct net_device *dev)
6517{
6518 struct qeth_card *card = (struct qeth_card *)dev->priv;
6519
6520 return ((card->options.large_send != QETH_LARGE_SEND_NO) &&
6521 (dev->features & NETIF_F_SG));
6522}
6523
6524static int
6525qeth_ethtool_set_sg(struct net_device *dev, u32 data)
6526{
6527 struct qeth_card *card = (struct qeth_card *)dev->priv;
6528
6529 if (data) {
6530 if (card->options.large_send != QETH_LARGE_SEND_NO)
6531 dev->features |= NETIF_F_SG;
6532 else {
6533 dev->features &= ~NETIF_F_SG;
6534 return -EINVAL;
6535 }
6536 } else
6537 dev->features &= ~NETIF_F_SG;
6538 return 0;
6539}
6540
6541static u32
6542qeth_ethtool_get_tso(struct net_device *dev)
6543{
6544 struct qeth_card *card = (struct qeth_card *)dev->priv;
6545
6546 return ((card->options.large_send != QETH_LARGE_SEND_NO) &&
6547 (dev->features & NETIF_F_TSO));
6548}
6549
6550static int
6551qeth_ethtool_set_tso(struct net_device *dev, u32 data)
6552{
6553 struct qeth_card *card = (struct qeth_card *)dev->priv;
6554
6555 if (data) {
6556 if (card->options.large_send != QETH_LARGE_SEND_NO)
6557 dev->features |= NETIF_F_TSO;
6558 else {
6559 dev->features &= ~NETIF_F_TSO;
6560 return -EINVAL;
6561 }
6562 } else
6563 dev->features &= ~NETIF_F_TSO;
6564 return 0;
6565}
6566
6567static struct ethtool_ops qeth_ethtool_ops = {
6568 .get_tx_csum = qeth_ethtool_get_tx_csum,
6569 .set_tx_csum = qeth_ethtool_set_tx_csum,
6570 .get_rx_csum = qeth_ethtool_get_rx_csum,
6571 .set_rx_csum = qeth_ethtool_set_rx_csum,
6572 .get_sg = qeth_ethtool_get_sg,
6573 .set_sg = qeth_ethtool_set_sg,
6574 .get_tso = qeth_ethtool_get_tso,
6575 .set_tso = qeth_ethtool_set_tso,
6576};
6577
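/*
 * header_ops->parse callback: report the source MAC only for token
 * ring devices that actually carry a (possibly faked) LL header.
 */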
6578static int
6579qeth_hard_header_parse(const struct sk_buff *skb, unsigned char *haddr)
6580{
6581 const struct qeth_card *card;
6582 const struct ethhdr *eth;
6583 struct net_device *dev = skb->dev;
6584
6585 if (dev->type != ARPHRD_IEEE802_TR)
6586 return 0;
6587
6588 card = qeth_get_card_from_dev(dev);
6589 if (card->options.layer2)
6590 goto haveheader;
6591#ifdef CONFIG_QETH_IPV6
6592	/* Because of the manipulated ARP constructor and the ARP
6593	   flag for OSAE devices, we have some nasty exceptions. */
6594 if (card->info.type == QETH_CARD_TYPE_OSAE) {
6595 if (!card->options.fake_ll) {
6596 if ((skb->pkt_type==PACKET_OUTGOING) &&
6597 (skb->protocol==ETH_P_IPV6))
6598 goto haveheader;
6599 else
6600 return 0;
6601 } else {
6602 if ((skb->pkt_type==PACKET_OUTGOING) &&
6603 (skb->protocol==ETH_P_IP))
6604 return 0;
6605 else
6606 goto haveheader;
6607 }
6608 }
6609#endif
6610 if (!card->options.fake_ll)
6611 return 0;
6612haveheader:
6613 eth = eth_hdr(skb);
6614 memcpy(haddr, eth->h_source, ETH_ALEN);
6615 return ETH_ALEN;
6616}
6617
6618static const struct header_ops qeth_null_ops = {
6619 .parse = qeth_hard_header_parse,
6620};
6621
6622static int
6623qeth_netdev_init(struct net_device *dev)
6624{
6625 struct qeth_card *card;
6626
6627 card = (struct qeth_card *) dev->priv;
6628
6629 QETH_DBF_TEXT(trace,3,"initdev");
6630
6631 dev->tx_timeout = &qeth_tx_timeout;
6632 dev->watchdog_timeo = QETH_TX_TIMEOUT;
6633 dev->open = qeth_open;
6634 dev->stop = qeth_stop;
6635 dev->hard_start_xmit = qeth_hard_start_xmit;
6636 dev->do_ioctl = qeth_do_ioctl;
6637 dev->get_stats = qeth_get_stats;
6638 dev->change_mtu = qeth_change_mtu;
6639 dev->neigh_setup = qeth_neigh_setup;
6640 dev->set_multicast_list = qeth_set_multicast_list;
6641#ifdef CONFIG_QETH_VLAN
6642 dev->vlan_rx_register = qeth_vlan_rx_register;
6643 dev->vlan_rx_kill_vid = qeth_vlan_rx_kill_vid;
6644 dev->vlan_rx_add_vid = qeth_vlan_rx_add_vid;
6645#endif
6646 if (qeth_get_netdev_flags(card) & IFF_NOARP)
6647 dev->header_ops = &qeth_null_ops;
6648
6649#ifdef CONFIG_QETH_IPV6
6650	/* IPv6 address autoconfiguration */
6651 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
6652 card->dev->dev_id = card->info.unique_id & 0xffff;
6653#endif
6654 if (card->options.fake_ll &&
6655 (qeth_get_netdev_flags(card) & IFF_NOARP))
6656 dev->header_ops = &qeth_fake_ops;
6657
6658 dev->set_mac_address = qeth_layer2_set_mac_address;
6659 dev->flags |= qeth_get_netdev_flags(card);
6660 if ((card->options.fake_broadcast) ||
6661 (card->info.broadcast_capable))
6662 dev->flags |= IFF_BROADCAST;
6663 dev->hard_header_len =
6664 qeth_get_hlen(card->info.link_type) + card->options.add_hhlen;
6665 dev->addr_len = OSA_ADDR_LEN;
6666 dev->mtu = card->info.initial_mtu;
6667 if (card->info.type != QETH_CARD_TYPE_OSN)
6668 SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops);
6669 return 0;
6670}
6671
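/*
 * Select the IDX function level from the card type (IQD vs. OSAE)
 * and whether IP address takeover (IPAT) is enabled.
 */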
6672static void
6673qeth_init_func_level(struct qeth_card *card)
6674{
6675 if (card->ipato.enabled) {
6676 if (card->info.type == QETH_CARD_TYPE_IQD)
6677 card->info.func_level =
6678 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
6679 else
6680 card->info.func_level =
6681 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
6682 } else {
6683 if (card->info.type == QETH_CARD_TYPE_IQD)
6684			/* FIXME: why do we have the same values for disabled and enabled for OSAE? */
6685 card->info.func_level =
6686 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
6687 else
6688 card->info.func_level =
6689 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
6690 }
6691}
6692
6693/**
6694 * Hard-set up the card: initialize the MPC connection and QDIO queues.
6695 */
6696static int
6697qeth_hardsetup_card(struct qeth_card *card)
6698{
6699 int retries = 3;
6700 int rc;
6701
6702 QETH_DBF_TEXT(setup, 2, "hrdsetup");
6703
6704 atomic_set(&card->force_alloc_skb, 0);
6705retry:
6706 if (retries < 3){
6707 PRINT_WARN("Retrying to do IDX activates.\n");
6708 ccw_device_set_offline(CARD_DDEV(card));
6709 ccw_device_set_offline(CARD_WDEV(card));
6710 ccw_device_set_offline(CARD_RDEV(card));
6711 ccw_device_set_online(CARD_RDEV(card));
6712 ccw_device_set_online(CARD_WDEV(card));
6713 ccw_device_set_online(CARD_DDEV(card));
6714 }
6715 rc = qeth_qdio_clear_card(card,card->info.type!=QETH_CARD_TYPE_IQD);
6716 if (rc == -ERESTARTSYS) {
6717 QETH_DBF_TEXT(setup, 2, "break1");
6718 return rc;
6719 } else if (rc) {
6720 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
6721 if (--retries < 0)
6722 goto out;
6723 else
6724 goto retry;
6725 }
6726 if ((rc = qeth_get_unitaddr(card))){
6727 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
6728 return rc;
6729 }
6730 qeth_init_tokens(card);
6731 qeth_init_func_level(card);
6732 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
6733 if (rc == -ERESTARTSYS) {
6734 QETH_DBF_TEXT(setup, 2, "break2");
6735 return rc;
6736 } else if (rc) {
6737 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
6738 if (--retries < 0)
6739 goto out;
6740 else
6741 goto retry;
6742 }
6743 rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
6744 if (rc == -ERESTARTSYS) {
6745 QETH_DBF_TEXT(setup, 2, "break3");
6746 return rc;
6747 } else if (rc) {
6748 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
6749 if (--retries < 0)
6750 goto out;
6751 else
6752 goto retry;
6753 }
6754 if ((rc = qeth_mpc_initialize(card))){
6755 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
6756 goto out;
6757 }
6758	/* the network device will be recovered */
6759 if (card->dev) {
6760 card->dev->header_ops = card->orig_header_ops;
6761 if (card->options.fake_ll &&
6762 (qeth_get_netdev_flags(card) & IFF_NOARP))
6763 card->dev->header_ops = &qeth_fake_ops;
6764 return 0;
6765 }
6766	/* on the first set_online, allocate the netdev */
6767 card->dev = qeth_get_netdevice(card->info.type,
6768 card->info.link_type);
6769 if (!card->dev){
6770 qeth_qdio_clear_card(card, card->info.type !=
6771 QETH_CARD_TYPE_IQD);
6772 rc = -ENODEV;
6773 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
6774 goto out;
6775 }
6776 card->dev->priv = card;
6777 card->orig_header_ops = card->dev->header_ops;
6778 card->dev->type = qeth_get_arphdr_type(card->info.type,
6779 card->info.link_type);
6780 card->dev->init = qeth_netdev_init;
6781 return 0;
6782out:
6783 PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc);
6784 return rc;
6785}
6786
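/*
 * Default SETASSPARMS reply callback: propagate the assist return
 * code, cache the enabled IPA functions per protocol, and remember
 * the checksum mask when the inbound checksum assist was started.
 */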
6787static int
6788qeth_default_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply,
6789 unsigned long data)
6790{
6791 struct qeth_ipa_cmd *cmd;
6792
6793 QETH_DBF_TEXT(trace,4,"defadpcb");
6794
6795 cmd = (struct qeth_ipa_cmd *) data;
6796 if (cmd->hdr.return_code == 0){
6797 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
6798 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
6799 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
6800#ifdef CONFIG_QETH_IPV6
6801 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
6802 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
6803#endif
6804 }
6805 if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
6806 cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
6807 card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
6808 QETH_DBF_TEXT_(trace, 3, "csum:%d", card->info.csum_mask);
6809 }
6810 return 0;
6811}
6812
6813static int
6814qeth_default_setadapterparms_cb(struct qeth_card *card,
6815 struct qeth_reply *reply,
6816 unsigned long data)
6817{
6818 struct qeth_ipa_cmd *cmd;
6819
6820 QETH_DBF_TEXT(trace,4,"defadpcb");
6821
6822 cmd = (struct qeth_ipa_cmd *) data;
6823 if (cmd->hdr.return_code == 0)
6824 cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
6825 return 0;
6826}
6827
6828
6829
6830static int
6831qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply,
6832 unsigned long data)
6833{
6834 struct qeth_ipa_cmd *cmd;
6835
6836 QETH_DBF_TEXT(trace,3,"quyadpcb");
6837
6838 cmd = (struct qeth_ipa_cmd *) data;
6839 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f)
6840 card->info.link_type =
6841 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
6842 card->options.adp.supported_funcs =
6843 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
6844 return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
6845}
6846
6847static int
6848qeth_query_setadapterparms(struct qeth_card *card)
6849{
6850 int rc;
6851 struct qeth_cmd_buffer *iob;
6852
6853 QETH_DBF_TEXT(trace,3,"queryadp");
6854 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
6855 sizeof(struct qeth_ipacmd_setadpparms));
6856 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
6857 return rc;
6858}
6859
6860static int
6861qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
6862 struct qeth_reply *reply,
6863 unsigned long data)
6864{
6865 struct qeth_ipa_cmd *cmd;
6866
6867 QETH_DBF_TEXT(trace,4,"chgmaccb");
6868
6869 cmd = (struct qeth_ipa_cmd *) data;
6870 if (!card->options.layer2 ||
6871 !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
6872 memcpy(card->dev->dev_addr,
6873 &cmd->data.setadapterparms.data.change_addr.addr,
6874 OSA_ADDR_LEN);
6875 card->info.mac_bits |= QETH_LAYER2_MAC_READ;
6876 }
6877 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
6878 return 0;
6879}
6880
6881static int
6882qeth_setadpparms_change_macaddr(struct qeth_card *card)
6883{
6884 int rc;
6885 struct qeth_cmd_buffer *iob;
6886 struct qeth_ipa_cmd *cmd;
6887
6888 QETH_DBF_TEXT(trace,4,"chgmac");
6889
6890 iob = qeth_get_adapter_cmd(card,IPA_SETADP_ALTER_MAC_ADDRESS,
6891 sizeof(struct qeth_ipacmd_setadpparms));
6892 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6893 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
6894 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
6895 memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
6896 card->dev->dev_addr, OSA_ADDR_LEN);
6897 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
6898 NULL);
6899 return rc;
6900}
6901
6902static int
6903qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode)
6904{
6905 int rc;
6906 struct qeth_cmd_buffer *iob;
6907 struct qeth_ipa_cmd *cmd;
6908
6909 QETH_DBF_TEXT(trace,4,"adpmode");
6910
6911 iob = qeth_get_adapter_cmd(card, command,
6912 sizeof(struct qeth_ipacmd_setadpparms));
6913 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
6914 cmd->data.setadapterparms.data.mode = mode;
6915 rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
6916 NULL);
6917 return rc;
6918}
6919
6920static int
6921qeth_setadapter_hstr(struct qeth_card *card)
6922{
6923 int rc;
6924
6925 QETH_DBF_TEXT(trace,4,"adphstr");
6926
6927 if (qeth_adp_supported(card,IPA_SETADP_SET_BROADCAST_MODE)) {
6928 rc = qeth_send_setadp_mode(card, IPA_SETADP_SET_BROADCAST_MODE,
6929 card->options.broadcast_mode);
6930 if (rc)
6931 PRINT_WARN("couldn't set broadcast mode on "
6932 "device %s: x%x\n",
6933 CARD_BUS_ID(card), rc);
6934 rc = qeth_send_setadp_mode(card, IPA_SETADP_ALTER_MAC_ADDRESS,
6935 card->options.macaddr_mode);
6936 if (rc)
6937 PRINT_WARN("couldn't set macaddr mode on "
6938 "device %s: x%x\n", CARD_BUS_ID(card), rc);
6939 return rc;
6940 }
6941 if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
6942 PRINT_WARN("set adapter parameters not available "
6943 "to set broadcast mode, using ALLRINGS "
6944 "on device %s:\n", CARD_BUS_ID(card));
6945 if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
6946 PRINT_WARN("set adapter parameters not available "
6947 "to set macaddr mode, using NONCANONICAL "
6948 "on device %s:\n", CARD_BUS_ID(card));
6949 return 0;
6950}
6951
6952static int
6953qeth_setadapter_parms(struct qeth_card *card)
6954{
6955 int rc;
6956
6957 QETH_DBF_TEXT(setup, 2, "setadprm");
6958
6959 if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)){
6960 PRINT_WARN("set adapter parameters not supported "
6961 "on device %s.\n",
6962 CARD_BUS_ID(card));
6963 QETH_DBF_TEXT(setup, 2, " notsupp");
6964 return 0;
6965 }
6966 rc = qeth_query_setadapterparms(card);
6967 if (rc) {
6968 PRINT_WARN("couldn't set adapter parameters on device %s: "
6969 "x%x\n", CARD_BUS_ID(card), rc);
6970 return rc;
6971 }
6972 if (qeth_adp_supported(card,IPA_SETADP_ALTER_MAC_ADDRESS)) {
6973 rc = qeth_setadpparms_change_macaddr(card);
6974 if (rc)
6975 PRINT_WARN("couldn't get MAC address on "
6976 "device %s: x%x\n",
6977 CARD_BUS_ID(card), rc);
6978 }
6979
6980 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
6981 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
6982 rc = qeth_setadapter_hstr(card);
6983
6984 return rc;
6985}
6986
6987static int
6988qeth_layer2_initialize(struct qeth_card *card)
6989{
6990 int rc = 0;
6991
6992
6993 QETH_DBF_TEXT(setup, 2, "doL2init");
6994 QETH_DBF_TEXT_(setup, 2, "doL2%s", CARD_BUS_ID(card));
6995
6996 rc = qeth_query_setadapterparms(card);
6997 if (rc) {
6998 PRINT_WARN("could not query adapter parameters on device %s: "
6999 "x%x\n", CARD_BUS_ID(card), rc);
7000 }
7001
7002 rc = qeth_setadpparms_change_macaddr(card);
7003 if (rc) {
7004 PRINT_WARN("couldn't get MAC address on "
7005 "device %s: x%x\n",
7006 CARD_BUS_ID(card), rc);
7007 QETH_DBF_TEXT_(setup, 2,"1err%d",rc);
7008 return rc;
7009 }
7010 QETH_DBF_HEX(setup,2, card->dev->dev_addr, OSA_ADDR_LEN);
7011
7012 rc = qeth_layer2_send_setmac(card, &card->dev->dev_addr[0]);
7013 if (rc)
7014 QETH_DBF_TEXT_(setup, 2,"2err%d",rc);
7015 return 0;
7016}
7017
7018
7019static int
7020qeth_send_startstoplan(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
7021 enum qeth_prot_versions prot)
7022{
7023 int rc;
7024 struct qeth_cmd_buffer *iob;
7025
7026 iob = qeth_get_ipacmd_buffer(card,ipacmd,prot);
7027 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
7028
7029 return rc;
7030}
7031
7032static int
7033qeth_send_startlan(struct qeth_card *card, enum qeth_prot_versions prot)
7034{
7035 int rc;
7036
7037 QETH_DBF_TEXT_(setup, 2, "strtlan%i", prot);
7038
7039 rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, prot);
7040 return rc;
7041}
7042
7043static int
7044qeth_send_stoplan(struct qeth_card *card)
7045{
7046 int rc = 0;
7047
7048 /*
7049	 * TODO: according to the IPA format document, page 14,
7050	 * TCP/IP (i.e. we) never issues a STOPLAN.
7051	 * Is this right?
7052 */
7053 QETH_DBF_TEXT(trace, 2, "stoplan");
7054
7055 rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, QETH_PROT_IPV4);
7056 return rc;
7057}
7058
7059static int
7060qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply,
7061 unsigned long data)
7062{
7063 struct qeth_ipa_cmd *cmd;
7064
7065 QETH_DBF_TEXT(setup, 2, "qipasscb");
7066
7067 cmd = (struct qeth_ipa_cmd *) data;
7068 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
7069 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
7070 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
7071		/* IPv6 support is hard-coded off for HiperSockets */
7072 if(card->info.type == QETH_CARD_TYPE_IQD)
7073 card->options.ipa4.supported_funcs &= ~IPA_IPV6;
7074 } else {
7075#ifdef CONFIG_QETH_IPV6
7076 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
7077 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
7078#endif
7079 }
7080 QETH_DBF_TEXT(setup, 2, "suppenbl");
7081 QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_supported);
7082 QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_enabled);
7083 return 0;
7084}
7085
7086static int
7087qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
7088{
7089 int rc;
7090 struct qeth_cmd_buffer *iob;
7091
7092 QETH_DBF_TEXT_(setup, 2, "qipassi%i", prot);
7093 if (card->options.layer2) {
7094 QETH_DBF_TEXT(setup, 2, "noprmly2");
7095 return -EPERM;
7096 }
7097
7098 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_QIPASSIST,prot);
7099 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
7100 return rc;
7101}
7102
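/*
 * Allocate a SETASSPARMS command buffer and fill its sub-header
 * (assist number, command code and data length).
 */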
7103static struct qeth_cmd_buffer *
7104qeth_get_setassparms_cmd(struct qeth_card *card, enum qeth_ipa_funcs ipa_func,
7105 __u16 cmd_code, __u16 len,
7106 enum qeth_prot_versions prot)
7107{
7108 struct qeth_cmd_buffer *iob;
7109 struct qeth_ipa_cmd *cmd;
7110
7111 QETH_DBF_TEXT(trace,4,"getasscm");
7112 iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETASSPARMS,prot);
7113
7114 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7115 cmd->data.setassparms.hdr.assist_no = ipa_func;
7116 cmd->data.setassparms.hdr.length = 8 + len;
7117 cmd->data.setassparms.hdr.command_code = cmd_code;
7118 cmd->data.setassparms.hdr.return_code = 0;
7119 cmd->data.setassparms.hdr.seq_no = 0;
7120
7121 return iob;
7122}
7123
7124static int
7125qeth_send_setassparms(struct qeth_card *card, struct qeth_cmd_buffer *iob,
7126 __u16 len, long data,
7127 int (*reply_cb)
7128 (struct qeth_card *,struct qeth_reply *,unsigned long),
7129 void *reply_param)
7130{
7131 int rc;
7132 struct qeth_ipa_cmd *cmd;
7133
7134 QETH_DBF_TEXT(trace,4,"sendassp");
7135
7136 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7137 if (len <= sizeof(__u32))
7138 cmd->data.setassparms.data.flags_32bit = (__u32) data;
7139 else /* (len > sizeof(__u32)) */
7140 memcpy(&cmd->data.setassparms.data, (void *) data, len);
7141
7142 rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
7143 return rc;
7144}
7145
7146#ifdef CONFIG_QETH_IPV6
7147static int
7148qeth_send_simple_setassparms_ipv6(struct qeth_card *card,
7149 enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
7150
7151{
7152 int rc;
7153 struct qeth_cmd_buffer *iob;
7154
7155 QETH_DBF_TEXT(trace,4,"simassp6");
7156 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
7157 0, QETH_PROT_IPV6);
7158 rc = qeth_send_setassparms(card, iob, 0, 0,
7159 qeth_default_setassparms_cb, NULL);
7160 return rc;
7161}
7162#endif
7163
7164static int
7165qeth_send_simple_setassparms(struct qeth_card *card,
7166 enum qeth_ipa_funcs ipa_func,
7167 __u16 cmd_code, long data)
7168{
7169 int rc;
7170 int length = 0;
7171 struct qeth_cmd_buffer *iob;
7172
7173 QETH_DBF_TEXT(trace,4,"simassp4");
7174 if (data)
7175 length = sizeof(__u32);
7176 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
7177 length, QETH_PROT_IPV4);
7178 rc = qeth_send_setassparms(card, iob, length, data,
7179 qeth_default_setassparms_cb, NULL);
7180 return rc;
7181}
7182
7183static int
7184qeth_start_ipa_arp_processing(struct qeth_card *card)
7185{
7186 int rc;
7187
7188 QETH_DBF_TEXT(trace,3,"ipaarp");
7189
7190 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
7191 PRINT_WARN("ARP processing not supported "
7192 "on %s!\n", QETH_CARD_IFNAME(card));
7193 return 0;
7194 }
7195 rc = qeth_send_simple_setassparms(card,IPA_ARP_PROCESSING,
7196 IPA_CMD_ASS_START, 0);
7197 if (rc) {
7198 PRINT_WARN("Could not start ARP processing "
7199 "assist on %s: 0x%x\n",
7200 QETH_CARD_IFNAME(card), rc);
7201 }
7202 return rc;
7203}
7204
7205static int
7206qeth_start_ipa_ip_fragmentation(struct qeth_card *card)
7207{
7208 int rc;
7209
7210 QETH_DBF_TEXT(trace,3,"ipaipfrg");
7211
7212 if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
7213 PRINT_INFO("Hardware IP fragmentation not supported on %s\n",
7214 QETH_CARD_IFNAME(card));
7215 return -EOPNOTSUPP;
7216 }
7217
7218 rc = qeth_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
7219 IPA_CMD_ASS_START, 0);
7220 if (rc) {
7221 PRINT_WARN("Could not start Hardware IP fragmentation "
7222 "assist on %s: 0x%x\n",
7223 QETH_CARD_IFNAME(card), rc);
7224 } else
7225 PRINT_INFO("Hardware IP fragmentation enabled \n");
7226 return rc;
7227}
7228
7229static int
7230qeth_start_ipa_source_mac(struct qeth_card *card)
7231{
7232 int rc;
7233
7234 QETH_DBF_TEXT(trace,3,"stsrcmac");
7235
7236 if (!card->options.fake_ll)
7237 return -EOPNOTSUPP;
7238
7239 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
7240 PRINT_INFO("Inbound source address not "
7241 "supported on %s\n", QETH_CARD_IFNAME(card));
7242 return -EOPNOTSUPP;
7243 }
7244
7245 rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
7246 IPA_CMD_ASS_START, 0);
7247 if (rc)
7248 PRINT_WARN("Could not start inbound source "
7249 "assist on %s: 0x%x\n",
7250 QETH_CARD_IFNAME(card), rc);
7251 return rc;
7252}
7253
7254static int
7255qeth_start_ipa_vlan(struct qeth_card *card)
7256{
7257 int rc = 0;
7258
7259 QETH_DBF_TEXT(trace,3,"strtvlan");
7260
7261#ifdef CONFIG_QETH_VLAN
7262 if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
7263 PRINT_WARN("VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
7264 return -EOPNOTSUPP;
7265 }
7266
7267 rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
7268 IPA_CMD_ASS_START,0);
7269 if (rc) {
7270 PRINT_WARN("Could not start vlan "
7271 "assist on %s: 0x%x\n",
7272 QETH_CARD_IFNAME(card), rc);
7273 } else {
7274 PRINT_INFO("VLAN enabled \n");
7275 card->dev->features |=
7276 NETIF_F_HW_VLAN_FILTER |
7277 NETIF_F_HW_VLAN_TX |
7278 NETIF_F_HW_VLAN_RX;
7279 }
7280#endif /* QETH_VLAN */
7281 return rc;
7282}
7283
7284static int
7285qeth_start_ipa_multicast(struct qeth_card *card)
7286{
7287 int rc;
7288
7289 QETH_DBF_TEXT(trace,3,"stmcast");
7290
7291 if (!qeth_is_supported(card, IPA_MULTICASTING)) {
7292 PRINT_WARN("Multicast not supported on %s\n",
7293 QETH_CARD_IFNAME(card));
7294 return -EOPNOTSUPP;
7295 }
7296
7297 rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
7298 IPA_CMD_ASS_START,0);
7299 if (rc) {
7300 PRINT_WARN("Could not start multicast "
7301 "assist on %s: rc=%i\n",
7302 QETH_CARD_IFNAME(card), rc);
7303 } else {
7304 PRINT_INFO("Multicast enabled\n");
7305 card->dev->flags |= IFF_MULTICAST;
7306 }
7307 return rc;
7308}
7309
7310#ifdef CONFIG_QETH_IPV6
7311static int
7312qeth_softsetup_ipv6(struct qeth_card *card)
7313{
7314 int rc;
7315
7316 QETH_DBF_TEXT(trace,3,"softipv6");
7317
7318 rc = qeth_send_startlan(card, QETH_PROT_IPV6);
7319 if (rc) {
7320 PRINT_ERR("IPv6 startlan failed on %s\n",
7321 QETH_CARD_IFNAME(card));
7322 return rc;
7323 }
7324 rc = qeth_query_ipassists(card,QETH_PROT_IPV6);
7325 if (rc) {
7326 PRINT_ERR("IPv6 query ipassist failed on %s\n",
7327 QETH_CARD_IFNAME(card));
7328 return rc;
7329 }
7330 rc = qeth_send_simple_setassparms(card, IPA_IPV6,
7331 IPA_CMD_ASS_START, 3);
7332 if (rc) {
7333 PRINT_WARN("IPv6 start assist (version 4) failed "
7334 "on %s: 0x%x\n",
7335 QETH_CARD_IFNAME(card), rc);
7336 return rc;
7337 }
7338 rc = qeth_send_simple_setassparms_ipv6(card, IPA_IPV6,
7339 IPA_CMD_ASS_START);
7340 if (rc) {
7341 PRINT_WARN("IPV6 start assist (version 6) failed "
7342 "on %s: 0x%x\n",
7343 QETH_CARD_IFNAME(card), rc);
7344 return rc;
7345 }
7346 rc = qeth_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
7347 IPA_CMD_ASS_START);
7348 if (rc) {
7349 PRINT_WARN("Could not enable passthrough "
7350 "on %s: 0x%x\n",
7351 QETH_CARD_IFNAME(card), rc);
7352 return rc;
7353 }
7354 PRINT_INFO("IPV6 enabled \n");
7355 return 0;
7356}
7357
7358#endif
7359
7360static int
7361qeth_start_ipa_ipv6(struct qeth_card *card)
7362{
7363 int rc = 0;
7364#ifdef CONFIG_QETH_IPV6
7365 QETH_DBF_TEXT(trace,3,"strtipv6");
7366
7367 if (!qeth_is_supported(card, IPA_IPV6)) {
7368 PRINT_WARN("IPv6 not supported on %s\n",
7369 QETH_CARD_IFNAME(card));
7370 return 0;
7371 }
7372 rc = qeth_softsetup_ipv6(card);
7373#endif
7374 return rc ;
7375}
7376
7377static int
7378qeth_start_ipa_broadcast(struct qeth_card *card)
7379{
7380 int rc;
7381
7382 QETH_DBF_TEXT(trace,3,"stbrdcst");
7383 card->info.broadcast_capable = 0;
7384 if (!qeth_is_supported(card, IPA_FILTERING)) {
7385 PRINT_WARN("Broadcast not supported on %s\n",
7386 QETH_CARD_IFNAME(card));
7387 rc = -EOPNOTSUPP;
7388 goto out;
7389 }
7390 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
7391 IPA_CMD_ASS_START, 0);
7392 if (rc) {
7393 PRINT_WARN("Could not enable broadcasting filtering "
7394 "on %s: 0x%x\n",
7395 QETH_CARD_IFNAME(card), rc);
7396 goto out;
7397 }
7398
7399 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
7400 IPA_CMD_ASS_CONFIGURE, 1);
7401 if (rc) {
7402 PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n",
7403 QETH_CARD_IFNAME(card), rc);
7404 goto out;
7405 }
7406 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
7407 PRINT_INFO("Broadcast enabled \n");
7408 rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
7409 IPA_CMD_ASS_ENABLE, 1);
7410 if (rc) {
7411 PRINT_WARN("Could not set up broadcast echo filtering on "
7412 "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc);
7413 goto out;
7414 }
7415 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
7416out:
7417 if (card->info.broadcast_capable)
7418 card->dev->flags |= IFF_BROADCAST;
7419 else
7420 card->dev->flags &= ~IFF_BROADCAST;
7421 return rc;
7422}
7423
7424static int
7425qeth_send_checksum_command(struct qeth_card *card)
7426{
7427 int rc;
7428
7429 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
7430 IPA_CMD_ASS_START, 0);
7431 if (rc) {
7432 PRINT_WARN("Starting Inbound HW Checksumming failed on %s: "
7433 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
7434 QETH_CARD_IFNAME(card), rc);
7435 return rc;
7436 }
7437 rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
7438 IPA_CMD_ASS_ENABLE,
7439 card->info.csum_mask);
7440 if (rc) {
7441 PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: "
7442 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
7443 QETH_CARD_IFNAME(card), rc);
7444 return rc;
7445 }
7446 return 0;
7447}
7448
7449static int
7450qeth_start_ipa_checksum(struct qeth_card *card)
7451{
7452 int rc = 0;
7453
7454 QETH_DBF_TEXT(trace,3,"strtcsum");
7455
7456 if (card->options.checksum_type == NO_CHECKSUMMING) {
7457 PRINT_WARN("Using no checksumming on %s.\n",
7458 QETH_CARD_IFNAME(card));
7459 return 0;
7460 }
7461 if (card->options.checksum_type == SW_CHECKSUMMING) {
7462 PRINT_WARN("Using SW checksumming on %s.\n",
7463 QETH_CARD_IFNAME(card));
7464 return 0;
7465 }
7466 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
7467 PRINT_WARN("Inbound HW Checksumming not "
7468 "supported on %s,\ncontinuing "
7469 "using Inbound SW Checksumming\n",
7470 QETH_CARD_IFNAME(card));
7471 card->options.checksum_type = SW_CHECKSUMMING;
7472 return 0;
7473 }
7474 rc = qeth_send_checksum_command(card);
7475 if (!rc) {
7476 PRINT_INFO("HW Checksumming (inbound) enabled \n");
7477 }
7478 return rc;
7479}
7480
7481static int
7482qeth_start_ipa_tso(struct qeth_card *card)
7483{
7484 int rc;
7485
7486 QETH_DBF_TEXT(trace,3,"sttso");
7487
7488 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
7489 PRINT_WARN("Outbound TSO not supported on %s\n",
7490 QETH_CARD_IFNAME(card));
7491 rc = -EOPNOTSUPP;
7492 } else {
7493 rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
7494 IPA_CMD_ASS_START,0);
7495 if (rc)
7496 PRINT_WARN("Could not start outbound TSO "
7497 "assist on %s: rc=%i\n",
7498 QETH_CARD_IFNAME(card), rc);
7499 else
7500 PRINT_INFO("Outbound TSO enabled\n");
7501 }
7502 if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)){
7503 card->options.large_send = QETH_LARGE_SEND_NO;
7504 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
7505 NETIF_F_HW_CSUM);
7506 }
7507 return rc;
7508}
7509
7510static int
7511qeth_start_ipassists(struct qeth_card *card)
7512{
7513 QETH_DBF_TEXT(trace,3,"strtipas");
7514	qeth_start_ipa_arp_processing(card);	/* go on */
7515	qeth_start_ipa_ip_fragmentation(card);	/* go on */
7516	qeth_start_ipa_source_mac(card);	/* go on */
7517	qeth_start_ipa_vlan(card);		/* go on */
7518	qeth_start_ipa_multicast(card);		/* go on */
7519	qeth_start_ipa_ipv6(card);		/* go on */
7520	qeth_start_ipa_broadcast(card);		/* go on */
7521	qeth_start_ipa_checksum(card);		/* go on */
7522	qeth_start_ipa_tso(card);		/* go on */
7523 return 0;
7524}
7525
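/*
 * Send a SETRTG command announcing the desired router/connector role
 * for the given protocol version.
 */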
7526static int
7527qeth_send_setrouting(struct qeth_card *card, enum qeth_routing_types type,
7528 enum qeth_prot_versions prot)
7529{
7530 int rc;
7531 struct qeth_ipa_cmd *cmd;
7532 struct qeth_cmd_buffer *iob;
7533
7534 QETH_DBF_TEXT(trace,4,"setroutg");
7535 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
7536 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7537 cmd->data.setrtg.type = (type);
7538 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
7539
7540 return rc;
7541
7542}
7543
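/*
 * Validate the configured routing type against the card: IQD
 * (HiperSockets) accepts connector and multicast roles, OSA accepts
 * router roles (multicast router only if the IPA function is
 * supported). Anything else falls back to 'no router'.
 */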
7544static void
7545qeth_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type,
7546 enum qeth_prot_versions prot)
7547{
7548 if (card->info.type == QETH_CARD_TYPE_IQD) {
7549 switch (*type) {
7550 case NO_ROUTER:
7551 case PRIMARY_CONNECTOR:
7552 case SECONDARY_CONNECTOR:
7553 case MULTICAST_ROUTER:
7554 return;
7555 default:
7556 goto out_inval;
7557 }
7558 } else {
7559 switch (*type) {
7560 case NO_ROUTER:
7561 case PRIMARY_ROUTER:
7562 case SECONDARY_ROUTER:
7563 return;
7564 case MULTICAST_ROUTER:
7565 if (qeth_is_ipafunc_supported(card, prot,
7566 IPA_OSA_MC_ROUTER))
7567 return;
7568 default:
7569 goto out_inval;
7570 }
7571 }
7572out_inval:
7573 PRINT_WARN("Routing type '%s' not supported for interface %s.\n"
7574 "Router status set to 'no router'.\n",
7575 ((*type == PRIMARY_ROUTER)? "primary router" :
7576 (*type == SECONDARY_ROUTER)? "secondary router" :
7577 (*type == PRIMARY_CONNECTOR)? "primary connector" :
7578 (*type == SECONDARY_CONNECTOR)? "secondary connector" :
7579 (*type == MULTICAST_ROUTER)? "multicast router" :
7580 "unknown"),
7581 card->dev->name);
7582 *type = NO_ROUTER;
7583}
7584
7585int
7586qeth_setrouting_v4(struct qeth_card *card)
7587{
7588 int rc;
7589
7590 QETH_DBF_TEXT(trace,3,"setrtg4");
7591
7592 qeth_correct_routing_type(card, &card->options.route4.type,
7593 QETH_PROT_IPV4);
7594
7595 rc = qeth_send_setrouting(card, card->options.route4.type,
7596 QETH_PROT_IPV4);
7597 if (rc) {
7598 card->options.route4.type = NO_ROUTER;
7599 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
7600 "Type set to 'no router'.\n",
7601 rc, QETH_CARD_IFNAME(card));
7602 }
7603 return rc;
7604}
7605
7606int
7607qeth_setrouting_v6(struct qeth_card *card)
7608{
7609 int rc = 0;
7610
7611 QETH_DBF_TEXT(trace,3,"setrtg6");
7612#ifdef CONFIG_QETH_IPV6
7613
7614 if (!qeth_is_supported(card, IPA_IPV6))
7615 return 0;
7616 qeth_correct_routing_type(card, &card->options.route6.type,
7617 QETH_PROT_IPV6);
7618
7619 rc = qeth_send_setrouting(card, card->options.route6.type,
7620 QETH_PROT_IPV6);
7621 if (rc) {
7622 card->options.route6.type = NO_ROUTER;
7623 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
7624 "Type set to 'no router'.\n",
7625 rc, QETH_CARD_IFNAME(card));
7626 }
7627#endif
7628 return rc;
7629}
7630
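/*
 * Switch the large-send mode (off, EDDP or TSO) and adjust the netdev
 * feature flags accordingly, with the transmit queue quiesced while
 * the change is made.
 */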
7631int
7632qeth_set_large_send(struct qeth_card *card, enum qeth_large_send_types type)
7633{
7634 int rc = 0;
7635
7636 if (card->dev == NULL) {
7637 card->options.large_send = type;
7638 return 0;
7639 }
7640 if (card->state == CARD_STATE_UP)
7641 netif_tx_disable(card->dev);
7642 card->options.large_send = type;
7643 switch (card->options.large_send) {
7644 case QETH_LARGE_SEND_EDDP:
7645 card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
7646 NETIF_F_HW_CSUM;
7647 break;
7648 case QETH_LARGE_SEND_TSO:
7649 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)){
7650 card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
7651 NETIF_F_HW_CSUM;
7652 } else {
7653 PRINT_WARN("TSO not supported on %s. "
7654 "large_send set to 'no'.\n",
7655 card->dev->name);
7656 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
7657 NETIF_F_HW_CSUM);
7658 card->options.large_send = QETH_LARGE_SEND_NO;
7659 rc = -EOPNOTSUPP;
7660 }
7661 break;
7662 default: /* includes QETH_LARGE_SEND_NO */
7663 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
7664 NETIF_F_HW_CSUM);
7665 break;
7666 }
7667 if (card->state == CARD_STATE_UP)
7668 netif_wake_queue(card->dev);
7669 return rc;
7670}
7671
7672/*
7673 * softsetup card: init IPA stuff
7674 */
7675static int
7676qeth_softsetup_card(struct qeth_card *card)
7677{
7678 int rc;
7679
7680 QETH_DBF_TEXT(setup, 2, "softsetp");
7681
7682 if ((rc = qeth_send_startlan(card, QETH_PROT_IPV4))){
7683 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
7684 if (rc == 0xe080){
7685 PRINT_WARN("LAN on card %s if offline! "
7686 "Waiting for STARTLAN from card.\n",
7687 CARD_BUS_ID(card));
7688 card->lan_online = 0;
7689 }
7690 return rc;
7691 } else
7692 card->lan_online = 1;
7693 if (card->info.type==QETH_CARD_TYPE_OSN)
7694 goto out;
7695 qeth_set_large_send(card, card->options.large_send);
7696 if (card->options.layer2) {
7697 card->dev->features |=
7698 NETIF_F_HW_VLAN_FILTER |
7699 NETIF_F_HW_VLAN_TX |
7700 NETIF_F_HW_VLAN_RX;
7701 card->dev->flags|=IFF_MULTICAST|IFF_BROADCAST;
7702 card->info.broadcast_capable=1;
7703 if ((rc = qeth_layer2_initialize(card))) {
7704 QETH_DBF_TEXT_(setup, 2, "L2err%d", rc);
7705 return rc;
7706 }
7707#ifdef CONFIG_QETH_VLAN
7708 qeth_layer2_process_vlans(card, 0);
7709#endif
7710 goto out;
7711 }
7712 if ((rc = qeth_setadapter_parms(card)))
7713 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7714 if ((rc = qeth_start_ipassists(card)))
7715 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
7716 if ((rc = qeth_setrouting_v4(card)))
7717 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
7718 if ((rc = qeth_setrouting_v6(card)))
7719 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
7720out:
7721 netif_tx_disable(card->dev);
7722 return 0;
7723}
7724
7725#ifdef CONFIG_QETH_IPV6
7726static int
7727qeth_get_unique_id_cb(struct qeth_card *card, struct qeth_reply *reply,
7728 unsigned long data)
7729{
7730 struct qeth_ipa_cmd *cmd;
7731
7732 cmd = (struct qeth_ipa_cmd *) data;
7733 if (cmd->hdr.return_code == 0)
7734 card->info.unique_id = *((__u16 *)
7735 &cmd->data.create_destroy_addr.unique_id[6]);
7736 else {
7737 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7738 UNIQUE_ID_NOT_BY_CARD;
7739 PRINT_WARN("couldn't get a unique id from the card on device "
7740 "%s (result=x%x), using default id. ipv6 "
7741 "autoconfig on other lpars may lead to duplicate "
7742 "ip addresses. please use manually "
7743 "configured ones.\n",
7744 CARD_BUS_ID(card), cmd->hdr.return_code);
7745 }
7746 return 0;
7747}
7748#endif
7749
7750static int
7751qeth_put_unique_id(struct qeth_card *card)
7752{
7753
7754 int rc = 0;
7755#ifdef CONFIG_QETH_IPV6
7756 struct qeth_cmd_buffer *iob;
7757 struct qeth_ipa_cmd *cmd;
7758
7759 QETH_DBF_TEXT(trace,2,"puniqeid");
7760
7761 if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
7762 UNIQUE_ID_NOT_BY_CARD)
7763 return -1;
7764 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
7765 QETH_PROT_IPV6);
7766 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7767 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
7768 card->info.unique_id;
7769 memcpy(&cmd->data.create_destroy_addr.unique_id[0],
7770 card->dev->dev_addr, OSA_ADDR_LEN);
7771 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
7772#else
7773 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7774 UNIQUE_ID_NOT_BY_CARD;
7775#endif
7776 return rc;
7777}
7778
7779/**
7780 * Clear the IP list: optionally deregister each address from the card and keep non-multicast entries for recovery.
7781 */
7782static void
7783qeth_clear_ip_list(struct qeth_card *card, int clean, int recover)
7784{
7785 struct qeth_ipaddr *addr, *tmp;
7786 unsigned long flags;
7787
7788 QETH_DBF_TEXT(trace,4,"clearip");
7789 spin_lock_irqsave(&card->ip_lock, flags);
7790 /* clear todo list */
7791 list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry){
7792 list_del(&addr->entry);
7793 kfree(addr);
7794 }
7795
7796 while (!list_empty(&card->ip_list)) {
7797 addr = list_entry(card->ip_list.next,
7798 struct qeth_ipaddr, entry);
7799 list_del_init(&addr->entry);
7800 if (clean) {
7801 spin_unlock_irqrestore(&card->ip_lock, flags);
7802 qeth_deregister_addr_entry(card, addr);
7803 spin_lock_irqsave(&card->ip_lock, flags);
7804 }
7805 if (!recover || addr->is_multicast) {
7806 kfree(addr);
7807 continue;
7808 }
7809 list_add_tail(&addr->entry, card->ip_tbd_list);
7810 }
7811 spin_unlock_irqrestore(&card->ip_lock, flags);
7812}
7813
7814static void
7815qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
7816 int clear_start_mask)
7817{
7818 unsigned long flags;
7819
7820 spin_lock_irqsave(&card->thread_mask_lock, flags);
7821 card->thread_allowed_mask = threads;
7822 if (clear_start_mask)
7823 card->thread_start_mask &= threads;
7824 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
7825 wake_up(&card->wait_q);
7826}
7827
7828static int
7829qeth_threads_running(struct qeth_card *card, unsigned long threads)
7830{
7831 unsigned long flags;
7832 int rc = 0;
7833
7834 spin_lock_irqsave(&card->thread_mask_lock, flags);
7835 rc = (card->thread_running_mask & threads);
7836 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
7837 return rc;
7838}
7839
7840static int
7841qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
7842{
7843 return wait_event_interruptible(card->wait_q,
7844 qeth_threads_running(card, threads) == 0);
7845}
7846
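/*
 * Stop the card by walking its state machine down:
 * UP -> SOFTSETUP (close the netdev, optionally delmac/stoplan),
 * SOFTSETUP -> HARDSETUP (clear the IP and pending IPA command lists),
 * HARDSETUP -> DOWN (clear the QDIO buffers and the working pool).
 */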
7847static int
7848qeth_stop_card(struct qeth_card *card, int recovery_mode)
7849{
7850 int rc = 0;
7851
7852 QETH_DBF_TEXT(setup ,2,"stopcard");
7853 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
7854
7855 qeth_set_allowed_threads(card, 0, 1);
7856 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
7857 return -ERESTARTSYS;
7858 if (card->read.state == CH_STATE_UP &&
7859 card->write.state == CH_STATE_UP &&
7860 (card->state == CARD_STATE_UP)) {
7861 if (recovery_mode &&
7862 card->info.type != QETH_CARD_TYPE_OSN) {
7863 qeth_stop(card->dev);
7864 } else {
7865 rtnl_lock();
7866 dev_close(card->dev);
7867 rtnl_unlock();
7868 }
7869 if (!card->use_hard_stop) {
7870 __u8 *mac = &card->dev->dev_addr[0];
7871 rc = qeth_layer2_send_delmac(card, mac);
7872 QETH_DBF_TEXT_(setup, 2, "Lerr%d", rc);
7873 if ((rc = qeth_send_stoplan(card)))
7874 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
7875 }
7876 card->state = CARD_STATE_SOFTSETUP;
7877 }
7878 if (card->state == CARD_STATE_SOFTSETUP) {
7879#ifdef CONFIG_QETH_VLAN
7880 if (card->options.layer2)
7881 qeth_layer2_process_vlans(card, 1);
7882#endif
7883 qeth_clear_ip_list(card, !card->use_hard_stop, 1);
7884 qeth_clear_ipacmd_list(card);
7885 card->state = CARD_STATE_HARDSETUP;
7886 }
7887 if (card->state == CARD_STATE_HARDSETUP) {
7888 if ((!card->use_hard_stop) &&
7889 (!card->options.layer2))
7890 if ((rc = qeth_put_unique_id(card)))
7891 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7892 qeth_qdio_clear_card(card, 0);
7893 qeth_clear_qdio_buffers(card);
7894 qeth_clear_working_pool_list(card);
7895 card->state = CARD_STATE_DOWN;
7896 }
7897 if (card->state == CARD_STATE_DOWN) {
7898 qeth_clear_cmd_buffers(&card->read);
7899 qeth_clear_cmd_buffers(&card->write);
7900 }
7901 card->use_hard_stop = 0;
7902 return rc;
7903}
7904
7905
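/*
 * Ask the card for an IPv6 interface identifier via CREATE_ADDR;
 * without IPv6 support a locally generated default id is used.
 */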
7906static int
7907qeth_get_unique_id(struct qeth_card *card)
7908{
7909 int rc = 0;
7910#ifdef CONFIG_QETH_IPV6
7911 struct qeth_cmd_buffer *iob;
7912 struct qeth_ipa_cmd *cmd;
7913
7914 QETH_DBF_TEXT(setup, 2, "guniqeid");
7915
7916 if (!qeth_is_supported(card,IPA_IPV6)) {
7917 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7918 UNIQUE_ID_NOT_BY_CARD;
7919 return 0;
7920 }
7921
7922 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
7923 QETH_PROT_IPV6);
7924 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
7925 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
7926 card->info.unique_id;
7927
7928 rc = qeth_send_ipa_cmd(card, iob, qeth_get_unique_id_cb, NULL);
7929#else
7930 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
7931 UNIQUE_ID_NOT_BY_CARD;
7932#endif
7933 return rc;
7934}
7935static void
7936qeth_print_status_with_portname(struct qeth_card *card)
7937{
7938 char dbf_text[15];
7939 int i;
7940
7941 sprintf(dbf_text, "%s", card->info.portname + 1);
7942 for (i = 0; i < 8; i++)
7943 dbf_text[i] =
7944 (char) _ebcasc[(__u8) dbf_text[i]];
7945 dbf_text[8] = 0;
7946 printk("qeth: Device %s/%s/%s is a%s card%s%s%s\n"
7947 "with link type %s (portname: %s)\n",
7948 CARD_RDEV_ID(card),
7949 CARD_WDEV_ID(card),
7950 CARD_DDEV_ID(card),
7951 qeth_get_cardname(card),
7952 (card->info.mcl_level[0]) ? " (level: " : "",
7953 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7954 (card->info.mcl_level[0]) ? ")" : "",
7955 qeth_get_cardname_short(card),
7956 dbf_text);
7957
7958}
7959
7960static void
7961qeth_print_status_no_portname(struct qeth_card *card)
7962{
7963 if (card->info.portname[0])
7964 printk("qeth: Device %s/%s/%s is a%s "
7965 "card%s%s%s\nwith link type %s "
7966 "(no portname needed by interface).\n",
7967 CARD_RDEV_ID(card),
7968 CARD_WDEV_ID(card),
7969 CARD_DDEV_ID(card),
7970 qeth_get_cardname(card),
7971 (card->info.mcl_level[0]) ? " (level: " : "",
7972 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7973 (card->info.mcl_level[0]) ? ")" : "",
7974 qeth_get_cardname_short(card));
7975 else
7976 printk("qeth: Device %s/%s/%s is a%s "
7977 "card%s%s%s\nwith link type %s.\n",
7978 CARD_RDEV_ID(card),
7979 CARD_WDEV_ID(card),
7980 CARD_DDEV_ID(card),
7981 qeth_get_cardname(card),
7982 (card->info.mcl_level[0]) ? " (level: " : "",
7983 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
7984 (card->info.mcl_level[0]) ? ")" : "",
7985 qeth_get_cardname_short(card));
7986}
7987
7988static void
7989qeth_print_status_message(struct qeth_card *card)
7990{
7991 switch (card->info.type) {
7992 case QETH_CARD_TYPE_OSAE:
7993		/* VM uses a non-zero first character to indicate
7994		 * HiperSockets-like reporting of the level;
7995		 * OSA sets the first character to zero.
7996		 */
7997 if (!card->info.mcl_level[0]) {
7998 sprintf(card->info.mcl_level,"%02x%02x",
7999 card->info.mcl_level[2],
8000 card->info.mcl_level[3]);
8001
8002 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
8003 break;
8004 }
8005 /* fallthrough */
8006 case QETH_CARD_TYPE_IQD:
8007 if (card->info.guestlan) {
8008 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
8009 card->info.mcl_level[0]];
8010 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
8011 card->info.mcl_level[1]];
8012 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
8013 card->info.mcl_level[2]];
8014 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
8015 card->info.mcl_level[3]];
8016 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
8017 }
8018 break;
8019 default:
8020 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
8021 }
8022 if (card->info.portname_required)
8023 qeth_print_status_with_portname(card);
8024 else
8025 qeth_print_status_no_portname(card);
8026}
8027
8028static int
8029qeth_register_netdev(struct qeth_card *card)
8030{
8031 QETH_DBF_TEXT(setup, 3, "regnetd");
8032 if (card->dev->reg_state != NETREG_UNINITIALIZED)
8033 return 0;
8034 /* sysfs magic */
8035 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
8036 return register_netdev(card->dev);
8037}
8038
8039static void
8040qeth_start_again(struct qeth_card *card, int recovery_mode)
8041{
8042 QETH_DBF_TEXT(setup ,2, "startag");
8043
8044 if (recovery_mode &&
8045 card->info.type != QETH_CARD_TYPE_OSN) {
8046 qeth_open(card->dev);
8047 } else {
8048 rtnl_lock();
8049 dev_open(card->dev);
8050 rtnl_unlock();
8051 }
8052 /* this also sets saved unicast addresses */
8053 qeth_set_multicast_list(card->dev);
8054}
8055
8056
8057/* Layer 2 specific stuff */
8058#define IGNORE_PARAM_EQ(option,value,reset_value,msg) \
8059 if (card->options.option == value) { \
8060 PRINT_ERR("%s not supported with layer 2 " \
8061 "functionality, ignoring option on read" \
8062 "channel device %s .\n",msg,CARD_RDEV_ID(card)); \
8063 card->options.option = reset_value; \
8064 }
8065#define IGNORE_PARAM_NEQ(option,value,reset_value,msg) \
8066 if (card->options.option != value) { \
8067 PRINT_ERR("%s not supported with layer 2 " \
8068 "functionality, ignoring option on read" \
8069 "channel device %s .\n",msg,CARD_RDEV_ID(card)); \
8070 card->options.option = reset_value; \
8071 }
8072
8073
8074static void qeth_make_parameters_consistent(struct qeth_card *card)
8075{
8076
8077 if (card->options.layer2 == 0)
8078 return;
8079 if (card->info.type == QETH_CARD_TYPE_OSN)
8080 return;
8081 if (card->info.type == QETH_CARD_TYPE_IQD) {
8082 PRINT_ERR("Device %s does not support layer 2 functionality." \
8083 " Ignoring layer2 option.\n",CARD_BUS_ID(card));
8084 card->options.layer2 = 0;
8085 return;
8086 }
8087 IGNORE_PARAM_NEQ(route4.type, NO_ROUTER, NO_ROUTER,
8088 "Routing options are");
8089#ifdef CONFIG_QETH_IPV6
8090 IGNORE_PARAM_NEQ(route6.type, NO_ROUTER, NO_ROUTER,
8091 "Routing options are");
8092#endif
8093 IGNORE_PARAM_EQ(checksum_type, HW_CHECKSUMMING,
8094 QETH_CHECKSUM_DEFAULT,
8095 "Checksumming options are");
8096 IGNORE_PARAM_NEQ(broadcast_mode, QETH_TR_BROADCAST_ALLRINGS,
8097 QETH_TR_BROADCAST_ALLRINGS,
8098 "Broadcast mode options are");
8099 IGNORE_PARAM_NEQ(macaddr_mode, QETH_TR_MACADDR_NONCANONICAL,
8100 QETH_TR_MACADDR_NONCANONICAL,
8101 "Canonical MAC addr options are");
8102 IGNORE_PARAM_NEQ(fake_broadcast, 0, 0,
8103 "Broadcast faking options are");
8104 IGNORE_PARAM_NEQ(add_hhlen, DEFAULT_ADD_HHLEN,
8105 DEFAULT_ADD_HHLEN,"Option add_hhlen is");
8106 IGNORE_PARAM_NEQ(fake_ll, 0, 0,"Option fake_ll is");
8107}
8108
8109
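/*
 * Bring the card online: set the ccw devices online, hard-set up the
 * MPC/QDIO layer, query the IPA assists, register the netdev, soft-set
 * up the IPA features and finally initialize the QDIO queues.
 */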
8110static int
8111__qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode)
8112{
8113 struct qeth_card *card = gdev->dev.driver_data;
8114 int rc = 0;
8115 enum qeth_card_states recover_flag;
8116
8117 BUG_ON(!card);
8118 QETH_DBF_TEXT(setup ,2, "setonlin");
8119 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
8120
8121 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
8122 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)){
8123 PRINT_WARN("set_online of card %s interrupted by user!\n",
8124 CARD_BUS_ID(card));
8125 return -ERESTARTSYS;
8126 }
8127
8128 recover_flag = card->state;
8129 if ((rc = ccw_device_set_online(CARD_RDEV(card))) ||
8130 (rc = ccw_device_set_online(CARD_WDEV(card))) ||
8131 (rc = ccw_device_set_online(CARD_DDEV(card)))){
8132 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
8133 return -EIO;
8134 }
8135
8136 qeth_make_parameters_consistent(card);
8137
8138 if ((rc = qeth_hardsetup_card(card))){
8139 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
8140 goto out_remove;
8141 }
8142 card->state = CARD_STATE_HARDSETUP;
8143
8144 if (!(rc = qeth_query_ipassists(card,QETH_PROT_IPV4)))
8145 rc = qeth_get_unique_id(card);
8146
8147 if (rc && card->options.layer2 == 0) {
8148 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
8149 goto out_remove;
8150 }
8151 qeth_print_status_message(card);
8152 if ((rc = qeth_register_netdev(card))){
8153 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
8154 goto out_remove;
8155 }
8156 if ((rc = qeth_softsetup_card(card))){
8157 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
8158 goto out_remove;
8159 }
8160
8161 if ((rc = qeth_init_qdio_queues(card))){
8162 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
8163 goto out_remove;
8164 }
8165 card->state = CARD_STATE_SOFTSETUP;
8166 netif_carrier_on(card->dev);
8167
8168 qeth_set_allowed_threads(card, 0xffffffff, 0);
8169 if (recover_flag == CARD_STATE_RECOVER)
8170 qeth_start_again(card, recovery_mode);
8171 qeth_notify_processes();
8172 return 0;
8173out_remove:
8174 card->use_hard_stop = 1;
8175 qeth_stop_card(card, 0);
8176 ccw_device_set_offline(CARD_DDEV(card));
8177 ccw_device_set_offline(CARD_WDEV(card));
8178 ccw_device_set_offline(CARD_RDEV(card));
8179 if (recover_flag == CARD_STATE_RECOVER)
8180 card->state = CARD_STATE_RECOVER;
8181 else
8182 card->state = CARD_STATE_DOWN;
8183 return -ENODEV;
8184}
8185
8186static int
8187qeth_set_online(struct ccwgroup_device *gdev)
8188{
8189 return __qeth_set_online(gdev, 0);
8190}
8191
8192static struct ccw_device_id qeth_ids[] = {
8193 {CCW_DEVICE(0x1731, 0x01), .driver_info = QETH_CARD_TYPE_OSAE},
8194 {CCW_DEVICE(0x1731, 0x05), .driver_info = QETH_CARD_TYPE_IQD},
8195 {CCW_DEVICE(0x1731, 0x06), .driver_info = QETH_CARD_TYPE_OSN},
8196 {},
8197};
8198MODULE_DEVICE_TABLE(ccw, qeth_ids);
8199
8200struct device *qeth_root_dev = NULL;
8201
8202struct ccwgroup_driver qeth_ccwgroup_driver = {
8203 .owner = THIS_MODULE,
8204 .name = "qeth",
8205 .driver_id = 0xD8C5E3C8,
8206 .probe = qeth_probe_device,
8207 .remove = qeth_remove_device,
8208 .set_online = qeth_set_online,
8209 .set_offline = qeth_set_offline,
8210};
8211
8212struct ccw_driver qeth_ccw_driver = {
8213 .name = "qeth",
8214 .ids = qeth_ids,
8215 .probe = ccwgroup_probe_ccwdev,
8216 .remove = ccwgroup_remove_ccwdev,
8217};
8218
8219
8220static void
8221qeth_unregister_dbf_views(void)
8222{
8223 if (qeth_dbf_setup)
8224 debug_unregister(qeth_dbf_setup);
8225 if (qeth_dbf_qerr)
8226 debug_unregister(qeth_dbf_qerr);
8227 if (qeth_dbf_sense)
8228 debug_unregister(qeth_dbf_sense);
8229 if (qeth_dbf_misc)
8230 debug_unregister(qeth_dbf_misc);
8231 if (qeth_dbf_data)
8232 debug_unregister(qeth_dbf_data);
8233 if (qeth_dbf_control)
8234 debug_unregister(qeth_dbf_control);
8235 if (qeth_dbf_trace)
8236 debug_unregister(qeth_dbf_trace);
8237}
8238static int
8239qeth_register_dbf_views(void)
8240{
8241 qeth_dbf_setup = debug_register(QETH_DBF_SETUP_NAME,
8242 QETH_DBF_SETUP_PAGES,
8243 QETH_DBF_SETUP_NR_AREAS,
8244 QETH_DBF_SETUP_LEN);
8245 qeth_dbf_misc = debug_register(QETH_DBF_MISC_NAME,
8246 QETH_DBF_MISC_PAGES,
8247 QETH_DBF_MISC_NR_AREAS,
8248 QETH_DBF_MISC_LEN);
8249 qeth_dbf_data = debug_register(QETH_DBF_DATA_NAME,
8250 QETH_DBF_DATA_PAGES,
8251 QETH_DBF_DATA_NR_AREAS,
8252 QETH_DBF_DATA_LEN);
8253 qeth_dbf_control = debug_register(QETH_DBF_CONTROL_NAME,
8254 QETH_DBF_CONTROL_PAGES,
8255 QETH_DBF_CONTROL_NR_AREAS,
8256 QETH_DBF_CONTROL_LEN);
8257 qeth_dbf_sense = debug_register(QETH_DBF_SENSE_NAME,
8258 QETH_DBF_SENSE_PAGES,
8259 QETH_DBF_SENSE_NR_AREAS,
8260 QETH_DBF_SENSE_LEN);
8261 qeth_dbf_qerr = debug_register(QETH_DBF_QERR_NAME,
8262 QETH_DBF_QERR_PAGES,
8263 QETH_DBF_QERR_NR_AREAS,
8264 QETH_DBF_QERR_LEN);
8265 qeth_dbf_trace = debug_register(QETH_DBF_TRACE_NAME,
8266 QETH_DBF_TRACE_PAGES,
8267 QETH_DBF_TRACE_NR_AREAS,
8268 QETH_DBF_TRACE_LEN);
8269
8270 if ((qeth_dbf_setup == NULL) || (qeth_dbf_misc == NULL) ||
8271 (qeth_dbf_data == NULL) || (qeth_dbf_control == NULL) ||
8272 (qeth_dbf_sense == NULL) || (qeth_dbf_qerr == NULL) ||
8273 (qeth_dbf_trace == NULL)) {
8274 qeth_unregister_dbf_views();
8275 return -ENOMEM;
8276 }
8277 debug_register_view(qeth_dbf_setup, &debug_hex_ascii_view);
8278 debug_set_level(qeth_dbf_setup, QETH_DBF_SETUP_LEVEL);
8279
8280 debug_register_view(qeth_dbf_misc, &debug_hex_ascii_view);
8281 debug_set_level(qeth_dbf_misc, QETH_DBF_MISC_LEVEL);
8282
8283 debug_register_view(qeth_dbf_data, &debug_hex_ascii_view);
8284 debug_set_level(qeth_dbf_data, QETH_DBF_DATA_LEVEL);
8285
8286 debug_register_view(qeth_dbf_control, &debug_hex_ascii_view);
8287 debug_set_level(qeth_dbf_control, QETH_DBF_CONTROL_LEVEL);
8288
8289 debug_register_view(qeth_dbf_sense, &debug_hex_ascii_view);
8290 debug_set_level(qeth_dbf_sense, QETH_DBF_SENSE_LEVEL);
8291
8292 debug_register_view(qeth_dbf_qerr, &debug_hex_ascii_view);
8293 debug_set_level(qeth_dbf_qerr, QETH_DBF_QERR_LEVEL);
8294
8295 debug_register_view(qeth_dbf_trace, &debug_hex_ascii_view);
8296 debug_set_level(qeth_dbf_trace, QETH_DBF_TRACE_LEVEL);
8297
8298 return 0;
8299}
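
qeth_register_dbf_views() is deliberately all-or-nothing: every debug area is registered first, all seven handles are checked in one place, and a single helper that tolerates NULL handles serves both the error path and module exit. A minimal userspace sketch of that pattern, with reg()/unreg() as illustrative stand-ins for debug_register()/debug_unregister():

#include <stdio.h>
#include <stdlib.h>

static void *reg(const char *name) { (void)name; return malloc(1); }
static void unreg(void *h) { free(h); }

static void *h_setup, *h_trace;	/* stand-ins for the qeth_dbf_* handles */

static void unregister_all(void)
{
	/* tolerate NULL handles so this also serves the error path */
	if (h_setup)
		unreg(h_setup);
	if (h_trace)
		unreg(h_trace);
	h_setup = h_trace = NULL;
}

static int register_all(void)
{
	h_setup = reg("setup");
	h_trace = reg("trace");
	if (h_setup == NULL || h_trace == NULL) {
		unregister_all();
		return -1;	/* the driver returns -ENOMEM here */
	}
	return 0;
}

int main(void)
{
	printf("register_all: %d\n", register_all());
	unregister_all();
	return 0;
}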
8300
8301#ifdef CONFIG_QETH_IPV6
8302extern struct neigh_table arp_tbl;
8303static struct neigh_ops *arp_direct_ops;
8304static int (*qeth_old_arp_constructor) (struct neighbour *);
8305
8306static struct neigh_ops arp_direct_ops_template = {
8307 .family = AF_INET,
8308 .solicit = NULL,
8309 .error_report = NULL,
8310 .output = dev_queue_xmit,
8311 .connected_output = dev_queue_xmit,
8312 .hh_output = dev_queue_xmit,
8313 .queue_xmit = dev_queue_xmit
8314};
8315
8316static int
8317qeth_arp_constructor(struct neighbour *neigh)
8318{
8319 struct net_device *dev = neigh->dev;
8320 struct in_device *in_dev;
8321 struct neigh_parms *parms;
8322 struct qeth_card *card;
8323
8324 card = qeth_get_card_from_dev(dev);
8325 if (card == NULL)
8326 goto out;
8327 if((card->options.layer2) ||
8328 (card->dev->header_ops == &qeth_fake_ops))
8329 goto out;
8330
8331 rcu_read_lock();
8332 in_dev = __in_dev_get_rcu(dev);
8333 if (in_dev == NULL) {
8334 rcu_read_unlock();
8335 return -EINVAL;
8336 }
8337
8338 parms = in_dev->arp_parms;
8339 __neigh_parms_put(neigh->parms);
8340 neigh->parms = neigh_parms_clone(parms);
8341 rcu_read_unlock();
8342
8343 neigh->type = inet_addr_type(&init_net, *(__be32 *) neigh->primary_key);
8344 neigh->nud_state = NUD_NOARP;
8345 neigh->ops = arp_direct_ops;
8346 neigh->output = neigh->ops->queue_xmit;
8347 return 0;
8348out:
8349 return qeth_old_arp_constructor(neigh);
8350}
8351#endif /*CONFIG_QETH_IPV6*/
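
For layer-3 qeth interfaces the constructor above short-circuits neighbour resolution: the entry is marked NUD_NOARP and every output hook is pointed at dev_queue_xmit, so frames bypass ARP entirely. At its core this is an ops-table swap; the sketch below illustrates the idea with invented types (struct ops and neighbour_stub are not kernel structures):

#include <stdio.h>

struct ops { void (*output)(const char *); };

static void resolve_then_send(const char *m) { printf("ARP+send: %s\n", m); }
static void send_direct(const char *m)       { printf("direct:   %s\n", m); }

static struct ops arp_ops    = { resolve_then_send };
static struct ops direct_ops = { send_direct };

struct neighbour_stub { struct ops *ops; };

int main(void)
{
	struct neighbour_stub n = { &arp_ops };

	/* the qeth constructor swaps in direct ops for NOARP media */
	n.ops = &direct_ops;
	n.ops->output("hello");
	return 0;
}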
8352
8353/*
8354 * IP address takeover related functions
8355 */
8356static void
8357qeth_clear_ipato_list(struct qeth_card *card)
8358{
8359 struct qeth_ipato_entry *ipatoe, *tmp;
8360 unsigned long flags;
8361
8362 spin_lock_irqsave(&card->ip_lock, flags);
8363 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
8364 list_del(&ipatoe->entry);
8365 kfree(ipatoe);
8366 }
8367 spin_unlock_irqrestore(&card->ip_lock, flags);
8368}
8369
8370int
8371qeth_add_ipato_entry(struct qeth_card *card, struct qeth_ipato_entry *new)
8372{
8373 struct qeth_ipato_entry *ipatoe;
8374 unsigned long flags;
8375 int rc = 0;
8376
8377 QETH_DBF_TEXT(trace, 2, "addipato");
8378 spin_lock_irqsave(&card->ip_lock, flags);
8379 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
8380 if (ipatoe->proto != new->proto)
8381 continue;
8382 if (!memcmp(ipatoe->addr, new->addr,
8383 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
8384 (ipatoe->mask_bits == new->mask_bits)){
8385 PRINT_WARN("ipato entry already exists!\n");
8386 rc = -EEXIST;
8387 break;
8388 }
8389 }
8390 if (!rc) {
8391 list_add_tail(&new->entry, &card->ipato.entries);
8392 }
8393 spin_unlock_irqrestore(&card->ip_lock, flags);
8394 return rc;
8395}
8396
8397void
8398qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto,
8399 u8 *addr, int mask_bits)
8400{
8401 struct qeth_ipato_entry *ipatoe, *tmp;
8402 unsigned long flags;
8403
8404 QETH_DBF_TEXT(trace, 2, "delipato");
8405 spin_lock_irqsave(&card->ip_lock, flags);
8406 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry){
8407 if (ipatoe->proto != proto)
8408 continue;
8409 if (!memcmp(ipatoe->addr, addr,
8410 (proto == QETH_PROT_IPV4)? 4:16) &&
8411 (ipatoe->mask_bits == mask_bits)){
8412 list_del(&ipatoe->entry);
8413 kfree(ipatoe);
8414 }
8415 }
8416 spin_unlock_irqrestore(&card->ip_lock, flags);
8417}
8418
8419static void
8420qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
8421{
8422 int i, j;
8423 u8 octet;
8424
8425 for (i = 0; i < len; ++i){
8426 octet = addr[i];
8427 for (j = 7; j >= 0; --j){
8428 bits[i*8 + j] = octet & 1;
8429 octet >>= 1;
8430 }
8431 }
8432}
8433
8434static int
8435qeth_is_addr_covered_by_ipato(struct qeth_card *card, struct qeth_ipaddr *addr)
8436{
8437 struct qeth_ipato_entry *ipatoe;
8438 u8 addr_bits[128] = {0, };
8439 u8 ipatoe_bits[128] = {0, };
8440 int rc = 0;
8441
8442 if (!card->ipato.enabled)
8443 return 0;
8444
8445 qeth_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
8446 (addr->proto == QETH_PROT_IPV4)? 4:16);
8447 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
8448 if (addr->proto != ipatoe->proto)
8449 continue;
8450 qeth_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
8451 (ipatoe->proto==QETH_PROT_IPV4) ?
8452 4:16);
8453 if (addr->proto == QETH_PROT_IPV4)
8454 rc = !memcmp(addr_bits, ipatoe_bits,
8455 min(32, ipatoe->mask_bits));
8456 else
8457 rc = !memcmp(addr_bits, ipatoe_bits,
8458 min(128, ipatoe->mask_bits));
8459 if (rc)
8460 break;
8461 }
8462 /* invert? */
8463 if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
8464 rc = !rc;
8465 else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
8466 rc = !rc;
8467
8468 return rc;
8469}
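
The coverage test above works by exploding each address byte into eight 0/1 entries, most significant bit first, so a prefix of mask_bits bits can be compared with a single memcmp(); invert4/invert6 then merely negate the verdict. A compilable userspace sketch of the same test (addresses and mask chosen for the example):

#include <stdio.h>
#include <string.h>

/* expand each byte into eight 0/1 entries, MSB first,
 * mirroring qeth_convert_addr_to_bits() */
static void addr_to_bits(const unsigned char *addr, unsigned char *bits,
			 int len)
{
	int i, j;

	for (i = 0; i < len; i++)
		for (j = 0; j < 8; j++)
			bits[i * 8 + j] = (addr[i] >> (7 - j)) & 1;
}

int main(void)
{
	unsigned char addr[4] = { 192, 168, 42, 7 };	/* candidate address */
	unsigned char net[4]  = { 192, 168, 0, 0 };	/* takeover entry */
	unsigned char a[32], n[32];
	int mask_bits = 16;

	addr_to_bits(addr, a, 4);
	addr_to_bits(net, n, 4);
	/* prefix match over the first mask_bits entries */
	printf("covered: %d\n", !memcmp(a, n, mask_bits));
	return 0;
}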
8470
8471/*
8472 * VIPA related functions
8473 */
8474int
8475qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
8476 const u8 *addr)
8477{
8478 struct qeth_ipaddr *ipaddr;
8479 unsigned long flags;
8480 int rc = 0;
8481
8482 ipaddr = qeth_get_addr_buffer(proto);
8483 if (ipaddr){
8484 if (proto == QETH_PROT_IPV4){
8485 QETH_DBF_TEXT(trace, 2, "addvipa4");
8486 memcpy(&ipaddr->u.a4.addr, addr, 4);
8487 ipaddr->u.a4.mask = 0;
8488#ifdef CONFIG_QETH_IPV6
8489 } else if (proto == QETH_PROT_IPV6){
8490 QETH_DBF_TEXT(trace, 2, "addvipa6");
8491 memcpy(&ipaddr->u.a6.addr, addr, 16);
8492 ipaddr->u.a6.pfxlen = 0;
8493#endif
8494 }
8495 ipaddr->type = QETH_IP_TYPE_VIPA;
8496 ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
8497 ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
8498 } else
8499 return -ENOMEM;
8500 spin_lock_irqsave(&card->ip_lock, flags);
8501 if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
8502 __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
8503 rc = -EEXIST;
8504 spin_unlock_irqrestore(&card->ip_lock, flags);
8505 if (rc){
8506 PRINT_WARN("Cannot add VIPA. Address already exists!\n");
8507 return rc;
8508 }
8509 if (!qeth_add_ip(card, ipaddr))
8510 kfree(ipaddr);
8511 qeth_set_ip_addr_list(card);
8512 return rc;
8513}
8514
8515void
8516qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
8517 const u8 *addr)
8518{
8519 struct qeth_ipaddr *ipaddr;
8520
8521 ipaddr = qeth_get_addr_buffer(proto);
8522 if (ipaddr){
8523 if (proto == QETH_PROT_IPV4){
8524 QETH_DBF_TEXT(trace, 2, "delvipa4");
8525 memcpy(&ipaddr->u.a4.addr, addr, 4);
8526 ipaddr->u.a4.mask = 0;
8527#ifdef CONFIG_QETH_IPV6
8528 } else if (proto == QETH_PROT_IPV6){
8529 QETH_DBF_TEXT(trace, 2, "delvipa6");
8530 memcpy(&ipaddr->u.a6.addr, addr, 16);
8531 ipaddr->u.a6.pfxlen = 0;
8532#endif
8533 }
8534 ipaddr->type = QETH_IP_TYPE_VIPA;
8535 } else
8536 return;
8537 if (!qeth_delete_ip(card, ipaddr))
8538 kfree(ipaddr);
8539 qeth_set_ip_addr_list(card);
8540}
8541
8542/*
8543 * proxy ARP related functions
8544 */
8545int
8546qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
8547 const u8 *addr)
8548{
8549 struct qeth_ipaddr *ipaddr;
8550 unsigned long flags;
8551 int rc = 0;
8552
8553 ipaddr = qeth_get_addr_buffer(proto);
8554 if (ipaddr){
8555 if (proto == QETH_PROT_IPV4){
8556 QETH_DBF_TEXT(trace, 2, "addrxip4");
8557 memcpy(&ipaddr->u.a4.addr, addr, 4);
8558 ipaddr->u.a4.mask = 0;
8559#ifdef CONFIG_QETH_IPV6
8560 } else if (proto == QETH_PROT_IPV6){
8561 QETH_DBF_TEXT(trace, 2, "addrxip6");
8562 memcpy(&ipaddr->u.a6.addr, addr, 16);
8563 ipaddr->u.a6.pfxlen = 0;
8564#endif
8565 }
8566 ipaddr->type = QETH_IP_TYPE_RXIP;
8567 ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
8568 ipaddr->del_flags = 0;
8569 } else
8570 return -ENOMEM;
8571 spin_lock_irqsave(&card->ip_lock, flags);
8572 if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
8573 __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
8574 rc = -EEXIST;
8575 spin_unlock_irqrestore(&card->ip_lock, flags);
8576 if (rc){
8577 PRINT_WARN("Cannot add RXIP. Address already exists!\n");
8578 return rc;
8579 }
8580 if (!qeth_add_ip(card, ipaddr))
8581 kfree(ipaddr);
8582 qeth_set_ip_addr_list(card);
8583 return 0;
8584}
8585
8586void
8587qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
8588 const u8 *addr)
8589{
8590 struct qeth_ipaddr *ipaddr;
8591
8592 ipaddr = qeth_get_addr_buffer(proto);
8593 if (ipaddr){
8594 if (proto == QETH_PROT_IPV4){
8595 QETH_DBF_TEXT(trace, 2, "delrxip4");
8596 memcpy(&ipaddr->u.a4.addr, addr, 4);
8597 ipaddr->u.a4.mask = 0;
8598#ifdef CONFIG_QETH_IPV6
8599 } else if (proto == QETH_PROT_IPV6){
8600 QETH_DBF_TEXT(trace, 2, "delrxip6");
8601 memcpy(&ipaddr->u.a6.addr, addr, 16);
8602 ipaddr->u.a6.pfxlen = 0;
8603#endif
8604 }
8605 ipaddr->type = QETH_IP_TYPE_RXIP;
8606 } else
8607 return;
8608 if (!qeth_delete_ip(card, ipaddr))
8609 kfree(ipaddr);
8610 qeth_set_ip_addr_list(card);
8611}
8612
8613/**
8614 * IP event handler
8615 */
8616static int
8617qeth_ip_event(struct notifier_block *this,
8618 unsigned long event,void *ptr)
8619{
8620 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
8621 struct net_device *dev =(struct net_device *) ifa->ifa_dev->dev;
8622 struct qeth_ipaddr *addr;
8623 struct qeth_card *card;
8624
8625 if (dev->nd_net != &init_net)
8626 return NOTIFY_DONE;
8627
8628 QETH_DBF_TEXT(trace,3,"ipevent");
8629 card = qeth_get_card_from_dev(dev);
8630 if (!card)
8631 return NOTIFY_DONE;
8632 if (card->options.layer2)
8633 return NOTIFY_DONE;
8634
8635 addr = qeth_get_addr_buffer(QETH_PROT_IPV4);
8636 if (addr != NULL) {
8637 addr->u.a4.addr = ifa->ifa_address;
8638 addr->u.a4.mask = ifa->ifa_mask;
8639 addr->type = QETH_IP_TYPE_NORMAL;
8640 } else
8641 goto out;
8642
8643 switch(event) {
8644 case NETDEV_UP:
8645 if (!qeth_add_ip(card, addr))
8646 kfree(addr);
8647 break;
8648 case NETDEV_DOWN:
8649 if (!qeth_delete_ip(card, addr))
8650 kfree(addr);
8651 break;
8652 default:
8653 break;
8654 }
8655 qeth_set_ip_addr_list(card);
8656out:
8657 return NOTIFY_DONE;
8658}
8659
8660static struct notifier_block qeth_ip_notifier = {
8661	.notifier_call = qeth_ip_event,
8662	.next = NULL,
8663};
8664
8665#ifdef CONFIG_QETH_IPV6
8666/**
8667 * IPv6 event handler
8668 */
8669static int
8670qeth_ip6_event(struct notifier_block *this,
8671 unsigned long event,void *ptr)
8672{
8673
8674 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
8675 struct net_device *dev = (struct net_device *)ifa->idev->dev;
8676 struct qeth_ipaddr *addr;
8677 struct qeth_card *card;
8678
8679 QETH_DBF_TEXT(trace,3,"ip6event");
8680
8681 card = qeth_get_card_from_dev(dev);
8682 if (!card)
8683 return NOTIFY_DONE;
8684 if (!qeth_is_supported(card, IPA_IPV6))
8685 return NOTIFY_DONE;
8686
8687 addr = qeth_get_addr_buffer(QETH_PROT_IPV6);
8688 if (addr != NULL) {
8689 memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
8690 addr->u.a6.pfxlen = ifa->prefix_len;
8691 addr->type = QETH_IP_TYPE_NORMAL;
8692 } else
8693 goto out;
8694
8695 switch(event) {
8696 case NETDEV_UP:
8697 if (!qeth_add_ip(card, addr))
8698 kfree(addr);
8699 break;
8700 case NETDEV_DOWN:
8701 if (!qeth_delete_ip(card, addr))
8702 kfree(addr);
8703 break;
8704 default:
8705 break;
8706 }
8707 qeth_set_ip_addr_list(card);
8708out:
8709 return NOTIFY_DONE;
8710}
8711
8712static struct notifier_block qeth_ip6_notifier = {
8713	.notifier_call = qeth_ip6_event,
8714	.next = NULL,
8715};
8716#endif
8717
8718static int
8719__qeth_reboot_event_card(struct device *dev, void *data)
8720{
8721 struct qeth_card *card;
8722
8723 card = (struct qeth_card *) dev->driver_data;
8724 qeth_clear_ip_list(card, 0, 0);
8725 qeth_qdio_clear_card(card, 0);
8726 qeth_clear_qdio_buffers(card);
8727 return 0;
8728}
8729
8730static int
8731qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
8732{
8733 int ret;
8734
8735 ret = driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL,
8736 __qeth_reboot_event_card);
8737 return ret ? NOTIFY_BAD : NOTIFY_DONE;
8738}
8739
8740
8741static struct notifier_block qeth_reboot_notifier = {
8742	.notifier_call = qeth_reboot_event,
8743	.next = NULL,
8744};
8745
8746static int
8747qeth_register_notifiers(void)
8748{
8749 int r;
8750
8751 QETH_DBF_TEXT(trace,5,"regnotif");
8752 if ((r = register_reboot_notifier(&qeth_reboot_notifier)))
8753 return r;
8754 if ((r = register_inetaddr_notifier(&qeth_ip_notifier)))
8755 goto out_reboot;
8756#ifdef CONFIG_QETH_IPV6
8757 if ((r = register_inet6addr_notifier(&qeth_ip6_notifier)))
8758 goto out_ipv4;
8759#endif
8760 return 0;
8761
8762#ifdef CONFIG_QETH_IPV6
8763out_ipv4:
8764 unregister_inetaddr_notifier(&qeth_ip_notifier);
8765#endif
8766out_reboot:
8767 unregister_reboot_notifier(&qeth_reboot_notifier);
8768 return r;
8769}
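
The registration chain above uses the classic goto-unwind idiom: each successful step adds one label's worth of cleanup, and a failure jumps to the label that undoes exactly the steps already taken. Sketched in isolation (the reg_*/unreg_* functions are stand-ins, with reg_c forced to fail):

#include <stdio.h>

static int reg_a(void) { return 0; }
static int reg_b(void) { return 0; }
static int reg_c(void) { return -1; }	/* force the error path */
static void unreg_a(void) { puts("unreg a"); }
static void unreg_b(void) { puts("unreg b"); }

static int register_all(void)
{
	int r;

	if ((r = reg_a()))
		return r;
	if ((r = reg_b()))
		goto out_a;
	if ((r = reg_c()))
		goto out_b;
	return 0;

out_b:
	unreg_b();
out_a:
	unreg_a();
	return r;
}

int main(void)
{
	printf("register_all: %d\n", register_all());
	return 0;
}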
8770
8771/**
8772 * unregister all event notifiers
8773 */
8774static void
8775qeth_unregister_notifiers(void)
8776{
8777
8778 QETH_DBF_TEXT(trace,5,"unregnot");
8779 BUG_ON(unregister_reboot_notifier(&qeth_reboot_notifier));
8780 BUG_ON(unregister_inetaddr_notifier(&qeth_ip_notifier));
8781#ifdef CONFIG_QETH_IPV6
8782 BUG_ON(unregister_inet6addr_notifier(&qeth_ip6_notifier));
8783#endif /* QETH_IPV6 */
8784
8785}
8786
8787#ifdef CONFIG_QETH_IPV6
8788static int
8789qeth_ipv6_init(void)
8790{
8791 qeth_old_arp_constructor = arp_tbl.constructor;
8792 write_lock_bh(&arp_tbl.lock);
8793 arp_tbl.constructor = qeth_arp_constructor;
8794 write_unlock_bh(&arp_tbl.lock);
8795
8796 arp_direct_ops = (struct neigh_ops*)
8797 kmalloc(sizeof(struct neigh_ops), GFP_KERNEL);
8798 if (!arp_direct_ops)
8799 return -ENOMEM;
8800
8801 memcpy(arp_direct_ops, &arp_direct_ops_template,
8802 sizeof(struct neigh_ops));
8803
8804 return 0;
8805}
8806
8807static void
8808qeth_ipv6_uninit(void)
8809{
8810 write_lock_bh(&arp_tbl.lock);
8811 arp_tbl.constructor = qeth_old_arp_constructor;
8812 write_unlock_bh(&arp_tbl.lock);
8813 kfree(arp_direct_ops);
8814}
8815#endif /* CONFIG_QETH_IPV6 */
8816
8817static void
8818qeth_sysfs_unregister(void)
8819{
8820 s390_root_dev_unregister(qeth_root_dev);
8821 qeth_remove_driver_attributes();
8822 ccw_driver_unregister(&qeth_ccw_driver);
8823 ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
8824}
8825
8826/**
8827 * register qeth at sysfs
8828 */
8829static int
8830qeth_sysfs_register(void)
8831{
8832 int rc;
8833
8834 rc = ccwgroup_driver_register(&qeth_ccwgroup_driver);
8835 if (rc)
8836 goto out;
8837
8838 rc = ccw_driver_register(&qeth_ccw_driver);
8839 if (rc)
8840 goto out_ccw_driver;
8841
8842 rc = qeth_create_driver_attributes();
8843 if (rc)
8844 goto out_qeth_attr;
8845
8846 qeth_root_dev = s390_root_dev_register("qeth");
8847 rc = IS_ERR(qeth_root_dev) ? PTR_ERR(qeth_root_dev) : 0;
8848 if (!rc)
8849 goto out;
8850
8851 qeth_remove_driver_attributes();
8852out_qeth_attr:
8853 ccw_driver_unregister(&qeth_ccw_driver);
8854out_ccw_driver:
8855 ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
8856out:
8857 return rc;
8858}
8859
8860/**
8861 * init function
8862 */
8863static int __init
8864qeth_init(void)
8865{
8866 int rc;
8867
8868 PRINT_INFO("loading %s\n", version);
8869
8870 INIT_LIST_HEAD(&qeth_card_list.list);
8871 INIT_LIST_HEAD(&qeth_notify_list);
8872 spin_lock_init(&qeth_notify_lock);
8873 rwlock_init(&qeth_card_list.rwlock);
8874
8875 rc = qeth_register_dbf_views();
8876 if (rc)
8877 goto out_err;
8878
8879 rc = qeth_sysfs_register();
8880 if (rc)
8881 goto out_dbf;
8882
8883#ifdef CONFIG_QETH_IPV6
8884 rc = qeth_ipv6_init();
8885 if (rc) {
8886 PRINT_ERR("Out of memory during IPv6 init, code = %d\n", rc);
8887 goto out_sysfs;
8888 }
8889#endif /* QETH_IPV6 */
8890 rc = qeth_register_notifiers();
8891 if (rc)
8892 goto out_ipv6;
8893 rc = qeth_create_procfs_entries();
8894 if (rc)
8895 goto out_notifiers;
8896
8897 return rc;
8898
8899out_notifiers:
8900 qeth_unregister_notifiers();
8901out_ipv6:
8902#ifdef CONFIG_QETH_IPV6
8903 qeth_ipv6_uninit();
8904out_sysfs:
8905#endif /* QETH_IPV6 */
8906 qeth_sysfs_unregister();
8907out_dbf:
8908 qeth_unregister_dbf_views();
8909out_err:
8910 PRINT_ERR("Initialization failed with code %d\n", rc);
8911 return rc;
8912}
8913
8914static void
8915__exit qeth_exit(void)
8916{
8917 struct qeth_card *card, *tmp;
8918 unsigned long flags;
8919
8920 QETH_DBF_TEXT(trace,1, "cleanup.");
8921
8922 /*
8923 * We would not need to clean up our devices here, because the
8924 * common device layer calls qeth_remove_device for each device
8925 * as soon as we unregister our driver (done in qeth_sysfs_unregister).
8926 * But we do cleanup here so we can do a "soft" shutdown of our cards.
8927 * qeth_remove_device called by the common device layer would otherwise
8928 * do a "hard" shutdown (card->use_hard_stop is set to one in
8929 * qeth_remove_device).
8930 */
8931again:
8932 read_lock_irqsave(&qeth_card_list.rwlock, flags);
8933 list_for_each_entry_safe(card, tmp, &qeth_card_list.list, list){
8934 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
8935 qeth_set_offline(card->gdev);
8936 qeth_remove_device(card->gdev);
8937 goto again;
8938 }
8939 read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
8940#ifdef CONFIG_QETH_IPV6
8941 qeth_ipv6_uninit();
8942#endif
8943 qeth_unregister_notifiers();
8944 qeth_remove_procfs_entries();
8945 qeth_sysfs_unregister();
8946 qeth_unregister_dbf_views();
8947 printk(KERN_INFO "qeth: removed\n");
8948}
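
The again: loop above solves an iterator-invalidation problem: qeth_set_offline() and qeth_remove_device() may sleep and delete list entries, so the read lock is dropped before acting on a card and the scan restarts from the head afterwards. A userspace sketch of the same restart-after-unlock pattern (list and lock names are invented; build with -lpthread):

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct node { struct node *next; int id; };

static struct node *head;
static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;

static void teardown_all(void)
{
again:
	pthread_rwlock_rdlock(&list_lock);
	if (head) {
		struct node *victim = head;

		/* drop the lock before the (potentially sleeping) teardown;
		 * the list may change underneath us, so rescan from the top */
		pthread_rwlock_unlock(&list_lock);
		printf("tearing down node %d\n", victim->id);
		pthread_rwlock_wrlock(&list_lock);
		head = victim->next;
		pthread_rwlock_unlock(&list_lock);
		free(victim);
		goto again;
	}
	pthread_rwlock_unlock(&list_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->next = head;
		head = n;
	}
	teardown_all();
	return 0;
}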
8949
8950EXPORT_SYMBOL(qeth_osn_register);
8951EXPORT_SYMBOL(qeth_osn_deregister);
8952EXPORT_SYMBOL(qeth_osn_assist);
8953module_init(qeth_init);
8954module_exit(qeth_exit);
8955MODULE_AUTHOR("Frank Pavlic <fpavlic@de.ibm.com>");
8956MODULE_DESCRIPTION("Linux on zSeries OSA Express and HiperSockets support\n" \
8957 "Copyright 2000,2003 IBM Corporation\n");
8958
8959MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_mpc.c b/drivers/s390/net/qeth_mpc.c
deleted file mode 100644
index f29a4bc4f6f2..000000000000
--- a/drivers/s390/net/qeth_mpc.c
+++ /dev/null
@@ -1,269 +0,0 @@
1/*
2 * linux/drivers/s390/net/qeth_mpc.c
3 *
4 * Linux on zSeries OSA Express and HiperSockets support
5 *
6 * Copyright 2000,2003 IBM Corporation
7 * Author(s): Frank Pavlic <fpavlic@de.ibm.com>
8 * Thomas Spatzier <tspat@de.ibm.com>
9 *
10 */
11#include <asm/cio.h>
12#include "qeth_mpc.h"
13
14unsigned char IDX_ACTIVATE_READ[]={
15 0x00,0x00,0x80,0x00, 0x00,0x00,0x00,0x00,
16 0x19,0x01,0x01,0x80, 0x00,0x00,0x00,0x00,
17 0x00,0x00,0x00,0x00, 0x00,0x00,0xc8,0xc1,
18 0xd3,0xd3,0xd6,0xd3, 0xc5,0x40,0x00,0x00,
19 0x00,0x00
20};
21
22unsigned char IDX_ACTIVATE_WRITE[]={
23 0x00,0x00,0x80,0x00, 0x00,0x00,0x00,0x00,
24 0x15,0x01,0x01,0x80, 0x00,0x00,0x00,0x00,
25 0xff,0xff,0x00,0x00, 0x00,0x00,0xc8,0xc1,
26 0xd3,0xd3,0xd6,0xd3, 0xc5,0x40,0x00,0x00,
27 0x00,0x00
28};
29
30unsigned char CM_ENABLE[]={
31 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x01,
32 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x63,
33 0x10,0x00,0x00,0x01,
34 0x00,0x00,0x00,0x00,
35 0x81,0x7e,0x00,0x01, 0x00,0x00,0x00,0x00,
36 0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x23,
37 0x00,0x00,0x23,0x05, 0x00,0x00,0x00,0x00,
38 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
39 0x01,0x00,0x00,0x23, 0x00,0x00,0x00,0x40,
40 0x00,0x0c,0x41,0x02, 0x00,0x17,0x00,0x00,
41 0x00,0x00,0x00,0x00,
42 0x00,0x0b,0x04,0x01,
43 0x7e,0x04,0x05,0x00, 0x01,0x01,0x0f,
44 0x00,
45 0x0c,0x04,0x02,0xff, 0xff,0xff,0xff,0xff,
46 0xff,0xff,0xff
47};
48
49unsigned char CM_SETUP[]={
50 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x02,
51 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x64,
52 0x10,0x00,0x00,0x01,
53 0x00,0x00,0x00,0x00,
54 0x81,0x7e,0x00,0x01, 0x00,0x00,0x00,0x00,
55 0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x24,
56 0x00,0x00,0x24,0x05, 0x00,0x00,0x00,0x00,
57 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
58 0x01,0x00,0x00,0x24, 0x00,0x00,0x00,0x40,
59 0x00,0x0c,0x41,0x04, 0x00,0x18,0x00,0x00,
60 0x00,0x00,0x00,0x00,
61 0x00,0x09,0x04,0x04,
62 0x05,0x00,0x01,0x01, 0x11,
63 0x00,0x09,0x04,
64 0x05,0x05,0x00,0x00, 0x00,0x00,
65 0x00,0x06,
66 0x04,0x06,0xc8,0x00
67};
68
69unsigned char ULP_ENABLE[]={
70 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x03,
71 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x6b,
72 0x10,0x00,0x00,0x01,
73 0x00,0x00,0x00,0x00,
74 0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x01,
75 0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x2b,
76 0x00,0x00,0x2b,0x05, 0x20,0x01,0x00,0x00,
77 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
78 0x01,0x00,0x00,0x2b, 0x00,0x00,0x00,0x40,
79 0x00,0x0c,0x41,0x02, 0x00,0x1f,0x00,0x00,
80 0x00,0x00,0x00,0x00,
81 0x00,0x0b,0x04,0x01,
82 0x03,0x04,0x05,0x00, 0x01,0x01,0x12,
83 0x00,
84 0x14,0x04,0x0a,0x00, 0x20,0x00,0x00,0xff,
85 0xff,0x00,0x08,0xc8, 0xe8,0xc4,0xf1,0xc7,
86 0xf1,0x00,0x00
87};
88
89unsigned char ULP_SETUP[]={
90 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x04,
91 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x6c,
92 0x10,0x00,0x00,0x01,
93 0x00,0x00,0x00,0x00,
94 0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x02,
95 0x00,0x00,0x00,0x01, 0x00,0x24,0x00,0x2c,
96 0x00,0x00,0x2c,0x05, 0x20,0x01,0x00,0x00,
97 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
98 0x01,0x00,0x00,0x2c, 0x00,0x00,0x00,0x40,
99 0x00,0x0c,0x41,0x04, 0x00,0x20,0x00,0x00,
100 0x00,0x00,0x00,0x00,
101 0x00,0x09,0x04,0x04,
102 0x05,0x00,0x01,0x01, 0x14,
103 0x00,0x09,0x04,
104 0x05,0x05,0x30,0x01, 0x00,0x00,
105 0x00,0x06,
106 0x04,0x06,0x40,0x00,
107 0x00,0x08,0x04,0x0b,
108 0x00,0x00,0x00,0x00
109};
110
111unsigned char DM_ACT[]={
112 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x05,
113 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x55,
114 0x10,0x00,0x00,0x01,
115 0x00,0x00,0x00,0x00,
116 0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x03,
117 0x00,0x00,0x00,0x02, 0x00,0x24,0x00,0x15,
118 0x00,0x00,0x2c,0x05, 0x20,0x01,0x00,0x00,
119 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
120 0x01,0x00,0x00,0x15, 0x00,0x00,0x00,0x40,
121 0x00,0x0c,0x43,0x60, 0x00,0x09,0x00,0x00,
122 0x00,0x00,0x00,0x00,
123 0x00,0x09,0x04,0x04,
124 0x05,0x40,0x01,0x01, 0x00
125};
126
127unsigned char IPA_PDU_HEADER[]={
128 0x00,0xe0,0x00,0x00, 0x77,0x77,0x77,0x77,
129 0x00,0x00,0x00,0x14, 0x00,0x00,
130 (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd))/256,
131 (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd))%256,
132 0x10,0x00,0x00,0x01, 0x00,0x00,0x00,0x00,
133 0xc1,0x03,0x00,0x01, 0x00,0x00,0x00,0x00,
134 0x00,0x00,0x00,0x00, 0x00,0x24,
135 sizeof(struct qeth_ipa_cmd)/256,
136 sizeof(struct qeth_ipa_cmd)%256,
137 0x00,
138 sizeof(struct qeth_ipa_cmd)/256,
139 sizeof(struct qeth_ipa_cmd)%256,
140 0x05,
141 0x77,0x77,0x77,0x77,
142 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
143 0x01,0x00,
144 sizeof(struct qeth_ipa_cmd)/256,
145 sizeof(struct qeth_ipa_cmd)%256,
146 0x00,0x00,0x00,0x40,
147};
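
A detail worth noting in IPA_PDU_HEADER: several bytes are computed at compile time, with expressions like sizeof(struct qeth_ipa_cmd)/256 and %256 splitting a 16-bit length into its big-endian byte pair right inside the static initializer. A compilable sketch of the trick (the struct size and header constant below are illustrative, not the real values):

#include <stdio.h>

struct qeth_ipa_cmd_stub { unsigned char payload[172]; };	/* size is illustrative */

#define PDU_HEADER_SIZE 0x40	/* illustrative constant */

static unsigned char hdr[] = {
	(PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd_stub)) / 256,
	(PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd_stub)) % 256,
};

int main(void)
{
	unsigned int len = hdr[0] * 256 + hdr[1];

	printf("encoded length: %u\n", len);
	return 0;
}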
148
149unsigned char WRITE_CCW[]={
150 0x01,CCW_FLAG_SLI,0,0,
151 0,0,0,0
152};
153
154unsigned char READ_CCW[]={
155 0x02,CCW_FLAG_SLI,0,0,
156 0,0,0,0
157};
158
159
160struct ipa_rc_msg {
161 enum qeth_ipa_return_codes rc;
162 char *msg;
163};
164
165static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
166 {IPA_RC_SUCCESS, "success"},
167 {IPA_RC_NOTSUPP, "Command not supported"},
168 {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
169 {IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"},
170 {IPA_RC_UNSUPPORTED_COMMAND, "Command not supported"},
171 {IPA_RC_DUP_IPV6_REMOTE,"ipv6 address already registered remote"},
172 {IPA_RC_DUP_IPV6_HOME, "ipv6 address already registered"},
173 {IPA_RC_UNREGISTERED_ADDR, "Address not registered"},
174 {IPA_RC_NO_ID_AVAILABLE, "No identifiers available"},
175 {IPA_RC_ID_NOT_FOUND, "Identifier not found"},
176 {IPA_RC_INVALID_IP_VERSION, "IP version incorrect"},
177 {IPA_RC_LAN_FRAME_MISMATCH, "LAN and frame mismatch"},
178 {IPA_RC_L2_UNSUPPORTED_CMD, "Unsupported layer 2 command"},
179 {IPA_RC_L2_DUP_MAC, "Duplicate MAC address"},
180 {IPA_RC_L2_ADDR_TABLE_FULL, "Layer2 address table full"},
181 {IPA_RC_L2_DUP_LAYER3_MAC, "Duplicate with layer 3 MAC"},
182 {IPA_RC_L2_GMAC_NOT_FOUND, "GMAC not found"},
183 {IPA_RC_L2_MAC_NOT_FOUND, "L2 mac address not found"},
184 {IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"},
185 {IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"},
186 {IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"},
187 {IPA_RC_DATA_MISMATCH, "Data field mismatch (v4/v6 mixed)"},
188 {IPA_RC_INVALID_MTU_SIZE, "Invalid MTU size"},
189 {IPA_RC_INVALID_LANTYPE, "Invalid LAN type"},
190 {IPA_RC_INVALID_LANNUM, "Invalid LAN num"},
191 {IPA_RC_DUPLICATE_IP_ADDRESS, "Address already registered"},
192 {IPA_RC_IP_ADDR_TABLE_FULL, "IP address table full"},
193 {IPA_RC_LAN_PORT_STATE_ERROR, "LAN port state error"},
194 {IPA_RC_SETIP_NO_STARTLAN, "Setip no startlan received"},
195 {IPA_RC_SETIP_ALREADY_RECEIVED, "Setip already received"},
196 {IPA_RC_IP_ADDR_ALREADY_USED, "IP address already in use on LAN"},
197 {IPA_RC_MULTICAST_FULL, "No task available, multicast full"},
198 {IPA_RC_SETIP_INVALID_VERSION, "SETIP invalid IP version"},
199 {IPA_RC_UNSUPPORTED_SUBCMD, "Unsupported assist subcommand"},
200 {IPA_RC_ARP_ASSIST_NO_ENABLE, "Only partial success, no enable"},
201 {IPA_RC_PRIMARY_ALREADY_DEFINED,"Primary already defined"},
202 {IPA_RC_SECOND_ALREADY_DEFINED, "Secondary already defined"},
203 {IPA_RC_INVALID_SETRTG_INDICATOR,"Invalid SETRTG indicator"},
204 {IPA_RC_MC_ADDR_ALREADY_DEFINED,"Multicast address already defined"},
205 {IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"},
206 {IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"},
207 {IPA_RC_FFFF, "Unknown Error"}
208};
209
210
211
212char *
213qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
214{
215 int x = 0;
216 qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) /
217 sizeof(struct ipa_rc_msg) - 1].rc = rc;
218 while(qeth_ipa_rc_msg[x].rc != rc)
219 x++;
220 return qeth_ipa_rc_msg[x].msg;
221}
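
qeth_get_ipa_msg() above (and qeth_get_ipa_cmd_name() below) avoid an explicit bounds check by using the table's final catch-all entry as a sentinel: the lookup key is first written into that entry, so the unbounded scan always terminates, at worst on the "unknown" row. Since the table is mutated on every call, the trick is not reentrant. A standalone sketch:

#include <stdio.h>

struct rc_msg { int rc; const char *msg; };

static struct rc_msg table[] = {
	{ 0, "success" },
	{ 2, "not supported" },
	{ -1, "unknown error" },	/* catch-all entry doubles as sentinel */
};

static const char *lookup(int rc)
{
	int x = 0;

	/* overwrite the sentinel's key so the scan cannot run off the end */
	table[sizeof(table) / sizeof(table[0]) - 1].rc = rc;
	while (table[x].rc != rc)
		x++;
	return table[x].msg;
}

int main(void)
{
	printf("%s\n", lookup(2));	/* hits a real entry */
	printf("%s\n", lookup(99));	/* falls through to the sentinel */
	return 0;
}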
222
223
224struct ipa_cmd_names {
225 enum qeth_ipa_cmds cmd;
226 char *name;
227};
228
229static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
230 {IPA_CMD_STARTLAN, "startlan"},
231 {IPA_CMD_STOPLAN, "stoplan"},
232 {IPA_CMD_SETVMAC, "setvmac"},
233 {IPA_CMD_DELVMAC, "delvmac"},
234 {IPA_CMD_SETGMAC, "setgmac"},
235 {IPA_CMD_DELGMAC, "delgmac"},
236 {IPA_CMD_SETVLAN, "setvlan"},
237 {IPA_CMD_DELVLAN, "delvlan"},
238 {IPA_CMD_SETCCID, "setccid"},
239 {IPA_CMD_DELCCID, "delccid"},
240 {IPA_CMD_MODCCID, "modccid"},
241 {IPA_CMD_SETIP, "setip"},
242 {IPA_CMD_QIPASSIST, "qipassist"},
243 {IPA_CMD_SETASSPARMS, "setassparms"},
244 {IPA_CMD_SETIPM, "setipm"},
245 {IPA_CMD_DELIPM, "delipm"},
246 {IPA_CMD_SETRTG, "setrtg"},
247 {IPA_CMD_DELIP, "delip"},
248 {IPA_CMD_SETADAPTERPARMS, "setadapterparms"},
249 {IPA_CMD_SET_DIAG_ASS, "set_diag_ass"},
250 {IPA_CMD_CREATE_ADDR, "create_addr"},
251 {IPA_CMD_DESTROY_ADDR, "destroy_addr"},
252 {IPA_CMD_REGISTER_LOCAL_ADDR, "register_local_addr"},
253 {IPA_CMD_UNREGISTER_LOCAL_ADDR, "unregister_local_addr"},
254 {IPA_CMD_UNKNOWN, "unknown"},
255};
256
257char *
258qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
259{
260 int x = 0;
261 qeth_ipa_cmd_names[
262 sizeof(qeth_ipa_cmd_names)/
263 sizeof(struct ipa_cmd_names)-1].cmd = cmd;
264 while(qeth_ipa_cmd_names[x].cmd != cmd)
265 x++;
266 return qeth_ipa_cmd_names[x].name;
267}
268
269
diff --git a/drivers/s390/net/qeth_proc.c b/drivers/s390/net/qeth_proc.c
deleted file mode 100644
index 46ecd03a597e..000000000000
--- a/drivers/s390/net/qeth_proc.c
+++ /dev/null
@@ -1,316 +0,0 @@
1/*
2 *
3 * linux/drivers/s390/net/qeth_proc.c
4 *
5 * Linux on zSeries OSA Express and HiperSockets support
6 * This file contains code related to procfs.
7 *
8 * Copyright 2000,2003 IBM Corporation
9 *
10 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
11 *
12 */
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/proc_fs.h>
16#include <linux/seq_file.h>
17#include <linux/list.h>
18#include <linux/rwsem.h>
19
20#include "qeth.h"
21#include "qeth_mpc.h"
22#include "qeth_fs.h"
23
24/***** /proc/qeth *****/
25#define QETH_PROCFILE_NAME "qeth"
26static struct proc_dir_entry *qeth_procfile;
27
28static int
29qeth_procfile_seq_match(struct device *dev, void *data)
30{
31 return(dev ? 1 : 0);
32}
33
34static void *
35qeth_procfile_seq_start(struct seq_file *s, loff_t *offset)
36{
37 struct device *dev = NULL;
38 loff_t nr = 0;
39
40 if (*offset == 0)
41 return SEQ_START_TOKEN;
42 while (1) {
43 dev = driver_find_device(&qeth_ccwgroup_driver.driver, dev,
44 NULL, qeth_procfile_seq_match);
45 if (++nr == *offset)
46 break;
47 put_device(dev);
48 }
49 return dev;
50}
51
52static void
53qeth_procfile_seq_stop(struct seq_file *s, void* it)
54{
55}
56
57static void *
58qeth_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset)
59{
60 struct device *prev, *next;
61
62 if (it == SEQ_START_TOKEN)
63 prev = NULL;
64 else
65 prev = (struct device *) it;
66 next = driver_find_device(&qeth_ccwgroup_driver.driver,
67 prev, NULL, qeth_procfile_seq_match);
68 (*offset)++;
69 return (void *) next;
70}
71
72static inline const char *
73qeth_get_router_str(struct qeth_card *card, int ipv)
74{
75 enum qeth_routing_types routing_type = NO_ROUTER;
76
77 if (ipv == 4) {
78 routing_type = card->options.route4.type;
79 } else {
80#ifdef CONFIG_QETH_IPV6
81 routing_type = card->options.route6.type;
82#else
83 return "n/a";
84#endif /* CONFIG_QETH_IPV6 */
85 }
86
87 switch (routing_type){
88 case PRIMARY_ROUTER:
89 return "pri";
90 case SECONDARY_ROUTER:
91 return "sec";
92 case MULTICAST_ROUTER:
93 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
94 return "mc+";
95 return "mc";
96 case PRIMARY_CONNECTOR:
97 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
98 return "p+c";
99 return "p.c";
100 case SECONDARY_CONNECTOR:
101 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
102 return "s+c";
103 return "s.c";
104 default: /* NO_ROUTER */
105 return "no";
106 }
107}
108
109static int
110qeth_procfile_seq_show(struct seq_file *s, void *it)
111{
112 struct device *device;
113 struct qeth_card *card;
114 char tmp[12]; /* for qeth_get_prioq_str */
115
116 if (it == SEQ_START_TOKEN){
117 seq_printf(s, "devices CHPID interface "
118 "cardtype port chksum prio-q'ing rtr4 "
119 "rtr6 fsz cnt\n");
120 seq_printf(s, "-------------------------- ----- ---------- "
121 "-------------- ---- ------ ---------- ---- "
122 "---- ----- -----\n");
123 } else {
124 device = (struct device *) it;
125 card = device->driver_data;
126 seq_printf(s, "%s/%s/%s x%02X %-10s %-14s %-4i ",
127 CARD_RDEV_ID(card),
128 CARD_WDEV_ID(card),
129 CARD_DDEV_ID(card),
130 card->info.chpid,
131 QETH_CARD_IFNAME(card),
132 qeth_get_cardname_short(card),
133 card->info.portno);
134 if (card->lan_online)
135 seq_printf(s, "%-6s %-10s %-4s %-4s %-5s %-5i\n",
136 qeth_get_checksum_str(card),
137 qeth_get_prioq_str(card, tmp),
138 qeth_get_router_str(card, 4),
139 qeth_get_router_str(card, 6),
140 qeth_get_bufsize_str(card),
141 card->qdio.in_buf_pool.buf_count);
142 else
143 seq_printf(s, " +++ LAN OFFLINE +++\n");
144 put_device(device);
145 }
146 return 0;
147}
148
149static const struct seq_operations qeth_procfile_seq_ops = {
150 .start = qeth_procfile_seq_start,
151 .stop = qeth_procfile_seq_stop,
152 .next = qeth_procfile_seq_next,
153 .show = qeth_procfile_seq_show,
154};
155
156static int
157qeth_procfile_open(struct inode *inode, struct file *file)
158{
159 return seq_open(file, &qeth_procfile_seq_ops);
160}
161
162static const struct file_operations qeth_procfile_fops = {
163 .owner = THIS_MODULE,
164 .open = qeth_procfile_open,
165 .read = seq_read,
166 .llseek = seq_lseek,
167 .release = seq_release,
168};
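
The /proc/qeth file is built on the seq_file iterator protocol: start() yields SEQ_START_TOKEN for the header (or the record at the requested offset), next() advances and bumps the position, show() renders one record, and stop() releases state. The userspace emulation below walks the same steps over a toy array (the kernel's seq_file core drives the real iteration):

#include <stdio.h>

#define SEQ_START_TOKEN ((void *)1)

static int items[] = { 10, 20, 30 };
static const int nitems = 3;

static void *seq_start(long *pos)
{
	if (*pos == 0)
		return SEQ_START_TOKEN;	/* header line */
	return (*pos <= nitems) ? &items[*pos - 1] : NULL;
}

static void *seq_next(void *it, long *pos)
{
	(void)it;
	(*pos)++;
	return (*pos <= nitems) ? &items[*pos - 1] : NULL;
}

static void seq_show(void *it)
{
	if (it == SEQ_START_TOKEN)
		puts("value");
	else
		printf("%d\n", *(int *)it);
}

int main(void)
{
	long pos = 0;
	void *it;

	for (it = seq_start(&pos); it; it = seq_next(it, &pos))
		seq_show(it);
	return 0;
}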
169
170/***** /proc/qeth_perf *****/
171#define QETH_PERF_PROCFILE_NAME "qeth_perf"
172static struct proc_dir_entry *qeth_perf_procfile;
173
174static int
175qeth_perf_procfile_seq_show(struct seq_file *s, void *it)
176{
177 struct device *device;
178 struct qeth_card *card;
179
180
181 if (it == SEQ_START_TOKEN)
182 return 0;
183
184 device = (struct device *) it;
185 card = device->driver_data;
186 seq_printf(s, "For card with devnos %s/%s/%s (%s):\n",
187 CARD_RDEV_ID(card),
188 CARD_WDEV_ID(card),
189 CARD_DDEV_ID(card),
190 QETH_CARD_IFNAME(card)
191 );
192 if (!card->options.performance_stats)
193 seq_printf(s, "Performance statistics are deactivated.\n");
194 seq_printf(s, " Skb's/buffers received : %lu/%u\n"
195 " Skb's/buffers sent : %lu/%u\n\n",
196 card->stats.rx_packets -
197 card->perf_stats.initial_rx_packets,
198 card->perf_stats.bufs_rec,
199 card->stats.tx_packets -
200 card->perf_stats.initial_tx_packets,
201 card->perf_stats.bufs_sent
202 );
203 seq_printf(s, " Skb's/buffers sent without packing : %lu/%u\n"
204 " Skb's/buffers sent with packing : %u/%u\n\n",
205 card->stats.tx_packets - card->perf_stats.initial_tx_packets
206 - card->perf_stats.skbs_sent_pack,
207 card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack,
208 card->perf_stats.skbs_sent_pack,
209 card->perf_stats.bufs_sent_pack
210 );
211 seq_printf(s, " Skbs sent in SG mode : %u\n"
212 " Skb fragments sent in SG mode : %u\n\n",
213 card->perf_stats.sg_skbs_sent,
214 card->perf_stats.sg_frags_sent);
215 seq_printf(s, " Skbs received in SG mode : %u\n"
216 " Skb fragments received in SG mode : %u\n"
217 " Page allocations for rx SG mode : %u\n\n",
218 card->perf_stats.sg_skbs_rx,
219 card->perf_stats.sg_frags_rx,
220 card->perf_stats.sg_alloc_page_rx);
221 seq_printf(s, " large_send tx (in Kbytes) : %u\n"
222 " large_send count : %u\n\n",
223 card->perf_stats.large_send_bytes >> 10,
224 card->perf_stats.large_send_cnt);
225 seq_printf(s, " Packing state changes no pkg.->packing : %u/%u\n"
226 " Watermarks L/H : %i/%i\n"
227 " Current buffer usage (outbound q's) : "
228 "%i/%i/%i/%i\n\n",
229 card->perf_stats.sc_dp_p, card->perf_stats.sc_p_dp,
230 QETH_LOW_WATERMARK_PACK, QETH_HIGH_WATERMARK_PACK,
231 atomic_read(&card->qdio.out_qs[0]->used_buffers),
232 (card->qdio.no_out_queues > 1)?
233 atomic_read(&card->qdio.out_qs[1]->used_buffers)
234 : 0,
235 (card->qdio.no_out_queues > 2)?
236 atomic_read(&card->qdio.out_qs[2]->used_buffers)
237 : 0,
238 (card->qdio.no_out_queues > 3)?
239 atomic_read(&card->qdio.out_qs[3]->used_buffers)
240 : 0
241 );
242 seq_printf(s, " Inbound handler time (in us) : %u\n"
243 " Inbound handler count : %u\n"
244 " Inbound do_QDIO time (in us) : %u\n"
245 " Inbound do_QDIO count : %u\n\n"
246 " Outbound handler time (in us) : %u\n"
247 " Outbound handler count : %u\n\n"
248 " Outbound time (in us, incl QDIO) : %u\n"
249 " Outbound count : %u\n"
250 " Outbound do_QDIO time (in us) : %u\n"
251 " Outbound do_QDIO count : %u\n\n",
252 card->perf_stats.inbound_time,
253 card->perf_stats.inbound_cnt,
254 card->perf_stats.inbound_do_qdio_time,
255 card->perf_stats.inbound_do_qdio_cnt,
256 card->perf_stats.outbound_handler_time,
257 card->perf_stats.outbound_handler_cnt,
258 card->perf_stats.outbound_time,
259 card->perf_stats.outbound_cnt,
260 card->perf_stats.outbound_do_qdio_time,
261 card->perf_stats.outbound_do_qdio_cnt
262 );
263 put_device(device);
264 return 0;
265}
266
267static const struct seq_operations qeth_perf_procfile_seq_ops = {
268 .start = qeth_procfile_seq_start,
269 .stop = qeth_procfile_seq_stop,
270 .next = qeth_procfile_seq_next,
271 .show = qeth_perf_procfile_seq_show,
272};
273
274static int
275qeth_perf_procfile_open(struct inode *inode, struct file *file)
276{
277 return seq_open(file, &qeth_perf_procfile_seq_ops);
278}
279
280static const struct file_operations qeth_perf_procfile_fops = {
281 .owner = THIS_MODULE,
282 .open = qeth_perf_procfile_open,
283 .read = seq_read,
284 .llseek = seq_lseek,
285 .release = seq_release,
286};
287
288int __init
289qeth_create_procfs_entries(void)
290{
291 qeth_procfile = create_proc_entry(QETH_PROCFILE_NAME,
292 S_IFREG | 0444, NULL);
293 if (qeth_procfile)
294 qeth_procfile->proc_fops = &qeth_procfile_fops;
295
296 qeth_perf_procfile = create_proc_entry(QETH_PERF_PROCFILE_NAME,
297 S_IFREG | 0444, NULL);
298 if (qeth_perf_procfile)
299 qeth_perf_procfile->proc_fops = &qeth_perf_procfile_fops;
300
301 if (qeth_procfile &&
302 qeth_perf_procfile)
303 return 0;
304 else
305 return -ENOMEM;
306}
307
308void __exit
309qeth_remove_procfs_entries(void)
310{
311 if (qeth_procfile)
312 remove_proc_entry(QETH_PROCFILE_NAME, NULL);
313 if (qeth_perf_procfile)
314 remove_proc_entry(QETH_PERF_PROCFILE_NAME, NULL);
315}
316
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
deleted file mode 100644
index 2cc3f3a0e393..000000000000
--- a/drivers/s390/net/qeth_sys.c
+++ /dev/null
@@ -1,1858 +0,0 @@
1/*
2 *
3 * linux/drivers/s390/net/qeth_sys.c
4 *
5 * Linux on zSeries OSA Express and HiperSockets support
6 * This file contains code related to sysfs.
7 *
8 * Copyright 2000,2003 IBM Corporation
9 *
10 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
11 * Frank Pavlic <fpavlic@de.ibm.com>
12 *
13 */
14#include <linux/list.h>
15#include <linux/rwsem.h>
16
17#include <asm/ebcdic.h>
18
19#include "qeth.h"
20#include "qeth_mpc.h"
21#include "qeth_fs.h"
22
23/*****************************************************************************/
24/* */
25/* /sys-fs stuff UNDER DEVELOPMENT !!! */
26/* */
27/*****************************************************************************/
28/* TODO: low/high watermark attributes */
29
30static ssize_t
31qeth_dev_state_show(struct device *dev, struct device_attribute *attr, char *buf)
32{
33 struct qeth_card *card = dev->driver_data;
34 if (!card)
35 return -EINVAL;
36
37 switch (card->state) {
38 case CARD_STATE_DOWN:
39 return sprintf(buf, "DOWN\n");
40 case CARD_STATE_HARDSETUP:
41 return sprintf(buf, "HARDSETUP\n");
42 case CARD_STATE_SOFTSETUP:
43 return sprintf(buf, "SOFTSETUP\n");
44 case CARD_STATE_UP:
45 if (card->lan_online)
46 return sprintf(buf, "UP (LAN ONLINE)\n");
47 else
48 return sprintf(buf, "UP (LAN OFFLINE)\n");
49 case CARD_STATE_RECOVER:
50 return sprintf(buf, "RECOVER\n");
51 default:
52 return sprintf(buf, "UNKNOWN\n");
53 }
54}
55
56static DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL);
57
58static ssize_t
59qeth_dev_chpid_show(struct device *dev, struct device_attribute *attr, char *buf)
60{
61 struct qeth_card *card = dev->driver_data;
62 if (!card)
63 return -EINVAL;
64
65 return sprintf(buf, "%02X\n", card->info.chpid);
66}
67
68static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL);
69
70static ssize_t
71qeth_dev_if_name_show(struct device *dev, struct device_attribute *attr, char *buf)
72{
73 struct qeth_card *card = dev->driver_data;
74 if (!card)
75 return -EINVAL;
76 return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
77}
78
79static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
80
81static ssize_t
82qeth_dev_card_type_show(struct device *dev, struct device_attribute *attr, char *buf)
83{
84 struct qeth_card *card = dev->driver_data;
85 if (!card)
86 return -EINVAL;
87
88 return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
89}
90
91static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL);
92
93static ssize_t
94qeth_dev_portno_show(struct device *dev, struct device_attribute *attr, char *buf)
95{
96 struct qeth_card *card = dev->driver_data;
97 if (!card)
98 return -EINVAL;
99
100 return sprintf(buf, "%i\n", card->info.portno);
101}
102
103static ssize_t
104qeth_dev_portno_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
105{
106 struct qeth_card *card = dev->driver_data;
107 char *tmp;
108 unsigned int portno;
109
110 if (!card)
111 return -EINVAL;
112
113 if ((card->state != CARD_STATE_DOWN) &&
114 (card->state != CARD_STATE_RECOVER))
115 return -EPERM;
116
117 portno = simple_strtoul(buf, &tmp, 16);
118 if (portno > MAX_PORTNO){
119 PRINT_WARN("portno 0x%X is out of range\n", portno);
120 return -EINVAL;
121 }
122
123 card->info.portno = portno;
124 return count;
125}
126
127static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
128
129static ssize_t
130qeth_dev_portname_show(struct device *dev, struct device_attribute *attr, char *buf)
131{
132 struct qeth_card *card = dev->driver_data;
133 char portname[9] = {0, };
134
135 if (!card)
136 return -EINVAL;
137
138 if (card->info.portname_required) {
139 memcpy(portname, card->info.portname + 1, 8);
140 EBCASC(portname, 8);
141 return sprintf(buf, "%s\n", portname);
142 } else
143 return sprintf(buf, "no portname required\n");
144}
145
146static ssize_t
147qeth_dev_portname_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
148{
149 struct qeth_card *card = dev->driver_data;
150 char *tmp;
151 int i;
152
153 if (!card)
154 return -EINVAL;
155
156 if ((card->state != CARD_STATE_DOWN) &&
157 (card->state != CARD_STATE_RECOVER))
158 return -EPERM;
159
160 tmp = strsep((char **) &buf, "\n");
161 if ((strlen(tmp) > 8) || (strlen(tmp) == 0))
162 return -EINVAL;
163
164 card->info.portname[0] = strlen(tmp);
165 /* blank-pad to the full 8-character field */
166 for (i = 1; i < 9; i++)
167 card->info.portname[i] = ' ';
168 strcpy(card->info.portname + 1, tmp);
169 ASCEBC(card->info.portname + 1, 8);
170
171 return count;
172}
173
174static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show,
175 qeth_dev_portname_store);
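
The store handler above builds a fixed 9-byte portname field: one length byte followed by the name blank-padded to eight characters, then converted in place to EBCDIC by ASCEBC(). A sketch of the field layout (EBCDIC conversion omitted; the name is assumed to be 1..8 characters, as the handler enforces):

#include <stdio.h>
#include <string.h>

static void build_portname(unsigned char out[9], const char *name)
{
	size_t len = strlen(name);	/* caller guarantees 1..8 */

	out[0] = (unsigned char)len;
	memset(out + 1, ' ', 8);	/* blank-pad the 8-byte field */
	memcpy(out + 1, name, len);
}

int main(void)
{
	unsigned char field[9];

	build_portname(field, "OSAPORT");
	printf("len=%u name='%.8s'\n", field[0], field + 1);
	return 0;
}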
176
177static ssize_t
178qeth_dev_checksum_show(struct device *dev, struct device_attribute *attr, char *buf)
179{
180 struct qeth_card *card = dev->driver_data;
181
182 if (!card)
183 return -EINVAL;
184
185 return sprintf(buf, "%s checksumming\n", qeth_get_checksum_str(card));
186}
187
188static ssize_t
189qeth_dev_checksum_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
190{
191 struct qeth_card *card = dev->driver_data;
192 char *tmp;
193
194 if (!card)
195 return -EINVAL;
196
197 if ((card->state != CARD_STATE_DOWN) &&
198 (card->state != CARD_STATE_RECOVER))
199 return -EPERM;
200
201 tmp = strsep((char **) &buf, "\n");
202 if (!strcmp(tmp, "sw_checksumming"))
203 card->options.checksum_type = SW_CHECKSUMMING;
204 else if (!strcmp(tmp, "hw_checksumming"))
205 card->options.checksum_type = HW_CHECKSUMMING;
206 else if (!strcmp(tmp, "no_checksumming"))
207 card->options.checksum_type = NO_CHECKSUMMING;
208 else {
209 PRINT_WARN("Unknown checksumming type '%s'\n", tmp);
210 return -EINVAL;
211 }
212 return count;
213}
214
215static DEVICE_ATTR(checksumming, 0644, qeth_dev_checksum_show,
216 qeth_dev_checksum_store);
217
218static ssize_t
219qeth_dev_prioqing_show(struct device *dev, struct device_attribute *attr, char *buf)
220{
221 struct qeth_card *card = dev->driver_data;
222
223 if (!card)
224 return -EINVAL;
225
226 switch (card->qdio.do_prio_queueing) {
227 case QETH_PRIO_Q_ING_PREC:
228 return sprintf(buf, "%s\n", "by precedence");
229 case QETH_PRIO_Q_ING_TOS:
230 return sprintf(buf, "%s\n", "by type of service");
231 default:
232 return sprintf(buf, "always queue %i\n",
233 card->qdio.default_out_queue);
234 }
235}
236
237static ssize_t
238qeth_dev_prioqing_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
239{
240 struct qeth_card *card = dev->driver_data;
241 char *tmp;
242
243 if (!card)
244 return -EINVAL;
245
246 if ((card->state != CARD_STATE_DOWN) &&
247 (card->state != CARD_STATE_RECOVER))
248 return -EPERM;
249
250 /* Priority queueing needs more than one outbound queue;
251 * a device restricted to a single queue cannot support it.
252 */
253 if (card->qdio.no_out_queues == 1) {
254 PRINT_WARN("Priority queueing disabled due "
255 "to hardware limitations!\n");
256 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
257 return -EPERM;
258 }
259
260 tmp = strsep((char **) &buf, "\n");
261 if (!strcmp(tmp, "prio_queueing_prec"))
262 card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
263 else if (!strcmp(tmp, "prio_queueing_tos"))
264 card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
265 else if (!strcmp(tmp, "no_prio_queueing:0")) {
266 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
267 card->qdio.default_out_queue = 0;
268 } else if (!strcmp(tmp, "no_prio_queueing:1")) {
269 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
270 card->qdio.default_out_queue = 1;
271 } else if (!strcmp(tmp, "no_prio_queueing:2")) {
272 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
273 card->qdio.default_out_queue = 2;
274 } else if (!strcmp(tmp, "no_prio_queueing:3")) {
275 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
276 card->qdio.default_out_queue = 3;
277 } else if (!strcmp(tmp, "no_prio_queueing")) {
278 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
279 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
280 } else {
281 PRINT_WARN("Unknown queueing type '%s'\n", tmp);
282 return -EINVAL;
283 }
284 return count;
285}
286
287static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show,
288 qeth_dev_prioqing_store);
289
290static ssize_t
291qeth_dev_bufcnt_show(struct device *dev, struct device_attribute *attr, char *buf)
292{
293 struct qeth_card *card = dev->driver_data;
294
295 if (!card)
296 return -EINVAL;
297
298 return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
299}
300
301static ssize_t
302qeth_dev_bufcnt_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
303{
304 struct qeth_card *card = dev->driver_data;
305 char *tmp;
306 int cnt, old_cnt;
307 int rc;
308
309 if (!card)
310 return -EINVAL;
311
312 if ((card->state != CARD_STATE_DOWN) &&
313 (card->state != CARD_STATE_RECOVER))
314 return -EPERM;
315
316 old_cnt = card->qdio.in_buf_pool.buf_count;
317 cnt = simple_strtoul(buf, &tmp, 10);
318 cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
319 ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
320 if (old_cnt != cnt) {
321 if ((rc = qeth_realloc_buffer_pool(card, cnt)))
322 PRINT_WARN("Error (%d) while setting "
323 "buffer count.\n", rc);
324 }
325 return count;
326}
327
328static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
329 qeth_dev_bufcnt_store);
330
331static ssize_t
332qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route,
333 char *buf)
334{
335 switch (route->type) {
336 case PRIMARY_ROUTER:
337 return sprintf(buf, "%s\n", "primary router");
338 case SECONDARY_ROUTER:
339 return sprintf(buf, "%s\n", "secondary router");
340 case MULTICAST_ROUTER:
341 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
342 return sprintf(buf, "%s\n", "multicast router+");
343 else
344 return sprintf(buf, "%s\n", "multicast router");
345 case PRIMARY_CONNECTOR:
346 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
347 return sprintf(buf, "%s\n", "primary connector+");
348 else
349 return sprintf(buf, "%s\n", "primary connector");
350 case SECONDARY_CONNECTOR:
351 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
352 return sprintf(buf, "%s\n", "secondary connector+");
353 else
354 return sprintf(buf, "%s\n", "secondary connector");
355 default:
356 return sprintf(buf, "%s\n", "no");
357 }
358}
359
360static ssize_t
361qeth_dev_route4_show(struct device *dev, struct device_attribute *attr, char *buf)
362{
363 struct qeth_card *card = dev->driver_data;
364
365 if (!card)
366 return -EINVAL;
367
368 return qeth_dev_route_show(card, &card->options.route4, buf);
369}
370
371static ssize_t
372qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route,
373 enum qeth_prot_versions prot, const char *buf, size_t count)
374{
375 enum qeth_routing_types old_route_type = route->type;
376 char *tmp;
377 int rc;
378
379 tmp = strsep((char **) &buf, "\n");
380
381 if (!strcmp(tmp, "no_router")){
382 route->type = NO_ROUTER;
383 } else if (!strcmp(tmp, "primary_connector")) {
384 route->type = PRIMARY_CONNECTOR;
385 } else if (!strcmp(tmp, "secondary_connector")) {
386 route->type = SECONDARY_CONNECTOR;
387 } else if (!strcmp(tmp, "primary_router")) {
388 route->type = PRIMARY_ROUTER;
389 } else if (!strcmp(tmp, "secondary_router")) {
390 route->type = SECONDARY_ROUTER;
391 } else if (!strcmp(tmp, "multicast_router")) {
392 route->type = MULTICAST_ROUTER;
393 } else {
394 PRINT_WARN("Invalid routing type '%s'.\n", tmp);
395 return -EINVAL;
396 }
397 if (((card->state == CARD_STATE_SOFTSETUP) ||
398 (card->state == CARD_STATE_UP)) &&
399 (old_route_type != route->type)){
400 if (prot == QETH_PROT_IPV4)
401 rc = qeth_setrouting_v4(card);
402 else if (prot == QETH_PROT_IPV6)
403 rc = qeth_setrouting_v6(card);
404 }
405 return count;
406}
407
408static ssize_t
409qeth_dev_route4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
410{
411 struct qeth_card *card = dev->driver_data;
412
413 if (!card)
414 return -EINVAL;
415
416 return qeth_dev_route_store(card, &card->options.route4,
417 QETH_PROT_IPV4, buf, count);
418}
419
420static DEVICE_ATTR(route4, 0644, qeth_dev_route4_show, qeth_dev_route4_store);
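
These store handlers all share one parsing idiom: the sysfs buffer arrives newline-terminated, strsep() splits off the keyword, and a strcmp() ladder maps it onto an enum value. The same parse in isolation (the numeric return values are illustrative):

#include <stdio.h>
#include <string.h>

static int parse_route(char *buf)
{
	char *tmp = strsep(&buf, "\n");	/* strip the trailing newline */

	if (!strcmp(tmp, "no_router"))
		return 0;
	if (!strcmp(tmp, "primary_router"))
		return 1;
	if (!strcmp(tmp, "multicast_router"))
		return 2;
	return -1;	/* the handler returns -EINVAL */
}

int main(void)
{
	char in[] = "primary_router\n";

	printf("type: %d\n", parse_route(in));
	return 0;
}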
421
422#ifdef CONFIG_QETH_IPV6
423static ssize_t
424qeth_dev_route6_show(struct device *dev, struct device_attribute *attr, char *buf)
425{
426 struct qeth_card *card = dev->driver_data;
427
428 if (!card)
429 return -EINVAL;
430
431 if (!qeth_is_supported(card, IPA_IPV6))
432 return sprintf(buf, "%s\n", "n/a");
433
434 return qeth_dev_route_show(card, &card->options.route6, buf);
435}
436
437static ssize_t
438qeth_dev_route6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
439{
440 struct qeth_card *card = dev->driver_data;
441
442 if (!card)
443 return -EINVAL;
444
445 if (!qeth_is_supported(card, IPA_IPV6)){
446 PRINT_WARN("IPv6 not supported for interface %s.\n"
447 "Routing status not changed.\n",
448 QETH_CARD_IFNAME(card));
449 return -ENOTSUPP;
450 }
451
452 return qeth_dev_route_store(card, &card->options.route6,
453 QETH_PROT_IPV6, buf, count);
454}
455
456static DEVICE_ATTR(route6, 0644, qeth_dev_route6_show, qeth_dev_route6_store);
457#endif
458
459static ssize_t
460qeth_dev_add_hhlen_show(struct device *dev, struct device_attribute *attr, char *buf)
461{
462 struct qeth_card *card = dev->driver_data;
463
464 if (!card)
465 return -EINVAL;
466
467 return sprintf(buf, "%i\n", card->options.add_hhlen);
468}
469
470static ssize_t
471qeth_dev_add_hhlen_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
472{
473 struct qeth_card *card = dev->driver_data;
474 char *tmp;
475 int i;
476
477 if (!card)
478 return -EINVAL;
479
480 if ((card->state != CARD_STATE_DOWN) &&
481 (card->state != CARD_STATE_RECOVER))
482 return -EPERM;
483
484 i = simple_strtoul(buf, &tmp, 10);
485 if ((i < 0) || (i > MAX_ADD_HHLEN)) {
486 PRINT_WARN("add_hhlen out of range\n");
487 return -EINVAL;
488 }
489 card->options.add_hhlen = i;
490
491 return count;
492}
493
494static DEVICE_ATTR(add_hhlen, 0644, qeth_dev_add_hhlen_show,
495 qeth_dev_add_hhlen_store);
496
497static ssize_t
498qeth_dev_fake_ll_show(struct device *dev, struct device_attribute *attr, char *buf)
499{
500 struct qeth_card *card = dev->driver_data;
501
502 if (!card)
503 return -EINVAL;
504
505 return sprintf(buf, "%i\n", card->options.fake_ll? 1:0);
506}
507
508static ssize_t
509qeth_dev_fake_ll_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
510{
511 struct qeth_card *card = dev->driver_data;
512 char *tmp;
513 int i;
514
515 if (!card)
516 return -EINVAL;
517
518 if ((card->state != CARD_STATE_DOWN) &&
519 (card->state != CARD_STATE_RECOVER))
520 return -EPERM;
521
522 i = simple_strtoul(buf, &tmp, 16);
523 if ((i != 0) && (i != 1)) {
524 PRINT_WARN("fake_ll: write 0 or 1 to this file!\n");
525 return -EINVAL;
526 }
527 card->options.fake_ll = i;
528 return count;
529}
530
531static DEVICE_ATTR(fake_ll, 0644, qeth_dev_fake_ll_show,
532 qeth_dev_fake_ll_store);
533
534static ssize_t
535qeth_dev_fake_broadcast_show(struct device *dev, struct device_attribute *attr, char *buf)
536{
537 struct qeth_card *card = dev->driver_data;
538
539 if (!card)
540 return -EINVAL;
541
542 return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0);
543}
544
545static ssize_t
546qeth_dev_fake_broadcast_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
547{
548 struct qeth_card *card = dev->driver_data;
549 char *tmp;
550 int i;
551
552 if (!card)
553 return -EINVAL;
554
555 if ((card->state != CARD_STATE_DOWN) &&
556 (card->state != CARD_STATE_RECOVER))
557 return -EPERM;
558
559 i = simple_strtoul(buf, &tmp, 16);
560 if ((i == 0) || (i == 1))
561 card->options.fake_broadcast = i;
562 else {
563 PRINT_WARN("fake_broadcast: write 0 or 1 to this file!\n");
564 return -EINVAL;
565 }
566 return count;
567}
568
569static DEVICE_ATTR(fake_broadcast, 0644, qeth_dev_fake_broadcast_show,
570 qeth_dev_fake_broadcast_store);
571
572static ssize_t
573qeth_dev_recover_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
574{
575 struct qeth_card *card = dev->driver_data;
576 char *tmp;
577 int i;
578
579 if (!card)
580 return -EINVAL;
581
582 if (card->state != CARD_STATE_UP)
583 return -EPERM;
584
585 i = simple_strtoul(buf, &tmp, 16);
586 if (i == 1)
587 qeth_schedule_recovery(card);
588
589 return count;
590}
591
592static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
593
594static ssize_t
595qeth_dev_broadcast_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
596{
597 struct qeth_card *card = dev->driver_data;
598
599 if (!card)
600 return -EINVAL;
601
602 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
603 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
604 return sprintf(buf, "n/a\n");
605
606 return sprintf(buf, "%s\n", (card->options.broadcast_mode ==
607 QETH_TR_BROADCAST_ALLRINGS)?
608 "all rings":"local");
609}
610
611static ssize_t
612qeth_dev_broadcast_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
613{
614 struct qeth_card *card = dev->driver_data;
615 char *tmp;
616
617 if (!card)
618 return -EINVAL;
619
620 if ((card->state != CARD_STATE_DOWN) &&
621 (card->state != CARD_STATE_RECOVER))
622 return -EPERM;
623
624 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
625 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))){
626 PRINT_WARN("Device is not a tokenring device!\n");
627 return -EINVAL;
628 }
629
630 tmp = strsep((char **) &buf, "\n");
631
632 if (!strcmp(tmp, "local")){
633 card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL;
634 return count;
635 } else if (!strcmp(tmp, "all_rings")) {
636 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
637 return count;
638 } else {
639 PRINT_WARN("broadcast_mode: invalid mode %s!\n",
640 tmp);
641 return -EINVAL;
642 }
643 return count;
644}
645
646static DEVICE_ATTR(broadcast_mode, 0644, qeth_dev_broadcast_mode_show,
647 qeth_dev_broadcast_mode_store);
648
649static ssize_t
650qeth_dev_canonical_macaddr_show(struct device *dev, struct device_attribute *attr, char *buf)
651{
652 struct qeth_card *card = dev->driver_data;
653
654 if (!card)
655 return -EINVAL;
656
657 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
658 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
659 return sprintf(buf, "n/a\n");
660
661 return sprintf(buf, "%i\n", (card->options.macaddr_mode ==
662 QETH_TR_MACADDR_CANONICAL)? 1:0);
663}
664
665static ssize_t
666qeth_dev_canonical_macaddr_store(struct device *dev, struct device_attribute *attr, const char *buf,
667 size_t count)
668{
669 struct qeth_card *card = dev->driver_data;
670 char *tmp;
671 int i;
672
673 if (!card)
674 return -EINVAL;
675
676 if ((card->state != CARD_STATE_DOWN) &&
677 (card->state != CARD_STATE_RECOVER))
678 return -EPERM;
679
680 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
681 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))){
682 PRINT_WARN("Device is not a tokenring device!\n");
683 return -EINVAL;
684 }
685
686 i = simple_strtoul(buf, &tmp, 16);
687 if ((i == 0) || (i == 1))
688 card->options.macaddr_mode = i?
689 QETH_TR_MACADDR_CANONICAL :
690 QETH_TR_MACADDR_NONCANONICAL;
691 else {
692 PRINT_WARN("canonical_macaddr: write 0 or 1 to this file!\n");
693 return -EINVAL;
694 }
695 return count;
696}
697
698static DEVICE_ATTR(canonical_macaddr, 0644, qeth_dev_canonical_macaddr_show,
699 qeth_dev_canonical_macaddr_store);
700
701static ssize_t
702qeth_dev_layer2_show(struct device *dev, struct device_attribute *attr, char *buf)
703{
704 struct qeth_card *card = dev->driver_data;
705
706 if (!card)
707 return -EINVAL;
708
709 return sprintf(buf, "%i\n", card->options.layer2 ? 1:0);
710}
711
712static ssize_t
713qeth_dev_layer2_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
714{
715 struct qeth_card *card = dev->driver_data;
716 char *tmp;
717 int i;
718
719 if (!card)
720 return -EINVAL;
721 if (card->info.type == QETH_CARD_TYPE_IQD) {
722 PRINT_WARN("Layer2 on Hipersockets is not supported!\n");
723 return -EPERM;
724 }
725
726 if (((card->state != CARD_STATE_DOWN) &&
727 (card->state != CARD_STATE_RECOVER)))
728 return -EPERM;
729
730 i = simple_strtoul(buf, &tmp, 16);
731 if ((i == 0) || (i == 1))
732 card->options.layer2 = i;
733 else {
734 PRINT_WARN("layer2: write 0 or 1 to this file!\n");
735 return -EINVAL;
736 }
737 return count;
738}
739
740static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
741 qeth_dev_layer2_store);
742
743static ssize_t
744qeth_dev_performance_stats_show(struct device *dev, struct device_attribute *attr, char *buf)
745{
746 struct qeth_card *card = dev->driver_data;
747
748 if (!card)
749 return -EINVAL;
750
751 return sprintf(buf, "%i\n", card->options.performance_stats ? 1:0);
752}
753
754static ssize_t
755qeth_dev_performance_stats_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
756{
757 struct qeth_card *card = dev->driver_data;
758 char *tmp;
759 int i;
760
761 if (!card)
762 return -EINVAL;
763
764 i = simple_strtoul(buf, &tmp, 16);
765 if ((i == 0) || (i == 1)) {
766 if (i == card->options.performance_stats)
767 return count;
768 card->options.performance_stats = i;
769 if (i == 0)
770 memset(&card->perf_stats, 0,
771 sizeof(struct qeth_perf_stats));
772 card->perf_stats.initial_rx_packets = card->stats.rx_packets;
773 card->perf_stats.initial_tx_packets = card->stats.tx_packets;
774 } else {
775 PRINT_WARN("performance_stats: write 0 or 1 to this file!\n");
776 return -EINVAL;
777 }
778 return count;
779}
780
781static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show,
782 qeth_dev_performance_stats_store);
783
784static ssize_t
785qeth_dev_large_send_show(struct device *dev, struct device_attribute *attr, char *buf)
786{
787 struct qeth_card *card = dev->driver_data;
788
789 if (!card)
790 return -EINVAL;
791
792 switch (card->options.large_send) {
793 case QETH_LARGE_SEND_NO:
794 return sprintf(buf, "%s\n", "no");
795 case QETH_LARGE_SEND_EDDP:
796 return sprintf(buf, "%s\n", "EDDP");
797 case QETH_LARGE_SEND_TSO:
798 return sprintf(buf, "%s\n", "TSO");
799 default:
800 return sprintf(buf, "%s\n", "N/A");
801 }
802}
803
804static ssize_t
805qeth_dev_large_send_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
806{
807 struct qeth_card *card = dev->driver_data;
808 enum qeth_large_send_types type;
809 int rc = 0;
810 char *tmp;
811
812 if (!card)
813 return -EINVAL;
814 tmp = strsep((char **) &buf, "\n");
815 if (!strcmp(tmp, "no")){
816 type = QETH_LARGE_SEND_NO;
817 } else if (!strcmp(tmp, "EDDP")) {
818 type = QETH_LARGE_SEND_EDDP;
819 } else if (!strcmp(tmp, "TSO")) {
820 type = QETH_LARGE_SEND_TSO;
821 } else {
822 PRINT_WARN("large_send: invalid mode %s!\n", tmp);
823 return -EINVAL;
824 }
825 if (card->options.large_send == type)
826 return count;
827 if ((rc = qeth_set_large_send(card, type)))
828 return rc;
829 return count;
830}
831
832static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show,
833 qeth_dev_large_send_store);
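The accepted tokens are matched with a plain, case-sensitive strcmp(), so exactly "no", "EDDP" or "TSO" (newline-terminated) must be written; anything else, including lower-case variants, is rejected with -EINVAL. A successful change is handed to qeth_set_large_send(), which reconfigures the card. For illustration, writing "TSO" to a device's large_send attribute under /sys/bus/ccwgroup/drivers/qeth/ (the exact path depends on the device bus ID) enables TCP segmentation offload.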
834
835static ssize_t
836qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
837{
838
839 if (!card)
840 return -EINVAL;
841
842 return sprintf(buf, "%i\n", value);
843}
844
845static ssize_t
846qeth_dev_blkt_store(struct qeth_card *card, const char *buf, size_t count,
847 int *value, int max_value)
848{
849 char *tmp;
850 int i;
851
852 if (!card)
853 return -EINVAL;
854
855 if ((card->state != CARD_STATE_DOWN) &&
856 (card->state != CARD_STATE_RECOVER))
857 return -EPERM;
858
859 i = simple_strtoul(buf, &tmp, 10);
860 if (i <= max_value) {
861 *value = i;
862 } else {
863		PRINT_WARN("blkt: write values between"
864			   " 0 and %d to this file!\n", max_value);
865 return -EINVAL;
866 }
867 return count;
868}
869
870static ssize_t
871qeth_dev_blkt_total_show(struct device *dev, struct device_attribute *attr, char *buf)
872{
873 struct qeth_card *card = dev->driver_data;
874
875 return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total);
876}
877
878
879static ssize_t
880qeth_dev_blkt_total_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
881{
882 struct qeth_card *card = dev->driver_data;
883
884 return qeth_dev_blkt_store(card, buf, count,
885				    &card->info.blkt.time_total, 1000);
886}
887
888
889
890static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show,
891 qeth_dev_blkt_total_store);
892
893static ssize_t
894qeth_dev_blkt_inter_show(struct device *dev, struct device_attribute *attr, char *buf)
895{
896 struct qeth_card *card = dev->driver_data;
897
898 return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet);
899}
900
901
902static ssize_t
903qeth_dev_blkt_inter_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
904{
905 struct qeth_card *card = dev->driver_data;
906
907 return qeth_dev_blkt_store(card, buf, count,
908				    &card->info.blkt.inter_packet, 100);
909}
910
911static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
912 qeth_dev_blkt_inter_store);
913
914static ssize_t
915qeth_dev_blkt_inter_jumbo_show(struct device *dev, struct device_attribute *attr, char *buf)
916{
917 struct qeth_card *card = dev->driver_data;
918
919 return qeth_dev_blkt_show(buf, card,
920 card->info.blkt.inter_packet_jumbo);
921}
922
923
924static ssize_t
925qeth_dev_blkt_inter_jumbo_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
926{
927 struct qeth_card *card = dev->driver_data;
928
929 return qeth_dev_blkt_store(card, buf, count,
930				    &card->info.blkt.inter_packet_jumbo, 100);
931}
932
933static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
934 qeth_dev_blkt_inter_jumbo_store);
935
936static struct device_attribute * qeth_blkt_device_attrs[] = {
937 &dev_attr_total,
938 &dev_attr_inter,
939 &dev_attr_inter_jumbo,
940 NULL,
941};
942
943static struct attribute_group qeth_device_blkt_group = {
944 .name = "blkt",
945 .attrs = (struct attribute **)qeth_blkt_device_attrs,
946};
947
948static struct device_attribute * qeth_device_attrs[] = {
949 &dev_attr_state,
950 &dev_attr_chpid,
951 &dev_attr_if_name,
952 &dev_attr_card_type,
953 &dev_attr_portno,
954 &dev_attr_portname,
955 &dev_attr_checksumming,
956 &dev_attr_priority_queueing,
957 &dev_attr_buffer_count,
958 &dev_attr_route4,
959#ifdef CONFIG_QETH_IPV6
960 &dev_attr_route6,
961#endif
962 &dev_attr_add_hhlen,
963 &dev_attr_fake_ll,
964 &dev_attr_fake_broadcast,
965 &dev_attr_recover,
966 &dev_attr_broadcast_mode,
967 &dev_attr_canonical_macaddr,
968 &dev_attr_layer2,
969 &dev_attr_large_send,
970 &dev_attr_performance_stats,
971 NULL,
972};
973
974static struct attribute_group qeth_device_attr_group = {
975 .attrs = (struct attribute **)qeth_device_attrs,
976};
977
978static struct device_attribute * qeth_osn_device_attrs[] = {
979 &dev_attr_state,
980 &dev_attr_chpid,
981 &dev_attr_if_name,
982 &dev_attr_card_type,
983 &dev_attr_buffer_count,
984 &dev_attr_recover,
985 NULL,
986};
987
988static struct attribute_group qeth_osn_device_attr_group = {
989 .attrs = (struct attribute **)qeth_osn_device_attrs,
990};
991
992#define QETH_DEVICE_ATTR(_id,_name,_mode,_show,_store) \
993struct device_attribute dev_attr_##_id = { \
994 .attr = {.name=__stringify(_name), .mode=_mode, },\
995 .show = _show, \
996 .store = _store, \
997};
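For orientation, here is roughly what one of the instantiations used below expands to (a sketch based on the macro body and __stringify(); not literal preprocessor output). The sysfs file is created with the short _name inside the attribute group's subdirectory, while the C identifier keeps the unique _id:

	/* static QETH_DEVICE_ATTR(ipato_enable, enable, 0644,
	 *			   qeth_dev_ipato_enable_show,
	 *			   qeth_dev_ipato_enable_store);
	 * expands to approximately: */
	static struct device_attribute dev_attr_ipato_enable = {
		.attr = { .name = "enable", .mode = 0644, },
		.show = qeth_dev_ipato_enable_show,
		.store = qeth_dev_ipato_enable_store,
	};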
998
999static int
1000qeth_check_layer2(struct qeth_card *card)
1001{
1002 if (card->options.layer2)
1003 return -EPERM;
1004 return 0;
1005}
1006
1007
1008static ssize_t
1009qeth_dev_ipato_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
1010{
1011 struct qeth_card *card = dev->driver_data;
1012
1013 if (!card)
1014 return -EINVAL;
1015
1016 if (qeth_check_layer2(card))
1017 return -EPERM;
1018 return sprintf(buf, "%i\n", card->ipato.enabled? 1:0);
1019}
1020
1021static ssize_t
1022qeth_dev_ipato_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1023{
1024 struct qeth_card *card = dev->driver_data;
1025 char *tmp;
1026
1027 if (!card)
1028 return -EINVAL;
1029
1030 if ((card->state != CARD_STATE_DOWN) &&
1031 (card->state != CARD_STATE_RECOVER))
1032 return -EPERM;
1033
1034 if (qeth_check_layer2(card))
1035 return -EPERM;
1036
1037 tmp = strsep((char **) &buf, "\n");
1038 if (!strcmp(tmp, "toggle")){
1039 card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
1040 } else if (!strcmp(tmp, "1")){
1041 card->ipato.enabled = 1;
1042 } else if (!strcmp(tmp, "0")){
1043 card->ipato.enabled = 0;
1044 } else {
1045 PRINT_WARN("ipato_enable: write 0, 1 or 'toggle' to "
1046 "this file\n");
1047 return -EINVAL;
1048 }
1049 return count;
1050}
1051
1052static QETH_DEVICE_ATTR(ipato_enable, enable, 0644,
1053 qeth_dev_ipato_enable_show,
1054 qeth_dev_ipato_enable_store);
1055
1056static ssize_t
1057qeth_dev_ipato_invert4_show(struct device *dev, struct device_attribute *attr, char *buf)
1058{
1059 struct qeth_card *card = dev->driver_data;
1060
1061 if (!card)
1062 return -EINVAL;
1063
1064 if (qeth_check_layer2(card))
1065 return -EPERM;
1066
1067 return sprintf(buf, "%i\n", card->ipato.invert4? 1:0);
1068}
1069
1070static ssize_t
1071qeth_dev_ipato_invert4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1072{
1073 struct qeth_card *card = dev->driver_data;
1074 char *tmp;
1075
1076 if (!card)
1077 return -EINVAL;
1078
1079 if (qeth_check_layer2(card))
1080 return -EPERM;
1081
1082 tmp = strsep((char **) &buf, "\n");
1083 if (!strcmp(tmp, "toggle")){
1084 card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
1085 } else if (!strcmp(tmp, "1")){
1086 card->ipato.invert4 = 1;
1087 } else if (!strcmp(tmp, "0")){
1088 card->ipato.invert4 = 0;
1089 } else {
1090 PRINT_WARN("ipato_invert4: write 0, 1 or 'toggle' to "
1091 "this file\n");
1092 return -EINVAL;
1093 }
1094 return count;
1095}
1096
1097static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
1098 qeth_dev_ipato_invert4_show,
1099 qeth_dev_ipato_invert4_store);
1100
1101static ssize_t
1102qeth_dev_ipato_add_show(char *buf, struct qeth_card *card,
1103 enum qeth_prot_versions proto)
1104{
1105 struct qeth_ipato_entry *ipatoe;
1106 unsigned long flags;
1107 char addr_str[40];
1108 int entry_len; /* length of 1 entry string, differs between v4 and v6 */
1109 int i = 0;
1110
1111 if (qeth_check_layer2(card))
1112 return -EPERM;
1113
1114 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
1115 /* add strlen for "/<mask>\n" */
1116 entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
1117 spin_lock_irqsave(&card->ip_lock, flags);
1118 list_for_each_entry(ipatoe, &card->ipato.entries, entry){
1119 if (ipatoe->proto != proto)
1120 continue;
1121 /* String must not be longer than PAGE_SIZE. So we check if
1122	 * string length gets near PAGE_SIZE. Then we can safely display
1123 * the next IPv6 address (worst case, compared to IPv4) */
1124 if ((PAGE_SIZE - i) <= entry_len)
1125 break;
1126 qeth_ipaddr_to_string(proto, ipatoe->addr, addr_str);
1127 i += snprintf(buf + i, PAGE_SIZE - i,
1128 "%s/%i\n", addr_str, ipatoe->mask_bits);
1129 }
1130 spin_unlock_irqrestore(&card->ip_lock, flags);
1131 i += snprintf(buf + i, PAGE_SIZE - i, "\n");
1132
1133 return i;
1134}
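Worked example for the sizing guard above: an IPv4 entry is budgeted at 12 characters for the address plus 5 for the "/<mask>\n" suffix and terminator (17 bytes), an IPv6 entry at 40 + 6 = 46 bytes; the loop stops emitting entries as soon as fewer than entry_len bytes of the one-page sysfs buffer remain, so even a worst-case IPv6 line never overruns PAGE_SIZE.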
1135
1136static ssize_t
1137qeth_dev_ipato_add4_show(struct device *dev, struct device_attribute *attr, char *buf)
1138{
1139 struct qeth_card *card = dev->driver_data;
1140
1141 if (!card)
1142 return -EINVAL;
1143
1144 return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
1145}
1146
1147static int
1148qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto,
1149 u8 *addr, int *mask_bits)
1150{
1151 const char *start, *end;
1152 char *tmp;
1153 char buffer[40] = {0, };
1154
1155 start = buf;
1156 /* get address string */
1157 end = strchr(start, '/');
1158 if (!end || (end - start >= 40)){
1159 PRINT_WARN("Invalid format for ipato_addx/delx. "
1160 "Use <ip addr>/<mask bits>\n");
1161 return -EINVAL;
1162 }
1163 strncpy(buffer, start, end - start);
1164 if (qeth_string_to_ipaddr(buffer, proto, addr)){
1165 PRINT_WARN("Invalid IP address format!\n");
1166 return -EINVAL;
1167 }
1168 start = end + 1;
1169 *mask_bits = simple_strtoul(start, &tmp, 10);
1170 if (!strlen(start) ||
1171 (tmp == start) ||
1172 (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) {
1173 PRINT_WARN("Invalid mask bits for ipato_addx/delx !\n");
1174 return -EINVAL;
1175 }
1176 return 0;
1177}
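A hypothetical caller, to make the accepted "<ip addr>/<mask bits>" format concrete (the address and mask below are invented):

	u8 addr[16];
	int mask_bits;

	/* fills addr with the binary address and mask_bits with 16;
	 * returns -EINVAL for a missing '/', a malformed address, or
	 * a mask wider than 32 (IPv4) / 128 (IPv6) */
	if (qeth_parse_ipatoe("192.168.0.0/16", QETH_PROT_IPV4,
			      addr, &mask_bits) == 0) {
		/* use addr/mask_bits, e.g. to fill a qeth_ipato_entry */
	}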
1178
1179static ssize_t
1180qeth_dev_ipato_add_store(const char *buf, size_t count,
1181 struct qeth_card *card, enum qeth_prot_versions proto)
1182{
1183 struct qeth_ipato_entry *ipatoe;
1184 u8 addr[16];
1185 int mask_bits;
1186 int rc;
1187
1188 if (qeth_check_layer2(card))
1189 return -EPERM;
1190 if ((rc = qeth_parse_ipatoe(buf, proto, addr, &mask_bits)))
1191 return rc;
1192
1193 if (!(ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL))){
1194 PRINT_WARN("No memory to allocate ipato entry\n");
1195 return -ENOMEM;
1196 }
1197 ipatoe->proto = proto;
1198 memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
1199 ipatoe->mask_bits = mask_bits;
1200
1201 if ((rc = qeth_add_ipato_entry(card, ipatoe))){
1202 kfree(ipatoe);
1203 return rc;
1204 }
1205
1206 return count;
1207}
1208
1209static ssize_t
1210qeth_dev_ipato_add4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1211{
1212 struct qeth_card *card = dev->driver_data;
1213
1214 if (!card)
1215 return -EINVAL;
1216
1217 return qeth_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4);
1218}
1219
1220static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
1221 qeth_dev_ipato_add4_show,
1222 qeth_dev_ipato_add4_store);
1223
1224static ssize_t
1225qeth_dev_ipato_del_store(const char *buf, size_t count,
1226 struct qeth_card *card, enum qeth_prot_versions proto)
1227{
1228 u8 addr[16];
1229 int mask_bits;
1230 int rc;
1231
1232 if (qeth_check_layer2(card))
1233 return -EPERM;
1234 if ((rc = qeth_parse_ipatoe(buf, proto, addr, &mask_bits)))
1235 return rc;
1236
1237 qeth_del_ipato_entry(card, proto, addr, mask_bits);
1238
1239 return count;
1240}
1241
1242static ssize_t
1243qeth_dev_ipato_del4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1244{
1245 struct qeth_card *card = dev->driver_data;
1246
1247 if (!card)
1248 return -EINVAL;
1249
1250 return qeth_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4);
1251}
1252
1253static QETH_DEVICE_ATTR(ipato_del4, del4, 0200, NULL,
1254 qeth_dev_ipato_del4_store);
1255
1256#ifdef CONFIG_QETH_IPV6
1257static ssize_t
1258qeth_dev_ipato_invert6_show(struct device *dev, struct device_attribute *attr, char *buf)
1259{
1260 struct qeth_card *card = dev->driver_data;
1261
1262 if (!card)
1263 return -EINVAL;
1264
1265 if (qeth_check_layer2(card))
1266 return -EPERM;
1267
1268 return sprintf(buf, "%i\n", card->ipato.invert6? 1:0);
1269}
1270
1271static ssize_t
1272qeth_dev_ipato_invert6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1273{
1274 struct qeth_card *card = dev->driver_data;
1275 char *tmp;
1276
1277 if (!card)
1278 return -EINVAL;
1279
1280 if (qeth_check_layer2(card))
1281 return -EPERM;
1282
1283 tmp = strsep((char **) &buf, "\n");
1284 if (!strcmp(tmp, "toggle")){
1285 card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
1286 } else if (!strcmp(tmp, "1")){
1287 card->ipato.invert6 = 1;
1288 } else if (!strcmp(tmp, "0")){
1289 card->ipato.invert6 = 0;
1290 } else {
1291 PRINT_WARN("ipato_invert6: write 0, 1 or 'toggle' to "
1292 "this file\n");
1293 return -EINVAL;
1294 }
1295 return count;
1296}
1297
1298static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644,
1299 qeth_dev_ipato_invert6_show,
1300 qeth_dev_ipato_invert6_store);
1301
1302
1303static ssize_t
1304qeth_dev_ipato_add6_show(struct device *dev, struct device_attribute *attr, char *buf)
1305{
1306 struct qeth_card *card = dev->driver_data;
1307
1308 if (!card)
1309 return -EINVAL;
1310
1311 return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV6);
1312}
1313
1314static ssize_t
1315qeth_dev_ipato_add6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1316{
1317 struct qeth_card *card = dev->driver_data;
1318
1319 if (!card)
1320 return -EINVAL;
1321
1322 return qeth_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6);
1323}
1324
1325static QETH_DEVICE_ATTR(ipato_add6, add6, 0644,
1326 qeth_dev_ipato_add6_show,
1327 qeth_dev_ipato_add6_store);
1328
1329static ssize_t
1330qeth_dev_ipato_del6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1331{
1332 struct qeth_card *card = dev->driver_data;
1333
1334 if (!card)
1335 return -EINVAL;
1336
1337 return qeth_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6);
1338}
1339
1340static QETH_DEVICE_ATTR(ipato_del6, del6, 0200, NULL,
1341 qeth_dev_ipato_del6_store);
1342#endif /* CONFIG_QETH_IPV6 */
1343
1344static struct device_attribute * qeth_ipato_device_attrs[] = {
1345 &dev_attr_ipato_enable,
1346 &dev_attr_ipato_invert4,
1347 &dev_attr_ipato_add4,
1348 &dev_attr_ipato_del4,
1349#ifdef CONFIG_QETH_IPV6
1350 &dev_attr_ipato_invert6,
1351 &dev_attr_ipato_add6,
1352 &dev_attr_ipato_del6,
1353#endif
1354 NULL,
1355};
1356
1357static struct attribute_group qeth_device_ipato_group = {
1358 .name = "ipa_takeover",
1359 .attrs = (struct attribute **)qeth_ipato_device_attrs,
1360};
1361
1362static ssize_t
1363qeth_dev_vipa_add_show(char *buf, struct qeth_card *card,
1364 enum qeth_prot_versions proto)
1365{
1366 struct qeth_ipaddr *ipaddr;
1367 char addr_str[40];
1368 int entry_len; /* length of 1 entry string, differs between v4 and v6 */
1369 unsigned long flags;
1370 int i = 0;
1371
1372 if (qeth_check_layer2(card))
1373 return -EPERM;
1374
1375 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
1376 entry_len += 2; /* \n + terminator */
1377 spin_lock_irqsave(&card->ip_lock, flags);
1378 list_for_each_entry(ipaddr, &card->ip_list, entry){
1379 if (ipaddr->proto != proto)
1380 continue;
1381 if (ipaddr->type != QETH_IP_TYPE_VIPA)
1382 continue;
1383 /* String must not be longer than PAGE_SIZE. So we check if
1384	 * string length gets near PAGE_SIZE. Then we can safely display
1385 * the next IPv6 address (worst case, compared to IPv4) */
1386 if ((PAGE_SIZE - i) <= entry_len)
1387 break;
1388 qeth_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str);
1389 i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
1390 }
1391 spin_unlock_irqrestore(&card->ip_lock, flags);
1392 i += snprintf(buf + i, PAGE_SIZE - i, "\n");
1393
1394 return i;
1395}
1396
1397static ssize_t
1398qeth_dev_vipa_add4_show(struct device *dev, struct device_attribute *attr, char *buf)
1399{
1400 struct qeth_card *card = dev->driver_data;
1401
1402 if (!card)
1403 return -EINVAL;
1404
1405 return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
1406}
1407
1408static int
1409qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto,
1410 u8 *addr)
1411{
1412 if (qeth_string_to_ipaddr(buf, proto, addr)){
1413 PRINT_WARN("Invalid IP address format!\n");
1414 return -EINVAL;
1415 }
1416 return 0;
1417}
1418
1419static ssize_t
1420qeth_dev_vipa_add_store(const char *buf, size_t count,
1421 struct qeth_card *card, enum qeth_prot_versions proto)
1422{
1423 u8 addr[16] = {0, };
1424 int rc;
1425
1426 if (qeth_check_layer2(card))
1427 return -EPERM;
1428 if ((rc = qeth_parse_vipae(buf, proto, addr)))
1429 return rc;
1430
1431 if ((rc = qeth_add_vipa(card, proto, addr)))
1432 return rc;
1433
1434 return count;
1435}
1436
1437static ssize_t
1438qeth_dev_vipa_add4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1439{
1440 struct qeth_card *card = dev->driver_data;
1441
1442 if (!card)
1443 return -EINVAL;
1444
1445 return qeth_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4);
1446}
1447
1448static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
1449 qeth_dev_vipa_add4_show,
1450 qeth_dev_vipa_add4_store);
1451
1452static ssize_t
1453qeth_dev_vipa_del_store(const char *buf, size_t count,
1454 struct qeth_card *card, enum qeth_prot_versions proto)
1455{
1456 u8 addr[16];
1457 int rc;
1458
1459 if (qeth_check_layer2(card))
1460 return -EPERM;
1461 if ((rc = qeth_parse_vipae(buf, proto, addr)))
1462 return rc;
1463
1464 qeth_del_vipa(card, proto, addr);
1465
1466 return count;
1467}
1468
1469static ssize_t
1470qeth_dev_vipa_del4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1471{
1472 struct qeth_card *card = dev->driver_data;
1473
1474 if (!card)
1475 return -EINVAL;
1476
1477 return qeth_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
1478}
1479
1480static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
1481 qeth_dev_vipa_del4_store);
1482
1483#ifdef CONFIG_QETH_IPV6
1484static ssize_t
1485qeth_dev_vipa_add6_show(struct device *dev, struct device_attribute *attr, char *buf)
1486{
1487 struct qeth_card *card = dev->driver_data;
1488
1489 if (!card)
1490 return -EINVAL;
1491
1492 return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV6);
1493}
1494
1495static ssize_t
1496qeth_dev_vipa_add6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1497{
1498 struct qeth_card *card = dev->driver_data;
1499
1500 if (!card)
1501 return -EINVAL;
1502
1503 return qeth_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
1504}
1505
1506static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
1507 qeth_dev_vipa_add6_show,
1508 qeth_dev_vipa_add6_store);
1509
1510static ssize_t
1511qeth_dev_vipa_del6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1512{
1513 struct qeth_card *card = dev->driver_data;
1514
1515 if (!card)
1516 return -EINVAL;
1517
1518 if (qeth_check_layer2(card))
1519 return -EPERM;
1520
1521 return qeth_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
1522}
1523
1524static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL,
1525 qeth_dev_vipa_del6_store);
1526#endif /* CONFIG_QETH_IPV6 */
1527
1528static struct device_attribute * qeth_vipa_device_attrs[] = {
1529 &dev_attr_vipa_add4,
1530 &dev_attr_vipa_del4,
1531#ifdef CONFIG_QETH_IPV6
1532 &dev_attr_vipa_add6,
1533 &dev_attr_vipa_del6,
1534#endif
1535 NULL,
1536};
1537
1538static struct attribute_group qeth_device_vipa_group = {
1539 .name = "vipa",
1540 .attrs = (struct attribute **)qeth_vipa_device_attrs,
1541};
1542
1543static ssize_t
1544qeth_dev_rxip_add_show(char *buf, struct qeth_card *card,
1545 enum qeth_prot_versions proto)
1546{
1547 struct qeth_ipaddr *ipaddr;
1548 char addr_str[40];
1549 int entry_len; /* length of 1 entry string, differs between v4 and v6 */
1550 unsigned long flags;
1551 int i = 0;
1552
1553 if (qeth_check_layer2(card))
1554 return -EPERM;
1555
1556 entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
1557 entry_len += 2; /* \n + terminator */
1558 spin_lock_irqsave(&card->ip_lock, flags);
1559 list_for_each_entry(ipaddr, &card->ip_list, entry){
1560 if (ipaddr->proto != proto)
1561 continue;
1562 if (ipaddr->type != QETH_IP_TYPE_RXIP)
1563 continue;
1564 /* String must not be longer than PAGE_SIZE. So we check if
1565	 * string length gets near PAGE_SIZE. Then we can safely display
1566 * the next IPv6 address (worst case, compared to IPv4) */
1567 if ((PAGE_SIZE - i) <= entry_len)
1568 break;
1569 qeth_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str);
1570 i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
1571 }
1572 spin_unlock_irqrestore(&card->ip_lock, flags);
1573 i += snprintf(buf + i, PAGE_SIZE - i, "\n");
1574
1575 return i;
1576}
1577
1578static ssize_t
1579qeth_dev_rxip_add4_show(struct device *dev, struct device_attribute *attr, char *buf)
1580{
1581 struct qeth_card *card = dev->driver_data;
1582
1583 if (!card)
1584 return -EINVAL;
1585
1586 return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
1587}
1588
1589static int
1590qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto,
1591 u8 *addr)
1592{
1593 if (qeth_string_to_ipaddr(buf, proto, addr)){
1594 PRINT_WARN("Invalid IP address format!\n");
1595 return -EINVAL;
1596 }
1597 return 0;
1598}
1599
1600static ssize_t
1601qeth_dev_rxip_add_store(const char *buf, size_t count,
1602 struct qeth_card *card, enum qeth_prot_versions proto)
1603{
1604 u8 addr[16] = {0, };
1605 int rc;
1606
1607 if (qeth_check_layer2(card))
1608 return -EPERM;
1609 if ((rc = qeth_parse_rxipe(buf, proto, addr)))
1610 return rc;
1611
1612 if ((rc = qeth_add_rxip(card, proto, addr)))
1613 return rc;
1614
1615 return count;
1616}
1617
1618static ssize_t
1619qeth_dev_rxip_add4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1620{
1621 struct qeth_card *card = dev->driver_data;
1622
1623 if (!card)
1624 return -EINVAL;
1625
1626 return qeth_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
1627}
1628
1629static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
1630 qeth_dev_rxip_add4_show,
1631 qeth_dev_rxip_add4_store);
1632
1633static ssize_t
1634qeth_dev_rxip_del_store(const char *buf, size_t count,
1635 struct qeth_card *card, enum qeth_prot_versions proto)
1636{
1637 u8 addr[16];
1638 int rc;
1639
1640 if (qeth_check_layer2(card))
1641 return -EPERM;
1642 if ((rc = qeth_parse_rxipe(buf, proto, addr)))
1643 return rc;
1644
1645 qeth_del_rxip(card, proto, addr);
1646
1647 return count;
1648}
1649
1650static ssize_t
1651qeth_dev_rxip_del4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1652{
1653 struct qeth_card *card = dev->driver_data;
1654
1655 if (!card)
1656 return -EINVAL;
1657
1658 return qeth_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4);
1659}
1660
1661static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
1662 qeth_dev_rxip_del4_store);
1663
1664#ifdef CONFIG_QETH_IPV6
1665static ssize_t
1666qeth_dev_rxip_add6_show(struct device *dev, struct device_attribute *attr, char *buf)
1667{
1668 struct qeth_card *card = dev->driver_data;
1669
1670 if (!card)
1671 return -EINVAL;
1672
1673 return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV6);
1674}
1675
1676static ssize_t
1677qeth_dev_rxip_add6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1678{
1679 struct qeth_card *card = dev->driver_data;
1680
1681 if (!card)
1682 return -EINVAL;
1683
1684 return qeth_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6);
1685}
1686
1687static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
1688 qeth_dev_rxip_add6_show,
1689 qeth_dev_rxip_add6_store);
1690
1691static ssize_t
1692qeth_dev_rxip_del6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1693{
1694 struct qeth_card *card = dev->driver_data;
1695
1696 if (!card)
1697 return -EINVAL;
1698
1699 return qeth_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6);
1700}
1701
1702static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL,
1703 qeth_dev_rxip_del6_store);
1704#endif /* CONFIG_QETH_IPV6 */
1705
1706static struct device_attribute * qeth_rxip_device_attrs[] = {
1707 &dev_attr_rxip_add4,
1708 &dev_attr_rxip_del4,
1709#ifdef CONFIG_QETH_IPV6
1710 &dev_attr_rxip_add6,
1711 &dev_attr_rxip_del6,
1712#endif
1713 NULL,
1714};
1715
1716static struct attribute_group qeth_device_rxip_group = {
1717 .name = "rxip",
1718 .attrs = (struct attribute **)qeth_rxip_device_attrs,
1719};
1720
1721int
1722qeth_create_device_attributes(struct device *dev)
1723{
1724 int ret;
1725 struct qeth_card *card = dev->driver_data;
1726
1727 if (card->info.type == QETH_CARD_TYPE_OSN)
1728 return sysfs_create_group(&dev->kobj,
1729 &qeth_osn_device_attr_group);
1730
1731 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group)))
1732 return ret;
1733 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group))){
1734 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
1735 return ret;
1736 }
1737 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_vipa_group))){
1738 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
1739 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1740 return ret;
1741 }
1742 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_rxip_group))){
1743 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
1744 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1745 sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
1746 return ret;
1747 }
1748 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group))){
1749 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
1750 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1751 sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
1752 sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
1753 return ret;
1754 }
1755 return 0;
1756}
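The error handling above unwinds by repeating every remove call at each failure point. A behaviorally equivalent sketch in the more common goto-label style (a hypothetical alternative, not what this commit does) keeps each remove in exactly one place:

	ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group);
	if (ret)
		return ret;
	ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group);
	if (ret)
		goto out_attr;
	ret = sysfs_create_group(&dev->kobj, &qeth_device_vipa_group);
	if (ret)
		goto out_ipato;
	ret = sysfs_create_group(&dev->kobj, &qeth_device_rxip_group);
	if (ret)
		goto out_vipa;
	ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group);
	if (ret)
		goto out_rxip;
	return 0;

out_rxip:
	sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
out_vipa:
	sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
out_ipato:
	sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
out_attr:
	sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
	return ret;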
1757
1758void
1759qeth_remove_device_attributes(struct device *dev)
1760{
1761 struct qeth_card *card = dev->driver_data;
1762
1763 if (card->info.type == QETH_CARD_TYPE_OSN) {
1764 sysfs_remove_group(&dev->kobj, &qeth_osn_device_attr_group);
1765 return;
1766 }
1767 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
1768 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1769 sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
1770 sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
1771 sysfs_remove_group(&dev->kobj, &qeth_device_blkt_group);
1772}
1773
1774/**********************/
1775/* DRIVER ATTRIBUTES */
1776/**********************/
1777static ssize_t
1778qeth_driver_group_store(struct device_driver *ddrv, const char *buf,
1779 size_t count)
1780{
1781 const char *start, *end;
1782 char bus_ids[3][BUS_ID_SIZE], *argv[3];
1783 int i;
1784 int err;
1785
1786 start = buf;
1787 for (i = 0; i < 3; i++) {
1788 static const char delim[] = { ',', ',', '\n' };
1789 int len;
1790
1791 if (!(end = strchr(start, delim[i])))
1792 return -EINVAL;
1793		len = min_t(ptrdiff_t, BUS_ID_SIZE - 1, end - start);
1794 strncpy(bus_ids[i], start, len);
1795 bus_ids[i][len] = '\0';
1796 start = end + 1;
1797 argv[i] = bus_ids[i];
1798 }
1799 err = ccwgroup_create(qeth_root_dev, qeth_ccwgroup_driver.driver_id,
1800 &qeth_ccw_driver, 3, argv);
1801 if (err)
1802 return err;
1803 else
1804 return count;
1805}
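The attribute expects three ccw bus IDs, comma-separated and terminated by a newline, naming the read, write and data subchannels of the device. With made-up IDs, writing "0.0.f500,0.0.f501,0.0.f502\n" results in a call equivalent to:

	char *argv[] = { "0.0.f500", "0.0.f501", "0.0.f502" };

	ccwgroup_create(qeth_root_dev, qeth_ccwgroup_driver.driver_id,
			&qeth_ccw_driver, 3, argv);

which binds the three subchannels into a single qeth ccwgroup device.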
1806
1807
1808static DRIVER_ATTR(group, 0200, NULL, qeth_driver_group_store);
1809
1810static ssize_t
1811qeth_driver_notifier_register_store(struct device_driver *ddrv, const char *buf,
1812 size_t count)
1813{
1814 int rc;
1815 int signum;
1816 char *tmp, *tmp2;
1817
1818 tmp = strsep((char **) &buf, "\n");
1819 if (!strncmp(tmp, "unregister", 10)){
1820 if ((rc = qeth_notifier_unregister(current)))
1821 return rc;
1822 return count;
1823 }
1824
1825 signum = simple_strtoul(tmp, &tmp2, 10);
1826 if ((signum < 0) || (signum > 32)){
1827 PRINT_WARN("Signal number %d is out of range\n", signum);
1828 return -EINVAL;
1829 }
1830 if ((rc = qeth_notifier_register(current, signum)))
1831 return rc;
1832
1833 return count;
1834}
1835
1836static DRIVER_ATTR(notifier_register, 0200, NULL,
1837 qeth_driver_notifier_register_store);
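Usage: writing a decimal signal number (the range check accepts 0 through 32) registers the writing process via qeth_notifier_register(current, signum), so it will later be sent that signal by qeth; writing the literal string "unregister" drops the registration again. For example, writing "10\n" arms signal 10 for the current process.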
1838
1839int
1840qeth_create_driver_attributes(void)
1841{
1842 int rc;
1843
1844 if ((rc = driver_create_file(&qeth_ccwgroup_driver.driver,
1845 &driver_attr_group)))
1846 return rc;
1847 return driver_create_file(&qeth_ccwgroup_driver.driver,
1848 &driver_attr_notifier_register);
1849}
1850
1851void
1852qeth_remove_driver_attributes(void)
1853{
1854 driver_remove_file(&qeth_ccwgroup_driver.driver,
1855 &driver_attr_group);
1856 driver_remove_file(&qeth_ccwgroup_driver.driver,
1857 &driver_attr_notifier_register);
1858}
diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h
deleted file mode 100644
index c20e923cf9ad..000000000000
--- a/drivers/s390/net/qeth_tso.h
+++ /dev/null
@@ -1,148 +0,0 @@
1/*
2 * linux/drivers/s390/net/qeth_tso.h
3 *
4 * Header file for qeth TCP Segmentation Offload support.
5 *
6 * Copyright 2004 IBM Corporation
7 *
8 * Author(s): Frank Pavlic <fpavlic@de.ibm.com>
9 *
10 */
11#ifndef __QETH_TSO_H__
12#define __QETH_TSO_H__
13
14#include <linux/skbuff.h>
15#include <linux/tcp.h>
16#include <linux/ip.h>
17#include <linux/ipv6.h>
18#include <net/ip6_checksum.h>
19#include "qeth.h"
20#include "qeth_mpc.h"
21
22
23static inline struct qeth_hdr_tso *
24qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
25{
26 QETH_DBF_TEXT(trace, 5, "tsoprsk");
27 return qeth_push_skb(card, *skb, sizeof(struct qeth_hdr_tso));
28}
29
30/**
31 * fill header for a TSO packet
32 */
33static inline void
34qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
35{
36 struct qeth_hdr_tso *hdr;
37 struct tcphdr *tcph;
38 struct iphdr *iph;
39
40 QETH_DBF_TEXT(trace, 5, "tsofhdr");
41
42 hdr = (struct qeth_hdr_tso *) skb->data;
43 iph = ip_hdr(skb);
44 tcph = tcp_hdr(skb);
45 /*fix header to TSO values ...*/
46 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
47	/* set values which are fixed for the first approach ... */
48 hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
49 hdr->ext.imb_hdr_no = 1;
50 hdr->ext.hdr_type = 1;
51 hdr->ext.hdr_version = 1;
52 hdr->ext.hdr_len = 28;
53	/* insert non-fixed values */
54 hdr->ext.mss = skb_shinfo(skb)->gso_size;
55 hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
56 hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
57 sizeof(struct qeth_hdr_tso));
58}
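A worked example of the two computed fields: for a plain IPv4/TCP packet with 20-byte IP and TCP headers (iph->ihl == 5, tcph->doff == 5), dg_hdr_len becomes 5*4 + 5*4 = 40, and payload_len is skb->len minus those 40 bytes of datagram header and minus sizeof(struct qeth_hdr_tso) for the qeth TSO header that was pushed in front of the data.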
59
60/**
61 * change some header values as requested by hardware
62 */
63static inline void
64qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
65{
66 struct iphdr *iph = ip_hdr(skb);
67 struct ipv6hdr *ip6h = ipv6_hdr(skb);
68 struct tcphdr *tcph = tcp_hdr(skb);
69
70 tcph->check = 0;
71 if (skb->protocol == ETH_P_IPV6) {
72 ip6h->payload_len = 0;
73 tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
74 0, IPPROTO_TCP, 0);
75 return;
76 }
77	/* OSA wants us to set these values ... */
78 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
79 0, IPPROTO_TCP, 0);
80 iph->tot_len = 0;
81 iph->check = 0;
82}
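This follows the usual TSO convention: the TCP checksum field is preseeded with the inverted pseudo-header checksum computed over the addresses and IPPROTO_TCP with a length of zero, and tot_len/payload_len are cleared, so the adapter only has to fold each generated segment's actual length and payload into the partial sum when it cuts the super-packet into wire-size frames.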
83
84static inline int
85qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
86 int ipv, int cast_type)
87{
88 struct qeth_hdr_tso *hdr;
89
90 QETH_DBF_TEXT(trace, 5, "tsoprep");
91
92 hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
93 if (hdr == NULL) {
94 QETH_DBF_TEXT(trace, 4, "tsoperr");
95 return -ENOMEM;
96 }
97 memset(hdr, 0, sizeof(struct qeth_hdr_tso));
98	/* fill first 32 bytes of qdio header as used
99	 * FIXME: TSO has two struct members
100	 *	  with different names but same size
101	 */
102 qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
103 qeth_tso_fill_header(card, skb);
104 qeth_tso_set_tcpip_header(card, skb);
105 return 0;
106}
107
108static inline void
109__qeth_fill_buffer_frag(struct sk_buff *skb, struct qdio_buffer *buffer,
110 int is_tso, int *next_element_to_fill)
111{
112 struct skb_frag_struct *frag;
113 int fragno;
114 unsigned long addr;
115 int element, cnt, dlen;
116
117 fragno = skb_shinfo(skb)->nr_frags;
118 element = *next_element_to_fill;
119 dlen = 0;
120
121 if (is_tso)
122 buffer->element[element].flags =
123 SBAL_FLAGS_MIDDLE_FRAG;
124 else
125 buffer->element[element].flags =
126 SBAL_FLAGS_FIRST_FRAG;
127	if ((dlen = skb->len - skb->data_len)) {
128 buffer->element[element].addr = skb->data;
129 buffer->element[element].length = dlen;
130 element++;
131 }
132 for (cnt = 0; cnt < fragno; cnt++) {
133 frag = &skb_shinfo(skb)->frags[cnt];
134 addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
135 frag->page_offset;
136 buffer->element[element].addr = (char *)addr;
137 buffer->element[element].length = frag->size;
138 if (cnt < (fragno - 1))
139 buffer->element[element].flags =
140 SBAL_FLAGS_MIDDLE_FRAG;
141 else
142 buffer->element[element].flags =
143 SBAL_FLAGS_LAST_FRAG;
144 element++;
145 }
146 *next_element_to_fill = element;
147}
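Flag sequencing example: for a non-TSO skb with linear data and two page fragments, the elements come out as FIRST_FRAG (linear part), MIDDLE_FRAG (first fragment) and LAST_FRAG (second fragment); with is_tso set, the linear part is marked MIDDLE_FRAG instead, because the element holding the TSO header, filled in earlier, already acts as the first fragment of the buffer.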
148#endif /* __QETH_TSO_H__ */