aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/s390/net
diff options
context:
space:
mode:
authorFrank Blaschka <frank.blaschka@de.ibm.com>2008-02-15 03:19:42 -0500
committerJeff Garzik <jeff@garzik.org>2008-03-17 07:49:26 -0400
commit4a71df50047f0db65ea09b1be155852e81a45eba (patch)
treec1dee8950578440685da91d2553c78f0ff1fd370 /drivers/s390/net
parent04885948b101c44cbec9dacfb49b28290a899012 (diff)
qeth: new qeth device driver
List of major changes and improvements: no manipulation of the global ARP constructor clean code split into core, layer 2 and layer 3 functionality better exploitation of the ethtool interface better representation of the various hardware capabilities fix packet socket support (tcpdump), no fake_ll required osasnmpd notification via udev events coding style and beautification Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com> Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/s390/net')
-rw-r--r--drivers/s390/net/Kconfig31
-rw-r--r--drivers/s390/net/Makefile7
-rw-r--r--drivers/s390/net/qeth_core.h916
-rw-r--r--drivers/s390/net/qeth_core_main.c4540
-rw-r--r--drivers/s390/net/qeth_core_mpc.c266
-rw-r--r--drivers/s390/net/qeth_core_mpc.h566
-rw-r--r--drivers/s390/net/qeth_core_offl.c701
-rw-r--r--drivers/s390/net/qeth_core_offl.h76
-rw-r--r--drivers/s390/net/qeth_core_sys.c651
-rw-r--r--drivers/s390/net/qeth_l2_main.c1242
-rw-r--r--drivers/s390/net/qeth_l3.h76
-rw-r--r--drivers/s390/net/qeth_l3_main.c3388
-rw-r--r--drivers/s390/net/qeth_l3_sys.c1051
13 files changed, 13495 insertions, 16 deletions
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 773f5a6d5822..a7745c82b4ae 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -67,23 +67,26 @@ config QETH
67 To compile this driver as a module, choose M. 67 To compile this driver as a module, choose M.
68 The module name is qeth.ko. 68 The module name is qeth.ko.
69 69
70config QETH_L2
71 tristate "qeth layer 2 device support"
72 depends on QETH
73 help
74 Select this option to be able to run qeth devices in layer 2 mode.
75 To compile as a module, choose M. The module name is qeth_l2.ko.
76 If unsure, choose y.
70 77
71comment "Gigabit Ethernet default settings" 78config QETH_L3
72 depends on QETH 79 tristate "qeth layer 3 device support"
80 depends on QETH
81 help
82 Select this option to be able to run qeth devices in layer 3 mode.
83 To compile as a module choose M. The module name is qeth_l3.ko.
84 If unsure, choose Y.
73 85
74config QETH_IPV6 86config QETH_IPV6
75 bool "IPv6 support for gigabit ethernet" 87 bool
76 depends on (QETH = IPV6) || (QETH && IPV6 = 'y') 88 depends on (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
77 help 89 default y
78 If CONFIG_QETH is switched on, this option will include IPv6
79 support in the qeth device driver.
80
81config QETH_VLAN
82 bool "VLAN support for gigabit ethernet"
83 depends on (QETH = VLAN_8021Q) || (QETH && VLAN_8021Q = 'y')
84 help
85 If CONFIG_QETH is switched on, this option will include IEEE
86 802.1q VLAN support in the qeth device driver.
87 90
88config CCWGROUP 91config CCWGROUP
89 tristate 92 tristate
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index f6d189a8a451..6382c04d2bdf 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -8,6 +8,9 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
8obj-$(CONFIG_SMSGIUCV) += smsgiucv.o 8obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
9obj-$(CONFIG_LCS) += lcs.o cu3088.o 9obj-$(CONFIG_LCS) += lcs.o cu3088.o
10obj-$(CONFIG_CLAW) += claw.o cu3088.o 10obj-$(CONFIG_CLAW) += claw.o cu3088.o
11qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o 11qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o qeth_core_offl.o
12qeth-$(CONFIG_PROC_FS) += qeth_proc.o
13obj-$(CONFIG_QETH) += qeth.o 12obj-$(CONFIG_QETH) += qeth.o
13qeth_l2-y += qeth_l2_main.o
14obj-$(CONFIG_QETH_L2) += qeth_l2.o
15qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o
16obj-$(CONFIG_QETH_L3) += qeth_l3.o
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
new file mode 100644
index 000000000000..9485e363ca11
--- /dev/null
+++ b/drivers/s390/net/qeth_core.h
@@ -0,0 +1,916 @@
1/*
2 * drivers/s390/net/qeth_core.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#ifndef __QETH_CORE_H__
12#define __QETH_CORE_H__
13
14#include <linux/if.h>
15#include <linux/if_arp.h>
16#include <linux/if_tr.h>
17#include <linux/trdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/if_vlan.h>
20#include <linux/ctype.h>
21#include <linux/in6.h>
22#include <linux/bitops.h>
23#include <linux/seq_file.h>
24#include <linux/ethtool.h>
25
26#include <net/ipv6.h>
27#include <net/if_inet6.h>
28#include <net/addrconf.h>
29
30#include <asm/debug.h>
31#include <asm/qdio.h>
32#include <asm/ccwdev.h>
33#include <asm/ccwgroup.h>
34
35#include "qeth_core_mpc.h"
36
37/**
38 * Debug Facility stuff
39 */
40#define QETH_DBF_SETUP_NAME "qeth_setup"
41#define QETH_DBF_SETUP_LEN 8
42#define QETH_DBF_SETUP_PAGES 8
43#define QETH_DBF_SETUP_NR_AREAS 1
44#define QETH_DBF_SETUP_LEVEL 5
45
46#define QETH_DBF_MISC_NAME "qeth_misc"
47#define QETH_DBF_MISC_LEN 128
48#define QETH_DBF_MISC_PAGES 2
49#define QETH_DBF_MISC_NR_AREAS 1
50#define QETH_DBF_MISC_LEVEL 2
51
52#define QETH_DBF_DATA_NAME "qeth_data"
53#define QETH_DBF_DATA_LEN 96
54#define QETH_DBF_DATA_PAGES 8
55#define QETH_DBF_DATA_NR_AREAS 1
56#define QETH_DBF_DATA_LEVEL 2
57
58#define QETH_DBF_CONTROL_NAME "qeth_control"
59#define QETH_DBF_CONTROL_LEN 256
60#define QETH_DBF_CONTROL_PAGES 8
61#define QETH_DBF_CONTROL_NR_AREAS 1
62#define QETH_DBF_CONTROL_LEVEL 5
63
64#define QETH_DBF_TRACE_NAME "qeth_trace"
65#define QETH_DBF_TRACE_LEN 8
66#define QETH_DBF_TRACE_PAGES 4
67#define QETH_DBF_TRACE_NR_AREAS 1
68#define QETH_DBF_TRACE_LEVEL 3
69
70#define QETH_DBF_SENSE_NAME "qeth_sense"
71#define QETH_DBF_SENSE_LEN 64
72#define QETH_DBF_SENSE_PAGES 2
73#define QETH_DBF_SENSE_NR_AREAS 1
74#define QETH_DBF_SENSE_LEVEL 2
75
76#define QETH_DBF_QERR_NAME "qeth_qerr"
77#define QETH_DBF_QERR_LEN 8
78#define QETH_DBF_QERR_PAGES 2
79#define QETH_DBF_QERR_NR_AREAS 1
80#define QETH_DBF_QERR_LEVEL 2
81
82#define QETH_DBF_TEXT(name, level, text) \
83 do { \
84 debug_text_event(qeth_dbf_##name, level, text); \
85 } while (0)
86
87#define QETH_DBF_HEX(name, level, addr, len) \
88 do { \
89 debug_event(qeth_dbf_##name, level, (void *)(addr), len); \
90 } while (0)
91
92/* Allow to sort out low debug levels early to avoid wasted sprints */
93static inline int qeth_dbf_passes(debug_info_t *dbf_grp, int level)
94{
95 return (level <= dbf_grp->level);
96}
97
98/**
99 * some more debug stuff
100 */
101#define PRINTK_HEADER "qeth: "
102
103#define SENSE_COMMAND_REJECT_BYTE 0
104#define SENSE_COMMAND_REJECT_FLAG 0x80
105#define SENSE_RESETTING_EVENT_BYTE 1
106#define SENSE_RESETTING_EVENT_FLAG 0x80
107
108/*
109 * Common IO related definitions
110 */
111#define CARD_RDEV(card) card->read.ccwdev
112#define CARD_WDEV(card) card->write.ccwdev
113#define CARD_DDEV(card) card->data.ccwdev
114#define CARD_BUS_ID(card) card->gdev->dev.bus_id
115#define CARD_RDEV_ID(card) card->read.ccwdev->dev.bus_id
116#define CARD_WDEV_ID(card) card->write.ccwdev->dev.bus_id
117#define CARD_DDEV_ID(card) card->data.ccwdev->dev.bus_id
118#define CHANNEL_ID(channel) channel->ccwdev->dev.bus_id
119
120/**
121 * card stuff
122 */
123struct qeth_perf_stats {
124 unsigned int bufs_rec;
125 unsigned int bufs_sent;
126
127 unsigned int skbs_sent_pack;
128 unsigned int bufs_sent_pack;
129
130 unsigned int sc_dp_p;
131 unsigned int sc_p_dp;
132 /* qdio_input_handler: number of times called, time spent in */
133 __u64 inbound_start_time;
134 unsigned int inbound_cnt;
135 unsigned int inbound_time;
136 /* qeth_send_packet: number of times called, time spent in */
137 __u64 outbound_start_time;
138 unsigned int outbound_cnt;
139 unsigned int outbound_time;
140 /* qdio_output_handler: number of times called, time spent in */
141 __u64 outbound_handler_start_time;
142 unsigned int outbound_handler_cnt;
143 unsigned int outbound_handler_time;
144 /* number of calls to and time spent in do_QDIO for inbound queue */
145 __u64 inbound_do_qdio_start_time;
146 unsigned int inbound_do_qdio_cnt;
147 unsigned int inbound_do_qdio_time;
148 /* number of calls to and time spent in do_QDIO for outbound queues */
149 __u64 outbound_do_qdio_start_time;
150 unsigned int outbound_do_qdio_cnt;
151 unsigned int outbound_do_qdio_time;
152 /* eddp data */
153 unsigned int large_send_bytes;
154 unsigned int large_send_cnt;
155 unsigned int sg_skbs_sent;
156 unsigned int sg_frags_sent;
157 /* initial values when measuring starts */
158 unsigned long initial_rx_packets;
159 unsigned long initial_tx_packets;
160 /* inbound scatter gather data */
161 unsigned int sg_skbs_rx;
162 unsigned int sg_frags_rx;
163 unsigned int sg_alloc_page_rx;
164};
165
166/* Routing stuff */
167struct qeth_routing_info {
168 enum qeth_routing_types type;
169};
170
171/* IPA stuff */
172struct qeth_ipa_info {
173 __u32 supported_funcs;
174 __u32 enabled_funcs;
175};
176
177static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
178 enum qeth_ipa_funcs func)
179{
180 return (ipa->supported_funcs & func);
181}
182
183static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
184 enum qeth_ipa_funcs func)
185{
186 return (ipa->supported_funcs & ipa->enabled_funcs & func);
187}
188
189#define qeth_adp_supported(c, f) \
190 qeth_is_ipa_supported(&c->options.adp, f)
191#define qeth_adp_enabled(c, f) \
192 qeth_is_ipa_enabled(&c->options.adp, f)
193#define qeth_is_supported(c, f) \
194 qeth_is_ipa_supported(&c->options.ipa4, f)
195#define qeth_is_enabled(c, f) \
196 qeth_is_ipa_enabled(&c->options.ipa4, f)
197#define qeth_is_supported6(c, f) \
198 qeth_is_ipa_supported(&c->options.ipa6, f)
199#define qeth_is_enabled6(c, f) \
200 qeth_is_ipa_enabled(&c->options.ipa6, f)
201#define qeth_is_ipafunc_supported(c, prot, f) \
202 ((prot == QETH_PROT_IPV6) ? \
203 qeth_is_supported6(c, f) : qeth_is_supported(c, f))
204#define qeth_is_ipafunc_enabled(c, prot, f) \
205 ((prot == QETH_PROT_IPV6) ? \
206 qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
207
208#define QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT 0x0101
209#define QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT 0x0101
210#define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108
211#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108
212
213#define QETH_MODELLIST_ARRAY \
214 {{0x1731, 0x01, 0x1732, 0x01, QETH_CARD_TYPE_OSAE, 1, \
215 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
216 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
217 QETH_MAX_QUEUES, 0}, \
218 {0x1731, 0x05, 0x1732, 0x05, QETH_CARD_TYPE_IQD, 0, \
219 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \
220 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \
221 QETH_MAX_QUEUES, 0x103}, \
222 {0x1731, 0x06, 0x1732, 0x06, QETH_CARD_TYPE_OSN, 0, \
223 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
224 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
225 QETH_MAX_QUEUES, 0}, \
226 {0, 0, 0, 0, 0, 0, 0, 0, 0} }
227
228#define QETH_REAL_CARD 1
229#define QETH_VLAN_CARD 2
230#define QETH_BUFSIZE 4096
231
232/**
233 * some more defs
234 */
235#define QETH_TX_TIMEOUT 100 * HZ
236#define QETH_RCD_TIMEOUT 60 * HZ
237#define QETH_HEADER_SIZE 32
238#define QETH_MAX_PORTNO 15
239
240/*IPv6 address autoconfiguration stuff*/
241#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
242#define UNIQUE_ID_NOT_BY_CARD 0x10000
243
244/*****************************************************************************/
245/* QDIO queue and buffer handling */
246/*****************************************************************************/
247#define QETH_MAX_QUEUES 4
248#define QETH_IN_BUF_SIZE_DEFAULT 65536
249#define QETH_IN_BUF_COUNT_DEFAULT 16
250#define QETH_IN_BUF_COUNT_MIN 8
251#define QETH_IN_BUF_COUNT_MAX 128
252#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
253#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
254 ((card)->qdio.in_buf_pool.buf_count / 2)
255
256/* buffers we have to be behind before we get a PCI */
257#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
258/*enqueued free buffers left before we get a PCI*/
259#define QETH_PCI_THRESHOLD_B(card) 0
260/*not used unless the microcode gets patched*/
261#define QETH_PCI_TIMER_VALUE(card) 3
262
263#define QETH_MIN_INPUT_THRESHOLD 1
264#define QETH_MAX_INPUT_THRESHOLD 500
265#define QETH_MIN_OUTPUT_THRESHOLD 1
266#define QETH_MAX_OUTPUT_THRESHOLD 300
267
268/* priority queing */
269#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
270#define QETH_DEFAULT_QUEUE 2
271#define QETH_NO_PRIO_QUEUEING 0
272#define QETH_PRIO_Q_ING_PREC 1
273#define QETH_PRIO_Q_ING_TOS 2
274#define IP_TOS_LOWDELAY 0x10
275#define IP_TOS_HIGHTHROUGHPUT 0x08
276#define IP_TOS_HIGHRELIABILITY 0x04
277#define IP_TOS_NOTIMPORTANT 0x02
278
279/* Packing */
280#define QETH_LOW_WATERMARK_PACK 2
281#define QETH_HIGH_WATERMARK_PACK 5
282#define QETH_WATERMARK_PACK_FUZZ 1
283
284#define QETH_IP_HEADER_SIZE 40
285
286/* large receive scatter gather copy break */
287#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
288
289struct qeth_hdr_layer3 {
290 __u8 id;
291 __u8 flags;
292 __u16 inbound_checksum; /*TSO:__u16 seqno */
293 __u32 token; /*TSO: __u32 reserved */
294 __u16 length;
295 __u8 vlan_prio;
296 __u8 ext_flags;
297 __u16 vlan_id;
298 __u16 frame_offset;
299 __u8 dest_addr[16];
300} __attribute__ ((packed));
301
302struct qeth_hdr_layer2 {
303 __u8 id;
304 __u8 flags[3];
305 __u8 port_no;
306 __u8 hdr_length;
307 __u16 pkt_length;
308 __u16 seq_no;
309 __u16 vlan_id;
310 __u32 reserved;
311 __u8 reserved2[16];
312} __attribute__ ((packed));
313
314struct qeth_hdr_osn {
315 __u8 id;
316 __u8 reserved;
317 __u16 seq_no;
318 __u16 reserved2;
319 __u16 control_flags;
320 __u16 pdu_length;
321 __u8 reserved3[18];
322 __u32 ccid;
323} __attribute__ ((packed));
324
325struct qeth_hdr {
326 union {
327 struct qeth_hdr_layer2 l2;
328 struct qeth_hdr_layer3 l3;
329 struct qeth_hdr_osn osn;
330 } hdr;
331} __attribute__ ((packed));
332
333/*TCP Segmentation Offload header*/
334struct qeth_hdr_ext_tso {
335 __u16 hdr_tot_len;
336 __u8 imb_hdr_no;
337 __u8 reserved;
338 __u8 hdr_type;
339 __u8 hdr_version;
340 __u16 hdr_len;
341 __u32 payload_len;
342 __u16 mss;
343 __u16 dg_hdr_len;
344 __u8 padding[16];
345} __attribute__ ((packed));
346
347struct qeth_hdr_tso {
348 struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/
349 struct qeth_hdr_ext_tso ext;
350} __attribute__ ((packed));
351
352
353/* flags for qeth_hdr.flags */
354#define QETH_HDR_PASSTHRU 0x10
355#define QETH_HDR_IPV6 0x80
356#define QETH_HDR_CAST_MASK 0x07
357enum qeth_cast_flags {
358 QETH_CAST_UNICAST = 0x06,
359 QETH_CAST_MULTICAST = 0x04,
360 QETH_CAST_BROADCAST = 0x05,
361 QETH_CAST_ANYCAST = 0x07,
362 QETH_CAST_NOCAST = 0x00,
363};
364
365enum qeth_layer2_frame_flags {
366 QETH_LAYER2_FLAG_MULTICAST = 0x01,
367 QETH_LAYER2_FLAG_BROADCAST = 0x02,
368 QETH_LAYER2_FLAG_UNICAST = 0x04,
369 QETH_LAYER2_FLAG_VLAN = 0x10,
370};
371
372enum qeth_header_ids {
373 QETH_HEADER_TYPE_LAYER3 = 0x01,
374 QETH_HEADER_TYPE_LAYER2 = 0x02,
375 QETH_HEADER_TYPE_TSO = 0x03,
376 QETH_HEADER_TYPE_OSN = 0x04,
377};
378/* flags for qeth_hdr.ext_flags */
379#define QETH_HDR_EXT_VLAN_FRAME 0x01
380#define QETH_HDR_EXT_TOKEN_ID 0x02
381#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04
382#define QETH_HDR_EXT_SRC_MAC_ADDR 0x08
383#define QETH_HDR_EXT_CSUM_HDR_REQ 0x10
384#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
385#define QETH_HDR_EXT_UDP_TSO 0x40 /*bit off for TCP*/
386
387static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
388{
389 return (sbale->flags & SBAL_FLAGS_LAST_ENTRY);
390}
391
392enum qeth_qdio_buffer_states {
393 /*
394 * inbound: read out by driver; owned by hardware in order to be filled
395 * outbound: owned by driver in order to be filled
396 */
397 QETH_QDIO_BUF_EMPTY,
398 /*
399 * inbound: filled by hardware; owned by driver in order to be read out
400 * outbound: filled by driver; owned by hardware in order to be sent
401 */
402 QETH_QDIO_BUF_PRIMED,
403};
404
405enum qeth_qdio_info_states {
406 QETH_QDIO_UNINITIALIZED,
407 QETH_QDIO_ALLOCATED,
408 QETH_QDIO_ESTABLISHED,
409 QETH_QDIO_CLEANING
410};
411
412struct qeth_buffer_pool_entry {
413 struct list_head list;
414 struct list_head init_list;
415 void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
416};
417
418struct qeth_qdio_buffer_pool {
419 struct list_head entry_list;
420 int buf_count;
421};
422
423struct qeth_qdio_buffer {
424 struct qdio_buffer *buffer;
425 /* the buffer pool entry currently associated to this buffer */
426 struct qeth_buffer_pool_entry *pool_entry;
427};
428
429struct qeth_qdio_q {
430 struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
431 struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
432 int next_buf_to_init;
433} __attribute__ ((aligned(256)));
434
435/* possible types of qeth large_send support */
436enum qeth_large_send_types {
437 QETH_LARGE_SEND_NO,
438 QETH_LARGE_SEND_EDDP,
439 QETH_LARGE_SEND_TSO,
440};
441
442struct qeth_qdio_out_buffer {
443 struct qdio_buffer *buffer;
444 atomic_t state;
445 int next_element_to_fill;
446 struct sk_buff_head skb_list;
447 struct list_head ctx_list;
448};
449
450struct qeth_card;
451
452enum qeth_out_q_states {
453 QETH_OUT_Q_UNLOCKED,
454 QETH_OUT_Q_LOCKED,
455 QETH_OUT_Q_LOCKED_FLUSH,
456};
457
458struct qeth_qdio_out_q {
459 struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
460 struct qeth_qdio_out_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
461 int queue_no;
462 struct qeth_card *card;
463 atomic_t state;
464 int do_pack;
465 /*
466 * index of buffer to be filled by driver; state EMPTY or PACKING
467 */
468 int next_buf_to_fill;
469 /*
470 * number of buffers that are currently filled (PRIMED)
471 * -> these buffers are hardware-owned
472 */
473 atomic_t used_buffers;
474 /* indicates whether PCI flag must be set (or if one is outstanding) */
475 atomic_t set_pci_flags_count;
476} __attribute__ ((aligned(256)));
477
478struct qeth_qdio_info {
479 atomic_t state;
480 /* input */
481 struct qeth_qdio_q *in_q;
482 struct qeth_qdio_buffer_pool in_buf_pool;
483 struct qeth_qdio_buffer_pool init_pool;
484 int in_buf_size;
485
486 /* output */
487 int no_out_queues;
488 struct qeth_qdio_out_q **out_qs;
489
490 /* priority queueing */
491 int do_prio_queueing;
492 int default_out_queue;
493};
494
495enum qeth_send_errors {
496 QETH_SEND_ERROR_NONE,
497 QETH_SEND_ERROR_LINK_FAILURE,
498 QETH_SEND_ERROR_RETRY,
499 QETH_SEND_ERROR_KICK_IT,
500};
501
502#define QETH_ETH_MAC_V4 0x0100 /* like v4 */
503#define QETH_ETH_MAC_V6 0x3333 /* like v6 */
504/* tr mc mac is longer, but that will be enough to detect mc frames */
505#define QETH_TR_MAC_NC 0xc000 /* non-canonical */
506#define QETH_TR_MAC_C 0x0300 /* canonical */
507
508#define DEFAULT_ADD_HHLEN 0
509#define MAX_ADD_HHLEN 1024
510
511/**
512 * buffer stuff for read channel
513 */
514#define QETH_CMD_BUFFER_NO 8
515
516/**
517 * channel state machine
518 */
519enum qeth_channel_states {
520 CH_STATE_UP,
521 CH_STATE_DOWN,
522 CH_STATE_ACTIVATING,
523 CH_STATE_HALTED,
524 CH_STATE_STOPPED,
525 CH_STATE_RCD,
526 CH_STATE_RCD_DONE,
527};
528/**
529 * card state machine
530 */
531enum qeth_card_states {
532 CARD_STATE_DOWN,
533 CARD_STATE_HARDSETUP,
534 CARD_STATE_SOFTSETUP,
535 CARD_STATE_UP,
536 CARD_STATE_RECOVER,
537};
538
539/**
540 * Protocol versions
541 */
542enum qeth_prot_versions {
543 QETH_PROT_IPV4 = 0x0004,
544 QETH_PROT_IPV6 = 0x0006,
545};
546
547enum qeth_ip_types {
548 QETH_IP_TYPE_NORMAL,
549 QETH_IP_TYPE_VIPA,
550 QETH_IP_TYPE_RXIP,
551 QETH_IP_TYPE_DEL_ALL_MC,
552};
553
554enum qeth_cmd_buffer_state {
555 BUF_STATE_FREE,
556 BUF_STATE_LOCKED,
557 BUF_STATE_PROCESSED,
558};
559
560struct qeth_ipato {
561 int enabled;
562 int invert4;
563 int invert6;
564 struct list_head entries;
565};
566
567struct qeth_channel;
568
569struct qeth_cmd_buffer {
570 enum qeth_cmd_buffer_state state;
571 struct qeth_channel *channel;
572 unsigned char *data;
573 int rc;
574 void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
575};
576
577/**
578 * definition of a qeth channel, used for read and write
579 */
580struct qeth_channel {
581 enum qeth_channel_states state;
582 struct ccw1 ccw;
583 spinlock_t iob_lock;
584 wait_queue_head_t wait_q;
585 struct tasklet_struct irq_tasklet;
586 struct ccw_device *ccwdev;
587/*command buffer for control data*/
588 struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
589 atomic_t irq_pending;
590 int io_buf_no;
591 int buf_no;
592};
593
594/**
595 * OSA card related definitions
596 */
597struct qeth_token {
598 __u32 issuer_rm_w;
599 __u32 issuer_rm_r;
600 __u32 cm_filter_w;
601 __u32 cm_filter_r;
602 __u32 cm_connection_w;
603 __u32 cm_connection_r;
604 __u32 ulp_filter_w;
605 __u32 ulp_filter_r;
606 __u32 ulp_connection_w;
607 __u32 ulp_connection_r;
608};
609
610struct qeth_seqno {
611 __u32 trans_hdr;
612 __u32 pdu_hdr;
613 __u32 pdu_hdr_ack;
614 __u16 ipa;
615 __u32 pkt_seqno;
616};
617
618struct qeth_reply {
619 struct list_head list;
620 wait_queue_head_t wait_q;
621 int (*callback)(struct qeth_card *, struct qeth_reply *,
622 unsigned long);
623 u32 seqno;
624 unsigned long offset;
625 atomic_t received;
626 int rc;
627 void *param;
628 struct qeth_card *card;
629 atomic_t refcnt;
630};
631
632
633struct qeth_card_blkt {
634 int time_total;
635 int inter_packet;
636 int inter_packet_jumbo;
637};
638
639#define QETH_BROADCAST_WITH_ECHO 0x01
640#define QETH_BROADCAST_WITHOUT_ECHO 0x02
641#define QETH_LAYER2_MAC_READ 0x01
642#define QETH_LAYER2_MAC_REGISTERED 0x02
643struct qeth_card_info {
644 unsigned short unit_addr2;
645 unsigned short cula;
646 unsigned short chpid;
647 __u16 func_level;
648 char mcl_level[QETH_MCL_LENGTH + 1];
649 int guestlan;
650 int mac_bits;
651 int portname_required;
652 int portno;
653 char portname[9];
654 enum qeth_card_types type;
655 enum qeth_link_types link_type;
656 int is_multicast_different;
657 int initial_mtu;
658 int max_mtu;
659 int broadcast_capable;
660 int unique_id;
661 struct qeth_card_blkt blkt;
662 __u32 csum_mask;
663 enum qeth_ipa_promisc_modes promisc_mode;
664};
665
666struct qeth_card_options {
667 struct qeth_routing_info route4;
668 struct qeth_ipa_info ipa4;
669 struct qeth_ipa_info adp; /*Adapter parameters*/
670 struct qeth_routing_info route6;
671 struct qeth_ipa_info ipa6;
672 enum qeth_checksum_types checksum_type;
673 int broadcast_mode;
674 int macaddr_mode;
675 int fake_broadcast;
676 int add_hhlen;
677 int fake_ll;
678 int layer2;
679 enum qeth_large_send_types large_send;
680 int performance_stats;
681 int rx_sg_cb;
682};
683
684/*
685 * thread bits for qeth_card thread masks
686 */
687enum qeth_threads {
688 QETH_RECOVER_THREAD = 1,
689};
690
691struct qeth_osn_info {
692 int (*assist_cb)(struct net_device *dev, void *data);
693 int (*data_cb)(struct sk_buff *skb);
694};
695
696enum qeth_discipline_id {
697 QETH_DISCIPLINE_LAYER3 = 0,
698 QETH_DISCIPLINE_LAYER2 = 1,
699};
700
701struct qeth_discipline {
702 qdio_handler_t *input_handler;
703 qdio_handler_t *output_handler;
704 int (*recover)(void *ptr);
705 struct ccwgroup_driver *ccwgdriver;
706};
707
708struct qeth_vlan_vid {
709 struct list_head list;
710 unsigned short vid;
711};
712
713struct qeth_mc_mac {
714 struct list_head list;
715 __u8 mc_addr[MAX_ADDR_LEN];
716 unsigned char mc_addrlen;
717};
718
719struct qeth_card {
720 struct list_head list;
721 enum qeth_card_states state;
722 int lan_online;
723 spinlock_t lock;
724 struct ccwgroup_device *gdev;
725 struct qeth_channel read;
726 struct qeth_channel write;
727 struct qeth_channel data;
728
729 struct net_device *dev;
730 struct net_device_stats stats;
731
732 struct qeth_card_info info;
733 struct qeth_token token;
734 struct qeth_seqno seqno;
735 struct qeth_card_options options;
736
737 wait_queue_head_t wait_q;
738 spinlock_t vlanlock;
739 spinlock_t mclock;
740 struct vlan_group *vlangrp;
741 struct list_head vid_list;
742 struct list_head mc_list;
743 struct work_struct kernel_thread_starter;
744 spinlock_t thread_mask_lock;
745 unsigned long thread_start_mask;
746 unsigned long thread_allowed_mask;
747 unsigned long thread_running_mask;
748 spinlock_t ip_lock;
749 struct list_head ip_list;
750 struct list_head *ip_tbd_list;
751 struct qeth_ipato ipato;
752 struct list_head cmd_waiter_list;
753 /* QDIO buffer handling */
754 struct qeth_qdio_info qdio;
755 struct qeth_perf_stats perf_stats;
756 int use_hard_stop;
757 struct qeth_osn_info osn_info;
758 struct qeth_discipline discipline;
759 atomic_t force_alloc_skb;
760};
761
762struct qeth_card_list_struct {
763 struct list_head list;
764 rwlock_t rwlock;
765};
766
767/*some helper functions*/
768#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
769
770static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
771{
772 struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *)
773 dev_get_drvdata(&cdev->dev))->dev);
774 return card;
775}
776
777static inline int qeth_get_micros(void)
778{
779 return (int) (get_clock() >> 12);
780}
781
782static inline void *qeth_push_skb(struct qeth_card *card, struct sk_buff *skb,
783 int size)
784{
785 void *hdr;
786
787 hdr = (void *) skb_push(skb, size);
788 /*
789 * sanity check, the Linux memory allocation scheme should
790 * never present us cases like this one (the qdio header size plus
791 * the first 40 bytes of the paket cross a 4k boundary)
792 */
793 if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) !=
794 (((unsigned long) hdr + size +
795 QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) {
796 PRINT_ERR("Misaligned packet on interface %s. Discarded.",
797 QETH_CARD_IFNAME(card));
798 return NULL;
799 }
800 return hdr;
801}
802
803static inline int qeth_get_ip_version(struct sk_buff *skb)
804{
805 switch (skb->protocol) {
806 case ETH_P_IPV6:
807 return 6;
808 case ETH_P_IP:
809 return 4;
810 default:
811 return 0;
812 }
813}
814
815struct qeth_eddp_context;
816extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
817extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
818const char *qeth_get_cardname_short(struct qeth_card *);
819int qeth_realloc_buffer_pool(struct qeth_card *, int);
820int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
821void qeth_core_free_discipline(struct qeth_card *);
822int qeth_core_create_device_attributes(struct device *);
823void qeth_core_remove_device_attributes(struct device *);
824int qeth_core_create_osn_attributes(struct device *);
825void qeth_core_remove_osn_attributes(struct device *);
826
827/* exports for qeth discipline device drivers */
828extern struct qeth_card_list_struct qeth_core_card_list;
829extern debug_info_t *qeth_dbf_setup;
830extern debug_info_t *qeth_dbf_data;
831extern debug_info_t *qeth_dbf_misc;
832extern debug_info_t *qeth_dbf_control;
833extern debug_info_t *qeth_dbf_trace;
834extern debug_info_t *qeth_dbf_sense;
835extern debug_info_t *qeth_dbf_qerr;
836
837void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
838int qeth_threads_running(struct qeth_card *, unsigned long);
839int qeth_wait_for_threads(struct qeth_card *, unsigned long);
840int qeth_do_run_thread(struct qeth_card *, unsigned long);
841void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
842void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
843int qeth_core_hardsetup_card(struct qeth_card *);
844void qeth_print_status_message(struct qeth_card *);
845int qeth_init_qdio_queues(struct qeth_card *);
846int qeth_send_startlan(struct qeth_card *);
847int qeth_send_stoplan(struct qeth_card *);
848int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
849 int (*reply_cb)
850 (struct qeth_card *, struct qeth_reply *, unsigned long),
851 void *);
852struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
853 enum qeth_ipa_cmds, enum qeth_prot_versions);
854int qeth_query_setadapterparms(struct qeth_card *);
855int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int,
856 unsigned int, const char *);
857void qeth_put_buffer_pool_entry(struct qeth_card *,
858 struct qeth_buffer_pool_entry *);
859void qeth_queue_input_buffer(struct qeth_card *, int);
860struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
861 struct qdio_buffer *, struct qdio_buffer_element **, int *,
862 struct qeth_hdr **);
863void qeth_schedule_recovery(struct qeth_card *);
864void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
865 unsigned int, unsigned int,
866 unsigned int, int, int,
867 unsigned long);
868void qeth_clear_ipacmd_list(struct qeth_card *);
869int qeth_qdio_clear_card(struct qeth_card *, int);
870void qeth_clear_working_pool_list(struct qeth_card *);
871void qeth_clear_cmd_buffers(struct qeth_channel *);
872void qeth_clear_qdio_buffers(struct qeth_card *);
873void qeth_setadp_promisc_mode(struct qeth_card *);
874struct net_device_stats *qeth_get_stats(struct net_device *);
875int qeth_change_mtu(struct net_device *, int);
876int qeth_setadpparms_change_macaddr(struct qeth_card *);
877void qeth_tx_timeout(struct net_device *);
878void qeth_prepare_control_data(struct qeth_card *, int,
879 struct qeth_cmd_buffer *);
880void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
881void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
882struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
883int qeth_mdio_read(struct net_device *, int, int);
884int qeth_snmp_command(struct qeth_card *, char __user *);
885int qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types);
886struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32);
887int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *,
888 unsigned long);
889int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
890 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
891 void *reply_param);
892int qeth_get_cast_type(struct qeth_card *, struct sk_buff *);
893int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
894struct sk_buff *qeth_prepare_skb(struct qeth_card *, struct sk_buff *,
895 struct qeth_hdr **);
896int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int);
897int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
898 struct sk_buff *, struct qeth_hdr *, int,
899 struct qeth_eddp_context *);
900int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
901 struct sk_buff *, struct qeth_hdr *,
902 int, struct qeth_eddp_context *);
903int qeth_core_get_stats_count(struct net_device *);
904void qeth_core_get_ethtool_stats(struct net_device *,
905 struct ethtool_stats *, u64 *);
906void qeth_core_get_strings(struct net_device *, u32, u8 *);
907void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
908
909/* exports for OSN */
910int qeth_osn_assist(struct net_device *, void *, int);
911int qeth_osn_register(unsigned char *read_dev_no, struct net_device **,
912 int (*assist_cb)(struct net_device *, void *),
913 int (*data_cb)(struct sk_buff *));
914void qeth_osn_deregister(struct net_device *);
915
916#endif /* __QETH_CORE_H__ */
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
new file mode 100644
index 000000000000..95c6fcf58953
--- /dev/null
+++ b/drivers/s390/net/qeth_core_main.c
@@ -0,0 +1,4540 @@
1/*
2 * drivers/s390/net/qeth_core_main.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/string.h>
14#include <linux/errno.h>
15#include <linux/kernel.h>
16#include <linux/ip.h>
17#include <linux/ipv6.h>
18#include <linux/tcp.h>
19#include <linux/mii.h>
20#include <linux/kthread.h>
21
22#include <asm-s390/ebcdic.h>
23#include <asm-s390/io.h>
24#include <asm/s390_rdev.h>
25
26#include "qeth_core.h"
27#include "qeth_core_offl.h"
28
/*
 * Log a printf-style message to s390dbf area "name" at "level".
 * Formats into a per-CPU scratch buffer so concurrent CPUs cannot
 * clobber each other's text.
 * NOTE(review): unbounded sprintf() into a 256-byte per-CPU buffer —
 * callers must keep formatted messages short; confirm at call sites.
 */
29#define QETH_DBF_TEXT_(name, level, text...) \
30	do { \
31		if (qeth_dbf_passes(qeth_dbf_##name, level)) { \
32			char *dbf_txt_buf = \
33				get_cpu_var(qeth_core_dbf_txt_buf); \
34			sprintf(dbf_txt_buf, text); \
35			debug_text_event(qeth_dbf_##name, level, dbf_txt_buf); \
36			put_cpu_var(qeth_core_dbf_txt_buf); \
37		} \
38	} while (0)
39
40struct qeth_card_list_struct qeth_core_card_list;
41EXPORT_SYMBOL_GPL(qeth_core_card_list);
42debug_info_t *qeth_dbf_setup;
43EXPORT_SYMBOL_GPL(qeth_dbf_setup);
44debug_info_t *qeth_dbf_data;
45EXPORT_SYMBOL_GPL(qeth_dbf_data);
46debug_info_t *qeth_dbf_misc;
47EXPORT_SYMBOL_GPL(qeth_dbf_misc);
48debug_info_t *qeth_dbf_control;
49EXPORT_SYMBOL_GPL(qeth_dbf_control);
50debug_info_t *qeth_dbf_trace;
51EXPORT_SYMBOL_GPL(qeth_dbf_trace);
52debug_info_t *qeth_dbf_sense;
53EXPORT_SYMBOL_GPL(qeth_dbf_sense);
54debug_info_t *qeth_dbf_qerr;
55EXPORT_SYMBOL_GPL(qeth_dbf_qerr);
56
57static struct device *qeth_core_root_dev;
58static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;
59static struct lock_class_key qdio_out_skb_queue_key;
60static DEFINE_PER_CPU(char[256], qeth_core_dbf_txt_buf);
61
62static void qeth_send_control_data_cb(struct qeth_channel *,
63 struct qeth_cmd_buffer *);
64static int qeth_issue_next_read(struct qeth_card *);
65static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
66static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32);
67static void qeth_free_buffer_pool(struct qeth_card *);
68static int qeth_qdio_establish(struct qeth_card *);
69
70
/*
 * Map a (possibly fragmented) skb into the elements of a QDIO buffer.
 * The linear part (if any) goes into the first element, then one element
 * per page fragment.  SBAL fragment flags chain the elements: the first
 * element gets FIRST_FRAG (or MIDDLE_FRAG when this skb continues a TSO
 * sequence), the last fragment gets LAST_FRAG.
 * @next_element_to_fill: in/out index of the next free buffer element.
 */
71static inline void __qeth_fill_buffer_frag(struct sk_buff *skb,
72	struct qdio_buffer *buffer, int is_tso,
73	int *next_element_to_fill)
74{
75	struct skb_frag_struct *frag;
76	int fragno;
77	unsigned long addr;
78	int element, cnt, dlen;
79
80	fragno = skb_shinfo(skb)->nr_frags;
81	element = *next_element_to_fill;
82	dlen = 0;
83
84	if (is_tso)
85		buffer->element[element].flags =
86			SBAL_FLAGS_MIDDLE_FRAG;
87	else
88		buffer->element[element].flags =
89			SBAL_FLAGS_FIRST_FRAG;
	/* linear data length = total length minus paged (frag) data */
90	dlen = skb->len - skb->data_len;
91	if (dlen) {
92		buffer->element[element].addr = skb->data;
93		buffer->element[element].length = dlen;
94		element++;
95	}
96	for (cnt = 0; cnt < fragno; cnt++) {
97		frag = &skb_shinfo(skb)->frags[cnt];
		/* physical address of the fragment's data within its page */
98		addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
99			frag->page_offset;
100		buffer->element[element].addr = (char *)addr;
101		buffer->element[element].length = frag->size;
102		if (cnt < (fragno - 1))
103			buffer->element[element].flags =
104				SBAL_FLAGS_MIDDLE_FRAG;
105		else
106			buffer->element[element].flags =
107				SBAL_FLAGS_LAST_FRAG;
108		element++;
109	}
110	*next_element_to_fill = element;
111}
112
/*
 * Return a human-readable card-type suffix (with leading space) for log
 * messages, distinguishing real hardware from z/VM guest LAN devices.
 */
113static inline const char *qeth_get_cardname(struct qeth_card *card)
114{
115	if (card->info.guestlan) {
116		switch (card->info.type) {
117		case QETH_CARD_TYPE_OSAE:
118			return " Guest LAN QDIO";
119		case QETH_CARD_TYPE_IQD:
120			return " Guest LAN Hiper";
121		default:
122			return " unknown";
123		}
124	} else {
125		switch (card->info.type) {
126		case QETH_CARD_TYPE_OSAE:
127			return " OSD Express";
128		case QETH_CARD_TYPE_IQD:
129			return " HiperSockets";
130		case QETH_CARD_TYPE_OSN:
131			return " OSN QDIO";
132		default:
133			return " unknown";
134		}
135	}
	/* unreachable: both branches above return via their default cases */
136	return " n/a";
137}
138
/*
 * Return a short (max 14 chars) card-type string, refined by link type
 * for OSA Express cards.  Used where space is limited (e.g. sysfs).
 */
139/* max length to be returned: 14 */
140const char *qeth_get_cardname_short(struct qeth_card *card)
141{
142	if (card->info.guestlan) {
143		switch (card->info.type) {
144		case QETH_CARD_TYPE_OSAE:
145			return "GuestLAN QDIO";
146		case QETH_CARD_TYPE_IQD:
147			return "GuestLAN Hiper";
148		default:
149			return "unknown";
150		}
151	} else {
152		switch (card->info.type) {
153		case QETH_CARD_TYPE_OSAE:
154			switch (card->info.link_type) {
155			case QETH_LINK_TYPE_FAST_ETH:
156				return "OSD_100";
157			case QETH_LINK_TYPE_HSTR:
158				return "HSTR";
159			case QETH_LINK_TYPE_GBIT_ETH:
160				return "OSD_1000";
161			case QETH_LINK_TYPE_10GBIT_ETH:
162				return "OSD_10GIG";
163			case QETH_LINK_TYPE_LANE_ETH100:
164				return "OSD_FE_LANE";
165			case QETH_LINK_TYPE_LANE_TR:
166				return "OSD_TR_LANE";
167			case QETH_LINK_TYPE_LANE_ETH1000:
168				return "OSD_GbE_LANE";
169			case QETH_LINK_TYPE_LANE:
170				return "OSD_ATM_LANE";
171			default:
172				return "OSD_Express";
173			}
174		case QETH_CARD_TYPE_IQD:
175			return "HiperSockets";
176		case QETH_CARD_TYPE_OSN:
177			return "OSN";
178		default:
179			return "unknown";
180		}
181	}
	/* unreachable: every case above returns via a default branch */
182	return "n/a";
183}
184
/*
 * Set which recovery/helper threads may run for this card.  Optionally
 * also clears pending start requests for threads no longer allowed.
 * Wakes wait_q so waiters re-evaluate the new mask.
 */
185void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
186			int clear_start_mask)
187{
188	unsigned long flags;
189
190	spin_lock_irqsave(&card->thread_mask_lock, flags);
191	card->thread_allowed_mask = threads;
192	if (clear_start_mask)
193		card->thread_start_mask &= threads;
194	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
195	wake_up(&card->wait_q);
196}
197EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
198
/*
 * Return nonzero if any thread in "threads" is currently running on the
 * card (snapshot under thread_mask_lock).
 */
199int qeth_threads_running(struct qeth_card *card, unsigned long threads)
200{
201	unsigned long flags;
202	int rc = 0;
203
204	spin_lock_irqsave(&card->thread_mask_lock, flags);
205	rc = (card->thread_running_mask & threads);
206	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
207	return rc;
208}
209EXPORT_SYMBOL_GPL(qeth_threads_running);
210
/*
 * Sleep (interruptibly) until none of the given threads is running.
 * Returns 0 on success or -ERESTARTSYS if interrupted by a signal.
 */
211int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
212{
213	return wait_event_interruptible(card->wait_q,
214			qeth_threads_running(card, threads) == 0);
215}
216EXPORT_SYMBOL_GPL(qeth_wait_for_threads);
217
/*
 * Unlink all entries from the working (in-use) inbound buffer pool list.
 * The entries themselves stay allocated — they remain reachable through
 * the init_pool list and are freed by qeth_free_buffer_pool().
 */
218void qeth_clear_working_pool_list(struct qeth_card *card)
219{
220	struct qeth_buffer_pool_entry *pool_entry, *tmp;
221
222	QETH_DBF_TEXT(trace, 5, "clwrklst");
223	list_for_each_entry_safe(pool_entry, tmp,
224			    &card->qdio.in_buf_pool.entry_list, list){
225			list_del(&pool_entry->list);
226	}
227}
228EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
229
/*
 * Allocate the inbound buffer pool: init_pool.buf_count entries, each
 * holding QETH_MAX_BUFFER_ELEMENTS DMA-capable pages.  On any failure,
 * everything allocated so far is released and -ENOMEM is returned.
 */
230static int qeth_alloc_buffer_pool(struct qeth_card *card)
231{
232	struct qeth_buffer_pool_entry *pool_entry;
233	void *ptr;
234	int i, j;
235
236	QETH_DBF_TEXT(trace, 5, "alocpool");
237	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
238		pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
239		if (!pool_entry) {
240			qeth_free_buffer_pool(card);
241			return -ENOMEM;
242		}
243		for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
244			ptr = (void *) __get_free_page(GFP_KERNEL|GFP_DMA);
245			if (!ptr) {
				/* unwind the partially filled entry, then
				 * the entries already on the init list */
246				while (j > 0)
247					free_page((unsigned long)
248						pool_entry->elements[--j]);
249				kfree(pool_entry);
250				qeth_free_buffer_pool(card);
251				return -ENOMEM;
252			}
253			pool_entry->elements[j] = ptr;
254		}
255		list_add(&pool_entry->init_list,
256			 &card->qdio.init_pool.entry_list);
257	}
258	return 0;
259}
260
/*
 * Resize the inbound buffer pool to "bufcnt" entries by freeing the old
 * pool and allocating a fresh one.  Only permitted while the card is
 * DOWN or in RECOVER state; returns -EPERM otherwise.
 */
261int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
262{
263	QETH_DBF_TEXT(trace, 2, "realcbp");
264
265	if ((card->state != CARD_STATE_DOWN) &&
266	    (card->state != CARD_STATE_RECOVER))
267		return -EPERM;
268
269	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
270	qeth_clear_working_pool_list(card);
271	qeth_free_buffer_pool(card);
272	card->qdio.in_buf_pool.buf_count = bufcnt;
273	card->qdio.init_pool.buf_count = bufcnt;
274	return qeth_alloc_buffer_pool(card);
275}
276
/*
 * Switch the card's large-send (segmentation offload) mode and adjust
 * the net_device feature flags accordingly.  TX is quiesced around the
 * change when the card is UP.  Falls back to QETH_LARGE_SEND_NO with
 * -EOPNOTSUPP when TSO is requested but not supported by the hardware.
 */
277int qeth_set_large_send(struct qeth_card *card,
278		enum qeth_large_send_types type)
279{
280	int rc = 0;
281
	/* no netdev yet: just record the option for later */
282	if (card->dev == NULL) {
283		card->options.large_send = type;
284		return 0;
285	}
286	if (card->state == CARD_STATE_UP)
287		netif_tx_disable(card->dev);
288	card->options.large_send = type;
289	switch (card->options.large_send) {
290	case QETH_LARGE_SEND_EDDP:
291		card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
292					NETIF_F_HW_CSUM;
293		break;
294	case QETH_LARGE_SEND_TSO:
295		if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
296			card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
297						NETIF_F_HW_CSUM;
298		} else {
299			PRINT_WARN("TSO not supported on %s. "
300				   "large_send set to 'no'.\n",
301				   card->dev->name);
302			card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
303						NETIF_F_HW_CSUM);
304			card->options.large_send = QETH_LARGE_SEND_NO;
305			rc = -EOPNOTSUPP;
306		}
307		break;
308	default: /* includes QETH_LARGE_SEND_NO */
309		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
310					NETIF_F_HW_CSUM);
311		break;
312	}
313	if (card->state == CARD_STATE_UP)
314		netif_wake_queue(card->dev);
315	return rc;
316}
317EXPORT_SYMBOL_GPL(qeth_set_large_send);
318
/*
 * Post the next asynchronous read CCW on the read channel so incoming
 * control data can be received.  On ccw_device_start() failure the
 * channel's irq_pending flag is cleared and recovery is scheduled.
 * Returns 0, -EIO (channel not up), -ENOMEM (no iob), or the ccw rc.
 */
319static int qeth_issue_next_read(struct qeth_card *card)
320{
321	int rc;
322	struct qeth_cmd_buffer *iob;
323
324	QETH_DBF_TEXT(trace, 5, "issnxrd");
325	if (card->read.state != CH_STATE_UP)
326		return -EIO;
327	iob = qeth_get_buffer(&card->read);
328	if (!iob) {
329		PRINT_WARN("issue_next_read failed: no iob available!\n");
330		return -ENOMEM;
331	}
332	qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
333	QETH_DBF_TEXT(trace, 6, "noirqpnd");
	/* iob pointer is passed as intparm so qeth_irq() can find it */
334	rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
335			      (addr_t) iob, 0, 0);
336	if (rc) {
337		PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc);
338		atomic_set(&card->read.irq_pending, 0);
339		qeth_schedule_recovery(card);
340		wake_up(&card->wait_q);
341	}
342	return rc;
343}
344
/*
 * Allocate a zeroed reply tracking object with refcount 1, bound to
 * "card".  GFP_ATOMIC because callers may hold spinlocks.  Returns
 * NULL on allocation failure.
 */
345static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
346{
347	struct qeth_reply *reply;
348
349	reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
350	if (reply) {
351		atomic_set(&reply->refcnt, 1);
352		atomic_set(&reply->received, 0);
353		reply->card = card;
	/* note: stray ';' after the block below is a harmless null statement */
354	};
355	return reply;
356}
357
/* Take an additional reference on a reply object. */
358static void qeth_get_reply(struct qeth_reply *reply)
359{
360	WARN_ON(atomic_read(&reply->refcnt) <= 0);
361	atomic_inc(&reply->refcnt);
362}
363
/* Drop a reference on a reply object; frees it when the count hits 0. */
364static void qeth_put_reply(struct qeth_reply *reply)
365{
366	WARN_ON(atomic_read(&reply->refcnt) <= 0);
367	if (atomic_dec_and_test(&reply->refcnt))
368		kfree(reply);
369}
370
/*
 * Log an error for a failed IPA command, translating the command code
 * and return code into human-readable names.
 */
371static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd,
372		struct qeth_card *card)
373{
374	int rc;
375	int com;
376	char *ipa_name;
377
378	com = cmd->hdr.command;
379	rc = cmd->hdr.return_code;
380	ipa_name = qeth_get_ipa_cmd_name(com);
381
382	PRINT_ERR("%s(x%X) for %s returned x%X \"%s\"\n", ipa_name, com,
383		   QETH_CARD_IFNAME(card), rc, qeth_get_ipa_msg(rc));
384}
385
/*
 * Inspect received control data.  For IPA replies, log failures (except
 * SETCCID..MODCCID) and hand the command back to the caller for reply
 * matching.  For unsolicited IPA commands, handle link-state events
 * (STOPLAN/STARTLAN) here and return NULL, or pass MODCCID through.
 * Returns the parsed command, or NULL if fully handled / not IPA.
 */
386static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
387		struct qeth_cmd_buffer *iob)
388{
389	struct qeth_ipa_cmd *cmd = NULL;
390
391	QETH_DBF_TEXT(trace, 5, "chkipad");
392	if (IS_IPA(iob->data)) {
393		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
394		if (IS_IPA_REPLY(cmd)) {
395			if (cmd->hdr.return_code &&
396			    (cmd->hdr.command < IPA_CMD_SETCCID ||
397			     cmd->hdr.command > IPA_CMD_MODCCID))
398				qeth_issue_ipa_msg(cmd, card);
399			return cmd;
400		} else {
401			switch (cmd->hdr.command) {
402			case IPA_CMD_STOPLAN:
403				PRINT_WARN("Link failure on %s (CHPID 0x%X) - "
404					   "there is a network problem or "
405					   "someone pulled the cable or "
406					   "disabled the port.\n",
407					   QETH_CARD_IFNAME(card),
408					   card->info.chpid);
409				card->lan_online = 0;
410				if (card->dev && netif_carrier_ok(card->dev))
411					netif_carrier_off(card->dev);
412				return NULL;
413			case IPA_CMD_STARTLAN:
414				PRINT_INFO("Link reestablished on %s "
415					   "(CHPID 0x%X). Scheduling "
416					   "IP address reset.\n",
417					   QETH_CARD_IFNAME(card),
418					   card->info.chpid);
419				netif_carrier_on(card->dev);
420				qeth_schedule_recovery(card);
421				return NULL;
422			case IPA_CMD_MODCCID:
423				return cmd;
424			case IPA_CMD_REGISTER_LOCAL_ADDR:
425				QETH_DBF_TEXT(trace, 3, "irla");
426				break;
427			case IPA_CMD_UNREGISTER_LOCAL_ADDR:
428				QETH_DBF_TEXT(trace, 3, "urla");
429				break;
430			default:
431				PRINT_WARN("Received data is IPA "
432					   "but not a reply!\n");
433				break;
434			}
435		}
436	}
437	return cmd;
438}
439
/*
 * Fail all pending IPA commands with -EIO and wake their waiters.
 * Used when the control channel goes down so no caller blocks forever.
 */
440void qeth_clear_ipacmd_list(struct qeth_card *card)
441{
442	struct qeth_reply *reply, *r;
443	unsigned long flags;
444
445	QETH_DBF_TEXT(trace, 4, "clipalst");
446
447	spin_lock_irqsave(&card->lock, flags);
448	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
		/* hold a ref so the wakeup cannot race with the final put */
449		qeth_get_reply(reply);
450		reply->rc = -EIO;
451		atomic_inc(&reply->received);
452		list_del_init(&reply->list);
453		wake_up(&reply->wait_q);
454		qeth_put_reply(reply);
455	}
456	spin_unlock_irqrestore(&card->lock, flags);
457}
458EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
459
/*
 * Check a received control buffer for an IDX TERMINATE indication
 * (byte 2 top bits == 0xc0).  Returns -EIO on terminate, else 0.
 * Cause code 0x22 typically means a portname conflict.
 */
460static int qeth_check_idx_response(unsigned char *buffer)
461{
462	if (!buffer)
463		return 0;
464
465	QETH_DBF_HEX(control, 2, buffer, QETH_DBF_CONTROL_LEN);
466	if ((buffer[2] & 0xc0) == 0xc0) {
467		PRINT_WARN("received an IDX TERMINATE "
468			   "with cause code 0x%02x%s\n",
469			   buffer[4],
470			   ((buffer[4] == 0x22) ?
471			    " -- try another portname" : ""));
472		QETH_DBF_TEXT(trace, 2, "ckidxres");
473		QETH_DBF_TEXT(trace, 2, " idxterm");
474		QETH_DBF_TEXT_(trace, 2, "  rc%d", -EIO);
475		return -EIO;
476	}
477	return 0;
478}
479
/*
 * Prepare the channel's CCW for the next I/O: copy the READ or WRITE
 * template (depending on which channel this is), then set transfer
 * length and the physical data address of the buffer.
 */
480static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob,
481		__u32 len)
482{
483	struct qeth_card *card;
484
485	QETH_DBF_TEXT(trace, 4, "setupccw");
486	card = CARD_FROM_CDEV(channel->ccwdev);
487	if (channel == &card->read)
488		memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
489	else
490		memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
491	channel->ccw.count = len;
492	channel->ccw.cda = (__u32) __pa(iob);
493}
494
/*
 * Find a free command buffer on the channel, starting the round-robin
 * scan at io_buf_no.  Marks the found buffer LOCKED and zeroes it.
 * Caller must hold channel->iob_lock.  Returns NULL if all are busy.
 */
495static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
496{
497	__u8 index;
498
499	QETH_DBF_TEXT(trace, 6, "getbuff");
500	index = channel->io_buf_no;
501	do {
502		if (channel->iob[index].state == BUF_STATE_FREE) {
503			channel->iob[index].state = BUF_STATE_LOCKED;
504			channel->io_buf_no = (channel->io_buf_no + 1) %
505				QETH_CMD_BUFFER_NO;
506			memset(channel->iob[index].data, 0, QETH_BUFSIZE);
507			return channel->iob + index;
508		}
509		index = (index + 1) % QETH_CMD_BUFFER_NO;
510	} while (index != channel->io_buf_no);
511
512	return NULL;
513}
514
/*
 * Return a command buffer to the free pool: zero its data, reset state
 * to FREE, restore the default completion callback, and clear rc.
 */
515void qeth_release_buffer(struct qeth_channel *channel,
516		struct qeth_cmd_buffer *iob)
517{
518	unsigned long flags;
519
520	QETH_DBF_TEXT(trace, 6, "relbuff");
521	spin_lock_irqsave(&channel->iob_lock, flags);
522	memset(iob->data, 0, QETH_BUFSIZE);
523	iob->state = BUF_STATE_FREE;
524	iob->callback = qeth_send_control_data_cb;
525	iob->rc = 0;
526	spin_unlock_irqrestore(&channel->iob_lock, flags);
527}
528EXPORT_SYMBOL_GPL(qeth_release_buffer);
529
/* Locked wrapper around __qeth_get_buffer(); may return NULL. */
530static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
531{
532	struct qeth_cmd_buffer *buffer = NULL;
533	unsigned long flags;
534
535	spin_lock_irqsave(&channel->iob_lock, flags);
536	buffer = __qeth_get_buffer(channel);
537	spin_unlock_irqrestore(&channel->iob_lock, flags);
538	return buffer;
539}
540
/*
 * Block (uninterruptibly) until a free command buffer is available on
 * the channel, then return it.  Never returns NULL.
 */
541struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
542{
543	struct qeth_cmd_buffer *buffer;
544	wait_event(channel->wait_q,
545		   ((buffer = qeth_get_buffer(channel)) != NULL));
546	return buffer;
547}
548EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);
549
/*
 * Forcibly release every command buffer of the channel and reset the
 * ring indices.  Used during channel teardown/recovery.
 */
550void qeth_clear_cmd_buffers(struct qeth_channel *channel)
551{
552	int cnt;
553
554	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
555		qeth_release_buffer(channel, &channel->iob[cnt]);
556	channel->buf_no = 0;
557	channel->io_buf_no = 0;
558}
559EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
560
/*
 * Default completion callback for control-channel I/O.  Validates the
 * buffer (IDX terminate -> flush pending commands and recover), parses
 * IPA data, routes OSN traffic to the osn assist callback, and matches
 * the reply against the cmd_waiter_list by sequence number.  A matched
 * reply's callback may ask (return nonzero) to stay on the waiter list
 * for further fragments.  Always acks the PDU sequence number and
 * releases the buffer on exit.
 */
561static void qeth_send_control_data_cb(struct qeth_channel *channel,
562		struct qeth_cmd_buffer *iob)
563{
564	struct qeth_card *card;
565	struct qeth_reply *reply, *r;
566	struct qeth_ipa_cmd *cmd;
567	unsigned long flags;
568	int keep_reply;
569
570	QETH_DBF_TEXT(trace, 4, "sndctlcb");
571
572	card = CARD_FROM_CDEV(channel->ccwdev);
573	if (qeth_check_idx_response(iob->data)) {
574		qeth_clear_ipacmd_list(card);
575		qeth_schedule_recovery(card);
576		goto out;
577	}
578
579	cmd = qeth_check_ipa_data(card, iob);
580	if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
581		goto out;
582	/*in case of OSN : check if cmd is set */
583	if (card->info.type == QETH_CARD_TYPE_OSN &&
584	    cmd &&
585	    cmd->hdr.command != IPA_CMD_STARTLAN &&
586	    card->osn_info.assist_cb != NULL) {
587		card->osn_info.assist_cb(card->dev, cmd);
588		goto out;
589	}

590	spin_lock_irqsave(&card->lock, flags);
591	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
592		if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
593		    ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
594			qeth_get_reply(reply);
595			list_del_init(&reply->list);
			/* drop the lock while running the user callback */
596			spin_unlock_irqrestore(&card->lock, flags);
597			keep_reply = 0;
598			if (reply->callback != NULL) {
599				if (cmd) {
600					reply->offset = (__u16)((char *)cmd -
601							(char *)iob->data);
602					keep_reply = reply->callback(card,
603							reply,
604							(unsigned long)cmd);
605				} else
606					keep_reply = reply->callback(card,
607							reply,
608							(unsigned long)iob);
609			}
610			if (cmd)
611				reply->rc = (u16) cmd->hdr.return_code;
612			else if (iob->rc)
613				reply->rc = iob->rc;
614			if (keep_reply) {
615				spin_lock_irqsave(&card->lock, flags);
616				list_add_tail(&reply->list,
617					      &card->cmd_waiter_list);
618				spin_unlock_irqrestore(&card->lock, flags);
619			} else {
620				atomic_inc(&reply->received);
621				wake_up(&reply->wait_q);
622			}
623			qeth_put_reply(reply);
624			goto out;
625		}
626	}
627	spin_unlock_irqrestore(&card->lock, flags);
628out:
	/* record the peer's PDU sequence number for the next send */
629	memcpy(&card->seqno.pdu_hdr_ack,
630		QETH_PDU_HEADER_SEQ_NO(iob->data),
631		QETH_SEQ_NO_LENGTH);
632	qeth_release_buffer(channel, iob);
633}
635
/*
 * Allocate and initialize the channel's command-buffer ring (DMA-capable
 * memory), reset ring indices, and set up the iob lock and wait queue.
 * On partial allocation failure all buffers allocated so far are freed
 * and -ENOMEM is returned.
 */
636static int qeth_setup_channel(struct qeth_channel *channel)
637{
638	int cnt;
639
640	QETH_DBF_TEXT(setup, 2, "setupch");
641	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
642		channel->iob[cnt].data = (char *)
643			kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
644		if (channel->iob[cnt].data == NULL)
645			break;
646		channel->iob[cnt].state = BUF_STATE_FREE;
647		channel->iob[cnt].channel = channel;
648		channel->iob[cnt].callback = qeth_send_control_data_cb;
649		channel->iob[cnt].rc = 0;
650	}
651	if (cnt < QETH_CMD_BUFFER_NO) {
652		while (cnt-- > 0)
653			kfree(channel->iob[cnt].data);
654		return -ENOMEM;
655	}
656	channel->buf_no = 0;
657	channel->io_buf_no = 0;
658	atomic_set(&channel->irq_pending, 0);
659	spin_lock_init(&channel->iob_lock);

661	init_waitqueue_head(&channel->wait_q);
662	return 0;
663}
664
/*
 * Request that "thread" be started.  Fails with -EPERM if the thread is
 * not allowed or a start is already pending.
 */
665static int qeth_set_thread_start_bit(struct qeth_card *card,
666		unsigned long thread)
667{
668	unsigned long flags;

670	spin_lock_irqsave(&card->thread_mask_lock, flags);
671	if (!(card->thread_allowed_mask & thread) ||
672	      (card->thread_start_mask & thread)) {
673		spin_unlock_irqrestore(&card->thread_mask_lock, flags);
674		return -EPERM;
675	}
676	card->thread_start_mask |= thread;
677	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
678	return 0;
679}
680
/* Withdraw a pending start request for "thread" and wake waiters. */
681void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
682{
683	unsigned long flags;

685	spin_lock_irqsave(&card->thread_mask_lock, flags);
686	card->thread_start_mask &= ~thread;
687	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
688	wake_up(&card->wait_q);
689}
690EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
691
/* Mark "thread" as no longer running and wake anyone waiting on it. */
692void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
693{
694	unsigned long flags;

696	spin_lock_irqsave(&card->thread_mask_lock, flags);
697	card->thread_running_mask &= ~thread;
698	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
699	wake_up(&card->wait_q);
700}
701EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
702
/*
 * Atomically try to transition "thread" from start-requested to running.
 * Returns 1 when the caller may run the thread, 0 when no start was
 * requested, and -EPERM when a start is requested but the thread is
 * currently disallowed or already running.
 */
703static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
704{
705	unsigned long flags;
706	int rc = 0;

708	spin_lock_irqsave(&card->thread_mask_lock, flags);
709	if (card->thread_start_mask & thread) {
710		if ((card->thread_allowed_mask & thread) &&
711		    !(card->thread_running_mask & thread)) {
712			rc = 1;
713			card->thread_start_mask &= ~thread;
714			card->thread_running_mask |= thread;
715		} else
716			rc = -EPERM;
717	}
718	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
719	return rc;
720}
721
/*
 * Wait until __qeth_do_run_thread() yields a definite answer (not
 * -EPERM): returns 1 if the caller should run the thread, 0 otherwise.
 */
722int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
723{
724	int rc = 0;

726	wait_event(card->wait_q,
727		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
728	return rc;
729}
730EXPORT_SYMBOL_GPL(qeth_do_run_thread);
731
/*
 * Request the recovery thread; only schedules the starter work when the
 * start bit was newly set (avoids duplicate recovery runs).
 */
732void qeth_schedule_recovery(struct qeth_card *card)
733{
734	QETH_DBF_TEXT(trace, 2, "startrec");
735	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
736		schedule_work(&card->kernel_thread_starter);
737}
738EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
739
/*
 * Classify an interrupt response block: returns 1 if the condition is
 * serious enough to require recovery (channel checks, resetting event,
 * 0xaf/0xfe sense, generic unit check), 0 if it can be ignored
 * (command reject, all-zero sense, no unit check).
 */
740static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
741{
742	int dstat, cstat;
743	char *sense;

745	sense = (char *) irb->ecw;
746	cstat = irb->scsw.cstat;
747	dstat = irb->scsw.dstat;

749	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
750		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
751		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
752		QETH_DBF_TEXT(trace, 2, "CGENCHK");
753		PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ",
754			   cdev->dev.bus_id, dstat, cstat);
755		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
756				16, 1, irb, 64, 1);
757		return 1;
758	}

760	if (dstat & DEV_STAT_UNIT_CHECK) {
761		if (sense[SENSE_RESETTING_EVENT_BYTE] &
762		    SENSE_RESETTING_EVENT_FLAG) {
763			QETH_DBF_TEXT(trace, 2, "REVIND");
764			return 1;
765		}
766		if (sense[SENSE_COMMAND_REJECT_BYTE] &
767		    SENSE_COMMAND_REJECT_FLAG) {
768			QETH_DBF_TEXT(trace, 2, "CMDREJi");
769			return 0;
770		}
771		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
772			QETH_DBF_TEXT(trace, 2, "AFFE");
773			return 1;
774		}
775		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
776			QETH_DBF_TEXT(trace, 2, "ZEROSEN");
777			return 0;
778		}
779		QETH_DBF_TEXT(trace, 2, "DGENCHK");
780		return 1;
781	}
782	return 0;
783}
784
/*
 * Decode an ERR_PTR-encoded irb from the common I/O layer.  Returns 0
 * for a valid irb, otherwise the error code after logging it.  For a
 * timeout during read-configuration-data (QETH_RCD_PARM) the data
 * channel is marked DOWN and waiters are woken.
 */
785static long __qeth_check_irb_error(struct ccw_device *cdev,
786		unsigned long intparm, struct irb *irb)
787{
788	if (!IS_ERR(irb))
789		return 0;

791	switch (PTR_ERR(irb)) {
792	case -EIO:
793		PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
794		QETH_DBF_TEXT(trace, 2, "ckirberr");
795		QETH_DBF_TEXT_(trace, 2, "  rc%d", -EIO);
796		break;
797	case -ETIMEDOUT:
798		PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
799		QETH_DBF_TEXT(trace, 2, "ckirberr");
800		QETH_DBF_TEXT_(trace, 2, "  rc%d", -ETIMEDOUT);
801		if (intparm == QETH_RCD_PARM) {
802			struct qeth_card *card = CARD_FROM_CDEV(cdev);

804			if (card && (card->data.ccwdev == cdev)) {
805				card->data.state = CH_STATE_DOWN;
806				wake_up(&card->wait_q);
807			}
808		}
809		break;
810	default:
811		PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
812			   cdev->dev.bus_id);
813		QETH_DBF_TEXT(trace, 2, "ckirberr");
814		QETH_DBF_TEXT(trace, 2, "  rc???");
815	}
816	return PTR_ERR(irb);
817}
818
/*
 * Interrupt handler for all three qeth CCW devices (read/write/data).
 * Identifies the channel, updates channel state for clear/halt
 * completions, classifies error conditions (possibly triggering
 * recovery), re-posts the next read on the read channel, and runs the
 * completion callbacks of all consecutively PROCESSED command buffers.
 * intparm carries either a special marker (clear/halt/RCD) or the
 * address of the qeth_cmd_buffer this interrupt completes.
 */
819static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
820		struct irb *irb)
821{
822	int rc;
823	int cstat, dstat;
824	struct qeth_cmd_buffer *buffer;
825	struct qeth_channel *channel;
826	struct qeth_card *card;
827	struct qeth_cmd_buffer *iob;
828	__u8 index;

830	QETH_DBF_TEXT(trace, 5, "irq");

832	if (__qeth_check_irb_error(cdev, intparm, irb))
833		return;
834	cstat = irb->scsw.cstat;
835	dstat = irb->scsw.dstat;

837	card = CARD_FROM_CDEV(cdev);
838	if (!card)
839		return;

841	if (card->read.ccwdev == cdev) {
842		channel = &card->read;
843		QETH_DBF_TEXT(trace, 5, "read");
844	} else if (card->write.ccwdev == cdev) {
845		channel = &card->write;
846		QETH_DBF_TEXT(trace, 5, "write");
847	} else {
848		channel = &card->data;
849		QETH_DBF_TEXT(trace, 5, "data");
850	}
851	atomic_set(&channel->irq_pending, 0);

853	if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC))
854		channel->state = CH_STATE_STOPPED;

856	if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC))
857		channel->state = CH_STATE_HALTED;

859	/*let's wake up immediately on data channel*/
860	if ((channel == &card->data) && (intparm != 0) &&
861	    (intparm != QETH_RCD_PARM))
862		goto out;

864	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
865		QETH_DBF_TEXT(trace, 6, "clrchpar");
866		/* we don't have to handle this further */
867		intparm = 0;
868	}
869	if (intparm == QETH_HALT_CHANNEL_PARM) {
870		QETH_DBF_TEXT(trace, 6, "hltchpar");
871		/* we don't have to handle this further */
872		intparm = 0;
873	}
874	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
875	    (dstat & DEV_STAT_UNIT_CHECK) ||
876	    (cstat)) {
877		if (irb->esw.esw0.erw.cons) {
878			/* TODO: we should make this s390dbf */
879			PRINT_WARN("sense data available on channel %s.\n",
880				   CHANNEL_ID(channel));
881			PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat);
882			print_hex_dump(KERN_WARNING, "qeth: irb ",
883				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
884			print_hex_dump(KERN_WARNING, "qeth: sense data ",
885				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
886		}
887		if (intparm == QETH_RCD_PARM) {
888			channel->state = CH_STATE_DOWN;
889			goto out;
890		}
891		rc = qeth_get_problem(cdev, irb);
892		if (rc) {
893			qeth_schedule_recovery(card);
894			goto out;
895		}
896	}

898	if (intparm == QETH_RCD_PARM) {
899		channel->state = CH_STATE_RCD_DONE;
900		goto out;
901	}
902	if (intparm) {
		/* intparm is the buffer address set by ccw_device_start() */
903		buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
904		buffer->state = BUF_STATE_PROCESSED;
905	}
906	if (channel == &card->data)
907		return;
908	if (channel == &card->read &&
909	    channel->state == CH_STATE_UP)
910		qeth_issue_next_read(card);

912	iob = channel->iob;
913	index = channel->buf_no;
	/* run callbacks for all consecutively completed buffers in order */
914	while (iob[index].state == BUF_STATE_PROCESSED) {
915		if (iob[index].callback != NULL)
916			iob[index].callback(channel, iob + index);

918		index = (index + 1) % QETH_CMD_BUFFER_NO;
919	}
920	channel->buf_no = index;
921out:
922	wake_up(&card->wait_q);
923	return;
924}
925
/*
 * Reset an outbound QDIO buffer: release its skbs (dropping the extra
 * user reference taken at fill time), release any EDDP contexts, zero
 * all buffer elements, and mark the buffer EMPTY.  Also keeps the
 * queue's PCI-flag counter in sync.
 */
926static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
927		struct qeth_qdio_out_buffer *buf)
928{
929	int i;
930	struct sk_buff *skb;

932	/* is PCI flag set on buffer? */
933	if (buf->buffer->element[0].flags & 0x40)
934		atomic_dec(&queue->set_pci_flags_count);

936	skb = skb_dequeue(&buf->skb_list);
937	while (skb) {
938		atomic_dec(&skb->users);
939		dev_kfree_skb_any(skb);
940		skb = skb_dequeue(&buf->skb_list);
941	}
942	qeth_eddp_buf_release_contexts(buf);
943	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
944		buf->buffer->element[i].length = 0;
945		buf->buffer->element[i].addr = NULL;
946		buf->buffer->element[i].flags = 0;
947	}
948	buf->next_element_to_fill = 0;
949	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
950}
951
/* Clear every buffer of every outbound queue, freeing the queued skbs. */
952void qeth_clear_qdio_buffers(struct qeth_card *card)
953{
954	int i, j;

956	QETH_DBF_TEXT(trace, 2, "clearqdbf");
957	/* clear outbound buffers to free skbs */
958	for (i = 0; i < card->qdio.no_out_queues; ++i)
959		if (card->qdio.out_qs[i]) {
960			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
961				qeth_clear_output_buffer(card->qdio.out_qs[i],
962						&card->qdio.out_qs[i]->bufs[j]);
963		}
964}
965EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
966
/*
 * Free the inbound buffer pool: all element pages and the pool entries
 * themselves, walking the init_pool list (the authoritative owner list).
 */
967static void qeth_free_buffer_pool(struct qeth_card *card)
968{
969	struct qeth_buffer_pool_entry *pool_entry, *tmp;
970	int i = 0;
971	QETH_DBF_TEXT(trace, 5, "freepool");
972	list_for_each_entry_safe(pool_entry, tmp,
973				 &card->qdio.init_pool.entry_list, init_list){
974		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
975			free_page((unsigned long)pool_entry->elements[i]);
976		list_del(&pool_entry->init_list);
977		kfree(pool_entry);
978	}
979}
980
/*
 * Tear down all QDIO queue structures: inbound queue, inbound buffer
 * pool, and all outbound queues (clearing each buffer first so skbs are
 * released).  Idempotent — the atomic_xchg on qdio.state guarantees the
 * teardown runs only once per initialization.
 */
981static void qeth_free_qdio_buffers(struct qeth_card *card)
982{
983	int i, j;

985	QETH_DBF_TEXT(trace, 2, "freeqdbf");
986	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
987		QETH_QDIO_UNINITIALIZED)
988		return;
989	kfree(card->qdio.in_q);
990	card->qdio.in_q = NULL;
991	/* inbound buffer pool */
992	qeth_free_buffer_pool(card);
993	/* free outbound qdio_qs */
994	if (card->qdio.out_qs) {
995		for (i = 0; i < card->qdio.no_out_queues; ++i) {
996			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
997				qeth_clear_output_buffer(card->qdio.out_qs[i],
998						&card->qdio.out_qs[i]->bufs[j]);
999			kfree(card->qdio.out_qs[i]);
1000		}
1001		kfree(card->qdio.out_qs);
1002		card->qdio.out_qs = NULL;
1003	}
1004}
1005
/* Free the data buffers of all command buffers of a channel. */
1006static void qeth_clean_channel(struct qeth_channel *channel)
1007{
1008	int cnt;

1010	QETH_DBF_TEXT(setup, 2, "freech");
1011	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1012		kfree(channel->iob[cnt].data);
1013}
1014
/*
 * Detect whether the channel path supports only a single output queue
 * by reading the channel-path descriptor of the data device and testing
 * bit 6 of the CHPP field.  Returns 1 for single-queue, else 0.
 */
1015static int qeth_is_1920_device(struct qeth_card *card)
1016{
1017	int single_queue = 0;
1018	struct ccw_device *ccwdev;
	/* local mirror of the channel-path descriptor layout */
1019	struct channelPath_dsc {
1020		u8 flags;
1021		u8 lsn;
1022		u8 desc;
1023		u8 chpid;
1024		u8 swla;
1025		u8 zeroes;
1026		u8 chla;
1027		u8 chpp;
1028	} *chp_dsc;

1030	QETH_DBF_TEXT(setup, 2, "chk_1920");

1032	ccwdev = card->data.ccwdev;
1033	chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
1034	if (chp_dsc != NULL) {
1035		/* CHPP field bit 6 == 1 -> single queue */
1036		single_queue = ((chp_dsc->chpp & 0x02) == 0x02);
1037		kfree(chp_dsc);
1038	}
1039	QETH_DBF_TEXT_(setup, 2, "rc:%x", single_queue);
1040	return single_queue;
1041}
1042
/* Initialize the card's QDIO bookkeeping to default (uninitialized) state. */
1043static void qeth_init_qdio_info(struct qeth_card *card)
1044{
1045	QETH_DBF_TEXT(setup, 4, "intqdinf");
1046	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
1047	/* inbound */
1048	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1049	card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
1050	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
1051	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
1052	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
1053}
1054
/*
 * Set the default values for all user-configurable card options.
 * NOTE(review): "intial" is a typo for "initial"; keeping the name as-is
 * since it is referenced elsewhere in this file.
 */
1055static void qeth_set_intial_options(struct qeth_card *card)
1056{
1057	card->options.route4.type = NO_ROUTER;
1058	card->options.route6.type = NO_ROUTER;
1059	card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
1060	card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
1061	card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
1062	card->options.fake_broadcast = 0;
1063	card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1064	card->options.fake_ll = 0;
1065	card->options.performance_stats = 0;
1066	card->options.rx_sg_cb = QETH_RX_SG_CB;
1067}
1068
/*
 * Return nonzero if a start of "thread" has been requested (snapshot of
 * thread_start_mask under the lock); logs the three thread masks.
 */
1069static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1070{
1071	unsigned long flags;
1072	int rc = 0;

1074	spin_lock_irqsave(&card->thread_mask_lock, flags);
1075	QETH_DBF_TEXT_(trace, 4, "  %02x%02x%02x",
1076			(u8) card->thread_start_mask,
1077			(u8) card->thread_allowed_mask,
1078			(u8) card->thread_running_mask);
1079	rc = (card->thread_start_mask & thread);
1080	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1081	return rc;
1082}
1083
/*
 * Workqueue handler that spawns requested card threads.  Bails out if
 * neither control channel is up; currently only the recovery thread
 * (discipline.recover) is started this way.
 */
1084static void qeth_start_kernel_thread(struct work_struct *work)
1085{
1086	struct qeth_card *card = container_of(work, struct qeth_card,
1087					kernel_thread_starter);
1088	QETH_DBF_TEXT(trace , 2, "strthrd");

1090	if (card->read.state != CH_STATE_UP &&
1091	    card->write.state != CH_STATE_UP)
1092		return;
1093	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
1094		kthread_run(card->discipline.recover, (void *) card,
1095				"qeth_recover");
1096}
1097
/*
 * Initialize a freshly allocated qeth_card: channel/card states, locks,
 * work items, IP bookkeeping lists, default options, IP-address-takeover
 * state, and QDIO defaults.  Returns 0 or -ENOMEM if the ip_tbd_list
 * head cannot be allocated.
 */
1098static int qeth_setup_card(struct qeth_card *card)
1099{

1101	QETH_DBF_TEXT(setup, 2, "setupcrd");
1102	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));

1104	card->read.state  = CH_STATE_DOWN;
1105	card->write.state = CH_STATE_DOWN;
1106	card->data.state  = CH_STATE_DOWN;
1107	card->state = CARD_STATE_DOWN;
1108	card->lan_online = 0;
1109	card->use_hard_stop = 0;
1110	card->dev = NULL;
1111	spin_lock_init(&card->vlanlock);
1112	spin_lock_init(&card->mclock);
1113	card->vlangrp = NULL;
1114	spin_lock_init(&card->lock);
1115	spin_lock_init(&card->ip_lock);
1116	spin_lock_init(&card->thread_mask_lock);
1117	card->thread_start_mask = 0;
1118	card->thread_allowed_mask = 0;
1119	card->thread_running_mask = 0;
1120	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1121	INIT_LIST_HEAD(&card->ip_list);
1122	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1123	if (!card->ip_tbd_list) {
1124		QETH_DBF_TEXT(setup, 0, "iptbdnom");
1125		return -ENOMEM;
1126	}
1127	INIT_LIST_HEAD(card->ip_tbd_list);
1128	INIT_LIST_HEAD(&card->cmd_waiter_list);
1129	init_waitqueue_head(&card->wait_q);
1130	/* intial options */
1131	qeth_set_intial_options(card);
1132	/* IP address takeover */
1133	INIT_LIST_HEAD(&card->ipato.entries);
1134	card->ipato.enabled = 0;
1135	card->ipato.invert4 = 0;
1136	card->ipato.invert6 = 0;
1137	/* init QDIO stuff */
1138	qeth_init_qdio_info(card);
1139	return 0;
1140}
1141
1142static struct qeth_card *qeth_alloc_card(void)
1143{
1144 struct qeth_card *card;
1145
1146 QETH_DBF_TEXT(setup, 2, "alloccrd");
1147 card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
1148 if (!card)
1149 return NULL;
1150 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
1151 if (qeth_setup_channel(&card->read)) {
1152 kfree(card);
1153 return NULL;
1154 }
1155 if (qeth_setup_channel(&card->write)) {
1156 qeth_clean_channel(&card->read);
1157 kfree(card);
1158 return NULL;
1159 }
1160 card->options.layer2 = -1;
1161 return card;
1162}
1163
1164static int qeth_determine_card_type(struct qeth_card *card)
1165{
1166 int i = 0;
1167
1168 QETH_DBF_TEXT(setup, 2, "detcdtyp");
1169
1170 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1171 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1172 while (known_devices[i][4]) {
1173 if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
1174 (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
1175 card->info.type = known_devices[i][4];
1176 card->qdio.no_out_queues = known_devices[i][8];
1177 card->info.is_multicast_different = known_devices[i][9];
1178 if (qeth_is_1920_device(card)) {
1179 PRINT_INFO("Priority Queueing not able "
1180 "due to hardware limitations!\n");
1181 card->qdio.no_out_queues = 1;
1182 card->qdio.default_out_queue = 0;
1183 }
1184 return 0;
1185 }
1186 i++;
1187 }
1188 card->info.type = QETH_CARD_TYPE_UNKNOWN;
1189 PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card));
1190 return -ENOENT;
1191}
1192
/*
 * Issue a CCW clear on @channel and wait (interruptible, with timeout)
 * for the interrupt handler to move the channel to STOPPED; then mark
 * it DOWN.  Returns 0 on success, the ccw_device_clear() error,
 * -ERESTARTSYS if interrupted, or -ETIME on timeout.
 */
static int qeth_clear_channel(struct qeth_channel *channel)
{
	unsigned long flags;
	struct qeth_card *card;
	int rc;

	QETH_DBF_TEXT(trace, 3, "clearch");
	card = CARD_FROM_CDEV(channel->ccwdev);
	/* ccw_device_clear() must be called under the ccw device lock */
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}
1216
/*
 * Issue a CCW halt on @channel and wait (interruptible, with timeout)
 * for the interrupt handler to move the channel to HALTED.
 * Unlike qeth_clear_channel(), the state is left at HALTED.
 * Returns 0 on success, the ccw_device_halt() error, -ERESTARTSYS if
 * interrupted, or -ETIME on timeout.
 */
static int qeth_halt_channel(struct qeth_channel *channel)
{
	unsigned long flags;
	struct qeth_card *card;
	int rc;

	QETH_DBF_TEXT(trace, 3, "haltch");
	card = CARD_FROM_CDEV(channel->ccwdev);
	/* ccw_device_halt() must be called under the ccw device lock */
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}
1239
1240static int qeth_halt_channels(struct qeth_card *card)
1241{
1242 int rc1 = 0, rc2 = 0, rc3 = 0;
1243
1244 QETH_DBF_TEXT(trace, 3, "haltchs");
1245 rc1 = qeth_halt_channel(&card->read);
1246 rc2 = qeth_halt_channel(&card->write);
1247 rc3 = qeth_halt_channel(&card->data);
1248 if (rc1)
1249 return rc1;
1250 if (rc2)
1251 return rc2;
1252 return rc3;
1253}
1254
1255static int qeth_clear_channels(struct qeth_card *card)
1256{
1257 int rc1 = 0, rc2 = 0, rc3 = 0;
1258
1259 QETH_DBF_TEXT(trace, 3, "clearchs");
1260 rc1 = qeth_clear_channel(&card->read);
1261 rc2 = qeth_clear_channel(&card->write);
1262 rc3 = qeth_clear_channel(&card->data);
1263 if (rc1)
1264 return rc1;
1265 if (rc2)
1266 return rc2;
1267 return rc3;
1268}
1269
1270static int qeth_clear_halt_card(struct qeth_card *card, int halt)
1271{
1272 int rc = 0;
1273
1274 QETH_DBF_TEXT(trace, 3, "clhacrd");
1275 QETH_DBF_HEX(trace, 3, &card, sizeof(void *));
1276
1277 if (halt)
1278 rc = qeth_halt_channels(card);
1279 if (rc)
1280 return rc;
1281 return qeth_clear_channels(card);
1282}
1283
/*
 * Tear down the card's QDIO queues (if established) and clear/halt its
 * channels, leaving the card in CARD_STATE_DOWN.
 * @use_halt: also halt the channels before clearing them.
 * Returns 0 or the first error from cleanup/clear.
 */
int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_DBF_TEXT(trace, 3, "qdioclr");
	/* atomically claim the ESTABLISHED -> CLEANING transition so
	 * only one caller performs the qdio_cleanup */
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (card->info.type == QETH_CARD_TYPE_IQD)
			rc = qdio_cleanup(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_cleanup(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_DBF_TEXT_(trace, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		/* another caller is already in the middle of cleanup */
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_DBF_TEXT_(trace, 3, "2err%d", rc);
	card->state = CARD_STATE_DOWN;
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
1314
/*
 * Read the device's configuration data record (RCD) via the data
 * channel.  On success *buffer points to a kzalloc'ed buffer of
 * *length bytes that the caller must kfree(); on failure *buffer is
 * NULL and *length is 0.
 * Returns 0, -EOPNOTSUPP if the device offers no RCD command,
 * -ENOMEM, -EIO, or the ccw_device_start_timeout() error.
 */
static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
		int *length)
{
	struct ciw *ciw;
	char *rcd_buf;
	int ret;
	struct qeth_channel *channel = &card->data;
	unsigned long flags;

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf)
		return -ENOMEM;

	/* build the RCD channel program from the CIW */
	channel->ccw.cmd_code = ciw->cmd;
	channel->ccw.cda = (__u32) __pa(rcd_buf);
	channel->ccw.count = ciw->count;
	channel->ccw.flags = CCW_FLAG_SLI;
	channel->state = CH_STATE_RCD;
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
				       QETH_RCD_PARM, LPM_ANYPATH, 0,
				       QETH_RCD_TIMEOUT);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	/* the IRQ handler sets RCD_DONE on completion or DOWN on error */
	if (!ret)
		wait_event(card->wait_q,
			   (channel->state == CH_STATE_RCD_DONE ||
			    channel->state == CH_STATE_DOWN));
	if (channel->state == CH_STATE_DOWN)
		ret = -EIO;
	else
		channel->state = CH_STATE_DOWN;
	if (ret) {
		kfree(rcd_buf);
		*buffer = NULL;
		*length = 0;
	} else {
		*length = ciw->count;
		*buffer = rcd_buf;
	}
	return ret;
}
1362
/*
 * Fetch the RCD and extract unit-address information (chpid,
 * unit_addr2, cula) plus the guest-LAN indicator.
 * Returns 0 on success or the qeth_read_conf_data() error.
 */
static int qeth_get_unitaddr(struct qeth_card *card)
{
	int length;
	char *prcd;
	int rc;

	QETH_DBF_TEXT(setup, 2, "getunit");
	rc = qeth_read_conf_data(card, (void **) &prcd, &length);
	if (rc) {
		PRINT_ERR("qeth_read_conf_data for device %s returned %i\n",
			CARD_DDEV_ID(card), rc);
		return rc;
	}
	/* fixed offsets into the RCD record -- NOTE(review): offsets
	 * 30/31/63 are taken on trust from the RCD layout; confirm
	 * against the hardware documentation */
	card->info.chpid = prcd[30];
	card->info.unit_addr2 = prcd[31];
	card->info.cula = prcd[63];
	/* "VM" (in EBCDIC) at offset 0x10 marks a z/VM guest LAN */
	card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
			       (prcd[0x11] == _ascebc['M']));
	kfree(prcd);
	return 0;
}
1384
/*
 * Seed the initiator-side MPC tokens with their fixed initial values;
 * the peer-side tokens (*_r) are filled in later by the CM/ULP reply
 * callbacks.
 */
static void qeth_init_tokens(struct qeth_card *card)
{
	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;
}
1393
1394static void qeth_init_func_level(struct qeth_card *card)
1395{
1396 if (card->ipato.enabled) {
1397 if (card->info.type == QETH_CARD_TYPE_IQD)
1398 card->info.func_level =
1399 QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
1400 else
1401 card->info.func_level =
1402 QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
1403 } else {
1404 if (card->info.type == QETH_CARD_TYPE_IQD)
1405 /*FIXME:why do we have same values for dis and ena for
1406 osae??? */
1407 card->info.func_level =
1408 QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
1409 else
1410 card->info.func_level =
1411 QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
1412 }
1413}
1414
1415static inline __u16 qeth_raw_devno_from_bus_id(char *id)
1416{
1417 id += (strlen(id) - 4);
1418 return (__u16) simple_strtoul(id, &id, 16);
1419}
1420
/*
 * Second half of IDX activation: start a read CCW on @channel to fetch
 * the adapter's IDX reply, which is handed to @idx_reply_cb by the IRQ
 * path.  Waits (interruptible, with timeout) for the callback to move
 * the channel to UP.
 * Returns 0, the ccw_device_start() error, -ERESTARTSYS, or -ETIME.
 */
static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
		void (*idx_reply_cb)(struct qeth_channel *,
			struct qeth_cmd_buffer *))
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;
	int rc;
	struct qeth_card *card;

	QETH_DBF_TEXT(setup, 2, "idxanswr");
	card = CARD_FROM_CDEV(channel->ccwdev);
	iob = qeth_get_buffer(channel);
	iob->callback = idx_reply_cb;
	memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
	channel->ccw.count = QETH_BUFSIZE;
	channel->ccw.cda = (__u32) __pa(iob->data);

	/* claim the channel: wait until no other I/O is pending on it */
	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(setup, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      &channel->ccw, (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc) {
		PRINT_ERR("Error2 in activating channel rc=%d\n", rc);
		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
		/* release the channel and wake any waiters */
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			 channel->state == CH_STATE_UP, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_UP) {
		rc = -ETIME;
		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
		qeth_clear_cmd_buffers(channel);
	} else
		rc = 0;
	return rc;
}
1465
/*
 * Run IDX ACTIVATE on @channel (read or write): build the activation
 * request (port number, issuer token, function level, qdio device and
 * real device addresses), start the write CCW, wait for the channel to
 * reach ACTIVATING, then read back the adapter's answer via
 * qeth_idx_activate_get_answer() with @idx_reply_cb.
 * Returns 0, a ccw/wait error, or -ETIME on activation timeout.
 */
static int qeth_idx_activate_channel(struct qeth_channel *channel,
		void (*idx_reply_cb)(struct qeth_channel *,
			struct qeth_cmd_buffer *))
{
	struct qeth_card *card;
	struct qeth_cmd_buffer *iob;
	unsigned long flags;
	__u16 temp;
	__u8 tmp;
	int rc;

	card = CARD_FROM_CDEV(channel->ccwdev);

	QETH_DBF_TEXT(setup, 2, "idxactch");

	iob = qeth_get_buffer(channel);
	iob->callback = idx_reply_cb;
	memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
	channel->ccw.count = IDX_ACTIVATE_SIZE;
	channel->ccw.cda = (__u32) __pa(iob->data);
	if (channel == &card->write) {
		memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
		/* only the write channel advances the transport seqno */
		card->seqno.trans_hdr++;
	} else {
		memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	}
	/* port number with high bit 0x80 set -- flag semantics per the
	 * IDX protocol; see qeth_core_mpc.h */
	tmp = ((__u8)card->info.portno) | 0x80;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, sizeof(__u16));
	temp = qeth_raw_devno_from_bus_id(CARD_DDEV_ID(card));
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);

	/* claim the channel: wait until no other I/O is pending on it */
	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(setup, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      &channel->ccw, (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc) {
		PRINT_ERR("Error1 in activating channel. rc=%d\n", rc);
		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_ACTIVATING) {
		PRINT_WARN("IDX activate timed out!\n");
		QETH_DBF_TEXT_(setup, 2, "2err%d", -ETIME);
		qeth_clear_cmd_buffers(channel);
		return -ETIME;
	}
	return qeth_idx_activate_get_answer(channel, idx_reply_cb);
}
1534
/*
 * Translate our local function level into the value the peer is
 * expected to report: levels with low byte 8 map up by 0x400, levels
 * whose bits 8-9 equal 01b map up by 0x200, everything else is
 * returned unchanged.
 */
static int qeth_peer_func_level(int level)
{
	int low_byte = level & 0xff;

	if (low_byte == 8)
		return low_byte + 0x400;
	if (((level >> 8) & 3) == 1)
		return low_byte + 0x200;
	return level;
}
1543
/*
 * IDX reply callback for the write channel.  On the first invocation
 * (channel still DOWN) it only acknowledges the activate start; on the
 * answer it validates the reply and the peer function level, moving
 * the channel to UP on success.  The command buffer is always released.
 */
static void qeth_idx_write_cb(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	__u16 temp;

	QETH_DBF_TEXT(setup , 2, "idxwrcb");

	if (channel->state == CH_STATE_DOWN) {
		channel->state = CH_STATE_ACTIVATING;
		goto out;
	}
	card = CARD_FROM_CDEV(channel->ccwdev);

	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
		/* cause code 0x19: adapter is in exclusive use elsewhere */
		if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
			PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
				"adapter exclusively used by another host\n",
				CARD_WDEV_ID(card));
		else
			PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
				"negative reply\n", CARD_WDEV_ID(card));
		goto out;
	}
	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	/* bit 0x0100 is masked out of the comparison -- presumably a
	 * don't-care flag in the reported level; confirm against MPC spec */
	if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
		PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
			"function level mismatch "
			"(sent: 0x%x, received: 0x%x)\n",
			CARD_WDEV_ID(card), card->info.func_level, temp);
		goto out;
	}
	channel->state = CH_STATE_UP;
out:
	qeth_release_buffer(channel, iob);
}
1580
/*
 * IDX reply callback for the read channel.  Mirrors qeth_idx_write_cb()
 * but additionally records the peer's issuer token and microcode level
 * and decides whether a portname is required.  The command buffer is
 * always released.
 */
static void qeth_idx_read_cb(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	__u16 temp;

	QETH_DBF_TEXT(setup , 2, "idxrdcb");
	if (channel->state == CH_STATE_DOWN) {
		channel->state = CH_STATE_ACTIVATING;
		goto out;
	}

	card = CARD_FROM_CDEV(channel->ccwdev);
	if (qeth_check_idx_response(iob->data))
		goto out;

	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
		/* cause code 0x19: adapter is in exclusive use elsewhere */
		if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
			PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
				"adapter exclusively used by another host\n",
				CARD_RDEV_ID(card));
		else
			PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
				"negative reply\n", CARD_RDEV_ID(card));
		goto out;
	}

/**
 * temporary fix for microcode bug
 * to revert it,replace OR by AND
 */
	if ((!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
	    (card->info.type == QETH_CARD_TYPE_OSAE))
		card->info.portname_required = 1;

	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	/* read channel compares the full level, unlike the write side */
	if (temp != qeth_peer_func_level(card->info.func_level)) {
		PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
			"level mismatch (sent: 0x%x, received: 0x%x)\n",
			CARD_RDEV_ID(card), card->info.func_level, temp);
		goto out;
	}
	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
	channel->state = CH_STATE_UP;
out:
	qeth_release_buffer(channel, iob);
}
1632
/*
 * Prepare @iob for transmission on the write channel: set up the CCW
 * for @len bytes, stamp the transport and PDU header sequence numbers
 * (advancing both counters) and the PDU acknowledge seqno.
 * Caller must hold the write channel (irq_pending claimed).
 */
void qeth_prepare_control_data(struct qeth_card *card, int len,
		struct qeth_cmd_buffer *iob)
{
	qeth_setup_ccw(&card->write, iob->data, len);
	/* buffer is simply released once the write completes */
	iob->callback = qeth_release_buffer;

	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
	       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.trans_hdr++;
	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
	QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
}
EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
1650
1651int qeth_send_control_data(struct qeth_card *card, int len,
1652 struct qeth_cmd_buffer *iob,
1653 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
1654 unsigned long),
1655 void *reply_param)
1656{
1657 int rc;
1658 unsigned long flags;
1659 struct qeth_reply *reply = NULL;
1660 unsigned long timeout;
1661
1662 QETH_DBF_TEXT(trace, 2, "sendctl");
1663
1664 reply = qeth_alloc_reply(card);
1665 if (!reply) {
1666 PRINT_WARN("Could no alloc qeth_reply!\n");
1667 return -ENOMEM;
1668 }
1669 reply->callback = reply_cb;
1670 reply->param = reply_param;
1671 if (card->state == CARD_STATE_DOWN)
1672 reply->seqno = QETH_IDX_COMMAND_SEQNO;
1673 else
1674 reply->seqno = card->seqno.ipa++;
1675 init_waitqueue_head(&reply->wait_q);
1676 spin_lock_irqsave(&card->lock, flags);
1677 list_add_tail(&reply->list, &card->cmd_waiter_list);
1678 spin_unlock_irqrestore(&card->lock, flags);
1679 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1680
1681 while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
1682 qeth_prepare_control_data(card, len, iob);
1683
1684 if (IS_IPA(iob->data))
1685 timeout = jiffies + QETH_IPA_TIMEOUT;
1686 else
1687 timeout = jiffies + QETH_TIMEOUT;
1688
1689 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1690 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1691 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1692 (addr_t) iob, 0, 0);
1693 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1694 if (rc) {
1695 PRINT_WARN("qeth_send_control_data: "
1696 "ccw_device_start rc = %i\n", rc);
1697 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
1698 spin_lock_irqsave(&card->lock, flags);
1699 list_del_init(&reply->list);
1700 qeth_put_reply(reply);
1701 spin_unlock_irqrestore(&card->lock, flags);
1702 qeth_release_buffer(iob->channel, iob);
1703 atomic_set(&card->write.irq_pending, 0);
1704 wake_up(&card->wait_q);
1705 return rc;
1706 }
1707 while (!atomic_read(&reply->received)) {
1708 if (time_after(jiffies, timeout)) {
1709 spin_lock_irqsave(&reply->card->lock, flags);
1710 list_del_init(&reply->list);
1711 spin_unlock_irqrestore(&reply->card->lock, flags);
1712 reply->rc = -ETIME;
1713 atomic_inc(&reply->received);
1714 wake_up(&reply->wait_q);
1715 }
1716 cpu_relax();
1717 };
1718 rc = reply->rc;
1719 qeth_put_reply(reply);
1720 return rc;
1721}
1722EXPORT_SYMBOL_GPL(qeth_send_control_data);
1723
/*
 * Reply callback for CM ENABLE: store the peer's CM filter token.
 * @data is the reply command buffer.  Always returns 0.
 */
static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(setup, 2, "cmenblcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
	return 0;
}
1738
/*
 * Send the CM ENABLE control command, carrying our issuer and CM
 * filter tokens; qeth_cm_enable_cb() captures the peer's filter token
 * from the reply.  Returns the qeth_send_control_data() result.
 */
static int qeth_cm_enable(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(setup, 2, "cmenable");

	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

	rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
				    qeth_cm_enable_cb, NULL);
	return rc;
}
1757
/*
 * Reply callback for CM SETUP: store the peer's CM connection token.
 * @data is the reply command buffer.  Always returns 0.
 */
static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{

	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(setup, 2, "cmsetpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
	return 0;
}
1773
/*
 * Send the CM SETUP control command, carrying the peer issuer token
 * plus our CM connection and filter tokens; qeth_cm_setup_cb()
 * captures the peer's connection token from the reply.
 * Returns the qeth_send_control_data() result.
 */
static int qeth_cm_setup(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(setup, 2, "cmsetup");

	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
				    qeth_cm_setup_cb, NULL);
	return rc;

}
1794
1795static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
1796{
1797 switch (card->info.type) {
1798 case QETH_CARD_TYPE_UNKNOWN:
1799 return 1500;
1800 case QETH_CARD_TYPE_IQD:
1801 return card->info.max_mtu;
1802 case QETH_CARD_TYPE_OSAE:
1803 switch (card->info.link_type) {
1804 case QETH_LINK_TYPE_HSTR:
1805 case QETH_LINK_TYPE_LANE_TR:
1806 return 2000;
1807 default:
1808 return 1492;
1809 }
1810 default:
1811 return 1500;
1812 }
1813}
1814
1815static inline int qeth_get_max_mtu_for_card(int cardtype)
1816{
1817 switch (cardtype) {
1818
1819 case QETH_CARD_TYPE_UNKNOWN:
1820 case QETH_CARD_TYPE_OSAE:
1821 case QETH_CARD_TYPE_OSN:
1822 return 61440;
1823 case QETH_CARD_TYPE_IQD:
1824 return 57344;
1825 default:
1826 return 1500;
1827 }
1828}
1829
1830static inline int qeth_get_mtu_out_of_mpc(int cardtype)
1831{
1832 switch (cardtype) {
1833 case QETH_CARD_TYPE_IQD:
1834 return 1;
1835 default:
1836 return 0;
1837 }
1838}
1839
/*
 * Map an MPC frame-size code to the corresponding MTU in bytes;
 * returns 0 for an unknown code.
 */
static inline int qeth_get_mtu_outof_framesize(int framesize)
{
	if (framesize == 0x4000)
		return 8192;
	if (framesize == 0x6000)
		return 16384;
	if (framesize == 0xa000)
		return 32768;
	if (framesize == 0xffff)
		return 57344;
	return 0;
}
1855
1856static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
1857{
1858 switch (card->info.type) {
1859 case QETH_CARD_TYPE_OSAE:
1860 return ((mtu >= 576) && (mtu <= 61440));
1861 case QETH_CARD_TYPE_IQD:
1862 return ((mtu >= 576) &&
1863 (mtu <= card->info.max_mtu + 4096 - 32));
1864 case QETH_CARD_TYPE_OSN:
1865 case QETH_CARD_TYPE_UNKNOWN:
1866 default:
1867 return 1;
1868 }
1869}
1870
/*
 * Reply callback for ULP ENABLE: store the peer's ULP filter token,
 * derive initial/max MTU and inbound buffer size (from the reported
 * frame size on IQD, from card-type defaults otherwise) and record the
 * link type if the reply is long enough to carry one.
 * Always returns 0; a bad frame size is flagged via iob->rc = -EINVAL.
 */
static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{

	__u16 mtu, framesize;
	__u16 len;
	__u8 link_type;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(setup, 2, "ulpenacb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (qeth_get_mtu_out_of_mpc(card->info.type)) {
		/* IQD: MTU comes from the frame size in the reply */
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
		if (!mtu) {
			iob->rc = -EINVAL;
			QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
			return 0;
		}
		card->info.max_mtu = mtu;
		card->info.initial_mtu = mtu;
		card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
	} else {
		card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
		card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
		card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	}

	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
		card->info.link_type = link_type;
	} else
		card->info.link_type = 0;
	QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
	return 0;
}
1913
1914static int qeth_ulp_enable(struct qeth_card *card)
1915{
1916 int rc;
1917 char prot_type;
1918 struct qeth_cmd_buffer *iob;
1919
1920 /*FIXME: trace view callbacks*/
1921 QETH_DBF_TEXT(setup, 2, "ulpenabl");
1922
1923 iob = qeth_wait_for_buffer(&card->write);
1924 memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
1925
1926 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
1927 (__u8) card->info.portno;
1928 if (card->options.layer2)
1929 if (card->info.type == QETH_CARD_TYPE_OSN)
1930 prot_type = QETH_PROT_OSN2;
1931 else
1932 prot_type = QETH_PROT_LAYER2;
1933 else
1934 prot_type = QETH_PROT_TCPIP;
1935
1936 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
1937 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
1938 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
1939 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
1940 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
1941 memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
1942 card->info.portname, 9);
1943 rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
1944 qeth_ulp_enable_cb, NULL);
1945 return rc;
1946
1947}
1948
/*
 * Reply callback for ULP SETUP: store the peer's ULP connection token.
 * @data is the reply command buffer.  Always returns 0.
 */
static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(setup, 2, "ulpstpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc);
	return 0;
}
1963
/*
 * Send the ULP SETUP control command, carrying our ULP tokens plus the
 * data device's devno and real address (cula/unit_addr2);
 * qeth_ulp_setup_cb() captures the peer's connection token.
 * Returns the qeth_send_control_data() result.
 */
static int qeth_ulp_setup(struct qeth_card *card)
{
	int rc;
	__u16 temp;
	struct qeth_cmd_buffer *iob;
	struct ccw_dev_id dev_id;

	QETH_DBF_TEXT(setup, 2, "ulpsetup");

	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

	ccw_device_get_id(CARD_DDEV(card), &dev_id);
	memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
	rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
				    qeth_ulp_setup_cb, NULL);
	return rc;
}
1991
1992static int qeth_alloc_qdio_buffers(struct qeth_card *card)
1993{
1994 int i, j;
1995
1996 QETH_DBF_TEXT(setup, 2, "allcqdbf");
1997
1998 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
1999 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2000 return 0;
2001
2002 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
2003 GFP_KERNEL|GFP_DMA);
2004 if (!card->qdio.in_q)
2005 goto out_nomem;
2006 QETH_DBF_TEXT(setup, 2, "inq");
2007 QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *));
2008 memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
2009 /* give inbound qeth_qdio_buffers their qdio_buffers */
2010 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
2011 card->qdio.in_q->bufs[i].buffer =
2012 &card->qdio.in_q->qdio_bufs[i];
2013 /* inbound buffer pool */
2014 if (qeth_alloc_buffer_pool(card))
2015 goto out_freeinq;
2016 /* outbound */
2017 card->qdio.out_qs =
2018 kmalloc(card->qdio.no_out_queues *
2019 sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
2020 if (!card->qdio.out_qs)
2021 goto out_freepool;
2022 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2023 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
2024 GFP_KERNEL|GFP_DMA);
2025 if (!card->qdio.out_qs[i])
2026 goto out_freeoutq;
2027 QETH_DBF_TEXT_(setup, 2, "outq %i", i);
2028 QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
2029 memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
2030 card->qdio.out_qs[i]->queue_no = i;
2031 /* give outbound qeth_qdio_buffers their qdio_buffers */
2032 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2033 card->qdio.out_qs[i]->bufs[j].buffer =
2034 &card->qdio.out_qs[i]->qdio_bufs[j];
2035 skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
2036 skb_list);
2037 lockdep_set_class(
2038 &card->qdio.out_qs[i]->bufs[j].skb_list.lock,
2039 &qdio_out_skb_queue_key);
2040 INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
2041 }
2042 }
2043 return 0;
2044
2045out_freeoutq:
2046 while (i > 0)
2047 kfree(card->qdio.out_qs[--i]);
2048 kfree(card->qdio.out_qs);
2049 card->qdio.out_qs = NULL;
2050out_freepool:
2051 qeth_free_buffer_pool(card);
2052out_freeinq:
2053 kfree(card->qdio.in_q);
2054 card->qdio.in_q = NULL;
2055out_nomem:
2056 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2057 return -ENOMEM;
2058}
2059
/*
 * Fill the first QIB parameter-field section: an EBCDIC "PCIT" tag
 * followed by the three PCI threshold/timer words for the card.
 * @param_field must provide at least 16 bytes for this section.
 */
static void qeth_create_qib_param_field(struct qeth_card *card,
		char *param_field)
{

	param_field[0] = _ascebc['P'];
	param_field[1] = _ascebc['C'];
	param_field[2] = _ascebc['I'];
	param_field[3] = _ascebc['T'];
	/* word-sized stores into a char buffer -- assumes param_field
	 * is suitably aligned (it is part of the QIB) */
	*((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
	*((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
	*((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
}
2072
/*
 * Fill the second QIB parameter-field section (bytes 16..31): an
 * EBCDIC "BLKT" tag followed by the three blocking-threshold values
 * from card->info.blkt.
 */
static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
		char *param_field)
{
	param_field[16] = _ascebc['B'];
	param_field[17] = _ascebc['L'];
	param_field[18] = _ascebc['K'];
	param_field[19] = _ascebc['T'];
	*((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
	*((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
	*((unsigned int *) (&param_field[28])) =
		card->info.blkt.inter_packet_jumbo;
}
2085
/* Activate the established QDIO queues on the data device. */
static int qeth_qdio_activate(struct qeth_card *card)
{
	QETH_DBF_TEXT(setup, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card), 0);
}
2091
/*
 * Send the DM ACT control command (final step of the MPC handshake),
 * carrying the peer CM and ULP connection tokens.  No reply callback
 * is needed.  Returns the qeth_send_control_data() result.
 */
static int qeth_dm_act(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(setup, 2, "dmact");

	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, DM_ACT, DM_ACT_SIZE);

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
	return rc;
}
2109
2110static int qeth_mpc_initialize(struct qeth_card *card)
2111{
2112 int rc;
2113
2114 QETH_DBF_TEXT(setup, 2, "mpcinit");
2115
2116 rc = qeth_issue_next_read(card);
2117 if (rc) {
2118 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
2119 return rc;
2120 }
2121 rc = qeth_cm_enable(card);
2122 if (rc) {
2123 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
2124 goto out_qdio;
2125 }
2126 rc = qeth_cm_setup(card);
2127 if (rc) {
2128 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
2129 goto out_qdio;
2130 }
2131 rc = qeth_ulp_enable(card);
2132 if (rc) {
2133 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
2134 goto out_qdio;
2135 }
2136 rc = qeth_ulp_setup(card);
2137 if (rc) {
2138 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
2139 goto out_qdio;
2140 }
2141 rc = qeth_alloc_qdio_buffers(card);
2142 if (rc) {
2143 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
2144 goto out_qdio;
2145 }
2146 rc = qeth_qdio_establish(card);
2147 if (rc) {
2148 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
2149 qeth_free_qdio_buffers(card);
2150 goto out_qdio;
2151 }
2152 rc = qeth_qdio_activate(card);
2153 if (rc) {
2154 QETH_DBF_TEXT_(setup, 2, "7err%d", rc);
2155 goto out_qdio;
2156 }
2157 rc = qeth_dm_act(card);
2158 if (rc) {
2159 QETH_DBF_TEXT_(setup, 2, "8err%d", rc);
2160 goto out_qdio;
2161 }
2162
2163 return 0;
2164out_qdio:
2165 qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
2166 return rc;
2167}
2168
2169static void qeth_print_status_with_portname(struct qeth_card *card)
2170{
2171 char dbf_text[15];
2172 int i;
2173
2174 sprintf(dbf_text, "%s", card->info.portname + 1);
2175 for (i = 0; i < 8; i++)
2176 dbf_text[i] =
2177 (char) _ebcasc[(__u8) dbf_text[i]];
2178 dbf_text[8] = 0;
2179 PRINT_INFO("Device %s/%s/%s is a%s card%s%s%s\n"
2180 "with link type %s (portname: %s)\n",
2181 CARD_RDEV_ID(card),
2182 CARD_WDEV_ID(card),
2183 CARD_DDEV_ID(card),
2184 qeth_get_cardname(card),
2185 (card->info.mcl_level[0]) ? " (level: " : "",
2186 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2187 (card->info.mcl_level[0]) ? ")" : "",
2188 qeth_get_cardname_short(card),
2189 dbf_text);
2190
2191}
2192
2193static void qeth_print_status_no_portname(struct qeth_card *card)
2194{
2195 if (card->info.portname[0])
2196 PRINT_INFO("Device %s/%s/%s is a%s "
2197 "card%s%s%s\nwith link type %s "
2198 "(no portname needed by interface).\n",
2199 CARD_RDEV_ID(card),
2200 CARD_WDEV_ID(card),
2201 CARD_DDEV_ID(card),
2202 qeth_get_cardname(card),
2203 (card->info.mcl_level[0]) ? " (level: " : "",
2204 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2205 (card->info.mcl_level[0]) ? ")" : "",
2206 qeth_get_cardname_short(card));
2207 else
2208 PRINT_INFO("Device %s/%s/%s is a%s "
2209 "card%s%s%s\nwith link type %s.\n",
2210 CARD_RDEV_ID(card),
2211 CARD_WDEV_ID(card),
2212 CARD_DDEV_ID(card),
2213 qeth_get_cardname(card),
2214 (card->info.mcl_level[0]) ? " (level: " : "",
2215 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2216 (card->info.mcl_level[0]) ? ")" : "",
2217 qeth_get_cardname_short(card));
2218}
2219
/*
 * Emit the card identification banner to the kernel log.
 *
 * Before printing, normalize card->info.mcl_level in place:
 *  - OSA Express with a zero first byte: synthesize a 4-digit hex level
 *    string from raw bytes 2 and 3;
 *  - HiperSockets under guest LAN (and OSAE with non-zero first byte,
 *    via fallthrough): translate the four EBCDIC level bytes to ASCII;
 *  - any other card type: clear the level string entirely.
 * Then delegate to the portname / no-portname printer.
 */
void qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSAE:
		/* VM will use a non-zero first character
		 * to indicate a HiperSockets like reporting
		 * of the level OSA sets the first character to zero
		 * */
		if (!card->info.mcl_level[0]) {
			/* note: sprintf's arguments are evaluated before
			 * the buffer is overwritten, so reading bytes 2/3
			 * while writing the same buffer is safe */
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);

			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
			break;
		}
		/* fallthrough */
	case QETH_CARD_TYPE_IQD:
		if (card->info.guestlan) {
			/* translate EBCDIC level bytes to ASCII in place */
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	if (card->info.portname_required)
		qeth_print_status_with_portname(card);
	else
		qeth_print_status_no_portname(card);
}
EXPORT_SYMBOL_GPL(qeth_print_status_message);
2259
/*
 * Return a buffer-pool entry to the card's inbound working pool,
 * making it available again for qeth_find_free_buffer_pool_entry().
 */
void qeth_put_buffer_pool_entry(struct qeth_card *card,
		struct qeth_buffer_pool_entry *entry)
{
	QETH_DBF_TEXT(trace, 6, "ptbfplen");
	list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
}
EXPORT_SYMBOL_GPL(qeth_put_buffer_pool_entry);
2267
2268static void qeth_initialize_working_pool_list(struct qeth_card *card)
2269{
2270 struct qeth_buffer_pool_entry *entry;
2271
2272 QETH_DBF_TEXT(trace, 5, "inwrklst");
2273
2274 list_for_each_entry(entry,
2275 &card->qdio.init_pool.entry_list, init_list) {
2276 qeth_put_buffer_pool_entry(card, entry);
2277 }
2278}
2279
2280static inline struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2281 struct qeth_card *card)
2282{
2283 struct list_head *plh;
2284 struct qeth_buffer_pool_entry *entry;
2285 int i, free;
2286 struct page *page;
2287
2288 if (list_empty(&card->qdio.in_buf_pool.entry_list))
2289 return NULL;
2290
2291 list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
2292 entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
2293 free = 1;
2294 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2295 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2296 free = 0;
2297 break;
2298 }
2299 }
2300 if (free) {
2301 list_del_init(&entry->list);
2302 return entry;
2303 }
2304 }
2305
2306 /* no free buffer in pool so take first one and swap pages */
2307 entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2308 struct qeth_buffer_pool_entry, list);
2309 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2310 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2311 page = alloc_page(GFP_ATOMIC|GFP_DMA);
2312 if (!page) {
2313 return NULL;
2314 } else {
2315 free_page((unsigned long)entry->elements[i]);
2316 entry->elements[i] = page_address(page);
2317 if (card->options.performance_stats)
2318 card->perf_stats.sg_alloc_page_rx++;
2319 }
2320 }
2321 }
2322 list_del_init(&entry->list);
2323 return entry;
2324}
2325
2326static int qeth_init_input_buffer(struct qeth_card *card,
2327 struct qeth_qdio_buffer *buf)
2328{
2329 struct qeth_buffer_pool_entry *pool_entry;
2330 int i;
2331
2332 pool_entry = qeth_find_free_buffer_pool_entry(card);
2333 if (!pool_entry)
2334 return 1;
2335
2336 /*
2337 * since the buffer is accessed only from the input_tasklet
2338 * there shouldn't be a need to synchronize; also, since we use
2339 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off
2340 * buffers
2341 */
2342 BUG_ON(!pool_entry);
2343
2344 buf->pool_entry = pool_entry;
2345 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2346 buf->buffer->element[i].length = PAGE_SIZE;
2347 buf->buffer->element[i].addr = pool_entry->elements[i];
2348 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2349 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
2350 else
2351 buf->buffer->element[i].flags = 0;
2352 }
2353 return 0;
2354}
2355
/*
 * (Re)initialize the inbound and outbound QDIO queues.
 *
 * Inbound: zero the SBALs, rebuild the working buffer-pool list, bind
 * pool entries to buffers and hand them to the hardware via do_QDIO();
 * one buffer is deliberately withheld (buf_count - 1) - see the
 * requeue-threshold logic in qeth_queue_input_buffer().
 * Outbound: zero the SBALs, clear every buffer and reset all queue
 * bookkeeping (fill pointer, packing state, counters, lock state).
 *
 * Returns 0 on success or the error from do_QDIO()/qdio_synchronize().
 */
int qeth_init_qdio_queues(struct qeth_card *card)
{
	int i, j;
	int rc;

	QETH_DBF_TEXT(setup, 2, "initqdqs");

	/* inbound queue */
	memset(card->qdio.in_q->qdio_bufs, 0,
	       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
	qeth_initialize_working_pool_list(card);
	/*give only as many buffers to hardware as we have buffer pool entries*/
	for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
		qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
	card->qdio.in_q->next_buf_to_init =
		card->qdio.in_buf_pool.buf_count - 1;
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
		     card->qdio.in_buf_pool.buf_count - 1, NULL);
	if (rc) {
		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
		return rc;
	}
	rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
	if (rc) {
		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
		return rc;
	}
	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		memset(card->qdio.out_qs[i]->qdio_bufs, 0,
		       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
			qeth_clear_output_buffer(card->qdio.out_qs[i],
					&card->qdio.out_qs[i]->bufs[j]);
		}
		card->qdio.out_qs[i]->card = card;
		card->qdio.out_qs[i]->next_buf_to_fill = 0;
		card->qdio.out_qs[i]->do_pack = 0;
		atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
		atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
		atomic_set(&card->qdio.out_qs[i]->state,
			   QETH_OUT_Q_UNLOCKED);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
2402
2403static inline __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
2404{
2405 switch (link_type) {
2406 case QETH_LINK_TYPE_HSTR:
2407 return 2;
2408 default:
2409 return 1;
2410 }
2411}
2412
2413static void qeth_fill_ipacmd_header(struct qeth_card *card,
2414 struct qeth_ipa_cmd *cmd, __u8 command,
2415 enum qeth_prot_versions prot)
2416{
2417 memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
2418 cmd->hdr.command = command;
2419 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
2420 cmd->hdr.seqno = card->seqno.ipa;
2421 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
2422 cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
2423 if (card->options.layer2)
2424 cmd->hdr.prim_version_no = 2;
2425 else
2426 cmd->hdr.prim_version_no = 1;
2427 cmd->hdr.param_count = 1;
2428 cmd->hdr.prot_version = prot;
2429 cmd->hdr.ipa_supported = 0;
2430 cmd->hdr.ipa_enabled = 0;
2431}
2432
2433struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
2434 enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
2435{
2436 struct qeth_cmd_buffer *iob;
2437 struct qeth_ipa_cmd *cmd;
2438
2439 iob = qeth_wait_for_buffer(&card->write);
2440 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2441 qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
2442
2443 return iob;
2444}
2445EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
2446
/*
 * Wrap an IPA command buffer in the MPC PDU transport header.
 * The header template is copied in first; the protocol discriminator
 * and the ULP connection token are then patched into it, so the
 * statement order must not change.
 */
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		char prot_type)
{
	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
2456
2457int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2458 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
2459 unsigned long),
2460 void *reply_param)
2461{
2462 int rc;
2463 char prot_type;
2464 int cmd;
2465 cmd = ((struct qeth_ipa_cmd *)
2466 (iob->data+IPA_PDU_HEADER_SIZE))->hdr.command;
2467
2468 QETH_DBF_TEXT(trace, 4, "sendipa");
2469
2470 if (card->options.layer2)
2471 if (card->info.type == QETH_CARD_TYPE_OSN)
2472 prot_type = QETH_PROT_OSN2;
2473 else
2474 prot_type = QETH_PROT_LAYER2;
2475 else
2476 prot_type = QETH_PROT_TCPIP;
2477 qeth_prepare_ipa_cmd(card, iob, prot_type);
2478 rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob,
2479 reply_cb, reply_param);
2480 if (rc != 0) {
2481 char *ipa_cmd_name;
2482 ipa_cmd_name = qeth_get_ipa_cmd_name(cmd);
2483 PRINT_ERR("%s %s(%x) returned %s(%x)\n", __FUNCTION__,
2484 ipa_cmd_name, cmd, qeth_get_ipa_msg(rc), rc);
2485 }
2486 return rc;
2487}
2488EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
2489
2490static int qeth_send_startstoplan(struct qeth_card *card,
2491 enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
2492{
2493 int rc;
2494 struct qeth_cmd_buffer *iob;
2495
2496 iob = qeth_get_ipacmd_buffer(card, ipacmd, prot);
2497 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
2498
2499 return rc;
2500}
2501
2502int qeth_send_startlan(struct qeth_card *card)
2503{
2504 int rc;
2505
2506 QETH_DBF_TEXT(setup, 2, "strtlan");
2507
2508 rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, 0);
2509 return rc;
2510}
2511EXPORT_SYMBOL_GPL(qeth_send_startlan);
2512
2513int qeth_send_stoplan(struct qeth_card *card)
2514{
2515 int rc = 0;
2516
2517 /*
2518 * TODO: according to the IPA format document page 14,
2519 * TCP/IP (we!) never issue a STOPLAN
2520 * is this right ?!?
2521 */
2522 QETH_DBF_TEXT(setup, 2, "stoplan");
2523
2524 rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, 0);
2525 return rc;
2526}
2527EXPORT_SYMBOL_GPL(qeth_send_stoplan);
2528
2529int qeth_default_setadapterparms_cb(struct qeth_card *card,
2530 struct qeth_reply *reply, unsigned long data)
2531{
2532 struct qeth_ipa_cmd *cmd;
2533
2534 QETH_DBF_TEXT(trace, 4, "defadpcb");
2535
2536 cmd = (struct qeth_ipa_cmd *) data;
2537 if (cmd->hdr.return_code == 0)
2538 cmd->hdr.return_code =
2539 cmd->data.setadapterparms.hdr.return_code;
2540 return 0;
2541}
2542EXPORT_SYMBOL_GPL(qeth_default_setadapterparms_cb);
2543
2544static int qeth_query_setadapterparms_cb(struct qeth_card *card,
2545 struct qeth_reply *reply, unsigned long data)
2546{
2547 struct qeth_ipa_cmd *cmd;
2548
2549 QETH_DBF_TEXT(trace, 3, "quyadpcb");
2550
2551 cmd = (struct qeth_ipa_cmd *) data;
2552 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f)
2553 card->info.link_type =
2554 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
2555 card->options.adp.supported_funcs =
2556 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
2557 return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
2558}
2559
2560struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
2561 __u32 command, __u32 cmdlen)
2562{
2563 struct qeth_cmd_buffer *iob;
2564 struct qeth_ipa_cmd *cmd;
2565
2566 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
2567 QETH_PROT_IPV4);
2568 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2569 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
2570 cmd->data.setadapterparms.hdr.command_code = command;
2571 cmd->data.setadapterparms.hdr.used_total = 1;
2572 cmd->data.setadapterparms.hdr.seq_no = 1;
2573
2574 return iob;
2575}
2576EXPORT_SYMBOL_GPL(qeth_get_adapter_cmd);
2577
2578int qeth_query_setadapterparms(struct qeth_card *card)
2579{
2580 int rc;
2581 struct qeth_cmd_buffer *iob;
2582
2583 QETH_DBF_TEXT(trace, 3, "queryadp");
2584 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
2585 sizeof(struct qeth_ipacmd_setadpparms));
2586 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
2587 return rc;
2588}
2589EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
2590
2591int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2592 unsigned int siga_error, const char *dbftext)
2593{
2594 if (qdio_error || siga_error) {
2595 QETH_DBF_TEXT(trace, 2, dbftext);
2596 QETH_DBF_TEXT(qerr, 2, dbftext);
2597 QETH_DBF_TEXT_(qerr, 2, " F15=%02X",
2598 buf->element[15].flags & 0xff);
2599 QETH_DBF_TEXT_(qerr, 2, " F14=%02X",
2600 buf->element[14].flags & 0xff);
2601 QETH_DBF_TEXT_(qerr, 2, " qerr=%X", qdio_error);
2602 QETH_DBF_TEXT_(qerr, 2, " serr=%X", siga_error);
2603 return 1;
2604 }
2605 return 0;
2606}
2607EXPORT_SYMBOL_GPL(qeth_check_qdio_errors);
2608
/*
 * Refill inbound buffers and hand them back to the hardware.
 *
 * 'index' is the buffer the inbound handler just finished with.  The
 * number of requeueable buffers is derived from the distance between
 * index and next_buf_to_init (modulo the ring size); nothing happens
 * until that count reaches the requeue threshold, to save SIGA
 * instructions.  If fewer buffers than expected could be re-armed we
 * are short on pool pages, so force_alloc_skb is raised to make the
 * receive path fall back to plain skb allocation for a while.
 */
void qeth_queue_input_buffer(struct qeth_card *card, int index)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	int count;
	int i;
	int rc;
	int newcount = 0;

	QETH_DBF_TEXT(trace, 6, "queinbuf");
	count = (index < queue->next_buf_to_init)?
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init - index) :
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are in memory shortage so we switch back to
			   traditional skb allocation and drop packages */
			if (!atomic_read(&card->force_alloc_skb) &&
			    net_ratelimit())
				PRINT_WARN("Switch to alloc skb\n");
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			/* pool is healthy again; count down towards
			 * re-enabling scatter-gather receive */
			if ((atomic_read(&card->force_alloc_skb) == 1) &&
			    net_ratelimit())
				PRINT_WARN("Switch to sg\n");
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		/*
		 * according to old code it should be avoided to requeue all
		 * 128 buffers in order to benefit from PCI avoidance.
		 * this function keeps at least one buffer (the buffer at
		 * 'index') un-requeued -> this buffer is the first buffer that
		 * will be requeued the next time
		 */
		if (card->options.performance_stats) {
			card->perf_stats.inbound_do_qdio_cnt++;
			card->perf_stats.inbound_do_qdio_start_time =
				qeth_get_micros();
		}
		rc = do_QDIO(CARD_DDEV(card),
			     QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
			     0, queue->next_buf_to_init, count, NULL);
		if (card->options.performance_stats)
			card->perf_stats.inbound_do_qdio_time +=
				qeth_get_micros() -
				card->perf_stats.inbound_do_qdio_start_time;
		if (rc) {
			PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
				   "return %i (device %s).\n",
				   rc, CARD_DDEV_ID(card));
			QETH_DBF_TEXT(trace, 2, "qinberr");
			QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card));
		}
		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
					  QDIO_MAX_BUFFERS_PER_Q;
	}
}
EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
2681
/*
 * Classify the error state of a completed outbound buffer.
 *
 * The SIGA condition code (low two bits of siga_error) selects the
 * severity; cc 2 is further refined via the B-bit and the SBAL flag
 * byte 15.  Returns one of the QETH_SEND_ERROR_* codes; the caller
 * triggers a recovery for QETH_SEND_ERROR_KICK_IT.
 */
static int qeth_handle_send_error(struct qeth_card *card,
		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err,
		unsigned int siga_err)
{
	int sbalf15 = buffer->buffer->element[15].flags & 0xff;
	int cc = siga_err & 3;

	QETH_DBF_TEXT(trace, 6, "hdsnderr");
	qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr");
	switch (cc) {
	case 0:
		/* SIGA succeeded; any remaining qdio error is a link issue */
		if (qdio_err) {
			QETH_DBF_TEXT(trace, 1, "lnkfail");
			QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card));
			QETH_DBF_TEXT_(trace, 1, "%04x %02x",
				       (u16)qdio_err, (u8)sbalf15);
			return QETH_SEND_ERROR_LINK_FAILURE;
		}
		return QETH_SEND_ERROR_NONE;
	case 2:
		if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
			QETH_DBF_TEXT(trace, 1, "SIGAcc2B");
			QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card));
			return QETH_SEND_ERROR_KICK_IT;
		}
		if ((sbalf15 >= 15) && (sbalf15 <= 31))
			return QETH_SEND_ERROR_RETRY;
		return QETH_SEND_ERROR_LINK_FAILURE;
		/* look at qdio_error and sbalf 15 */
	case 1:
		QETH_DBF_TEXT(trace, 1, "SIGAcc1");
		QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card));
		return QETH_SEND_ERROR_LINK_FAILURE;
	case 3:
	default:
		QETH_DBF_TEXT(trace, 1, "SIGAcc3");
		QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card));
		return QETH_SEND_ERROR_KICK_IT;
	}
}
2722
2723/*
2724 * Switched to packing state if the number of used buffers on a queue
2725 * reaches a certain limit.
2726 */
2727static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2728{
2729 if (!queue->do_pack) {
2730 if (atomic_read(&queue->used_buffers)
2731 >= QETH_HIGH_WATERMARK_PACK){
2732 /* switch non-PACKING -> PACKING */
2733 QETH_DBF_TEXT(trace, 6, "np->pack");
2734 if (queue->card->options.performance_stats)
2735 queue->card->perf_stats.sc_dp_p++;
2736 queue->do_pack = 1;
2737 }
2738 }
2739}
2740
2741/*
2742 * Switches from packing to non-packing mode. If there is a packing
2743 * buffer on the queue this buffer will be prepared to be flushed.
2744 * In that case 1 is returned to inform the caller. If no buffer
2745 * has to be flushed, zero is returned.
2746 */
2747static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2748{
2749 struct qeth_qdio_out_buffer *buffer;
2750 int flush_count = 0;
2751
2752 if (queue->do_pack) {
2753 if (atomic_read(&queue->used_buffers)
2754 <= QETH_LOW_WATERMARK_PACK) {
2755 /* switch PACKING -> non-PACKING */
2756 QETH_DBF_TEXT(trace, 6, "pack->np");
2757 if (queue->card->options.performance_stats)
2758 queue->card->perf_stats.sc_p_dp++;
2759 queue->do_pack = 0;
2760 /* flush packing buffers */
2761 buffer = &queue->bufs[queue->next_buf_to_fill];
2762 if ((atomic_read(&buffer->state) ==
2763 QETH_QDIO_BUF_EMPTY) &&
2764 (buffer->next_element_to_fill > 0)) {
2765 atomic_set(&buffer->state,
2766 QETH_QDIO_BUF_PRIMED);
2767 flush_count++;
2768 queue->next_buf_to_fill =
2769 (queue->next_buf_to_fill + 1) %
2770 QDIO_MAX_BUFFERS_PER_Q;
2771 }
2772 }
2773 }
2774 return flush_count;
2775}
2776
2777/*
2778 * Called to flush a packing buffer if no more pci flags are on the queue.
2779 * Checks if there is a packing buffer and prepares it to be flushed.
2780 * In that case returns 1, otherwise zero.
2781 */
2782static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2783{
2784 struct qeth_qdio_out_buffer *buffer;
2785
2786 buffer = &queue->bufs[queue->next_buf_to_fill];
2787 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
2788 (buffer->next_element_to_fill > 0)) {
2789 /* it's a packing buffer */
2790 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
2791 queue->next_buf_to_fill =
2792 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
2793 return 1;
2794 }
2795 return 0;
2796}
2797
/*
 * Hand 'count' primed outbound buffers, starting at 'index', to the
 * hardware via do_QDIO().
 *
 * For non-HiperSockets devices a PCI request flag (0x40 in element 0)
 * may be set on a buffer so the adapter raises an interrupt later -
 * either because the queue is approaching the packing watermark, or
 * because no PCI is outstanding while packing.  A do_QDIO() failure is
 * considered fatal and schedules a card recovery.
 */
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
		int index, int count)
{
	struct qeth_qdio_out_buffer *buf;
	int rc;
	int i;
	unsigned int qdio_flags;

	QETH_DBF_TEXT(trace, 6, "flushbuf");

	for (i = index; i < index + count; ++i) {
		buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
		/* close off the SBAL chain of each buffer */
		buf->buffer->element[buf->next_element_to_fill - 1].flags |=
				SBAL_FLAGS_LAST_ENTRY;

		/* HiperSockets devices do not use PCI request flags */
		if (queue->card->info.type == QETH_CARD_TYPE_IQD)
			continue;

		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].flags |= 0x40;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * there's no outstanding PCI any more, so we
				 * have to request a PCI to be sure the PCI
				 * will wake at some time in the future then we
				 * can flush packed buffers that might still be
				 * hanging around, which can happen if no
				 * further send was requested by the stack
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].flags |= 0x40;
			}
		}
	}

	queue->card->dev->trans_start = jiffies;
	if (queue->card->options.performance_stats) {
		queue->card->perf_stats.outbound_do_qdio_cnt++;
		queue->card->perf_stats.outbound_do_qdio_start_time =
			qeth_get_micros();
	}
	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
	if (under_int)
		qdio_flags |= QDIO_FLAG_UNDER_INTERRUPT;
	if (atomic_read(&queue->set_pci_flags_count))
		qdio_flags |= QDIO_FLAG_PCI_OUT;
	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
		     queue->queue_no, index, count, NULL);
	if (queue->card->options.performance_stats)
		queue->card->perf_stats.outbound_do_qdio_time +=
			qeth_get_micros() -
			queue->card->perf_stats.outbound_do_qdio_start_time;
	if (rc) {
		QETH_DBF_TEXT(trace, 2, "flushbuf");
		QETH_DBF_TEXT_(trace, 2, " err%d", rc);
		QETH_DBF_TEXT_(trace, 2, "%s", CARD_DDEV_ID(queue->card));
		queue->card->stats.tx_errors += count;
		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
		return;
	}
	atomic_add(count, &queue->used_buffers);
	if (queue->card->options.performance_stats)
		queue->card->perf_stats.bufs_sent += count;
}
2873
/*
 * Housekeeping for an outbound queue after completions: possibly leave
 * packing mode and/or flush a lingering packing buffer.  Only acts if
 * it can atomically take the queue lock (state UNLOCKED ->
 * LOCKED_FLUSH); if the xmit path holds the lock, it will do the
 * equivalent work itself.
 */
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	int index;
	int flush_cnt = 0;
	int q_was_packing = 0;

	/*
	 * check if we have to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
				QETH_OUT_Q_UNLOCKED) {
			/*
			 * If we get in here, there was no action in
			 * do_send_packet. So, we check if there is a
			 * packing buffer to be flushed here.
			 */
			netif_stop_queue(queue->card->dev);
			index = queue->next_buf_to_fill;
			q_was_packing = queue->do_pack;
			/* queue->do_pack may change */
			barrier();
			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
			if (!flush_cnt &&
			    !atomic_read(&queue->set_pci_flags_count))
				flush_cnt +=
					qeth_flush_buffers_on_no_pci(queue);
			if (queue->card->options.performance_stats &&
			    q_was_packing)
				queue->card->perf_stats.bufs_sent_pack +=
					flush_cnt;
			if (flush_cnt)
				qeth_flush_buffers(queue, 1, index, flush_cnt);
			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		}
	}
}
2913
/*
 * QDIO outbound completion handler (runs in interrupt context).
 *
 * On an activate-check condition the netdev queue is stopped and a card
 * recovery is scheduled.  Otherwise each completed buffer is checked
 * for send errors (QETH_SEND_ERROR_KICK_IT also triggers recovery) and
 * released, the used-buffer count is reduced, queue housekeeping runs
 * for non-HiperSockets cards, and the netdev queue is woken.
 */
void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
		unsigned int qdio_error, unsigned int siga_error,
		unsigned int __queue, int first_element, int count,
		unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *) card_ptr;
	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
	struct qeth_qdio_out_buffer *buffer;
	int i;

	QETH_DBF_TEXT(trace, 6, "qdouhdl");
	if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
		if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
			QETH_DBF_TEXT(trace, 2, "achkcond");
			QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card));
			QETH_DBF_TEXT_(trace, 2, "%08x", status);
			netif_stop_queue(card->dev);
			qeth_schedule_recovery(card);
			return;
		}
	}
	if (card->options.performance_stats) {
		card->perf_stats.outbound_handler_cnt++;
		card->perf_stats.outbound_handler_start_time =
			qeth_get_micros();
	}
	for (i = first_element; i < (first_element + count); ++i) {
		buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
		/*we only handle the KICK_IT error by doing a recovery */
		if (qeth_handle_send_error(card, buffer,
					   qdio_error, siga_error)
				== QETH_SEND_ERROR_KICK_IT){
			netif_stop_queue(card->dev);
			qeth_schedule_recovery(card);
			return;
		}
		qeth_clear_output_buffer(queue, buffer);
	}
	atomic_sub(count, &queue->used_buffers);
	/* check if we need to do something on this outbound queue */
	if (card->info.type != QETH_CARD_TYPE_IQD)
		qeth_check_outbound_queue(queue);

	netif_wake_queue(queue->card->dev);
	if (card->options.performance_stats)
		card->perf_stats.outbound_handler_time += qeth_get_micros() -
			card->perf_stats.outbound_handler_start_time;
}
EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
2963
2964int qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
2965{
2966 int cast_type = RTN_UNSPEC;
2967
2968 if (card->info.type == QETH_CARD_TYPE_OSN)
2969 return cast_type;
2970
2971 if (skb->dst && skb->dst->neighbour) {
2972 cast_type = skb->dst->neighbour->type;
2973 if ((cast_type == RTN_BROADCAST) ||
2974 (cast_type == RTN_MULTICAST) ||
2975 (cast_type == RTN_ANYCAST))
2976 return cast_type;
2977 else
2978 return RTN_UNSPEC;
2979 }
2980 /* try something else */
2981 if (skb->protocol == ETH_P_IPV6)
2982 return (skb_network_header(skb)[24] == 0xff) ?
2983 RTN_MULTICAST : 0;
2984 else if (skb->protocol == ETH_P_IP)
2985 return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
2986 RTN_MULTICAST : 0;
2987 /* ... */
2988 if (!memcmp(skb->data, skb->dev->broadcast, 6))
2989 return RTN_BROADCAST;
2990 else {
2991 u16 hdr_mac;
2992
2993 hdr_mac = *((u16 *)skb->data);
2994 /* tr multicast? */
2995 switch (card->info.link_type) {
2996 case QETH_LINK_TYPE_HSTR:
2997 case QETH_LINK_TYPE_LANE_TR:
2998 if ((hdr_mac == QETH_TR_MAC_NC) ||
2999 (hdr_mac == QETH_TR_MAC_C))
3000 return RTN_MULTICAST;
3001 break;
3002 /* eth or so multicast? */
3003 default:
3004 if ((hdr_mac == QETH_ETH_MAC_V4) ||
3005 (hdr_mac == QETH_ETH_MAC_V6))
3006 return RTN_MULTICAST;
3007 }
3008 }
3009 return cast_type;
3010}
3011EXPORT_SYMBOL_GPL(qeth_get_cast_type);
3012
/*
 * Select the outbound queue for an skb.
 *
 * Non-IP traffic on OSA Express always uses the default queue.  On
 * 4-queue devices, multicast-type traffic can be steered by the
 * is_multicast_different mask, and IPv4 traffic can be prioritized by
 * TOS bits or by IP precedence, depending on the configured
 * prio-queueing mode (lower queue number = higher priority).
 * Everything else falls back to the default queue.
 */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
			int ipv, int cast_type)
{
	if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
		return card->qdio.default_out_queue;
	switch (card->qdio.no_out_queues) {
	case 4:
		if (cast_type && card->info.is_multicast_different)
			return card->info.is_multicast_different &
				(card->qdio.no_out_queues - 1);
		if (card->qdio.do_prio_queueing && (ipv == 4)) {
			const u8 tos = ip_hdr(skb)->tos;

			if (card->qdio.do_prio_queueing ==
				QETH_PRIO_Q_ING_TOS) {
				if (tos & IP_TOS_NOTIMPORTANT)
					return 3;
				if (tos & IP_TOS_HIGHRELIABILITY)
					return 2;
				if (tos & IP_TOS_HIGHTHROUGHPUT)
					return 1;
				if (tos & IP_TOS_LOWDELAY)
					return 0;
			}
			if (card->qdio.do_prio_queueing ==
				QETH_PRIO_Q_ING_PREC)
				/* map precedence 0..3 to queues 3..0 */
				return 3 - (tos >> 6);
		} else if (card->qdio.do_prio_queueing && (ipv == 6)) {
			/* TODO: IPv6!!! */
		}
		return card->qdio.default_out_queue;
	case 1: /* fallthrough for single-out-queue 1920-device */
	default:
		return card->qdio.default_out_queue;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3050
/*
 * Free new_skb only if it is a reallocated copy; if it is still the
 * caller's original skb, leave it alone (the caller keeps ownership).
 */
static void __qeth_free_new_skb(struct sk_buff *orig_skb,
		struct sk_buff *new_skb)
{
	if (orig_skb != new_skb)
		dev_kfree_skb_any(new_skb);
}
3057
/*
 * Ensure the skb has at least 'size' bytes of headroom.  Returns the
 * original skb if it already does, a reallocated copy otherwise, or
 * NULL if the reallocation fails (an error is logged).  The caller
 * must free the copy via __qeth_free_new_skb() when done.
 *
 * Fix: the error message was missing its trailing newline, so the next
 * kernel log line would be appended to it.
 */
static inline struct sk_buff *qeth_realloc_headroom(struct qeth_card *card,
		struct sk_buff *skb, int size)
{
	struct sk_buff *new_skb = skb;

	if (skb_headroom(skb) >= size)
		return skb;
	new_skb = skb_realloc_headroom(skb, size);
	if (!new_skb)
		PRINT_ERR("Could not realloc headroom for qeth_hdr "
			  "on interface %s\n", QETH_CARD_IFNAME(card));
	return new_skb;
}
3071
/*
 * Prepare an skb for transmission: make room for a qeth_hdr in the
 * headroom (reallocating the skb if necessary) and push the header,
 * returning its address via *hdr.
 *
 * Returns the (possibly reallocated) skb, or NULL on failure; any
 * reallocated copy is freed on the failure path, the caller's original
 * skb is never freed here.
 */
struct sk_buff *qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
		struct qeth_hdr **hdr)
{
	struct sk_buff *new_skb;

	QETH_DBF_TEXT(trace, 6, "prepskb");

	new_skb = qeth_realloc_headroom(card, skb,
					sizeof(struct qeth_hdr));
	if (!new_skb)
		return NULL;

	*hdr = ((struct qeth_hdr *)qeth_push_skb(card, new_skb,
						 sizeof(struct qeth_hdr)));
	if (*hdr == NULL) {
		__qeth_free_new_skb(skb, new_skb);
		return NULL;
	}
	return new_skb;
}
EXPORT_SYMBOL_GPL(qeth_prepare_skb);
3093
/*
 * Compute the number of SBAL elements needed to transmit skb: one per
 * page fragment (plus one for the linear part) when fragmented, or
 * enough page-sized chunks for the linear data otherwise (hdr marks
 * where the data starts within its page).
 *
 * Returns the element count, or 0 (with an error logged) if the total
 * including the caller's 'elems' would exceed the per-buffer limit.
 */
int qeth_get_elements_no(struct qeth_card *card, void *hdr,
		struct sk_buff *skb, int elems)
{
	int elements_needed = 0;

	if (skb_shinfo(skb)->nr_frags > 0)
		elements_needed = (skb_shinfo(skb)->nr_frags + 1);
	if (elements_needed == 0)
		elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
			+ skb->len) >> PAGE_SHIFT);
	if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
		PRINT_ERR("Invalid size of IP packet "
			"(Number=%d / Length=%d). Discarded.\n",
			(elements_needed+elems), skb->len);
		return 0;
	}
	return elements_needed;
}
EXPORT_SYMBOL_GPL(qeth_get_elements_no);
3113
3114static void __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
3115 int is_tso, int *next_element_to_fill)
3116{
3117 int length = skb->len;
3118 int length_here;
3119 int element;
3120 char *data;
3121 int first_lap ;
3122
3123 element = *next_element_to_fill;
3124 data = skb->data;
3125 first_lap = (is_tso == 0 ? 1 : 0);
3126
3127 while (length > 0) {
3128 /* length_here is the remaining amount of data in this page */
3129 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
3130 if (length < length_here)
3131 length_here = length;
3132
3133 buffer->element[element].addr = data;
3134 buffer->element[element].length = length_here;
3135 length -= length_here;
3136 if (!length) {
3137 if (first_lap)
3138 buffer->element[element].flags = 0;
3139 else
3140 buffer->element[element].flags =
3141 SBAL_FLAGS_LAST_FRAG;
3142 } else {
3143 if (first_lap)
3144 buffer->element[element].flags =
3145 SBAL_FLAGS_FIRST_FRAG;
3146 else
3147 buffer->element[element].flags =
3148 SBAL_FLAGS_MIDDLE_FRAG;
3149 }
3150 data += length_here;
3151 element++;
3152 first_lap = 0;
3153 }
3154 *next_element_to_fill = element;
3155}
3156
/*
 * Map @skb into the QDIO output buffer @buf of @queue.
 *
 * The skb is queued on the buffer's skb_list with an extra reference so
 * it survives until buffer completion.  For TSO frames the protocol
 * header gets an SBAL element of its own and skb->data/skb->len are
 * advanced past it before the payload is mapped.
 *
 * Returns 1 if the buffer was set to PRIMED and must be flushed by the
 * caller, 0 if it remains open for further packing.
 */
static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf, struct sk_buff *skb)
{
	struct qdio_buffer *buffer;
	struct qeth_hdr_tso *hdr;
	int flush_cnt = 0, hdr_len, large_send = 0;

	QETH_DBF_TEXT(trace, 6, "qdfillbf");

	buffer = buf->buffer;
	/* hold the skb until the hardware has consumed the buffer */
	atomic_inc(&skb->users);
	skb_queue_tail(&buf->skb_list, skb);

	hdr = (struct qeth_hdr_tso *) skb->data;
	/*check first on TSO ....*/
	if (hdr->hdr.hdr.l3.id == QETH_HEADER_TYPE_TSO) {
		int element = buf->next_element_to_fill;

		hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
		/*fill first buffer entry only with header information */
		buffer->element[element].addr = skb->data;
		buffer->element[element].length = hdr_len;
		buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
		buf->next_element_to_fill++;
		/* strip the header; the fill helpers below only see payload */
		skb->data += hdr_len;
		skb->len -= hdr_len;
		large_send = 1;
	}
	if (skb_shinfo(skb)->nr_frags == 0)
		__qeth_fill_buffer(skb, buffer, large_send,
				(int *)&buf->next_element_to_fill);
	else
		__qeth_fill_buffer_frag(skb, buffer, large_send,
			(int *)&buf->next_element_to_fill);

	if (!queue->do_pack) {
		QETH_DBF_TEXT(trace, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
		flush_cnt = 1;
	} else {
		QETH_DBF_TEXT(trace, 6, "fillbfpa");
		if (queue->card->options.performance_stats)
			queue->card->perf_stats.skbs_sent_pack++;
		if (buf->next_element_to_fill >=
				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/*
			 * packed buffer if full -> set state PRIMED
			 * -> will be flushed
			 */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt = 1;
		}
	}
	return flush_cnt;
}
3213
/*
 * Fast (non-packing) transmit path: place @skb (or the EDDP context
 * @ctx, if non-NULL) into the next output buffer of @queue and flush it
 * immediately.
 *
 * Serializes against other senders and the output handler by spinning
 * on queue->state until it can move it UNLOCKED -> LOCKED.
 *
 * Returns 0 on success, -EBUSY when the next buffer is not empty yet
 * or the EDDP context does not fit.
 */
int qeth_do_send_packet_fast(struct qeth_card *card,
		struct qeth_qdio_out_q *queue, struct sk_buff *skb,
		struct qeth_hdr *hdr, int elements_needed,
		struct qeth_eddp_context *ctx)
{
	struct qeth_qdio_out_buffer *buffer;
	int buffers_needed = 0;
	int flush_cnt = 0;
	int index;

	QETH_DBF_TEXT(trace, 6, "dosndpfa");

	/* spin until we get the queue ... */
	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
	/* ... now we've got the queue */
	index = queue->next_buf_to_fill;
	buffer = &queue->bufs[queue->next_buf_to_fill];
	/*
	 * check if buffer is empty to make sure that we do not 'overtake'
	 * ourselves and try to fill a buffer that is already primed
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		goto out;
	if (ctx == NULL)
		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
			QDIO_MAX_BUFFERS_PER_Q;
	else {
		/* an EDDP context may span several buffers */
		buffers_needed = qeth_eddp_check_buffers_for_context(queue,
				ctx);
		if (buffers_needed < 0)
			goto out;
		queue->next_buf_to_fill =
			(queue->next_buf_to_fill + buffers_needed) %
			QDIO_MAX_BUFFERS_PER_Q;
	}
	/* unlock before the (slow) fill/flush; the claimed index is ours */
	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
	if (ctx == NULL) {
		qeth_fill_buffer(queue, buffer, skb);
		qeth_flush_buffers(queue, 0, index, 1);
	} else {
		flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
		WARN_ON(buffers_needed != flush_cnt);
		qeth_flush_buffers(queue, 0, index, flush_cnt);
	}
	return 0;
out:
	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
	return -EBUSY;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
3265
/*
 * Packing-aware transmit path: place @skb (or EDDP context @ctx) into
 * the current output buffer of @queue, switching the queue between
 * packing and non-packing mode as load dictates, and flush primed
 * buffers.
 *
 * @elements_needed: SBAL elements @skb requires (from
 *	qeth_get_elements_no()); used to decide if it still fits into a
 *	partially packed buffer.
 *
 * Returns 0 on success, -EBUSY if no empty buffer is available or the
 * EDDP context cannot be accommodated.
 */
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
		struct sk_buff *skb, struct qeth_hdr *hdr,
		int elements_needed, struct qeth_eddp_context *ctx)
{
	struct qeth_qdio_out_buffer *buffer;
	int start_index;
	int flush_count = 0;
	int do_pack = 0;
	int tmp;
	int rc = 0;

	QETH_DBF_TEXT(trace, 6, "dosndpkt");

	/* spin until we get the queue ... */
	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
	start_index = queue->next_buf_to_fill;
	buffer = &queue->bufs[queue->next_buf_to_fill];
	/*
	 * check if buffer is empty to make sure that we do not 'overtake'
	 * ourselves and try to fill a buffer that is already primed
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		return -EBUSY;
	}
	/* check if we need to switch packing state of this queue */
	qeth_switch_to_packing_if_needed(queue);
	if (queue->do_pack) {
		do_pack = 1;
		if (ctx == NULL) {
			/* does packet fit in current buffer? */
			if ((QETH_MAX_BUFFER_ELEMENTS(card) -
			    buffer->next_element_to_fill) < elements_needed) {
				/* ... no -> set state PRIMED */
				atomic_set(&buffer->state,
					QETH_QDIO_BUF_PRIMED);
				flush_count++;
				queue->next_buf_to_fill =
					(queue->next_buf_to_fill + 1) %
					QDIO_MAX_BUFFERS_PER_Q;
				buffer = &queue->bufs[queue->next_buf_to_fill];
				/* we did a step forward, so check buffer state
				 * again */
				if (atomic_read(&buffer->state) !=
						QETH_QDIO_BUF_EMPTY){
					qeth_flush_buffers(queue, 0,
						start_index, flush_count);
					atomic_set(&queue->state,
						QETH_OUT_Q_UNLOCKED);
					return -EBUSY;
				}
			}
		} else {
			/* check if we have enough elements (including following
			 * free buffers) to handle eddp context */
			if (qeth_eddp_check_buffers_for_context(queue, ctx)
				< 0) {
				if (net_ratelimit())
					PRINT_WARN("eddp tx_dropped 1\n");
				rc = -EBUSY;
				goto out;
			}
		}
	}
	if (ctx == NULL)
		tmp = qeth_fill_buffer(queue, buffer, skb);
	else {
		tmp = qeth_eddp_fill_buffer(queue, ctx,
			queue->next_buf_to_fill);
		if (tmp < 0) {
			PRINT_ERR("eddp tx_dropped 2\n");
			rc = -EBUSY;
			goto out;
		}
	}
	/* tmp = number of buffers primed by the fill above */
	queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
				  QDIO_MAX_BUFFERS_PER_Q;
	flush_count += tmp;
out:
	if (flush_count)
		qeth_flush_buffers(queue, 0, start_index, flush_count);
	else if (!atomic_read(&queue->set_pci_flags_count))
		atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
	/*
	 * queue->state will go from LOCKED -> UNLOCKED or from
	 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
	 * (switch packing state or flush buffer to get another pci flag out).
	 * In that case we will enter this loop
	 */
	while (atomic_dec_return(&queue->state)) {
		flush_count = 0;
		start_index = queue->next_buf_to_fill;
		/* check if we can go back to non-packing state */
		flush_count += qeth_switch_to_nonpacking_if_needed(queue);
		/*
		 * check if we need to flush a packing buffer to get a pci
		 * flag out on the queue
		 */
		if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
			flush_count += qeth_flush_buffers_on_no_pci(queue);
		if (flush_count)
			qeth_flush_buffers(queue, 0, start_index, flush_count);
	}
	/* at this point the queue is UNLOCKED again */
	if (queue->card->options.performance_stats && do_pack)
		queue->card->perf_stats.bufs_sent_pack += flush_count;

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);
3377
3378static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
3379 struct qeth_reply *reply, unsigned long data)
3380{
3381 struct qeth_ipa_cmd *cmd;
3382 struct qeth_ipacmd_setadpparms *setparms;
3383
3384 QETH_DBF_TEXT(trace, 4, "prmadpcb");
3385
3386 cmd = (struct qeth_ipa_cmd *) data;
3387 setparms = &(cmd->data.setadapterparms);
3388
3389 qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
3390 if (cmd->hdr.return_code) {
3391 QETH_DBF_TEXT_(trace, 4, "prmrc%2.2x", cmd->hdr.return_code);
3392 setparms->data.mode = SET_PROMISC_MODE_OFF;
3393 }
3394 card->info.promisc_mode = setparms->data.mode;
3395 return 0;
3396}
3397
3398void qeth_setadp_promisc_mode(struct qeth_card *card)
3399{
3400 enum qeth_ipa_promisc_modes mode;
3401 struct net_device *dev = card->dev;
3402 struct qeth_cmd_buffer *iob;
3403 struct qeth_ipa_cmd *cmd;
3404
3405 QETH_DBF_TEXT(trace, 4, "setprom");
3406
3407 if (((dev->flags & IFF_PROMISC) &&
3408 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
3409 (!(dev->flags & IFF_PROMISC) &&
3410 (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
3411 return;
3412 mode = SET_PROMISC_MODE_OFF;
3413 if (dev->flags & IFF_PROMISC)
3414 mode = SET_PROMISC_MODE_ON;
3415 QETH_DBF_TEXT_(trace, 4, "mode:%x", mode);
3416
3417 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
3418 sizeof(struct qeth_ipacmd_setadpparms));
3419 cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
3420 cmd->data.setadapterparms.data.mode = mode;
3421 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
3422}
3423EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
3424
3425int qeth_change_mtu(struct net_device *dev, int new_mtu)
3426{
3427 struct qeth_card *card;
3428 char dbf_text[15];
3429
3430 card = netdev_priv(dev);
3431
3432 QETH_DBF_TEXT(trace, 4, "chgmtu");
3433 sprintf(dbf_text, "%8x", new_mtu);
3434 QETH_DBF_TEXT(trace, 4, dbf_text);
3435
3436 if (new_mtu < 64)
3437 return -EINVAL;
3438 if (new_mtu > 65535)
3439 return -EINVAL;
3440 if ((!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) &&
3441 (!qeth_mtu_is_valid(card, new_mtu)))
3442 return -EINVAL;
3443 dev->mtu = new_mtu;
3444 return 0;
3445}
3446EXPORT_SYMBOL_GPL(qeth_change_mtu);
3447
3448struct net_device_stats *qeth_get_stats(struct net_device *dev)
3449{
3450 struct qeth_card *card;
3451
3452 card = netdev_priv(dev);
3453
3454 QETH_DBF_TEXT(trace, 5, "getstat");
3455
3456 return &card->stats;
3457}
3458EXPORT_SYMBOL_GPL(qeth_get_stats);
3459
3460static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
3461 struct qeth_reply *reply, unsigned long data)
3462{
3463 struct qeth_ipa_cmd *cmd;
3464
3465 QETH_DBF_TEXT(trace, 4, "chgmaccb");
3466
3467 cmd = (struct qeth_ipa_cmd *) data;
3468 if (!card->options.layer2 ||
3469 !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
3470 memcpy(card->dev->dev_addr,
3471 &cmd->data.setadapterparms.data.change_addr.addr,
3472 OSA_ADDR_LEN);
3473 card->info.mac_bits |= QETH_LAYER2_MAC_READ;
3474 }
3475 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
3476 return 0;
3477}
3478
3479int qeth_setadpparms_change_macaddr(struct qeth_card *card)
3480{
3481 int rc;
3482 struct qeth_cmd_buffer *iob;
3483 struct qeth_ipa_cmd *cmd;
3484
3485 QETH_DBF_TEXT(trace, 4, "chgmac");
3486
3487 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
3488 sizeof(struct qeth_ipacmd_setadpparms));
3489 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3490 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
3491 cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
3492 memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
3493 card->dev->dev_addr, OSA_ADDR_LEN);
3494 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
3495 NULL);
3496 return rc;
3497}
3498EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
3499
3500void qeth_tx_timeout(struct net_device *dev)
3501{
3502 struct qeth_card *card;
3503
3504 card = netdev_priv(dev);
3505 card->stats.tx_errors++;
3506 qeth_schedule_recovery(card);
3507}
3508EXPORT_SYMBOL_GPL(qeth_tx_timeout);
3509
3510int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
3511{
3512 struct qeth_card *card = netdev_priv(dev);
3513 int rc = 0;
3514
3515 switch (regnum) {
3516 case MII_BMCR: /* Basic mode control register */
3517 rc = BMCR_FULLDPLX;
3518 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
3519 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
3520 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
3521 rc |= BMCR_SPEED100;
3522 break;
3523 case MII_BMSR: /* Basic mode status register */
3524 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
3525 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
3526 BMSR_100BASE4;
3527 break;
3528 case MII_PHYSID1: /* PHYS ID 1 */
3529 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
3530 dev->dev_addr[2];
3531 rc = (rc >> 5) & 0xFFFF;
3532 break;
3533 case MII_PHYSID2: /* PHYS ID 2 */
3534 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
3535 break;
3536 case MII_ADVERTISE: /* Advertisement control reg */
3537 rc = ADVERTISE_ALL;
3538 break;
3539 case MII_LPA: /* Link partner ability reg */
3540 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
3541 LPA_100BASE4 | LPA_LPACK;
3542 break;
3543 case MII_EXPANSION: /* Expansion register */
3544 break;
3545 case MII_DCOUNTER: /* disconnect counter */
3546 break;
3547 case MII_FCSCOUNTER: /* false carrier counter */
3548 break;
3549 case MII_NWAYTEST: /* N-way auto-neg test register */
3550 break;
3551 case MII_RERRCOUNTER: /* rx error counter */
3552 rc = card->stats.rx_errors;
3553 break;
3554 case MII_SREVISION: /* silicon revision */
3555 break;
3556 case MII_RESV1: /* reserved 1 */
3557 break;
3558 case MII_LBRERROR: /* loopback, rx, bypass error */
3559 break;
3560 case MII_PHYADDR: /* physical address */
3561 break;
3562 case MII_RESV2: /* reserved 2 */
3563 break;
3564 case MII_TPISTATUS: /* TPI status for 10mbps */
3565 break;
3566 case MII_NCONFIG: /* network interface config */
3567 break;
3568 default:
3569 break;
3570 }
3571 return rc;
3572}
3573EXPORT_SYMBOL_GPL(qeth_mdio_read);
3574
3575static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
3576 struct qeth_cmd_buffer *iob, int len,
3577 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
3578 unsigned long),
3579 void *reply_param)
3580{
3581 u16 s1, s2;
3582
3583 QETH_DBF_TEXT(trace, 4, "sendsnmp");
3584
3585 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
3586 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
3587 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3588 /* adjust PDU length fields in IPA_PDU_HEADER */
3589 s1 = (u32) IPA_PDU_HEADER_SIZE + len;
3590 s2 = (u32) len;
3591 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
3592 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
3593 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
3594 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
3595 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
3596 reply_cb, reply_param);
3597}
3598
/*
 * Reply callback for SNMP commands: append the (possibly multi-part)
 * reply data to the userspace staging buffer in reply->param (a
 * struct qeth_arp_query_info).
 *
 * Returns 1 while more reply parts are expected (seq_no < used_total),
 * 0 when the transfer is complete or aborted; errors are signalled to
 * the issuer via cmd->hdr.return_code.
 */
static int qeth_snmp_command_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long sdata)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_arp_query_info *qinfo;
	struct qeth_snmp_cmd *snmp;
	unsigned char *data;
	__u16 data_len;

	QETH_DBF_TEXT(trace, 3, "snpcmdcb");

	cmd = (struct qeth_ipa_cmd *) sdata;
	/* data points back to the start of the raw PDU buffer */
	data = (unsigned char *)((char *)cmd - reply->offset);
	qinfo = (struct qeth_arp_query_info *) reply->param;
	snmp = &cmd->data.setadapterparms.data.snmp;

	if (cmd->hdr.return_code) {
		QETH_DBF_TEXT_(trace, 4, "scer1%i", cmd->hdr.return_code);
		return 0;
	}
	if (cmd->data.setadapterparms.hdr.return_code) {
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
		QETH_DBF_TEXT_(trace, 4, "scer2%i", cmd->hdr.return_code);
		return 0;
	}
	/* payload length: PDU1 length minus the leading command headers;
	 * the first part carries the full snmp command, later parts only
	 * the request portion */
	data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
	if (cmd->data.setadapterparms.hdr.seq_no == 1)
		data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
	else
		data_len -= (__u16)((char *)&snmp->request - (char *)cmd);

	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
		QETH_DBF_TEXT_(trace, 4, "scer3%i", -ENOMEM);
		cmd->hdr.return_code = -ENOMEM;
		return 0;
	}
	QETH_DBF_TEXT_(trace, 4, "snore%i",
		       cmd->data.setadapterparms.hdr.used_total);
	QETH_DBF_TEXT_(trace, 4, "sseqn%i",
		       cmd->data.setadapterparms.hdr.seq_no);
	/*copy entries to user buffer*/
	if (cmd->data.setadapterparms.hdr.seq_no == 1) {
		memcpy(qinfo->udata + qinfo->udata_offset,
		       (char *)snmp,
		       data_len + offsetof(struct qeth_snmp_cmd, data));
		qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
	} else {
		memcpy(qinfo->udata + qinfo->udata_offset,
		       (char *)&snmp->request, data_len);
	}
	qinfo->udata_offset += data_len;
	/* check if all replies received ... */
	QETH_DBF_TEXT_(trace, 4, "srtot%i",
		       cmd->data.setadapterparms.hdr.used_total);
	QETH_DBF_TEXT_(trace, 4, "srseq%i",
		       cmd->data.setadapterparms.hdr.seq_no);
	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}
3662
3663int qeth_snmp_command(struct qeth_card *card, char __user *udata)
3664{
3665 struct qeth_cmd_buffer *iob;
3666 struct qeth_ipa_cmd *cmd;
3667 struct qeth_snmp_ureq *ureq;
3668 int req_len;
3669 struct qeth_arp_query_info qinfo = {0, };
3670 int rc = 0;
3671
3672 QETH_DBF_TEXT(trace, 3, "snmpcmd");
3673
3674 if (card->info.guestlan)
3675 return -EOPNOTSUPP;
3676
3677 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
3678 (!card->options.layer2)) {
3679 PRINT_WARN("SNMP Query MIBS not supported "
3680 "on %s!\n", QETH_CARD_IFNAME(card));
3681 return -EOPNOTSUPP;
3682 }
3683 /* skip 4 bytes (data_len struct member) to get req_len */
3684 if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
3685 return -EFAULT;
3686 ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
3687 if (!ureq) {
3688 QETH_DBF_TEXT(trace, 2, "snmpnome");
3689 return -ENOMEM;
3690 }
3691 if (copy_from_user(ureq, udata,
3692 req_len + sizeof(struct qeth_snmp_ureq_hdr))) {
3693 kfree(ureq);
3694 return -EFAULT;
3695 }
3696 qinfo.udata_len = ureq->hdr.data_len;
3697 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
3698 if (!qinfo.udata) {
3699 kfree(ureq);
3700 return -ENOMEM;
3701 }
3702 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
3703
3704 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
3705 QETH_SNMP_SETADP_CMDLENGTH + req_len);
3706 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3707 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
3708 rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
3709 qeth_snmp_command_cb, (void *)&qinfo);
3710 if (rc)
3711 PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
3712 QETH_CARD_IFNAME(card), rc);
3713 else {
3714 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
3715 rc = -EFAULT;
3716 }
3717
3718 kfree(ureq);
3719 kfree(qinfo.udata);
3720 return rc;
3721}
3722EXPORT_SYMBOL_GPL(qeth_snmp_command);
3723
3724static inline int qeth_get_qdio_q_format(struct qeth_card *card)
3725{
3726 switch (card->info.type) {
3727 case QETH_CARD_TYPE_IQD:
3728 return 2;
3729 default:
3730 return 0;
3731 }
3732}
3733
3734static int qeth_qdio_establish(struct qeth_card *card)
3735{
3736 struct qdio_initialize init_data;
3737 char *qib_param_field;
3738 struct qdio_buffer **in_sbal_ptrs;
3739 struct qdio_buffer **out_sbal_ptrs;
3740 int i, j, k;
3741 int rc = 0;
3742
3743 QETH_DBF_TEXT(setup, 2, "qdioest");
3744
3745 qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
3746 GFP_KERNEL);
3747 if (!qib_param_field)
3748 return -ENOMEM;
3749
3750 qeth_create_qib_param_field(card, qib_param_field);
3751 qeth_create_qib_param_field_blkt(card, qib_param_field);
3752
3753 in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
3754 GFP_KERNEL);
3755 if (!in_sbal_ptrs) {
3756 kfree(qib_param_field);
3757 return -ENOMEM;
3758 }
3759 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
3760 in_sbal_ptrs[i] = (struct qdio_buffer *)
3761 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
3762
3763 out_sbal_ptrs =
3764 kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
3765 sizeof(void *), GFP_KERNEL);
3766 if (!out_sbal_ptrs) {
3767 kfree(in_sbal_ptrs);
3768 kfree(qib_param_field);
3769 return -ENOMEM;
3770 }
3771 for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
3772 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
3773 out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
3774 card->qdio.out_qs[i]->bufs[j].buffer);
3775 }
3776
3777 memset(&init_data, 0, sizeof(struct qdio_initialize));
3778 init_data.cdev = CARD_DDEV(card);
3779 init_data.q_format = qeth_get_qdio_q_format(card);
3780 init_data.qib_param_field_format = 0;
3781 init_data.qib_param_field = qib_param_field;
3782 init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
3783 init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
3784 init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
3785 init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
3786 init_data.no_input_qs = 1;
3787 init_data.no_output_qs = card->qdio.no_out_queues;
3788 init_data.input_handler = card->discipline.input_handler;
3789 init_data.output_handler = card->discipline.output_handler;
3790 init_data.int_parm = (unsigned long) card;
3791 init_data.flags = QDIO_INBOUND_0COPY_SBALS |
3792 QDIO_OUTBOUND_0COPY_SBALS |
3793 QDIO_USE_OUTBOUND_PCIS;
3794 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3795 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3796
3797 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
3798 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
3799 rc = qdio_initialize(&init_data);
3800 if (rc)
3801 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
3802 }
3803 kfree(out_sbal_ptrs);
3804 kfree(in_sbal_ptrs);
3805 kfree(qib_param_field);
3806 return rc;
3807}
3808
/*
 * Tear down and free a qeth card: clean the read/write channels, free
 * the net_device, the to-be-deleted IP list and the qdio buffers, then
 * the card structure itself.
 * NOTE(review): assumes no concurrent users of @card remain — confirm
 * callers only invoke this after the device is fully offline.
 */
static void qeth_core_free_card(struct qeth_card *card)
{

	QETH_DBF_TEXT(setup, 2, "freecrd");
	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
	qeth_clean_channel(&card->read);
	qeth_clean_channel(&card->write);
	if (card->dev)
		free_netdev(card->dev);
	kfree(card->ip_tbd_list);
	qeth_free_qdio_buffers(card);
	kfree(card);
}
3822
/* CCW device ids handled by qeth; driver_info carries the resulting
 * qeth card type (OSA Express, HiperSockets IQD, OSN). */
static struct ccw_device_id qeth_ids[] = {
	{CCW_DEVICE(0x1731, 0x01), .driver_info = QETH_CARD_TYPE_OSAE},
	{CCW_DEVICE(0x1731, 0x05), .driver_info = QETH_CARD_TYPE_IQD},
	{CCW_DEVICE(0x1731, 0x06), .driver_info = QETH_CARD_TYPE_OSN},
	{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);
3830
/* ccw driver for the individual subchannels of a qeth device;
 * probe/remove are delegated to the ccwgroup helpers. */
static struct ccw_driver qeth_ccw_driver = {
	.name = "qeth",
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};
3837
3838static int qeth_core_driver_group(const char *buf, struct device *root_dev,
3839 unsigned long driver_id)
3840{
3841 const char *start, *end;
3842 char bus_ids[3][BUS_ID_SIZE], *argv[3];
3843 int i;
3844
3845 start = buf;
3846 for (i = 0; i < 3; i++) {
3847 static const char delim[] = { ',', ',', '\n' };
3848 int len;
3849
3850 end = strchr(start, delim[i]);
3851 if (!end)
3852 return -EINVAL;
3853 len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start);
3854 strncpy(bus_ids[i], start, len);
3855 bus_ids[i][len] = '\0';
3856 start = end + 1;
3857 argv[i] = bus_ids[i];
3858 }
3859
3860 return (ccwgroup_create(root_dev, driver_id,
3861 &qeth_ccw_driver, 3, argv));
3862}
3863
/*
 * Bring the card hardware up: clear the qdio queues, read the unit
 * address, activate the IDX read/write channels and run the MPC
 * initialization.  Recoverable errors are retried up to three times by
 * bouncing all three ccw devices offline/online.
 *
 * Returns 0 on success, -ERESTARTSYS if interrupted (no retry), or the
 * last error code after the retries are exhausted.
 */
int qeth_core_hardsetup_card(struct qeth_card *card)
{
	int retries = 3;
	int mpno;
	int rc;

	QETH_DBF_TEXT(setup, 2, "hrdsetup");
	atomic_set(&card->force_alloc_skb, 0);
retry:
	if (retries < 3) {
		/* a previous attempt failed: bounce all three channels */
		PRINT_WARN("Retrying to do IDX activates.\n");
		ccw_device_set_offline(CARD_DDEV(card));
		ccw_device_set_offline(CARD_WDEV(card));
		ccw_device_set_offline(CARD_RDEV(card));
		ccw_device_set_online(CARD_RDEV(card));
		ccw_device_set_online(CARD_WDEV(card));
		ccw_device_set_online(CARD_DDEV(card));
	}
	rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
	if (rc == -ERESTARTSYS) {
		QETH_DBF_TEXT(setup, 2, "break1");
		return rc;
	} else if (rc) {
		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	rc = qeth_get_unitaddr(card);
	if (rc) {
		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
		return rc;
	}

	mpno = QETH_MAX_PORTNO;
	if (card->info.portno > mpno) {
		PRINT_ERR("Device %s does not offer port number %d \n.",
			  CARD_BUS_ID(card), card->info.portno);
		rc = -ENODEV;
		goto out;
	}
	qeth_init_tokens(card);
	qeth_init_func_level(card);
	rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
	if (rc == -ERESTARTSYS) {
		QETH_DBF_TEXT(setup, 2, "break2");
		return rc;
	} else if (rc) {
		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
	rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
	if (rc == -ERESTARTSYS) {
		QETH_DBF_TEXT(setup, 2, "break3");
		return rc;
	} else if (rc) {
		QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
	rc = qeth_mpc_initialize(card);
	if (rc) {
		QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
		goto out;
	}
	return 0;
out:
	PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
3942
/*
 * Attach @data_len bytes at @offset of SBAL @element to *@pskb as a
 * page fragment (zero-copy receive).
 *
 * On the first call (*@pskb == NULL) a small skb is allocated and up to
 * 64 bytes are copied into its linear area, because the upper protocol
 * layers expect some data there; the remainder (if any) is attached as
 * a page fragment.  Later calls attach the whole chunk as a fragment.
 * *@pfrag is the next free fragment slot and is advanced accordingly.
 * get_page() takes an extra reference so the buffer page survives until
 * the skb is freed.
 *
 * Returns 0 on success, -ENOMEM if the skb allocation fails.
 */
static inline int qeth_create_skb_frag(struct qdio_buffer_element *element,
		struct sk_buff **pskb, int offset, int *pfrag, int data_len)
{
	struct page *page = virt_to_page(element->addr);
	if (*pskb == NULL) {
		/* the upper protocol layers assume that there is data in the
		 * skb itself. Copy a small amount (64 bytes) to make them
		 * happy. */
		*pskb = dev_alloc_skb(64 + ETH_HLEN);
		if (!(*pskb))
			return -ENOMEM;
		skb_reserve(*pskb, ETH_HLEN);
		if (data_len <= 64) {
			memcpy(skb_put(*pskb, data_len), element->addr + offset,
				data_len);
		} else {
			get_page(page);
			memcpy(skb_put(*pskb, 64), element->addr + offset, 64);
			skb_fill_page_desc(*pskb, *pfrag, page, offset + 64,
				data_len - 64);
			(*pskb)->data_len += data_len - 64;
			(*pskb)->len += data_len - 64;
			(*pskb)->truesize += data_len - 64;
			(*pfrag)++;
		}
	} else {
		get_page(page);
		skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len);
		(*pskb)->data_len += data_len;
		(*pskb)->len += data_len;
		(*pskb)->truesize += data_len;
		(*pfrag)++;
	}
	return 0;
}
3978
/*
 * Extract the next received packet from inbound QDIO @buffer.
 *
 * *@__element / *@__offset track the walk position inside the buffer's
 * SBAL element chain and are updated for the next call.  The packet's
 * qeth header is returned through *@hdr; depending on packet size and
 * card settings the payload is either copied into a freshly allocated
 * skb or attached as page fragments (rx scatter/gather).
 *
 * Returns the skb, or NULL when the buffer holds no further packet or
 * on error (truncated packet, allocation failure — counted in the
 * rx_errors/rx_dropped statistics).
 */
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
		struct qdio_buffer *buffer,
		struct qdio_buffer_element **__element, int *__offset,
		struct qeth_hdr **hdr)
{
	struct qdio_buffer_element *element = *__element;
	int offset = *__offset;
	struct sk_buff *skb = NULL;
	int skb_len;
	void *data_ptr;
	int data_len;
	int headroom = 0;
	int use_rx_sg = 0;
	int frag = 0;

	QETH_DBF_TEXT(trace, 6, "nextskb");
	/* qeth_hdr must not cross element boundaries */
	if (element->length < offset + sizeof(struct qeth_hdr)) {
		if (qeth_is_last_sbale(element))
			return NULL;
		element++;
		offset = 0;
		if (element->length < sizeof(struct qeth_hdr))
			return NULL;
	}
	*hdr = element->addr + offset;

	offset += sizeof(struct qeth_hdr);
	/* packet length and required headroom depend on the header layout */
	if (card->options.layer2) {
		if (card->info.type == QETH_CARD_TYPE_OSN) {
			skb_len = (*hdr)->hdr.osn.pdu_length;
			headroom = sizeof(struct qeth_hdr);
		} else {
			skb_len = (*hdr)->hdr.l2.pkt_length;
		}
	} else {
		skb_len = (*hdr)->hdr.l3.length;
		headroom = max((int)ETH_HLEN, (int)TR_HLEN);
	}

	if (!skb_len)
		return NULL;

	/* large packets go zero-copy (page fragments) unless disabled */
	if ((skb_len >= card->options.rx_sg_cb) &&
	    (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
	    (!atomic_read(&card->force_alloc_skb))) {
		use_rx_sg = 1;
	} else {
		skb = dev_alloc_skb(skb_len + headroom);
		if (!skb)
			goto no_mem;
		if (headroom)
			skb_reserve(skb, headroom);
	}

	/* gather the payload, following the SBAL element chain */
	data_ptr = element->addr + offset;
	while (skb_len) {
		data_len = min(skb_len, (int)(element->length - offset));
		if (data_len) {
			if (use_rx_sg) {
				if (qeth_create_skb_frag(element, &skb, offset,
				    &frag, data_len))
					goto no_mem;
			} else {
				memcpy(skb_put(skb, data_len), data_ptr,
					data_len);
			}
		}
		skb_len -= data_len;
		if (skb_len) {
			/* packet continues in the next element; running out
			 * of elements here means a truncated packet */
			if (qeth_is_last_sbale(element)) {
				QETH_DBF_TEXT(trace, 4, "unexeob");
				QETH_DBF_TEXT_(trace, 4, "%s",
					CARD_BUS_ID(card));
				QETH_DBF_TEXT(qerr, 2, "unexeob");
				QETH_DBF_TEXT_(qerr, 2, "%s",
					CARD_BUS_ID(card));
				QETH_DBF_HEX(misc, 4, buffer, sizeof(*buffer));
				dev_kfree_skb_any(skb);
				card->stats.rx_errors++;
				return NULL;
			}
			element++;
			offset = 0;
			data_ptr = element->addr;
		} else {
			offset += data_len;
		}
	}
	*__element = element;
	*__offset = offset;
	if (use_rx_sg && card->options.performance_stats) {
		card->perf_stats.sg_skbs_rx++;
		card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
	}
	return skb;
no_mem:
	if (net_ratelimit()) {
		PRINT_WARN("No memory for packet received on %s.\n",
			   QETH_CARD_IFNAME(card));
		QETH_DBF_TEXT(trace, 2, "noskbmem");
		QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card));
	}
	card->stats.rx_dropped++;
	return NULL;
}
EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
4086
/*
 * Unregister all s390 debug-feature areas this module registered.
 * Each pointer is checked first so this is safe to call from the
 * partial-failure path of qeth_register_dbf_views().
 */
static void qeth_unregister_dbf_views(void)
{
	if (qeth_dbf_setup)
		debug_unregister(qeth_dbf_setup);
	if (qeth_dbf_qerr)
		debug_unregister(qeth_dbf_qerr);
	if (qeth_dbf_sense)
		debug_unregister(qeth_dbf_sense);
	if (qeth_dbf_misc)
		debug_unregister(qeth_dbf_misc);
	if (qeth_dbf_data)
		debug_unregister(qeth_dbf_data);
	if (qeth_dbf_control)
		debug_unregister(qeth_dbf_control);
	if (qeth_dbf_trace)
		debug_unregister(qeth_dbf_trace);
}
4104
/*
 * Register the module's s390 debug-feature areas (setup, misc, data,
 * control, sense, qerr, trace), attach a hex/ascii view to each and set
 * its default level.
 *
 * Returns 0 on success; on any registration failure everything already
 * registered is torn down again and -ENOMEM is returned.
 */
static int qeth_register_dbf_views(void)
{
	qeth_dbf_setup = debug_register(QETH_DBF_SETUP_NAME,
					QETH_DBF_SETUP_PAGES,
					QETH_DBF_SETUP_NR_AREAS,
					QETH_DBF_SETUP_LEN);
	qeth_dbf_misc = debug_register(QETH_DBF_MISC_NAME,
				       QETH_DBF_MISC_PAGES,
				       QETH_DBF_MISC_NR_AREAS,
				       QETH_DBF_MISC_LEN);
	qeth_dbf_data = debug_register(QETH_DBF_DATA_NAME,
				       QETH_DBF_DATA_PAGES,
				       QETH_DBF_DATA_NR_AREAS,
				       QETH_DBF_DATA_LEN);
	qeth_dbf_control = debug_register(QETH_DBF_CONTROL_NAME,
					  QETH_DBF_CONTROL_PAGES,
					  QETH_DBF_CONTROL_NR_AREAS,
					  QETH_DBF_CONTROL_LEN);
	qeth_dbf_sense = debug_register(QETH_DBF_SENSE_NAME,
					QETH_DBF_SENSE_PAGES,
					QETH_DBF_SENSE_NR_AREAS,
					QETH_DBF_SENSE_LEN);
	qeth_dbf_qerr = debug_register(QETH_DBF_QERR_NAME,
				       QETH_DBF_QERR_PAGES,
				       QETH_DBF_QERR_NR_AREAS,
				       QETH_DBF_QERR_LEN);
	qeth_dbf_trace = debug_register(QETH_DBF_TRACE_NAME,
					QETH_DBF_TRACE_PAGES,
					QETH_DBF_TRACE_NR_AREAS,
					QETH_DBF_TRACE_LEN);

	/* if any registration failed, roll back the rest */
	if ((qeth_dbf_setup == NULL) || (qeth_dbf_misc == NULL) ||
	    (qeth_dbf_data == NULL) || (qeth_dbf_control == NULL) ||
	    (qeth_dbf_sense == NULL) || (qeth_dbf_qerr == NULL) ||
	    (qeth_dbf_trace == NULL)) {
		qeth_unregister_dbf_views();
		return -ENOMEM;
	}
	debug_register_view(qeth_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_setup, QETH_DBF_SETUP_LEVEL);

	debug_register_view(qeth_dbf_misc, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_misc, QETH_DBF_MISC_LEVEL);

	debug_register_view(qeth_dbf_data, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_data, QETH_DBF_DATA_LEVEL);

	debug_register_view(qeth_dbf_control, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_control, QETH_DBF_CONTROL_LEVEL);

	debug_register_view(qeth_dbf_sense, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_sense, QETH_DBF_SENSE_LEVEL);

	debug_register_view(qeth_dbf_qerr, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_qerr, QETH_DBF_QERR_LEVEL);

	debug_register_view(qeth_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(qeth_dbf_trace, QETH_DBF_TRACE_LEVEL);

	return 0;
}
4166
4167int qeth_core_load_discipline(struct qeth_card *card,
4168 enum qeth_discipline_id discipline)
4169{
4170 int rc = 0;
4171 switch (discipline) {
4172 case QETH_DISCIPLINE_LAYER3:
4173 card->discipline.ccwgdriver = try_then_request_module(
4174 symbol_get(qeth_l3_ccwgroup_driver),
4175 "qeth_l3");
4176 break;
4177 case QETH_DISCIPLINE_LAYER2:
4178 card->discipline.ccwgdriver = try_then_request_module(
4179 symbol_get(qeth_l2_ccwgroup_driver),
4180 "qeth_l2");
4181 break;
4182 }
4183 if (!card->discipline.ccwgdriver) {
4184 PRINT_ERR("Support for discipline %d not present\n",
4185 discipline);
4186 rc = -EINVAL;
4187 }
4188 return rc;
4189}
4190
4191void qeth_core_free_discipline(struct qeth_card *card)
4192{
4193 if (card->options.layer2)
4194 symbol_put(qeth_l2_ccwgroup_driver);
4195 else
4196 symbol_put(qeth_l3_ccwgroup_driver);
4197 card->discipline.ccwgdriver = NULL;
4198}
4199
/*
 * ccwgroup probe callback: allocate and prepare a qeth_card for the three
 * grouped ccw devices (read/write/data channels).
 *
 * Takes a reference on the group device (get_device); it is released in
 * qeth_core_remove_device() or on the error path here.  OSN cards are
 * bound to the layer 2 discipline immediately; all other card types defer
 * discipline selection until set_online.
 */
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
	unsigned long flags;

	QETH_DBF_TEXT(setup, 2, "probedev");

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

	QETH_DBF_TEXT_(setup, 2, "%s", gdev->dev.bus_id);

	card = qeth_alloc_card();
	if (!card) {
		QETH_DBF_TEXT_(setup, 2, "1err%d", -ENOMEM);
		rc = -ENOMEM;
		goto err_dev;
	}
	/* cdev[0..2] = read/write/data channel; all share one irq handler. */
	card->read.ccwdev = gdev->cdev[0];
	card->write.ccwdev = gdev->cdev[1];
	card->data.ccwdev = gdev->cdev[2];
	dev_set_drvdata(&gdev->dev, card);
	card->gdev = gdev;
	gdev->cdev[0]->handler = qeth_irq;
	gdev->cdev[1]->handler = qeth_irq;
	gdev->cdev[2]->handler = qeth_irq;

	rc = qeth_determine_card_type(card);
	if (rc) {
		PRINT_WARN("%s: not a valid card type\n", __func__);
		QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
		goto err_card;
	}
	rc = qeth_setup_card(card);
	if (rc) {
		QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
		goto err_card;
	}

	if (card->info.type == QETH_CARD_TYPE_OSN) {
		/* OSN always runs layer 2: bind the discipline right away. */
		rc = qeth_core_create_osn_attributes(dev);
		if (rc)
			goto err_card;
		rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
		if (rc) {
			qeth_core_remove_osn_attributes(dev);
			goto err_card;
		}
		rc = card->discipline.ccwgdriver->probe(card->gdev);
		if (rc) {
			qeth_core_free_discipline(card);
			qeth_core_remove_osn_attributes(dev);
			goto err_card;
		}
	} else {
		rc = qeth_core_create_device_attributes(dev);
		if (rc)
			goto err_card;
	}

	/* Publish the card on the global list only once fully set up. */
	write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
	list_add_tail(&card->list, &qeth_core_card_list.list);
	write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
	return 0;

err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}
4274
4275static void qeth_core_remove_device(struct ccwgroup_device *gdev)
4276{
4277 unsigned long flags;
4278 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4279
4280 if (card->discipline.ccwgdriver) {
4281 card->discipline.ccwgdriver->remove(gdev);
4282 qeth_core_free_discipline(card);
4283 }
4284
4285 if (card->info.type == QETH_CARD_TYPE_OSN) {
4286 qeth_core_remove_osn_attributes(&gdev->dev);
4287 } else {
4288 qeth_core_remove_device_attributes(&gdev->dev);
4289 }
4290 write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
4291 list_del(&card->list);
4292 write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
4293 qeth_core_free_card(card);
4294 dev_set_drvdata(&gdev->dev, NULL);
4295 put_device(&gdev->dev);
4296 return;
4297}
4298
4299static int qeth_core_set_online(struct ccwgroup_device *gdev)
4300{
4301 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4302 int rc = 0;
4303 int def_discipline;
4304
4305 if (!card->discipline.ccwgdriver) {
4306 if (card->info.type == QETH_CARD_TYPE_IQD)
4307 def_discipline = QETH_DISCIPLINE_LAYER3;
4308 else
4309 def_discipline = QETH_DISCIPLINE_LAYER2;
4310 rc = qeth_core_load_discipline(card, def_discipline);
4311 if (rc)
4312 goto err;
4313 rc = card->discipline.ccwgdriver->probe(card->gdev);
4314 if (rc)
4315 goto err;
4316 }
4317 rc = card->discipline.ccwgdriver->set_online(gdev);
4318err:
4319 return rc;
4320}
4321
4322static int qeth_core_set_offline(struct ccwgroup_device *gdev)
4323{
4324 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4325 return card->discipline.ccwgdriver->set_offline(gdev);
4326}
4327
4328static void qeth_core_shutdown(struct ccwgroup_device *gdev)
4329{
4330 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4331 if (card->discipline.ccwgdriver &&
4332 card->discipline.ccwgdriver->shutdown)
4333 card->discipline.ccwgdriver->shutdown(gdev);
4334}
4335
/* Core ccwgroup driver; layer disciplines are dispatched via qeth_card. */
static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.owner = THIS_MODULE,
	.name = "qeth",
	.driver_id = 0xD8C5E3C8,	/* "QETH" in EBCDIC */
	.probe = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
};
4346
4347static ssize_t
4348qeth_core_driver_group_store(struct device_driver *ddrv, const char *buf,
4349 size_t count)
4350{
4351 int err;
4352 err = qeth_core_driver_group(buf, qeth_core_root_dev,
4353 qeth_core_ccwgroup_driver.driver_id);
4354 if (err)
4355 return err;
4356 else
4357 return count;
4358}
4359
4360static DRIVER_ATTR(group, 0200, NULL, qeth_core_driver_group_store);
4361
/*
 * ethtool statistics names.  The order is user-visible ABI: the index of
 * each string must match the data[] slot filled in
 * qeth_core_get_ethtool_stats() below.
 */
static struct {
	const char str[ETH_GSTRING_LEN];
} qeth_ethtool_stats_keys[] = {
/* 0 */{"rx skbs"},
	{"rx buffers"},
	{"tx skbs"},
	{"tx buffers"},
	{"tx skbs no packing"},
	{"tx buffers no packing"},
	{"tx skbs packing"},
	{"tx buffers packing"},
	{"tx sg skbs"},
	{"tx sg frags"},
/* 10 */{"rx sg skbs"},
	{"rx sg frags"},
	{"rx sg page allocs"},
	{"tx large kbytes"},
	{"tx large count"},
	{"tx pk state ch n->p"},
	{"tx pk state ch p->n"},
	{"tx pk watermark low"},
	{"tx pk watermark high"},
	{"queue 0 buffer usage"},
/* 20 */{"queue 1 buffer usage"},
	{"queue 2 buffer usage"},
	{"queue 3 buffer usage"},
	{"rx handler time"},
	{"rx handler count"},
	{"rx do_QDIO time"},
	{"rx do_QDIO count"},
	{"tx handler time"},
	{"tx handler count"},
	{"tx time"},
/* 30 */{"tx count"},
	{"tx do_QDIO time"},
	{"tx do_QDIO count"},
};
4399
4400int qeth_core_get_stats_count(struct net_device *dev)
4401{
4402 return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
4403}
4404EXPORT_SYMBOL_GPL(qeth_core_get_stats_count);
4405
/*
 * ethtool get_ethtool_stats callback.  Fills data[] in exactly the order
 * of qeth_ethtool_stats_keys above — keep both in sync when adding
 * counters.  rx/tx packet counts are reported relative to the snapshot
 * taken when perf stats were enabled (initial_rx/tx_packets).
 */
void qeth_core_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct qeth_card *card = netdev_priv(dev);
	data[0] = card->stats.rx_packets -
				card->perf_stats.initial_rx_packets;
	data[1] = card->perf_stats.bufs_rec;
	data[2] = card->stats.tx_packets -
				card->perf_stats.initial_tx_packets;
	data[3] = card->perf_stats.bufs_sent;
	/* "no packing" counters = totals minus the packed-mode counters. */
	data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets
			- card->perf_stats.skbs_sent_pack;
	data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack;
	data[6] = card->perf_stats.skbs_sent_pack;
	data[7] = card->perf_stats.bufs_sent_pack;
	data[8] = card->perf_stats.sg_skbs_sent;
	data[9] = card->perf_stats.sg_frags_sent;
	data[10] = card->perf_stats.sg_skbs_rx;
	data[11] = card->perf_stats.sg_frags_rx;
	data[12] = card->perf_stats.sg_alloc_page_rx;
	data[13] = (card->perf_stats.large_send_bytes >> 10); /* bytes -> KiB */
	data[14] = card->perf_stats.large_send_cnt;
	data[15] = card->perf_stats.sc_dp_p;
	data[16] = card->perf_stats.sc_p_dp;
	data[17] = QETH_LOW_WATERMARK_PACK;
	data[18] = QETH_HIGH_WATERMARK_PACK;
	/* Per-queue buffer usage; report 0 for queues not configured. */
	data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers);
	data[20] = (card->qdio.no_out_queues > 1) ?
			atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0;
	data[21] = (card->qdio.no_out_queues > 2) ?
			atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0;
	data[22] = (card->qdio.no_out_queues > 3) ?
			atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0;
	data[23] = card->perf_stats.inbound_time;
	data[24] = card->perf_stats.inbound_cnt;
	data[25] = card->perf_stats.inbound_do_qdio_time;
	data[26] = card->perf_stats.inbound_do_qdio_cnt;
	data[27] = card->perf_stats.outbound_handler_time;
	data[28] = card->perf_stats.outbound_handler_cnt;
	data[29] = card->perf_stats.outbound_time;
	data[30] = card->perf_stats.outbound_cnt;
	data[31] = card->perf_stats.outbound_do_qdio_time;
	data[32] = card->perf_stats.outbound_do_qdio_cnt;
}
EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
4451
4452void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4453{
4454 switch (stringset) {
4455 case ETH_SS_STATS:
4456 memcpy(data, &qeth_ethtool_stats_keys,
4457 sizeof(qeth_ethtool_stats_keys));
4458 break;
4459 default:
4460 WARN_ON(1);
4461 break;
4462 }
4463}
4464EXPORT_SYMBOL_GPL(qeth_core_get_strings);
4465
4466void qeth_core_get_drvinfo(struct net_device *dev,
4467 struct ethtool_drvinfo *info)
4468{
4469 struct qeth_card *card = netdev_priv(dev);
4470 if (card->options.layer2)
4471 strcpy(info->driver, "qeth_l2");
4472 else
4473 strcpy(info->driver, "qeth_l3");
4474
4475 strcpy(info->version, "1.0");
4476 strcpy(info->fw_version, card->info.mcl_level);
4477 sprintf(info->bus_info, "%s/%s/%s",
4478 CARD_RDEV_ID(card),
4479 CARD_WDEV_ID(card),
4480 CARD_DDEV_ID(card));
4481}
4482EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
4483
/*
 * Module init: set up the global card list, register dbf views, the ccw
 * and ccwgroup drivers, the driver-level 'group' attribute and the
 * "qeth" s390 root device.  On failure, each chained error label undoes
 * exactly the steps completed before it, in reverse order.
 */
static int __init qeth_core_init(void)
{
	int rc;

	PRINT_INFO("loading core functions\n");
	INIT_LIST_HEAD(&qeth_core_card_list.list);
	rwlock_init(&qeth_core_card_list.rwlock);

	rc = qeth_register_dbf_views();
	if (rc)
		goto out_err;
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;
	rc = driver_create_file(&qeth_core_ccwgroup_driver.driver,
			   &driver_attr_group);
	if (rc)
		goto driver_err;
	qeth_core_root_dev = s390_root_dev_register("qeth");
	/* s390_root_dev_register() returns an ERR_PTR() on failure. */
	rc = IS_ERR(qeth_core_root_dev) ? PTR_ERR(qeth_core_root_dev) : 0;
	if (rc)
		goto register_err;
	return 0;

register_err:
	driver_remove_file(&qeth_core_ccwgroup_driver.driver,
			   &driver_attr_group);
driver_err:
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	qeth_unregister_dbf_views();
out_err:
	PRINT_ERR("Initialization failed with code %d\n", rc);
	return rc;
}
4524
/* Module exit: tear down everything qeth_core_init() set up, in reverse. */
static void __exit qeth_core_exit(void)
{
	s390_root_dev_unregister(qeth_core_root_dev);
	driver_remove_file(&qeth_core_ccwgroup_driver.driver,
			   &driver_attr_group);
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	qeth_unregister_dbf_views();
	PRINT_INFO("core functions removed\n");
}
4535
/* Module entry points and metadata. */
module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
new file mode 100644
index 000000000000..8653b73e5dcf
--- /dev/null
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -0,0 +1,266 @@
1/*
2 * drivers/s390/net/qeth_core_mpc.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
6 * Thomas Spatzier <tspat@de.ibm.com>,
7 * Frank Blaschka <frank.blaschka@de.ibm.com>
8 */
9
10#include <linux/module.h>
11#include <asm/cio.h>
12#include "qeth_core_mpc.h"
13
/*
 * IDX/MPC handshake templates, sent verbatim (after per-card patching of
 * token/seqno fields elsewhere in the driver) during channel activation
 * and control-connection setup.  The byte values are defined by the OSA
 * protocol — do not edit them.
 */

/* IDX ACTIVATE request for the read channel. */
unsigned char IDX_ACTIVATE_READ[] = {
	0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x19, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1,
	0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00,
	0x00, 0x00
};

/* IDX ACTIVATE request for the write channel. */
unsigned char IDX_ACTIVATE_WRITE[] = {
	0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x15, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00,
	0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1,
	0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00,
	0x00, 0x00
};

/* Control-connection management: enable request. */
unsigned char CM_ENABLE[] = {
	0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
	0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x63,
	0x10, 0x00, 0x00, 0x01,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x23,
	0x00, 0x00, 0x23, 0x05, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x40,
	0x00, 0x0c, 0x41, 0x02, 0x00, 0x17, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x0b, 0x04, 0x01,
	0x7e, 0x04, 0x05, 0x00, 0x01, 0x01, 0x0f,
	0x00,
	0x0c, 0x04, 0x02, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff
};

/* Control-connection management: setup request. */
unsigned char CM_SETUP[] = {
	0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
	0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x64,
	0x10, 0x00, 0x00, 0x01,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x24,
	0x00, 0x00, 0x24, 0x05, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x40,
	0x00, 0x0c, 0x41, 0x04, 0x00, 0x18, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x09, 0x04, 0x04,
	0x05, 0x00, 0x01, 0x01, 0x11,
	0x00, 0x09, 0x04,
	0x05, 0x05, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x06,
	0x04, 0x06, 0xc8, 0x00
};

/* Upper-layer protocol: enable request. */
unsigned char ULP_ENABLE[] = {
	0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
	0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6b,
	0x10, 0x00, 0x00, 0x01,
	0x00, 0x00, 0x00, 0x00,
	0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x2b,
	0x00, 0x00, 0x2b, 0x05, 0x20, 0x01, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x40,
	0x00, 0x0c, 0x41, 0x02, 0x00, 0x1f, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x0b, 0x04, 0x01,
	0x03, 0x04, 0x05, 0x00, 0x01, 0x01, 0x12,
	0x00,
	0x14, 0x04, 0x0a, 0x00, 0x20, 0x00, 0x00, 0xff,
	0xff, 0x00, 0x08, 0xc8, 0xe8, 0xc4, 0xf1, 0xc7,
	0xf1, 0x00, 0x00
};

/* Upper-layer protocol: setup request. */
unsigned char ULP_SETUP[] = {
	0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
	0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6c,
	0x10, 0x00, 0x00, 0x01,
	0x00, 0x00, 0x00, 0x00,
	0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02,
	0x00, 0x00, 0x00, 0x01, 0x00, 0x24, 0x00, 0x2c,
	0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x40,
	0x00, 0x0c, 0x41, 0x04, 0x00, 0x20, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x09, 0x04, 0x04,
	0x05, 0x00, 0x01, 0x01, 0x14,
	0x00, 0x09, 0x04,
	0x05, 0x05, 0x30, 0x01, 0x00, 0x00,
	0x00, 0x06,
	0x04, 0x06, 0x40, 0x00,
	0x00, 0x08, 0x04, 0x0b,
	0x00, 0x00, 0x00, 0x00
};

/* Device management: activate request. */
unsigned char DM_ACT[] = {
	0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
	0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x55,
	0x10, 0x00, 0x00, 0x01,
	0x00, 0x00, 0x00, 0x00,
	0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03,
	0x00, 0x00, 0x00, 0x02, 0x00, 0x24, 0x00, 0x15,
	0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x40,
	0x00, 0x0c, 0x43, 0x60, 0x00, 0x09, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x09, 0x04, 0x04,
	0x05, 0x40, 0x01, 0x01, 0x00
};

/*
 * Template header prepended to every IPA command; 0x77 bytes are
 * placeholders patched at runtime, and the embedded length fields are
 * computed from sizeof(struct qeth_ipa_cmd) at compile time.
 */
unsigned char IPA_PDU_HEADER[] = {
	0x00, 0xe0, 0x00, 0x00, 0x77, 0x77, 0x77, 0x77,
	0x00, 0x00, 0x00, 0x14, 0x00, 0x00,
		(IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) / 256,
		(IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) % 256,
	0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0xc1, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x24,
		sizeof(struct qeth_ipa_cmd) / 256,
		sizeof(struct qeth_ipa_cmd) % 256,
	0x00,
		sizeof(struct qeth_ipa_cmd) / 256,
		sizeof(struct qeth_ipa_cmd) % 256,
	0x05,
	0x77, 0x77, 0x77, 0x77,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x00,
		sizeof(struct qeth_ipa_cmd) / 256,
		sizeof(struct qeth_ipa_cmd) % 256,
	0x00, 0x00, 0x00, 0x40,
};
EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);

/* CCW templates for the write (0x01) and read (0x02) channel programs. */
unsigned char WRITE_CCW[] = {
	0x01, CCW_FLAG_SLI, 0, 0,
	0, 0, 0, 0
};

unsigned char READ_CCW[] = {
	0x02, CCW_FLAG_SLI, 0, 0,
	0, 0, 0, 0
};
159
160
/* Pairs an IPA return code with a human-readable message. */
struct ipa_rc_msg {
	enum qeth_ipa_return_codes rc;
	char *msg;
};

/*
 * Message catalog used by qeth_get_ipa_msg().  The final IPA_RC_FFFF
 * entry serves as the catch-all for return codes not listed here.
 */
static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
	{IPA_RC_SUCCESS, "success"},
	{IPA_RC_NOTSUPP, "Command not supported"},
	{IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
	{IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"},
	{IPA_RC_UNSUPPORTED_COMMAND, "Command not supported"},
	{IPA_RC_DUP_IPV6_REMOTE, "ipv6 address already registered remote"},
	{IPA_RC_DUP_IPV6_HOME, "ipv6 address already registered"},
	{IPA_RC_UNREGISTERED_ADDR, "Address not registered"},
	{IPA_RC_NO_ID_AVAILABLE, "No identifiers available"},
	{IPA_RC_ID_NOT_FOUND, "Identifier not found"},
	{IPA_RC_INVALID_IP_VERSION, "IP version incorrect"},
	{IPA_RC_LAN_FRAME_MISMATCH, "LAN and frame mismatch"},
	{IPA_RC_L2_UNSUPPORTED_CMD, "Unsupported layer 2 command"},
	{IPA_RC_L2_DUP_MAC, "Duplicate MAC address"},
	{IPA_RC_L2_ADDR_TABLE_FULL, "Layer2 address table full"},
	{IPA_RC_L2_DUP_LAYER3_MAC, "Duplicate with layer 3 MAC"},
	{IPA_RC_L2_GMAC_NOT_FOUND, "GMAC not found"},
	{IPA_RC_L2_MAC_NOT_FOUND, "L2 mac address not found"},
	{IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"},
	{IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"},
	{IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"},
	{IPA_RC_DATA_MISMATCH, "Data field mismatch (v4/v6 mixed)"},
	{IPA_RC_INVALID_MTU_SIZE, "Invalid MTU size"},
	{IPA_RC_INVALID_LANTYPE, "Invalid LAN type"},
	{IPA_RC_INVALID_LANNUM, "Invalid LAN num"},
	{IPA_RC_DUPLICATE_IP_ADDRESS, "Address already registered"},
	{IPA_RC_IP_ADDR_TABLE_FULL, "IP address table full"},
	{IPA_RC_LAN_PORT_STATE_ERROR, "LAN port state error"},
	{IPA_RC_SETIP_NO_STARTLAN, "Setip no startlan received"},
	{IPA_RC_SETIP_ALREADY_RECEIVED, "Setip already received"},
	{IPA_RC_IP_ADDR_ALREADY_USED, "IP address already in use on LAN"},
	{IPA_RC_MULTICAST_FULL, "No task available, multicast full"},
	{IPA_RC_SETIP_INVALID_VERSION, "SETIP invalid IP version"},
	{IPA_RC_UNSUPPORTED_SUBCMD, "Unsupported assist subcommand"},
	{IPA_RC_ARP_ASSIST_NO_ENABLE, "Only partial success, no enable"},
	{IPA_RC_PRIMARY_ALREADY_DEFINED, "Primary already defined"},
	{IPA_RC_SECOND_ALREADY_DEFINED, "Secondary already defined"},
	{IPA_RC_INVALID_SETRTG_INDICATOR, "Invalid SETRTG indicator"},
	{IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"},
	{IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"},
	{IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"},
	{IPA_RC_FFFF, "Unknown Error"}
};
210
211
212
213char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
214{
215 int x = 0;
216 qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) /
217 sizeof(struct ipa_rc_msg) - 1].rc = rc;
218 while (qeth_ipa_rc_msg[x].rc != rc)
219 x++;
220 return qeth_ipa_rc_msg[x].msg;
221}
222
223
224struct ipa_cmd_names {
225 enum qeth_ipa_cmds cmd;
226 char *name;
227};
228
229static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
230 {IPA_CMD_STARTLAN, "startlan"},
231 {IPA_CMD_STOPLAN, "stoplan"},
232 {IPA_CMD_SETVMAC, "setvmac"},
233 {IPA_CMD_DELVMAC, "delvmca"},
234 {IPA_CMD_SETGMAC, "setgmac"},
235 {IPA_CMD_DELGMAC, "delgmac"},
236 {IPA_CMD_SETVLAN, "setvlan"},
237 {IPA_CMD_DELVLAN, "delvlan"},
238 {IPA_CMD_SETCCID, "setccid"},
239 {IPA_CMD_DELCCID, "delccid"},
240 {IPA_CMD_MODCCID, "modccid"},
241 {IPA_CMD_SETIP, "setip"},
242 {IPA_CMD_QIPASSIST, "qipassist"},
243 {IPA_CMD_SETASSPARMS, "setassparms"},
244 {IPA_CMD_SETIPM, "setipm"},
245 {IPA_CMD_DELIPM, "delipm"},
246 {IPA_CMD_SETRTG, "setrtg"},
247 {IPA_CMD_DELIP, "delip"},
248 {IPA_CMD_SETADAPTERPARMS, "setadapterparms"},
249 {IPA_CMD_SET_DIAG_ASS, "set_diag_ass"},
250 {IPA_CMD_CREATE_ADDR, "create_addr"},
251 {IPA_CMD_DESTROY_ADDR, "destroy_addr"},
252 {IPA_CMD_REGISTER_LOCAL_ADDR, "register_local_addr"},
253 {IPA_CMD_UNREGISTER_LOCAL_ADDR, "unregister_local_addr"},
254 {IPA_CMD_UNKNOWN, "unknown"},
255};
256
257char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
258{
259 int x = 0;
260 qeth_ipa_cmd_names[
261 sizeof(qeth_ipa_cmd_names) /
262 sizeof(struct ipa_cmd_names)-1].cmd = cmd;
263 while (qeth_ipa_cmd_names[x].cmd != cmd)
264 x++;
265 return qeth_ipa_cmd_names[x].name;
266}
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
new file mode 100644
index 000000000000..de221932f30f
--- /dev/null
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -0,0 +1,566 @@
1/*
2 * drivers/s390/net/qeth_core_mpc.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
6 * Thomas Spatzier <tspat@de.ibm.com>,
7 * Frank Blaschka <frank.blaschka@de.ibm.com>
8 */
9
10#ifndef __QETH_CORE_MPC_H__
11#define __QETH_CORE_MPC_H__
12
13#include <asm/qeth.h>
14
/* Offsets of the length fields inside a raw IPA PDU buffer. */
#define IPA_PDU_HEADER_SIZE 0x40
#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
#define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer + 0x26)
#define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer + 0x29)
#define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer + 0x3a)

/* Template PDU header; defined in qeth_core_mpc.c. */
extern unsigned char IPA_PDU_HEADER[];
#define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer + 0x2c)

/* Total on-wire length of an IPA command including the PDU header. */
#define IPA_CMD_LENGTH	(IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd))

#define QETH_SEQ_NO_LENGTH	4
#define QETH_MPC_TOKEN_LENGTH	4
#define QETH_MCL_LENGTH		4
#define OSA_ADDR_LEN		6

/* Command timeouts, in jiffies. */
#define QETH_TIMEOUT		(10 * HZ)
#define QETH_IPA_TIMEOUT	(45 * HZ)
#define QETH_IDX_COMMAND_SEQNO	0xffff0000
#define SR_INFO_LEN		16

/* NOTE(review): negative parm markers for internal channel clear/halt/RCD
 * operations — confirm usage against the irq handler in qeth_core_main.c. */
#define QETH_CLEAR_CHANNEL_PARM	-10
#define QETH_HALT_CHANNEL_PARM	-11
#define QETH_RCD_PARM		-12

/*****************************************************************************/
/* IP Assist related definitions                                             */
/*****************************************************************************/
/* Values for the 'initiator' field of struct qeth_ipacmd_hdr. */
#define IPA_CMD_INITIATOR_HOST  0x00
#define IPA_CMD_INITIATOR_OSA   0x01
#define IPA_CMD_INITIATOR_HOST_REPLY  0x80
#define IPA_CMD_INITIATOR_OSA_REPLY   0x81
#define IPA_CMD_PRIM_VERSION_NO 0x01
48
/* qeth hardware card types. */
enum qeth_card_types {
	QETH_CARD_TYPE_UNKNOWN = 0,
	QETH_CARD_TYPE_OSAE = 10,
	QETH_CARD_TYPE_IQD = 1234,
	QETH_CARD_TYPE_OSN = 11,
};

#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
/* only the first two bytes are looked at in qeth_get_cardname_short */
enum qeth_link_types {
	QETH_LINK_TYPE_FAST_ETH     = 0x01,
	QETH_LINK_TYPE_HSTR         = 0x02,
	QETH_LINK_TYPE_GBIT_ETH     = 0x03,
	QETH_LINK_TYPE_OSN          = 0x04,
	QETH_LINK_TYPE_10GBIT_ETH   = 0x10,
	QETH_LINK_TYPE_LANE_ETH100  = 0x81,
	QETH_LINK_TYPE_LANE_TR      = 0x82,
	QETH_LINK_TYPE_LANE_ETH1000 = 0x83,
	QETH_LINK_TYPE_LANE         = 0x88,
	QETH_LINK_TYPE_ATM_NATIVE   = 0x90,
};

/* Token ring MAC address canonicalization modes. */
enum qeth_tr_macaddr_modes {
	QETH_TR_MACADDR_NONCANONICAL = 0,
	QETH_TR_MACADDR_CANONICAL    = 1,
};

/* Token ring broadcast scope. */
enum qeth_tr_broadcast_modes {
	QETH_TR_BROADCAST_ALLRINGS = 0,
	QETH_TR_BROADCAST_LOCAL    = 1,
};

/* these values match CHECKSUM_* in include/linux/skbuff.h */
enum qeth_checksum_types {
	SW_CHECKSUMMING = 0, /* TODO: set to bit flag used in IPA Command */
	HW_CHECKSUMMING = 1,
	NO_CHECKSUMMING = 2,
};
#define QETH_CHECKSUM_DEFAULT SW_CHECKSUMMING

/*
 * Routing stuff
 */
#define RESET_ROUTING_FLAG 0x10 /* indicate that routing type shall be set */
enum qeth_routing_types {
	/* TODO: set to bit flag used in IPA Command */
	NO_ROUTER           = 0,
	PRIMARY_ROUTER      = 1,
	SECONDARY_ROUTER    = 2,
	MULTICAST_ROUTER    = 3,
	PRIMARY_CONNECTOR   = 4,
	SECONDARY_CONNECTOR = 5,
};
102
/* IPA Commands */
enum qeth_ipa_cmds {
	IPA_CMD_STARTLAN              = 0x01,
	IPA_CMD_STOPLAN               = 0x02,
	IPA_CMD_SETVMAC               = 0x21,
	IPA_CMD_DELVMAC               = 0x22,
	IPA_CMD_SETGMAC               = 0x23,
	IPA_CMD_DELGMAC               = 0x24,
	IPA_CMD_SETVLAN               = 0x25,
	IPA_CMD_DELVLAN               = 0x26,
	IPA_CMD_SETCCID               = 0x41,
	IPA_CMD_DELCCID               = 0x42,
	IPA_CMD_MODCCID               = 0x43,
	IPA_CMD_SETIP                 = 0xb1,
	IPA_CMD_QIPASSIST             = 0xb2,
	IPA_CMD_SETASSPARMS           = 0xb3,
	IPA_CMD_SETIPM                = 0xb4,
	IPA_CMD_DELIPM                = 0xb5,
	IPA_CMD_SETRTG                = 0xb6,
	IPA_CMD_DELIP                 = 0xb7,
	IPA_CMD_SETADAPTERPARMS       = 0xb8,
	IPA_CMD_SET_DIAG_ASS          = 0xb9,
	IPA_CMD_CREATE_ADDR           = 0xc3,
	IPA_CMD_DESTROY_ADDR          = 0xc4,
	IPA_CMD_REGISTER_LOCAL_ADDR   = 0xd1,
	IPA_CMD_UNREGISTER_LOCAL_ADDR = 0xd2,
	IPA_CMD_UNKNOWN               = 0x00	/* placeholder, not a real command */
};

/* Sub-commands for IPA_CMD_SETASSPARMS (assist start/stop/config). */
enum qeth_ip_ass_cmds {
	IPA_CMD_ASS_START     = 0x0001,
	IPA_CMD_ASS_STOP      = 0x0002,
	IPA_CMD_ASS_CONFIGURE = 0x0003,
	IPA_CMD_ASS_ENABLE    = 0x0004,
};

/* ARP-assist sub-commands. */
enum qeth_arp_process_subcmds {
	IPA_CMD_ASS_ARP_SET_NO_ENTRIES = 0x0003,
	IPA_CMD_ASS_ARP_QUERY_CACHE    = 0x0004,
	IPA_CMD_ASS_ARP_ADD_ENTRY      = 0x0005,
	IPA_CMD_ASS_ARP_REMOVE_ENTRY   = 0x0006,
	IPA_CMD_ASS_ARP_FLUSH_CACHE    = 0x0007,
	IPA_CMD_ASS_ARP_QUERY_INFO     = 0x0104,
	IPA_CMD_ASS_ARP_QUERY_STATS    = 0x0204,
};
148
149
150/* Return Codes for IPA Commands
151 * according to OSA card Specs */
152
153enum qeth_ipa_return_codes {
154 IPA_RC_SUCCESS = 0x0000,
155 IPA_RC_NOTSUPP = 0x0001,
156 IPA_RC_IP_TABLE_FULL = 0x0002,
157 IPA_RC_UNKNOWN_ERROR = 0x0003,
158 IPA_RC_UNSUPPORTED_COMMAND = 0x0004,
159 IPA_RC_DUP_IPV6_REMOTE = 0x0008,
160 IPA_RC_DUP_IPV6_HOME = 0x0010,
161 IPA_RC_UNREGISTERED_ADDR = 0x0011,
162 IPA_RC_NO_ID_AVAILABLE = 0x0012,
163 IPA_RC_ID_NOT_FOUND = 0x0013,
164 IPA_RC_INVALID_IP_VERSION = 0x0020,
165 IPA_RC_LAN_FRAME_MISMATCH = 0x0040,
166 IPA_RC_L2_UNSUPPORTED_CMD = 0x2003,
167 IPA_RC_L2_DUP_MAC = 0x2005,
168 IPA_RC_L2_ADDR_TABLE_FULL = 0x2006,
169 IPA_RC_L2_DUP_LAYER3_MAC = 0x200a,
170 IPA_RC_L2_GMAC_NOT_FOUND = 0x200b,
171 IPA_RC_L2_MAC_NOT_FOUND = 0x2010,
172 IPA_RC_L2_INVALID_VLAN_ID = 0x2015,
173 IPA_RC_L2_DUP_VLAN_ID = 0x2016,
174 IPA_RC_L2_VLAN_ID_NOT_FOUND = 0x2017,
175 IPA_RC_DATA_MISMATCH = 0xe001,
176 IPA_RC_INVALID_MTU_SIZE = 0xe002,
177 IPA_RC_INVALID_LANTYPE = 0xe003,
178 IPA_RC_INVALID_LANNUM = 0xe004,
179 IPA_RC_DUPLICATE_IP_ADDRESS = 0xe005,
180 IPA_RC_IP_ADDR_TABLE_FULL = 0xe006,
181 IPA_RC_LAN_PORT_STATE_ERROR = 0xe007,
182 IPA_RC_SETIP_NO_STARTLAN = 0xe008,
183 IPA_RC_SETIP_ALREADY_RECEIVED = 0xe009,
184 IPA_RC_IP_ADDR_ALREADY_USED = 0xe00a,
185 IPA_RC_MULTICAST_FULL = 0xe00b,
186 IPA_RC_SETIP_INVALID_VERSION = 0xe00d,
187 IPA_RC_UNSUPPORTED_SUBCMD = 0xe00e,
188 IPA_RC_ARP_ASSIST_NO_ENABLE = 0xe00f,
189 IPA_RC_PRIMARY_ALREADY_DEFINED = 0xe010,
190 IPA_RC_SECOND_ALREADY_DEFINED = 0xe011,
191 IPA_RC_INVALID_SETRTG_INDICATOR = 0xe012,
192 IPA_RC_MC_ADDR_ALREADY_DEFINED = 0xe013,
193 IPA_RC_LAN_OFFLINE = 0xe080,
194 IPA_RC_INVALID_IP_VERSION2 = 0xf001,
195 IPA_RC_FFFF = 0xffff
196};
197
198/* IPA function flags; each flag marks availability of respective function */
199enum qeth_ipa_funcs {
200 IPA_ARP_PROCESSING = 0x00000001L,
201 IPA_INBOUND_CHECKSUM = 0x00000002L,
202 IPA_OUTBOUND_CHECKSUM = 0x00000004L,
203 IPA_IP_FRAGMENTATION = 0x00000008L,
204 IPA_FILTERING = 0x00000010L,
205 IPA_IPV6 = 0x00000020L,
206 IPA_MULTICASTING = 0x00000040L,
207 IPA_IP_REASSEMBLY = 0x00000080L,
208 IPA_QUERY_ARP_COUNTERS = 0x00000100L,
209 IPA_QUERY_ARP_ADDR_INFO = 0x00000200L,
210 IPA_SETADAPTERPARMS = 0x00000400L,
211 IPA_VLAN_PRIO = 0x00000800L,
212 IPA_PASSTHRU = 0x00001000L,
213 IPA_FLUSH_ARP_SUPPORT = 0x00002000L,
214 IPA_FULL_VLAN = 0x00004000L,
215 IPA_INBOUND_PASSTHRU = 0x00008000L,
216 IPA_SOURCE_MAC = 0x00010000L,
217 IPA_OSA_MC_ROUTER = 0x00020000L,
218 IPA_QUERY_ARP_ASSIST = 0x00040000L,
219 IPA_INBOUND_TSO = 0x00080000L,
220 IPA_OUTBOUND_TSO = 0x00100000L,
221};
222
223/* SETIP/DELIP IPA Command: ***************************************************/
224enum qeth_ipa_setdelip_flags {
225 QETH_IPA_SETDELIP_DEFAULT = 0x00L, /* default */
226 QETH_IPA_SETIP_VIPA_FLAG = 0x01L, /* no grat. ARP */
227 QETH_IPA_SETIP_TAKEOVER_FLAG = 0x02L, /* nofail on grat. ARP */
228 QETH_IPA_DELIP_ADDR_2_B_TAKEN_OVER = 0x20L,
229 QETH_IPA_DELIP_VIPA_FLAG = 0x40L,
230 QETH_IPA_DELIP_ADDR_NEEDS_SETIP = 0x80L,
231};
232
233/* SETADAPTER IPA Command: ****************************************************/
234enum qeth_ipa_setadp_cmd {
235 IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x0001,
236 IPA_SETADP_ALTER_MAC_ADDRESS = 0x0002,
237 IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x0004,
238 IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x0008,
239 IPA_SETADP_SET_ADDRESSING_MODE = 0x0010,
240 IPA_SETADP_SET_CONFIG_PARMS = 0x0020,
241 IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x0040,
242 IPA_SETADP_SET_BROADCAST_MODE = 0x0080,
243 IPA_SETADP_SEND_OSA_MESSAGE = 0x0100,
244 IPA_SETADP_SET_SNMP_CONTROL = 0x0200,
245 IPA_SETADP_QUERY_CARD_INFO = 0x0400,
246 IPA_SETADP_SET_PROMISC_MODE = 0x0800,
247};
248enum qeth_ipa_mac_ops {
249 CHANGE_ADDR_READ_MAC = 0,
250 CHANGE_ADDR_REPLACE_MAC = 1,
251 CHANGE_ADDR_ADD_MAC = 2,
252 CHANGE_ADDR_DEL_MAC = 4,
253 CHANGE_ADDR_RESET_MAC = 8,
254};
255enum qeth_ipa_addr_ops {
256 CHANGE_ADDR_READ_ADDR = 0,
257 CHANGE_ADDR_ADD_ADDR = 1,
258 CHANGE_ADDR_DEL_ADDR = 2,
259 CHANGE_ADDR_FLUSH_ADDR_TABLE = 4,
260};
261enum qeth_ipa_promisc_modes {
262 SET_PROMISC_MODE_OFF = 0,
263 SET_PROMISC_MODE_ON = 1,
264};
265
266/* (SET)DELIP(M) IPA stuff ***************************************************/
267struct qeth_ipacmd_setdelip4 {
268 __u8 ip_addr[4];
269 __u8 mask[4];
270 __u32 flags;
271} __attribute__ ((packed));
272
273struct qeth_ipacmd_setdelip6 {
274 __u8 ip_addr[16];
275 __u8 mask[16];
276 __u32 flags;
277} __attribute__ ((packed));
278
279struct qeth_ipacmd_setdelipm {
280 __u8 mac[6];
281 __u8 padding[2];
282 __u8 ip6[12];
283 __u8 ip4[4];
284} __attribute__ ((packed));
285
286struct qeth_ipacmd_layer2setdelmac {
287 __u32 mac_length;
288 __u8 mac[6];
289} __attribute__ ((packed));
290
291struct qeth_ipacmd_layer2setdelvlan {
292 __u16 vlan_id;
293} __attribute__ ((packed));
294
295
296struct qeth_ipacmd_setassparms_hdr {
297 __u32 assist_no;
298 __u16 length;
299 __u16 command_code;
300 __u16 return_code;
301 __u8 number_of_replies;
302 __u8 seq_no;
303} __attribute__((packed));
304
305struct qeth_arp_query_data {
306 __u16 request_bits;
307 __u16 reply_bits;
308 __u32 no_entries;
309 char data;
310} __attribute__((packed));
311
312/* used as parameter for arp_query reply */
313struct qeth_arp_query_info {
314 __u32 udata_len;
315 __u16 mask_bits;
316 __u32 udata_offset;
317 __u32 no_entries;
318 char *udata;
319};
320
321/* SETASSPARMS IPA Command: */
322struct qeth_ipacmd_setassparms {
323 struct qeth_ipacmd_setassparms_hdr hdr;
324 union {
325 __u32 flags_32bit;
326 struct qeth_arp_cache_entry add_arp_entry;
327 struct qeth_arp_query_data query_arp;
328 __u8 ip[16];
329 } data;
330} __attribute__ ((packed));
331
332
333/* SETRTG IPA Command: ****************************************************/
/* SETRTG payload: requested routing role of this interface. */
struct qeth_set_routing {
	__u8 type;
};
337
338/* SETADAPTERPARMS IPA Command: *******************************************/
/* SETADAPTERPARMS reply: LAN type and bitmask of supported sub-commands. */
struct qeth_query_cmds_supp {
	__u32 no_lantypes_supp;
	__u8 lan_type;
	__u8 reserved1[3];
	__u32 supported_cmds;	/* bitmask of supported SETADAPTERPARMS cmds */
	__u8 reserved2[8];
} __attribute__ ((packed));
346
/* SETADAPTERPARMS change-address payload (see qeth_ipa_mac_ops for 'cmd'). */
struct qeth_change_addr {
	__u32 cmd;
	__u32 addr_size;
	__u32 no_macs;
	__u8 addr[OSA_ADDR_LEN];
} __attribute__ ((packed));
353
354
/* SNMP sub-command of SETADAPTERPARMS; 'data' starts the variable payload. */
struct qeth_snmp_cmd {
	__u8 token[16];
	__u32 request;
	__u32 interface;
	__u32 returncode;
	__u32 firmwarelevel;
	__u32 seqno;
	__u8 data;	/* placeholder for trailing variable-length data */
} __attribute__ ((packed));
364
/* Header of an SNMP user-space request handed in via ioctl. */
struct qeth_snmp_ureq_hdr {
	__u32 data_len;
	__u32 req_len;
	__u32 reserved1;
	__u32 reserved2;
} __attribute__ ((packed));
371
/* Complete SNMP user request: user header followed by the SNMP command. */
struct qeth_snmp_ureq {
	struct qeth_snmp_ureq_hdr hdr;
	struct qeth_snmp_cmd cmd;
} __attribute__((packed));
376
/* Common header preceding every SETADAPTERPARMS sub-command payload. */
struct qeth_ipacmd_setadpparms_hdr {
	__u32 supp_hw_cmds;
	__u32 reserved1;
	__u16 cmdlength;
	__u16 reserved2;
	__u32 command_code;
	__u16 return_code;
	__u8 used_total;
	__u8 seq_no;
	__u32 reserved3;
} __attribute__ ((packed));
388
/* SETADAPTERPARMS IPA command: header plus sub-command-specific payload. */
struct qeth_ipacmd_setadpparms {
	struct qeth_ipacmd_setadpparms_hdr hdr;
	union {
		struct qeth_query_cmds_supp query_cmds_supp;
		struct qeth_change_addr change_addr;
		struct qeth_snmp_cmd snmp;
		__u32 mode;
	} data;
} __attribute__ ((packed));
398
399/* CREATE_ADDR IPA Command: ***********************************************/
/* CREATE_ADDR IPA Command: unique id used to build the IPv6 address. */
struct qeth_create_destroy_address {
	__u8 unique_id[8];
} __attribute__ ((packed));
403
404/* Header for each IPA command */
/* Header for each IPA command; precedes the command-specific union. */
struct qeth_ipacmd_hdr {
	__u8 command;
	__u8 initiator;		/* host or OSA, see IS_IPA_REPLY() */
	__u16 seqno;
	__u16 return_code;
	__u8 adapter_type;
	__u8 rel_adapter_no;
	__u8 prim_version_no;
	__u8 param_count;
	__u16 prot_version;
	__u32 ipa_supported;	/* bitmask of supported IP assists */
	__u32 ipa_enabled;	/* bitmask of enabled IP assists */
} __attribute__ ((packed));
418
419/* The IPA command itself */
/* The IPA command itself: common header plus per-command payload union. */
struct qeth_ipa_cmd {
	struct qeth_ipacmd_hdr hdr;
	union {
		struct qeth_ipacmd_setdelip4 setdelip4;
		struct qeth_ipacmd_setdelip6 setdelip6;
		struct qeth_ipacmd_setdelipm setdelipm;
		struct qeth_ipacmd_setassparms setassparms;
		struct qeth_ipacmd_layer2setdelmac setdelmac;
		struct qeth_ipacmd_layer2setdelvlan setdelvlan;
		struct qeth_create_destroy_address create_destroy_addr;
		struct qeth_ipacmd_setadpparms setadapterparms;
		struct qeth_set_routing setrtg;
	} data;
} __attribute__ ((packed));
434
435/*
436 * special command for ARP processing.
437 * this is not included in setassparms command before, because we get
438 * problem with the size of struct qeth_ipacmd_setassparms otherwise
439 */
/* Return codes of the ARP-processing assist commands. */
enum qeth_ipa_arp_return_codes {
	QETH_IPA_ARP_RC_SUCCESS = 0x0000,
	QETH_IPA_ARP_RC_FAILED = 0x0001,
	QETH_IPA_ARP_RC_NOTSUPP = 0x0002,
	QETH_IPA_ARP_RC_OUT_OF_RANGE = 0x0003,
	QETH_IPA_ARP_RC_Q_NOTSUPP = 0x0004,
	QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
};
448
449
450extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
451extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
452
453#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
454 sizeof(struct qeth_ipacmd_setassparms_hdr))
455#define QETH_IPA_ARP_DATA_POS(buffer) (buffer + IPA_PDU_HEADER_SIZE + \
456 QETH_SETASS_BASE_LEN)
457#define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
458 sizeof(struct qeth_ipacmd_setadpparms_hdr))
459#define QETH_SNMP_SETADP_CMDLENGTH 16
460
461#define QETH_ARP_DATA_SIZE 3968
462#define QETH_ARP_CMD_LEN (QETH_ARP_DATA_SIZE + 8)
463/* Helper functions */
464#define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \
465 (cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY))
466
467/*****************************************************************************/
468/* END OF IP Assist related definitions */
469/*****************************************************************************/
470
471
472extern unsigned char WRITE_CCW[];
473extern unsigned char READ_CCW[];
474
475extern unsigned char CM_ENABLE[];
476#define CM_ENABLE_SIZE 0x63
477#define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer + 0x2c)
478#define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53)
479#define QETH_CM_ENABLE_USER_DATA(buffer) (buffer + 0x5b)
480
481#define QETH_CM_ENABLE_RESP_FILTER_TOKEN(buffer) \
482 (PDU_ENCAPSULATION(buffer) + 0x13)
483
484
485extern unsigned char CM_SETUP[];
486#define CM_SETUP_SIZE 0x64
487#define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
488#define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
489#define QETH_CM_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a)
490
491#define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \
492 (PDU_ENCAPSULATION(buffer) + 0x1a)
493
494extern unsigned char ULP_ENABLE[];
495#define ULP_ENABLE_SIZE 0x6b
496#define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer + 0x61)
497#define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer + 0x2c)
498#define QETH_ULP_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53)
499#define QETH_ULP_ENABLE_PORTNAME_AND_LL(buffer) (buffer + 0x62)
500#define QETH_ULP_ENABLE_RESP_FILTER_TOKEN(buffer) \
501 (PDU_ENCAPSULATION(buffer) + 0x13)
502#define QETH_ULP_ENABLE_RESP_MAX_MTU(buffer) \
503 (PDU_ENCAPSULATION(buffer) + 0x1f)
504#define QETH_ULP_ENABLE_RESP_DIFINFO_LEN(buffer) \
505 (PDU_ENCAPSULATION(buffer) + 0x17)
506#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \
507 (PDU_ENCAPSULATION(buffer) + 0x2b)
508/* Layer 2 defintions */
509#define QETH_PROT_LAYER2 0x08
510#define QETH_PROT_TCPIP 0x03
511#define QETH_PROT_OSN2 0x0a
512#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer + 0x50)
513#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer + 0x19)
514
515extern unsigned char ULP_SETUP[];
516#define ULP_SETUP_SIZE 0x6c
517#define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
518#define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
519#define QETH_ULP_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a)
520#define QETH_ULP_SETUP_CUA(buffer) (buffer + 0x68)
521#define QETH_ULP_SETUP_REAL_DEVADDR(buffer) (buffer + 0x6a)
522
523#define QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(buffer) \
524 (PDU_ENCAPSULATION(buffer) + 0x1a)
525
526
527extern unsigned char DM_ACT[];
528#define DM_ACT_SIZE 0x55
529#define QETH_DM_ACT_DEST_ADDR(buffer) (buffer + 0x2c)
530#define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer + 0x51)
531
532
533
534#define QETH_TRANSPORT_HEADER_SEQ_NO(buffer) (buffer + 4)
535#define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer + 0x1c)
536#define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer + 0x20)
537
538extern unsigned char IDX_ACTIVATE_READ[];
539extern unsigned char IDX_ACTIVATE_WRITE[];
540
541#define IDX_ACTIVATE_SIZE 0x22
542#define QETH_IDX_ACT_PNO(buffer) (buffer+0x0b)
543#define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer + 0x0c)
544#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b] & 0x80)
545#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer + 0x10)
546#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer + 0x16)
547#define QETH_IDX_ACT_QDIO_DEV_CUA(buffer) (buffer + 0x1e)
548#define QETH_IDX_ACT_QDIO_DEV_REALADDR(buffer) (buffer + 0x20)
549#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2)
550#define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12)
551#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09]
552
553#define PDU_ENCAPSULATION(buffer) \
554 (buffer + *(buffer + (*(buffer + 0x0b)) + \
555 *(buffer + *(buffer + 0x0b) + 0x11) + 0x07))
556
557#define IS_IPA(buffer) \
558 ((buffer) && \
559 (*(buffer + ((*(buffer + 0x0b)) + 4)) == 0xc1))
560
561#define ADDR_FRAME_TYPE_DIX 1
562#define ADDR_FRAME_TYPE_802_3 2
563#define ADDR_FRAME_TYPE_TR_WITHOUT_SR 0x10
564#define ADDR_FRAME_TYPE_TR_WITH_SR 0x20
565
566#endif
diff --git a/drivers/s390/net/qeth_core_offl.c b/drivers/s390/net/qeth_core_offl.c
new file mode 100644
index 000000000000..8b407d6a83cf
--- /dev/null
+++ b/drivers/s390/net/qeth_core_offl.c
@@ -0,0 +1,701 @@
1/*
2 * drivers/s390/net/qeth_core_offl.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Thomas Spatzier <tspat@de.ibm.com>,
6 * Frank Blaschka <frank.blaschka@de.ibm.com>
7 */
8
9#include <linux/errno.h>
10#include <linux/ip.h>
11#include <linux/inetdevice.h>
12#include <linux/netdevice.h>
13#include <linux/kernel.h>
14#include <linux/tcp.h>
15#include <net/tcp.h>
16#include <linux/skbuff.h>
17
18#include <net/ip.h>
19#include <net/ip6_checksum.h>
20
21#include "qeth_core.h"
22#include "qeth_core_mpc.h"
23#include "qeth_core_offl.h"
24
/*
 * Count how many outbound buffers are needed to hold all elements of @ctx,
 * starting at the queue's next fill position. Only whole skbs are placed
 * into one buffer, so per buffer only a multiple of elements_per_skb counts.
 * Returns the number of buffers, or -EBUSY if a required buffer is not empty.
 */
int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
		struct qeth_eddp_context *ctx)
{
	int index = queue->next_buf_to_fill;
	int elements_needed = ctx->num_elements;
	int elements_in_buffer;
	int skbs_in_buffer;
	int buffers_needed = 0;

	QETH_DBF_TEXT(trace, 5, "eddpcbfc");
	while (elements_needed > 0) {
		buffers_needed++;
		if (atomic_read(&queue->bufs[index].state) !=
				QETH_QDIO_BUF_EMPTY)
			return -EBUSY;

		elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
				     queue->bufs[index].next_element_to_fill;
		/* round down to whole skbs that fit in this buffer */
		skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
		elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
		index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
	}
	return buffers_needed;
}
49
50static void qeth_eddp_free_context(struct qeth_eddp_context *ctx)
51{
52 int i;
53
54 QETH_DBF_TEXT(trace, 5, "eddpfctx");
55 for (i = 0; i < ctx->num_pages; ++i)
56 free_page((unsigned long)ctx->pages[i]);
57 kfree(ctx->pages);
58 kfree(ctx->elements);
59 kfree(ctx);
60}
61
62
/* Take an additional reference on @ctx (paired with qeth_eddp_put_context). */
static void qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
	atomic_inc(&ctx->refcnt);
}
67
/* Drop a reference on @ctx; frees it when the last reference is gone. */
void qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
	if (atomic_dec_return(&ctx->refcnt) == 0)
		qeth_eddp_free_context(ctx);
}
EXPORT_SYMBOL_GPL(qeth_eddp_put_context);
74
75void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
76{
77 struct qeth_eddp_context_reference *ref;
78
79 QETH_DBF_TEXT(trace, 6, "eddprctx");
80 while (!list_empty(&buf->ctx_list)) {
81 ref = list_entry(buf->ctx_list.next,
82 struct qeth_eddp_context_reference, list);
83 qeth_eddp_put_context(ref->ctx);
84 list_del(&ref->list);
85 kfree(ref);
86 }
87}
88
/*
 * Attach a reference to @ctx to buffer @buf, taking a context refcount.
 * Returns 0 on success, -ENOMEM if the reference node cannot be allocated.
 */
static int qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
		struct qeth_eddp_context *ctx)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(trace, 6, "eddprfcx");
	/* GFP_ATOMIC: called from the transmit path */
	ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
	if (ref == NULL)
		return -ENOMEM;
	qeth_eddp_get_context(ctx);
	ref->ctx = ctx;
	list_add_tail(&ref->list, &buf->ctx_list);
	return 0;
}
103
/*
 * Distribute the elements of @ctx over the outbound buffers of @queue,
 * starting at buffer @index. A buffer only ever holds whole skbs; when the
 * next skb does not fit, the current buffer is primed and the next one is
 * used. Each buffer that receives data gets a reference on @ctx.
 * Returns the number of buffers set to PRIMED (to be flushed), or -EBUSY
 * if not even the first buffer was empty.
 *
 * NOTE(review): if ctx->num_elements were 0 the loop never runs and 'buf'
 * is still NULL at out_check — callers appear to only pass contexts with
 * elements; confirm.
 */
int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
		struct qeth_eddp_context *ctx, int index)
{
	struct qeth_qdio_out_buffer *buf = NULL;
	struct qdio_buffer *buffer;
	int elements = ctx->num_elements;
	int element = 0;
	int flush_cnt = 0;
	int must_refcnt = 1;
	int i;

	QETH_DBF_TEXT(trace, 5, "eddpfibu");
	while (elements > 0) {
		buf = &queue->bufs[index];
		if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
			/* normally this should not happen since we checked for
			 * available elements in qeth_check_elements_for_context
			 */
			if (element == 0)
				return -EBUSY;
			else {
				PRINT_WARN("could only partially fill eddp "
					"buffer!\n");
				goto out;
			}
		}
		/* check if the whole next skb fits into current buffer */
		if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
					buf->next_element_to_fill)
				< ctx->elements_per_skb){
			/* no -> go to next buffer */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
			flush_cnt++;
			/* new buffer, so we have to add ctx to buffer's
			 * ctx_list and increment ctx's refcnt */
			must_refcnt = 1;
			continue;
		}
		if (must_refcnt) {
			must_refcnt = 0;
			if (qeth_eddp_buf_ref_context(buf, ctx)) {
				PRINT_WARN("no memory to create eddp context "
					"reference\n");
				goto out_check;
			}
		}
		buffer = buf->buffer;
		/* fill one skb into buffer */
		for (i = 0; i < ctx->elements_per_skb; ++i) {
			/* zero-length elements are padding, skip them */
			if (ctx->elements[element].length != 0) {
				buffer->element[buf->next_element_to_fill].
				addr = ctx->elements[element].addr;
				buffer->element[buf->next_element_to_fill].
				length = ctx->elements[element].length;
				buffer->element[buf->next_element_to_fill].
				flags = ctx->elements[element].flags;
				buf->next_element_to_fill++;
			}
			element++;
			elements--;
		}
	}
out_check:
	if (!queue->do_pack) {
		QETH_DBF_TEXT(trace, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		if (buf->next_element_to_fill > 0) {
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	} else {
		if (queue->card->options.performance_stats)
			queue->card->perf_stats.skbs_sent_pack++;
		QETH_DBF_TEXT(trace, 6, "fillbfpa");
		if (buf->next_element_to_fill >=
				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/*
			 * packed buffer if full -> set state PRIMED
			 * -> will be flushed
			 */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	}
out:
	return flush_cnt;
}
192
/*
 * Copy all headers of the next segment (qeth hdr, optional MAC/VLAN,
 * network and transport header) into the context pages and start a new
 * context element describing them. A segment never crosses a page
 * boundary: if qeth hdr + headers + @data_len do not fit in the current
 * page, the copy starts at the beginning of the next page.
 * Records where nh/th landed (nh_in_ctx/th_in_ctx) for later fix-ups.
 */
static void qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
		struct qeth_eddp_data *eddp, int data_len)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	int pkt_len;
	struct qeth_eddp_element *element;

	QETH_DBF_TEXT(trace, 5, "eddpcrsh");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	pkt_len = eddp->nhl + eddp->thl + data_len;
	/* FIXME: layer2 and VLAN !!! */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
		pkt_len += ETH_HLEN;
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
		pkt_len += VLAN_HLEN;
	/* does complete packet fit in current page ? */
	page_remainder = PAGE_SIZE - page_offset;
	if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
		/* no -> go to start of next page */
		ctx->offset += page_remainder;
		page = ctx->pages[ctx->offset >> PAGE_SHIFT];
		page_offset = 0;
	}
	memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
	element->addr = page + page_offset;
	element->length = sizeof(struct qeth_hdr);
	ctx->offset += sizeof(struct qeth_hdr);
	page_offset += sizeof(struct qeth_hdr);
	/* add mac header (?) */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
		element->length += ETH_HLEN;
		ctx->offset += ETH_HLEN;
		page_offset += ETH_HLEN;
	}
	/* add VLAN tag */
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
		memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
		element->length += VLAN_HLEN;
		ctx->offset += VLAN_HLEN;
		page_offset += VLAN_HLEN;
	}
	/* add network header */
	memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
	element->length += eddp->nhl;
	eddp->nh_in_ctx = page + page_offset;
	ctx->offset += eddp->nhl;
	page_offset += eddp->nhl;
	/* add transport header */
	memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
	element->length += eddp->thl;
	eddp->th_in_ctx = page + page_offset;
	ctx->offset += eddp->thl;
}
251
/*
 * Copy @len payload bytes of the skb attached to @eddp to @dst, folding
 * the copied bytes into the running checksum *@hcsum. The copy cursor
 * (skb_offset, frag, frag_offset) is advanced; frag == -1 means we are
 * still copying from the linear skb->data area, frag >= 0 indexes into
 * the skb's page fragments.
 */
static void qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp,
		int len, __wsum *hcsum)
{
	struct skb_frag_struct *frag;
	int left_in_frag;
	int copy_len;
	u8 *src;

	QETH_DBF_TEXT(trace, 5, "eddpcdtc");
	if (skb_shinfo(eddp->skb)->nr_frags == 0) {
		/* fully linear skb: one copy is enough */
		skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset,
						 dst, len);
		*hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
				      *hcsum);
		eddp->skb_offset += len;
	} else {
		while (len > 0) {
			if (eddp->frag < 0) {
				/* we're in skb->data */
				left_in_frag = (eddp->skb->len -
						eddp->skb->data_len)
						- eddp->skb_offset;
				src = eddp->skb->data + eddp->skb_offset;
			} else {
				frag = &skb_shinfo(eddp->skb)->frags[
					eddp->frag];
				left_in_frag = frag->size - eddp->frag_offset;
				src = (u8 *)((page_to_pfn(frag->page) <<
					PAGE_SHIFT) + frag->page_offset +
					eddp->frag_offset);
			}
			if (left_in_frag <= 0) {
				/* current area exhausted -> next fragment */
				eddp->frag++;
				eddp->frag_offset = 0;
				continue;
			}
			copy_len = min(left_in_frag, len);
			memcpy(dst, src, copy_len);
			*hcsum = csum_partial(src, copy_len, *hcsum);
			dst += copy_len;
			eddp->frag_offset += copy_len;
			eddp->skb_offset += copy_len;
			len -= copy_len;
		}
	}
}
298
/*
 * Copy @data_len bytes of TCP payload into the context pages, continuing
 * the element started by qeth_eddp_create_segment_hdrs(). Data crossing
 * a page boundary is split into FIRST/MIDDLE/LAST SBAL fragments.
 * @hcsum already covers the pseudo header and TCP header; after the copy
 * the final checksum is folded into the TCP header previously placed in
 * the context (th_in_ctx).
 */
static void qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
		struct qeth_eddp_data *eddp, int data_len, __wsum hcsum)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	struct qeth_eddp_element *element;
	int first_lap = 1;

	QETH_DBF_TEXT(trace, 5, "eddpcsdt");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	while (data_len) {
		page_remainder = PAGE_SIZE - page_offset;
		if (page_remainder < data_len) {
			/* fill up current page, continue on the next one */
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						page_remainder, &hcsum);
			element->length += page_remainder;
			if (first_lap)
				element->flags = SBAL_FLAGS_FIRST_FRAG;
			else
				element->flags = SBAL_FLAGS_MIDDLE_FRAG;
			ctx->num_elements++;
			element++;
			data_len -= page_remainder;
			ctx->offset += page_remainder;
			page = ctx->pages[ctx->offset >> PAGE_SHIFT];
			page_offset = 0;
			element->addr = page + page_offset;
		} else {
			/* rest fits into the current page */
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						data_len, &hcsum);
			element->length += data_len;
			if (!first_lap)
				element->flags = SBAL_FLAGS_LAST_FRAG;
			ctx->num_elements++;
			ctx->offset += data_len;
			data_len = 0;
		}
		first_lap = 0;
	}
	/* patch the final TCP checksum into the copied header */
	((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}
343
/*
 * Start the TCP checksum for an IPv4 segment: zero the header checksum,
 * build the IPv4 pseudo-header sum and fold the TCP header into it.
 * Returns the partial sum; the payload is added later during the copy.
 */
static __wsum qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp,
		int data_len)
{
	__wsum phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt4");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
				    eddp->thl + data_len, IPPROTO_TCP, 0);
	/* compute checksum of tcp header */
	return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}
357
/*
 * Start the TCP checksum for an IPv6 segment: zero the header checksum
 * and sum the IPv6 pseudo-header (source, destination, protocol).
 * NOTE(review): unlike the IPv4 variant, @data_len and the TCP header are
 * not folded in here — presumably accounted for elsewhere; confirm.
 */
static __wsum qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp,
		int data_len)
{
	__be32 proto;
	__wsum phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt6");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
			      sizeof(struct in6_addr), 0);
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
			      sizeof(struct in6_addr), phcsum);
	proto = htonl(IPPROTO_TCP);
	phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
	return phcsum;
}
375
376static struct qeth_eddp_data *qeth_eddp_create_eddp_data(struct qeth_hdr *qh,
377 u8 *nh, u8 nhl, u8 *th, u8 thl)
378{
379 struct qeth_eddp_data *eddp;
380
381 QETH_DBF_TEXT(trace, 5, "eddpcrda");
382 eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
383 if (eddp) {
384 eddp->nhl = nhl;
385 eddp->thl = thl;
386 memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
387 memcpy(&eddp->nh, nh, nhl);
388 memcpy(&eddp->th, th, thl);
389 eddp->frag = -1; /* initially we're in skb->data */
390 }
391 return eddp;
392}
393
/*
 * Segment the TSO skb attached to @eddp into MSS-sized pieces and write
 * each piece (headers + payload) into the context. Per segment the qeth,
 * IP and TCP header templates are fixed up (lengths, checksums, IP id,
 * TCP sequence number; FIN/PSH only on the last segment).
 */
static void __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
		struct qeth_eddp_data *eddp)
{
	struct tcphdr *tcph;
	int data_len;
	__wsum hcsum;

	QETH_DBF_TEXT(trace, 5, "eddpftcp");
	/* skip the headers in the skb; only payload is copied per segment */
	eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		eddp->skb_offset += sizeof(struct ethhdr);
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
			eddp->skb_offset += VLAN_HLEN;
	}
	tcph = tcp_hdr(eddp->skb);
	while (eddp->skb_offset < eddp->skb->len) {
		/* one MSS per segment; the last one may be shorter */
		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
			       (int)(eddp->skb->len - eddp->skb_offset));
		/* prepare qdio hdr */
		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
			eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
						     eddp->nhl + eddp->thl;
			if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
				eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
		} else
			eddp->qh.hdr.l3.length = data_len + eddp->nhl +
						 eddp->thl;
		/* prepare ip hdr */
		if (eddp->skb->protocol == htons(ETH_P_IP)) {
			eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl +
						 eddp->thl);
			eddp->nh.ip4.h.check = 0;
			eddp->nh.ip4.h.check =
				ip_fast_csum((u8 *)&eddp->nh.ip4.h,
						eddp->nh.ip4.h.ihl);
		} else
			eddp->nh.ip6.h.payload_len = htons(data_len +
						 eddp->thl);
		/* prepare tcp hdr */
		if (data_len == (eddp->skb->len - eddp->skb_offset)) {
			/* last segment -> set FIN and PSH flags */
			eddp->th.tcp.h.fin = tcph->fin;
			eddp->th.tcp.h.psh = tcph->psh;
		}
		if (eddp->skb->protocol == htons(ETH_P_IP))
			hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
		else
			hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
		/* fill the next segment into the context */
		qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
		qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
		if (eddp->skb_offset >= eddp->skb->len)
			break;
		/* prepare headers for next round */
		if (eddp->skb->protocol == htons(ETH_P_IP))
			eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1);
		eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) +
					   data_len);
	}
}
454
/*
 * Build the per-skb EDDP scratch data (header templates, MAC/VLAN for
 * layer 2) for @skb and run the TCP segmentation into @ctx.
 * Returns 0 on success, -ENOMEM if the scratch area cannot be allocated.
 */
static int qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
		struct sk_buff *skb, struct qeth_hdr *qhdr)
{
	struct qeth_eddp_data *eddp = NULL;

	QETH_DBF_TEXT(trace, 5, "eddpficx");
	/* create our segmentation headers and copy original headers */
	if (skb->protocol == htons(ETH_P_IP))
		eddp = qeth_eddp_create_eddp_data(qhdr,
						  skb_network_header(skb),
						  ip_hdrlen(skb),
						  skb_transport_header(skb),
						  tcp_hdrlen(skb));
	else
		eddp = qeth_eddp_create_eddp_data(qhdr,
						  skb_network_header(skb),
						  sizeof(struct ipv6hdr),
						  skb_transport_header(skb),
						  tcp_hdrlen(skb));

	if (eddp == NULL) {
		QETH_DBF_TEXT(trace, 2, "eddpfcnm");
		return -ENOMEM;
	}
	if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		/* layer 2: also keep the ethernet header and VLAN tag */
		skb_set_mac_header(skb, sizeof(struct qeth_hdr));
		memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
			eddp->vlan[0] = skb->protocol;
			eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
		}
	}
	/* the next flags will only be set on the last segment */
	eddp->th.tcp.h.fin = 0;
	eddp->th.tcp.h.psh = 0;
	eddp->skb = skb;
	/* begin segmentation and fill context */
	__qeth_eddp_fill_context_tcp(ctx, eddp);
	kfree(eddp);
	return 0;
}
496
/*
 * Estimate how many pages and SBAL elements the context needs for the
 * TSO skb. Uses gso_segs + 1 as a conservative upper bound on the number
 * of segments; the page/element counts are intentionally overestimates
 * (num_elements is reset to 0 afterwards and counted up while filling).
 */
static void qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx,
		struct sk_buff *skb, int hdr_len)
{
	int skbs_per_page;

	QETH_DBF_TEXT(trace, 5, "eddpcanp");
	/* can we put multiple skbs in one page? */
	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
	if (skbs_per_page > 1) {
		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
				 skbs_per_page + 1;
		ctx->elements_per_skb = 1;
	} else {
		/* no -> how many elements per skb? */
		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
				 PAGE_SIZE) >> PAGE_SHIFT;
		ctx->num_pages = ctx->elements_per_skb *
				 (skb_shinfo(skb)->gso_segs + 1);
	}
	ctx->num_elements = ctx->elements_per_skb *
			    (skb_shinfo(skb)->gso_segs + 1);
}
519
/*
 * Allocate an EDDP context for @skb: the context struct itself, the
 * zeroed data pages and the element array, all sized from the estimate
 * in qeth_eddp_calc_num_pages(). Returns NULL on any allocation failure
 * or when one skb would need more elements than a buffer can hold.
 * All allocations are GFP_ATOMIC (transmit path).
 */
static struct qeth_eddp_context *qeth_eddp_create_context_generic(
	struct qeth_card *card, struct sk_buff *skb, int hdr_len)
{
	struct qeth_eddp_context *ctx = NULL;
	u8 *addr;
	int i;

	QETH_DBF_TEXT(trace, 5, "creddpcg");
	/* create the context and allocate pages */
	ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
	if (ctx == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn1");
		return NULL;
	}
	ctx->type = QETH_LARGE_SEND_EDDP;
	qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
	if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
		QETH_DBF_TEXT(trace, 2, "ceddpcis");
		kfree(ctx);
		return NULL;
	}
	ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
	if (ctx->pages == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn2");
		kfree(ctx);
		return NULL;
	}
	for (i = 0; i < ctx->num_pages; ++i) {
		addr = (u8 *)get_zeroed_page(GFP_ATOMIC);
		if (addr == NULL) {
			QETH_DBF_TEXT(trace, 2, "ceddpcn3");
			/* only free the pages allocated so far */
			ctx->num_pages = i;
			qeth_eddp_free_context(ctx);
			return NULL;
		}
		ctx->pages[i] = addr;
	}
	ctx->elements = kcalloc(ctx->num_elements,
				sizeof(struct qeth_eddp_element), GFP_ATOMIC);
	if (ctx->elements == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn4");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	/* reset num_elements; will be incremented again in fill_buffer to
	 * reflect number of actually used elements */
	ctx->num_elements = 0;
	return ctx;
}
569
/*
 * Create and fill an EDDP context for a TCP TSO skb (IPv4 or IPv6).
 * Returns the context with refcnt 1, or NULL on unsupported protocol,
 * allocation failure or fill error.
 */
static struct qeth_eddp_context *qeth_eddp_create_context_tcp(
	struct qeth_card *card, struct sk_buff *skb,
	struct qeth_hdr *qhdr)
{
	struct qeth_eddp_context *ctx = NULL;

	QETH_DBF_TEXT(trace, 5, "creddpct");
	if (skb->protocol == htons(ETH_P_IP))
		ctx = qeth_eddp_create_context_generic(card, skb,
			(sizeof(struct qeth_hdr) +
			 ip_hdrlen(skb) +
			 tcp_hdrlen(skb)));
	else if (skb->protocol == htons(ETH_P_IPV6))
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
			tcp_hdrlen(skb));
	else
		QETH_DBF_TEXT(trace, 2, "cetcpinv");

	if (ctx == NULL) {
		QETH_DBF_TEXT(trace, 2, "creddpnl");
		return NULL;
	}
	if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
		QETH_DBF_TEXT(trace, 2, "ceddptfe");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	atomic_set(&ctx->refcnt, 1);
	return ctx;
}
601
602struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *card,
603 struct sk_buff *skb, struct qeth_hdr *qhdr,
604 unsigned char sk_protocol)
605{
606 QETH_DBF_TEXT(trace, 5, "creddpc");
607 switch (sk_protocol) {
608 case IPPROTO_TCP:
609 return qeth_eddp_create_context_tcp(card, skb, qhdr);
610 default:
611 QETH_DBF_TEXT(trace, 2, "eddpinvp");
612 }
613 return NULL;
614}
615EXPORT_SYMBOL_GPL(qeth_eddp_create_context);
616
617void qeth_tso_fill_header(struct qeth_card *card, struct qeth_hdr *qhdr,
618 struct sk_buff *skb)
619{
620 struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
621 struct tcphdr *tcph = tcp_hdr(skb);
622 struct iphdr *iph = ip_hdr(skb);
623 struct ipv6hdr *ip6h = ipv6_hdr(skb);
624
625 QETH_DBF_TEXT(trace, 5, "tsofhdr");
626
627 /*fix header to TSO values ...*/
628 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
629 /*set values which are fix for the first approach ...*/
630 hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
631 hdr->ext.imb_hdr_no = 1;
632 hdr->ext.hdr_type = 1;
633 hdr->ext.hdr_version = 1;
634 hdr->ext.hdr_len = 28;
635 /*insert non-fix values */
636 hdr->ext.mss = skb_shinfo(skb)->gso_size;
637 hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
638 hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
639 sizeof(struct qeth_hdr_tso));
640 tcph->check = 0;
641 if (skb->protocol == ETH_P_IPV6) {
642 ip6h->payload_len = 0;
643 tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
644 0, IPPROTO_TCP, 0);
645 } else {
646 /*OSA want us to set these values ...*/
647 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
648 0, IPPROTO_TCP, 0);
649 iph->tot_len = 0;
650 iph->check = 0;
651 }
652}
653EXPORT_SYMBOL_GPL(qeth_tso_fill_header);
654
/*
 * Compute the TCP/UDP checksum of an outgoing skb in software and store
 * it in the transport header (used when the hardware cannot do it).
 * Other transport protocols are left untouched.
 *
 * NOTE(review): for IPv6 the big-endian payload_len (__be16) is passed
 * as the length argument without ntohs(); this is presumably correct
 * only because s390 is big-endian — confirm before reusing elsewhere.
 */
void qeth_tx_csum(struct sk_buff *skb)
{
	int tlen;
	if (skb->protocol == htons(ETH_P_IP)) {
		/* transport payload length = total length - IP header */
		tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_TCP:
			tcp_hdr(skb)->check = 0;
			tcp_hdr(skb)->check = csum_tcpudp_magic(
				ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				tlen, ip_hdr(skb)->protocol,
				skb_checksum(skb, skb_transport_offset(skb),
					tlen, 0));
			break;
		case IPPROTO_UDP:
			udp_hdr(skb)->check = 0;
			udp_hdr(skb)->check = csum_tcpudp_magic(
				ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				tlen, ip_hdr(skb)->protocol,
				skb_checksum(skb, skb_transport_offset(skb),
					tlen, 0));
			break;
		}
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		switch (ipv6_hdr(skb)->nexthdr) {
		case IPPROTO_TCP:
			tcp_hdr(skb)->check = 0;
			tcp_hdr(skb)->check = csum_ipv6_magic(
				&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
				ipv6_hdr(skb)->payload_len,
				ipv6_hdr(skb)->nexthdr,
				skb_checksum(skb, skb_transport_offset(skb),
					ipv6_hdr(skb)->payload_len, 0));
			break;
		case IPPROTO_UDP:
			udp_hdr(skb)->check = 0;
			udp_hdr(skb)->check = csum_ipv6_magic(
				&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
				ipv6_hdr(skb)->payload_len,
				ipv6_hdr(skb)->nexthdr,
				skb_checksum(skb, skb_transport_offset(skb),
					ipv6_hdr(skb)->payload_len, 0));
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(qeth_tx_csum);
diff --git a/drivers/s390/net/qeth_core_offl.h b/drivers/s390/net/qeth_core_offl.h
new file mode 100644
index 000000000000..86bf7df8cf16
--- /dev/null
+++ b/drivers/s390/net/qeth_core_offl.h
@@ -0,0 +1,76 @@
1/*
2 * drivers/s390/net/qeth_core_offl.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Thomas Spatzier <tspat@de.ibm.com>,
6 * Frank Blaschka <frank.blaschka@de.ibm.com>
7 */
8
9#ifndef __QETH_CORE_OFFL_H__
10#define __QETH_CORE_OFFL_H__
11
/* One SBAL-element-sized piece of an EDDP context (flags, length, addr). */
struct qeth_eddp_element {
	u32 flags;	/* SBAL fragment flags (FIRST/MIDDLE/LAST) */
	u32 length;
	void *addr;	/* points into one of the context pages */
};
17
/* Refcounted container for the segmented copy of one large-send skb. */
struct qeth_eddp_context {
	atomic_t refcnt;	/* freed when the last reference is dropped */
	enum qeth_large_send_types type;
	int num_pages;			    /* # of allocated pages */
	u8 **pages;			    /* pointers to pages */
	int offset;			    /* offset in ctx during creation */
	int num_elements;		    /* # of required 'SBALEs' */
	struct qeth_eddp_element *elements; /* array of 'SBALEs' */
	int elements_per_skb;		    /* # of 'SBALEs' per skb **/
};
28
/* List node linking an outbound buffer to a context it references. */
struct qeth_eddp_context_reference {
	struct list_head list;	/* chained on qeth_qdio_out_buffer.ctx_list */
	struct qeth_eddp_context *ctx;
};
33
/*
 * Per-skb scratch data used while segmenting: header templates that are
 * patched per segment, plus the copy cursor into the skb's data/frags.
 */
struct qeth_eddp_data {
	struct qeth_hdr qh;	/* qeth header template */
	struct ethhdr mac;	/* MAC header (layer 2 only) */
	__be16 vlan[2];
	union {
		struct {
			struct iphdr h;
			u8 options[40];
		} ip4;
		struct {
			struct ipv6hdr h;
		} ip6;
	} nh;
	u8 nhl;
	void *nh_in_ctx;	/* address of nh within the ctx */
	union {
		struct {
			struct tcphdr h;
			u8 options[40];
		} tcp;
	} th;
	u8 thl;
	void *th_in_ctx;	/* address of th within the ctx */
	struct sk_buff *skb;
	int skb_offset;		/* copy cursor into the skb */
	int frag;		/* current page frag; -1 = linear data */
	int frag_offset;
} __attribute__ ((packed));
62
63extern struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *,
64 struct sk_buff *, struct qeth_hdr *, unsigned char);
65extern void qeth_eddp_put_context(struct qeth_eddp_context *);
66extern int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *,
67 struct qeth_eddp_context *, int);
68extern void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *);
69extern int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *,
70 struct qeth_eddp_context *);
71
72void qeth_tso_fill_header(struct qeth_card *, struct qeth_hdr *,
73 struct sk_buff *);
74void qeth_tx_csum(struct sk_buff *skb);
75
76#endif /* __QETH_CORE_EDDP_H__ */
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
new file mode 100644
index 000000000000..08a50f057284
--- /dev/null
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -0,0 +1,651 @@
1/*
2 * drivers/s390/net/qeth_core_sys.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#include <linux/list.h>
12#include <linux/rwsem.h>
13#include <asm/ebcdic.h>
14
15#include "qeth_core.h"
16
17static ssize_t qeth_dev_state_show(struct device *dev,
18 struct device_attribute *attr, char *buf)
19{
20 struct qeth_card *card = dev_get_drvdata(dev);
21 if (!card)
22 return -EINVAL;
23
24 switch (card->state) {
25 case CARD_STATE_DOWN:
26 return sprintf(buf, "DOWN\n");
27 case CARD_STATE_HARDSETUP:
28 return sprintf(buf, "HARDSETUP\n");
29 case CARD_STATE_SOFTSETUP:
30 return sprintf(buf, "SOFTSETUP\n");
31 case CARD_STATE_UP:
32 if (card->lan_online)
33 return sprintf(buf, "UP (LAN ONLINE)\n");
34 else
35 return sprintf(buf, "UP (LAN OFFLINE)\n");
36 case CARD_STATE_RECOVER:
37 return sprintf(buf, "RECOVER\n");
38 default:
39 return sprintf(buf, "UNKNOWN\n");
40 }
41}
42
43static DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL);
44
45static ssize_t qeth_dev_chpid_show(struct device *dev,
46 struct device_attribute *attr, char *buf)
47{
48 struct qeth_card *card = dev_get_drvdata(dev);
49 if (!card)
50 return -EINVAL;
51
52 return sprintf(buf, "%02X\n", card->info.chpid);
53}
54
55static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL);
56
57static ssize_t qeth_dev_if_name_show(struct device *dev,
58 struct device_attribute *attr, char *buf)
59{
60 struct qeth_card *card = dev_get_drvdata(dev);
61 if (!card)
62 return -EINVAL;
63 return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
64}
65
66static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
67
68static ssize_t qeth_dev_card_type_show(struct device *dev,
69 struct device_attribute *attr, char *buf)
70{
71 struct qeth_card *card = dev_get_drvdata(dev);
72 if (!card)
73 return -EINVAL;
74
75 return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
76}
77
78static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL);
79
80static inline const char *qeth_get_bufsize_str(struct qeth_card *card)
81{
82 if (card->qdio.in_buf_size == 16384)
83 return "16k";
84 else if (card->qdio.in_buf_size == 24576)
85 return "24k";
86 else if (card->qdio.in_buf_size == 32768)
87 return "32k";
88 else if (card->qdio.in_buf_size == 40960)
89 return "40k";
90 else
91 return "64k";
92}
93
94static ssize_t qeth_dev_inbuf_size_show(struct device *dev,
95 struct device_attribute *attr, char *buf)
96{
97 struct qeth_card *card = dev_get_drvdata(dev);
98 if (!card)
99 return -EINVAL;
100
101 return sprintf(buf, "%s\n", qeth_get_bufsize_str(card));
102}
103
104static DEVICE_ATTR(inbuf_size, 0444, qeth_dev_inbuf_size_show, NULL);
105
106static ssize_t qeth_dev_portno_show(struct device *dev,
107 struct device_attribute *attr, char *buf)
108{
109 struct qeth_card *card = dev_get_drvdata(dev);
110 if (!card)
111 return -EINVAL;
112
113 return sprintf(buf, "%i\n", card->info.portno);
114}
115
116static ssize_t qeth_dev_portno_store(struct device *dev,
117 struct device_attribute *attr, const char *buf, size_t count)
118{
119 struct qeth_card *card = dev_get_drvdata(dev);
120 char *tmp;
121 unsigned int portno;
122
123 if (!card)
124 return -EINVAL;
125
126 if ((card->state != CARD_STATE_DOWN) &&
127 (card->state != CARD_STATE_RECOVER))
128 return -EPERM;
129
130 portno = simple_strtoul(buf, &tmp, 16);
131 if (portno > QETH_MAX_PORTNO) {
132 PRINT_WARN("portno 0x%X is out of range\n", portno);
133 return -EINVAL;
134 }
135
136 card->info.portno = portno;
137 return count;
138}
139
140static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
141
142static ssize_t qeth_dev_portname_show(struct device *dev,
143 struct device_attribute *attr, char *buf)
144{
145 struct qeth_card *card = dev_get_drvdata(dev);
146 char portname[9] = {0, };
147
148 if (!card)
149 return -EINVAL;
150
151 if (card->info.portname_required) {
152 memcpy(portname, card->info.portname + 1, 8);
153 EBCASC(portname, 8);
154 return sprintf(buf, "%s\n", portname);
155 } else
156 return sprintf(buf, "no portname required\n");
157}
158
159static ssize_t qeth_dev_portname_store(struct device *dev,
160 struct device_attribute *attr, const char *buf, size_t count)
161{
162 struct qeth_card *card = dev_get_drvdata(dev);
163 char *tmp;
164 int i;
165
166 if (!card)
167 return -EINVAL;
168
169 if ((card->state != CARD_STATE_DOWN) &&
170 (card->state != CARD_STATE_RECOVER))
171 return -EPERM;
172
173 tmp = strsep((char **) &buf, "\n");
174 if ((strlen(tmp) > 8) || (strlen(tmp) == 0))
175 return -EINVAL;
176
177 card->info.portname[0] = strlen(tmp);
178 /* for beauty reasons */
179 for (i = 1; i < 9; i++)
180 card->info.portname[i] = ' ';
181 strcpy(card->info.portname + 1, tmp);
182 ASCEBC(card->info.portname + 1, 8);
183
184 return count;
185}
186
187static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show,
188 qeth_dev_portname_store);
189
190static ssize_t qeth_dev_prioqing_show(struct device *dev,
191 struct device_attribute *attr, char *buf)
192{
193 struct qeth_card *card = dev_get_drvdata(dev);
194
195 if (!card)
196 return -EINVAL;
197
198 switch (card->qdio.do_prio_queueing) {
199 case QETH_PRIO_Q_ING_PREC:
200 return sprintf(buf, "%s\n", "by precedence");
201 case QETH_PRIO_Q_ING_TOS:
202 return sprintf(buf, "%s\n", "by type of service");
203 default:
204 return sprintf(buf, "always queue %i\n",
205 card->qdio.default_out_queue);
206 }
207}
208
209static ssize_t qeth_dev_prioqing_store(struct device *dev,
210 struct device_attribute *attr, const char *buf, size_t count)
211{
212 struct qeth_card *card = dev_get_drvdata(dev);
213 char *tmp;
214
215 if (!card)
216 return -EINVAL;
217
218 if ((card->state != CARD_STATE_DOWN) &&
219 (card->state != CARD_STATE_RECOVER))
220 return -EPERM;
221
222 /* check if 1920 devices are supported ,
223 * if though we have to permit priority queueing
224 */
225 if (card->qdio.no_out_queues == 1) {
226 PRINT_WARN("Priority queueing disabled due "
227 "to hardware limitations!\n");
228 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
229 return -EPERM;
230 }
231
232 tmp = strsep((char **) &buf, "\n");
233 if (!strcmp(tmp, "prio_queueing_prec"))
234 card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
235 else if (!strcmp(tmp, "prio_queueing_tos"))
236 card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
237 else if (!strcmp(tmp, "no_prio_queueing:0")) {
238 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
239 card->qdio.default_out_queue = 0;
240 } else if (!strcmp(tmp, "no_prio_queueing:1")) {
241 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
242 card->qdio.default_out_queue = 1;
243 } else if (!strcmp(tmp, "no_prio_queueing:2")) {
244 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
245 card->qdio.default_out_queue = 2;
246 } else if (!strcmp(tmp, "no_prio_queueing:3")) {
247 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
248 card->qdio.default_out_queue = 3;
249 } else if (!strcmp(tmp, "no_prio_queueing")) {
250 card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
251 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
252 } else {
253 PRINT_WARN("Unknown queueing type '%s'\n", tmp);
254 return -EINVAL;
255 }
256 return count;
257}
258
259static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show,
260 qeth_dev_prioqing_store);
261
262static ssize_t qeth_dev_bufcnt_show(struct device *dev,
263 struct device_attribute *attr, char *buf)
264{
265 struct qeth_card *card = dev_get_drvdata(dev);
266
267 if (!card)
268 return -EINVAL;
269
270 return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
271}
272
273static ssize_t qeth_dev_bufcnt_store(struct device *dev,
274 struct device_attribute *attr, const char *buf, size_t count)
275{
276 struct qeth_card *card = dev_get_drvdata(dev);
277 char *tmp;
278 int cnt, old_cnt;
279 int rc;
280
281 if (!card)
282 return -EINVAL;
283
284 if ((card->state != CARD_STATE_DOWN) &&
285 (card->state != CARD_STATE_RECOVER))
286 return -EPERM;
287
288 old_cnt = card->qdio.in_buf_pool.buf_count;
289 cnt = simple_strtoul(buf, &tmp, 10);
290 cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
291 ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
292 if (old_cnt != cnt) {
293 rc = qeth_realloc_buffer_pool(card, cnt);
294 if (rc)
295 PRINT_WARN("Error (%d) while setting "
296 "buffer count.\n", rc);
297 }
298 return count;
299}
300
301static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
302 qeth_dev_bufcnt_store);
303
304static ssize_t qeth_dev_recover_store(struct device *dev,
305 struct device_attribute *attr, const char *buf, size_t count)
306{
307 struct qeth_card *card = dev_get_drvdata(dev);
308 char *tmp;
309 int i;
310
311 if (!card)
312 return -EINVAL;
313
314 if (card->state != CARD_STATE_UP)
315 return -EPERM;
316
317 i = simple_strtoul(buf, &tmp, 16);
318 if (i == 1)
319 qeth_schedule_recovery(card);
320
321 return count;
322}
323
324static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
325
326static ssize_t qeth_dev_performance_stats_show(struct device *dev,
327 struct device_attribute *attr, char *buf)
328{
329 struct qeth_card *card = dev_get_drvdata(dev);
330
331 if (!card)
332 return -EINVAL;
333
334 return sprintf(buf, "%i\n", card->options.performance_stats ? 1:0);
335}
336
337static ssize_t qeth_dev_performance_stats_store(struct device *dev,
338 struct device_attribute *attr, const char *buf, size_t count)
339{
340 struct qeth_card *card = dev_get_drvdata(dev);
341 char *tmp;
342 int i;
343
344 if (!card)
345 return -EINVAL;
346
347 i = simple_strtoul(buf, &tmp, 16);
348 if ((i == 0) || (i == 1)) {
349 if (i == card->options.performance_stats)
350 return count;
351 card->options.performance_stats = i;
352 if (i == 0)
353 memset(&card->perf_stats, 0,
354 sizeof(struct qeth_perf_stats));
355 card->perf_stats.initial_rx_packets = card->stats.rx_packets;
356 card->perf_stats.initial_tx_packets = card->stats.tx_packets;
357 } else {
358 PRINT_WARN("performance_stats: write 0 or 1 to this file!\n");
359 return -EINVAL;
360 }
361 return count;
362}
363
364static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show,
365 qeth_dev_performance_stats_store);
366
367static ssize_t qeth_dev_layer2_show(struct device *dev,
368 struct device_attribute *attr, char *buf)
369{
370 struct qeth_card *card = dev_get_drvdata(dev);
371
372 if (!card)
373 return -EINVAL;
374
375 return sprintf(buf, "%i\n", card->options.layer2 ? 1:0);
376}
377
378static ssize_t qeth_dev_layer2_store(struct device *dev,
379 struct device_attribute *attr, const char *buf, size_t count)
380{
381 struct qeth_card *card = dev_get_drvdata(dev);
382 char *tmp;
383 int i, rc;
384 enum qeth_discipline_id newdis;
385
386 if (!card)
387 return -EINVAL;
388
389 if (((card->state != CARD_STATE_DOWN) &&
390 (card->state != CARD_STATE_RECOVER)))
391 return -EPERM;
392
393 i = simple_strtoul(buf, &tmp, 16);
394 switch (i) {
395 case 0:
396 newdis = QETH_DISCIPLINE_LAYER3;
397 break;
398 case 1:
399 newdis = QETH_DISCIPLINE_LAYER2;
400 break;
401 default:
402 PRINT_WARN("layer2: write 0 or 1 to this file!\n");
403 return -EINVAL;
404 }
405
406 if (card->options.layer2 == newdis) {
407 return count;
408 } else {
409 if (card->discipline.ccwgdriver) {
410 card->discipline.ccwgdriver->remove(card->gdev);
411 qeth_core_free_discipline(card);
412 }
413 }
414
415 rc = qeth_core_load_discipline(card, newdis);
416 if (rc)
417 return rc;
418
419 rc = card->discipline.ccwgdriver->probe(card->gdev);
420 if (rc)
421 return rc;
422 return count;
423}
424
425static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
426 qeth_dev_layer2_store);
427
428static ssize_t qeth_dev_large_send_show(struct device *dev,
429 struct device_attribute *attr, char *buf)
430{
431 struct qeth_card *card = dev_get_drvdata(dev);
432
433 if (!card)
434 return -EINVAL;
435
436 switch (card->options.large_send) {
437 case QETH_LARGE_SEND_NO:
438 return sprintf(buf, "%s\n", "no");
439 case QETH_LARGE_SEND_EDDP:
440 return sprintf(buf, "%s\n", "EDDP");
441 case QETH_LARGE_SEND_TSO:
442 return sprintf(buf, "%s\n", "TSO");
443 default:
444 return sprintf(buf, "%s\n", "N/A");
445 }
446}
447
448static ssize_t qeth_dev_large_send_store(struct device *dev,
449 struct device_attribute *attr, const char *buf, size_t count)
450{
451 struct qeth_card *card = dev_get_drvdata(dev);
452 enum qeth_large_send_types type;
453 int rc = 0;
454 char *tmp;
455
456 if (!card)
457 return -EINVAL;
458 tmp = strsep((char **) &buf, "\n");
459 if (!strcmp(tmp, "no")) {
460 type = QETH_LARGE_SEND_NO;
461 } else if (!strcmp(tmp, "EDDP")) {
462 type = QETH_LARGE_SEND_EDDP;
463 } else if (!strcmp(tmp, "TSO")) {
464 type = QETH_LARGE_SEND_TSO;
465 } else {
466 PRINT_WARN("large_send: invalid mode %s!\n", tmp);
467 return -EINVAL;
468 }
469 if (card->options.large_send == type)
470 return count;
471 rc = qeth_set_large_send(card, type);
472 if (rc)
473 return rc;
474 return count;
475}
476
477static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show,
478 qeth_dev_large_send_store);
479
480static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
481{
482
483 if (!card)
484 return -EINVAL;
485
486 return sprintf(buf, "%i\n", value);
487}
488
489static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
490 const char *buf, size_t count, int *value, int max_value)
491{
492 char *tmp;
493 int i;
494
495 if (!card)
496 return -EINVAL;
497
498 if ((card->state != CARD_STATE_DOWN) &&
499 (card->state != CARD_STATE_RECOVER))
500 return -EPERM;
501
502 i = simple_strtoul(buf, &tmp, 10);
503 if (i <= max_value) {
504 *value = i;
505 } else {
506 PRINT_WARN("blkt total time: write values between"
507 " 0 and %d to this file!\n", max_value);
508 return -EINVAL;
509 }
510 return count;
511}
512
513static ssize_t qeth_dev_blkt_total_show(struct device *dev,
514 struct device_attribute *attr, char *buf)
515{
516 struct qeth_card *card = dev_get_drvdata(dev);
517
518 return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total);
519}
520
521static ssize_t qeth_dev_blkt_total_store(struct device *dev,
522 struct device_attribute *attr, const char *buf, size_t count)
523{
524 struct qeth_card *card = dev_get_drvdata(dev);
525
526 return qeth_dev_blkt_store(card, buf, count,
527 &card->info.blkt.time_total, 1000);
528}
529
530
531
532static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show,
533 qeth_dev_blkt_total_store);
534
535static ssize_t qeth_dev_blkt_inter_show(struct device *dev,
536 struct device_attribute *attr, char *buf)
537{
538 struct qeth_card *card = dev_get_drvdata(dev);
539
540 return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet);
541}
542
543static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
544 struct device_attribute *attr, const char *buf, size_t count)
545{
546 struct qeth_card *card = dev_get_drvdata(dev);
547
548 return qeth_dev_blkt_store(card, buf, count,
549 &card->info.blkt.inter_packet, 100);
550}
551
552static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
553 qeth_dev_blkt_inter_store);
554
555static ssize_t qeth_dev_blkt_inter_jumbo_show(struct device *dev,
556 struct device_attribute *attr, char *buf)
557{
558 struct qeth_card *card = dev_get_drvdata(dev);
559
560 return qeth_dev_blkt_show(buf, card,
561 card->info.blkt.inter_packet_jumbo);
562}
563
564static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
565 struct device_attribute *attr, const char *buf, size_t count)
566{
567 struct qeth_card *card = dev_get_drvdata(dev);
568
569 return qeth_dev_blkt_store(card, buf, count,
570 &card->info.blkt.inter_packet_jumbo, 100);
571}
572
573static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
574 qeth_dev_blkt_inter_jumbo_store);
575
576static struct attribute *qeth_blkt_device_attrs[] = {
577 &dev_attr_total.attr,
578 &dev_attr_inter.attr,
579 &dev_attr_inter_jumbo.attr,
580 NULL,
581};
582
583static struct attribute_group qeth_device_blkt_group = {
584 .name = "blkt",
585 .attrs = qeth_blkt_device_attrs,
586};
587
588static struct attribute *qeth_device_attrs[] = {
589 &dev_attr_state.attr,
590 &dev_attr_chpid.attr,
591 &dev_attr_if_name.attr,
592 &dev_attr_card_type.attr,
593 &dev_attr_inbuf_size.attr,
594 &dev_attr_portno.attr,
595 &dev_attr_portname.attr,
596 &dev_attr_priority_queueing.attr,
597 &dev_attr_buffer_count.attr,
598 &dev_attr_recover.attr,
599 &dev_attr_performance_stats.attr,
600 &dev_attr_layer2.attr,
601 &dev_attr_large_send.attr,
602 NULL,
603};
604
605static struct attribute_group qeth_device_attr_group = {
606 .attrs = qeth_device_attrs,
607};
608
609static struct attribute *qeth_osn_device_attrs[] = {
610 &dev_attr_state.attr,
611 &dev_attr_chpid.attr,
612 &dev_attr_if_name.attr,
613 &dev_attr_card_type.attr,
614 &dev_attr_buffer_count.attr,
615 &dev_attr_recover.attr,
616 NULL,
617};
618
619static struct attribute_group qeth_osn_device_attr_group = {
620 .attrs = qeth_osn_device_attrs,
621};
622
623int qeth_core_create_device_attributes(struct device *dev)
624{
625 int ret;
626 ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group);
627 if (ret)
628 return ret;
629 ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group);
630 if (ret)
631 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
632
633 return 0;
634}
635
636void qeth_core_remove_device_attributes(struct device *dev)
637{
638 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
639 sysfs_remove_group(&dev->kobj, &qeth_device_blkt_group);
640}
641
642int qeth_core_create_osn_attributes(struct device *dev)
643{
644 return sysfs_create_group(&dev->kobj, &qeth_osn_device_attr_group);
645}
646
647void qeth_core_remove_osn_attributes(struct device *dev)
648{
649 sysfs_remove_group(&dev->kobj, &qeth_osn_device_attr_group);
650 return;
651}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
new file mode 100644
index 000000000000..4417a3629ae0
--- /dev/null
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -0,0 +1,1242 @@
1/*
2 * drivers/s390/net/qeth_l2_main.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/string.h>
14#include <linux/errno.h>
15#include <linux/kernel.h>
16#include <linux/etherdevice.h>
17#include <linux/mii.h>
18#include <linux/ip.h>
19
20#include <asm/s390_rdev.h>
21
22#include "qeth_core.h"
23#include "qeth_core_offl.h"
24
25#define QETH_DBF_TEXT_(name, level, text...) \
26 do { \
27 if (qeth_dbf_passes(qeth_dbf_##name, level)) { \
28 char *dbf_txt_buf = get_cpu_var(qeth_l2_dbf_txt_buf); \
29 sprintf(dbf_txt_buf, text); \
30 debug_text_event(qeth_dbf_##name, level, dbf_txt_buf); \
31 put_cpu_var(qeth_l2_dbf_txt_buf); \
32 } \
33 } while (0)
34
35static DEFINE_PER_CPU(char[256], qeth_l2_dbf_txt_buf);
36
37static int qeth_l2_set_offline(struct ccwgroup_device *);
38static int qeth_l2_stop(struct net_device *);
39static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
40static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
41 enum qeth_ipa_cmds,
42 int (*reply_cb) (struct qeth_card *,
43 struct qeth_reply*,
44 unsigned long));
45static void qeth_l2_set_multicast_list(struct net_device *);
46static int qeth_l2_recover(void *);
47
48static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
49{
50 struct qeth_card *card = netdev_priv(dev);
51 struct mii_ioctl_data *mii_data;
52 int rc = 0;
53
54 if (!card)
55 return -ENODEV;
56
57 if ((card->state != CARD_STATE_UP) &&
58 (card->state != CARD_STATE_SOFTSETUP))
59 return -ENODEV;
60
61 if (card->info.type == QETH_CARD_TYPE_OSN)
62 return -EPERM;
63
64 switch (cmd) {
65 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
66 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
67 break;
68 case SIOC_QETH_GET_CARD_TYPE:
69 if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
70 !card->info.guestlan)
71 return 1;
72 return 0;
73 break;
74 case SIOCGMIIPHY:
75 mii_data = if_mii(rq);
76 mii_data->phy_id = 0;
77 break;
78 case SIOCGMIIREG:
79 mii_data = if_mii(rq);
80 if (mii_data->phy_id != 0)
81 rc = -EINVAL;
82 else
83 mii_data->val_out = qeth_mdio_read(dev,
84 mii_data->phy_id, mii_data->reg_num);
85 break;
86 default:
87 rc = -EOPNOTSUPP;
88 }
89 if (rc)
90 QETH_DBF_TEXT_(trace, 2, "ioce%d", rc);
91 return rc;
92}
93
94static int qeth_l2_verify_dev(struct net_device *dev)
95{
96 struct qeth_card *card;
97 unsigned long flags;
98 int rc = 0;
99
100 read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
101 list_for_each_entry(card, &qeth_core_card_list.list, list) {
102 if (card->dev == dev) {
103 rc = QETH_REAL_CARD;
104 break;
105 }
106 }
107 read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
108
109 return rc;
110}
111
112static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
113{
114 struct qeth_card *card;
115 struct net_device *ndev;
116 unsigned char *readno;
117 __u16 temp_dev_no, card_dev_no;
118 char *endp;
119 unsigned long flags;
120
121 ndev = NULL;
122 memcpy(&temp_dev_no, read_dev_no, 2);
123 read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
124 list_for_each_entry(card, &qeth_core_card_list.list, list) {
125 readno = CARD_RDEV_ID(card);
126 readno += (strlen(readno) - 4);
127 card_dev_no = simple_strtoul(readno, &endp, 16);
128 if (card_dev_no == temp_dev_no) {
129 ndev = card->dev;
130 break;
131 }
132 }
133 read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
134 return ndev;
135}
136
137static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
138 struct qeth_reply *reply,
139 unsigned long data)
140{
141 struct qeth_ipa_cmd *cmd;
142 __u8 *mac;
143
144 QETH_DBF_TEXT(trace, 2, "L2Sgmacb");
145 cmd = (struct qeth_ipa_cmd *) data;
146 mac = &cmd->data.setdelmac.mac[0];
147 /* MAC already registered, needed in couple/uncouple case */
148 if (cmd->hdr.return_code == 0x2005) {
149 PRINT_WARN("Group MAC %02x:%02x:%02x:%02x:%02x:%02x " \
150 "already existing on %s \n",
151 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
152 QETH_CARD_IFNAME(card));
153 cmd->hdr.return_code = 0;
154 }
155 if (cmd->hdr.return_code)
156 PRINT_ERR("Could not set group MAC " \
157 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
158 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
159 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
160 return 0;
161}
162
163static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
164{
165 QETH_DBF_TEXT(trace, 2, "L2Sgmac");
166 return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
167 qeth_l2_send_setgroupmac_cb);
168}
169
170static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
171 struct qeth_reply *reply,
172 unsigned long data)
173{
174 struct qeth_ipa_cmd *cmd;
175 __u8 *mac;
176
177 QETH_DBF_TEXT(trace, 2, "L2Dgmacb");
178 cmd = (struct qeth_ipa_cmd *) data;
179 mac = &cmd->data.setdelmac.mac[0];
180 if (cmd->hdr.return_code)
181 PRINT_ERR("Could not delete group MAC " \
182 "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
183 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
184 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
185 return 0;
186}
187
188static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
189{
190 QETH_DBF_TEXT(trace, 2, "L2Dgmac");
191 return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
192 qeth_l2_send_delgroupmac_cb);
193}
194
195static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac)
196{
197 struct qeth_mc_mac *mc;
198
199 mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC);
200
201 if (!mc) {
202 PRINT_ERR("no mem vor mc mac address\n");
203 return;
204 }
205
206 memcpy(mc->mc_addr, mac, OSA_ADDR_LEN);
207 mc->mc_addrlen = OSA_ADDR_LEN;
208
209 if (!qeth_l2_send_setgroupmac(card, mac))
210 list_add_tail(&mc->list, &card->mc_list);
211 else
212 kfree(mc);
213}
214
215static void qeth_l2_del_all_mc(struct qeth_card *card)
216{
217 struct qeth_mc_mac *mc, *tmp;
218
219 spin_lock_bh(&card->mclock);
220 list_for_each_entry_safe(mc, tmp, &card->mc_list, list) {
221 qeth_l2_send_delgroupmac(card, mc->mc_addr);
222 list_del(&mc->list);
223 kfree(mc);
224 }
225 spin_unlock_bh(&card->mclock);
226}
227
228static void qeth_l2_get_packet_type(struct qeth_card *card,
229 struct qeth_hdr *hdr, struct sk_buff *skb)
230{
231 __u16 hdr_mac;
232
233 if (!memcmp(skb->data + QETH_HEADER_SIZE,
234 skb->dev->broadcast, 6)) {
235 /* broadcast? */
236 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
237 return;
238 }
239 hdr_mac = *((__u16 *)skb->data);
240 /* tr multicast? */
241 switch (card->info.link_type) {
242 case QETH_LINK_TYPE_HSTR:
243 case QETH_LINK_TYPE_LANE_TR:
244 if ((hdr_mac == QETH_TR_MAC_NC) ||
245 (hdr_mac == QETH_TR_MAC_C))
246 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
247 else
248 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
249 break;
250 /* eth or so multicast? */
251 default:
252 if ((hdr_mac == QETH_ETH_MAC_V4) ||
253 (hdr_mac == QETH_ETH_MAC_V6))
254 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
255 else
256 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
257 }
258}
259
260static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
261 struct sk_buff *skb, int ipv, int cast_type)
262{
263 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)((skb->data) +
264 QETH_HEADER_SIZE);
265
266 memset(hdr, 0, sizeof(struct qeth_hdr));
267 hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
268
269 /* set byte byte 3 to casting flags */
270 if (cast_type == RTN_MULTICAST)
271 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
272 else if (cast_type == RTN_BROADCAST)
273 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
274 else
275 qeth_l2_get_packet_type(card, hdr, skb);
276
277 hdr->hdr.l2.pkt_length = skb->len-QETH_HEADER_SIZE;
278 /* VSWITCH relies on the VLAN
279 * information to be present in
280 * the QDIO header */
281 if (veth->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
282 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN;
283 hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI);
284 }
285}
286
287static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
288 struct qeth_reply *reply, unsigned long data)
289{
290 struct qeth_ipa_cmd *cmd;
291
292 QETH_DBF_TEXT(trace, 2, "L2sdvcb");
293 cmd = (struct qeth_ipa_cmd *) data;
294 if (cmd->hdr.return_code) {
295 PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. "
296 "Continuing\n", cmd->data.setdelvlan.vlan_id,
297 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
298 QETH_DBF_TEXT_(trace, 2, "L2VL%4x", cmd->hdr.command);
299 QETH_DBF_TEXT_(trace, 2, "L2%s", CARD_BUS_ID(card));
300 QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code);
301 }
302 return 0;
303}
304
305static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
306 enum qeth_ipa_cmds ipacmd)
307{
308 struct qeth_ipa_cmd *cmd;
309 struct qeth_cmd_buffer *iob;
310
311 QETH_DBF_TEXT_(trace, 4, "L2sdv%x", ipacmd);
312 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
313 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
314 cmd->data.setdelvlan.vlan_id = i;
315 return qeth_send_ipa_cmd(card, iob,
316 qeth_l2_send_setdelvlan_cb, NULL);
317}
318
319static void qeth_l2_process_vlans(struct qeth_card *card, int clear)
320{
321 struct qeth_vlan_vid *id;
322 QETH_DBF_TEXT(trace, 3, "L2prcvln");
323 spin_lock_bh(&card->vlanlock);
324 list_for_each_entry(id, &card->vid_list, list) {
325 if (clear)
326 qeth_l2_send_setdelvlan(card, id->vid,
327 IPA_CMD_DELVLAN);
328 else
329 qeth_l2_send_setdelvlan(card, id->vid,
330 IPA_CMD_SETVLAN);
331 }
332 spin_unlock_bh(&card->vlanlock);
333}
334
335static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
336{
337 struct qeth_card *card = netdev_priv(dev);
338 struct qeth_vlan_vid *id;
339
340 QETH_DBF_TEXT_(trace, 4, "aid:%d", vid);
341 id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
342 if (id) {
343 id->vid = vid;
344 qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
345 spin_lock_bh(&card->vlanlock);
346 list_add_tail(&id->list, &card->vid_list);
347 spin_unlock_bh(&card->vlanlock);
348 } else {
349 PRINT_ERR("no memory for vid\n");
350 }
351}
352
353static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
354{
355 struct qeth_vlan_vid *id, *tmpid = NULL;
356 struct qeth_card *card = netdev_priv(dev);
357
358 QETH_DBF_TEXT_(trace, 4, "kid:%d", vid);
359 spin_lock_bh(&card->vlanlock);
360 list_for_each_entry(id, &card->vid_list, list) {
361 if (id->vid == vid) {
362 list_del(&id->list);
363 tmpid = id;
364 break;
365 }
366 }
367 spin_unlock_bh(&card->vlanlock);
368 if (tmpid) {
369 qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
370 kfree(tmpid);
371 }
372 qeth_l2_set_multicast_list(card->dev);
373}
374
375static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
376{
377 int rc = 0;
378
379 QETH_DBF_TEXT(setup , 2, "stopcard");
380 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
381
382 qeth_set_allowed_threads(card, 0, 1);
383 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
384 return -ERESTARTSYS;
385 if (card->read.state == CH_STATE_UP &&
386 card->write.state == CH_STATE_UP &&
387 (card->state == CARD_STATE_UP)) {
388 if (recovery_mode &&
389 card->info.type != QETH_CARD_TYPE_OSN) {
390 qeth_l2_stop(card->dev);
391 } else {
392 rtnl_lock();
393 dev_close(card->dev);
394 rtnl_unlock();
395 }
396 if (!card->use_hard_stop) {
397 __u8 *mac = &card->dev->dev_addr[0];
398 rc = qeth_l2_send_delmac(card, mac);
399 QETH_DBF_TEXT_(setup, 2, "Lerr%d", rc);
400 }
401 card->state = CARD_STATE_SOFTSETUP;
402 }
403 if (card->state == CARD_STATE_SOFTSETUP) {
404 qeth_l2_process_vlans(card, 1);
405 qeth_l2_del_all_mc(card);
406 qeth_clear_ipacmd_list(card);
407 card->state = CARD_STATE_HARDSETUP;
408 }
409 if (card->state == CARD_STATE_HARDSETUP) {
410 qeth_qdio_clear_card(card, 0);
411 qeth_clear_qdio_buffers(card);
412 qeth_clear_working_pool_list(card);
413 card->state = CARD_STATE_DOWN;
414 }
415 if (card->state == CARD_STATE_DOWN) {
416 qeth_clear_cmd_buffers(&card->read);
417 qeth_clear_cmd_buffers(&card->write);
418 }
419 card->use_hard_stop = 0;
420 return rc;
421}
422
423static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
424 struct qeth_qdio_buffer *buf, int index)
425{
426 struct qdio_buffer_element *element;
427 struct sk_buff *skb;
428 struct qeth_hdr *hdr;
429 int offset;
430 unsigned int len;
431
432 /* get first element of current buffer */
433 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
434 offset = 0;
435 if (card->options.performance_stats)
436 card->perf_stats.bufs_rec++;
437 while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element,
438 &offset, &hdr))) {
439 skb->dev = card->dev;
440 /* is device UP ? */
441 if (!(card->dev->flags & IFF_UP)) {
442 dev_kfree_skb_any(skb);
443 continue;
444 }
445
446 switch (hdr->hdr.l2.id) {
447 case QETH_HEADER_TYPE_LAYER2:
448 skb->pkt_type = PACKET_HOST;
449 skb->protocol = eth_type_trans(skb, skb->dev);
450 if (card->options.checksum_type == NO_CHECKSUMMING)
451 skb->ip_summed = CHECKSUM_UNNECESSARY;
452 else
453 skb->ip_summed = CHECKSUM_NONE;
454 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
455 len = skb->len;
456 netif_rx(skb);
457 break;
458 case QETH_HEADER_TYPE_OSN:
459 skb_push(skb, sizeof(struct qeth_hdr));
460 skb_copy_to_linear_data(skb, hdr,
461 sizeof(struct qeth_hdr));
462 len = skb->len;
463 card->osn_info.data_cb(skb);
464 break;
465 default:
466 dev_kfree_skb_any(skb);
467 QETH_DBF_TEXT(trace, 3, "inbunkno");
468 QETH_DBF_HEX(control, 3, hdr, QETH_DBF_CONTROL_LEN);
469 continue;
470 }
471 card->dev->last_rx = jiffies;
472 card->stats.rx_packets++;
473 card->stats.rx_bytes += len;
474 }
475}
476
/*
 * Issue a SETVMAC/DELVMAC (or group-mac) IPA command for @mac.
 *
 * Builds the IPA command buffer, copies the 6-byte MAC address into the
 * setdelmac payload and sends it; @reply_cb handles the adapter's answer.
 * Returns the result of qeth_send_ipa_cmd().
 */
static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
			   enum qeth_ipa_cmds ipacmd,
			   int (*reply_cb) (struct qeth_card *,
					    struct qeth_reply*,
					    unsigned long))
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(trace, 2, "L2sdmac");
	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
	/* command payload starts right after the IPA PDU header */
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
	memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
	return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
}
493
/*
 * IPA reply callback for SETVMAC.
 *
 * On failure the registered-MAC state bit is cleared and the return code
 * is rewritten to -EIO for the waiter; on success the adapter-confirmed
 * MAC is copied into the net_device and announced to the log.
 * Always returns 0 (callback completed).
 */
static int qeth_l2_send_setmac_cb(struct qeth_card *card,
			   struct qeth_reply *reply,
			   unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(trace, 2, "L2Smaccb");
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_DBF_TEXT_(trace, 2, "L2er%x", cmd->hdr.return_code);
		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
		/* map adapter error onto a generic errno for the caller */
		cmd->hdr.return_code = -EIO;
	} else {
		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
		memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
		       OSA_ADDR_LEN);
		PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
			   "successfully registered on device %s\n",
			   card->dev->dev_addr[0], card->dev->dev_addr[1],
			   card->dev->dev_addr[2], card->dev->dev_addr[3],
			   card->dev->dev_addr[4], card->dev->dev_addr[5],
			   card->dev->name);
	}
	return 0;
}
519
/* Register @mac with the adapter via the SETVMAC IPA command. */
static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
{
	QETH_DBF_TEXT(trace, 2, "L2Setmac");
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
					  qeth_l2_send_setmac_cb);
}
526
527static int qeth_l2_send_delmac_cb(struct qeth_card *card,
528 struct qeth_reply *reply,
529 unsigned long data)
530{
531 struct qeth_ipa_cmd *cmd;
532
533 QETH_DBF_TEXT(trace, 2, "L2Dmaccb");
534 cmd = (struct qeth_ipa_cmd *) data;
535 if (cmd->hdr.return_code) {
536 QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code);
537 cmd->hdr.return_code = -EIO;
538 return 0;
539 }
540 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
541
542 return 0;
543}
544
/*
 * Deregister @mac from the adapter via DELVMAC.
 * A no-op (returns 0) when no MAC is currently registered.
 */
static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
{
	QETH_DBF_TEXT(trace, 2, "L2Delmac");
	if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
		return 0;
	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
					  qeth_l2_send_delmac_cb);
}
553
/*
 * Obtain an initial MAC address for the net_device.
 *
 * On a guest LAN the address is queried from the hypervisor/adapter via
 * a change-MAC setadapterparms command; everywhere else a random,
 * locally-administered address with the 02:00:00 prefix is generated.
 * Returns 0 on success or the error from the guest-LAN MAC query.
 */
static int qeth_l2_request_initial_mac(struct qeth_card *card)
{
	int rc = 0;
	/* locally-administered unicast prefix for generated addresses */
	char vendor_pre[] = {0x02, 0x00, 0x00};

	QETH_DBF_TEXT(setup, 2, "doL2init");
	QETH_DBF_TEXT_(setup, 2, "doL2%s", CARD_BUS_ID(card));

	/* failure here is only warned about, not fatal */
	rc = qeth_query_setadapterparms(card);
	if (rc) {
		PRINT_WARN("could not query adapter parameters on device %s: "
			   "x%x\n", CARD_BUS_ID(card), rc);
	}

	if (card->info.guestlan) {
		rc = qeth_setadpparms_change_macaddr(card);
		if (rc) {
			PRINT_WARN("couldn't get MAC address on "
				   "device %s: x%x\n",
				   CARD_BUS_ID(card), rc);
			QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
			return rc;
		}
		QETH_DBF_HEX(setup, 2, card->dev->dev_addr, OSA_ADDR_LEN);
	} else {
		/* random address, then force the 02:00:00 vendor prefix */
		random_ether_addr(card->dev->dev_addr);
		memcpy(card->dev->dev_addr, vendor_pre, 3);
	}
	return 0;
}
584
/*
 * net_device set_mac_address handler.
 *
 * Rejects the request for non-qeth devices and for OSN cards; otherwise
 * deregisters the current MAC from the adapter and registers the new one
 * (the setmac reply callback updates dev->dev_addr on success).
 */
static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct qeth_card *card = netdev_priv(dev);
	int rc = 0;

	QETH_DBF_TEXT(trace, 3, "setmac");

	if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) {
		QETH_DBF_TEXT(trace, 3, "setmcINV");
		return -EOPNOTSUPP;
	}

	if (card->info.type == QETH_CARD_TYPE_OSN) {
		PRINT_WARN("Setting MAC address on %s is not supported.\n",
			   dev->name);
		QETH_DBF_TEXT(trace, 3, "setmcOSN");
		return -EOPNOTSUPP;
	}
	QETH_DBF_TEXT_(trace, 3, "%s", CARD_BUS_ID(card));
	QETH_DBF_HEX(trace, 3, addr->sa_data, OSA_ADDR_LEN);
	/* drop the old registration first; only then register the new MAC */
	rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
	if (!rc)
		rc = qeth_l2_send_setmac(card, addr->sa_data);
	return rc;
}
611
612static void qeth_l2_set_multicast_list(struct net_device *dev)
613{
614 struct qeth_card *card = netdev_priv(dev);
615 struct dev_mc_list *dm;
616
617 if (card->info.type == QETH_CARD_TYPE_OSN)
618 return ;
619
620 QETH_DBF_TEXT(trace, 3, "setmulti");
621 qeth_l2_del_all_mc(card);
622 spin_lock_bh(&card->mclock);
623 for (dm = dev->mc_list; dm; dm = dm->next)
624 qeth_l2_add_mc(card, dm->dmi_addr);
625 spin_unlock_bh(&card->mclock);
626 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
627 return;
628 qeth_setadp_promisc_mode(card);
629}
630
/*
 * net_device hard_start_xmit handler for layer-2 cards.
 *
 * Prepares a qeth header (or, for OSN, uses the header already at the
 * front of the skb), optionally builds an EDDP context for GSO frames,
 * and hands the packet to the QDIO output path.  Returns NETDEV_TX_OK
 * (consuming the skb) or NETDEV_TX_BUSY when the send path reports
 * -EBUSY, in which case the skb is left for the stack to retry.
 *
 * NOTE(review): the EDDP path dereferences skb->sk->sk_protocol without
 * a NULL check - presumably GSO skbs always carry a socket here; confirm.
 */
static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int rc;
	struct qeth_hdr *hdr = NULL;
	int elements = 0;
	struct qeth_card *card = netdev_priv(dev);
	/* new_skb may become a private copy with headroom for the header */
	struct sk_buff *new_skb = skb;
	int ipv = qeth_get_ip_version(skb);
	int cast_type = qeth_get_cast_type(card, skb);
	struct qeth_qdio_out_q *queue = card->qdio.out_qs
		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
	/* remember len now: skb may be freed before stats are updated */
	int tx_bytes = skb->len;
	enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
	struct qeth_eddp_context *ctx = NULL;

	QETH_DBF_TEXT(trace, 6, "l2xmit");

	if ((card->state != CARD_STATE_UP) || !card->lan_online) {
		card->stats.tx_carrier_errors++;
		goto tx_drop;
	}

	/* OSN cards cannot transmit IPv6 frames */
	if ((card->info.type == QETH_CARD_TYPE_OSN) &&
	    (skb->protocol == htons(ETH_P_IPV6)))
		goto tx_drop;

	if (card->options.performance_stats) {
		card->perf_stats.outbound_cnt++;
		card->perf_stats.outbound_start_time = qeth_get_micros();
	}
	netif_stop_queue(dev);

	/* GSO frames are segmented in software via EDDP */
	if (skb_is_gso(skb))
		large_send = QETH_LARGE_SEND_EDDP;

	if (card->info.type == QETH_CARD_TYPE_OSN)
		/* OSN frames arrive with the qeth header already in place */
		hdr = (struct qeth_hdr *)skb->data;
	else {
		new_skb = qeth_prepare_skb(card, skb, &hdr);
		if (!new_skb)
			goto tx_drop;
		qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
	}

	if (large_send == QETH_LARGE_SEND_EDDP) {
		ctx = qeth_eddp_create_context(card, new_skb, hdr,
						skb->sk->sk_protocol);
		if (ctx == NULL) {
			PRINT_WARN("could not create eddp context\n");
			goto tx_drop;
		}
	} else {
		/* count the QDIO scatter elements the frame will need */
		elements = qeth_get_elements_no(card, (void *)hdr, new_skb, 0);
		if (!elements)
			goto tx_drop;
	}

	if ((large_send == QETH_LARGE_SEND_NO) &&
	    (skb->ip_summed == CHECKSUM_PARTIAL))
		qeth_tx_csum(new_skb);

	if (card->info.type != QETH_CARD_TYPE_IQD)
		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
					 elements, ctx);
	else
		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
					      elements, ctx);
	if (!rc) {
		card->stats.tx_packets++;
		card->stats.tx_bytes += tx_bytes;
		/* free the original if a private copy was sent instead */
		if (new_skb != skb)
			dev_kfree_skb_any(skb);
		if (card->options.performance_stats) {
			if (large_send != QETH_LARGE_SEND_NO) {
				card->perf_stats.large_send_bytes += tx_bytes;
				card->perf_stats.large_send_cnt++;
			}
			if (skb_shinfo(new_skb)->nr_frags > 0) {
				card->perf_stats.sg_skbs_sent++;
				/* nr_frags + skb->data */
				card->perf_stats.sg_frags_sent +=
					skb_shinfo(new_skb)->nr_frags + 1;
			}
		}

		if (ctx != NULL) {
			/* EDDP copied the data; drop our reference and skb */
			qeth_eddp_put_context(ctx);
			dev_kfree_skb_any(new_skb);
		}
	} else {
		if (ctx != NULL)
			qeth_eddp_put_context(ctx);

		if (rc == -EBUSY) {
			/* queue full: return the skb to the stack unharmed */
			if (new_skb != skb)
				dev_kfree_skb_any(new_skb);
			return NETDEV_TX_BUSY;
		} else
			goto tx_drop;
	}

	netif_wake_queue(dev);
	if (card->options.performance_stats)
		card->perf_stats.outbound_time += qeth_get_micros() -
			card->perf_stats.outbound_start_time;
	return rc;

tx_drop:
	card->stats.tx_dropped++;
	card->stats.tx_errors++;
	if ((new_skb != skb) && new_skb)
		dev_kfree_skb_any(new_skb);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
746
/*
 * QDIO inbound interrupt handler for layer-2 cards.
 *
 * On an activate-check condition the card is scheduled for recovery.
 * Otherwise every signalled buffer is processed (unless it carries a
 * QDIO/SIGA error), its pool entry is returned, and the ring slot is
 * given back to the hardware.
 */
static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
			unsigned int status, unsigned int qdio_err,
			unsigned int siga_err, unsigned int queue,
			int first_element, int count, unsigned long card_ptr)
{
	struct net_device *net_dev;
	struct qeth_card *card;
	struct qeth_qdio_buffer *buffer;
	int index;
	int i;

	QETH_DBF_TEXT(trace, 6, "qdinput");
	card = (struct qeth_card *) card_ptr;
	net_dev = card->dev;
	if (card->options.performance_stats) {
		card->perf_stats.inbound_cnt++;
		card->perf_stats.inbound_start_time = qeth_get_micros();
	}
	if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
		if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
			/* fatal queue error: log details and recover */
			QETH_DBF_TEXT(trace, 1, "qdinchk");
			QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card));
			QETH_DBF_TEXT_(trace, 1, "%04X%04X", first_element,
					count);
			QETH_DBF_TEXT_(trace, 1, "%04X%04X", queue, status);
			qeth_schedule_recovery(card);
			return;
		}
	}
	for (i = first_element; i < (first_element + count); ++i) {
		/* the inbound queue is a ring of QDIO_MAX_BUFFERS_PER_Q */
		index = i % QDIO_MAX_BUFFERS_PER_Q;
		buffer = &card->qdio.in_q->bufs[index];
		if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) &&
		      qeth_check_qdio_errors(buffer->buffer,
					     qdio_err, siga_err, "qinerr")))
			qeth_l2_process_inbound_buffer(card, buffer, index);
		/* clear buffer and give back to hardware */
		qeth_put_buffer_pool_entry(card, buffer->pool_entry);
		qeth_queue_input_buffer(card, index);
	}
	if (card->options.performance_stats)
		card->perf_stats.inbound_time += qeth_get_micros() -
			card->perf_stats.inbound_start_time;
}
791
/*
 * net_device open handler.
 *
 * Requires the card to have reached SOFTSETUP and (for non-OSN cards) a
 * MAC address registered with the adapter; then brings the data channel,
 * card state and TX queue up, keeping the carrier off while the LAN is
 * not yet online.
 */
static int qeth_l2_open(struct net_device *dev)
{
	struct qeth_card *card = netdev_priv(dev);

	QETH_DBF_TEXT(trace, 4, "qethopen");
	if (card->state != CARD_STATE_SOFTSETUP)
		return -ENODEV;

	if ((card->info.type != QETH_CARD_TYPE_OSN) &&
	     (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
		QETH_DBF_TEXT(trace, 4, "nomacadr");
		return -EPERM;
	}
	card->data.state = CH_STATE_UP;
	card->state = CARD_STATE_UP;
	card->dev->flags |= IFF_UP;
	netif_start_queue(dev);

	/* no carrier while the LAN side is offline */
	if (!card->lan_online && netif_carrier_ok(dev))
		netif_carrier_off(dev);
	return 0;
}
814
815
/*
 * net_device stop handler: disable TX, mark the interface down and fall
 * back from UP to SOFTSETUP state.
 */
static int qeth_l2_stop(struct net_device *dev)
{
	struct qeth_card *card = netdev_priv(dev);

	QETH_DBF_TEXT(trace, 4, "qethstop");
	netif_tx_disable(dev);
	card->dev->flags &= ~IFF_UP;
	if (card->state == CARD_STATE_UP)
		card->state = CARD_STATE_SOFTSETUP;
	return 0;
}
827
/*
 * ccwgroup probe hook: initialize the layer-2 specific parts of the card
 * (VLAN/multicast lists, layer2 option) and wire up the discipline's
 * QDIO handlers and recovery thread function.  Always succeeds.
 */
static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	INIT_LIST_HEAD(&card->vid_list);
	INIT_LIST_HEAD(&card->mc_list);
	card->options.layer2 = 1;
	card->discipline.input_handler = (qdio_handler_t *)
		qeth_l2_qdio_input_handler;
	card->discipline.output_handler = (qdio_handler_t *)
		qeth_qdio_output_handler;
	card->discipline.recover = qeth_l2_recover;
	return 0;
}
842
/*
 * ccwgroup remove hook: wait for all card threads to finish, take the
 * device offline (hard stop) if still online, unregister the net_device
 * and drop all multicast registrations.
 */
static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);

	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);

	if (cgdev->state == CCWGROUP_ONLINE) {
		card->use_hard_stop = 1;
		qeth_l2_set_offline(cgdev);
	}

	if (card->dev) {
		unregister_netdev(card->dev);
		card->dev = NULL;
	}

	qeth_l2_del_all_mc(card);
	return;
}
862
/* ethtool operations for regular (non-OSN) layer-2 devices */
static struct ethtool_ops qeth_l2_ethtool_ops = {
	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_hw_csum,
	.get_sg      = ethtool_op_get_sg,
	.set_sg      = ethtool_op_set_sg,
	.get_tso     = ethtool_op_get_tso,
	.set_tso     = ethtool_op_set_tso,
	.get_strings = qeth_core_get_strings,
	.get_ethtool_stats = qeth_core_get_ethtool_stats,
	.get_stats_count = qeth_core_get_stats_count,
	.get_drvinfo = qeth_core_get_drvinfo,
};
876
/* reduced ethtool operations for OSN devices (stats/info only) */
static struct ethtool_ops qeth_l2_osn_ops = {
	.get_strings = qeth_core_get_strings,
	.get_ethtool_stats = qeth_core_get_ethtool_stats,
	.get_stats_count = qeth_core_get_stats_count,
	.get_drvinfo = qeth_core_get_drvinfo,
};
883
/*
 * Allocate and register the net_device for a layer-2 card.
 *
 * The netdev flavour depends on the card type (eth%d, hsi%d for
 * HiperSockets, osn%d for OSN); all device callbacks, the initial MTU
 * and the ethtool ops are wired up before registration.
 * Returns 0 on success or a register_netdev()/alloc error.
 *
 * NOTE(review): the return value of qeth_l2_request_initial_mac() is
 * ignored - presumably a guest-LAN MAC query failure is tolerated here;
 * confirm against the open() path, which insists on a registered MAC.
 */
static int qeth_l2_setup_netdev(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSAE:
		card->dev = alloc_etherdev(0);
		break;
	case QETH_CARD_TYPE_IQD:
		card->dev = alloc_netdev(0, "hsi%d", ether_setup);
		break;
	case QETH_CARD_TYPE_OSN:
		card->dev = alloc_netdev(0, "osn%d", ether_setup);
		card->dev->flags |= IFF_NOARP;
		break;
	default:
		card->dev = alloc_etherdev(0);
	}

	if (!card->dev)
		return -ENODEV;

	card->dev->priv = card;
	card->dev->tx_timeout = &qeth_tx_timeout;
	card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
	card->dev->open = qeth_l2_open;
	card->dev->stop = qeth_l2_stop;
	card->dev->hard_start_xmit = qeth_l2_hard_start_xmit;
	card->dev->do_ioctl = qeth_l2_do_ioctl;
	card->dev->get_stats = qeth_get_stats;
	card->dev->change_mtu = qeth_change_mtu;
	card->dev->set_multicast_list = qeth_l2_set_multicast_list;
	card->dev->vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid;
	card->dev->vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid;
	card->dev->set_mac_address = qeth_l2_set_mac_address;
	card->dev->mtu = card->info.initial_mtu;
	/* OSN devices get the reduced ethtool interface */
	if (card->info.type != QETH_CARD_TYPE_OSN)
		SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
	else
		SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops);
	card->dev->features |= NETIF_F_HW_VLAN_FILTER;
	card->info.broadcast_capable = 1;
	qeth_l2_request_initial_mac(card);
	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
	return register_netdev(card->dev);
}
928
929static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
930{
931 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
932 int rc = 0;
933 enum qeth_card_states recover_flag;
934
935 BUG_ON(!card);
936 QETH_DBF_TEXT(setup, 2, "setonlin");
937 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
938
939 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
940 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) {
941 PRINT_WARN("set_online of card %s interrupted by user!\n",
942 CARD_BUS_ID(card));
943 return -ERESTARTSYS;
944 }
945
946 recover_flag = card->state;
947 rc = ccw_device_set_online(CARD_RDEV(card));
948 if (rc) {
949 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
950 return -EIO;
951 }
952 rc = ccw_device_set_online(CARD_WDEV(card));
953 if (rc) {
954 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
955 return -EIO;
956 }
957 rc = ccw_device_set_online(CARD_DDEV(card));
958 if (rc) {
959 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
960 return -EIO;
961 }
962
963 rc = qeth_core_hardsetup_card(card);
964 if (rc) {
965 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
966 goto out_remove;
967 }
968
969 if (!card->dev && qeth_l2_setup_netdev(card))
970 goto out_remove;
971
972 if (card->info.type != QETH_CARD_TYPE_OSN)
973 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
974
975 card->state = CARD_STATE_HARDSETUP;
976 qeth_print_status_message(card);
977
978 /* softsetup */
979 QETH_DBF_TEXT(setup, 2, "softsetp");
980
981 rc = qeth_send_startlan(card);
982 if (rc) {
983 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
984 if (rc == 0xe080) {
985 PRINT_WARN("LAN on card %s if offline! "
986 "Waiting for STARTLAN from card.\n",
987 CARD_BUS_ID(card));
988 card->lan_online = 0;
989 }
990 return rc;
991 } else
992 card->lan_online = 1;
993
994 if (card->info.type != QETH_CARD_TYPE_OSN) {
995 qeth_set_large_send(card, card->options.large_send);
996 qeth_l2_process_vlans(card, 0);
997 }
998
999 netif_tx_disable(card->dev);
1000
1001 rc = qeth_init_qdio_queues(card);
1002 if (rc) {
1003 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
1004 goto out_remove;
1005 }
1006 card->state = CARD_STATE_SOFTSETUP;
1007 netif_carrier_on(card->dev);
1008
1009 qeth_set_allowed_threads(card, 0xffffffff, 0);
1010 if (recover_flag == CARD_STATE_RECOVER) {
1011 if (recovery_mode &&
1012 card->info.type != QETH_CARD_TYPE_OSN) {
1013 qeth_l2_open(card->dev);
1014 } else {
1015 rtnl_lock();
1016 dev_open(card->dev);
1017 rtnl_unlock();
1018 }
1019 /* this also sets saved unicast addresses */
1020 qeth_l2_set_multicast_list(card->dev);
1021 }
1022 /* let user_space know that device is online */
1023 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
1024 return 0;
1025out_remove:
1026 card->use_hard_stop = 1;
1027 qeth_l2_stop_card(card, 0);
1028 ccw_device_set_offline(CARD_DDEV(card));
1029 ccw_device_set_offline(CARD_WDEV(card));
1030 ccw_device_set_offline(CARD_RDEV(card));
1031 if (recover_flag == CARD_STATE_RECOVER)
1032 card->state = CARD_STATE_RECOVER;
1033 else
1034 card->state = CARD_STATE_DOWN;
1035 return -ENODEV;
1036}
1037
/* ccwgroup set_online hook: normal (non-recovery) online path. */
static int qeth_l2_set_online(struct ccwgroup_device *gdev)
{
	return __qeth_l2_set_online(gdev, 0);
}
1042
/*
 * Take a layer-2 card offline: drop the carrier, stop the card, then set
 * the data/write/read subchannels offline (first failure code wins for
 * the trace).  A card that was UP is left in RECOVER state so a later
 * online can restore it.  Returns 0, or -ERESTARTSYS if the stop was
 * interrupted by a signal.
 */
static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
					int recovery_mode)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
	int rc = 0, rc2 = 0, rc3 = 0;
	enum qeth_card_states recover_flag;

	QETH_DBF_TEXT(setup, 3, "setoffl");
	QETH_DBF_HEX(setup, 3, &card, sizeof(void *));

	if (card->dev && netif_carrier_ok(card->dev))
		netif_carrier_off(card->dev);
	recover_flag = card->state;
	if (qeth_l2_stop_card(card, recovery_mode) == -ERESTARTSYS) {
		PRINT_WARN("Stopping card %s interrupted by user!\n",
			   CARD_BUS_ID(card));
		return -ERESTARTSYS;
	}
	rc  = ccw_device_set_offline(CARD_DDEV(card));
	rc2 = ccw_device_set_offline(CARD_WDEV(card));
	rc3 = ccw_device_set_offline(CARD_RDEV(card));
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
	if (recover_flag == CARD_STATE_UP)
		card->state = CARD_STATE_RECOVER;
	/* let user_space know that device is offline */
	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
	return 0;
}
1074
/* ccwgroup set_offline hook: normal (non-recovery) offline path. */
static int qeth_l2_set_offline(struct ccwgroup_device *cgdev)
{
	return __qeth_l2_set_offline(cgdev, 0);
}
1079
/*
 * Recovery thread function (card->discipline.recover): hard-stop the
 * card, take it offline and bring it back online in recovery mode, then
 * clear the recovery thread bits so further recoveries can be scheduled.
 * Always returns 0.
 */
static int qeth_l2_recover(void *ptr)
{
	struct qeth_card *card;
	int rc = 0;

	card = (struct qeth_card *) ptr;
	QETH_DBF_TEXT(trace, 2, "recover1");
	QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
	/* bail out if another recovery already owns the thread bit */
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_DBF_TEXT(trace, 2, "recover2");
	PRINT_WARN("Recovery of device %s started ...\n",
		   CARD_BUS_ID(card));
	card->use_hard_stop = 1;
	__qeth_l2_set_offline(card->gdev, 1);
	rc = __qeth_l2_set_online(card->gdev, 1);
	/* don't run another scheduled recovery */
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		PRINT_INFO("Device %s successfully recovered!\n",
			   CARD_BUS_ID(card));
	else
		PRINT_INFO("Device %s could not be recovered!\n",
			   CARD_BUS_ID(card));
	return 0;
}
1107
/* module init: registration with the core happens via the exported
 * qeth_l2_ccwgroup_driver; only announce ourselves here. */
static int __init qeth_l2_init(void)
{
	PRINT_INFO("register layer 2 discipline\n");
	return 0;
}
1113
/* module exit: nothing to tear down beyond the log message. */
static void __exit qeth_l2_exit(void)
{
	PRINT_INFO("unregister layer 2 discipline\n");
}
1118
/*
 * ccwgroup shutdown hook: quiesce the QDIO queues so the adapter is in a
 * clean state across reboot/kexec.
 */
static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	qeth_qdio_clear_card(card, 0);
	qeth_clear_qdio_buffers(card);
}
1125
/* layer-2 discipline entry points; picked up by the qeth core, hence
 * exported */
struct ccwgroup_driver qeth_l2_ccwgroup_driver = {
	.probe = qeth_l2_probe_device,
	.remove = qeth_l2_remove_device,
	.set_online = qeth_l2_set_online,
	.set_offline = qeth_l2_set_offline,
	.shutdown = qeth_l2_shutdown,
};
EXPORT_SYMBOL_GPL(qeth_l2_ccwgroup_driver);
1134
/*
 * Send a prepared OSN control command on the write channel.
 *
 * Waits until no other I/O is pending on the channel, then starts the
 * channel program under the ccw device lock.  On a ccw_device_start()
 * failure the command buffer is released and waiters are woken.
 * Returns the ccw_device_start() result (0 on success).
 */
static int qeth_osn_send_control_data(struct qeth_card *card, int len,
			   struct qeth_cmd_buffer *iob)
{
	unsigned long flags;
	int rc = 0;

	QETH_DBF_TEXT(trace, 5, "osndctrd");

	/* claim the write channel: only one command in flight at a time */
	wait_event(card->wait_q,
		   atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
	qeth_prepare_control_data(card, len, iob);
	QETH_DBF_TEXT(trace, 6, "osnoirqp");
	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
			      (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
	if (rc) {
		PRINT_WARN("qeth_osn_send_control_data: "
			   "ccw_device_start rc = %i\n", rc);
		QETH_DBF_TEXT_(trace, 2, " err%d", rc);
		qeth_release_buffer(iob->channel, iob);
		/* release the channel and wake any queued sender */
		atomic_set(&card->write.irq_pending, 0);
		wake_up(&card->wait_q);
	}
	return rc;
}
1161
/*
 * Wrap an OSN payload of @data_len bytes into an IPA PDU and send it.
 *
 * Fills the four length fields of the PDU header (total length and the
 * three PDU part lengths) and hands the buffer to the control-data
 * sender.
 */
static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
			struct qeth_cmd_buffer *iob, int data_len)
{
	u16 s1, s2;

	QETH_DBF_TEXT(trace, 4, "osndipa");

	qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
	s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
	s2 = (u16)data_len;
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
	return qeth_osn_send_control_data(card, s1, iob);
}
1178
/*
 * Public OSN API: send an assist command of @data_len bytes on behalf of
 * the OSN user (e.g. osasnmpd support module).
 * Returns 0 on success, -ENODEV if the device/card is missing or not in
 * an operational state, or a send error.
 *
 * NOTE(review): @data_len is copied into the command buffer without being
 * validated against the buffer size - presumably callers bound it to the
 * IPA payload size; confirm and consider an explicit check.
 */
int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_card *card;
	int rc;

	QETH_DBF_TEXT(trace, 2, "osnsdmc");
	if (!dev)
		return -ENODEV;
	card = netdev_priv(dev);
	if (!card)
		return -ENODEV;
	if ((card->state != CARD_STATE_UP) &&
	    (card->state != CARD_STATE_SOFTSETUP))
		return -ENODEV;
	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data+IPA_PDU_HEADER_SIZE, data, data_len);
	rc = qeth_osn_send_ipa_cmd(card, iob, data_len);
	return rc;
}
EXPORT_SYMBOL(qeth_osn_assist);
1200
/*
 * Public OSN API: look up the qeth device by its read-channel device
 * number, return it through @dev and attach the caller's assist and data
 * callbacks.  Returns 0 on success, -ENODEV if no matching device/card
 * exists, -EINVAL if either callback is missing.
 */
int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
		  int (*assist_cb)(struct net_device *, void *),
		  int (*data_cb)(struct sk_buff *))
{
	struct qeth_card *card;

	QETH_DBF_TEXT(trace, 2, "osnreg");
	*dev = qeth_l2_netdev_by_devno(read_dev_no);
	if (*dev == NULL)
		return -ENODEV;
	card = netdev_priv(*dev);
	if (!card)
		return -ENODEV;
	if ((assist_cb == NULL) || (data_cb == NULL))
		return -EINVAL;
	card->osn_info.assist_cb = assist_cb;
	card->osn_info.data_cb = data_cb;
	return 0;
}
EXPORT_SYMBOL(qeth_osn_register);
1221
1222void qeth_osn_deregister(struct net_device *dev)
1223{
1224 struct qeth_card *card;
1225
1226 QETH_DBF_TEXT(trace, 2, "osndereg");
1227 if (!dev)
1228 return;
1229 card = netdev_priv(dev);
1230 if (!card)
1231 return;
1232 card->osn_info.assist_cb = NULL;
1233 card->osn_info.data_cb = NULL;
1234 return;
1235}
1236EXPORT_SYMBOL(qeth_osn_deregister);
1237
/* module entry points and metadata */
module_init(qeth_l2_init);
module_exit(qeth_l2_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth layer 2 discipline");
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
new file mode 100644
index 000000000000..f639cc3af22b
--- /dev/null
+++ b/drivers/s390/net/qeth_l3.h
@@ -0,0 +1,76 @@
1/*
2 * drivers/s390/net/qeth_l3.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#ifndef __QETH_L3_H__
12#define __QETH_L3_H__
13
14#include "qeth_core.h"
15
/*
 * Layer-3 variant of the formatted debug-trace macro: formats into a
 * per-CPU scratch buffer (so concurrent tracers do not share a buffer)
 * and logs the result to the selected s390 debug area, but only when the
 * area's current level lets @level pass.
 */
#define QETH_DBF_TEXT_(name, level, text...) \
	do { \
		if (qeth_dbf_passes(qeth_dbf_##name, level)) { \
			char *dbf_txt_buf = get_cpu_var(qeth_l3_dbf_txt_buf); \
			sprintf(dbf_txt_buf, text); \
			debug_text_event(qeth_dbf_##name, level, dbf_txt_buf); \
			put_cpu_var(qeth_l3_dbf_txt_buf); \
		} \
	} while (0)

/* per-CPU scratch buffer used by QETH_DBF_TEXT_ (defined in qeth_l3_main.c) */
DECLARE_PER_CPU(char[256], qeth_l3_dbf_txt_buf);
27
/*
 * One IP address tracked by the layer-3 discipline (on the card's
 * registered list or on the to-be-done list).
 */
struct qeth_ipaddr {
	struct list_head entry;		/* link in card ip/ip_tbd lists */
	enum qeth_ip_types type;	/* e.g. NORMAL or DEL_ALL_MC marker */
	enum qeth_ipa_setdelip_flags set_flags;	/* flags for the SETIP cmd */
	enum qeth_ipa_setdelip_flags del_flags;	/* flags for the DELIP cmd */
	int is_multicast;		/* nonzero for multicast addresses */
	int users;			/* reference count */
	enum qeth_prot_versions proto;	/* selects u.a4 vs. u.a6 below */
	unsigned char mac[OSA_ADDR_LEN];
	union {
		struct {
			unsigned int addr;	/* IPv4 address */
			unsigned int mask;	/* IPv4 netmask */
		} a4;
		struct {
			struct in6_addr addr;	/* IPv6 address */
			unsigned int pfxlen;	/* IPv6 prefix length */
		} a6;
	} u;
};
48
/*
 * One IP-address-takeover rule: addresses matching the first @mask_bits
 * bits of @addr (IPv4 uses 4 bytes, IPv6 all 16) are takeover candidates.
 */
struct qeth_ipato_entry {
	struct list_head entry;		/* link in card->ipato.entries */
	enum qeth_prot_versions proto;	/* IPv4 or IPv6 rule */
	char addr[16];			/* address prefix (network order) */
	int mask_bits;			/* number of significant bits */
};
55
56
57void qeth_l3_ipaddr4_to_string(const __u8 *, char *);
58int qeth_l3_string_to_ipaddr4(const char *, __u8 *);
59void qeth_l3_ipaddr6_to_string(const __u8 *, char *);
60int qeth_l3_string_to_ipaddr6(const char *, __u8 *);
61void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
62int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *);
63int qeth_l3_create_device_attributes(struct device *);
64void qeth_l3_remove_device_attributes(struct device *);
65int qeth_l3_setrouting_v4(struct qeth_card *);
66int qeth_l3_setrouting_v6(struct qeth_card *);
67int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
68void qeth_l3_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions,
69 u8 *, int);
70int qeth_l3_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
71void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
72int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
73void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
74 const u8 *);
75
76#endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
new file mode 100644
index 000000000000..cfa199a9df00
--- /dev/null
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -0,0 +1,3388 @@
1/*
2 * drivers/s390/net/qeth_l3_main.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/string.h>
14#include <linux/errno.h>
15#include <linux/kernel.h>
16#include <linux/etherdevice.h>
17#include <linux/mii.h>
18#include <linux/ip.h>
19#include <linux/reboot.h>
20#include <linux/inetdevice.h>
21#include <linux/igmp.h>
22
23#include <net/ip.h>
24#include <net/arp.h>
25
26#include <asm/s390_rdev.h>
27
28#include "qeth_l3.h"
29#include "qeth_core_offl.h"
30
31DEFINE_PER_CPU(char[256], qeth_l3_dbf_txt_buf);
32
33static int qeth_l3_set_offline(struct ccwgroup_device *);
34static int qeth_l3_recover(void *);
35static int qeth_l3_stop(struct net_device *);
36static void qeth_l3_set_multicast_list(struct net_device *);
37static int qeth_l3_neigh_setup(struct net_device *, struct neigh_parms *);
38static int qeth_l3_register_addr_entry(struct qeth_card *,
39 struct qeth_ipaddr *);
40static int qeth_l3_deregister_addr_entry(struct qeth_card *,
41 struct qeth_ipaddr *);
42static int __qeth_l3_set_online(struct ccwgroup_device *, int);
43static int __qeth_l3_set_offline(struct ccwgroup_device *, int);
44
45
/*
 * Return 1 if the NUL-terminated string consists entirely of hexadecimal
 * digits (the empty string also yields 1), 0 otherwise.
 *
 * Fixes: the character is cast to unsigned char before isxdigit() -
 * passing a plain char that may be negative to the ctype classifiers is
 * undefined behavior for bytes >= 0x80.  The parameter is also
 * const-qualified since the string is only read.
 */
static int qeth_l3_isxdigit(const char *buf)
{
	while (*buf) {
		if (!isxdigit((unsigned char)*buf++))
			return 0;
	}
	return 1;
}
54
/* Render a 4-byte IPv4 address as dotted decimal into @buf (caller must
 * provide at least 16 bytes: "255.255.255.255" + NUL). */
void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
{
	sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]);
}
59
/*
 * Parse dotted-decimal IPv4 text (optionally newline-terminated, as
 * delivered by sysfs writes) into the 4-byte @addr.
 * Returns 0 on success, -EINVAL on malformed input or octets > 255.
 *
 * NOTE(review): the octets are scanned with %u but stored in signed ints
 * and only range-checked with "> 255" - whether a leading '-' can slip
 * through depends on the sscanf implementation; confirm for this kernel.
 */
int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
{
	int count = 0, rc = 0;
	int in[4];
	char c;

	/* the %c catches trailing garbage; only '\n' is tolerated */
	rc = sscanf(buf, "%u.%u.%u.%u%c",
		    &in[0], &in[1], &in[2], &in[3], &c);
	if (rc != 4 && (rc != 5 || c != '\n'))
		return -EINVAL;
	for (count = 0; count < 4; count++) {
		if (in[count] > 255)
			return -EINVAL;
		addr[count] = in[count];
	}
	return 0;
}
77
/* Render a 16-byte IPv6 address as eight fully-padded hex groups into
 * @buf (caller must provide at least 40 bytes; no "::" compression). */
void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
{
	sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
		     ":%02x%02x:%02x%02x:%02x%02x:%02x%02x",
		     addr[0], addr[1], addr[2], addr[3],
		     addr[4], addr[5], addr[6], addr[7],
		     addr[8], addr[9], addr[10], addr[11],
		     addr[12], addr[13], addr[14], addr[15]);
}
87
/*
 * Parse IPv6 text notation (colon-separated hex groups, with at most one
 * "::" compression, optionally newline-terminated) into the 16-byte
 * @addr.  Groups before the "::" are stored from the front; groups after
 * it are collected in in_tmp[] and copied to the back once the total
 * group count is known.  Returns 0 on success, -EINVAL on malformed
 * input (group longer than 4 digits, non-hex characters, a second "::",
 * or more than 8 groups in total).
 */
int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
{
	const char *end, *end_tmp, *start;
	__u16 *in;
	char num[5];
	int num2, cnt, out, found, save_cnt;
	unsigned short in_tmp[8] = {0, };

	cnt = out = found = save_cnt = num2 = 0;
	end = start = buf;
	in = (__u16 *) addr;
	memset(in, 0, 16);
	while (*end) {
		end = strchr(start, ':');
		if (end == NULL) {
			/* last group: runs to end of string or to '\n' */
			end = buf + strlen(buf);
			end_tmp = strchr(start, '\n');
			if (end_tmp != NULL)
				end = end_tmp;
			out = 1;
		}
		if ((end - start)) {
			memset(num, 0, 5);
			if ((end - start) > 4)
				return -EINVAL;
			memcpy(num, start, end - start);
			if (!qeth_l3_isxdigit(num))
				return -EINVAL;
			sscanf(start, "%x", &num2);
			/* after a "::", groups belong at the tail */
			if (found)
				in_tmp[save_cnt++] = num2;
			else
				in[cnt++] = num2;
			if (out)
				break;
		} else {
			/* empty group marks the "::"; only one allowed */
			if (found)
				return -EINVAL;
			found = 1;
		}
		start = ++end;
	}
	if (cnt + save_cnt > 8)
		return -EINVAL;
	/* place the tail groups at the end, zeros remain in the middle */
	cnt = 7;
	while (save_cnt)
		in[cnt--] = in_tmp[--save_cnt];
	return 0;
}
137
138void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
139 char *buf)
140{
141 if (proto == QETH_PROT_IPV4)
142 qeth_l3_ipaddr4_to_string(addr, buf);
143 else if (proto == QETH_PROT_IPV6)
144 qeth_l3_ipaddr6_to_string(addr, buf);
145}
146
147int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
148 __u8 *addr)
149{
150 if (proto == QETH_PROT_IPV4)
151 return qeth_l3_string_to_ipaddr4(buf, addr);
152 else if (proto == QETH_PROT_IPV6)
153 return qeth_l3_string_to_ipaddr6(buf, addr);
154 else
155 return -EINVAL;
156}
157
158static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
159{
160 int i, j;
161 u8 octet;
162
163 for (i = 0; i < len; ++i) {
164 octet = addr[i];
165 for (j = 7; j >= 0; --j) {
166 bits[i*8 + j] = octet & 1;
167 octet >>= 1;
168 }
169 }
170}
171
/*
 * Decide whether @addr falls under one of the card's IP-address-takeover
 * rules.  Both the address and each rule prefix are expanded to bit
 * arrays and compared over the rule's mask length (capped at 32/128
 * bits).  The per-protocol invert flags flip the final verdict.
 * Returns nonzero if the address is covered, 0 otherwise (always 0 when
 * IPATO is disabled).
 */
static int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
						struct qeth_ipaddr *addr)
{
	struct qeth_ipato_entry *ipatoe;
	u8 addr_bits[128] = {0, };
	u8 ipatoe_bits[128] = {0, };
	int rc = 0;

	if (!card->ipato.enabled)
		return 0;

	qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
				  (addr->proto == QETH_PROT_IPV4)? 4:16);
	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
		if (addr->proto != ipatoe->proto)
			continue;
		qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
					  (ipatoe->proto == QETH_PROT_IPV4) ?
					  4 : 16);
		/* prefix match over the rule's mask length */
		if (addr->proto == QETH_PROT_IPV4)
			rc = !memcmp(addr_bits, ipatoe_bits,
				     min(32, ipatoe->mask_bits));
		else
			rc = !memcmp(addr_bits, ipatoe_bits,
				     min(128, ipatoe->mask_bits));
		if (rc)
			break;
	}
	/* invert? */
	if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
		rc = !rc;
	else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
		rc = !rc;

	return rc;
}
208
/*
 * Add an IP to the todo list. If there is already an "add todo"
 * in this list we just increment the reference count.
 * Returns 0 if we just adjusted an existing entry's reference count
 * (the caller keeps ownership of @addr), 1 if @addr was queued
 * (ownership passes to the list).
 */
static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
					struct qeth_ipaddr *addr, int add)
{
	struct qeth_ipaddr *tmp, *t;
	int found = 0;

	/* look for an existing todo entry matching @addr */
	list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
		/* a single DEL_ALL_MC marker in the list is enough */
		if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
		    (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
			return 0;
		if ((tmp->proto == QETH_PROT_IPV4) &&
		    (addr->proto == QETH_PROT_IPV4) &&
		    (tmp->type == addr->type) &&
		    (tmp->is_multicast == addr->is_multicast) &&
		    (tmp->u.a4.addr == addr->u.a4.addr) &&
		    (tmp->u.a4.mask == addr->u.a4.mask)) {
			found = 1;
			break;
		}
		if ((tmp->proto == QETH_PROT_IPV6) &&
		    (addr->proto == QETH_PROT_IPV6) &&
		    (tmp->type == addr->type) &&
		    (tmp->is_multicast == addr->is_multicast) &&
		    (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
		    (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
			    sizeof(struct in6_addr)) == 0)) {
			found = 1;
			break;
		}
	}
	if (found) {
		/* merge the reference counts; a zeroed entry cancels out */
		if (addr->users != 0)
			tmp->users += addr->users;
		else
			tmp->users += add ? 1 : -1;
		if (tmp->users == 0) {
			list_del(&tmp->entry);
			kfree(tmp);
		}
		return 0;
	} else {
		if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
			/* del-all-mc markers go to the front of the list */
			list_add(&addr->entry, card->ip_tbd_list);
		else {
			if (addr->users == 0)
				addr->users += add ? 1 : -1;
			/* flag addresses covered by takeover rules */
			if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
			    qeth_l3_is_addr_covered_by_ipato(card, addr)) {
				QETH_DBF_TEXT(trace, 2, "tkovaddr");
				addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
			}
			list_add_tail(&addr->entry, card->ip_tbd_list);
		}
		return 1;
	}
}
270
271static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
272{
273 unsigned long flags;
274 int rc = 0;
275
276 QETH_DBF_TEXT(trace, 4, "delip");
277
278 if (addr->proto == QETH_PROT_IPV4)
279 QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4);
280 else {
281 QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8);
282 QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8);
283 }
284 spin_lock_irqsave(&card->ip_lock, flags);
285 rc = __qeth_l3_insert_ip_todo(card, addr, 0);
286 spin_unlock_irqrestore(&card->ip_lock, flags);
287 return rc;
288}
289
290static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
291{
292 unsigned long flags;
293 int rc = 0;
294
295 QETH_DBF_TEXT(trace, 4, "addip");
296 if (addr->proto == QETH_PROT_IPV4)
297 QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4);
298 else {
299 QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8);
300 QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8);
301 }
302 spin_lock_irqsave(&card->ip_lock, flags);
303 rc = __qeth_l3_insert_ip_todo(card, addr, 1);
304 spin_unlock_irqrestore(&card->ip_lock, flags);
305 return rc;
306}
307
308
309static struct qeth_ipaddr *qeth_l3_get_addr_buffer(
310 enum qeth_prot_versions prot)
311{
312 struct qeth_ipaddr *addr;
313
314 addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
315 if (addr == NULL) {
316 PRINT_WARN("Not enough memory to add address\n");
317 return NULL;
318 }
319 addr->type = QETH_IP_TYPE_NORMAL;
320 addr->proto = prot;
321 return addr;
322}
323
324static void qeth_l3_delete_mc_addresses(struct qeth_card *card)
325{
326 struct qeth_ipaddr *iptodo;
327 unsigned long flags;
328
329 QETH_DBF_TEXT(trace, 4, "delmc");
330 iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
331 if (!iptodo) {
332 QETH_DBF_TEXT(trace, 2, "dmcnomem");
333 return;
334 }
335 iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
336 spin_lock_irqsave(&card->ip_lock, flags);
337 if (!__qeth_l3_insert_ip_todo(card, iptodo, 0))
338 kfree(iptodo);
339 spin_unlock_irqrestore(&card->ip_lock, flags);
340}
341
/*
 * Add/remove address to/from card's ip list, i.e. try to add or remove
 * reference to/from an IP address that is already registered on the card.
 * Must be called with card->ip_lock held.
 * Returns:
 *	0 address was on card and its reference count has been adjusted,
 *	  but is still > 0, so nothing has to be done
 *	  also returns 0 if the address was not on card and the todo was
 *	  to delete the address -> there is also nothing to be done
 *	1 address was not on card and the todo is to add it to the card's
 *	  ip list
 *	-1 address was on card and its reference count has been
 *	  decremented to <= 0 by the todo -> address must be removed from
 *	  card; the doomed list entry is returned through @__addr
 */
static int __qeth_l3_ref_ip_on_card(struct qeth_card *card,
		struct qeth_ipaddr *todo, struct qeth_ipaddr **__addr)
{
	struct qeth_ipaddr *addr;
	int found = 0;

	list_for_each_entry(addr, &card->ip_list, entry) {
		if ((addr->proto == QETH_PROT_IPV4) &&
		    (todo->proto == QETH_PROT_IPV4) &&
		    (addr->type == todo->type) &&
		    (addr->u.a4.addr == todo->u.a4.addr) &&
		    (addr->u.a4.mask == todo->u.a4.mask)) {
			found = 1;
			break;
		}
		if ((addr->proto == QETH_PROT_IPV6) &&
		    (todo->proto == QETH_PROT_IPV6) &&
		    (addr->type == todo->type) &&
		    (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
		    (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
			    sizeof(struct in6_addr)) == 0)) {
			found = 1;
			break;
		}
	}
	if (found) {
		/* todo->users may be negative for a delete request */
		addr->users += todo->users;
		if (addr->users <= 0) {
			*__addr = addr;
			return -1;
		} else {
			/* for VIPA and RXIP limit refcount to 1 */
			if (addr->type != QETH_IP_TYPE_NORMAL)
				addr->users = 1;
			return 0;
		}
	}
	if (todo->users > 0) {
		/* for VIPA and RXIP limit refcount to 1 */
		if (todo->type != QETH_IP_TYPE_NORMAL)
			todo->users = 1;
		return 1;
	} else
		return 0;
}
400
/*
 * Drop every multicast address from the card's ip_list.
 * Called with card->ip_lock held (*flags holds the caller's saved irq
 * state); the lock is released around qeth_l3_deregister_addr_entry()
 * since that performs card I/O.  Because the list may change while the
 * lock is dropped, the walk restarts from the head after each
 * successful removal.
 */
static void __qeth_l3_delete_all_mc(struct qeth_card *card,
					unsigned long *flags)
{
	struct qeth_ipaddr *addr, *tmp;
	int rc;
again:
	list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
		if (addr->is_multicast) {
			list_del(&addr->entry);
			spin_unlock_irqrestore(&card->ip_lock, *flags);
			rc = qeth_l3_deregister_addr_entry(card, addr);
			spin_lock_irqsave(&card->ip_lock, *flags);
			if (!rc) {
				kfree(addr);
				goto again;
			} else
				/* deregistration failed: keep the entry */
				list_add(&addr->entry, &card->ip_list);
		}
	}
}
421
/*
 * Work through the card's todo list: register newly added addresses
 * with the card and deregister/free removed ones.  The todo list is
 * detached and replaced with a fresh empty one up front so new todos
 * can be queued while this runs; card->ip_lock is dropped around the
 * register/deregister calls because they perform card I/O.
 */
static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
{
	struct list_head *tbd_list;
	struct qeth_ipaddr *todo, *addr;
	unsigned long flags;
	int rc;

	QETH_DBF_TEXT(trace, 2, "sdiplist");
	QETH_DBF_HEX(trace, 2, &card, sizeof(void *));

	spin_lock_irqsave(&card->ip_lock, flags);
	tbd_list = card->ip_tbd_list;
	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
	if (!card->ip_tbd_list) {
		/* allocation failed: keep the old todo list, retry later */
		QETH_DBF_TEXT(trace, 0, "silnomem");
		card->ip_tbd_list = tbd_list;
		spin_unlock_irqrestore(&card->ip_lock, flags);
		return;
	} else
		INIT_LIST_HEAD(card->ip_tbd_list);

	while (!list_empty(tbd_list)) {
		todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
		list_del(&todo->entry);
		if (todo->type == QETH_IP_TYPE_DEL_ALL_MC) {
			__qeth_l3_delete_all_mc(card, &flags);
			kfree(todo);
			continue;
		}
		rc = __qeth_l3_ref_ip_on_card(card, todo, &addr);
		if (rc == 0) {
			/* nothing to be done; only adjusted refcount */
			kfree(todo);
		} else if (rc == 1) {
			/* new entry to be added to on-card list */
			spin_unlock_irqrestore(&card->ip_lock, flags);
			rc = qeth_l3_register_addr_entry(card, todo);
			spin_lock_irqsave(&card->ip_lock, flags);
			if (!rc)
				list_add_tail(&todo->entry, &card->ip_list);
			else
				kfree(todo);
		} else if (rc == -1) {
			/* on-card entry to be removed */
			list_del_init(&addr->entry);
			spin_unlock_irqrestore(&card->ip_lock, flags);
			rc = qeth_l3_deregister_addr_entry(card, addr);
			spin_lock_irqsave(&card->ip_lock, flags);
			if (!rc)
				kfree(addr);
			else
				list_add_tail(&addr->entry, &card->ip_list);
			kfree(todo);
		}
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
	kfree(tbd_list);
}
480
/*
 * Empty the card's todo list and its on-card ip list.
 * @clean: also deregister each address from the card (performs I/O;
 *	   ip_lock is dropped around the call)
 * @recover: keep non-multicast addresses by re-queueing them on the
 *	     todo list so they get re-registered after recovery;
 *	     multicast entries are always freed
 */
static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
					int recover)
{
	struct qeth_ipaddr *addr, *tmp;
	unsigned long flags;

	QETH_DBF_TEXT(trace, 4, "clearip");
	spin_lock_irqsave(&card->ip_lock, flags);
	/* clear todo list */
	list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) {
		list_del(&addr->entry);
		kfree(addr);
	}

	while (!list_empty(&card->ip_list)) {
		addr = list_entry(card->ip_list.next,
				  struct qeth_ipaddr, entry);
		list_del_init(&addr->entry);
		if (clean) {
			spin_unlock_irqrestore(&card->ip_lock, flags);
			qeth_l3_deregister_addr_entry(card, addr);
			spin_lock_irqsave(&card->ip_lock, flags);
		}
		if (!recover || addr->is_multicast) {
			kfree(addr);
			continue;
		}
		/* recovery: re-queue as an "add" todo */
		list_add_tail(&addr->entry, card->ip_tbd_list);
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
}
512
513static int qeth_l3_address_exists_in_list(struct list_head *list,
514 struct qeth_ipaddr *addr, int same_type)
515{
516 struct qeth_ipaddr *tmp;
517
518 list_for_each_entry(tmp, list, entry) {
519 if ((tmp->proto == QETH_PROT_IPV4) &&
520 (addr->proto == QETH_PROT_IPV4) &&
521 ((same_type && (tmp->type == addr->type)) ||
522 (!same_type && (tmp->type != addr->type))) &&
523 (tmp->u.a4.addr == addr->u.a4.addr))
524 return 1;
525
526 if ((tmp->proto == QETH_PROT_IPV6) &&
527 (addr->proto == QETH_PROT_IPV6) &&
528 ((same_type && (tmp->type == addr->type)) ||
529 (!same_type && (tmp->type != addr->type))) &&
530 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
531 sizeof(struct in6_addr)) == 0))
532 return 1;
533
534 }
535 return 0;
536}
537
538static int qeth_l3_send_setdelmc(struct qeth_card *card,
539 struct qeth_ipaddr *addr, int ipacmd)
540{
541 int rc;
542 struct qeth_cmd_buffer *iob;
543 struct qeth_ipa_cmd *cmd;
544
545 QETH_DBF_TEXT(trace, 4, "setdelmc");
546
547 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
548 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
549 memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
550 if (addr->proto == QETH_PROT_IPV6)
551 memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
552 sizeof(struct in6_addr));
553 else
554 memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4);
555
556 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
557
558 return rc;
559}
560
/* Build a 16-byte netmask from prefix length @len: the leading @len bits
 * are set, the remainder cleared. */
static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len)
{
	int bits_left = (int) len;
	int i;

	for (i = 0; i < 16; i++) {
		if (bits_left >= 8)
			netmask[i] = 0xff;
		else if (bits_left > 0)
			netmask[i] = (u8)(0xFF00 >> bits_left);
		else
			netmask[i] = 0;
		bits_left -= 8;
	}
}
574
575static int qeth_l3_send_setdelip(struct qeth_card *card,
576 struct qeth_ipaddr *addr, int ipacmd, unsigned int flags)
577{
578 int rc;
579 struct qeth_cmd_buffer *iob;
580 struct qeth_ipa_cmd *cmd;
581 __u8 netmask[16];
582
583 QETH_DBF_TEXT(trace, 4, "setdelip");
584 QETH_DBF_TEXT_(trace, 4, "flags%02X", flags);
585
586 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
587 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
588 if (addr->proto == QETH_PROT_IPV6) {
589 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
590 sizeof(struct in6_addr));
591 qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen);
592 memcpy(cmd->data.setdelip6.mask, netmask,
593 sizeof(struct in6_addr));
594 cmd->data.setdelip6.flags = flags;
595 } else {
596 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
597 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
598 cmd->data.setdelip4.flags = flags;
599 }
600
601 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
602
603 return rc;
604}
605
606static int qeth_l3_send_setrouting(struct qeth_card *card,
607 enum qeth_routing_types type, enum qeth_prot_versions prot)
608{
609 int rc;
610 struct qeth_ipa_cmd *cmd;
611 struct qeth_cmd_buffer *iob;
612
613 QETH_DBF_TEXT(trace, 4, "setroutg");
614 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
615 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
616 cmd->data.setrtg.type = (type);
617 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
618
619 return rc;
620}
621
/*
 * Validate the requested routing type against the card type and its
 * capabilities.  IQD (HiperSockets) cards accept the connector and
 * multicast-router types, OSA cards the primary/secondary-router types
 * (multicast router only when the IPA function reports support).
 * Unsupported types are reset to NO_ROUTER with a warning.
 */
static void qeth_l3_correct_routing_type(struct qeth_card *card,
		enum qeth_routing_types *type, enum qeth_prot_versions prot)
{
	if (card->info.type == QETH_CARD_TYPE_IQD) {
		switch (*type) {
		case NO_ROUTER:
		case PRIMARY_CONNECTOR:
		case SECONDARY_CONNECTOR:
		case MULTICAST_ROUTER:
			return;
		default:
			goto out_inval;
		}
	} else {
		switch (*type) {
		case NO_ROUTER:
		case PRIMARY_ROUTER:
		case SECONDARY_ROUTER:
			return;
		case MULTICAST_ROUTER:
			if (qeth_is_ipafunc_supported(card, prot,
						      IPA_OSA_MC_ROUTER))
				return;
			/* fall through: multicast router unsupported */
		default:
			goto out_inval;
		}
	}
out_inval:
	PRINT_WARN("Routing type '%s' not supported for interface %s.\n"
		   "Router status set to 'no router'.\n",
		   ((*type == PRIMARY_ROUTER)? "primary router" :
		    (*type == SECONDARY_ROUTER)? "secondary router" :
		    (*type == PRIMARY_CONNECTOR)? "primary connector" :
		    (*type == SECONDARY_CONNECTOR)? "secondary connector" :
		    (*type == MULTICAST_ROUTER)? "multicast router" :
		    "unknown"),
		   card->dev->name);
	*type = NO_ROUTER;
}
661
662int qeth_l3_setrouting_v4(struct qeth_card *card)
663{
664 int rc;
665
666 QETH_DBF_TEXT(trace, 3, "setrtg4");
667
668 qeth_l3_correct_routing_type(card, &card->options.route4.type,
669 QETH_PROT_IPV4);
670
671 rc = qeth_l3_send_setrouting(card, card->options.route4.type,
672 QETH_PROT_IPV4);
673 if (rc) {
674 card->options.route4.type = NO_ROUTER;
675 PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
676 "Type set to 'no router'.\n",
677 rc, QETH_CARD_IFNAME(card));
678 }
679 return rc;
680}
681
/*
 * Push the configured IPv6 routing type to the card.  No-op (returns 0)
 * when the kernel lacks CONFIG_QETH_IPV6 or the card does not support
 * the IPv6 assist.  On failure the option falls back to NO_ROUTER.
 */
int qeth_l3_setrouting_v6(struct qeth_card *card)
{
	int rc = 0;

	QETH_DBF_TEXT(trace, 3, "setrtg6");
#ifdef CONFIG_QETH_IPV6

	if (!qeth_is_supported(card, IPA_IPV6))
		return 0;
	qeth_l3_correct_routing_type(card, &card->options.route6.type,
				     QETH_PROT_IPV6);

	rc = qeth_l3_send_setrouting(card, card->options.route6.type,
				     QETH_PROT_IPV6);
	if (rc) {
		card->options.route6.type = NO_ROUTER;
		PRINT_WARN("Error (0x%04x) while setting routing type on %s. "
			   "Type set to 'no router'.\n",
			   rc, QETH_CARD_IFNAME(card));
	}
#endif
	return rc;
}
705
706/*
707 * IP address takeover related functions
708 */
709static void qeth_l3_clear_ipato_list(struct qeth_card *card)
710{
711
712 struct qeth_ipato_entry *ipatoe, *tmp;
713 unsigned long flags;
714
715 spin_lock_irqsave(&card->ip_lock, flags);
716 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
717 list_del(&ipatoe->entry);
718 kfree(ipatoe);
719 }
720 spin_unlock_irqrestore(&card->ip_lock, flags);
721}
722
723int qeth_l3_add_ipato_entry(struct qeth_card *card,
724 struct qeth_ipato_entry *new)
725{
726 struct qeth_ipato_entry *ipatoe;
727 unsigned long flags;
728 int rc = 0;
729
730 QETH_DBF_TEXT(trace, 2, "addipato");
731 spin_lock_irqsave(&card->ip_lock, flags);
732 list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
733 if (ipatoe->proto != new->proto)
734 continue;
735 if (!memcmp(ipatoe->addr, new->addr,
736 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
737 (ipatoe->mask_bits == new->mask_bits)) {
738 PRINT_WARN("ipato entry already exists!\n");
739 rc = -EEXIST;
740 break;
741 }
742 }
743 if (!rc)
744 list_add_tail(&new->entry, &card->ipato.entries);
745
746 spin_unlock_irqrestore(&card->ip_lock, flags);
747 return rc;
748}
749
750void qeth_l3_del_ipato_entry(struct qeth_card *card,
751 enum qeth_prot_versions proto, u8 *addr, int mask_bits)
752{
753 struct qeth_ipato_entry *ipatoe, *tmp;
754 unsigned long flags;
755
756 QETH_DBF_TEXT(trace, 2, "delipato");
757 spin_lock_irqsave(&card->ip_lock, flags);
758 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
759 if (ipatoe->proto != proto)
760 continue;
761 if (!memcmp(ipatoe->addr, addr,
762 (proto == QETH_PROT_IPV4)? 4:16) &&
763 (ipatoe->mask_bits == mask_bits)) {
764 list_del(&ipatoe->entry);
765 kfree(ipatoe);
766 }
767 }
768 spin_unlock_irqrestore(&card->ip_lock, flags);
769}
770
771/*
772 * VIPA related functions
773 */
774int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
775 const u8 *addr)
776{
777 struct qeth_ipaddr *ipaddr;
778 unsigned long flags;
779 int rc = 0;
780
781 ipaddr = qeth_l3_get_addr_buffer(proto);
782 if (ipaddr) {
783 if (proto == QETH_PROT_IPV4) {
784 QETH_DBF_TEXT(trace, 2, "addvipa4");
785 memcpy(&ipaddr->u.a4.addr, addr, 4);
786 ipaddr->u.a4.mask = 0;
787 } else if (proto == QETH_PROT_IPV6) {
788 QETH_DBF_TEXT(trace, 2, "addvipa6");
789 memcpy(&ipaddr->u.a6.addr, addr, 16);
790 ipaddr->u.a6.pfxlen = 0;
791 }
792 ipaddr->type = QETH_IP_TYPE_VIPA;
793 ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
794 ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
795 } else
796 return -ENOMEM;
797 spin_lock_irqsave(&card->ip_lock, flags);
798 if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
799 qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
800 rc = -EEXIST;
801 spin_unlock_irqrestore(&card->ip_lock, flags);
802 if (rc) {
803 PRINT_WARN("Cannot add VIPA. Address already exists!\n");
804 return rc;
805 }
806 if (!qeth_l3_add_ip(card, ipaddr))
807 kfree(ipaddr);
808 qeth_l3_set_ip_addr_list(card);
809 return rc;
810}
811
812void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
813 const u8 *addr)
814{
815 struct qeth_ipaddr *ipaddr;
816
817 ipaddr = qeth_l3_get_addr_buffer(proto);
818 if (ipaddr) {
819 if (proto == QETH_PROT_IPV4) {
820 QETH_DBF_TEXT(trace, 2, "delvipa4");
821 memcpy(&ipaddr->u.a4.addr, addr, 4);
822 ipaddr->u.a4.mask = 0;
823 } else if (proto == QETH_PROT_IPV6) {
824 QETH_DBF_TEXT(trace, 2, "delvipa6");
825 memcpy(&ipaddr->u.a6.addr, addr, 16);
826 ipaddr->u.a6.pfxlen = 0;
827 }
828 ipaddr->type = QETH_IP_TYPE_VIPA;
829 } else
830 return;
831 if (!qeth_l3_delete_ip(card, ipaddr))
832 kfree(ipaddr);
833 qeth_l3_set_ip_addr_list(card);
834}
835
836/*
837 * proxy ARP related functions
838 */
839int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
840 const u8 *addr)
841{
842 struct qeth_ipaddr *ipaddr;
843 unsigned long flags;
844 int rc = 0;
845
846 ipaddr = qeth_l3_get_addr_buffer(proto);
847 if (ipaddr) {
848 if (proto == QETH_PROT_IPV4) {
849 QETH_DBF_TEXT(trace, 2, "addrxip4");
850 memcpy(&ipaddr->u.a4.addr, addr, 4);
851 ipaddr->u.a4.mask = 0;
852 } else if (proto == QETH_PROT_IPV6) {
853 QETH_DBF_TEXT(trace, 2, "addrxip6");
854 memcpy(&ipaddr->u.a6.addr, addr, 16);
855 ipaddr->u.a6.pfxlen = 0;
856 }
857 ipaddr->type = QETH_IP_TYPE_RXIP;
858 ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
859 ipaddr->del_flags = 0;
860 } else
861 return -ENOMEM;
862 spin_lock_irqsave(&card->ip_lock, flags);
863 if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
864 qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
865 rc = -EEXIST;
866 spin_unlock_irqrestore(&card->ip_lock, flags);
867 if (rc) {
868 PRINT_WARN("Cannot add RXIP. Address already exists!\n");
869 return rc;
870 }
871 if (!qeth_l3_add_ip(card, ipaddr))
872 kfree(ipaddr);
873 qeth_l3_set_ip_addr_list(card);
874 return 0;
875}
876
877void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
878 const u8 *addr)
879{
880 struct qeth_ipaddr *ipaddr;
881
882 ipaddr = qeth_l3_get_addr_buffer(proto);
883 if (ipaddr) {
884 if (proto == QETH_PROT_IPV4) {
885 QETH_DBF_TEXT(trace, 2, "addrxip4");
886 memcpy(&ipaddr->u.a4.addr, addr, 4);
887 ipaddr->u.a4.mask = 0;
888 } else if (proto == QETH_PROT_IPV6) {
889 QETH_DBF_TEXT(trace, 2, "addrxip6");
890 memcpy(&ipaddr->u.a6.addr, addr, 16);
891 ipaddr->u.a6.pfxlen = 0;
892 }
893 ipaddr->type = QETH_IP_TYPE_RXIP;
894 } else
895 return;
896 if (!qeth_l3_delete_ip(card, ipaddr))
897 kfree(ipaddr);
898 qeth_l3_set_ip_addr_list(card);
899}
900
901static int qeth_l3_register_addr_entry(struct qeth_card *card,
902 struct qeth_ipaddr *addr)
903{
904 char buf[50];
905 int rc = 0;
906 int cnt = 3;
907
908 if (addr->proto == QETH_PROT_IPV4) {
909 QETH_DBF_TEXT(trace, 2, "setaddr4");
910 QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
911 } else if (addr->proto == QETH_PROT_IPV6) {
912 QETH_DBF_TEXT(trace, 2, "setaddr6");
913 QETH_DBF_HEX(trace, 3, &addr->u.a6.addr, 8);
914 QETH_DBF_HEX(trace, 3, ((char *)&addr->u.a6.addr) + 8, 8);
915 } else {
916 QETH_DBF_TEXT(trace, 2, "setaddr?");
917 QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
918 }
919 do {
920 if (addr->is_multicast)
921 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM);
922 else
923 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP,
924 addr->set_flags);
925 if (rc)
926 QETH_DBF_TEXT(trace, 2, "failed");
927 } while ((--cnt > 0) && rc);
928 if (rc) {
929 QETH_DBF_TEXT(trace, 2, "FAILED");
930 qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
931 PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n",
932 buf, rc, rc);
933 }
934 return rc;
935}
936
/*
 * Remove @addr from the card via DELIPM (multicast) or DELIP.
 * Returns the IPA command's result; 0 on success.
 */
static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
					struct qeth_ipaddr *addr)
{
	int rc = 0;

	if (addr->proto == QETH_PROT_IPV4) {
		QETH_DBF_TEXT(trace, 2, "deladdr4");
		QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
	} else if (addr->proto == QETH_PROT_IPV6) {
		QETH_DBF_TEXT(trace, 2, "deladdr6");
		QETH_DBF_HEX(trace, 3, &addr->u.a6.addr, 8);
		QETH_DBF_HEX(trace, 3, ((char *)&addr->u.a6.addr) + 8, 8);
	} else {
		QETH_DBF_TEXT(trace, 2, "deladdr?");
		QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
	}
	if (addr->is_multicast)
		rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM);
	else
		rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP,
					addr->del_flags);
	if (rc) {
		QETH_DBF_TEXT(trace, 2, "failed");
		/* TODO: re-activate this warning as soon as we have a
		 * clean microcode
		qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
		PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
			buf, rc);
		*/
	}

	return rc;
}
970
971static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
972{
973 if (cast_type == RTN_MULTICAST)
974 return QETH_CAST_MULTICAST;
975 if (cast_type == RTN_BROADCAST)
976 return QETH_CAST_BROADCAST;
977 return QETH_CAST_UNICAST;
978}
979
980static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
981{
982 u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
983 if (cast_type == RTN_MULTICAST)
984 return ct | QETH_CAST_MULTICAST;
985 if (cast_type == RTN_ANYCAST)
986 return ct | QETH_CAST_ANYCAST;
987 if (cast_type == RTN_BROADCAST)
988 return ct | QETH_CAST_BROADCAST;
989 return ct | QETH_CAST_UNICAST;
990}
991
992static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command,
993 __u32 mode)
994{
995 int rc;
996 struct qeth_cmd_buffer *iob;
997 struct qeth_ipa_cmd *cmd;
998
999 QETH_DBF_TEXT(trace, 4, "adpmode");
1000
1001 iob = qeth_get_adapter_cmd(card, command,
1002 sizeof(struct qeth_ipacmd_setadpparms));
1003 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1004 cmd->data.setadapterparms.data.mode = mode;
1005 rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
1006 NULL);
1007 return rc;
1008}
1009
1010static int qeth_l3_setadapter_hstr(struct qeth_card *card)
1011{
1012 int rc;
1013
1014 QETH_DBF_TEXT(trace, 4, "adphstr");
1015
1016 if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) {
1017 rc = qeth_l3_send_setadp_mode(card,
1018 IPA_SETADP_SET_BROADCAST_MODE,
1019 card->options.broadcast_mode);
1020 if (rc)
1021 PRINT_WARN("couldn't set broadcast mode on "
1022 "device %s: x%x\n",
1023 CARD_BUS_ID(card), rc);
1024 rc = qeth_l3_send_setadp_mode(card,
1025 IPA_SETADP_ALTER_MAC_ADDRESS,
1026 card->options.macaddr_mode);
1027 if (rc)
1028 PRINT_WARN("couldn't set macaddr mode on "
1029 "device %s: x%x\n", CARD_BUS_ID(card), rc);
1030 return rc;
1031 }
1032 if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
1033 PRINT_WARN("set adapter parameters not available "
1034 "to set broadcast mode, using ALLRINGS "
1035 "on device %s:\n", CARD_BUS_ID(card));
1036 if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
1037 PRINT_WARN("set adapter parameters not available "
1038 "to set macaddr mode, using NONCANONICAL "
1039 "on device %s:\n", CARD_BUS_ID(card));
1040 return 0;
1041}
1042
/*
 * Query and configure the card's adapter parameters: read the supported
 * sub-commands, fetch the MAC address if the card can report it, and
 * set the token-ring modes on HSTR/LANE-TR links.
 * Returns 0 when setadapterparms is unsupported (not an error),
 * otherwise the last failing sub-command's result.
 */
static int qeth_l3_setadapter_parms(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(setup, 2, "setadprm");

	if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
		PRINT_WARN("set adapter parameters not supported "
			   "on device %s.\n",
			   CARD_BUS_ID(card));
		QETH_DBF_TEXT(setup, 2, " notsupp");
		return 0;
	}
	rc = qeth_query_setadapterparms(card);
	if (rc) {
		PRINT_WARN("couldn't set adapter parameters on device %s: "
			   "x%x\n", CARD_BUS_ID(card), rc);
		return rc;
	}
	if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
		rc = qeth_setadpparms_change_macaddr(card);
		if (rc)
			PRINT_WARN("couldn't get MAC address on "
				   "device %s: x%x\n",
				   CARD_BUS_ID(card), rc);
	}

	/* token-ring links additionally need broadcast/macaddr modes */
	if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
	    (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
		rc = qeth_l3_setadapter_hstr(card);

	return rc;
}
1076
1077static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
1078 struct qeth_reply *reply, unsigned long data)
1079{
1080 struct qeth_ipa_cmd *cmd;
1081
1082 QETH_DBF_TEXT(trace, 4, "defadpcb");
1083
1084 cmd = (struct qeth_ipa_cmd *) data;
1085 if (cmd->hdr.return_code == 0) {
1086 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
1087 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
1088 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
1089 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
1090 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
1091 }
1092 if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
1093 cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
1094 card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
1095 QETH_DBF_TEXT_(trace, 3, "csum:%d", card->info.csum_mask);
1096 }
1097 return 0;
1098}
1099
1100static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
1101 struct qeth_card *card, enum qeth_ipa_funcs ipa_func, __u16 cmd_code,
1102 __u16 len, enum qeth_prot_versions prot)
1103{
1104 struct qeth_cmd_buffer *iob;
1105 struct qeth_ipa_cmd *cmd;
1106
1107 QETH_DBF_TEXT(trace, 4, "getasscm");
1108 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
1109
1110 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1111 cmd->data.setassparms.hdr.assist_no = ipa_func;
1112 cmd->data.setassparms.hdr.length = 8 + len;
1113 cmd->data.setassparms.hdr.command_code = cmd_code;
1114 cmd->data.setassparms.hdr.return_code = 0;
1115 cmd->data.setassparms.hdr.seq_no = 0;
1116
1117 return iob;
1118}
1119
1120static int qeth_l3_send_setassparms(struct qeth_card *card,
1121 struct qeth_cmd_buffer *iob, __u16 len, long data,
1122 int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
1123 unsigned long),
1124 void *reply_param)
1125{
1126 int rc;
1127 struct qeth_ipa_cmd *cmd;
1128
1129 QETH_DBF_TEXT(trace, 4, "sendassp");
1130
1131 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1132 if (len <= sizeof(__u32))
1133 cmd->data.setassparms.data.flags_32bit = (__u32) data;
1134 else /* (len > sizeof(__u32)) */
1135 memcpy(&cmd->data.setassparms.data, (void *) data, len);
1136
1137 rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
1138 return rc;
1139}
1140
#ifdef CONFIG_QETH_IPV6
/* Send a parameterless SETASSPARMS command for @ipa_func over IPv6. */
static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
		enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
{
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(trace, 4, "simassp6");
	iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
					  0, QETH_PROT_IPV6);
	return qeth_l3_send_setassparms(card, iob, 0, 0,
					qeth_l3_default_setassparms_cb, NULL);
}
#endif
1156
1157static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
1158 enum qeth_ipa_funcs ipa_func, __u16 cmd_code, long data)
1159{
1160 int rc;
1161 int length = 0;
1162 struct qeth_cmd_buffer *iob;
1163
1164 QETH_DBF_TEXT(trace, 4, "simassp4");
1165 if (data)
1166 length = sizeof(__u32);
1167 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
1168 length, QETH_PROT_IPV4);
1169 rc = qeth_l3_send_setassparms(card, iob, length, data,
1170 qeth_l3_default_setassparms_cb, NULL);
1171 return rc;
1172}
1173
/*
 * Start the ARP-processing assist.  A card without support is not an
 * error (returns 0 with a warning); a failing start command is reported
 * and its result returned.
 */
static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(trace, 3, "ipaarp");

	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
		PRINT_WARN("ARP processing not supported "
			   "on %s!\n", QETH_CARD_IFNAME(card));
		return 0;
	}
	rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
					IPA_CMD_ASS_START, 0);
	if (rc) {
		PRINT_WARN("Could not start ARP processing "
			   "assist on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
	}
	return rc;
}
1194
/*
 * Start the hardware IP fragmentation assist.
 * Returns -EOPNOTSUPP when the card lacks the assist, otherwise the
 * start command's result.
 */
static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(trace, 3, "ipaipfrg");

	if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
		PRINT_INFO("Hardware IP fragmentation not supported on %s\n",
			   QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}

	rc = qeth_l3_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
					  IPA_CMD_ASS_START, 0);
	if (rc) {
		PRINT_WARN("Could not start Hardware IP fragmentation "
			   "assist on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
	} else
		PRINT_INFO("Hardware IP fragmentation enabled \n");
	return rc;
}
1217
/*
 * Start the inbound source-MAC assist.  Only attempted when the
 * fake_ll option is set; returns -EOPNOTSUPP when that option is off
 * or the card lacks the assist.
 */
static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(trace, 3, "stsrcmac");

	if (!card->options.fake_ll)
		return -EOPNOTSUPP;

	if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
		PRINT_INFO("Inbound source address not "
			   "supported on %s\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}

	rc = qeth_l3_send_simple_setassparms(card, IPA_SOURCE_MAC,
					  IPA_CMD_ASS_START, 0);
	if (rc)
		PRINT_WARN("Could not start inbound source "
			   "assist on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
	return rc;
}
1241
/*
 * Start the VLAN priority assist.  Returns -EOPNOTSUPP when the card
 * lacks full VLAN support, otherwise the start command's result.
 */
static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
{
	int rc = 0;

	QETH_DBF_TEXT(trace, 3, "strtvlan");

	if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
		PRINT_WARN("VLAN not supported on %s\n",
			   QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}

	rc = qeth_l3_send_simple_setassparms(card, IPA_VLAN_PRIO,
					  IPA_CMD_ASS_START, 0);
	if (rc) {
		PRINT_WARN("Could not start vlan "
			   "assist on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
	} else {
		PRINT_INFO("VLAN enabled \n");
	}
	return rc;
}
1265
/*
 * Start the multicast assist and, on success, flag the net device as
 * multicast-capable (IFF_MULTICAST).  Returns -EOPNOTSUPP when the
 * card lacks the assist, otherwise the start command's result.
 */
static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(trace, 3, "stmcast");

	if (!qeth_is_supported(card, IPA_MULTICASTING)) {
		PRINT_WARN("Multicast not supported on %s\n",
			   QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}

	rc = qeth_l3_send_simple_setassparms(card, IPA_MULTICASTING,
					  IPA_CMD_ASS_START, 0);
	if (rc) {
		PRINT_WARN("Could not start multicast "
			   "assist on %s: rc=%i\n",
			   QETH_CARD_IFNAME(card), rc);
	} else {
		PRINT_INFO("Multicast enabled\n");
		card->dev->flags |= IFF_MULTICAST;
	}
	return rc;
}
1290
/*
 * QIPASSIST reply callback: record the supported and enabled assist
 * masks in card->options, per reply protocol version (IPv4 or IPv6).
 * Always returns 0.
 */
static int qeth_l3_query_ipassists_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(setup, 2, "qipasscb");

	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
		card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
	} else {
		card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
		card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
	}
	QETH_DBF_TEXT(setup, 2, "suppenbl");
	QETH_DBF_TEXT_(setup, 2, "%x", cmd->hdr.ipa_supported);
	QETH_DBF_TEXT_(setup, 2, "%x", cmd->hdr.ipa_enabled);
	return 0;
}
1311
1312static int qeth_l3_query_ipassists(struct qeth_card *card,
1313 enum qeth_prot_versions prot)
1314{
1315 int rc;
1316 struct qeth_cmd_buffer *iob;
1317
1318 QETH_DBF_TEXT_(setup, 2, "qipassi%i", prot);
1319 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
1320 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_query_ipassists_cb, NULL);
1321 return rc;
1322}
1323
#ifdef CONFIG_QETH_IPV6
/* Enable IPv6 on the card: query the IPv6 assist masks, then start the
 * IPv6 assist (first via the IPv4-protocol command variant, then via the
 * IPv6 variant) and the passthrough assist.  IQD (HiperSockets) cards
 * skip the setup and are reported as enabled directly. */
static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(trace, 3, "softipv6");

	if (card->info.type == QETH_CARD_TYPE_IQD)
		goto out;

	rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6);
	if (rc) {
		PRINT_ERR("IPv6 query ipassist failed on %s\n",
			  QETH_CARD_IFNAME(card));
		return rc;
	}
	/* start IPv6 assist through the IPv4 command variant first */
	rc = qeth_l3_send_simple_setassparms(card, IPA_IPV6,
					     IPA_CMD_ASS_START, 3);
	if (rc) {
		PRINT_WARN("IPv6 start assist (version 4) failed "
			   "on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		return rc;
	}
	/* ... then through the IPv6 command variant */
	rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6,
						  IPA_CMD_ASS_START);
	if (rc) {
		PRINT_WARN("IPV6 start assist (version 6) failed "
			   "on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		return rc;
	}
	rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
						  IPA_CMD_ASS_START);
	if (rc) {
		PRINT_WARN("Could not enable passthrough "
			   "on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		return rc;
	}
out:
	PRINT_INFO("IPV6 enabled \n");
	return 0;
}
#endif
1369
1370static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
1371{
1372 int rc = 0;
1373
1374 QETH_DBF_TEXT(trace, 3, "strtipv6");
1375
1376 if (!qeth_is_supported(card, IPA_IPV6)) {
1377 PRINT_WARN("IPv6 not supported on %s\n",
1378 QETH_CARD_IFNAME(card));
1379 return 0;
1380 }
1381#ifdef CONFIG_QETH_IPV6
1382 rc = qeth_l3_softsetup_ipv6(card);
1383#endif
1384 return rc ;
1385}
1386
/* Enable broadcast filtering and track the level actually achieved in
 * card->info.broadcast_capable; IFF_BROADCAST is set on the net_device
 * only when some broadcast mode could be established. */
static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(trace, 3, "stbrdcst");
	card->info.broadcast_capable = 0;
	if (!qeth_is_supported(card, IPA_FILTERING)) {
		PRINT_WARN("Broadcast not supported on %s\n",
			   QETH_CARD_IFNAME(card));
		rc = -EOPNOTSUPP;
		goto out;
	}
	rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
					     IPA_CMD_ASS_START, 0);
	if (rc) {
		PRINT_WARN("Could not enable broadcasting filtering "
			   "on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		goto out;
	}

	rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
					     IPA_CMD_ASS_CONFIGURE, 1);
	if (rc) {
		PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n",
			   QETH_CARD_IFNAME(card), rc);
		goto out;
	}
	/* from here on broadcast works, at least with local echo */
	card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
	PRINT_INFO("Broadcast enabled \n");
	rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
					     IPA_CMD_ASS_ENABLE, 1);
	if (rc) {
		PRINT_WARN("Could not set up broadcast echo filtering on "
			   "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc);
		goto out;
	}
	/* echo filtering succeeded too: upgrade the capability level */
	card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
out:
	if (card->info.broadcast_capable)
		card->dev->flags |= IFF_BROADCAST;
	else
		card->dev->flags &= ~IFF_BROADCAST;
	return rc;
}
1432
1433static int qeth_l3_send_checksum_command(struct qeth_card *card)
1434{
1435 int rc;
1436
1437 rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
1438 IPA_CMD_ASS_START, 0);
1439 if (rc) {
1440 PRINT_WARN("Starting Inbound HW Checksumming failed on %s: "
1441 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
1442 QETH_CARD_IFNAME(card), rc);
1443 return rc;
1444 }
1445 rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
1446 IPA_CMD_ASS_ENABLE,
1447 card->info.csum_mask);
1448 if (rc) {
1449 PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: "
1450 "0x%x,\ncontinuing using Inbound SW Checksumming\n",
1451 QETH_CARD_IFNAME(card), rc);
1452 return rc;
1453 }
1454 return 0;
1455}
1456
1457static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
1458{
1459 int rc = 0;
1460
1461 QETH_DBF_TEXT(trace, 3, "strtcsum");
1462
1463 if (card->options.checksum_type == NO_CHECKSUMMING) {
1464 PRINT_WARN("Using no checksumming on %s.\n",
1465 QETH_CARD_IFNAME(card));
1466 return 0;
1467 }
1468 if (card->options.checksum_type == SW_CHECKSUMMING) {
1469 PRINT_WARN("Using SW checksumming on %s.\n",
1470 QETH_CARD_IFNAME(card));
1471 return 0;
1472 }
1473 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
1474 PRINT_WARN("Inbound HW Checksumming not "
1475 "supported on %s,\ncontinuing "
1476 "using Inbound SW Checksumming\n",
1477 QETH_CARD_IFNAME(card));
1478 card->options.checksum_type = SW_CHECKSUMMING;
1479 return 0;
1480 }
1481 rc = qeth_l3_send_checksum_command(card);
1482 if (!rc)
1483 PRINT_INFO("HW Checksumming (inbound) enabled \n");
1484
1485 return rc;
1486}
1487
1488static int qeth_l3_start_ipa_tso(struct qeth_card *card)
1489{
1490 int rc;
1491
1492 QETH_DBF_TEXT(trace, 3, "sttso");
1493
1494 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
1495 PRINT_WARN("Outbound TSO not supported on %s\n",
1496 QETH_CARD_IFNAME(card));
1497 rc = -EOPNOTSUPP;
1498 } else {
1499 rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
1500 IPA_CMD_ASS_START, 0);
1501 if (rc)
1502 PRINT_WARN("Could not start outbound TSO "
1503 "assist on %s: rc=%i\n",
1504 QETH_CARD_IFNAME(card), rc);
1505 else
1506 PRINT_INFO("Outbound TSO enabled\n");
1507 }
1508 if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) {
1509 card->options.large_send = QETH_LARGE_SEND_NO;
1510 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
1511 }
1512 return rc;
1513}
1514
1515static int qeth_l3_start_ipassists(struct qeth_card *card)
1516{
1517 QETH_DBF_TEXT(trace, 3, "strtipas");
1518 qeth_l3_start_ipa_arp_processing(card); /* go on*/
1519 qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/
1520 qeth_l3_start_ipa_source_mac(card); /* go on*/
1521 qeth_l3_start_ipa_vlan(card); /* go on*/
1522 qeth_l3_start_ipa_multicast(card); /* go on*/
1523 qeth_l3_start_ipa_ipv6(card); /* go on*/
1524 qeth_l3_start_ipa_broadcast(card); /* go on*/
1525 qeth_l3_start_ipa_checksum(card); /* go on*/
1526 qeth_l3_start_ipa_tso(card); /* go on*/
1527 return 0;
1528}
1529
/* Return the IPv6 unique id to the card via DESTROY_ADDR so it can be
 * handed out again.  Returns -1 without sending anything when the id
 * was not assigned by the card in the first place. */
static int qeth_l3_put_unique_id(struct qeth_card *card)
{

	int rc = 0;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(trace, 2, "puniqeid");

	if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
		UNIQUE_ID_NOT_BY_CARD)
		return -1;
	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
				     QETH_PROT_IPV6);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	/* bytes 6..7 carry the unique id, bytes 0..5 the device MAC */
	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
			card->info.unique_id;
	memcpy(&cmd->data.create_destroy_addr.unique_id[0],
	       card->dev->dev_addr, OSA_ADDR_LEN);
	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
	return rc;
}
1552
1553static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
1554 struct qeth_reply *reply, unsigned long data)
1555{
1556 struct qeth_ipa_cmd *cmd;
1557
1558 cmd = (struct qeth_ipa_cmd *) data;
1559 if (cmd->hdr.return_code == 0)
1560 memcpy(card->dev->dev_addr,
1561 cmd->data.create_destroy_addr.unique_id, ETH_ALEN);
1562 else
1563 random_ether_addr(card->dev->dev_addr);
1564
1565 return 0;
1566}
1567
1568static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
1569{
1570 int rc = 0;
1571 struct qeth_cmd_buffer *iob;
1572 struct qeth_ipa_cmd *cmd;
1573
1574 QETH_DBF_TEXT(setup, 2, "hsrmac");
1575
1576 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
1577 QETH_PROT_IPV6);
1578 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1579 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1580 card->info.unique_id;
1581
1582 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb,
1583 NULL);
1584 return rc;
1585}
1586
1587static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
1588 struct qeth_reply *reply, unsigned long data)
1589{
1590 struct qeth_ipa_cmd *cmd;
1591
1592 cmd = (struct qeth_ipa_cmd *) data;
1593 if (cmd->hdr.return_code == 0)
1594 card->info.unique_id = *((__u16 *)
1595 &cmd->data.create_destroy_addr.unique_id[6]);
1596 else {
1597 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
1598 UNIQUE_ID_NOT_BY_CARD;
1599 PRINT_WARN("couldn't get a unique id from the card on device "
1600 "%s (result=x%x), using default id. ipv6 "
1601 "autoconfig on other lpars may lead to duplicate "
1602 "ip addresses. please use manually "
1603 "configured ones.\n",
1604 CARD_BUS_ID(card), cmd->hdr.return_code);
1605 }
1606 return 0;
1607}
1608
1609static int qeth_l3_get_unique_id(struct qeth_card *card)
1610{
1611 int rc = 0;
1612 struct qeth_cmd_buffer *iob;
1613 struct qeth_ipa_cmd *cmd;
1614
1615 QETH_DBF_TEXT(setup, 2, "guniqeid");
1616
1617 if (!qeth_is_supported(card, IPA_IPV6)) {
1618 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
1619 UNIQUE_ID_NOT_BY_CARD;
1620 return 0;
1621 }
1622
1623 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
1624 QETH_PROT_IPV6);
1625 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1626 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1627 card->info.unique_id;
1628
1629 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL);
1630 return rc;
1631}
1632
1633static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
1634 struct net_device *dev)
1635{
1636 if (dev->type == ARPHRD_IEEE802_TR)
1637 ip_tr_mc_map(ipm, mac);
1638 else
1639 ip_eth_mc_map(ipm, mac);
1640}
1641
1642static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
1643{
1644 struct qeth_ipaddr *ipm;
1645 struct ip_mc_list *im4;
1646 char buf[MAX_ADDR_LEN];
1647
1648 QETH_DBF_TEXT(trace, 4, "addmc");
1649 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
1650 qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
1651 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
1652 if (!ipm)
1653 continue;
1654 ipm->u.a4.addr = im4->multiaddr;
1655 memcpy(ipm->mac, buf, OSA_ADDR_LEN);
1656 ipm->is_multicast = 1;
1657 if (!qeth_l3_add_ip(card, ipm))
1658 kfree(ipm);
1659 }
1660}
1661
1662static void qeth_l3_add_vlan_mc(struct qeth_card *card)
1663{
1664 struct in_device *in_dev;
1665 struct vlan_group *vg;
1666 int i;
1667
1668 QETH_DBF_TEXT(trace, 4, "addmcvl");
1669 if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
1670 return;
1671
1672 vg = card->vlangrp;
1673 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
1674 struct net_device *netdev = vlan_group_get_device(vg, i);
1675 if (netdev == NULL ||
1676 !(netdev->flags & IFF_UP))
1677 continue;
1678 in_dev = in_dev_get(netdev);
1679 if (!in_dev)
1680 continue;
1681 read_lock(&in_dev->mc_list_lock);
1682 qeth_l3_add_mc(card, in_dev);
1683 read_unlock(&in_dev->mc_list_lock);
1684 in_dev_put(in_dev);
1685 }
1686}
1687
1688static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
1689{
1690 struct in_device *in4_dev;
1691
1692 QETH_DBF_TEXT(trace, 4, "chkmcv4");
1693 in4_dev = in_dev_get(card->dev);
1694 if (in4_dev == NULL)
1695 return;
1696 read_lock(&in4_dev->mc_list_lock);
1697 qeth_l3_add_mc(card, in4_dev);
1698 qeth_l3_add_vlan_mc(card);
1699 read_unlock(&in4_dev->mc_list_lock);
1700 in_dev_put(in4_dev);
1701}
1702
1703#ifdef CONFIG_QETH_IPV6
1704static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
1705{
1706 struct qeth_ipaddr *ipm;
1707 struct ifmcaddr6 *im6;
1708 char buf[MAX_ADDR_LEN];
1709
1710 QETH_DBF_TEXT(trace, 4, "addmc6");
1711 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
1712 ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
1713 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
1714 if (!ipm)
1715 continue;
1716 ipm->is_multicast = 1;
1717 memcpy(ipm->mac, buf, OSA_ADDR_LEN);
1718 memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
1719 sizeof(struct in6_addr));
1720 if (!qeth_l3_add_ip(card, ipm))
1721 kfree(ipm);
1722 }
1723}
1724
1725static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
1726{
1727 struct inet6_dev *in_dev;
1728 struct vlan_group *vg;
1729 int i;
1730
1731 QETH_DBF_TEXT(trace, 4, "admc6vl");
1732 if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
1733 return;
1734
1735 vg = card->vlangrp;
1736 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
1737 struct net_device *netdev = vlan_group_get_device(vg, i);
1738 if (netdev == NULL ||
1739 !(netdev->flags & IFF_UP))
1740 continue;
1741 in_dev = in6_dev_get(netdev);
1742 if (!in_dev)
1743 continue;
1744 read_lock_bh(&in_dev->lock);
1745 qeth_l3_add_mc6(card, in_dev);
1746 read_unlock_bh(&in_dev->lock);
1747 in6_dev_put(in_dev);
1748 }
1749}
1750
1751static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
1752{
1753 struct inet6_dev *in6_dev;
1754
1755 QETH_DBF_TEXT(trace, 4, "chkmcv6");
1756 if (!qeth_is_supported(card, IPA_IPV6))
1757 return ;
1758 in6_dev = in6_dev_get(card->dev);
1759 if (in6_dev == NULL)
1760 return;
1761 read_lock_bh(&in6_dev->lock);
1762 qeth_l3_add_mc6(card, in6_dev);
1763 qeth_l3_add_vlan_mc6(card);
1764 read_unlock_bh(&in6_dev->lock);
1765 in6_dev_put(in6_dev);
1766}
1767#endif /* CONFIG_QETH_IPV6 */
1768
1769static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
1770 unsigned short vid)
1771{
1772 struct in_device *in_dev;
1773 struct in_ifaddr *ifa;
1774 struct qeth_ipaddr *addr;
1775
1776 QETH_DBF_TEXT(trace, 4, "frvaddr4");
1777
1778 in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid));
1779 if (!in_dev)
1780 return;
1781 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1782 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
1783 if (addr) {
1784 addr->u.a4.addr = ifa->ifa_address;
1785 addr->u.a4.mask = ifa->ifa_mask;
1786 addr->type = QETH_IP_TYPE_NORMAL;
1787 if (!qeth_l3_delete_ip(card, addr))
1788 kfree(addr);
1789 }
1790 }
1791 in_dev_put(in_dev);
1792}
1793
/* Deregister all IPv6 addresses configured on the vlan device with the
 * given vid from the card's IP list; compiled out without IPv6. */
static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
			unsigned short vid)
{
#ifdef CONFIG_QETH_IPV6
	struct inet6_dev *in6_dev;
	struct inet6_ifaddr *ifa;
	struct qeth_ipaddr *addr;

	QETH_DBF_TEXT(trace, 4, "frvaddr6");

	in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
	if (!in6_dev)
		return;
	for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next) {
		addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
		if (!addr)
			continue;
		memcpy(&addr->u.a6.addr, &ifa->addr,
		       sizeof(struct in6_addr));
		addr->u.a6.pfxlen = ifa->prefix_len;
		addr->type = QETH_IP_TYPE_NORMAL;
		/* qeth_l3_delete_ip takes ownership when it returns true */
		if (!qeth_l3_delete_ip(card, addr))
			kfree(addr);
	}
	in6_dev_put(in6_dev);
#endif /* CONFIG_QETH_IPV6 */
}
1821
1822static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
1823 unsigned short vid)
1824{
1825 if (!card->vlangrp)
1826 return;
1827 qeth_l3_free_vlan_addresses4(card, vid);
1828 qeth_l3_free_vlan_addresses6(card, vid);
1829}
1830
/* VLAN rx_register hook: remember the vlan group attached to the
 * device, serialized against other vlan updates by card->vlanlock. */
static void qeth_l3_vlan_rx_register(struct net_device *dev,
			struct vlan_group *grp)
{
	struct qeth_card *card = netdev_priv(dev);
	unsigned long flags;

	QETH_DBF_TEXT(trace, 4, "vlanreg");
	spin_lock_irqsave(&card->vlanlock, flags);
	card->vlangrp = grp;
	spin_unlock_irqrestore(&card->vlanlock, flags);
}
1842
1843static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1844{
1845 struct net_device *vlandev;
1846 struct qeth_card *card = (struct qeth_card *) dev->priv;
1847 struct in_device *in_dev;
1848
1849 if (card->info.type == QETH_CARD_TYPE_IQD)
1850 return;
1851
1852 vlandev = vlan_group_get_device(card->vlangrp, vid);
1853 vlandev->neigh_setup = qeth_l3_neigh_setup;
1854
1855 in_dev = in_dev_get(vlandev);
1856#ifdef CONFIG_SYSCTL
1857 neigh_sysctl_unregister(in_dev->arp_parms);
1858#endif
1859 neigh_parms_release(&arp_tbl, in_dev->arp_parms);
1860
1861 in_dev->arp_parms = neigh_parms_alloc(vlandev, &arp_tbl);
1862#ifdef CONFIG_SYSCTL
1863 neigh_sysctl_register(vlandev, in_dev->arp_parms, NET_IPV4,
1864 NET_IPV4_NEIGH, "ipv4", NULL, NULL);
1865#endif
1866 in_dev_put(in_dev);
1867 return;
1868}
1869
/* VLAN rx_kill_vid hook: drop the IP addresses registered for the vlan
 * device, detach it from the group and refresh the multicast state. */
static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct qeth_card *card = netdev_priv(dev);
	unsigned long flags;

	QETH_DBF_TEXT_(trace, 4, "kid:%d", vid);
	spin_lock_irqsave(&card->vlanlock, flags);
	/* unregister IP addresses of vlan device */
	qeth_l3_free_vlan_addresses(card, vid);
	vlan_group_set_device(card->vlangrp, vid, NULL);
	spin_unlock_irqrestore(&card->vlanlock, flags);
	/* rebuild the multicast registrations without this vlan */
	qeth_l3_set_multicast_list(card->dev);
}
1883
1884static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
1885 struct sk_buff *skb, struct qeth_hdr *hdr)
1886{
1887 unsigned short vlan_id = 0;
1888 __be16 prot;
1889 struct iphdr *ip_hdr;
1890 unsigned char tg_addr[MAX_ADDR_LEN];
1891
1892 if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
1893 prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
1894 ETH_P_IP);
1895 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
1896 case QETH_CAST_MULTICAST:
1897 switch (prot) {
1898#ifdef CONFIG_QETH_IPV6
1899 case __constant_htons(ETH_P_IPV6):
1900 ndisc_mc_map((struct in6_addr *)
1901 skb->data + 24,
1902 tg_addr, card->dev, 0);
1903 break;
1904#endif
1905 case __constant_htons(ETH_P_IP):
1906 ip_hdr = (struct iphdr *)skb->data;
1907 (card->dev->type == ARPHRD_IEEE802_TR) ?
1908 ip_tr_mc_map(ip_hdr->daddr, tg_addr):
1909 ip_eth_mc_map(ip_hdr->daddr, tg_addr);
1910 break;
1911 default:
1912 memcpy(tg_addr, card->dev->broadcast,
1913 card->dev->addr_len);
1914 }
1915 card->stats.multicast++;
1916 skb->pkt_type = PACKET_MULTICAST;
1917 break;
1918 case QETH_CAST_BROADCAST:
1919 memcpy(tg_addr, card->dev->broadcast,
1920 card->dev->addr_len);
1921 card->stats.multicast++;
1922 skb->pkt_type = PACKET_BROADCAST;
1923 break;
1924 case QETH_CAST_UNICAST:
1925 case QETH_CAST_ANYCAST:
1926 case QETH_CAST_NOCAST:
1927 default:
1928 skb->pkt_type = PACKET_HOST;
1929 memcpy(tg_addr, card->dev->dev_addr,
1930 card->dev->addr_len);
1931 }
1932 card->dev->header_ops->create(skb, card->dev, prot, tg_addr,
1933 "FAKELL", card->dev->addr_len);
1934 }
1935
1936#ifdef CONFIG_TR
1937 if (card->dev->type == ARPHRD_IEEE802_TR)
1938 skb->protocol = tr_type_trans(skb, card->dev);
1939 else
1940#endif
1941 skb->protocol = eth_type_trans(skb, card->dev);
1942
1943 if (hdr->hdr.l3.ext_flags &
1944 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
1945 vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
1946 hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
1947 }
1948
1949 skb->ip_summed = card->options.checksum_type;
1950 if (card->options.checksum_type == HW_CHECKSUMMING) {
1951 if ((hdr->hdr.l3.ext_flags &
1952 (QETH_HDR_EXT_CSUM_HDR_REQ |
1953 QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
1954 (QETH_HDR_EXT_CSUM_HDR_REQ |
1955 QETH_HDR_EXT_CSUM_TRANSP_REQ))
1956 skb->ip_summed = CHECKSUM_UNNECESSARY;
1957 else
1958 skb->ip_summed = SW_CHECKSUMMING;
1959 }
1960
1961 return vlan_id;
1962}
1963
1964static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
1965 struct qeth_qdio_buffer *buf, int index)
1966{
1967 struct qdio_buffer_element *element;
1968 struct sk_buff *skb;
1969 struct qeth_hdr *hdr;
1970 int offset;
1971 __u16 vlan_tag = 0;
1972 unsigned int len;
1973
1974 /* get first element of current buffer */
1975 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
1976 offset = 0;
1977 if (card->options.performance_stats)
1978 card->perf_stats.bufs_rec++;
1979 while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element,
1980 &offset, &hdr))) {
1981 skb->dev = card->dev;
1982 /* is device UP ? */
1983 if (!(card->dev->flags & IFF_UP)) {
1984 dev_kfree_skb_any(skb);
1985 continue;
1986 }
1987
1988 switch (hdr->hdr.l3.id) {
1989 case QETH_HEADER_TYPE_LAYER3:
1990 vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr);
1991 len = skb->len;
1992 if (vlan_tag)
1993 if (card->vlangrp)
1994 vlan_hwaccel_rx(skb, card->vlangrp,
1995 vlan_tag);
1996 else {
1997 dev_kfree_skb_any(skb);
1998 continue;
1999 }
2000 else
2001 netif_rx(skb);
2002 break;
2003 default:
2004 dev_kfree_skb_any(skb);
2005 QETH_DBF_TEXT(trace, 3, "inbunkno");
2006 QETH_DBF_HEX(control, 3, hdr, QETH_DBF_CONTROL_LEN);
2007 continue;
2008 }
2009
2010 card->dev->last_rx = jiffies;
2011 card->stats.rx_packets++;
2012 card->stats.rx_bytes += len;
2013 }
2014}
2015
2016static int qeth_l3_verify_vlan_dev(struct net_device *dev,
2017 struct qeth_card *card)
2018{
2019 int rc = 0;
2020 struct vlan_group *vg;
2021 int i;
2022
2023 vg = card->vlangrp;
2024 if (!vg)
2025 return rc;
2026
2027 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
2028 if (vlan_group_get_device(vg, i) == dev) {
2029 rc = QETH_VLAN_CARD;
2030 break;
2031 }
2032 }
2033
2034 if (rc && !(netdev_priv(vlan_dev_info(dev)->real_dev) == (void *)card))
2035 return 0;
2036
2037 return rc;
2038}
2039
/* Classify dev against all registered cards: QETH_REAL_CARD when it is
 * a qeth card's own net_device, QETH_VLAN_CARD when it is a vlan device
 * stacked on one, 0 when it belongs to no qeth card. */
static int qeth_l3_verify_dev(struct net_device *dev)
{
	struct qeth_card *card;
	unsigned long flags;
	int rc = 0;

	read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
	list_for_each_entry(card, &qeth_core_card_list.list, list) {
		if (card->dev == dev) {
			rc = QETH_REAL_CARD;
			break;
		}
		rc = qeth_l3_verify_vlan_dev(dev, card);
		if (rc)
			break;
	}
	read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);

	return rc;
}
2060
2061static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
2062{
2063 struct qeth_card *card = NULL;
2064 int rc;
2065
2066 rc = qeth_l3_verify_dev(dev);
2067 if (rc == QETH_REAL_CARD)
2068 card = netdev_priv(dev);
2069 else if (rc == QETH_VLAN_CARD)
2070 card = netdev_priv(vlan_dev_info(dev)->real_dev);
2071 if (card->options.layer2)
2072 card = NULL;
2073 QETH_DBF_TEXT_(trace, 4, "%d", rc);
2074 return card ;
2075}
2076
/* Take the card down through its state machine: stop the interface,
 * optionally send STOPLAN, flush IP and pending-command state, release
 * the unique id, clear the QDIO queues and command buffers. */
static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
{
	int rc = 0;

	QETH_DBF_TEXT(setup, 2, "stopcard");
	QETH_DBF_HEX(setup, 2, &card, sizeof(void *));

	/* block all worker threads except recovery and wait them out */
	qeth_set_allowed_threads(card, 0, 1);
	if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD))
		return -ERESTARTSYS;
	if (card->read.state == CH_STATE_UP &&
	    card->write.state == CH_STATE_UP &&
	    (card->state == CARD_STATE_UP)) {
		if (recovery_mode)
			qeth_l3_stop(card->dev);
		if (!card->use_hard_stop) {
			/* graceful shutdown: tell the adapter first */
			rc = qeth_send_stoplan(card);
			if (rc)
				QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
		}
		card->state = CARD_STATE_SOFTSETUP;
	}
	if (card->state == CARD_STATE_SOFTSETUP) {
		qeth_l3_clear_ip_list(card, !card->use_hard_stop, 1);
		qeth_clear_ipacmd_list(card);
		card->state = CARD_STATE_HARDSETUP;
	}
	if (card->state == CARD_STATE_HARDSETUP) {
		if (!card->use_hard_stop &&
		    (card->info.type != QETH_CARD_TYPE_IQD)) {
			/* give the unique id back for reuse */
			rc = qeth_l3_put_unique_id(card);
			if (rc)
				QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
		}
		qeth_qdio_clear_card(card, 0);
		qeth_clear_qdio_buffers(card);
		qeth_clear_working_pool_list(card);
		card->state = CARD_STATE_DOWN;
	}
	if (card->state == CARD_STATE_DOWN) {
		qeth_clear_cmd_buffers(&card->read);
		qeth_clear_cmd_buffers(&card->write);
	}
	card->use_hard_stop = 0;
	return rc;
}
2123
2124static void qeth_l3_set_multicast_list(struct net_device *dev)
2125{
2126 struct qeth_card *card = netdev_priv(dev);
2127
2128 QETH_DBF_TEXT(trace, 3, "setmulti");
2129 qeth_l3_delete_mc_addresses(card);
2130 qeth_l3_add_multicast_ipv4(card);
2131#ifdef CONFIG_QETH_IPV6
2132 qeth_l3_add_multicast_ipv6(card);
2133#endif
2134 qeth_l3_set_ip_addr_list(card);
2135 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
2136 return;
2137 qeth_setadp_promisc_mode(card);
2138}
2139
/* Translate an adapter ARP return code (passed in *rc) into a negative
 * errno written back through *rc, and return a matching description. */
static const char *qeth_l3_arp_get_error_cause(int *rc)
{
	switch (*rc) {
	case QETH_IPA_ARP_RC_FAILED:
		*rc = -EIO;
		return "operation failed";
	case QETH_IPA_ARP_RC_NOTSUPP:
		*rc = -EOPNOTSUPP;
		return "operation not supported";
	case QETH_IPA_ARP_RC_OUT_OF_RANGE:
		*rc = -EINVAL;
		return "argument out of range";
	case QETH_IPA_ARP_RC_Q_NOTSUPP:
		*rc = -EOPNOTSUPP;
		return "query operation not supported";
	case QETH_IPA_ARP_RC_Q_NO_DATA:
		*rc = -ENOENT;
		return "no query data available";
	default:
		/* unknown codes are passed through unchanged in *rc */
		return "unknown error";
	}
}
2162
/* Set the size of the adapter's ARP cache to no_entries.  Returns 0 or
 * a negative errno. */
static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
{
	int tmp;
	int rc;

	QETH_DBF_TEXT(trace, 3, "arpstnoe");

	/*
	 * currently GuestLAN only supports the ARP assist function
	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
	 * thus we say EOPNOTSUPP for this ARP function
	 */
	if (card->info.guestlan)
		return -EOPNOTSUPP;
	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
		PRINT_WARN("ARP processing not supported "
			   "on %s!\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}
	rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
					     IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
					     no_entries);
	if (rc) {
		/* keep the raw code for the message; rc becomes an errno */
		tmp = rc;
		PRINT_WARN("Could not set number of ARP entries on %s: "
			   "%s (0x%x/%d)\n", QETH_CARD_IFNAME(card),
			   qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
	}
	return rc;
}
2193
2194static void qeth_l3_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
2195 struct qeth_arp_query_data *qdata, int entry_size,
2196 int uentry_size)
2197{
2198 char *entry_ptr;
2199 char *uentry_ptr;
2200 int i;
2201
2202 entry_ptr = (char *)&qdata->data;
2203 uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
2204 for (i = 0; i < qdata->no_entries; ++i) {
2205 /* strip off 32 bytes "media specific information" */
2206 memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
2207 entry_ptr += entry_size;
2208 uentry_ptr += uentry_size;
2209 }
2210}
2211
/* Callback for IPA_CMD_ASS_ARP_QUERY_INFO: accumulate the returned ARP
 * entries into the buffer carried in reply->param.  Returns 1 while
 * more reply parts are expected, 0 when done; on error the entry count
 * at the start of the buffer is set to 0. */
static int qeth_l3_arp_query_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_arp_query_data *qdata;
	struct qeth_arp_query_info *qinfo;
	int entry_size;
	int uentry_size;
	int i;

	QETH_DBF_TEXT(trace, 4, "arpquecb");

	qinfo = (struct qeth_arp_query_info *) reply->param;
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		QETH_DBF_TEXT_(trace, 4, "qaer1%i", cmd->hdr.return_code);
		return 0;
	}
	if (cmd->data.setassparms.hdr.return_code) {
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
		QETH_DBF_TEXT_(trace, 4, "qaer2%i", cmd->hdr.return_code);
		return 0;
	}
	qdata = &cmd->data.setassparms.data.query_arp;
	/* pick on-wire and user-visible entry sizes by reply format */
	switch (qdata->reply_bits) {
	case 5:
		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
			uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
		break;
	case 7:
		/* fall through to default */
	default:
		/* tr is the same as eth -> entry7 */
		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
			uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
		break;
	}
	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) <
		qdata->no_entries * uentry_size){
		QETH_DBF_TEXT_(trace, 4, "qaer3%i", -ENOMEM);
		cmd->hdr.return_code = -ENOMEM;
		PRINT_WARN("query ARP user space buffer is too small for "
			   "the returned number of ARP entries. "
			   "Aborting query!\n");
		goto out_error;
	}
	QETH_DBF_TEXT_(trace, 4, "anore%i",
		       cmd->data.setassparms.hdr.number_of_replies);
	QETH_DBF_TEXT_(trace, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
	QETH_DBF_TEXT_(trace, 4, "anoen%i", qdata->no_entries);

	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
		/* strip off "media specific information" */
		qeth_l3_copy_arp_entries_stripped(qinfo, qdata, entry_size,
						  uentry_size);
	} else
		/*copy entries to user buffer*/
		memcpy(qinfo->udata + qinfo->udata_offset,
		       (char *)&qdata->data, qdata->no_entries*uentry_size);

	qinfo->no_entries += qdata->no_entries;
	qinfo->udata_offset += (qdata->no_entries*uentry_size);
	/* check if all replies received ... */
	if (cmd->data.setassparms.hdr.seq_no <
	    cmd->data.setassparms.hdr.number_of_replies)
		return 1;
	/* all parts in: write total entry count into the buffer header */
	memcpy(qinfo->udata, &qinfo->no_entries, 4);
	/* keep STRIP_ENTRIES flag so the user program can distinguish
	 * stripped entries from normal ones */
	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
		qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
	memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
	return 0;
out_error:
	i = 0;
	memcpy(qinfo->udata, &i, 4);
	return 0;
}
2293
/* Frame and send an ARP assist command: the buffer gets the raw IPA PDU
 * header plus the ULP connection token before being handed to the
 * control-data path with the given reply callback. */
static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card,
		struct qeth_cmd_buffer *iob, int len,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
			unsigned long),
		void *reply_param)
{
	QETH_DBF_TEXT(trace, 4, "sendarp");

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
				      reply_cb, reply_param);
}
2308
2309static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
2310{
2311 struct qeth_cmd_buffer *iob;
2312 struct qeth_arp_query_info qinfo = {0, };
2313 int tmp;
2314 int rc;
2315
2316 QETH_DBF_TEXT(trace, 3, "arpquery");
2317
2318 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
2319 IPA_ARP_PROCESSING)) {
2320 PRINT_WARN("ARP processing not supported "
2321 "on %s!\n", QETH_CARD_IFNAME(card));
2322 return -EOPNOTSUPP;
2323 }
2324 /* get size of userspace buffer and mask_bits -> 6 bytes */
2325 if (copy_from_user(&qinfo, udata, 6))
2326 return -EFAULT;
2327 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
2328 if (!qinfo.udata)
2329 return -ENOMEM;
2330 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
2331 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
2332 IPA_CMD_ASS_ARP_QUERY_INFO,
2333 sizeof(int), QETH_PROT_IPV4);
2334
2335 rc = qeth_l3_send_ipa_arp_cmd(card, iob,
2336 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
2337 qeth_l3_arp_query_cb, (void *)&qinfo);
2338 if (rc) {
2339 tmp = rc;
2340 PRINT_WARN("Error while querying ARP cache on %s: %s "
2341 "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
2342 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2343 if (copy_to_user(udata, qinfo.udata, 4))
2344 rc = -EFAULT;
2345 } else {
2346 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
2347 rc = -EFAULT;
2348 }
2349 kfree(qinfo.udata);
2350 return rc;
2351}
2352
/* Add one entry to the adapter's ARP cache.  Returns 0 or a negative
 * errno. */
static int qeth_l3_arp_add_entry(struct qeth_card *card,
				struct qeth_arp_cache_entry *entry)
{
	struct qeth_cmd_buffer *iob;
	char buf[16];
	int tmp;
	int rc;

	QETH_DBF_TEXT(trace, 3, "arpadent");

	/*
	 * currently GuestLAN only supports the ARP assist function
	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
	 * thus we say EOPNOTSUPP for this ARP function
	 */
	if (card->info.guestlan)
		return -EOPNOTSUPP;
	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
		PRINT_WARN("ARP processing not supported "
			   "on %s!\n", QETH_CARD_IFNAME(card));
		return -EOPNOTSUPP;
	}

	iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
					  IPA_CMD_ASS_ARP_ADD_ENTRY,
					  sizeof(struct qeth_arp_cache_entry),
					  QETH_PROT_IPV4);
	rc = qeth_l3_send_setassparms(card, iob,
				      sizeof(struct qeth_arp_cache_entry),
				      (unsigned long) entry,
				      qeth_l3_default_setassparms_cb, NULL);
	if (rc) {
		/* keep the raw code for the message; rc becomes an errno */
		tmp = rc;
		qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
		PRINT_WARN("Could not add ARP entry for address %s on %s: "
			   "%s (0x%x/%d)\n",
			   buf, QETH_CARD_IFNAME(card),
			   qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
	}
	return rc;
}
2394
2395static int qeth_l3_arp_remove_entry(struct qeth_card *card,
2396 struct qeth_arp_cache_entry *entry)
2397{
2398 struct qeth_cmd_buffer *iob;
2399 char buf[16] = {0, };
2400 int tmp;
2401 int rc;
2402
2403 QETH_DBF_TEXT(trace, 3, "arprment");
2404
2405 /*
2406 * currently GuestLAN only supports the ARP assist function
2407 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
2408 * thus we say EOPNOTSUPP for this ARP function
2409 */
2410 if (card->info.guestlan)
2411 return -EOPNOTSUPP;
2412 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
2413 PRINT_WARN("ARP processing not supported "
2414 "on %s!\n", QETH_CARD_IFNAME(card));
2415 return -EOPNOTSUPP;
2416 }
2417 memcpy(buf, entry, 12);
2418 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
2419 IPA_CMD_ASS_ARP_REMOVE_ENTRY,
2420 12,
2421 QETH_PROT_IPV4);
2422 rc = qeth_l3_send_setassparms(card, iob,
2423 12, (unsigned long)buf,
2424 qeth_l3_default_setassparms_cb, NULL);
2425 if (rc) {
2426 tmp = rc;
2427 memset(buf, 0, 16);
2428 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
2429 PRINT_WARN("Could not delete ARP entry for address %s on %s: "
2430 "%s (0x%x/%d)\n",
2431 buf, QETH_CARD_IFNAME(card),
2432 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2433 }
2434 return rc;
2435}
2436
2437static int qeth_l3_arp_flush_cache(struct qeth_card *card)
2438{
2439 int rc;
2440 int tmp;
2441
2442 QETH_DBF_TEXT(trace, 3, "arpflush");
2443
2444 /*
2445 * currently GuestLAN only supports the ARP assist function
2446 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
2447 * thus we say EOPNOTSUPP for this ARP function
2448 */
2449 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
2450 return -EOPNOTSUPP;
2451 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
2452 PRINT_WARN("ARP processing not supported "
2453 "on %s!\n", QETH_CARD_IFNAME(card));
2454 return -EOPNOTSUPP;
2455 }
2456 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
2457 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
2458 if (rc) {
2459 tmp = rc;
2460 PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n",
2461 QETH_CARD_IFNAME(card),
2462 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2463 }
2464 return rc;
2465}
2466
2467static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2468{
2469 struct qeth_card *card = netdev_priv(dev);
2470 struct qeth_arp_cache_entry arp_entry;
2471 struct mii_ioctl_data *mii_data;
2472 int rc = 0;
2473
2474 if (!card)
2475 return -ENODEV;
2476
2477 if ((card->state != CARD_STATE_UP) &&
2478 (card->state != CARD_STATE_SOFTSETUP))
2479 return -ENODEV;
2480
2481 switch (cmd) {
2482 case SIOC_QETH_ARP_SET_NO_ENTRIES:
2483 if (!capable(CAP_NET_ADMIN)) {
2484 rc = -EPERM;
2485 break;
2486 }
2487 rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
2488 break;
2489 case SIOC_QETH_ARP_QUERY_INFO:
2490 if (!capable(CAP_NET_ADMIN)) {
2491 rc = -EPERM;
2492 break;
2493 }
2494 rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
2495 break;
2496 case SIOC_QETH_ARP_ADD_ENTRY:
2497 if (!capable(CAP_NET_ADMIN)) {
2498 rc = -EPERM;
2499 break;
2500 }
2501 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
2502 sizeof(struct qeth_arp_cache_entry)))
2503 rc = -EFAULT;
2504 else
2505 rc = qeth_l3_arp_add_entry(card, &arp_entry);
2506 break;
2507 case SIOC_QETH_ARP_REMOVE_ENTRY:
2508 if (!capable(CAP_NET_ADMIN)) {
2509 rc = -EPERM;
2510 break;
2511 }
2512 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
2513 sizeof(struct qeth_arp_cache_entry)))
2514 rc = -EFAULT;
2515 else
2516 rc = qeth_l3_arp_remove_entry(card, &arp_entry);
2517 break;
2518 case SIOC_QETH_ARP_FLUSH_CACHE:
2519 if (!capable(CAP_NET_ADMIN)) {
2520 rc = -EPERM;
2521 break;
2522 }
2523 rc = qeth_l3_arp_flush_cache(card);
2524 break;
2525 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
2526 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
2527 break;
2528 case SIOC_QETH_GET_CARD_TYPE:
2529 if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
2530 !card->info.guestlan)
2531 return 1;
2532 return 0;
2533 break;
2534 case SIOCGMIIPHY:
2535 mii_data = if_mii(rq);
2536 mii_data->phy_id = 0;
2537 break;
2538 case SIOCGMIIREG:
2539 mii_data = if_mii(rq);
2540 if (mii_data->phy_id != 0)
2541 rc = -EINVAL;
2542 else
2543 mii_data->val_out = qeth_mdio_read(dev,
2544 mii_data->phy_id,
2545 mii_data->reg_num);
2546 break;
2547 default:
2548 rc = -EOPNOTSUPP;
2549 }
2550 if (rc)
2551 QETH_DBF_TEXT_(trace, 2, "ioce%d", rc);
2552 return rc;
2553}
2554
/*
 * Build the layer-3 qeth transport header for an outgoing frame.
 * @ipv is 4, 6 or 0 (non-IP passthrough); @cast_type is an RTN_* value.
 * The header carries cast flags, optional VLAN information and the
 * next-hop (or final destination) address for the adapter.
 */
static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
		struct sk_buff *skb, int ipv, int cast_type)
{
	QETH_DBF_TEXT(trace, 6, "fillhdr");

	memset(hdr, 0, sizeof(struct qeth_hdr));
	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
	hdr->hdr.l3.ext_flags = 0;

	/*
	 * before we're going to overwrite this location with next hop ip.
	 * v6 uses passthrough, v4 sets the tag in the QDIO header.
	 */
	if (card->vlangrp && vlan_tx_tag_present(skb)) {
		hdr->hdr.l3.ext_flags = (ipv == 4) ?
			QETH_HDR_EXT_VLAN_FRAME :
			QETH_HDR_EXT_INCLUDE_VLAN_TAG;
		hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
	}

	hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
	if (ipv == 4) {
		/* IPv4: IPv4-mapped form, address in the last 4 of 16 bytes */
		hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
		memset(hdr->hdr.l3.dest_addr, 0, 12);
		/* prefer the routing next hop from the neighbour entry */
		if ((skb->dst) && (skb->dst->neighbour)) {
			*((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
			    *((u32 *) skb->dst->neighbour->primary_key);
		} else {
			/* fill in destination address used in ip header */
			*((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
			    ip_hdr(skb)->daddr;
		}
	} else if (ipv == 6) {
		/* IPv6: full 16-byte address */
		hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
		/* HiperSockets never use passthrough mode for IPv6 */
		if (card->info.type == QETH_CARD_TYPE_IQD)
			hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
		if ((skb->dst) && (skb->dst->neighbour)) {
			memcpy(hdr->hdr.l3.dest_addr,
			       skb->dst->neighbour->primary_key, 16);
		} else {
			/* fill in destination address used in ip header */
			memcpy(hdr->hdr.l3.dest_addr,
			       &ipv6_hdr(skb)->daddr, 16);
		}
	} else {
		/* passthrough: classify by link-level destination address;
		 * token ring carries an extra 2-byte field before the MAC */
		if ((skb->dev->type == ARPHRD_IEEE802_TR) &&
			!memcmp(skb->data + sizeof(struct qeth_hdr) +
			sizeof(__u16), skb->dev->broadcast, 6)) {
			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
						QETH_HDR_PASSTHRU;
		} else if (!memcmp(skb->data + sizeof(struct qeth_hdr),
			    skb->dev->broadcast, 6)) {
			/* broadcast? */
			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
						QETH_HDR_PASSTHRU;
		} else {
			hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
				QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
				QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
		}
	}
}
2620
2621static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2622{
2623 int rc;
2624 u16 *tag;
2625 struct qeth_hdr *hdr = NULL;
2626 int elements_needed = 0;
2627 struct qeth_card *card = netdev_priv(dev);
2628 struct sk_buff *new_skb = NULL;
2629 int ipv = qeth_get_ip_version(skb);
2630 int cast_type = qeth_get_cast_type(card, skb);
2631 struct qeth_qdio_out_q *queue = card->qdio.out_qs
2632 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
2633 int tx_bytes = skb->len;
2634 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
2635 struct qeth_eddp_context *ctx = NULL;
2636
2637 QETH_DBF_TEXT(trace, 6, "l3xmit");
2638
2639 if ((card->info.type == QETH_CARD_TYPE_IQD) &&
2640 (skb->protocol != htons(ETH_P_IPV6)) &&
2641 (skb->protocol != htons(ETH_P_IP)))
2642 goto tx_drop;
2643
2644 if ((card->state != CARD_STATE_UP) || !card->lan_online) {
2645 card->stats.tx_carrier_errors++;
2646 goto tx_drop;
2647 }
2648
2649 if ((cast_type == RTN_BROADCAST) &&
2650 (card->info.broadcast_capable == 0))
2651 goto tx_drop;
2652
2653 if (card->options.performance_stats) {
2654 card->perf_stats.outbound_cnt++;
2655 card->perf_stats.outbound_start_time = qeth_get_micros();
2656 }
2657
2658 /* create a clone with writeable headroom */
2659 new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) +
2660 VLAN_HLEN);
2661 if (!new_skb)
2662 goto tx_drop;
2663
2664 if (card->info.type == QETH_CARD_TYPE_IQD) {
2665 skb_pull(new_skb, ETH_HLEN);
2666 } else {
2667 if (new_skb->protocol == htons(ETH_P_IP)) {
2668 if (card->dev->type == ARPHRD_IEEE802_TR)
2669 skb_pull(new_skb, TR_HLEN);
2670 else
2671 skb_pull(new_skb, ETH_HLEN);
2672 }
2673
2674 if (new_skb->protocol == ETH_P_IPV6 && card->vlangrp &&
2675 vlan_tx_tag_present(new_skb)) {
2676 skb_push(new_skb, VLAN_HLEN);
2677 skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
2678 skb_copy_to_linear_data_offset(new_skb, 4,
2679 new_skb->data + 8, 4);
2680 skb_copy_to_linear_data_offset(new_skb, 8,
2681 new_skb->data + 12, 4);
2682 tag = (u16 *)(new_skb->data + 12);
2683 *tag = __constant_htons(ETH_P_8021Q);
2684 *(tag + 1) = htons(vlan_tx_tag_get(new_skb));
2685 VLAN_TX_SKB_CB(new_skb)->magic = 0;
2686 }
2687 }
2688
2689 netif_stop_queue(dev);
2690
2691 if (skb_is_gso(new_skb))
2692 large_send = card->options.large_send;
2693
2694 /* fix hardware limitation: as long as we do not have sbal
2695 * chaining we can not send long frag lists so we temporary
2696 * switch to EDDP
2697 */
2698 if ((large_send == QETH_LARGE_SEND_TSO) &&
2699 ((skb_shinfo(new_skb)->nr_frags + 2) > 16))
2700 large_send = QETH_LARGE_SEND_EDDP;
2701
2702 if ((large_send == QETH_LARGE_SEND_TSO) &&
2703 (cast_type == RTN_UNSPEC)) {
2704 hdr = (struct qeth_hdr *)skb_push(new_skb,
2705 sizeof(struct qeth_hdr_tso));
2706 memset(hdr, 0, sizeof(struct qeth_hdr_tso));
2707 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
2708 qeth_tso_fill_header(card, hdr, new_skb);
2709 elements_needed++;
2710 } else {
2711 hdr = (struct qeth_hdr *)skb_push(new_skb,
2712 sizeof(struct qeth_hdr));
2713 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
2714 }
2715
2716 if (large_send == QETH_LARGE_SEND_EDDP) {
2717 /* new_skb is not owned by a socket so we use skb to get
2718 * the protocol
2719 */
2720 ctx = qeth_eddp_create_context(card, new_skb, hdr,
2721 skb->sk->sk_protocol);
2722 if (ctx == NULL) {
2723 PRINT_WARN("could not create eddp context\n");
2724 goto tx_drop;
2725 }
2726 } else {
2727 int elems = qeth_get_elements_no(card, (void *)hdr, new_skb,
2728 elements_needed);
2729 if (!elems)
2730 goto tx_drop;
2731 elements_needed += elems;
2732 }
2733
2734 if ((large_send == QETH_LARGE_SEND_NO) &&
2735 (new_skb->ip_summed == CHECKSUM_PARTIAL))
2736 qeth_tx_csum(new_skb);
2737
2738 if (card->info.type != QETH_CARD_TYPE_IQD)
2739 rc = qeth_do_send_packet(card, queue, new_skb, hdr,
2740 elements_needed, ctx);
2741 else
2742 rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
2743 elements_needed, ctx);
2744
2745 if (!rc) {
2746 card->stats.tx_packets++;
2747 card->stats.tx_bytes += tx_bytes;
2748 if (new_skb != skb)
2749 dev_kfree_skb_any(skb);
2750 if (card->options.performance_stats) {
2751 if (large_send != QETH_LARGE_SEND_NO) {
2752 card->perf_stats.large_send_bytes += tx_bytes;
2753 card->perf_stats.large_send_cnt++;
2754 }
2755 if (skb_shinfo(new_skb)->nr_frags > 0) {
2756 card->perf_stats.sg_skbs_sent++;
2757 /* nr_frags + skb->data */
2758 card->perf_stats.sg_frags_sent +=
2759 skb_shinfo(new_skb)->nr_frags + 1;
2760 }
2761 }
2762
2763 if (ctx != NULL) {
2764 qeth_eddp_put_context(ctx);
2765 dev_kfree_skb_any(new_skb);
2766 }
2767 } else {
2768 if (ctx != NULL)
2769 qeth_eddp_put_context(ctx);
2770
2771 if (rc == -EBUSY) {
2772 if (new_skb != skb)
2773 dev_kfree_skb_any(new_skb);
2774 return NETDEV_TX_BUSY;
2775 } else
2776 goto tx_drop;
2777 }
2778
2779 netif_wake_queue(dev);
2780 if (card->options.performance_stats)
2781 card->perf_stats.outbound_time += qeth_get_micros() -
2782 card->perf_stats.outbound_start_time;
2783 return rc;
2784
2785tx_drop:
2786 card->stats.tx_dropped++;
2787 card->stats.tx_errors++;
2788 if ((new_skb != skb) && new_skb)
2789 dev_kfree_skb_any(new_skb);
2790 dev_kfree_skb_any(skb);
2791 return NETDEV_TX_OK;
2792}
2793
2794static int qeth_l3_open(struct net_device *dev)
2795{
2796 struct qeth_card *card = netdev_priv(dev);
2797
2798 QETH_DBF_TEXT(trace, 4, "qethopen");
2799 if (card->state != CARD_STATE_SOFTSETUP)
2800 return -ENODEV;
2801 card->data.state = CH_STATE_UP;
2802 card->state = CARD_STATE_UP;
2803 card->dev->flags |= IFF_UP;
2804 netif_start_queue(dev);
2805
2806 if (!card->lan_online && netif_carrier_ok(dev))
2807 netif_carrier_off(dev);
2808 return 0;
2809}
2810
2811static int qeth_l3_stop(struct net_device *dev)
2812{
2813 struct qeth_card *card = netdev_priv(dev);
2814
2815 QETH_DBF_TEXT(trace, 4, "qethstop");
2816 netif_tx_disable(dev);
2817 card->dev->flags &= ~IFF_UP;
2818 if (card->state == CARD_STATE_UP)
2819 card->state = CARD_STATE_SOFTSETUP;
2820 return 0;
2821}
2822
2823static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev)
2824{
2825 struct qeth_card *card = netdev_priv(dev);
2826
2827 return (card->options.checksum_type == HW_CHECKSUMMING);
2828}
2829
2830static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data)
2831{
2832 struct qeth_card *card = netdev_priv(dev);
2833 enum qeth_card_states old_state;
2834 enum qeth_checksum_types csum_type;
2835
2836 if ((card->state != CARD_STATE_UP) &&
2837 (card->state != CARD_STATE_DOWN))
2838 return -EPERM;
2839
2840 if (data)
2841 csum_type = HW_CHECKSUMMING;
2842 else
2843 csum_type = SW_CHECKSUMMING;
2844
2845 if (card->options.checksum_type != csum_type) {
2846 old_state = card->state;
2847 if (card->state == CARD_STATE_UP)
2848 __qeth_l3_set_offline(card->gdev, 1);
2849 card->options.checksum_type = csum_type;
2850 if (old_state == CARD_STATE_UP)
2851 __qeth_l3_set_online(card->gdev, 1);
2852 }
2853 return 0;
2854}
2855
2856static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
2857{
2858 struct qeth_card *card = netdev_priv(dev);
2859
2860 if (data) {
2861 if (card->options.large_send == QETH_LARGE_SEND_NO) {
2862 if (card->info.type == QETH_CARD_TYPE_IQD)
2863 card->options.large_send = QETH_LARGE_SEND_EDDP;
2864 else
2865 card->options.large_send = QETH_LARGE_SEND_TSO;
2866 dev->features |= NETIF_F_TSO;
2867 }
2868 } else {
2869 dev->features &= ~NETIF_F_TSO;
2870 card->options.large_send = QETH_LARGE_SEND_NO;
2871 }
2872 return 0;
2873}
2874
/* ethtool entry points for the layer-3 discipline: generic stack helpers
 * for link/csum/sg/tso queries, qeth-specific toggles for rx-csum and
 * tso, and shared qeth core callbacks for stats and driver info. */
static struct ethtool_ops qeth_l3_ethtool_ops = {
	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_hw_csum,
	.get_rx_csum = qeth_l3_ethtool_get_rx_csum,
	.set_rx_csum = qeth_l3_ethtool_set_rx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = qeth_l3_ethtool_set_tso,
	.get_strings = qeth_core_get_strings,
	.get_ethtool_stats = qeth_core_get_ethtool_stats,
	.get_stats_count = qeth_core_get_stats_count,
	.get_drvinfo = qeth_core_get_drvinfo,
};
2890
2891/*
2892 * we need NOARP for IPv4 but we want neighbor solicitation for IPv6. Setting
2893 * NOARP on the netdevice is no option because it also turns off neighbor
2894 * solicitation. For IPv4 we install a neighbor_setup function. We don't want
2895 * arp resolution but we want the hard header (packet socket will work
2896 * e.g. tcpdump)
2897 */
/* Per-neighbour setup hook: mark the entry NUD_NOARP with a fake
 * link-level address so no ARP is ever sent, while a hard header is
 * still built (keeps packet sockets / tcpdump working). */
static int qeth_l3_neigh_setup_noarp(struct neighbour *n)
{
	n->nud_state = NUD_NOARP;
	/* "FAKELL" is the traditional qeth placeholder MAC */
	memcpy(n->ha, "FAKELL", 6);
	n->output = n->ops->connected_output;
	return 0;
}
2905
2906static int
2907qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
2908{
2909 if (np->tbl->family == AF_INET)
2910 np->neigh_setup = qeth_l3_neigh_setup_noarp;
2911
2912 return 0;
2913}
2914
2915static int qeth_l3_setup_netdev(struct qeth_card *card)
2916{
2917 if (card->info.type == QETH_CARD_TYPE_OSAE) {
2918 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
2919 (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
2920#ifdef CONFIG_TR
2921 card->dev = alloc_trdev(0);
2922#endif
2923 if (!card->dev)
2924 return -ENODEV;
2925 } else {
2926 card->dev = alloc_etherdev(0);
2927 if (!card->dev)
2928 return -ENODEV;
2929 card->dev->neigh_setup = qeth_l3_neigh_setup;
2930
2931 /*IPv6 address autoconfiguration stuff*/
2932 qeth_l3_get_unique_id(card);
2933 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
2934 card->dev->dev_id = card->info.unique_id &
2935 0xffff;
2936 }
2937 } else if (card->info.type == QETH_CARD_TYPE_IQD) {
2938 card->dev = alloc_netdev(0, "hsi%d", ether_setup);
2939 if (!card->dev)
2940 return -ENODEV;
2941 card->dev->flags |= IFF_NOARP;
2942 qeth_l3_iqd_read_initial_mac(card);
2943 } else
2944 return -ENODEV;
2945
2946 card->dev->hard_start_xmit = qeth_l3_hard_start_xmit;
2947 card->dev->priv = card;
2948 card->dev->tx_timeout = &qeth_tx_timeout;
2949 card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
2950 card->dev->open = qeth_l3_open;
2951 card->dev->stop = qeth_l3_stop;
2952 card->dev->do_ioctl = qeth_l3_do_ioctl;
2953 card->dev->get_stats = qeth_get_stats;
2954 card->dev->change_mtu = qeth_change_mtu;
2955 card->dev->set_multicast_list = qeth_l3_set_multicast_list;
2956 card->dev->vlan_rx_register = qeth_l3_vlan_rx_register;
2957 card->dev->vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid;
2958 card->dev->vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid;
2959 card->dev->mtu = card->info.initial_mtu;
2960 SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
2961 card->dev->features |= NETIF_F_HW_VLAN_TX |
2962 NETIF_F_HW_VLAN_RX |
2963 NETIF_F_HW_VLAN_FILTER;
2964
2965 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
2966 return register_netdev(card->dev);
2967}
2968
/*
 * qdio inbound interrupt handler for the layer-3 discipline.
 * Processes @count buffers starting at @first_element on inbound
 * @queue, hands good frames to the l3 inbound path and returns each
 * buffer to the hardware.  A fatal ACTIVATE check condition triggers
 * card recovery instead.
 */
static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
		unsigned int status, unsigned int qdio_err,
		unsigned int siga_err, unsigned int queue, int first_element,
		int count, unsigned long card_ptr)
{
	struct net_device *net_dev;
	struct qeth_card *card;
	struct qeth_qdio_buffer *buffer;
	int index;
	int i;

	QETH_DBF_TEXT(trace, 6, "qdinput");
	card = (struct qeth_card *) card_ptr;
	net_dev = card->dev;
	if (card->options.performance_stats) {
		card->perf_stats.inbound_cnt++;
		card->perf_stats.inbound_start_time = qeth_get_micros();
	}
	if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
		if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
			/* unrecoverable qdio state: log and schedule a full
			 * card recovery, drop this interrupt on the floor */
			QETH_DBF_TEXT(trace, 1, "qdinchk");
			QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card));
			QETH_DBF_TEXT_(trace, 1, "%04X%04X",
					first_element, count);
			QETH_DBF_TEXT_(trace, 1, "%04X%04X", queue, status);
			qeth_schedule_recovery(card);
			return;
		}
	}
	for (i = first_element; i < (first_element + count); ++i) {
		/* the inbound queue is a ring of QDIO_MAX_BUFFERS_PER_Q */
		index = i % QDIO_MAX_BUFFERS_PER_Q;
		buffer = &card->qdio.in_q->bufs[index];
		/* only process buffers without per-buffer qdio errors */
		if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) &&
		      qeth_check_qdio_errors(buffer->buffer,
					     qdio_err, siga_err, "qinerr")))
			qeth_l3_process_inbound_buffer(card, buffer, index);
		/* clear buffer and give back to hardware */
		qeth_put_buffer_pool_entry(card, buffer->pool_entry);
		qeth_queue_input_buffer(card, index);
	}
	if (card->options.performance_stats)
		card->perf_stats.inbound_time += qeth_get_micros() -
			card->perf_stats.inbound_start_time;
}
3013
3014static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
3015{
3016 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3017
3018 qeth_l3_create_device_attributes(&gdev->dev);
3019 card->options.layer2 = 0;
3020 card->discipline.input_handler = (qdio_handler_t *)
3021 qeth_l3_qdio_input_handler;
3022 card->discipline.output_handler = (qdio_handler_t *)
3023 qeth_qdio_output_handler;
3024 card->discipline.recover = qeth_l3_recover;
3025 return 0;
3026}
3027
3028static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
3029{
3030 struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
3031
3032 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
3033
3034 if (cgdev->state == CCWGROUP_ONLINE) {
3035 card->use_hard_stop = 1;
3036 qeth_l3_set_offline(cgdev);
3037 }
3038
3039 if (card->dev) {
3040 unregister_netdev(card->dev);
3041 card->dev = NULL;
3042 }
3043
3044 qeth_l3_remove_device_attributes(&cgdev->dev);
3045 qeth_l3_clear_ip_list(card, 0, 0);
3046 qeth_l3_clear_ipato_list(card);
3047 return;
3048}
3049
3050static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3051{
3052 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3053 int rc = 0;
3054 enum qeth_card_states recover_flag;
3055
3056 BUG_ON(!card);
3057 QETH_DBF_TEXT(setup, 2, "setonlin");
3058 QETH_DBF_HEX(setup, 2, &card, sizeof(void *));
3059
3060 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
3061 if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) {
3062 PRINT_WARN("set_online of card %s interrupted by user!\n",
3063 CARD_BUS_ID(card));
3064 return -ERESTARTSYS;
3065 }
3066
3067 recover_flag = card->state;
3068 rc = ccw_device_set_online(CARD_RDEV(card));
3069 if (rc) {
3070 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3071 return -EIO;
3072 }
3073 rc = ccw_device_set_online(CARD_WDEV(card));
3074 if (rc) {
3075 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3076 return -EIO;
3077 }
3078 rc = ccw_device_set_online(CARD_DDEV(card));
3079 if (rc) {
3080 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3081 return -EIO;
3082 }
3083
3084 rc = qeth_core_hardsetup_card(card);
3085 if (rc) {
3086 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3087 goto out_remove;
3088 }
3089
3090 qeth_l3_query_ipassists(card, QETH_PROT_IPV4);
3091
3092 if (!card->dev && qeth_l3_setup_netdev(card))
3093 goto out_remove;
3094
3095 card->state = CARD_STATE_HARDSETUP;
3096 qeth_print_status_message(card);
3097
3098 /* softsetup */
3099 QETH_DBF_TEXT(setup, 2, "softsetp");
3100
3101 rc = qeth_send_startlan(card);
3102 if (rc) {
3103 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
3104 if (rc == 0xe080) {
3105 PRINT_WARN("LAN on card %s if offline! "
3106 "Waiting for STARTLAN from card.\n",
3107 CARD_BUS_ID(card));
3108 card->lan_online = 0;
3109 }
3110 return rc;
3111 } else
3112 card->lan_online = 1;
3113 qeth_set_large_send(card, card->options.large_send);
3114
3115 rc = qeth_l3_setadapter_parms(card);
3116 if (rc)
3117 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3118 rc = qeth_l3_start_ipassists(card);
3119 if (rc)
3120 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
3121 rc = qeth_l3_setrouting_v4(card);
3122 if (rc)
3123 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
3124 rc = qeth_l3_setrouting_v6(card);
3125 if (rc)
3126 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3127 netif_tx_disable(card->dev);
3128
3129 rc = qeth_init_qdio_queues(card);
3130 if (rc) {
3131 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
3132 goto out_remove;
3133 }
3134 card->state = CARD_STATE_SOFTSETUP;
3135 netif_carrier_on(card->dev);
3136
3137 qeth_set_allowed_threads(card, 0xffffffff, 0);
3138 if ((recover_flag == CARD_STATE_RECOVER) && recovery_mode) {
3139 qeth_l3_open(card->dev);
3140 qeth_l3_set_multicast_list(card->dev);
3141 }
3142 /* let user_space know that device is online */
3143 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
3144 return 0;
3145out_remove:
3146 card->use_hard_stop = 1;
3147 qeth_l3_stop_card(card, 0);
3148 ccw_device_set_offline(CARD_DDEV(card));
3149 ccw_device_set_offline(CARD_WDEV(card));
3150 ccw_device_set_offline(CARD_RDEV(card));
3151 if (recover_flag == CARD_STATE_RECOVER)
3152 card->state = CARD_STATE_RECOVER;
3153 else
3154 card->state = CARD_STATE_DOWN;
3155 return -ENODEV;
3156}
3157
/* ccwgroup set_online callback: regular (non-recovery) online path. */
static int qeth_l3_set_online(struct ccwgroup_device *gdev)
{
	return __qeth_l3_set_online(gdev, 0);
}
3162
/*
 * Take a layer-3 card offline: drop the carrier, stop the card and set
 * all three ccw subchannels offline (data, write, read — in that
 * order).  @recovery_mode is set when called from the recovery thread.
 * Returns 0, or -ERESTARTSYS when interrupted by the user.
 */
static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
			int recovery_mode)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
	int rc = 0, rc2 = 0, rc3 = 0;
	enum qeth_card_states recover_flag;

	QETH_DBF_TEXT(setup, 3, "setoffl");
	QETH_DBF_HEX(setup, 3, &card, sizeof(void *));

	if (card->dev && netif_carrier_ok(card->dev))
		netif_carrier_off(card->dev);
	recover_flag = card->state;
	if (qeth_l3_stop_card(card, recovery_mode) == -ERESTARTSYS) {
		PRINT_WARN("Stopping card %s interrupted by user!\n",
			   CARD_BUS_ID(card));
		return -ERESTARTSYS;
	}
	/* set all subchannels offline, reporting the first failure */
	rc  = ccw_device_set_offline(CARD_DDEV(card));
	rc2 = ccw_device_set_offline(CARD_WDEV(card));
	rc3 = ccw_device_set_offline(CARD_RDEV(card));
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
	/* remember that the card was running so recovery can restart it */
	if (recover_flag == CARD_STATE_UP)
		card->state = CARD_STATE_RECOVER;
	/* let user_space know that device is offline */
	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
	return 0;
}
3194
/* ccwgroup set_offline callback: regular (non-recovery) offline path. */
static int qeth_l3_set_offline(struct ccwgroup_device *cgdev)
{
	return __qeth_l3_set_offline(cgdev, 0);
}
3199
3200static int qeth_l3_recover(void *ptr)
3201{
3202 struct qeth_card *card;
3203 int rc = 0;
3204
3205 card = (struct qeth_card *) ptr;
3206 QETH_DBF_TEXT(trace, 2, "recover1");
3207 QETH_DBF_HEX(trace, 2, &card, sizeof(void *));
3208 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
3209 return 0;
3210 QETH_DBF_TEXT(trace, 2, "recover2");
3211 PRINT_WARN("Recovery of device %s started ...\n",
3212 CARD_BUS_ID(card));
3213 card->use_hard_stop = 1;
3214 __qeth_l3_set_offline(card->gdev, 1);
3215 rc = __qeth_l3_set_online(card->gdev, 1);
3216 /* don't run another scheduled recovery */
3217 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
3218 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
3219 if (!rc)
3220 PRINT_INFO("Device %s successfully recovered!\n",
3221 CARD_BUS_ID(card));
3222 else
3223 PRINT_INFO("Device %s could not be recovered!\n",
3224 CARD_BUS_ID(card));
3225 return 0;
3226}
3227
/* ccwgroup shutdown callback: drop IP bookkeeping and quiesce the
 * qdio queues so the card is quiet across reboot/kexec. */
static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	qeth_l3_clear_ip_list(card, 0, 0);
	qeth_qdio_clear_card(card, 0);
	qeth_clear_qdio_buffers(card);
}
3235
/* ccwgroup callbacks tying the layer-3 discipline into the qeth core;
 * exported so the core module can route bus events to this discipline. */
struct ccwgroup_driver qeth_l3_ccwgroup_driver = {
	.probe = qeth_l3_probe_device,
	.remove = qeth_l3_remove_device,
	.set_online = qeth_l3_set_online,
	.set_offline = qeth_l3_set_offline,
	.shutdown = qeth_l3_shutdown,
};
EXPORT_SYMBOL_GPL(qeth_l3_ccwgroup_driver);
3244
3245static int qeth_l3_ip_event(struct notifier_block *this,
3246 unsigned long event, void *ptr)
3247{
3248 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
3249 struct net_device *dev = (struct net_device *)ifa->ifa_dev->dev;
3250 struct qeth_ipaddr *addr;
3251 struct qeth_card *card;
3252
3253 QETH_DBF_TEXT(trace, 3, "ipevent");
3254 card = qeth_l3_get_card_from_dev(dev);
3255 if (!card)
3256 return NOTIFY_DONE;
3257
3258 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
3259 if (addr != NULL) {
3260 addr->u.a4.addr = ifa->ifa_address;
3261 addr->u.a4.mask = ifa->ifa_mask;
3262 addr->type = QETH_IP_TYPE_NORMAL;
3263 } else
3264 goto out;
3265
3266 switch (event) {
3267 case NETDEV_UP:
3268 if (!qeth_l3_add_ip(card, addr))
3269 kfree(addr);
3270 break;
3271 case NETDEV_DOWN:
3272 if (!qeth_l3_delete_ip(card, addr))
3273 kfree(addr);
3274 break;
3275 default:
3276 break;
3277 }
3278 qeth_l3_set_ip_addr_list(card);
3279out:
3280 return NOTIFY_DONE;
3281}
3282
3283static struct notifier_block qeth_l3_ip_notifier = {
3284 qeth_l3_ip_event,
3285 NULL,
3286};
3287
3288#ifdef CONFIG_QETH_IPV6
3289/**
3290 * IPv6 event handler
3291 */
3292static int qeth_l3_ip6_event(struct notifier_block *this,
3293 unsigned long event, void *ptr)
3294{
3295 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
3296 struct net_device *dev = (struct net_device *)ifa->idev->dev;
3297 struct qeth_ipaddr *addr;
3298 struct qeth_card *card;
3299
3300 QETH_DBF_TEXT(trace, 3, "ip6event");
3301
3302 card = qeth_l3_get_card_from_dev(dev);
3303 if (!card)
3304 return NOTIFY_DONE;
3305 if (!qeth_is_supported(card, IPA_IPV6))
3306 return NOTIFY_DONE;
3307
3308 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
3309 if (addr != NULL) {
3310 memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
3311 addr->u.a6.pfxlen = ifa->prefix_len;
3312 addr->type = QETH_IP_TYPE_NORMAL;
3313 } else
3314 goto out;
3315
3316 switch (event) {
3317 case NETDEV_UP:
3318 if (!qeth_l3_add_ip(card, addr))
3319 kfree(addr);
3320 break;
3321 case NETDEV_DOWN:
3322 if (!qeth_l3_delete_ip(card, addr))
3323 kfree(addr);
3324 break;
3325 default:
3326 break;
3327 }
3328 qeth_l3_set_ip_addr_list(card);
3329out:
3330 return NOTIFY_DONE;
3331}
3332
3333static struct notifier_block qeth_l3_ip6_notifier = {
3334 qeth_l3_ip6_event,
3335 NULL,
3336};
3337#endif
3338
/*
 * Register the IPv4 (and, if configured, IPv6) address notifiers.
 * On IPv6 registration failure the IPv4 notifier is rolled back so the
 * caller sees all-or-nothing.  Returns 0 or a negative errno.
 */
static int qeth_l3_register_notifiers(void)
{
	int rc;

	QETH_DBF_TEXT(trace, 5, "regnotif");
	rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
	if (rc)
		return rc;
#ifdef CONFIG_QETH_IPV6
	rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
	if (rc) {
		/* roll back the IPv4 notifier on partial failure */
		unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
		return rc;
	}
#else
	PRINT_WARN("layer 3 discipline no IPv6 support\n");
#endif
	return 0;
}
3358
3359static void qeth_l3_unregister_notifiers(void)
3360{
3361
3362 QETH_DBF_TEXT(trace, 5, "unregnot");
3363 BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
3364#ifdef CONFIG_QETH_IPV6
3365 BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
3366#endif /* QETH_IPV6 */
3367}
3368
3369static int __init qeth_l3_init(void)
3370{
3371 int rc = 0;
3372
3373 PRINT_INFO("register layer 3 discipline\n");
3374 rc = qeth_l3_register_notifiers();
3375 return rc;
3376}
3377
/* Module exit: detach the address notifiers and announce removal. */
static void __exit qeth_l3_exit(void)
{
	qeth_l3_unregister_notifiers();
	PRINT_INFO("unregister layer 3 discipline\n");
}
3383
/* module entry points and metadata for qeth_l3.ko */
module_init(qeth_l3_init);
module_exit(qeth_l3_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth layer 3 discipline");
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
new file mode 100644
index 000000000000..08f51fd902c4
--- /dev/null
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -0,0 +1,1051 @@
1/*
2 * drivers/s390/net/qeth_l3_sys.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
6 * Frank Pavlic <fpavlic@de.ibm.com>,
7 * Thomas Spatzier <tspat@de.ibm.com>,
8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */
10
11#include "qeth_l3.h"
12
/* Like DEVICE_ATTR(), but lets the sysfs file name (_name) differ from
 * the C identifier (_id). Needed because several attribute groups below
 * reuse file names such as "add4"/"del4" inside different directories. */
#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
15
16static const char *qeth_l3_get_checksum_str(struct qeth_card *card)
17{
18 if (card->options.checksum_type == SW_CHECKSUMMING)
19 return "sw";
20 else if (card->options.checksum_type == HW_CHECKSUMMING)
21 return "hw";
22 else
23 return "no";
24}
25
26static ssize_t qeth_l3_dev_route_show(struct qeth_card *card,
27 struct qeth_routing_info *route, char *buf)
28{
29 switch (route->type) {
30 case PRIMARY_ROUTER:
31 return sprintf(buf, "%s\n", "primary router");
32 case SECONDARY_ROUTER:
33 return sprintf(buf, "%s\n", "secondary router");
34 case MULTICAST_ROUTER:
35 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
36 return sprintf(buf, "%s\n", "multicast router+");
37 else
38 return sprintf(buf, "%s\n", "multicast router");
39 case PRIMARY_CONNECTOR:
40 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
41 return sprintf(buf, "%s\n", "primary connector+");
42 else
43 return sprintf(buf, "%s\n", "primary connector");
44 case SECONDARY_CONNECTOR:
45 if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
46 return sprintf(buf, "%s\n", "secondary connector+");
47 else
48 return sprintf(buf, "%s\n", "secondary connector");
49 default:
50 return sprintf(buf, "%s\n", "no");
51 }
52}
53
54static ssize_t qeth_l3_dev_route4_show(struct device *dev,
55 struct device_attribute *attr, char *buf)
56{
57 struct qeth_card *card = dev_get_drvdata(dev);
58
59 if (!card)
60 return -EINVAL;
61
62 return qeth_l3_dev_route_show(card, &card->options.route4, buf);
63}
64
65static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
66 struct qeth_routing_info *route, enum qeth_prot_versions prot,
67 const char *buf, size_t count)
68{
69 enum qeth_routing_types old_route_type = route->type;
70 char *tmp;
71 int rc;
72
73 tmp = strsep((char **) &buf, "\n");
74
75 if (!strcmp(tmp, "no_router")) {
76 route->type = NO_ROUTER;
77 } else if (!strcmp(tmp, "primary_connector")) {
78 route->type = PRIMARY_CONNECTOR;
79 } else if (!strcmp(tmp, "secondary_connector")) {
80 route->type = SECONDARY_CONNECTOR;
81 } else if (!strcmp(tmp, "primary_router")) {
82 route->type = PRIMARY_ROUTER;
83 } else if (!strcmp(tmp, "secondary_router")) {
84 route->type = SECONDARY_ROUTER;
85 } else if (!strcmp(tmp, "multicast_router")) {
86 route->type = MULTICAST_ROUTER;
87 } else {
88 PRINT_WARN("Invalid routing type '%s'.\n", tmp);
89 return -EINVAL;
90 }
91 if (((card->state == CARD_STATE_SOFTSETUP) ||
92 (card->state == CARD_STATE_UP)) &&
93 (old_route_type != route->type)) {
94 if (prot == QETH_PROT_IPV4)
95 rc = qeth_l3_setrouting_v4(card);
96 else if (prot == QETH_PROT_IPV6)
97 rc = qeth_l3_setrouting_v6(card);
98 }
99 return count;
100}
101
102static ssize_t qeth_l3_dev_route4_store(struct device *dev,
103 struct device_attribute *attr, const char *buf, size_t count)
104{
105 struct qeth_card *card = dev_get_drvdata(dev);
106
107 if (!card)
108 return -EINVAL;
109
110 return qeth_l3_dev_route_store(card, &card->options.route4,
111 QETH_PROT_IPV4, buf, count);
112}
113
114static DEVICE_ATTR(route4, 0644, qeth_l3_dev_route4_show,
115 qeth_l3_dev_route4_store);
116
117static ssize_t qeth_l3_dev_route6_show(struct device *dev,
118 struct device_attribute *attr, char *buf)
119{
120 struct qeth_card *card = dev_get_drvdata(dev);
121
122 if (!card)
123 return -EINVAL;
124
125 if (!qeth_is_supported(card, IPA_IPV6))
126 return sprintf(buf, "%s\n", "n/a");
127
128 return qeth_l3_dev_route_show(card, &card->options.route6, buf);
129}
130
131static ssize_t qeth_l3_dev_route6_store(struct device *dev,
132 struct device_attribute *attr, const char *buf, size_t count)
133{
134 struct qeth_card *card = dev_get_drvdata(dev);
135
136 if (!card)
137 return -EINVAL;
138
139 if (!qeth_is_supported(card, IPA_IPV6)) {
140 PRINT_WARN("IPv6 not supported for interface %s.\n"
141 "Routing status no changed.\n",
142 QETH_CARD_IFNAME(card));
143 return -ENOTSUPP;
144 }
145
146 return qeth_l3_dev_route_store(card, &card->options.route6,
147 QETH_PROT_IPV6, buf, count);
148}
149
150static DEVICE_ATTR(route6, 0644, qeth_l3_dev_route6_show,
151 qeth_l3_dev_route6_store);
152
153static ssize_t qeth_l3_dev_fake_broadcast_show(struct device *dev,
154 struct device_attribute *attr, char *buf)
155{
156 struct qeth_card *card = dev_get_drvdata(dev);
157
158 if (!card)
159 return -EINVAL;
160
161 return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0);
162}
163
164static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
165 struct device_attribute *attr, const char *buf, size_t count)
166{
167 struct qeth_card *card = dev_get_drvdata(dev);
168 char *tmp;
169 int i;
170
171 if (!card)
172 return -EINVAL;
173
174 if ((card->state != CARD_STATE_DOWN) &&
175 (card->state != CARD_STATE_RECOVER))
176 return -EPERM;
177
178 i = simple_strtoul(buf, &tmp, 16);
179 if ((i == 0) || (i == 1))
180 card->options.fake_broadcast = i;
181 else {
182 PRINT_WARN("fake_broadcast: write 0 or 1 to this file!\n");
183 return -EINVAL;
184 }
185 return count;
186}
187
188static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show,
189 qeth_l3_dev_fake_broadcast_store);
190
191static ssize_t qeth_l3_dev_broadcast_mode_show(struct device *dev,
192 struct device_attribute *attr, char *buf)
193{
194 struct qeth_card *card = dev_get_drvdata(dev);
195
196 if (!card)
197 return -EINVAL;
198
199 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
200 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
201 return sprintf(buf, "n/a\n");
202
203 return sprintf(buf, "%s\n", (card->options.broadcast_mode ==
204 QETH_TR_BROADCAST_ALLRINGS)?
205 "all rings":"local");
206}
207
208static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev,
209 struct device_attribute *attr, const char *buf, size_t count)
210{
211 struct qeth_card *card = dev_get_drvdata(dev);
212 char *tmp;
213
214 if (!card)
215 return -EINVAL;
216
217 if ((card->state != CARD_STATE_DOWN) &&
218 (card->state != CARD_STATE_RECOVER))
219 return -EPERM;
220
221 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
222 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
223 PRINT_WARN("Device is not a tokenring device!\n");
224 return -EINVAL;
225 }
226
227 tmp = strsep((char **) &buf, "\n");
228
229 if (!strcmp(tmp, "local")) {
230 card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL;
231 return count;
232 } else if (!strcmp(tmp, "all_rings")) {
233 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
234 return count;
235 } else {
236 PRINT_WARN("broadcast_mode: invalid mode %s!\n",
237 tmp);
238 return -EINVAL;
239 }
240 return count;
241}
242
243static DEVICE_ATTR(broadcast_mode, 0644, qeth_l3_dev_broadcast_mode_show,
244 qeth_l3_dev_broadcast_mode_store);
245
246static ssize_t qeth_l3_dev_canonical_macaddr_show(struct device *dev,
247 struct device_attribute *attr, char *buf)
248{
249 struct qeth_card *card = dev_get_drvdata(dev);
250
251 if (!card)
252 return -EINVAL;
253
254 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
255 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
256 return sprintf(buf, "n/a\n");
257
258 return sprintf(buf, "%i\n", (card->options.macaddr_mode ==
259 QETH_TR_MACADDR_CANONICAL)? 1:0);
260}
261
262static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev,
263 struct device_attribute *attr, const char *buf, size_t count)
264{
265 struct qeth_card *card = dev_get_drvdata(dev);
266 char *tmp;
267 int i;
268
269 if (!card)
270 return -EINVAL;
271
272 if ((card->state != CARD_STATE_DOWN) &&
273 (card->state != CARD_STATE_RECOVER))
274 return -EPERM;
275
276 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
277 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
278 PRINT_WARN("Device is not a tokenring device!\n");
279 return -EINVAL;
280 }
281
282 i = simple_strtoul(buf, &tmp, 16);
283 if ((i == 0) || (i == 1))
284 card->options.macaddr_mode = i?
285 QETH_TR_MACADDR_CANONICAL :
286 QETH_TR_MACADDR_NONCANONICAL;
287 else {
288 PRINT_WARN("canonical_macaddr: write 0 or 1 to this file!\n");
289 return -EINVAL;
290 }
291 return count;
292}
293
294static DEVICE_ATTR(canonical_macaddr, 0644, qeth_l3_dev_canonical_macaddr_show,
295 qeth_l3_dev_canonical_macaddr_store);
296
297static ssize_t qeth_l3_dev_checksum_show(struct device *dev,
298 struct device_attribute *attr, char *buf)
299{
300 struct qeth_card *card = dev_get_drvdata(dev);
301
302 if (!card)
303 return -EINVAL;
304
305 return sprintf(buf, "%s checksumming\n",
306 qeth_l3_get_checksum_str(card));
307}
308
309static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
310 struct device_attribute *attr, const char *buf, size_t count)
311{
312 struct qeth_card *card = dev_get_drvdata(dev);
313 char *tmp;
314
315 if (!card)
316 return -EINVAL;
317
318 if ((card->state != CARD_STATE_DOWN) &&
319 (card->state != CARD_STATE_RECOVER))
320 return -EPERM;
321
322 tmp = strsep((char **) &buf, "\n");
323 if (!strcmp(tmp, "sw_checksumming"))
324 card->options.checksum_type = SW_CHECKSUMMING;
325 else if (!strcmp(tmp, "hw_checksumming"))
326 card->options.checksum_type = HW_CHECKSUMMING;
327 else if (!strcmp(tmp, "no_checksumming"))
328 card->options.checksum_type = NO_CHECKSUMMING;
329 else {
330 PRINT_WARN("Unknown checksumming type '%s'\n", tmp);
331 return -EINVAL;
332 }
333 return count;
334}
335
336static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
337 qeth_l3_dev_checksum_store);
338
/* Top-level layer-3 sysfs attributes, created directly on the ccwgroup
 * device directory (no subgroup name). */
static struct attribute *qeth_l3_device_attrs[] = {
	&dev_attr_route4.attr,
	&dev_attr_route6.attr,
	&dev_attr_fake_broadcast.attr,
	&dev_attr_broadcast_mode.attr,
	&dev_attr_canonical_macaddr.attr,
	&dev_attr_checksumming.attr,
	NULL,
};

static struct attribute_group qeth_l3_device_attr_group = {
	.attrs = qeth_l3_device_attrs,
};
352
353static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev,
354 struct device_attribute *attr, char *buf)
355{
356 struct qeth_card *card = dev_get_drvdata(dev);
357
358 if (!card)
359 return -EINVAL;
360
361 return sprintf(buf, "%i\n", card->ipato.enabled? 1:0);
362}
363
364static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
365 struct device_attribute *attr, const char *buf, size_t count)
366{
367 struct qeth_card *card = dev_get_drvdata(dev);
368 char *tmp;
369
370 if (!card)
371 return -EINVAL;
372
373 if ((card->state != CARD_STATE_DOWN) &&
374 (card->state != CARD_STATE_RECOVER))
375 return -EPERM;
376
377 tmp = strsep((char **) &buf, "\n");
378 if (!strcmp(tmp, "toggle")) {
379 card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
380 } else if (!strcmp(tmp, "1")) {
381 card->ipato.enabled = 1;
382 } else if (!strcmp(tmp, "0")) {
383 card->ipato.enabled = 0;
384 } else {
385 PRINT_WARN("ipato_enable: write 0, 1 or 'toggle' to "
386 "this file\n");
387 return -EINVAL;
388 }
389 return count;
390}
391
392static QETH_DEVICE_ATTR(ipato_enable, enable, 0644,
393 qeth_l3_dev_ipato_enable_show,
394 qeth_l3_dev_ipato_enable_store);
395
396static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev,
397 struct device_attribute *attr, char *buf)
398{
399 struct qeth_card *card = dev_get_drvdata(dev);
400
401 if (!card)
402 return -EINVAL;
403
404 return sprintf(buf, "%i\n", card->ipato.invert4? 1:0);
405}
406
407static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
408 struct device_attribute *attr,
409 const char *buf, size_t count)
410{
411 struct qeth_card *card = dev_get_drvdata(dev);
412 char *tmp;
413
414 if (!card)
415 return -EINVAL;
416
417 tmp = strsep((char **) &buf, "\n");
418 if (!strcmp(tmp, "toggle")) {
419 card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
420 } else if (!strcmp(tmp, "1")) {
421 card->ipato.invert4 = 1;
422 } else if (!strcmp(tmp, "0")) {
423 card->ipato.invert4 = 0;
424 } else {
425 PRINT_WARN("ipato_invert4: write 0, 1 or 'toggle' to "
426 "this file\n");
427 return -EINVAL;
428 }
429 return count;
430}
431
432static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
433 qeth_l3_dev_ipato_invert4_show,
434 qeth_l3_dev_ipato_invert4_store);
435
/*
 * List all IPA-takeover entries of @proto as "<addr>/<mask>" lines into
 * @buf (one sysfs page). Entries that would overflow PAGE_SIZE are
 * silently dropped. Returns the number of bytes written.
 */
static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
			enum qeth_prot_versions proto)
{
	struct qeth_ipato_entry *ipatoe;
	unsigned long flags;
	char addr_str[40];
	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
	int i = 0;

	/* worst-case printed address length: 12 for IPv4, 40 for IPv6 */
	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
	/* add strlen for "/<mask>\n" */
	entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
	/* ip_lock guards the takeover list against concurrent updates */
	spin_lock_irqsave(&card->ip_lock, flags);
	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
		if (ipatoe->proto != proto)
			continue;
		/* String must not be longer than PAGE_SIZE. So we check if
		 * string length gets near PAGE_SIZE. Then we can savely display
		 * the next IPv6 address (worst case, compared to IPv4) */
		if ((PAGE_SIZE - i) <= entry_len)
			break;
		qeth_l3_ipaddr_to_string(proto, ipatoe->addr, addr_str);
		i += snprintf(buf + i, PAGE_SIZE - i,
			      "%s/%i\n", addr_str, ipatoe->mask_bits);
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
	/* trailing blank line terminates the listing */
	i += snprintf(buf + i, PAGE_SIZE - i, "\n");

	return i;
}
466
467static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
468 struct device_attribute *attr, char *buf)
469{
470 struct qeth_card *card = dev_get_drvdata(dev);
471
472 if (!card)
473 return -EINVAL;
474
475 return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
476}
477
/*
 * Parse an "<ip addr>/<mask bits>" takeover spec from sysfs input.
 * On success fills @addr (binary form) and @mask_bits and returns 0;
 * returns -EINVAL on any format error. @addr must hold 16 bytes.
 */
static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto,
		  u8 *addr, int *mask_bits)
{
	const char *start, *end;
	char *tmp;
	char buffer[40] = {0, };	/* max textual IPv6 length + NUL */

	start = buf;
	/* get address string */
	end = strchr(start, '/');
	if (!end || (end - start >= 40)) {
		PRINT_WARN("Invalid format for ipato_addx/delx. "
			   "Use <ip addr>/<mask bits>\n");
		return -EINVAL;
	}
	/* buffer is zero-filled, so the copy is always NUL-terminated
	 * (end - start < 40 was checked above) */
	strncpy(buffer, start, end - start);
	if (qeth_l3_string_to_ipaddr(buffer, proto, addr)) {
		PRINT_WARN("Invalid IP address format!\n");
		return -EINVAL;
	}
	start = end + 1;
	*mask_bits = simple_strtoul(start, &tmp, 10);
	/* tmp == start catches an empty or non-numeric mask; negative
	 * input is rejected by simple_strtoul itself */
	if (!strlen(start) ||
	    (tmp == start) ||
	    (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) {
		PRINT_WARN("Invalid mask bits for ipato_addx/delx !\n");
		return -EINVAL;
	}
	return 0;
}
508
509static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
510 struct qeth_card *card, enum qeth_prot_versions proto)
511{
512 struct qeth_ipato_entry *ipatoe;
513 u8 addr[16];
514 int mask_bits;
515 int rc;
516
517 rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
518 if (rc)
519 return rc;
520
521 ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL);
522 if (!ipatoe) {
523 PRINT_WARN("No memory to allocate ipato entry\n");
524 return -ENOMEM;
525 }
526 ipatoe->proto = proto;
527 memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
528 ipatoe->mask_bits = mask_bits;
529
530 rc = qeth_l3_add_ipato_entry(card, ipatoe);
531 if (rc) {
532 kfree(ipatoe);
533 return rc;
534 }
535
536 return count;
537}
538
539static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev,
540 struct device_attribute *attr, const char *buf, size_t count)
541{
542 struct qeth_card *card = dev_get_drvdata(dev);
543
544 if (!card)
545 return -EINVAL;
546
547 return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4);
548}
549
550static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
551 qeth_l3_dev_ipato_add4_show,
552 qeth_l3_dev_ipato_add4_store);
553
554static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
555 struct qeth_card *card, enum qeth_prot_versions proto)
556{
557 u8 addr[16];
558 int mask_bits;
559 int rc;
560
561 rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
562 if (rc)
563 return rc;
564
565 qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
566
567 return count;
568}
569
570static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev,
571 struct device_attribute *attr, const char *buf, size_t count)
572{
573 struct qeth_card *card = dev_get_drvdata(dev);
574
575 if (!card)
576 return -EINVAL;
577
578 return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4);
579}
580
581static QETH_DEVICE_ATTR(ipato_del4, del4, 0200, NULL,
582 qeth_l3_dev_ipato_del4_store);
583
584static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev,
585 struct device_attribute *attr, char *buf)
586{
587 struct qeth_card *card = dev_get_drvdata(dev);
588
589 if (!card)
590 return -EINVAL;
591
592 return sprintf(buf, "%i\n", card->ipato.invert6? 1:0);
593}
594
595static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
596 struct device_attribute *attr, const char *buf, size_t count)
597{
598 struct qeth_card *card = dev_get_drvdata(dev);
599 char *tmp;
600
601 if (!card)
602 return -EINVAL;
603
604 tmp = strsep((char **) &buf, "\n");
605 if (!strcmp(tmp, "toggle")) {
606 card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
607 } else if (!strcmp(tmp, "1")) {
608 card->ipato.invert6 = 1;
609 } else if (!strcmp(tmp, "0")) {
610 card->ipato.invert6 = 0;
611 } else {
612 PRINT_WARN("ipato_invert6: write 0, 1 or 'toggle' to "
613 "this file\n");
614 return -EINVAL;
615 }
616 return count;
617}
618
619static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644,
620 qeth_l3_dev_ipato_invert6_show,
621 qeth_l3_dev_ipato_invert6_store);
622
623
624static ssize_t qeth_l3_dev_ipato_add6_show(struct device *dev,
625 struct device_attribute *attr, char *buf)
626{
627 struct qeth_card *card = dev_get_drvdata(dev);
628
629 if (!card)
630 return -EINVAL;
631
632 return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV6);
633}
634
635static ssize_t qeth_l3_dev_ipato_add6_store(struct device *dev,
636 struct device_attribute *attr, const char *buf, size_t count)
637{
638 struct qeth_card *card = dev_get_drvdata(dev);
639
640 if (!card)
641 return -EINVAL;
642
643 return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6);
644}
645
646static QETH_DEVICE_ATTR(ipato_add6, add6, 0644,
647 qeth_l3_dev_ipato_add6_show,
648 qeth_l3_dev_ipato_add6_store);
649
650static ssize_t qeth_l3_dev_ipato_del6_store(struct device *dev,
651 struct device_attribute *attr, const char *buf, size_t count)
652{
653 struct qeth_card *card = dev_get_drvdata(dev);
654
655 if (!card)
656 return -EINVAL;
657
658 return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6);
659}
660
661static QETH_DEVICE_ATTR(ipato_del6, del6, 0200, NULL,
662 qeth_l3_dev_ipato_del6_store);
663
/* Attributes published under the "ipa_takeover" sysfs subdirectory. */
static struct attribute *qeth_ipato_device_attrs[] = {
	&dev_attr_ipato_enable.attr,
	&dev_attr_ipato_invert4.attr,
	&dev_attr_ipato_add4.attr,
	&dev_attr_ipato_del4.attr,
	&dev_attr_ipato_invert6.attr,
	&dev_attr_ipato_add6.attr,
	&dev_attr_ipato_del6.attr,
	NULL,
};

static struct attribute_group qeth_device_ipato_group = {
	.name = "ipa_takeover",
	.attrs = qeth_ipato_device_attrs,
};
679
/*
 * List all virtual IP addresses (VIPA) of @proto, one per line, into
 * @buf (one sysfs page). Entries that would overflow PAGE_SIZE are
 * silently dropped. Returns the number of bytes written.
 */
static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
			enum qeth_prot_versions proto)
{
	struct qeth_ipaddr *ipaddr;
	char addr_str[40];
	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
	unsigned long flags;
	int i = 0;

	/* worst-case printed address length: 12 for IPv4, 40 for IPv6 */
	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
	entry_len += 2; /* \n + terminator */
	/* ip_lock guards the card's address list */
	spin_lock_irqsave(&card->ip_lock, flags);
	list_for_each_entry(ipaddr, &card->ip_list, entry) {
		if (ipaddr->proto != proto)
			continue;
		if (ipaddr->type != QETH_IP_TYPE_VIPA)
			continue;
		/* String must not be longer than PAGE_SIZE. So we check if
		 * string length gets near PAGE_SIZE. Then we can savely display
		 * the next IPv6 address (worst case, compared to IPv4) */
		if ((PAGE_SIZE - i) <= entry_len)
			break;
		qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
			addr_str);
		i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
	/* trailing blank line terminates the listing */
	i += snprintf(buf + i, PAGE_SIZE - i, "\n");

	return i;
}
711
712static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
713 struct device_attribute *attr, char *buf)
714{
715 struct qeth_card *card = dev_get_drvdata(dev);
716
717 if (!card)
718 return -EINVAL;
719
720 return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
721}
722
723static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto,
724 u8 *addr)
725{
726 if (qeth_l3_string_to_ipaddr(buf, proto, addr)) {
727 PRINT_WARN("Invalid IP address format!\n");
728 return -EINVAL;
729 }
730 return 0;
731}
732
733static ssize_t qeth_l3_dev_vipa_add_store(const char *buf, size_t count,
734 struct qeth_card *card, enum qeth_prot_versions proto)
735{
736 u8 addr[16] = {0, };
737 int rc;
738
739 rc = qeth_l3_parse_vipae(buf, proto, addr);
740 if (rc)
741 return rc;
742
743 rc = qeth_l3_add_vipa(card, proto, addr);
744 if (rc)
745 return rc;
746
747 return count;
748}
749
750static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev,
751 struct device_attribute *attr, const char *buf, size_t count)
752{
753 struct qeth_card *card = dev_get_drvdata(dev);
754
755 if (!card)
756 return -EINVAL;
757
758 return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4);
759}
760
761static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
762 qeth_l3_dev_vipa_add4_show,
763 qeth_l3_dev_vipa_add4_store);
764
765static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
766 struct qeth_card *card, enum qeth_prot_versions proto)
767{
768 u8 addr[16];
769 int rc;
770
771 rc = qeth_l3_parse_vipae(buf, proto, addr);
772 if (rc)
773 return rc;
774
775 qeth_l3_del_vipa(card, proto, addr);
776
777 return count;
778}
779
780static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
781 struct device_attribute *attr, const char *buf, size_t count)
782{
783 struct qeth_card *card = dev_get_drvdata(dev);
784
785 if (!card)
786 return -EINVAL;
787
788 return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
789}
790
791static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
792 qeth_l3_dev_vipa_del4_store);
793
794static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev,
795 struct device_attribute *attr, char *buf)
796{
797 struct qeth_card *card = dev_get_drvdata(dev);
798
799 if (!card)
800 return -EINVAL;
801
802 return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV6);
803}
804
805static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
806 struct device_attribute *attr, const char *buf, size_t count)
807{
808 struct qeth_card *card = dev_get_drvdata(dev);
809
810 if (!card)
811 return -EINVAL;
812
813 return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
814}
815
816static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
817 qeth_l3_dev_vipa_add6_show,
818 qeth_l3_dev_vipa_add6_store);
819
820static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev,
821 struct device_attribute *attr, const char *buf, size_t count)
822{
823 struct qeth_card *card = dev_get_drvdata(dev);
824
825 if (!card)
826 return -EINVAL;
827
828 return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
829}
830
831static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL,
832 qeth_l3_dev_vipa_del6_store);
833
/* Attributes published under the "vipa" sysfs subdirectory. */
static struct attribute *qeth_vipa_device_attrs[] = {
	&dev_attr_vipa_add4.attr,
	&dev_attr_vipa_del4.attr,
	&dev_attr_vipa_add6.attr,
	&dev_attr_vipa_del6.attr,
	NULL,
};

static struct attribute_group qeth_device_vipa_group = {
	.name = "vipa",
	.attrs = qeth_vipa_device_attrs,
};
846
/*
 * List all proxy-receive (RXIP) addresses of @proto, one per line, into
 * @buf (one sysfs page). Entries that would overflow PAGE_SIZE are
 * silently dropped. Returns the number of bytes written.
 */
static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
		       enum qeth_prot_versions proto)
{
	struct qeth_ipaddr *ipaddr;
	char addr_str[40];
	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
	unsigned long flags;
	int i = 0;

	/* worst-case printed address length: 12 for IPv4, 40 for IPv6 */
	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
	entry_len += 2; /* \n + terminator */
	/* ip_lock guards the card's address list */
	spin_lock_irqsave(&card->ip_lock, flags);
	list_for_each_entry(ipaddr, &card->ip_list, entry) {
		if (ipaddr->proto != proto)
			continue;
		if (ipaddr->type != QETH_IP_TYPE_RXIP)
			continue;
		/* String must not be longer than PAGE_SIZE. So we check if
		 * string length gets near PAGE_SIZE. Then we can savely display
		 * the next IPv6 address (worst case, compared to IPv4) */
		if ((PAGE_SIZE - i) <= entry_len)
			break;
		qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
			addr_str);
		i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
	/* trailing blank line terminates the listing */
	i += snprintf(buf + i, PAGE_SIZE - i, "\n");

	return i;
}
878
879static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
880 struct device_attribute *attr, char *buf)
881{
882 struct qeth_card *card = dev_get_drvdata(dev);
883
884 if (!card)
885 return -EINVAL;
886
887 return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
888}
889
890static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
891 u8 *addr)
892{
893 if (qeth_l3_string_to_ipaddr(buf, proto, addr)) {
894 PRINT_WARN("Invalid IP address format!\n");
895 return -EINVAL;
896 }
897 return 0;
898}
899
900static ssize_t qeth_l3_dev_rxip_add_store(const char *buf, size_t count,
901 struct qeth_card *card, enum qeth_prot_versions proto)
902{
903 u8 addr[16] = {0, };
904 int rc;
905
906 rc = qeth_l3_parse_rxipe(buf, proto, addr);
907 if (rc)
908 return rc;
909
910 rc = qeth_l3_add_rxip(card, proto, addr);
911 if (rc)
912 return rc;
913
914 return count;
915}
916
917static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
918 struct device_attribute *attr, const char *buf, size_t count)
919{
920 struct qeth_card *card = dev_get_drvdata(dev);
921
922 if (!card)
923 return -EINVAL;
924
925 return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
926}
927
928static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
929 qeth_l3_dev_rxip_add4_show,
930 qeth_l3_dev_rxip_add4_store);
931
932static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
933 struct qeth_card *card, enum qeth_prot_versions proto)
934{
935 u8 addr[16];
936 int rc;
937
938 rc = qeth_l3_parse_rxipe(buf, proto, addr);
939 if (rc)
940 return rc;
941
942 qeth_l3_del_rxip(card, proto, addr);
943
944 return count;
945}
946
947static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev,
948 struct device_attribute *attr, const char *buf, size_t count)
949{
950 struct qeth_card *card = dev_get_drvdata(dev);
951
952 if (!card)
953 return -EINVAL;
954
955 return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4);
956}
957
958static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
959 qeth_l3_dev_rxip_del4_store);
960
961static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev,
962 struct device_attribute *attr, char *buf)
963{
964 struct qeth_card *card = dev_get_drvdata(dev);
965
966 if (!card)
967 return -EINVAL;
968
969 return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV6);
970}
971
972static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
973 struct device_attribute *attr, const char *buf, size_t count)
974{
975 struct qeth_card *card = dev_get_drvdata(dev);
976
977 if (!card)
978 return -EINVAL;
979
980 return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6);
981}
982
983static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
984 qeth_l3_dev_rxip_add6_show,
985 qeth_l3_dev_rxip_add6_store);
986
987static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev,
988 struct device_attribute *attr, const char *buf, size_t count)
989{
990 struct qeth_card *card = dev_get_drvdata(dev);
991
992 if (!card)
993 return -EINVAL;
994
995 return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6);
996}
997
998static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL,
999 qeth_l3_dev_rxip_del6_store);
1000
/* Attributes published under the "rxip" sysfs subdirectory. */
static struct attribute *qeth_rxip_device_attrs[] = {
	&dev_attr_rxip_add4.attr,
	&dev_attr_rxip_del4.attr,
	&dev_attr_rxip_add6.attr,
	&dev_attr_rxip_del6.attr,
	NULL,
};

static struct attribute_group qeth_device_rxip_group = {
	.name = "rxip",
	.attrs = qeth_rxip_device_attrs,
};
1013
1014int qeth_l3_create_device_attributes(struct device *dev)
1015{
1016 int ret;
1017
1018 ret = sysfs_create_group(&dev->kobj, &qeth_l3_device_attr_group);
1019 if (ret)
1020 return ret;
1021
1022 ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group);
1023 if (ret) {
1024 sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
1025 return ret;
1026 }
1027
1028 ret = sysfs_create_group(&dev->kobj, &qeth_device_vipa_group);
1029 if (ret) {
1030 sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
1031 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1032 return ret;
1033 }
1034
1035 ret = sysfs_create_group(&dev->kobj, &qeth_device_rxip_group);
1036 if (ret) {
1037 sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
1038 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
1039 sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
1040 return ret;
1041 }
1042 return 0;
1043}
1044
/* Remove every sysfs group created by qeth_l3_create_device_attributes(). */
void qeth_l3_remove_device_attributes(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
	sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
	sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
	sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
}